
Commit

upgrade to 1.7
wenbingl committed Apr 9, 2020
1 parent 6546438 commit 5f803a9
Showing 11 changed files with 26 additions and 27 deletions.
2 changes: 1 addition & 1 deletion onnxmltools/__init__.py
@@ -9,7 +9,7 @@
This framework converts any machine learned model into onnx format
which is a common language to describe any machine learned model.
"""
__version__ = "1.6.5"
__version__ = "1.7.0"
__author__ = "Microsoft"
__producer__ = "OnnxMLTools"
__producer_version__ = __version__
5 changes: 3 additions & 2 deletions onnxmltools/convert/coreml/convert.py
@@ -6,8 +6,9 @@

import coremltools
from uuid import uuid4
+from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from ...proto import onnx
-from ...proto import onnx_proto, get_opset_number_from_onnx
+from ...proto import onnx_proto
from ..common._topology import convert_topology
from ._parse import parse_coreml

@@ -55,7 +56,7 @@ def convert(model, name=None, initial_types=None, doc_string='', target_opset=No
if name is None:
name = str(uuid4().hex)

-target_opset = target_opset if target_opset else get_opset_number_from_onnx()
+target_opset = target_opset if target_opset else get_maximum_opset_supported()
# Parse CoreML model as our internal data structure (i.e., Topology)
topology = parse_coreml(spec, initial_types, target_opset, custom_conversion_functions, custom_shape_calculators)

5 changes: 3 additions & 2 deletions onnxmltools/convert/h2o/convert.py
@@ -9,7 +9,8 @@
import tempfile
import h2o

-from ...proto import onnx, get_opset_number_from_onnx
+from onnxconverter_common.onnx_ex import get_maximum_opset_supported
+from ...proto import onnx
from ..common._topology import convert_topology
from ..common.data_types import FloatTensorType
from ._parse import parse_h2o
@@ -64,7 +65,7 @@ def convert(model, name=None, initial_types=None, doc_string='', target_opset=No
if mojo_model["params"]["algo"] != "gbm":
raise ValueError("Model type not supported (algo=%s). Only GBM Mojo supported for now." % mojo_model["params"]["algo"])

-target_opset = target_opset if target_opset else get_opset_number_from_onnx()
+target_opset = target_opset if target_opset else get_maximum_opset_supported()
topology = parse_h2o(mojo_model, initial_types, target_opset, custom_conversion_functions, custom_shape_calculators)
topology.compile()
onnx_model = convert_topology(topology, name, doc_string, target_opset, targeted_onnx)
2 changes: 2 additions & 0 deletions onnxmltools/convert/libsvm/convert.py
@@ -5,6 +5,7 @@
#--------------------------------------------------------------------------

from uuid import uuid4
+from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from ...proto import onnx
from ..common._topology import convert_topology
from ._parse import parse_libsvm
@@ -34,6 +35,7 @@ def convert(model, name=None, initial_types=None, doc_string='', target_opset=No

if name is None:
name = str(uuid4().hex)
+target_opset = target_opset if target_opset else get_maximum_opset_supported()

# Parse scikit-learn model as our internal data structure (i.e., Topology)
topology = parse_libsvm(model, initial_types, custom_conversion_functions,
5 changes: 3 additions & 2 deletions onnxmltools/convert/lightgbm/convert.py
@@ -6,7 +6,8 @@

from uuid import uuid4
import lightgbm
-from ...proto import onnx, get_opset_number_from_onnx
+from onnxconverter_common.onnx_ex import get_maximum_opset_supported
+from ...proto import onnx
from ..common._topology import convert_topology
from ._parse import parse_lightgbm, WrappedBooster

@@ -45,7 +46,7 @@ def convert(model, name=None, initial_types=None, doc_string='', target_opset=No
if name is None:
name = str(uuid4().hex)

-target_opset = target_opset if target_opset else get_opset_number_from_onnx()
+target_opset = target_opset if target_opset else get_maximum_opset_supported()
topology = parse_lightgbm(model, initial_types, target_opset, custom_conversion_functions, custom_shape_calculators)
topology.compile()
onnx_model = convert_topology(topology, name, doc_string, target_opset, targeted_onnx)
5 changes: 3 additions & 2 deletions onnxmltools/convert/sparkml/convert.py
@@ -5,7 +5,8 @@
# --------------------------------------------------------------------------

from uuid import uuid4
-from ...proto import onnx, get_opset_number_from_onnx
+from onnxconverter_common.onnx_ex import get_maximum_opset_supported
+from ...proto import onnx
from ..common._topology import convert_topology
from ._parse import parse_sparkml
from . import operator_converters
@@ -63,7 +64,7 @@ def convert(model, name=None, initial_types=None, doc_string='', target_opset=No
if name is None:
name = str(uuid4().hex)

-target_opset = target_opset if target_opset else get_opset_number_from_onnx()
+target_opset = target_opset if target_opset else get_maximum_opset_supported()
# Parse spark-ml model as our internal data structure (i.e., Topology)
topology = parse_sparkml(spark_session, model, initial_types, target_opset, custom_conversion_functions, custom_shape_calculators)

5 changes: 3 additions & 2 deletions onnxmltools/convert/xgboost/convert.py
@@ -6,7 +6,8 @@

from uuid import uuid4
import xgboost
-from ...proto import onnx, get_opset_number_from_onnx
+from onnxconverter_common.onnx_ex import get_maximum_opset_supported
+from ...proto import onnx
from ..common._topology import convert_topology
from ._parse import parse_xgboost, WrappedBooster

@@ -40,7 +41,7 @@ def convert(model, name=None, initial_types=None, doc_string='', target_opset=No

if isinstance(model, xgboost.Booster):
model = WrappedBooster(model)
-target_opset = target_opset if target_opset else get_opset_number_from_onnx()
+target_opset = target_opset if target_opset else get_maximum_opset_supported()
topology = parse_xgboost(model, initial_types, target_opset, custom_conversion_functions, custom_shape_calculators)
topology.compile()
onnx_model = convert_topology(topology, name, doc_string, target_opset, targeted_onnx)
13 changes: 0 additions & 13 deletions onnxmltools/proto/__init__.py
@@ -52,16 +52,3 @@ def _make_tensor_fixed(name, data_type, dims, vals, raw=False):


helper.make_tensor = _make_tensor_fixed
-
-
-def get_opset_number_from_onnx():
-    # since the method was widely used among while it is buggy to get the opset number...
-    # ... blindly, so change it to be safer without the name change.
-
-    default_max_opset = 11
-    try:
-        from onnxconverter_common.topology import DEFAULT_OPSET_NUMBER
-        default_max_opset = DEFAULT_OPSET_NUMBER
-    except: # noqa
-        pass
-    return min(default_max_opset, onnx.defs.onnx_opset_version())
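
For context, the helper removed above capped the default opset at the smaller of onnxconverter-common's DEFAULT_OPSET_NUMBER and the installed onnx package's opset version. Every converter touched by this commit now calls onnxconverter_common.onnx_ex.get_maximum_opset_supported directly, which the requirements.txt change below makes available by pinning onnxconverter-common>=1.7.0. Below is a minimal sketch of that default-opset pattern, assuming onnxconverter-common 1.7.x is installed; resolve_target_opset is a hypothetical name used only for illustration, not an onnxmltools API.

from onnxconverter_common.onnx_ex import get_maximum_opset_supported


def resolve_target_opset(target_opset=None):
    # Same fallback each converter in this commit uses: an explicitly
    # requested opset is passed through unchanged, otherwise fall back to
    # the highest opset the installed converter stack reports as supported.
    return target_opset if target_opset else get_maximum_opset_supported()


print(resolve_target_opset())    # defaults to get_maximum_opset_supported()
print(resolve_target_opset(10))  # an explicit target_opset wins

Keeping the cap inside onnxconverter-common means downstream converters pick up new opset support by upgrading that one dependency instead of patching a local helper in each project.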
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,7 +1,7 @@
keras2onnx
numpy
onnx
-onnxconverter-common>=1.6.5, <1.7.0
+onnxconverter-common>=1.7.0, <1.8.0
protobuf
six
skl2onnx
4 changes: 4 additions & 0 deletions tests/baseline/test_convert_baseline.py
@@ -50,3 +50,7 @@ def test_keras2coreml_Dense_ImageNet_small(self):
"""
self.assertFalse(self.check_baseline(
"keras2coreml_Dense_ImageNet_small.mlmodel", "keras2coreml_Dense_ImageNet_small.json"))
+
+
+if __name__ == "__main__":
+    unittest.main()
5 changes: 3 additions & 2 deletions tests/xgboost/test_xgboost_pipeline.py
@@ -19,7 +19,8 @@
from onnxmltools.convert.common.data_types import FloatTensorType
from onnxmltools.utils import dump_data_and_model
from onnxmltools.convert.xgboost.operator_converters.XGBoost import convert_xgboost as convert_xgb
-from onnxmltools.proto import get_opset_number_from_onnx
+from onnxconverter_common.onnx_ex import get_maximum_opset_supported
+

can_test = True
except ImportError:
@@ -135,7 +136,7 @@ def common_test_xgboost_10_skl(self, missing, replace=False):
input_xgb[input_xgb[:, :] == missing] = np.nan
onnx_last = convert_sklearn(model.steps[1][-1],
initial_types=[('X', FloatTensorType(shape=[None, input_xgb.shape[1]]))],
-target_opset=get_opset_number_from_onnx())
+target_opset=get_maximum_opset_supported())
session = rt.InferenceSession(onnx_last.SerializeToString())
pred_skl = model.steps[1][-1].predict(input_xgb).ravel()
pred_onx = session.run(None, {'X': input_xgb})[0].ravel()
