diff --git a/.azure-pipelines/linux-CI-keras-applications-nightly.yml b/.azure-pipelines/linux-CI-keras-applications-nightly.yml
index ed6df241..a10d62f7 100644
--- a/.azure-pipelines/linux-CI-keras-applications-nightly.yml
+++ b/.azure-pipelines/linux-CI-keras-applications-nightly.yml
@@ -54,14 +54,14 @@ jobs:
         INSTALL_KERAS: pip install keras
         UNINSTALL_KERAS:
         INSTALL_TENSORFLOW: pip install tensorflow==1.15.0
-        INSTALL_ORT: pip install onnxruntime==1.1.1
+        INSTALL_ORT: pip install onnxruntime==1.3.0
         INSTALL_KERAS_RESNET: pip install keras-resnet
         INSTALL_TRANSFORMERS:
         NIGHTLY_BUILD_TEST: python run_all.py --exclude "test_keras_applications_v2.py"
 
-      Python37-tf2:
-        python.version: '3.7.3'
-        ONNX_PATH: onnx==1.6.0
+      Python38-tf2:
+        python.version: '3.8'
+        ONNX_PATH: onnx==1.7.0
         INSTALL_KERAS:
         UNINSTALL_KERAS: pip uninstall keras -y
         INSTALL_TENSORFLOW: pip install tensorflow==2.2.0
diff --git a/.azure-pipelines/linux-conda-CI-tf-keras.yml b/.azure-pipelines/linux-conda-CI-tf-keras.yml
index cf98957b..b6fd63a6 100644
--- a/.azure-pipelines/linux-conda-CI-tf-keras.yml
+++ b/.azure-pipelines/linux-conda-CI-tf-keras.yml
@@ -32,6 +32,12 @@ jobs:
         TENSORFLOW_PATH: tensorflow-cpu
         INSTALL_ORT:
 
+      Python38:
+        python.version: '3.8'
+        ONNX_PATH: onnx==1.7.0
+        TENSORFLOW_PATH: tensorflow-cpu
+        INSTALL_ORT:
+
     maxParallel: 3
 
   steps:
diff --git a/.azure-pipelines/linux-conda-CI.yml b/.azure-pipelines/linux-conda-CI.yml
index d1b03596..6d0dc766 100644
--- a/.azure-pipelines/linux-conda-CI.yml
+++ b/.azure-pipelines/linux-conda-CI.yml
@@ -28,6 +28,7 @@ jobs:
         TENSORFLOW_PATH: tensorflow==1.15.0
         INSTALL_ORT: pip install onnxruntime==1.1.1
 
+      # UT for standalone keras need tensorflow <= 2.0.0 (python <= 3.7)
       Python37-tf200:
         python.version: '3.7'
         ONNX_PATH: onnx==1.6.0
diff --git a/.azure-pipelines/win32-CI-keras-applications-nightly.yml b/.azure-pipelines/win32-CI-keras-applications-nightly.yml
index ae7c29af..acfc4527 100644
--- a/.azure-pipelines/win32-CI-keras-applications-nightly.yml
+++ b/.azure-pipelines/win32-CI-keras-applications-nightly.yml
@@ -59,13 +59,13 @@ jobs:
         INSTALL_TRANSFORMERS:
         NIGHTLY_BUILD_TEST: python run_all.py --exclude "test_keras_applications_v2.py test_mask_rcnn.py"
 
-      Python37-tf2:
-        python.version: '3.7'
-        ONNX_PATH: onnx==1.6.0
+      Python38-tf2:
+        python.version: '3.8'
+        ONNX_PATH: onnx==1.7.0
         INSTALL_KERAS:
         UNINSTALL_KERAS: pip uninstall keras -y
         INSTALL_TENSORFLOW: pip install tensorflow==2.2.0
-        INSTALL_ORT: pip install onnxruntime==1.1.1
+        INSTALL_ORT: pip install onnxruntime==1.3.0
         INSTALL_KERAS_RESNET: pip install keras-resnet
         INSTALL_TRANSFORMERS: pip install transformers
         NIGHTLY_BUILD_TEST: pytest test_keras_applications_v2.py test_transformers.py test_efn.py --doctest-modules --junitxml=junit/test-results-v2.xml
diff --git a/.azure-pipelines/win32-conda-CI-tf-keras.yml b/.azure-pipelines/win32-conda-CI-tf-keras.yml
index 69cd7d35..184d0b59 100644
--- a/.azure-pipelines/win32-conda-CI-tf-keras.yml
+++ b/.azure-pipelines/win32-conda-CI-tf-keras.yml
@@ -32,6 +32,12 @@ jobs:
         TENSORFLOW_PATH: tensorflow-cpu
         INSTALL_ORT:
 
+      Python38:
+        python.version: '3.8'
+        ONNX_PATH: onnx==1.7.0
+        TENSORFLOW_PATH: tensorflow-cpu
+        INSTALL_ORT:
+
     maxParallel: 3
 
   steps:
diff --git a/.azure-pipelines/win32-conda-CI.yml b/.azure-pipelines/win32-conda-CI.yml
index cfdde6c5..165312e9 100644
--- a/.azure-pipelines/win32-conda-CI.yml
+++ b/.azure-pipelines/win32-conda-CI.yml
@@ -28,9 +28,10 @@ jobs:
         TENSORFLOW_PATH: tensorflow==1.15.0
         INSTALL_ORT: pip install onnxruntime==1.1.1
 
+      # UT for standalone keras need tensorflow <= 2.0.0 (python <= 3.7)
       Python37-tf200:
         python.version: '3.7'
-        ONNX_PATH: onnx==1.6.0
+        ONNX_PATH: onnx==1.7.0
         KERAS: keras
         TENSORFLOW_PATH: tensorflow==2.0.0
         INSTALL_ORT:
diff --git a/applications/nightly_build/test_transformers.py b/applications/nightly_build/test_transformers.py
index bce74ef4..7dccfd19 100644
--- a/applications/nightly_build/test_transformers.py
+++ b/applications/nightly_build/test_transformers.py
@@ -201,19 +201,18 @@ def test_TFGPT2(self):
     def test_TFXLNet(self):
         if enable_full_transformer_test:
             from transformers import XLNetConfig, TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, \
-                TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple
+                TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple, XLNetTokenizer
             model_list = [TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, \
                           TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple]
         else:
-            from transformers import XLNetConfig, TFXLNetModel
+            from transformers import XLNetConfig, TFXLNetModel, XLNetTokenizer
             model_list = [TFXLNetModel]
 
-        # pretrained_weights = 'xlnet-large-cased'
-        tokenizer_file = 'xlnet_xlnet-large-cased.pickle'
-        tokenizer = self._get_tokenzier(tokenizer_file)
+        # XLNetTokenizer need SentencePiece, so the pickle file does not work here.
+        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
         config = XLNetConfig(n_layer=2)
         # The model with input mask has MatrixDiagV3 which is not a registered function/op
-        token = tokenizer.encode(self.text_str, add_special_tokens=True)
+        token = np.asarray(tokenizer.encode(self.text_str, add_special_tokens=True), dtype=np.int32)
         inputs_onnx = {'input_1': np.expand_dims(token, axis=0)}
         inputs = tf.constant(token)[None, :]  # Batch size 1
 
diff --git a/keras2onnx/_builtin.py b/keras2onnx/_builtin.py
index 2cbe4e64..90990dda 100644
--- a/keras2onnx/_builtin.py
+++ b/keras2onnx/_builtin.py
@@ -1689,6 +1689,26 @@ def convert_tf_one_hot(scope, operator, container):
                        name=operator.full_name + '_one_hot', axis=axis)
 
 
+@converter_func(TYPES.Pow)
+def convert_tf_pow(scope, operator, container):
+    oopb = OnnxOperatorBuilder(container, scope)
+    node = operator.raw_operator
+    if container.target_opset < 12:
+        supported_types = [oopb.float16, oopb.float, oopb.double]
+        for input_idx_ in range(2):
+            dtype = _to_onnx_type(node.inputs[input_idx_].dtype)
+            if dtype not in supported_types:
+                raise ValueError("The input type of Pow is not supported for opset < 12.")
+        dtype = _to_onnx_type(node.outputs[0].dtype)
+        if dtype not in supported_types:
+            raise ValueError("The output type of Pow is not supported for opset < 12.")
+
+    oopb.apply_op_with_output("apply_pow",
+                              operator.input_full_names,
+                              operator.output_full_names,
+                              name=operator.full_name)
+
+
 @converter_func(TYPES.ReadVariableOp)
 def convert_tf_read_variable_op(scope, operator, container):
     oopb = OnnxOperatorBuilder(container, scope)
@@ -1835,7 +1855,6 @@ def _prepare_StridedSlice(node, target_opset):
     # onnx slice op can't remove a axis, track axis and add a squeeze op if needed
     needs_squeeze = []
     ellipsis_gap = 0
-    data_input = node.inputs[0]
 
     new_axis_len = 0
     cur_new_axis_mask = new_axis_mask
@@ -2121,7 +2140,6 @@ def convert_tf_zeros_like(scope, operator, container):
     "Log": ("apply_log",),
     "Mul": ("apply_mul",),
     "Neg": ("apply_neg",),
-    "Pow": ("apply_pow",),
     "RealDiv": ("apply_div",),
     "Reciprocal": ("apply_reciprocal",),
     "Relu": ("apply_relu",),
diff --git a/keras2onnx/_consts.py b/keras2onnx/_consts.py
index b978262e..48bf4eea 100644
--- a/keras2onnx/_consts.py
+++ b/keras2onnx/_consts.py
@@ -57,6 +57,7 @@ class TYPES:
     Pack = 'Pack'
     Pad = 'Pad'
     PadV2 = 'PadV2'
+    Pow = 'Pow'
     Prod = 'Prod'
     Range = 'Range'
     ReadVariableOp = 'ReadVariableOp'
diff --git a/tests/test_layers.py b/tests/test_layers.py
index 62796a4e..a9b589a4 100644
--- a/tests/test_layers.py
+++ b/tests/test_layers.py
@@ -205,6 +205,27 @@ def test_tf_bias_add(runner):
     assert runner('onnx_bias_add', onnx_model, data, expected)
 
 
+def test_tf_clip(runner):
+    model = Sequential()
+    model.add(Lambda(lambda x: K.clip(x, 0, 10), input_shape=[5, 5]))
+    data = np.random.randint(-5, 15, size=(1, 5, 5)).astype(np.float32)
+    expected = model.predict(data)
+    onnx_model = keras2onnx.convert_keras(model, 'test_tf_clip')
+    assert runner('onnx_tf_clip', onnx_model, data, expected)
+
+
+@pytest.mark.skipif(get_maximum_opset_supported() < 12,
+                    reason="Result mismatch on ORT, skip conversion for unsupported types.")
+def test_tf_pow(runner):
+    model = Sequential()
+    y = tf.constant([[2.0, 2.0], [2.0, 2.0]])
+    model.add(Lambda(lambda x: tf.math.pow(tf.cast(x, tf.int32), tf.cast(y, tf.int32)), input_shape=[2, 2]))
+    data = (100 * np.random.rand(3, 2, 2)).astype(np.float32)
+    expected = model.predict(data)
+    onnx_model = keras2onnx.convert_keras(model, 'test_tf_pow')
+    assert runner('onnx_tf_pow', onnx_model, data, expected)
+
+
 def test_tf_concat(runner):
     def my_func_1(x):
         return tf.concat([x[0], x[1]], 1)
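
For context, a minimal standalone sketch (not part of the diff) of how the new Pow converter path can be exercised end to end, mirroring test_tf_pow above. It assumes tf.keras plus the keras2onnx and onnxruntime packages; the model name 'pow_example' and the variable names are illustrative only. Integer inputs to Pow are accepted by convert_tf_pow only from opset 12 onward, which is why the test is guarded by get_maximum_opset_supported() < 12.

# Hypothetical usage sketch; not taken from the repository's test suite.
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Lambda
from tensorflow.keras.models import Sequential

import keras2onnx
import onnxruntime as ort

exponent = tf.constant([[2.0, 2.0], [2.0, 2.0]])

model = Sequential()
# Cast both base and exponent to int32 so conversion goes through the integer Pow path.
model.add(Lambda(lambda x: tf.math.pow(tf.cast(x, tf.int32), tf.cast(exponent, tf.int32)),
                 input_shape=[2, 2]))

data = (100 * np.random.rand(3, 2, 2)).astype(np.float32)
expected = model.predict(data)

# target_opset=12 avoids the ValueError raised by convert_tf_pow for integer Pow below opset 12.
onnx_model = keras2onnx.convert_keras(model, 'pow_example', target_opset=12)

# Check the converted graph against TensorFlow using onnxruntime.
sess = ort.InferenceSession(onnx_model.SerializeToString())
result = sess.run(None, {sess.get_inputs()[0].name: data})[0]
np.testing.assert_array_equal(result, expected)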