diff --git a/keras2onnx/ke2onnx/pooling.py b/keras2onnx/ke2onnx/pooling.py index d094224d..bc3dc5c6 100644 --- a/keras2onnx/ke2onnx/pooling.py +++ b/keras2onnx/ke2onnx/pooling.py @@ -9,9 +9,9 @@ def convert_keras_pooling_core(scope, operator, container, is_global, n_dims, op_type, input_perm_axes, output_perm_axes): op = operator.raw_operator - channels_first = n_dims > 1 and op.data_format == 'channels_first' + no_permutation_required = op.data_format == 'channels_first' if hasattr(op, 'data_format') else False - if channels_first: + if no_permutation_required: adjusted_pooling_input = operator.inputs[0].full_name else: adjusted_pooling_input = scope.get_unique_variable_name('input_transposed') @@ -23,9 +23,14 @@ def convert_keras_pooling_core(scope, operator, container, is_global, n_dims, op_type_prefix = 'Global' if is_global else '' onnx_op_type = "AveragePool" if op_type == 'Avg' else 'MaxPool' attrs = {'name': operator.full_name} + op_version = 10 if container.target_opset >= 10 else 7 if not is_global: attrs['strides'] = list(op.strides) attrs['kernel_shape'] = op.pool_size + attrs['op_version'] = op_version + # In ONNX opset 10, the ceil_mode attribute was added to local MaxPool and AveragePool + if container.target_opset >= 10: + attrs['ceil_mode'] = 0 if op.padding == 'valid': attrs['auto_pad'] = 'VALID' elif op.padding == 'same': @@ -33,7 +38,7 @@ def convert_keras_pooling_core(scope, operator, container, is_global, n_dims, else: raise RuntimeError("Unsupported padding type '{0}'".format(op.padding)) - if channels_first: + if no_permutation_required: # In this case, the output of our Pool operator just match what Keras produces. 
container.add_node(op_type_prefix + onnx_op_type, adjusted_pooling_input, operator.outputs[0].full_name, **attrs) @@ -63,14 +68,14 @@ def convert_keras_max_pooling_2d(scope, operator, container): def convert_keras_max_pooling_3d(scope, operator, container): - input_perm_axes, output_perm_axes = get_permutation_config(2) + input_perm_axes, output_perm_axes = get_permutation_config(3) convert_keras_pooling_core(scope, operator, container, is_global=False, n_dims=3, op_type='Max', input_perm_axes=input_perm_axes, output_perm_axes=output_perm_axes) def convert_keras_average_pooling_1d(scope, operator, container): input_perm_axes, output_perm_axes = get_permutation_config(1) - convert_keras_pooling_core(scope, operator, container, is_global=True, n_dims=1, op_type='Avg', + convert_keras_pooling_core(scope, operator, container, is_global=False, n_dims=1, op_type='Avg', input_perm_axes=input_perm_axes, output_perm_axes=output_perm_axes) diff --git a/keras2onnx/ke2onnx/upsample.py b/keras2onnx/ke2onnx/upsample.py index 78592171..0d365a6c 100644 --- a/keras2onnx/ke2onnx/upsample.py +++ b/keras2onnx/ke2onnx/upsample.py @@ -10,22 +10,13 @@ def convert_keras_upsample(scope, operator, container, n_dims): op = operator.raw_operator + # op.size type is tuple, even if we set an int in keras.layers API if n_dims == 1: - scales = [1, int(op.size), 1] - elif n_dims == 2: + scales = [1] + list(d for d in op.size) + elif n_dims == 2 or n_dims == 3: # Always create the list of sampling factors in channels_first format because the input will be converted into # channels_first if it's in channels_last - if isinstance(op.size, collections.Iterable): - scales = [1, 1] + list(d for d in op.size) - else: - scales = [1, 1, int(op.size), int(op.size)] - elif n_dims == 3: - # Always create the list of sampling factors in channels_first format because the input will be converted into - # channels_first if it's in channels_last - if isinstance(op.size, collections.Iterable): - scales = [1, 1] + 
list(int(d) for d in op.size) - else: - scales = [1, 1] + [int(op.size)] * 3 + scales = [1, 1] + list(d for d in op.size) else: raise ValueError('Unsupported dimension %s when converting Keras Upsampling layer' % n_dims) @@ -33,10 +24,11 @@ def convert_keras_upsample(scope, operator, container, n_dims): # to manipulate the input and output of ONNX Upsample. input_perm_axes, output_perm_axes = get_permutation_config(n_dims) channels_first = n_dims > 1 and op.data_format == 'channels_first' + no_permutation_required = channels_first or n_dims < 2 # Before creating the main Upsample operator, we need to permute the input tensor if the original operator is # working under channels_last mode. - if channels_first: + if no_permutation_required: # No permutation is required. Use input as it is. input_tensor_name = operator.inputs[0].full_name else: @@ -44,9 +36,9 @@ def convert_keras_upsample(scope, operator, container, n_dims): input_tensor_name = scope.get_unique_variable_name(operator.inputs[0].full_name + '_permuted') apply_transpose(scope, operator.inputs[0].full_name, input_tensor_name, container, perm=input_perm_axes) - # If channels_first is True, we don't need to permute the output of ONNX Upsample. Otherwise, similar to Crop's + # If no_permutation_required is True, we don't need to permute the output of ONNX Upsample. Otherwise, similar to Crop's # conversion, a Transpose would be added. 
- if channels_first: + if no_permutation_required: apply_upsample(scope, input_tensor_name, operator.outputs[0].full_name, container, scales=scales) else: upsampled_tensor_name = scope.get_unique_variable_name(input_tensor_name + '_upsampled') diff --git a/tests/test_layers.py b/tests/test_layers.py index 2bfaffbe..45df6324 100644 --- a/tests/test_layers.py +++ b/tests/test_layers.py @@ -326,10 +326,14 @@ def test_repeat_vector(self): expected = model.predict(data) self.assertTrue(self.run_onnx_runtime('repeat_vector', onnx_model, data, expected)) - def _pooling_test_helper(self, layer, ishape): + def _pooling_test_helper(self, layer, ishape, data_format='channels_last'): model = keras.Sequential() - nlayer = layer(input_shape=ishape) if \ - (layer.__name__.startswith("Global")) else layer(2, input_shape=ishape) + if sys.version_info >= (3, 6): + nlayer = layer(data_format=data_format, input_shape=ishape) if \ + (layer.__name__.startswith("Global")) else layer(2, data_format=data_format, input_shape=ishape) + else: + nlayer = layer(input_shape=ishape) if \ + (layer.__name__.startswith("Global")) else layer(2, input_shape=ishape) model.add(nlayer) onnx_model = keras2onnx.convert_keras(model, model.name) @@ -339,14 +343,16 @@ def _pooling_test_helper(self, layer, ishape): expected = model.predict(data) self.assertTrue(self.run_onnx_runtime(onnx_model.graph.name, onnx_model, data, expected)) - @unittest.skip("ONNXRuntime doesn't support 3D average pooling yet.") - def test_pooling_avg3d(self): - self._pooling_test_helper(keras.layers.AveragePooling3D, (4, 4, 4, 3)) - - def test_pooling_max1d(self): + def test_pooling_1d(self): + self._pooling_test_helper(keras.layers.AveragePooling1D, (4, 6)) self._pooling_test_helper(keras.layers.MaxPool1D, (4, 6)) + if sys.version_info >= (3, 6): + self._pooling_test_helper(keras.layers.AveragePooling1D, (4, 6), 'channels_first') + self._pooling_test_helper(keras.layers.MaxPool1D, (4, 6), 'channels_first') + + def 
test_pooling_2d(self): + self._pooling_test_helper(keras.layers.AveragePooling2D, (4, 4, 3)) - def test_pooling_max2d(self): N, C, H, W = 2, 3, 5, 5 x = np.random.rand(N, H, W, C).astype(np.float32, copy=False) @@ -365,6 +371,10 @@ def test_pooling_max2d(self): expected = model.predict(x) self.assertTrue(self.run_onnx_runtime('max_pooling_2d', onnx_model, x, expected)) + def test_pooling_3d(self): + self._pooling_test_helper(keras.layers.AveragePooling3D, (4, 4, 4, 3)) + self._pooling_test_helper(keras.layers.MaxPool3D, (4, 4, 4, 3)) + def test_pooling_global(self): self._pooling_test_helper(keras.layers.GlobalAveragePooling2D, (4, 6, 2)) @@ -452,7 +462,6 @@ def test_Softmax(self): self.activationlayer_helper(layer, data) def _misc_conv_helper(self, layer, ishape): - ishape = (20, 20, 1) input = keras.Input(ishape) out = layer(input) model = keras.models.Model(input, out) @@ -469,8 +478,16 @@ def test_crop(self): self._misc_conv_helper(layer, ishape) def test_upsample(self): + if sys.version_info >= (3, 6): + ishape = (20,) + layer = keras.layers.UpSampling1D(size=2) + self._misc_conv_helper(layer, ishape) ishape = (20, 20, 1) - layer = keras.layers.UpSampling2D(size=(2, 3), data_format='channels_last') + for size in [2, (2, 3)]: + layer = keras.layers.UpSampling2D(size=size, data_format='channels_last') + self._misc_conv_helper(layer, ishape) + ishape = (20, 20, 20, 1) + layer = keras.layers.UpSampling3D(size=(2, 3, 4), data_format='channels_last') self._misc_conv_helper(layer, ishape) def test_padding(self):