33 changes: 31 additions & 2 deletions python/tvm/relax/frontend/onnx/onnx_frontend.py
@@ -91,7 +91,7 @@ def get_constant(
     # Convert if possible
     if isinstance(var, relax.Var) and var.name_hint in params:
         # When converting a parameter to a constant, update references to it as well.
-        _, value = params.pop(var.name_hint)
+        _, value = params[var.name_hint]
         const_value = relax.const(value)
         graph_nodes[var.name_hint] = const_value
         return const_value
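The change from `params.pop(var.name_hint)` to plain indexing appears to matter when the same initializer feeds more than one node: popping removed the entry on first use, so a later `get_constant` call for the same name would no longer find it in `params`. A minimal standalone sketch of the difference (the names here are illustrative, not the frontend's):

# Sketch: destructive vs. non-destructive parameter lookup.
params = {"w": ("var_w", [1.0, 2.0])}

def fold_destructive(name):
    _, value = params.pop(name)  # entry is gone after the first call
    return value

def fold_repeatable(name):
    _, value = params[name]  # entry survives for later consumers
    return value

fold_repeatable("w")
fold_repeatable("w")  # still resolves; pop() here would raise KeyError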
@@ -1074,6 +1074,35 @@ def _impl_v13(cls, bb, inputs, attr, params):
 class Pad(OnnxOpConverter):
     """Converts an onnx Pad node into an equivalent Relax expression."""

+    @classmethod
+    def _impl_v2(cls, bb, inputs, attr, params):
+        pads = attr.get("pads")
+        pads = relax.const(_np.array(pads), inputs[0].struct_info.shape[0].dtype)
+        constant_value = attr.get("value")
+        if constant_value is None:
+            constant_value = 0.0
+
+        if isinstance(pads, relax.Constant):
+            pad_before, pad_after = _np.split(pads.data.numpy(), 2)
+            pad_before = _np.ndarray.tolist(pad_before)
+            pad_after = _np.ndarray.tolist(pad_after)
+        else:
+            raise ValueError("Dynamic pads are not supported yet.")
+
+        pad_mode = attr.get("mode", b"constant").decode("utf-8")
+        if pad_mode not in ["constant", "edge", "reflect"]:
+            raise tvm.error.OpAttributeInvalid(
+                "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.'
+            )
+
+        if pad_mode == "constant":
+            return bb.emit_te(topi.nn.pad, inputs[0], pad_before, pad_after, constant_value)
+        elif pad_mode == "reflect":
+            return bb.emit_te(topi.nn.mirror_pad, inputs[0], pad_before, pad_after, "REFLECT")
+        else:
+            # TODO(gigiblender): Support edge mode.
+            raise NotImplementedError("Pad mode {} not implemented".format(pad_mode))
+
     @classmethod
     def _impl_v11(cls, bb, inputs, attr, params):
         pads = get_constant(inputs[1], params)
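Note on the two implementations: through ONNX opset 10 the Pad operator carries `pads` and `value` as node attributes, which is what `_impl_v2` reads via `attr.get`; from opset 11 onward they moved to inputs, which is why `_impl_v11` recovers them with `get_constant(inputs[1], params)`. A sketch of the two node forms using `onnx.helper` (tensor names are illustrative):

# Sketch: the same constant pad expressed for the two opset families.
from onnx import helper

# Opset 2-10: pads/value are attributes; dispatched to Pad._impl_v2.
node_v2 = helper.make_node(
    "Pad", inputs=["x"], outputs=["y"],
    mode="constant", pads=[0, 1, 0, 1], value=0.0,
)

# Opset 11+: pads (and optionally a constant value) are inputs; Pad._impl_v11.
node_v11 = helper.make_node(
    "Pad", inputs=["x", "pads", "constant_value"], outputs=["y"],
    mode="constant",
)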
@@ -2152,7 +2181,7 @@ def _parse_graph_initializers(self, graph: onnx.onnx_ml_pb2.GraphProto):
                 init_var = self._new_var(var_name, shape=array.shape, dtype=array.dtype)
                 self._nodes[init_tensor.name] = init_var
                 # We need to keep track of both the real value and variable for this variable.
-                self._params[init_tensor.name] = (init_var, array)
+                self._params[var_name] = (init_var, array)
             # Otherwise we can use the weight as a constant.
             else:
                 self._nodes[init_tensor.name] = relax.const(array)
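This keying change pairs with the `get_constant` fix above: `get_constant` looks parameters up by `var.name_hint`, and the variable was created from `var_name`, so `self._params` must be keyed by the same string. Keying it by the raw `init_tensor.name` broke the lookup whenever the two differ, e.g. if the variable name is a sanitized form of the tensor name. A standalone sketch of the invariant (the sanitizer shown is hypothetical, standing in for whatever `var_name` derivation the frontend uses):

# Sketch: the params key must match the variable's name_hint.
def sanitize(name):
    return name.replace(".", "_")  # hypothetical naming rule

init_tensor_name = "conv.weight"
var_name = sanitize(init_tensor_name)  # "conv_weight"

params = {var_name: ("init_var", [1.0, 2.0])}  # keyed by var_name, as in the fix

# A get_constant-style membership test on the variable's name now succeeds:
assert var_name in params
assert init_tensor_name not in params  # the old key, which no lookup ever used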
57 changes: 57 additions & 0 deletions tests/python/relax/test_frontend_onnx.py
@@ -1399,6 +1399,63 @@ def verify_pad(input_shape, pads, mode="constant", value=0.0):
     verify_pad((1, 3, 4, 5), [0, 1, 1, 1, 0, 0, 1, 1], "reflect")


+@pytest.mark.parametrize("dynamic", [True, False])
+def test_pad_v2(dynamic):
+    if dynamic:
+        pytest.skip("Dynamic pad not supported")
+
+    def verify_pad(input_shape, pads, mode="constant", value=0.0):
+        indata = np.random.normal(size=input_shape).astype(np.float32)
+        # numpy expected result
+        len_dim = len(pads) // 2
+        np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]
+        pads = np.array(pads)
+        # onnx graph
+        if mode in ["edge", "reflect"]:
+            outdata = np.pad(indata, pad_width=np_pads, mode=mode)
+            node = helper.make_node(
+                "Pad", inputs=["input"], outputs=["output"], mode=mode, pads=pads
+            )
+            graph = helper.make_graph(
+                [node],
+                "pad_test",
+                inputs=[
+                    helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))
+                ],
+                outputs=[
+                    helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
+                ],
+            )
+        else:
+            outdata = np.pad(indata, pad_width=np_pads, mode="constant", constant_values=value)
+            node = helper.make_node(
+                "Pad",
+                inputs=["input"],
+                outputs=["output"],
+                mode="constant",
+                pads=pads,
+                value=value,
+            )
+            graph = helper.make_graph(
+                [node],
+                "pad_test",
+                inputs=[
+                    helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))
+                ],
+                outputs=[
+                    helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
+                ],
+            )
+        model = helper.make_model(graph, producer_name="pad_test")
+        check_correctness(model=model, opset=10)
+
+    verify_pad((2, 2), [0, 1, 0, 0], "constant", 0.0)
+    verify_pad((2, 3), [1, 0, 0, 1], "constant", 0.0)
+    verify_pad((3, 2), [0, 0, 1, 0], "constant", 5.0)
+    verify_pad((1, 3, 4, 5), [0, 1, 1, 1, 0, 0, 1, 1], "reflect")


 @pytest.mark.parametrize("fp_arith", [np.float16, np.float32])
 @pytest.mark.parametrize("dynamic", [True, False])
 def test_split(fp_arith, dynamic):
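The new test mirrors the converter's limits: the `dynamic` parametrization is skipped, matching the `ValueError` that `_impl_v2` raises for non-constant pads; results are checked against `np.pad`; and `opset=10` should route dispatch to `_impl_v2` rather than `_impl_v11`, since the frontend picks the converter for the highest supported version not exceeding the model's opset. A sketch of running just this test from Python, equivalent to the usual pytest CLI invocation:

# Sketch: run only the new Pad v2 test, both parametrizations.
import pytest

pytest.main(["tests/python/relax/test_frontend_onnx.py::test_pad_v2", "-v"])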