38 changes: 27 additions & 11 deletions python/tvm/relax/frontend/onnx/onnx_frontend.py
@@ -2779,9 +2779,7 @@ def _impl_v18(cls, bb, inputs, attr, params):
                 else:
                     roi_static = roi_np
             else:
-                roi_dynamic_vec = bb.normalize(
-                    _onnx_resize_spatial_roi_vector(roi, ndims)
-                )
+                roi_dynamic_vec = bb.normalize(_onnx_resize_spatial_roi_vector(roi, ndims))
         else:
             roi_static = [0.0] * (2 * (ndims - 2))

@@ -3757,6 +3755,30 @@ def _impl_v18(cls, bb, inputs, attr, params):
        return relax.op.sqrt(relax.op.sum(relax.op.multiply(data, data), axes, keepdims))


def _argreduce_select_last_index(bb, data, axis, keepdims, op):
    """Helper for ArgMax/ArgMin with select_last_index=1.

    Reverses the tensor along the reduction axis, runs the reduction op,
    then remaps the index back: last_idx = (axis_size - 1) - flipped_idx.
    Handles both static and dynamic axis sizes.
    """
    data_flipped = relax.op.flip(data, axis=axis)
    flipped_idx = bb.normalize(op(data_flipped, axis, keepdims))
    axis_size = data.struct_info.shape[axis]
    if isinstance(axis_size, tir.IntImm):
        offset = relax.const(int(axis_size) - 1, "int64")
    else:
        # Dynamic shape: fetch the axis size at runtime and subtract 1.
        shape_tensor = bb.normalize(
            relax.op.shape_to_tensor(bb.normalize(relax.op.shape_of(data)))
        )
        offset = bb.normalize(
            relax.op.subtract(
                bb.normalize(relax.op.take(shape_tensor, relax.const(axis, "int64"), axis=0)),
                relax.const(1, "int64"),
            )
        )
    return relax.op.subtract(offset, flipped_idx)
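
For intuition, the remap can be checked in plain NumPy; a minimal sketch (illustrative only, not part of the frontend):

import numpy as np

x = np.array([7.0, 1.0, 7.0])  # maximum tied at index 0 and index 2
flipped_idx = np.argmax(x[::-1])  # argmax over the reversed array -> 0
last_idx = (x.shape[0] - 1) - flipped_idx  # (3 - 1) - 0 == 2
assert last_idx == 2  # the LAST occurrence of the maximum in x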


class ArgMax(OnnxOpConverter):
    """Converts an onnx ArgMax node into an equivalent Relax expression."""

@@ -3788,10 +3810,7 @@ def _impl_v12(cls, bb, inputs, attr, params):
         axis, keepdims = cls._check_attrs(data, attr)
         select_last_index = attr.get("select_last_index", False)
         if select_last_index:
-            # TODO(vvchernov): support attr
-            raise tvm.error.OpAttributeUnImplemented(
-                "'select_last_index' attribute has not been supported yet"
-            )
+            return _argreduce_select_last_index(bb, data, axis, keepdims, relax.op.argmax)
         return relax.op.argmax(data, axis, keepdims)


@@ -3826,10 +3845,7 @@ def _impl_v12(cls, bb, inputs, attr, params):
         axis, keepdims = cls._check_attrs(data, attr)
         select_last_index = attr.get("select_last_index", False)
         if select_last_index:
-            # TODO(vvchernov): support attr
-            raise tvm.error.OpAttributeUnImplemented(
-                "'select_last_index' attribute has not been supported yet"
-            )
+            return _argreduce_select_last_index(bb, data, axis, keepdims, relax.op.argmin)
         return relax.op.argmin(data, axis, keepdims)
Comment on lines 3847 to 3849 (Contributor, severity: medium):
This implementation for select_last_index is nearly identical to the one for ArgMax. To improve maintainability and avoid code duplication, consider refactoring this logic into a shared helper function. This would also allow you to apply the fix for the dynamic shape issue (mentioned in the ArgMax comment) in a single place.

A possible refactoring could look like this:

def _get_last_index(bb, data, axis, keepdims, op):
    data_flipped = relax.op.flip(data, axis=axis)
    flipped_idx = bb.normalize(op(data_flipped, axis, keepdims))
    offset = bb.normalize(
        relax.op.full(
            flipped_idx.struct_info.shape,
            relax.PrimValue(relax.op.shape_of(data)[axis] - 1),
            dtype="int64",
        )
    )
    return relax.op.subtract(offset, flipped_idx)

...
# in ArgMax._impl_v12
if select_last_index:
    return _get_last_index(bb, data, axis, keepdims, relax.op.argmax)
...

# in ArgMin._impl_v12
if select_last_index:
    return _get_last_index(bb, data, axis, keepdims, relax.op.argmin)
...
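
For comparison, the helper merged above follows the suggested shape (flip, reduce, normalize, subtract) but builds the offset differently: instead of materializing a full tensor with relax.op.full, it keeps the offset as a scalar constant in the static case, or a scalar runtime value via shape_to_tensor and take in the dynamic case, and lets broadcasting handle the subtraction.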



98 changes: 98 additions & 0 deletions tests/python/relax/test_frontend_onnx.py
@@ -5360,5 +5360,103 @@ def test_max_roi_pool(pooled_shape, rois):
    check_correctness(model, inputs=inputs, opset=16, rtol=1e-5, atol=1e-5)


@pytest.mark.parametrize("op_name", ["ArgMax", "ArgMin"])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("keepdims", [True, False])
def test_arg_min_max_select_last_index(op_name, axis, keepdims):
    """select_last_index=1 must return the LAST occurrence of the extreme value."""
    shape = [3, 4, 5]

    # Force a tie: place the extreme value at both index 0 and index (axis_size - 1)
    # so that select_last_index=0 and =1 give observably different results.
    data = np.random.uniform(-10, 10, shape).astype(np.float32)
    slices_first = [slice(None)] * len(shape)
    slices_last = [slice(None)] * len(shape)
    slices_first[axis] = 0
    slices_last[axis] = shape[axis] - 1
    extreme = data.max() + 1.0 if op_name == "ArgMax" else data.min() - 1.0
    data[tuple(slices_first)] = extreme
    data[tuple(slices_last)] = extreme

    node = helper.make_node(
        op_name,
        inputs=["data"],
        outputs=["out"],
        axis=axis,
        keepdims=int(keepdims),
        select_last_index=1,
    )

    out_shape = list(shape)
    if keepdims:
        out_shape[axis] = 1
    else:
        out_shape.pop(axis)

    graph = helper.make_graph(
        [node],
        "arg_select_last_index_test",
        inputs=[helper.make_tensor_value_info("data", TensorProto.FLOAT, shape)],
        outputs=[helper.make_tensor_value_info("out", TensorProto.INT64, out_shape)],
    )
    model = helper.make_model(graph, producer_name="arg_select_last_index_test")
    check_correctness(model, inputs={"data": data}, opset=12)
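
As a cross-check of what this test expects, a small NumPy sketch (assuming only numpy, with shape and axis fixed for illustration): with the extreme planted at both ends of the axis, first-occurrence semantics yield index 0 everywhere, while last-occurrence semantics yield shape[axis] - 1 everywhere.

import numpy as np

shape, axis = [3, 4, 5], 1
data = np.random.uniform(-10, 10, shape).astype(np.float32)
extreme = data.max() + 1.0
data[:, 0, :] = extreme  # tie at the first position along axis 1...
data[:, -1, :] = extreme  # ...and at the last position

first = np.argmax(data, axis=1)  # numpy always picks the FIRST tie
last = (shape[1] - 1) - np.argmax(np.flip(data, axis=1), axis=1)
assert (first == 0).all() and (last == shape[1] - 1).all()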


@pytest.mark.parametrize("op_name", ["ArgMax", "ArgMin"])
def test_arg_min_max_select_last_index_no_tie(op_name):
    """With all-unique values, select_last_index=1 must agree with select_last_index=0."""
    shape = [4, 5]
    # arange guarantees uniqueness so first == last for every row
    data = np.arange(20, dtype=np.float32).reshape(shape)

    for select_last in [0, 1]:
        node = helper.make_node(
            op_name,
            inputs=["data"],
            outputs=["out"],
            axis=1,
            keepdims=1,
            select_last_index=select_last,
        )
        graph = helper.make_graph(
            [node],
            "arg_no_tie_test",
            inputs=[helper.make_tensor_value_info("data", TensorProto.FLOAT, shape)],
            outputs=[helper.make_tensor_value_info("out", TensorProto.INT64, [4, 1])],
        )
        model = helper.make_model(graph, producer_name="arg_no_tie_test")
        check_correctness(model, inputs={"data": data}, opset=12)


@pytest.mark.parametrize("op_name", ["ArgMax", "ArgMin"])
def test_arg_min_max_select_last_index_ir(op_name):
    """select_last_index=1 must lower to flip + argmax/argmin + subtract in the Relax IR."""
    shape = [3, 4, 5]
    relax_op = "relax.argmax" if op_name == "ArgMax" else "relax.argmin"

    node = helper.make_node(
        op_name,
        inputs=["data"],
        outputs=["out"],
        axis=1,
        keepdims=1,
        select_last_index=1,
    )
    graph = helper.make_graph(
        [node],
        "arg_select_last_index_ir_test",
        inputs=[helper.make_tensor_value_info("data", TensorProto.FLOAT, shape)],
        outputs=[helper.make_tensor_value_info("out", TensorProto.INT64, [3, 1, 5])],
    )
    model = helper.make_model(graph, producer_name="arg_select_last_index_ir_test")
    tvm_model = from_onnx(model, opset=12, keep_params_in_input=True)

    call_ops = collect_relax_call_ops(tvm_model["main"])
    assert relax_op in call_ops, f"Expected {relax_op} in IR, got {call_ops}"
    assert "relax.flip" in call_ops, f"Expected relax.flip in IR, got {call_ops}"
    assert "relax.subtract" in call_ops, f"Expected relax.subtract in IR, got {call_ops}"


if __name__ == "__main__":
    tvm.testing.main()