Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions python/tvm/relax/frontend/onnx/onnx_frontend.py
Original file line number Diff line number Diff line change
Expand Up @@ -911,8 +911,7 @@ class Size(OnnxOpConverter):

@classmethod
def _impl_v1(cls, bb, inputs, attr, params):
    """Convert ONNX ``Size`` to Relax.

    Lowers directly to the native ``relax.op.size`` operator, which
    yields a scalar int64 tensor holding the element count of the input.
    """
    # The older lowering (prod over shape_to_tensor(shape_of(x))) is now the
    # legalization of relax.size itself; emit the native op here.
    return relax.op.size(inputs[0])


class EyeLike(OnnxOpConverter):
Expand Down
1 change: 1 addition & 0 deletions python/tvm/relax/op/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@
register_gradient,
shape_of,
shape_to_tensor,
size,
tensor_to_shape,
to_vdevice,
)
Expand Down
28 changes: 23 additions & 5 deletions python/tvm/relax/op/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -634,6 +634,22 @@ def shape_of(expr: Expr) -> Expr:
return _ffi_api.shape_of(expr) # type: ignore # pylint: disable=no-member


def size(expr: Expr) -> Expr:
    """Compute the total element count of a tensor.

    Parameters
    ----------
    expr : Expr
        The input tensor expression.

    Returns
    -------
    result : Expr
        A scalar (0-d) tensor of dtype int64 holding the number of
        elements in ``expr``.
    """
    return _ffi_api.size(expr)  # type: ignore # pylint: disable=no-member


def tensor_to_shape(expr: Expr) -> Expr:
"""Convert tensor to shape expr.
Parameters
Expand Down Expand Up @@ -777,11 +793,13 @@ def call_pure_packed(
sinfo_args = [sinfo_args]

sinfo_args = [
sinfo()
if callable(sinfo)
else sinfo.asobject()
if isinstance(sinfo, ObjectConvertible)
else sinfo
(
sinfo()
if callable(sinfo)
else sinfo.asobject()
if isinstance(sinfo, ObjectConvertible)
else sinfo
)
for sinfo in sinfo_args
]

Expand Down
6 changes: 6 additions & 0 deletions python/tvm/relax/transform/legalize_ops/inspect_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@

from ...block_builder import BlockBuilder
from ...expr import Call, Expr
from ... import op
from .common import register_legalize


Expand Down Expand Up @@ -126,3 +127,8 @@ def _get_tensor_elem_offset(dlpack_handle: T.handle) -> T.int64:

gvar = bb.add_func(_get_tensor_elem_offset, "_get_tensor_elem_offset")
return Call(gvar, call.args)


@register_legalize("relax.size")
def _size(_bb: BlockBuilder, call: Call) -> Expr:
return op.prod(op.shape_to_tensor(op.shape_of(call.args[0])))
2 changes: 2 additions & 0 deletions python/tvm/script/ir_builder/relax/ir.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,7 @@
sign,
sin,
sinh,
size,
slice_scatter,
sort,
split,
Expand Down Expand Up @@ -938,6 +939,7 @@ def dtype(value: Union[py_str, DataType]) -> Expr:
"shape",
"shape_of",
"ShapeExpr",
"size",
"std",
"str",
"sum",
Expand Down
26 changes: 26 additions & 0 deletions src/relax/op/op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1125,6 +1125,32 @@ TVM_FFI_STATIC_INIT_BLOCK() {
refl::GlobalDef().def("relax.op.shape_of", MakeShapeOf);
}

// size

// Infer the struct info of relax.size: always a scalar (rank-0) int64 tensor,
// regardless of the input tensor's shape or dtype.
StructInfo InferStructInfoSize(const Call& call, const BlockBuilder& ctx) {
  auto arg_sinfo = GetStructInfo(call->args[0]);
  // Reuse the struct info fetched above instead of looking it up a second time.
  const auto* tensor_sinfo = arg_sinfo.as<TensorStructInfoNode>();
  CHECK(tensor_sinfo) << "size expects a tensor input, but received " << arg_sinfo
                      << "; use MatchCast if necessary";
  return TensorStructInfo(ShapeExpr(ffi::Array<PrimExpr>{}), DataType::Int(64));
}

TVM_REGISTER_OP("relax.size")
.set_num_inputs(1)
.add_argument("input", "Expr", "The input tensor")
.set_attr<FInferStructInfo>("FInferStructInfo", InferStructInfoSize)
.set_attr<Bool>("FPurity", Bool(true));

// Construct a relax.size call on the given tensor expression.
Expr MakeSize(Expr expr) {
  // Cache the Op handle across calls; Op::Get does a registry lookup.
  static const Op& op = Op::Get("relax.size");
  return Call(op, {expr}, {}, {});
}

// Expose MakeSize to the FFI as "relax.op.size" (used by the Python binding).
TVM_FFI_STATIC_INIT_BLOCK() {
  namespace refl = tvm::ffi::reflection;
  refl::GlobalDef().def("relax.op.size", MakeSize);
}

// tensor_to_shape

StructInfo ReturnTensorToShapeStructInfo(const Call& call, const BlockBuilder& ctx) {
Expand Down
63 changes: 63 additions & 0 deletions tests/python/relax/test_op_size.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import numpy as np

import tvm
import tvm.testing
from tvm import relax
from tvm.script import relax as R


def test_op_size():
    """R.size on a statically shaped (2, 3) tensor returns 6."""

    @tvm.script.ir_module
    class Module:
        @R.function
        def main(x: R.Tensor((2, 3), "float32")) -> R.Tensor((), "int64"):
            return R.size(x)

    target = tvm.target.Target("llvm")
    executable = relax.build(Module, target)
    vm = relax.VirtualMachine(executable, tvm.cpu())

    data = tvm.runtime.tensor(np.random.rand(2, 3).astype("float32"))
    result = vm["main"](data)
    assert result.numpy() == 6


def test_op_size_dynamic():
    """R.size works with symbolic (dynamic) shape dimensions."""

    @tvm.script.ir_module
    class Module:
        @R.function
        def main(x: R.Tensor(("m", "n"), "float32")) -> R.Tensor((), "int64"):
            return R.size(x)

    target = tvm.target.Target("llvm")
    executable = relax.build(Module, target)
    vm = relax.VirtualMachine(executable, tvm.cpu())

    # Concrete runtime shape (4, 5) bound to the symbolic (m, n).
    data = tvm.runtime.tensor(np.random.rand(4, 5).astype("float32"))
    result = vm["main"](data)
    assert result.numpy() == 20


if __name__ == "__main__":
tvm.testing.main()
Comment on lines +1 to +63
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The current tests cover static and dynamic shapes, which is great. To make the test suite more robust, consider adding tests for a few edge cases:

  1. Scalar input (0-dimensional tensor): The size should be 1.
  2. Tensor with a zero-sized dimension: The size should be 0.

Here are some examples of how you could write these tests:

def test_op_size_scalar():
    """R.size on a 0-d (scalar) tensor returns 1."""

    @tvm.script.ir_module
    class Module:
        @R.function
        def main(x: R.Tensor((), "float32")) -> R.Tensor((), "int64"):
            return R.size(x)

    x_np = np.array(1.0, dtype="float32")
    x = tvm.runtime.tensor(x_np)

    target = tvm.target.Target("llvm")
    ex = relax.build(Module, target)
    vm = relax.VirtualMachine(ex, tvm.cpu())

    res = vm["main"](x)
    assert res.numpy() == 1

def test_op_size_zero_dim():
    """R.size on a tensor with a zero-length dimension returns 0."""

    @tvm.script.ir_module
    class Module:
        @R.function
        def main(x: R.Tensor((2, 0, 3), "float32")) -> R.Tensor((), "int64"):
            return R.size(x)

    x_np = np.empty((2, 0, 3), dtype="float32")
    x = tvm.runtime.tensor(x_np)

    target = tvm.target.Target("llvm")
    ex = relax.build(Module, target)
    vm = relax.VirtualMachine(ex, tvm.cpu())

    res = vm["main"](x)
    assert res.numpy() == 0

Loading