diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index 683b94dd9290..9ddd04b5b4ee 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -4424,7 +4424,7 @@ def _create_typed_const(data, dtype):
     dtype should be a TVM dtype"""
 
     if dtype == "float64":
-        typed_data = _expr.const(np.float64(data), dtype=dtype)
+        typed_data = _expr.const(np.asarray(data, dtype="float64"), dtype=dtype)
     elif dtype == "float32":
         typed_data = _expr.const(np.float32(data), dtype=dtype)
     elif dtype == "float16":
diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index 8c1cdbb0cf0b..9ee03512e7ae 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -3449,6 +3449,20 @@ def test_forward_adaptive_max_pool1d():
     verify_model(m.float().eval(), input_data=input_data)
 
 
+@tvm.testing.uses_gpu
+def test_forward_instance_norm():
+    """test_forward_instance_norm"""
+
+    class instance_norm(Module):
+        def forward(self, *args):
+            return torch.nn.functional.instance_norm(args[0], use_input_stats=True)
+
+    m = instance_norm().float().eval()
+    input_data = torch.randn([1, 1, 1, 2], dtype=torch.float64)
+
+    verify_model(m.float().eval(), input_data=input_data)
+
+
 @tvm.testing.uses_gpu
 def test_forward_full_like():
     """test_forward_full_like"""