Hello.
I'm trying to transform the graph before passing it as input to my backend's serialization process. The situation below is very similar to mine.
The pass in question (from the transformation list):

    AddmmToLinearTransform(),
The problem is that the output spec of the graph changes after running the AddmmToLinearTransform pass. During serialization I refer to `edge_program.graph_signature`, but it fails because of the mismatch between the graph module and the exported program's input/output signature.
class SimpleLinear(torch.nn.Module):
    """Minimal module wrapping a single 3x3 Linear layer.

    `forward` returns a 1-tuple so the exported program has an explicit
    output list, mirroring what export expects.
    """

    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 3)

    def forward(self, arg):
        # Wrap the single result in a tuple to keep the output structure explicit.
        result = self.linear(arg)
        return (result,)

    def get_example_inputs(self):
        # One random activation matching the layer's expected input features.
        return (torch.randn(3, 3),)
### Before the transformation
class GraphModule(torch.nn.Module):
def forward(self, p_linear_weight: "f32[3, 3]", p_linear_bias: "f32[3]", arg: "f32[3, 3]"):
# File: /home/seongwoo/-exir/test/modules/single/op/linear.py:10 in forward, code: return (self.linear(arg),)
aten_permute_copy_default: "f32[3, 3]" = executorch_exir_dialects_edge__ops_aten_permute_copy_default(p_linear_weight, [1, 0]); p_linear_weight = None
aten_addmm_default: "f32[3, 3]" = executorch_exir_dialects_edge__ops_aten_addmm_default(p_linear_bias, arg, aten_permute_copy_default); p_linear_bias = arg = aten_permute_copy_default = None
return (aten_addmm_default,)
Graph signature: ExportGraphSignature(input_specs=[InputSpec(kind=<InputKind.PARAMETER: 2>, arg=TensorArgument(name='p_linear_weight'), target='linear.weight', persistent=None), InputSpec(kind=<InputKind.PARAMETER: 2>, arg=TensorArgument(name='p_linear_bias'), target='linear.bias', persistent=None), InputSpec(kind=<InputKind.USER_INPUT: 1>, arg=TensorArgument(name='arg'), target=None, persistent=None)], output_specs=[OutputSpec(kind=<OutputKind.USER_OUTPUT: 1>, arg=TensorArgument(name='aten_addmm_default'), target=None)])
Range constraints: {}
### After the transformation
class GraphModule(torch.nn.Module):
def forward(self, p_linear_weight: "f32[3, 3]", p_linear_bias: "f32[3]", arg: "f32[3, 3]"):
# File: /home/seongwoo/-exir/test/modules/single/op/linear.py:10 in forward, code: return (self.linear(arg),)
aten_linear_default: "f32[3, 3]" = executorch_exir_dialects_edge__ops_aten_linear_default(arg, p_linear_weight, p_linear_bias); arg = p_linear_weight = p_linear_bias = None
return [aten_linear_default]
Graph signature: ExportGraphSignature(input_specs=[InputSpec(kind=<InputKind.PARAMETER: 2>, arg=TensorArgument(name='p_linear_weight'), target='linear.weight', persistent=None), InputSpec(kind=<InputKind.PARAMETER: 2>, arg=TensorArgument(name='p_linear_bias'), target='linear.bias', persistent=None), InputSpec(kind=<InputKind.USER_INPUT: 1>, arg=TensorArgument(name='arg'), target=None, persistent=None)], output_specs=[OutputSpec(kind=<OutputKind.USER_OUTPUT: 1>, arg=TensorArgument(name='aten_addmm_default'), target=None)])
Range constraints: {}
###############
# Not changed despite the transformation
# arg=TensorArgument(name='aten_addmm_default'), target=None)]
Should I just update the exported program like below or is there any related API?
# Rebuild the output specs so the signature matches the transformed graph
# module. NOTE: the original version forced every spec to USER_OUTPUT with
# target=None, which silently discards non-user output kinds (e.g. buffer
# mutations) and their targets. Here we keep each original spec's kind and
# target and only refresh the TensorArgument name to the node currently
# feeding the graph's output.
output_node = next(
    node for node in edge_program.graph.nodes if node.op == "output"
)
# output_node.args[0] is the flat sequence of values returned by the graph;
# it is positionally aligned with graph_signature.output_specs.
edge_program.graph_signature.output_specs = [
    OutputSpec(
        kind=spec.kind,
        arg=TensorArgument(name=arg.name),
        target=spec.target,
    )
    for spec, arg in zip(
        edge_program.graph_signature.output_specs, output_node.args[0]
    )
]
Hello.
I'm trying to transform the graph before passing it as input to my backend's serialization process. The situation below is very similar to mine.
executorch/backends/vulkan/vulkan_preprocess.py
Line 47 in 51f6455
The problem is that the output spec of the graph changes after running the
AddmmToLinearTransform pass. During serialization I refer to the `edge_program.graph_signature`, but it fails because of the mismatch between the graph module and the exported program's input/output signature. Should I just update the exported program like below, or is there a related API?