3131from tvm .relay .backend .contrib .ethosu import legalize , preprocess
3232from tvm .relay import dataflow_pattern
3333from tvm .relay .op .contrib import ethosu
34- from tvm .relay .backend .contrib .ethosu import util
34+ from tvm .relay .backend .contrib .ethosu import util , codegen
3535from tvm .relay .build_module import bind_params_by_name
3636from tvm .relay .frontend .tflite import get_pad_value
3737from tvm .relay .expr_functor import ExprVisitor
@@ -44,6 +44,8 @@ def partition_ethosu_by_table(mod, pattern_table):
4444 want to add the operator's pattern to the pattern table so that the compiler
4545 wouldn't attempt to offload an operator without full stack support."""
4646 mod = relay .transform .InferType ()(mod )
47+ mod = codegen.replicate_pads(mod)
48+ mod = relay .transform .InferType ()(mod )
4749 mod = relay .transform .MergeComposite (pattern_table )(mod )
4850 mod = relay .transform .AnnotateTarget ("ethos-u" )(mod )
4951 mod = relay .transform .MergeCompilerRegions ()(mod )
@@ -3676,5 +3678,133 @@ def _visit(stmt):
36763678 verify (mod ["tvmgen_default_ethos_u_main_0" ])
36773679
36783680
@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3)])
@pytest.mark.parametrize("kernel_shape", [(3, 3)])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (1, 1))])
@pytest.mark.parametrize("op_padding", ["SAME", "VALID"])
@pytest.mark.parametrize("sep_padding", [(0, 0, 1, 1), (7, 5, 4, 5)])
@pytest.mark.parametrize(
    "op_pairs", [("conv2d", "conv2d"), ("depthwise", "depthwise"), ("conv2d", "depthwise")]
)
def test_tflite_shared_pad_legalize(
    ifm_shape,
    kernel_shape,
    strides,
    dilation,
    op_padding,
    sep_padding,
    op_pairs,
):
    """Legalize a TFLite graph where one explicit pad feeds two convolutions.

    Builds a quantized TFLite model in which a single ``tf.pad`` output is
    consumed by two (depthwise-)conv2d ops, partitions it for the NPU (pad
    replication happens inside ``partition_ethosu_by_table``), legalizes each
    partitioned function, and checks the resulting operator names.
    """
    dtype = "int8"

    def create_tflite_graph():
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                # The input strides to the TensorFlow API needs to be of shape 1x4.
                # Defined before the helper below, which closes over it.
                tf_strides = [1, strides[0], strides[1], 1]

                def make_depthwise_or_conv2d(pair_idx):
                    # Build either a depthwise or a regular conv2d, as selected
                    # by op_pairs[pair_idx]; both consume the shared pad output.
                    if op_pairs[pair_idx] == "depthwise":
                        weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
                        weight = tf.constant(
                            np.random.uniform(size=weight_shape), dtype=tf.float32
                        )
                        return tf.nn.depthwise_conv2d(
                            x, weight, strides=tf_strides, padding=op_padding, dilations=dilation
                        )
                    weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
                    weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
                    return tf.nn.conv2d(
                        x,
                        weight,
                        strides=tf_strides,
                        padding=op_padding,
                        dilations=dilation,
                    )

                # Explicit separate padding, shared by both convolutions below.
                # sep_padding is (top, left, bottom, right).
                x = tf.pad(
                    x,
                    [
                        [0, 0],
                        [sep_padding[0], sep_padding[2]],
                        [sep_padding[1], sep_padding[3]],
                        [0, 0],
                    ],
                    "CONSTANT",
                )

                x1 = make_depthwise_or_conv2d(0)
                x2 = make_depthwise_or_conv2d(1)

                return tf.math.add(x1, x2)

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model to fully-quantized int8 TFLite.
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        return converter.convert()

    conv2d_pattern_table = [
        (
            ethosu.QnnConv2DParams.composite_name,
            ethosu.qnn_conv2d_pattern(),
            lambda pat: ethosu.QnnConv2DParams(pat).is_valid(),
        ),
        (
            ethosu.QnnDepthwiseConv2DParams.composite_name,
            ethosu.qnn_depthwise_conv2d_pattern(),
            lambda pat: ethosu.QnnDepthwiseConv2DParams(pat).is_valid(),
        ),
    ]

    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )

    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = partition_ethosu_by_table(mod, conv2d_pattern_table)

    # After pad replication each convolution ends up in its own partitioned
    # function; legalize each one and check it lowered to the expected NPU op.
    expected_op_name = {
        "conv2d": "contrib.ethosu.conv2d",
        "depthwise": "contrib.ethosu.depthwise_conv2d",
    }
    for idx, op_kind in enumerate(op_pairs):
        gv_name = "tvmgen_default_ethos_u_main_" + str(idx)
        mod[gv_name] = dataflow_pattern.rewrite(
            [legalize.Conv2DRewriter(), legalize.DepthwiseConv2DRewriter()],
            mod[gv_name],
        )
        assert mod[gv_name].body.op.name == expected_op_name[op_kind]
3808+
# Allow running this test file directly as a script via TVM's pytest wrapper.
if __name__ == "__main__":
    tvm.testing.main()
0 commit comments