forked from pytorch/executorch
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtargets.bzl
More file actions
150 lines (138 loc) · 5.52 KB
/
targets.bzl
File metadata and controls
150 lines (138 loc) · 5.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "get_aten_mode_options", "runtime")
load("@fbsource//xplat/executorch/codegen:codegen.bzl", "et_operator_library", "executorch_generated_lib", "exir_custom_ops_aot_lib")
def define_common_targets():
    """Declares the build targets for the quantized operators and their codegen."""

    runtime.export_file(
        name = "quantized.yaml",
        visibility = ["@EXECUTORCH_CLIENTS"],
    )

    # embedding_byte ops are deliberately left out of this list: they are
    # defined separately in Python, mostly so they are easy to share with oss.
    et_operator_library(
        name = "quantized_ops_need_aot_registration",
        ops = [
            "quantized_decomposed::add.out",
            "quantized_decomposed::choose_qparams.Tensor_out",
            "quantized_decomposed::choose_qparams_per_token_asymmetric.out",
            "quantized_decomposed::dequantize_per_channel.out",
            "quantized_decomposed::dequantize_per_tensor.out",
            "quantized_decomposed::dequantize_per_tensor.Tensor_out",
            "quantized_decomposed::dequantize_per_token.out",
            "quantized_decomposed::mixed_linear.out",
            "quantized_decomposed::mixed_mm.out",
            "quantized_decomposed::quantize_per_channel.out",
            "quantized_decomposed::quantize_per_tensor.out",
            "quantized_decomposed::quantize_per_tensor.Tensor_out",
            "quantized_decomposed::quantize_per_token.out",
        ],
        define_static_targets = True,
    )

    # Library used to register the quantized ops into EXIR.
    exir_custom_ops_aot_lib(
        name = "custom_ops_generated_lib",
        yaml_target = ":quantized.yaml",
        visibility = ["//executorch/...", "@EXECUTORCH_CLIENTS"],
        kernels = [":quantized_operators_aten"],
        deps = [":quantized_ops_need_aot_registration"],
    )

    # Also registers the quantized ops into EXIR.
    # TODO: merge this with custom_ops_generated_lib
    exir_custom_ops_aot_lib(
        name = "aot_lib",
        yaml_target = ":quantized.yaml",
        visibility = ["//executorch/...", "@EXECUTORCH_CLIENTS"],
        kernels = [":quantized_operators_aten"],
        deps = [":quantized_ops_need_aot_registration"],
    )

    et_operator_library(
        name = "all_quantized_ops",
        ops_schema_yaml_target = ":quantized.yaml",
        define_static_targets = True,
        visibility = ["//executorch/...", "@EXECUTORCH_CLIENTS"],
    )

    # Windows builds can currently compile only the quantize/dequantize ops,
    # so they get their own dedicated target.
    et_operator_library(
        name = "q_dq_ops",
        ops = [
            "quantized_decomposed::dequantize_per_tensor.out",
            "quantized_decomposed::dequantize_per_tensor.Tensor_out",
            "quantized_decomposed::quantize_per_tensor.out",
            "quantized_decomposed::quantize_per_tensor.Tensor_out",
            "quantized_decomposed::dequantize_per_channel.out",
            "quantized_decomposed::quantize_per_channel.out",
        ],
    )

    # Fan out the runtime libraries over every (aten_mode, support_exceptions)
    # combination; the suffixes keep the resulting target names unique.
    for aten_mode in get_aten_mode_options():
        mode_suffix = "_aten" if aten_mode else ""

        runtime.cxx_library(
            name = "quantized_operators" + mode_suffix,
            srcs = [],
            visibility = ["//executorch/...", "@EXECUTORCH_CLIENTS"],
            exported_deps = [
                "//executorch/kernels/quantized/cpu:quantized_cpu" + mode_suffix,
            ],
        )

        for support_exceptions in (True, False):
            exc_suffix = "" if support_exceptions else "_no_exceptions"

            executorch_generated_lib(
                name = "generated_lib" + mode_suffix + exc_suffix,
                deps = [
                    ":quantized_operators" + mode_suffix,
                    ":all_quantized_ops",
                ],
                custom_ops_yaml_target = ":quantized.yaml",
                custom_ops_aten_kernel_deps = [":quantized_operators_aten"] if aten_mode else [],
                custom_ops_requires_aot_registration = False,
                aten_mode = aten_mode,
                support_exceptions = support_exceptions,
                visibility = ["//executorch/...", "@EXECUTORCH_CLIENTS"],
                define_static_targets = True,
            )

            # Windows-only subset of the generated lib (see :q_dq_ops above).
            executorch_generated_lib(
                name = "q_dq_ops_generated_lib" + mode_suffix + exc_suffix,
                custom_ops_yaml_target = ":quantized.yaml",
                kernel_deps = [
                    "//executorch/kernels/quantized/cpu:op_quantize" + mode_suffix,
                    "//executorch/kernels/quantized/cpu:op_dequantize" + mode_suffix,
                ],
                aten_mode = aten_mode,
                deps = [":q_dq_ops"],
                support_exceptions = support_exceptions,
                visibility = ["//executorch/...", "@EXECUTORCH_CLIENTS"],
            )

    runtime.python_library(
        name = "quantized_ops_lib",
        srcs = ["__init__.py"],
        deps = ["//caffe2:torch"],
        visibility = [
            "//executorch/kernels/quantized/...",
            "@EXECUTORCH_CLIENTS",
        ],
    )