Skip to content

Commit 10ef41e

Browse files
kimishpatel authored and facebook-github-bot committed
Fix portable kernel utils for aten mode (#1563)
Summary: Pull Request resolved: #1563. In supporting the build for aten mode, reduce_util has a dependency on index_util.cpp. Reduce util depends on index_util for a util function, check_index_out_args. However, this function is used only when not being built for aten mode. When being built with aten mode, index_util.cpp ends up including c10/ScalarType.h (via scalar_type_utils.h). Both c10/ScalarType.h and scalar_type_utils.h define toString, which results in a compiler error. We should really fix scalar_type_utils.h to allow for an aten mode build, but that needs more work. In the meantime, check_index_out_args is used only in reduce_util.cpp, so there is no need to really factor out index_util.cpp; thus, removing this refactor. ghstack-source-id: 211679772 exported-using-ghexport Reviewed By: larryliu0820, GregoryComer Differential Revision: D52626505 fbshipit-source-id: 57d4fdda77117347813738d86d611f8c17d5e660
1 parent 845ce38 commit 10ef41e

5 files changed

Lines changed: 6 additions & 27 deletions

File tree

kernels/portable/cpu/util/index_util.cpp

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -129,18 +129,5 @@ bool check_scatter_add_args(
129129
return true;
130130
}
131131

132-
bool check_index_out_args(
133-
const Tensor& in,
134-
const Tensor& out,
135-
const Tensor& index_out) {
136-
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
137-
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_shape(out, index_out));
138-
ET_LOG_AND_RETURN_IF_FALSE(
139-
tensor_is_default_or_channels_last_dim_order(index_out));
140-
ET_LOG_AND_RETURN_IF_FALSE(index_out.scalar_type() == ScalarType::Long);
141-
142-
return true;
143-
}
144-
145132
} // namespace executor
146133
} // namespace torch

kernels/portable/cpu/util/index_util.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -34,10 +34,5 @@ bool check_scatter_add_args(
3434
const Tensor& src,
3535
Tensor& out);
3636

37-
bool check_index_out_args(
38-
const Tensor& in,
39-
const Tensor& out,
40-
const Tensor& index_out);
41-
4237
} // namespace executor
4338
} // namespace torch

kernels/portable/cpu/util/reduce_util.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,10 +12,6 @@
1212
#include <executorch/runtime/platform/assert.h>
1313
#include <cstring>
1414

15-
#ifndef USE_ATEN_LIB
16-
#include <executorch/kernels/portable/cpu/util/index_util.h>
17-
#endif
18-
1915
namespace torch {
2016
namespace executor {
2117

@@ -426,7 +422,11 @@ bool check_min_max_args(
426422
Tensor& max_indices) {
427423
ET_LOG_AND_RETURN_IF_FALSE(
428424
check_reduction_args_single_dim(in, dim, keepdim, max));
429-
ET_LOG_AND_RETURN_IF_FALSE(check_index_out_args(in, max, max_indices));
425+
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, max));
426+
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_shape(max, max_indices));
427+
ET_LOG_AND_RETURN_IF_FALSE(
428+
tensor_is_default_or_channels_last_dim_order(max_indices));
429+
ET_LOG_AND_RETURN_IF_FALSE(max_indices.scalar_type() == ScalarType::Long);
430430

431431
return true;
432432
}

kernels/portable/cpu/util/targets.bzl

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,6 @@ def define_common_targets():
167167
deps = [
168168
"//executorch/runtime/kernel:kernel_includes{}".format(suffix),
169169
"//executorch/runtime/core/exec_aten/util:tensor_util{}".format(suffix),
170-
":index_util",
171170
],
172171
exported_preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [],
173172
visibility = ["//executorch/kernels/portable/cpu/...", "//executorch/kernels/quantized/..."],

kernels/quantized/CMakeLists.txt

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,8 +33,7 @@ include(${EXECUTORCH_ROOT}/build/Utils.cmake)
3333
include(${EXECUTORCH_ROOT}/build/Codegen.cmake)
3434
# Quantized ops kernel sources TODO(larryliu0820): use buck2 to gather the
3535
# sources
36-
file(GLOB_RECURSE _quantized_kernels__srcs
37-
"${CMAKE_CURRENT_SOURCE_DIR}/cpu/*.cpp")
36+
list(TRANSFORM _quantized_kernels__srcs PREPEND "${EXECUTORCH_ROOT}/")
3837
# Generate C++ bindings to register kernels into both PyTorch (for AOT) and
3938
# Executorch (for runtime). Here select all ops in quantized.yaml
4039
gen_selected_ops("${CMAKE_CURRENT_LIST_DIR}/quantized.yaml" "" "")
@@ -47,7 +46,6 @@ set(_quantized_sources
4746
${_quantized_kernels__srcs}
4847
${EXECUTORCH_ROOT}/runtime/core/exec_aten/util/tensor_util_aten.cpp # This
4948
# is a hack
50-
${EXECUTORCH_ROOT}/kernels/portable/cpu/util/reduce_util.cpp
5149
)
5250
gen_custom_ops_aot_lib("quantized_ops_aot_lib" "${_quantized_sources}")
5351

0 commit comments

Comments (0)