From f776870f47485fcf3793e0cd7440ba78a409ce6c Mon Sep 17 00:00:00 2001 From: Github Executorch Date: Tue, 23 Dec 2025 09:31:51 -0800 Subject: [PATCH 1/2] Fix circular import in cortex_m passes using relative imports MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changed cortex_m_pass_manager.py to use relative imports (from .activation_fusion_pass) instead of absolute imports (from executorch.backends.cortex_m.passes) for intra-package references. This breaks the circular import chain: __init__.py → cortex_m_pass_manager.py → __init__.py Resolves: ImportError: cannot import name 'ActivationFusionPass' from partially initialized module 'executorch.backends.cortex_m.passes' --- backends/cortex_m/passes/cortex_m_pass_manager.py | 15 +++++++-------- backends/cortex_m/quantizer/__init__.py | 1 - 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/backends/cortex_m/passes/cortex_m_pass_manager.py b/backends/cortex_m/passes/cortex_m_pass_manager.py index bd3fad1cf94..c47bcc357ea 100644 --- a/backends/cortex_m/passes/cortex_m_pass_manager.py +++ b/backends/cortex_m/passes/cortex_m_pass_manager.py @@ -10,14 +10,6 @@ FoldAndAnnotateQParamsPass, ScalarsToAttributePass, ) -from executorch.backends.cortex_m.passes import ( - ActivationFusionPass, - ClampHardswishPass, - ConvertToCortexMPass, - DecomposeHardswishPass, - QuantizedOpFusionPass, - ReplaceQuantNodesPass, -) from executorch.backends.transforms.replace_scalar_with_tensor import ( ReplaceScalarWithTensorArgPass, ) @@ -26,6 +18,13 @@ from executorch.exir.program._program import _transform from torch.export import ExportedProgram +from .activation_fusion_pass import ActivationFusionPass +from .clamp_hardswish_pass import ClampHardswishPass +from .convert_to_cortex_m_pass import ConvertToCortexMPass +from .decompose_hardswish_pass import DecomposeHardswishPass +from .quantized_op_fusion_pass import QuantizedOpFusionPass +from .replace_quant_nodes_pass import ReplaceQuantNodesPass
+ class CortexMPassManager(PassManager): diff --git a/backends/cortex_m/quantizer/__init__.py b/backends/cortex_m/quantizer/__init__.py index 39a3de431ff..c55f200bbaa 100644 --- a/backends/cortex_m/quantizer/__init__.py +++ b/backends/cortex_m/quantizer/__init__.py @@ -16,4 +16,3 @@ SOFTMAX_OUTPUT_FIXED_QSPEC, SOFTMAX_PER_TENSOR_CONFIG, ) -from .quantizer import CortexMQuantizer, SharedQspecQuantizer # noqa From 8186e269364db35989630321ce78e419dc21c966 Mon Sep 17 00:00:00 2001 From: Github Executorch Date: Mon, 5 Jan 2026 14:27:25 -0800 Subject: [PATCH 2/2] Fix division by zero in reduce_util.h grain size calculation Add zero check for reduction_size before dividing to calculate grain_size, preventing crash when fuzzer triggers edge case with empty reduction dimension. --- kernels/portable/cpu/util/reduce_util.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/kernels/portable/cpu/util/reduce_util.h b/kernels/portable/cpu/util/reduce_util.h index 51981328c4f..f800fb6f736 100644 --- a/kernels/portable/cpu/util/reduce_util.h +++ b/kernels/portable/cpu/util/reduce_util.h @@ -814,10 +814,12 @@ template const Func& func) { #ifdef ET_USE_THREADPOOL const ssize_t reduction_size = get_reduced_dim_product(in, dim); - const auto grain_size = std::max( - static_cast<ssize_t>(1), - static_cast<ssize_t>(executorch::extension::internal::GRAIN_SIZE) / - reduction_size); + const auto grain_size = reduction_size == 0 + ? 1 + : std::max( + static_cast<ssize_t>(1), + static_cast<ssize_t>(executorch::extension::internal::GRAIN_SIZE) / + reduction_size); #else // ET_USE_THREADPOOL const auto grain_size = 1; #endif // ET_USE_THREADPOOL