From 7e4988653762800a7fd5d57cf15686687baf43d9 Mon Sep 17 00:00:00 2001
From: Siddartha Pothapragada
Date: Tue, 6 Jan 2026 11:25:47 -0800
Subject: [PATCH] Fix heap-buffer-overflow in constant_pad_nd with
 overflow-safe bounds checking (#16468)

Summary:
Fix fuzzer-discovered heap-buffer-overflow (T250636018) in the
constant_pad_nd kernel.

The bounds checking was missing the actual output buffer end pointer
and used arithmetic that could overflow with crafted inputs. This adds
proper out_data_end tracking and rewrites the bounds checks to use
overflow-safe division instead of potentially-overflowing
multiplication.

Reviewed By: manuelcandales

Differential Revision: D90188241
---
 kernels/portable/cpu/op_constant_pad_nd.cpp | 30 ++++++++++++++-------
 1 file changed, 20 insertions(+), 10 deletions(-)

diff --git a/kernels/portable/cpu/op_constant_pad_nd.cpp b/kernels/portable/cpu/op_constant_pad_nd.cpp
index c3fd4dffb08..d3f3fdd75d7 100644
--- a/kernels/portable/cpu/op_constant_pad_nd.cpp
+++ b/kernels/portable/cpu/op_constant_pad_nd.cpp
@@ -36,6 +36,7 @@
     IntArrayRef self_sizes,
     IntArrayRef self_strides,
     CTYPE* out_data,
+    CTYPE* out_data_end,
     IntArrayRef out_sizes,
     IntArrayRef out_strides,
     IntArrayRef pad,
@@ -59,14 +60,12 @@
   size_t in_step_len = self_strides[dim];

   // Do not copy padding beyond the out tensor bounds.
+  // Use division to avoid potential overflow in multiplication.
   if (pad_before > 0) {
-    size_t numel = 1;
-    for (ET_UNUSED const auto i : c10::irange(out_sizes.size())) {
-      numel *= out_sizes[i];
-    }
+    size_t remaining = out_data_end - out_data;
     ET_KERNEL_CHECK_MSG(
         ctx,
-        numel >= pad_before * out_step_len,
+        out_step_len > 0 && remaining / out_step_len >= pad_before,
         InvalidArgument,
         /* void */,
         "Out tensor is too small for the requested padding.");
@@ -92,6 +91,15 @@
         InvalidArgument,
         /* void */,
         "Out tensor overlaps with the input tensor. This is not supported.");
+    // Bounds check before memcpy
+    // Use overflow-safe check for remaining >= copy_len
+    size_t remaining = out_data_end - out_data;
+    ET_KERNEL_CHECK_MSG(
+        ctx,
+        remaining >= copy_len,
+        InvalidArgument,
+        /* void */,
+        "Out tensor is too small for the copy operation.");
     memcpy(out_data, self_data, copy_nbytes);
     out_data += copy_len;
     self_data += copy_len;
@@ -107,6 +115,7 @@
         self_sizes,
         self_strides,
         out_data,
+        out_data_end,
         out_sizes,
         out_strides,
         pad,
@@ -120,14 +129,12 @@
   }

   // Do not copy padding beyond the out tensor bounds.
+  // Use division to avoid potential overflow in multiplication.
   if (pad_after > 0) {
-    size_t numel = 1;
-    for (ET_UNUSED const auto i : c10::irange(out_sizes.size())) {
-      numel *= out_sizes[i];
-    }
+    size_t remaining = out_data_end - out_data;
     ET_KERNEL_CHECK_MSG(
         ctx,
-        numel >= pad_after * out_step_len,
+        out_step_len > 0 && remaining / out_step_len >= pad_after,
         InvalidArgument,
         /* void */,
         "Out tensor is too small for the requested padding.");
@@ -182,6 +189,8 @@
   IntArrayRef out_sizes_ref(out_sizes, ndim);
   IntArrayRef out_strides_ref(out_strides, ndim);

+  CTYPE* out_data_end = out_data + out.numel();
+
   apply_padding_to_dim(
       ctx,
       ndim,
@@ -189,6 +198,7 @@
       self_sizes_ref,
       self_strides_ref,
       out_data,
+      out_data_end,
       out_sizes_ref,
       out_strides_ref,
       pad,