-
Notifications
You must be signed in to change notification settings - Fork 950
Preserve caller-provided non-contiguous strides in make_tensor_ptr #19006
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -86,21 +86,22 @@ TensorPtr make_tensor_ptr( | |
| ET_CHECK_MSG(error == runtime::Error::Ok, "Failed to compute strides."); | ||
|
|
||
| if (!strides.empty()) { | ||
| bool is_contiguous = true; | ||
| for (size_t i = 0; i < dim; i++) { | ||
| ET_CHECK_MSG( | ||
| strides[i] == computed_strides[i] || sizes[i] == 1, | ||
| "invalid strides for dim %zu: %" ET_PRI_SIZES_AND_STRIDES | ||
| "!= %" ET_PRI_SIZES_AND_STRIDES | ||
| " while its size is %" ET_PRI_SIZES_AND_STRIDES " != 1", | ||
| i, | ||
| strides[i], | ||
| computed_strides[i], | ||
| sizes[i]); | ||
| if (strides[i] != computed_strides[i] && sizes[i] != 1) { | ||
| is_contiguous = false; | ||
| break; | ||
| } | ||
| } | ||
| if (is_contiguous) { | ||
| strides = std::move(computed_strides); | ||
| } | ||
| // else: keep the caller-provided non-contiguous strides (e.g. from | ||
| // reinterpret_tensor views like chunk/split). | ||
| } else { | ||
| strides = std::move(computed_strides); | ||
| } | ||
|
Comment on lines
88
to
103
|
||
|
|
||
| strides = std::move(computed_strides); | ||
|
|
||
| #ifndef USE_ATEN_LIB | ||
| executorch::aten::TensorImpl tensor_impl( | ||
| type, | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -420,14 +420,19 @@ TEST_F(TensorPtrTest, MakeViewReuseMetadataWhenShapeSame) { | |||||||||||||
| EXPECT_EQ(view->strides()[1], 3); | ||||||||||||||
| } | ||||||||||||||
|
|
||||||||||||||
| TEST_F(TensorPtrTest, MakeViewShapeChangeWithExplicitOldStridesExpectDeath) { | ||||||||||||||
| TEST_F(TensorPtrTest, MakeViewShapeChangeWithNonContiguousStrides) { | ||||||||||||||
| float data[12] = {0}; | ||||||||||||||
| auto tensor = make_tensor_ptr({3, 4}, data); | ||||||||||||||
| std::vector<executorch::aten::StridesType> old_strides( | ||||||||||||||
| tensor->strides().begin(), tensor->strides().end()); | ||||||||||||||
|
|
||||||||||||||
| ET_EXPECT_DEATH( | ||||||||||||||
| { auto _ = make_tensor_ptr(tensor, {2, 6}, {}, old_strides); }, ""); | ||||||||||||||
| // Reshaping [3,4] to [2,6] with old strides [4,1] creates a non-contiguous | ||||||||||||||
| // view (stride[0]=4 != contiguous 6). This is allowed for reinterpret_tensor. | ||||||||||||||
| auto view = make_tensor_ptr(tensor, {2, 6}, {}, old_strides); | ||||||||||||||
| EXPECT_EQ(view->size(0), 2); | ||||||||||||||
| EXPECT_EQ(view->size(1), 6); | ||||||||||||||
| EXPECT_EQ(view->strides()[0], 4); | ||||||||||||||
| EXPECT_EQ(view->strides()[1], 1); | ||||||||||||||
| } | ||||||||||||||
|
|
||||||||||||||
| TEST_F(TensorPtrTest, MakeViewInvalidDimOrderExpectDeath) { | ||||||||||||||
|
|
@@ -967,8 +972,11 @@ TEST_F(TensorPtrTest, TensorDefaultDimOrderAndStrides) { | |||||||||||||
|
|
||||||||||||||
| TEST_F(TensorPtrTest, TensorMismatchStridesAndDimOrder) { | ||||||||||||||
| float data[12] = {0}; | ||||||||||||||
| ET_EXPECT_DEATH( | ||||||||||||||
| { auto _ = make_tensor_ptr({3, 4}, data, {1, 0}, {1, 4}); }, ""); | ||||||||||||||
| // dim_order={1,0} implies strides={1,3}, but caller provides {1,4}. | ||||||||||||||
| // Non-contiguous strides are preserved for reinterpret_tensor views. | ||||||||||||||
|
Comment on lines
+975
to
+976
|
||||||||||||||
| // dim_order={1,0} implies strides={1,3}, but caller provides {1,4}. | |
| // Non-contiguous strides are preserved for reinterpret_tensor views. | |
| // This directly constructs a tensor from explicit sizes, dim_order, and | |
| // strides. dim_order={1,0} would normally imply strides={1,3}, but the | |
| // caller provides mismatched strides {1,4}; verify make_tensor_ptr preserves | |
| // the provided strides rather than inferring them from dim_order. |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -151,20 +151,19 @@ void assert_dim_order_and_strides_valid( | |
| THROW_IF_ERROR(error, "Failed to compute strides."); | ||
|
|
||
| if (!strides.empty()) { | ||
| bool is_contiguous = true; | ||
| for (size_t i = 0; i < sizes.size(); i++) { | ||
| THROW_IF_FALSE( | ||
| strides[i] == computed_strides[i] || sizes[i] == 1, | ||
| "invalid strides for dim %zu: %" ET_PRI_SIZES_AND_STRIDES | ||
| "!= %" ET_PRI_SIZES_AND_STRIDES | ||
| " while its size is %" ET_PRI_SIZES_AND_STRIDES " != 1", | ||
| i, | ||
| strides[i], | ||
| computed_strides[i], | ||
| sizes[i]); | ||
| if (strides[i] != computed_strides[i] && sizes[i] != 1) { | ||
| is_contiguous = false; | ||
| break; | ||
| } | ||
| } | ||
| if (is_contiguous) { | ||
| strides = std::move(computed_strides); | ||
| } | ||
| } else { | ||
| strides = std::move(computed_strides); | ||
| } | ||
|
Comment on lines
153
to
166
|
||
|
|
||
| strides = std::move(computed_strides); | ||
| } | ||
|
|
||
| /** | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This removes the only enforcement that strides are a safe/expected layout. As a result, callers can now pass arbitrary strides (including negative/zero strides, or extremely large strides) which can lead to out-of-bounds accesses or integer overflow when computing element offsets/required storage size downstream. At minimum, add validation for obviously unsafe stride patterns (e.g., non-negative strides for non-broadcast dims if negative strides aren’t supported, and overflow-safe computation of the maximum addressable offset for the view). If the API can’t validate against the actual backing allocation size, consider adding an overload that takes the buffer length in bytes (or an explicit storage descriptor) so
`from_blob`/`make_tensor_ptr` can enforce that the view is within bounds.