Merged
Changes from all commits (41 commits)
c20e8ab
Create intermediate directories in ETCoreMLModelManager (#9562)
metascroy Mar 25, 2025
3a55f32
Bump HF version (#9408)
guangy10 Mar 25, 2025
cabc4e9
Add a test target to SwiftPM package. (#9557)
shoumikhin Mar 25, 2025
f13aeff
Utility helper to deduce scalar type from NSNumber. (#9552)
shoumikhin Mar 25, 2025
fd75d05
Remove Android ExecuTorch demo
kirklandsign Mar 25, 2025
72f879c
Utility helper to extract a value from NSNumber. (#9595)
shoumikhin Mar 25, 2025
7248b19
Fix conflicting pytorch nightly versions (#9593)
jathu Mar 25, 2025
f7e6dbf
Reset Dynamo Before Export for all XNNPACK Tests (#9561)
mcr229 Mar 25, 2025
fc25829
Switch to new ao quant api for 8da4w (#8501)
jackzhxng Mar 25, 2025
be3c1ec
Remove usage of environment variables in wheel building (#9583)
jathu Mar 25, 2025
9e8503c
Rename deduceScalarType to deduceType (#9604)
shoumikhin Mar 25, 2025
2e2cf98
Utility helpers to convert between std::vector and NSArray.
shoumikhin Mar 25, 2025
02729b8
Enum for Module load mode.
shoumikhin Mar 25, 2025
811352d
Enum for Value tag.
shoumikhin Mar 25, 2025
610f398
Enum for Tensor scalar type and shape dynamism.
shoumikhin Mar 25, 2025
4b8ac94
Revert "Bump HF version" (#9600)
larryliu0820 Mar 25, 2025
0342bab
Turn on EXECUTORCH_BUILD_PYBIND when implicitly wanted (#9611)
jathu Mar 26, 2025
306b649
Update Qualcomm SoCs support list (#9592)
cccclai Mar 26, 2025
785afce
Arm backend: fix test_llama.py (#9575)
mansnils Mar 26, 2025
50ec82e
Arm backend: Set qtol=1 for flaky tests. (#9641)
Erik-Lundell Mar 26, 2025
2f65c3a
Add export_llama performance regression test using expected ops (#9158)
jackzhxng Mar 26, 2025
7159650
Add buck target for hf_download
jackzhxng Mar 26, 2025
265b9b7
Arm backend: Extend convolution support check to 3d (#9640)
Erik-Lundell Mar 26, 2025
cb1d175
Arm backend: set scalar div xfails to non strict. (#9644)
Erik-Lundell Mar 26, 2025
c18e5f6
Qualcomm AI Engine Direct - Mimi Enablement Stage 1 (#9570)
winskuo-quic Mar 26, 2025
9d243e9
Update docs after moving Android ExecuTorchDemo app
kirklandsign Mar 26, 2025
c43d5ad
Alternative approach to handling memory offset shift (#9406)
JakeStevens Mar 26, 2025
46937eb
Enable CoreML by default on macOS wheel builds (#9483)
jathu Mar 26, 2025
91be93c
Default Uninitialized Llama2 Weights to Zeros, and Provide Better Qua…
mcr229 Mar 26, 2025
74e4308
Updates docs to reflect changes in CoreML (#9645)
jathu Mar 26, 2025
751e646
Add portable ELU implementation + test (#9520)
swolchok Mar 26, 2025
12240cf
add sample input for new core op elu.out (#9577)
swolchok Mar 26, 2025
4873681
Fix -Wglobal-constructors/-Wshadow for executor_runner targets (#9509)
swolchok Mar 26, 2025
b89328a
Depend on extension/threadpool, not thread_parallel_interface, in buc…
swolchok Mar 26, 2025
118f0a4
elementwise_util: s/common/compute/ almost everywhere and deprecate S…
swolchok Mar 26, 2025
f8c1014
Deprecate non-internal elementwise_util APIs (#9621)
swolchok Mar 26, 2025
7d614a5
Update pinned optimum-executorch commit hash in the CI (#9653)
guangy10 Mar 26, 2025
80992a9
Remove tombstone messages proactively
bigfootjon Mar 26, 2025
f5bbad1
Upgrade arm runner for phi4-mini (#9658)
mergennachin Mar 26, 2025
28af462
fix merge conflict: openvino pybind
cavusmustafa Mar 26, 2025
6258b42
pybind openvino merge update
cavusmustafa Mar 26, 2025
2 changes: 1 addition & 1 deletion .ci/docker/ci_commit_pins/pytorch.txt
@@ -1 +1 @@
295f2ed4d103017f7e19a7b8263ece606cd629db
7ae0ce6360b6e4f944906502d20da24c04debee5
4 changes: 2 additions & 2 deletions .ci/scripts/gather_test_models.py
@@ -14,7 +14,7 @@
from typing import Any

from examples.models import MODEL_NAME_TO_MODEL
from examples.xnnpack import MODEL_NAME_TO_OPTIONS
from examples.xnnpack import MODEL_NAME_TO_OPTIONS, QuantType

DEFAULT_RUNNERS = {
"linux": "linux.2xlarge",
@@ -154,7 +154,7 @@ def export_models_for_ci() -> dict[str, dict]:
if backend == "xnnpack":
if name not in MODEL_NAME_TO_OPTIONS:
continue
if MODEL_NAME_TO_OPTIONS[name].quantization:
if MODEL_NAME_TO_OPTIONS[name].quantization != QuantType.NONE:
backend += "-quantization"

if MODEL_NAME_TO_OPTIONS[name].delegation:
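For context on this change: `quantization` is now compared against an explicit `QuantType.NONE` instead of relying on truthiness. Python enum members are truthy by default, so a plain `if options.quantization:` would also fire for `NONE`. A minimal sketch of the idea follows; the `QuantType` members and the `Options` shape are illustrative assumptions, not the actual definitions in `examples/xnnpack`:

```python
from dataclasses import dataclass
from enum import Enum


class QuantType(Enum):
    # Hypothetical members for illustration; the real enum lives in examples/xnnpack.
    NONE = "none"
    STATIC_PER_TENSOR = "static_per_tensor"


@dataclass
class Options:
    quantization: QuantType
    delegation: bool


def backend_label(options: Options, backend: str = "xnnpack") -> str:
    # Enum members are truthy, so compare against NONE explicitly.
    if options.quantization != QuantType.NONE:
        backend += "-quantization"
    if options.delegation:
        backend += "-delegation"
    return backend


assert backend_label(Options(QuantType.NONE, delegation=True)) == "xnnpack-delegation"
assert backend_label(Options(QuantType.STATIC_PER_TENSOR, delegation=False)) == "xnnpack-quantization"
```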
3 changes: 1 addition & 2 deletions .ci/scripts/unittest-linux.sh
@@ -21,8 +21,7 @@ if [[ "$BUILD_TOOL" == "cmake" ]]; then
source .ci/scripts/setup-vulkan-linux-deps.sh

PYTHON_EXECUTABLE=python \
EXECUTORCH_BUILD_PYBIND=ON \
CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
.ci/scripts/setup-linux.sh "$@"

# Install llama3_2_vision dependencies.
3 changes: 1 addition & 2 deletions .ci/scripts/unittest-macos.sh
@@ -21,8 +21,7 @@ trap 'rm -rfv ${TMP_DIR}' EXIT

# Setup MacOS dependencies as there is no Docker support on MacOS atm
PYTHON_EXECUTABLE=python \
EXECUTORCH_BUILD_PYBIND=ON \
CMAKE_ARGS="-DEXECUTORCH_BUILD_COREML=ON -DEXECUTORCH_BUILD_MPS=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_COREML=ON -DEXECUTORCH_BUILD_MPS=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
${CONDA_RUN} --no-capture-output \
.ci/scripts/setup-macos.sh "$@"

5 changes: 1 addition & 4 deletions .ci/scripts/wheel/envvar_base.sh
@@ -8,13 +8,10 @@
# should typically only contain shell variable assignments. Be sure to export
# any variables so that subprocesses will see them.

# Enable pybindings so that users can execute ExecuTorch programs from python.
export EXECUTORCH_BUILD_PYBIND=1

# Ensure that CMAKE_ARGS is defined before referencing it. Defaults to empty
# if not defined.
export CMAKE_ARGS="${CMAKE_ARGS:-}"

# Link the XNNPACK backend into the pybindings runtime so that users can execute
# ExecuTorch programs that delegate to it.
CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_XNNPACK=ON"
CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON"
5 changes: 2 additions & 3 deletions .github/workflows/pull.yml
@@ -365,8 +365,7 @@ jobs:
# build module for executorch.extension.pybindings.portable_lib
BUILD_TOOL="cmake"
PYTHON_EXECUTABLE=python \
EXECUTORCH_BUILD_XNNPACK=ON \
EXECUTORCH_BUILD_PYBIND=ON \
CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON" \
bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"

# see if we can import the module successfully
@@ -504,7 +503,7 @@ jobs:

# Setup MacOS dependencies as there is no Docker support on MacOS atm
PYTHON_EXECUTABLE=python \
EXECUTORCH_BUILD_PYBIND=ON \
CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON" \
EXECUTORCH_BUILD_ARM_BAREMETAL=ON \
.ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"

16 changes: 11 additions & 5 deletions .github/workflows/trunk.yml
@@ -65,22 +65,29 @@ jobs:
matrix:
model: [linear, add, add_mul, ic3, ic4, mv2, mv3, resnet18, resnet50, vit, w2l, mobilebert, emformer_join, emformer_transcribe]
backend: [portable, xnnpack-quantization-delegation]
runner: [linux.arm64.2xlarge]
include:
- model: lstm
backend: portable
runner: linux.arm64.2xlarge
- model: mul
backend: portable
runner: linux.arm64.2xlarge
- model: softmax
backend: portable
runner: linux.arm64.2xlarge
- model: phi_4_mini
backend: portable
runner: linux.arm64.m7g.4xlarge
- model: qwen2_5
backend: portable
runner: linux.arm64.2xlarge
- model: llama3_2_vision_encoder
backend: portable
runner: linux.arm64.2xlarge
fail-fast: false
with:
runner: linux.arm64.2xlarge
runner: ${{ matrix.runner }}
docker-image: executorch-ubuntu-22.04-gcc11-aarch64
submodules: 'true'
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -261,7 +268,7 @@ jobs:

# build module for executorch.extension.pybindings.portable_lib
BUILD_TOOL=${{ matrix.build-tool }}
EXECUTORCH_BUILD_PYBIND=ON PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}"
CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON" PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}"

# see if we can import the module successfully
${CONDA_RUN} python -c "from executorch.extension.pybindings import portable_lib; print('success!')"
@@ -536,9 +543,8 @@ jobs:
git clone https://github.com/huggingface/optimum-executorch
cd optimum-executorch
# There is no release yet, for CI stability, always test from the same commit on main
git checkout 6a7e83f3eee2976fa809335bfb78a45b1ea1cb25
pip install .
pip install accelerate sentencepiece
git checkout 577a2b19670e4c643a5c6ecb09bf47b9a699e7c6
pip install .[tests]
pip list
echo "::endgroup::"

21 changes: 20 additions & 1 deletion Package.swift
@@ -82,5 +82,24 @@ let package = Package(
(value["libraries"] as? [String] ?? []).map { .linkedLibrary($0) }
),
]
}
} + [
.testTarget(
name: "tests",
dependencies: [
.target(name: "executorch_debug"),
.target(name: "kernels_portable"),
],
path: "extension/apple/ExecuTorch/__tests__",
resources: [
.copy("resources/add.pte")
],
linkerSettings: [
.linkedLibrary("c++"),
.unsafeFlags([
"-Xlinker", "-force_load",
"-Xlinker", "cmake-out/kernels_portable.xcframework/macos-arm64/libkernels_portable_macos.a",
])
]
)
]
)
2 changes: 1 addition & 1 deletion backends/apple/coreml/compiler/coreml_preprocess.py
@@ -16,8 +16,8 @@

import coremltools as ct
import coremltools.optimize as cto
import executorchcoreml

from executorch.backends.apple.coreml import executorchcoreml
from executorch.exir.backend.backend_details import (
BackendDetails,
ExportedProgram,
@@ -208,7 +208,7 @@ void set_outputs(std::vector<executorchcoreml::MultiArray>& outputs,
const inmemoryfs::InMemoryFileSystem *inmemory_fs,
NSError * __autoreleasing *error) {
NSError *local_error = nil;
if (![fm createDirectoryAtURL:dst_url withIntermediateDirectories:NO attributes:@{} error:error]) {
if (![fm createDirectoryAtURL:dst_url withIntermediateDirectories:YES attributes:@{} error:error]) {
ETCoreMLLogUnderlyingErrorAndSetNSError(error,
ETCoreMLErrorModelSaveFailed,
local_error,
52 changes: 0 additions & 52 deletions backends/apple/coreml/runtime/inmemoryfs/setup.py

This file was deleted.

25 changes: 0 additions & 25 deletions backends/apple/coreml/scripts/install_inmemoryfs.sh

This file was deleted.

19 changes: 6 additions & 13 deletions backends/apple/coreml/scripts/install_requirements.sh
@@ -10,6 +10,10 @@ SCRIPT_DIR_PATH="$(
pwd -P
)"

# TODO(jathu): remove the need to fetch coremltools to build deps for coreml_executor_runner.
# Keep this version in sync with: pyproject.toml
COREMLTOOLS_VERSION="8.1"

red=`tput setaf 1`
green=`tput setaf 2`

@@ -24,7 +28,7 @@ rm -rf "$COREML_DIR_PATH/third-party"
mkdir "$COREML_DIR_PATH/third-party"

echo "${green}ExecuTorch: Cloning coremltools."
git clone --depth 1 --branch 8.1 "https://github.com/apple/coremltools.git" $COREMLTOOLS_DIR_PATH
git clone --depth 1 --branch "${COREMLTOOLS_VERSION}" "https://github.com/apple/coremltools.git" $COREMLTOOLS_DIR_PATH
cd $COREMLTOOLS_DIR_PATH

STATUS=$?
@@ -43,16 +47,7 @@ fi

mkdir "$COREMLTOOLS_DIR_PATH/build"
cmake -S "$COREMLTOOLS_DIR_PATH" -B "$COREMLTOOLS_DIR_PATH/build"
cmake --build "$COREMLTOOLS_DIR_PATH/build" --parallel

echo "${green}ExecuTorch: Installing coremltools."
pip install "$COREMLTOOLS_DIR_PATH"

STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "${red}ExecuTorch: Failed to install coremltools."
exit 1
fi
cmake --build "$COREMLTOOLS_DIR_PATH/build" --parallel --target mlmodel

echo "${green}ExecuTorch: Cloning nlohmann."
git clone https://github.com/nlohmann/json.git "$COREML_DIR_PATH/third-party/nlohmann_json"
@@ -62,8 +57,6 @@ if [ $STATUS -ne 0 ]; then
exit 1
fi

sh "$COREML_DIR_PATH/scripts/install_inmemoryfs.sh"

echo "${green}ExecuTorch: Copying protobuf files."
mkdir -p "$COREML_DIR_PATH/runtime/sdk/format/"
cp -rf "$PROTOBUF_FILES_DIR_PATH" "$COREML_DIR_PATH/runtime/sdk/format/"
12 changes: 2 additions & 10 deletions backends/apple/coreml/setup.md
@@ -6,16 +6,8 @@ This is a tutorial for setting up the Core ML backend.

1. Follow the instructions described in [Setting Up ExecuTorch](/docs/source/getting-started-setup.md) to set up ExecuTorch environment.

2. Run `install_requirements.sh` to install dependencies required by the **Core ML** backend.

```
cd executorch

./backends/apple/coreml/scripts/install_requirements.sh

```

3. Run the example script to validate that the **Core ML** backend is set up correctly.
2. Run the example script to validate that the **Core ML** backend is set up correctly.

```
cd executorch
@@ -26,7 +18,7 @@ python3 -m examples.apple.coreml.scripts.export --model_name add

```

4. You can now integrate the **Core ML** backend in code.
3. You can now integrate the **Core ML** backend in code.

```python
# Delegate to Core ML backend
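The setup.md diff above is truncated right after the "Delegate to Core ML backend" comment. For orientation, a minimal sketch of what such an integration can look like is below; the `CoreMLPartitioner` import path and the `to_edge_transform_and_lower` call are assumptions based on current ExecuTorch examples, not a copy of the elided documentation text:

```python
import torch

from executorch.backends.apple.coreml.partition import CoreMLPartitioner
from executorch.exir import to_edge_transform_and_lower


class Add(torch.nn.Module):
    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return x + y


example_inputs = (torch.randn(1, 3), torch.randn(1, 3))
exported = torch.export.export(Add(), example_inputs)

# Lower the exported program, delegating supported subgraphs to Core ML.
program = to_edge_transform_and_lower(
    exported, partitioner=[CoreMLPartitioner()]
).to_executorch()

# Serialize the resulting ExecuTorch program to a .pte file.
with open("add_coreml.pte", "wb") as f:
    f.write(program.buffer)
```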
16 changes: 15 additions & 1 deletion backends/arm/operator_support/convolution_support.py
@@ -55,7 +55,7 @@ def _is_node_supported_u55(self, node: fx.Node):

C_in = shape_in[1]
C_out = shape_out[1]
if (C_in == group) and (C_out % C_in) == 0:
if (C_in == group) and (C_out % C_in) == 0 and len(shape_in) <= 4:
# Depthwise convolution
for dim in shape_in[1:]:
if not 1 <= dim <= 65536:
@@ -74,13 +74,19 @@ def _is_node_supported_u55(self, node: fx.Node):

kernel_w = kernel[2]
kernel_h = kernel[3] if len(kernel) > 3 else 1
kernel_z = kernel[4] if len(kernel) > 4 else 1
# Kernel condition misses constraint on sum of absolute weights
if not 1 <= kernel_h <= 64 or not 1 <= kernel_w * kernel_h <= 4096:
self.reporter.report_reject(
node,
f"Convolution needs to have kernel_y<=64, kernel_x*kernel_y<=4096, got kernel ({kernel_w}, {kernel_h})",
)
return False
if kernel_z != 1:
self.reporter.report_reject(
node, f"Convolution3d needs to have kernel_z==1, got {kernel_z}."
)
return False

if not self._stride_condition(node):
self.reporter.report_reject(
@@ -107,6 +113,14 @@ def _stride_condition(self, node: fx.Node) -> bool:
if len(strides) == 1:
strides = [strides[0]] * 2

if len(strides) > 2:
stride_z = strides[2]
if stride_z > 1:
self.reporter.report_reject(
node, f"Convolution3d only supports stride_z<=1, got {stride_z}."
)
return False

for stride, dilation in zip(strides, dilations):
stride_condition = 1 <= stride <= 3
dilation_condition = (not has_padding) and (dilation == 1)
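In summary, the Ethos-U55 support check above now treats a convolution as depthwise only for rank <= 4 inputs, requires any third kernel dimension (kernel_z) to be 1, and rejects depth strides greater than 1. A standalone sketch of the kernel and stride checks, following the index convention used in the diff (a simplified illustration, not the Arm backend's actual implementation):

```python
def conv_kernel_and_stride_supported_u55(kernel: list[int], strides: list[int]) -> bool:
    """Simplified mirror of the kernel/stride checks shown in the diff above."""
    kernel_w = kernel[2]
    kernel_h = kernel[3] if len(kernel) > 3 else 1
    kernel_z = kernel[4] if len(kernel) > 4 else 1

    # 2D bounds: kernel_y <= 64 and kernel_x * kernel_y <= 4096.
    if not (1 <= kernel_h <= 64) or not (1 <= kernel_w * kernel_h <= 4096):
        return False
    # A third kernel dimension is only accepted when it is degenerate.
    if kernel_z != 1:
        return False
    # Convolution3d: a depth stride greater than 1 is rejected.
    if len(strides) > 2 and strides[2] > 1:
        return False
    return True


assert conv_kernel_and_stride_supported_u55([8, 8, 3, 3], [1, 1]) is True
assert conv_kernel_and_stride_supported_u55([8, 8, 3, 3, 2], [1, 1, 1]) is False  # kernel_z != 1
```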