Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
11e7ee5
First commit for openvino backend
ynimmaga Nov 6, 2024
be927aa
Added example for openvino backend
ynimmaga Nov 12, 2024
06759ef
Updated CMakeLists.txt to add openvino build option
ynimmaga Nov 12, 2024
2b5f599
Updated headers for openvino aot steps
ynimmaga Nov 13, 2024
86f685e
Fixed library path errors in cmake
ynimmaga Nov 13, 2024
bb12f60
Configure device with compile spec
ynimmaga Nov 20, 2024
c9c8967
Added aot compiler script for openvino
ynimmaga Nov 21, 2024
f5bd832
Temporary build instructions
ynimmaga Dec 10, 2024
f3dc62c
Added standalone build script for openvino backend
ynimmaga Dec 11, 2024
eec7831
Merge branch 'openvino_backend' of https://github.com/ynimmaga/execut…
ynimmaga Dec 11, 2024
ca852de
Handling multiple inputs/outputs with zero-copy
cavusmustafa Dec 13, 2024
bdddb16
Merge branch 'pytorch:main' into openvino_backend
ynimmaga Dec 19, 2024
9803bd8
Merge branch 'pytorch:main' into openvino_backend
ynimmaga Jan 2, 2025
b1f38f8
Merge branch 'pytorch:main' into openvino_backend
ynimmaga Jan 3, 2025
7c43014
Merge pull request #5 from cavusmustafa/zero_copy_output
ynimmaga Jan 21, 2025
0703814
Added fallback with portable kernels
ynimmaga Jan 21, 2025
0a769be
Added openvino_functions.yaml containing only unsupported ops
ynimmaga Jan 21, 2025
0e6707d
Updated the unsupported ops for fallback
ynimmaga Jan 21, 2025
68a1cd4
Removed redundant example for openvino backend
ynimmaga Jan 21, 2025
4d1b4eb
Reconfigured openvino backend functions
ynimmaga Jan 22, 2025
5c55a56
Updated openvino backend cmake file
ynimmaga Jan 22, 2025
3c6d123
Merge pull request #6 from ynimmaga/portable_kernels
ynimmaga Jan 28, 2025
29d8400
Added arguments for model path and num iters to openvino example
ynimmaga Jan 30, 2025
379937e
Merge pull request #8 from ynimmaga/portable_kernels
cavusmustafa Jan 30, 2025
5806788
Initial unit tests for OpenVINO backend
cavusmustafa Jan 29, 2025
916ba64
Unit test update and cleanup
cavusmustafa Jan 30, 2025
e0b1bb7
Input/Output processing for example and unit tests
cavusmustafa Jan 31, 2025
9108770
Added executorch parameter to openvino_compile call
cavusmustafa Jan 31, 2025
ecbe5e2
New op unit tests added
cavusmustafa Feb 1, 2025
d563e4c
Merge pull request #7 from cavusmustafa/openvino_backend_unit_tests
ynimmaga Feb 1, 2025
a4d7458
Added license headers to the openvino files
ynimmaga Feb 1, 2025
197cdc2
Merge branch 'pytorch:main' into openvino_backend
ynimmaga Feb 2, 2025
268255b
Merge pull request #9 from ynimmaga/doc_changes
ynimmaga Feb 3, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,8 @@ option(EXECUTORCH_BUILD_MPS "Build the MPS backend" OFF)

option(EXECUTORCH_BUILD_NEURON "Build the backends/mediatek directory" OFF)

# Default OFF to match every other backend option in this file; enable
# explicitly with -DEXECUTORCH_BUILD_OPENVINO=ON. (Defaulting a
# hardware-specific backend to ON would break builds on machines without
# an OpenVINO install.)
option(EXECUTORCH_BUILD_OPENVINO "Build the OpenVINO backend" OFF)

option(EXECUTORCH_BUILD_PYBIND "Build the Python Bindings" OFF)

option(EXECUTORCH_BUILD_QNN "Build the Qualcomm backend" OFF)
Expand Down Expand Up @@ -656,6 +658,10 @@ if(EXECUTORCH_BUILD_NEURON)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/mediatek)
endif()

# Descend into the OpenVINO backend only when its option is enabled.
if(EXECUTORCH_BUILD_OPENVINO)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/openvino)
endif()

# Descend into the Qualcomm backend only when its option is enabled.
if(EXECUTORCH_BUILD_QNN)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/qualcomm)
endif()
Expand Down
77 changes: 77 additions & 0 deletions backends/openvino/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
# Copyright (c) Intel Corporation
#
# Licensed under the BSD License (the "License"); you may not use this file
# except in compliance with the License. See the license file in the root
# directory of this source tree for more details.

# Builds the OpenVINO backend as the shared library `openvino_backend`.
# Requires the INTEL_OPENVINO_DIR environment variable to point at an
# OpenVINO install tree (read at configure time only).

# Require C++17 for the backend sources.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Ensure compile_commands.json is generated for tooling (clang-tidy, IDEs).
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Set up EXECUTORCH_ROOT if not already provided by the parent project.
if(NOT EXECUTORCH_ROOT)
  set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
endif()

# Include utility cmake script from the executorch repository.
include(${EXECUTORCH_ROOT}/build/Utils.cmake)

# Parent of the executorch checkout, so sources can use
# #include <executorch/...> style paths.
set(COMMON_INCLUDE_DIRS ${EXECUTORCH_ROOT}/..)

# Locate the OpenVINO install tree from the environment.
set(OPENVINO_DIR "$ENV{INTEL_OPENVINO_DIR}")
if(NOT OPENVINO_DIR)
  message(FATAL_ERROR "INTEL_OPENVINO_DIR environment variable is not set.")
endif()

set(OPENVINO_INCLUDE_DIRS
  # NOTE(review): the deployment_tools path is the pre-2022 layout; harmless
  # on newer installs where it simply does not exist.
  ${OPENVINO_DIR}/deployment_tools/inference_engine/include
  ${OPENVINO_DIR}/runtime/include
)

# Define OpenVINO library path and the runtime shared library.
set(OPENVINO_LIB_PATH ${OPENVINO_DIR}/runtime/lib/intel64)
set(OPENVINO_LIB ${OPENVINO_LIB_PATH}/libopenvino.so)

# Fail at configure time with a clear message instead of at link time.
if(NOT EXISTS "${OPENVINO_LIB}")
  message(FATAL_ERROR
    "OpenVINO runtime library not found at ${OPENVINO_LIB}. "
    "Check the INTEL_OPENVINO_DIR installation."
  )
endif()

# Wrap the prebuilt OpenVINO runtime in an imported target so its include
# directories travel with the link dependency instead of being set globally.
if(NOT TARGET openvino::runtime)
  add_library(openvino::runtime UNKNOWN IMPORTED)
  set_target_properties(openvino::runtime PROPERTIES
    IMPORTED_LOCATION "${OPENVINO_LIB}"
    INTERFACE_INCLUDE_DIRECTORIES "${OPENVINO_INCLUDE_DIRS}"
  )
endif()

# Add the OpenVINO backend library as a shared library.
add_library(openvino_backend SHARED)

# The backend needs RTTI and exceptions regardless of the parent's defaults.
target_compile_options(openvino_backend PRIVATE -frtti -fexceptions)

# Target-scoped includes only: a directory-wide include_directories() here
# would leak into every target configured after this subdirectory.
target_include_directories(openvino_backend PUBLIC ${COMMON_INCLUDE_DIRS})

# Link the OpenVINO runtime and executorch core into the backend.
target_link_libraries(openvino_backend PRIVATE
  openvino::runtime
  executorch_core
)

# Backend sources.
target_sources(openvino_backend PRIVATE
  ${CMAKE_CURRENT_LIST_DIR}/runtime/OpenvinoBackend.cpp
)

# Embed an rpath so libopenvino.so resolves at load time without requiring
# LD_LIBRARY_PATH to be set.
target_link_options(openvino_backend PRIVATE -Wl,-rpath=${OPENVINO_LIB_PATH})

# Install the OpenVINO backend library to the lib directory.
install(TARGETS openvino_backend DESTINATION lib)

4 changes: 4 additions & 0 deletions backends/openvino/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Public entry points of the OpenVINO backend package.
from .partitioner import OpenvinoPartitioner
from .preprocess import OpenvinoBackend

# __all__ must contain *names* (strings). Listing the objects themselves
# makes `from backends.openvino import *` raise
# "TypeError: Item in __all__ must be str" on Python 3.
__all__ = ["OpenvinoBackend", "OpenvinoPartitioner"]
242 changes: 242 additions & 0 deletions backends/openvino/openvino_functions.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,242 @@
# Operators that are NOT supported by the OpenVINO backend. Each op listed
# here is registered against ExecuTorch's portable reference kernels
# (torch::executor::*) so models containing these ops fall back to CPU
# execution instead of failing to load.
#
# NOTE(review): in this view the nested keys (kernels / arg_meta /
# kernel_name) appear with their indentation stripped; the file on disk must
# indent them under each `- op:` entry to be valid YAML — confirm against
# the repository copy.

- op: _cdist_forward.out
kernels:
- arg_meta: null
kernel_name: torch::executor::_cdist_forward_out

- op: _pdist_forward.out
kernels:
- arg_meta: null
kernel_name: torch::executor::_pdist_forward_out

- op: alias_copy.out
kernels:
- arg_meta: null
kernel_name: torch::executor::alias_copy_out

- op: any.all_out
kernels:
- arg_meta: null
kernel_name: torch::executor::any_all_out

- op: any.dims_out
kernels:
- arg_meta: null
kernel_name: torch::executor::any_dims_out

- op: atan.out
kernels:
- arg_meta: null
kernel_name: torch::executor::atan_out

- op: atan2.out
kernels:
- arg_meta: null
kernel_name: torch::executor::atan2_out

- op: bitwise_or.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::bitwise_or_Scalar_out

- op: bitwise_xor.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::bitwise_xor_Scalar_out

- op: clamp.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::clamp_tensor_out

- op: convolution_backward.out
kernels:
- arg_meta: null
kernel_name: torch::executor::convolution_backward_out

- op: detach_copy.out
kernels:
- arg_meta: null
kernel_name: torch::executor::detach_copy_out

- op: diagonal_copy.out
kernels:
- arg_meta: null
kernel_name: torch::executor::diagonal_copy_out

- op: expm1.out
kernels:
- arg_meta: null
kernel_name: torch::executor::expm1_out

- op: floor_divide.out
kernels:
- arg_meta: null
kernel_name: torch::executor::floor_divide_out

- op: index_put.out
kernels:
- arg_meta: null
kernel_name: torch::executor::index_put_out

- op: logical_and.out
kernels:
- arg_meta: null
kernel_name: torch::executor::logical_and_out

- op: logical_or.out
kernels:
- arg_meta: null
kernel_name: torch::executor::logical_or_out

- op: logical_xor.out
kernels:
- arg_meta: null
kernel_name: torch::executor::logical_xor_out

- op: logit.out
kernels:
- arg_meta: null
kernel_name: torch::executor::logit_out

- op: masked_scatter.out
kernels:
- arg_meta: null
kernel_name: torch::executor::masked_scatter_out

- op: masked_select.out
kernels:
- arg_meta: null
kernel_name: torch::executor::masked_select_out

- op: narrow_copy.out
kernels:
- arg_meta: null
kernel_name: torch::executor::narrow_copy_out

- op: nonzero.out
kernels:
- arg_meta: null
kernel_name: torch::executor::nonzero_out

- op: pixel_shuffle.out
kernels:
- arg_meta: null
kernel_name: torch::executor::pixel_shuffle_out

- op: pixel_unshuffle.out
kernels:
- arg_meta: null
kernel_name: torch::executor::pixel_unshuffle_out

- op: prod.int_out
kernels:
- arg_meta: null
kernel_name: torch::executor::prod_int_out

- op: prod.out
kernels:
- arg_meta: null
kernel_name: torch::executor::prod_out

- op: remainder.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::remainder_Tensor_out

- op: remainder.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::remainder_Scalar_out

- op: repeat_interleave.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::repeat_interleave_Tensor_out

- op: reflection_pad1d.out
kernels:
- arg_meta: null
kernel_name: torch::executor::reflection_pad1d_out

- op: reflection_pad3d.out
kernels:
- arg_meta: null
kernel_name: torch::executor::reflection_pad3d_out

- op: replication_pad1d.out
kernels:
- arg_meta: null
kernel_name: torch::executor::replication_pad1d_out

- op: replication_pad2d.out
kernels:
- arg_meta: null
kernel_name: torch::executor::replication_pad2d_out

- op: replication_pad3d.out
kernels:
- arg_meta: null
kernel_name: torch::executor::replication_pad3d_out

- op: round.out
kernels:
- arg_meta: null
kernel_name: torch::executor::round_out

- op: scatter_add.out
kernels:
- arg_meta: null
kernel_name: torch::executor::scatter_add_out

- op: split_copy.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::split_copy_Tensor_out

- op: squeeze_copy.dim_out
kernels:
- arg_meta: null
kernel_name: torch::executor::squeeze_copy_dim_out

- op: sub.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::sub_scalar_out

- op: t_copy.out
kernels:
- arg_meta: null
kernel_name: torch::executor::t_copy_out

- op: transpose_copy.int_out
kernels:
- arg_meta: null
kernel_name: torch::executor::transpose_copy_int_out

- op: trunc.out
kernels:
- arg_meta: null
kernel_name: torch::executor::trunc_out

- op: unbind_copy.int_out
kernels:
- arg_meta: null
kernel_name: torch::executor::unbind_copy_int_out

- op: upsample_bilinear2d.vec_out
kernels:
- arg_meta: null
kernel_name: torch::executor::upsample_bilinear2d_vec_out

# dim_order ops are not ATen ops, so they are declared with their full
# schema via `func:` rather than `op:`.
- func: dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
kernels:
- arg_meta: null
kernel_name: torch::executor::_empty_dim_order_out

- func: dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
kernels:
- arg_meta: null
kernel_name: torch::executor::_to_dim_order_copy_out
Loading