diff --git a/cgmanifests/generate_cgmanifest.py b/cgmanifests/generate_cgmanifest.py
index a9eaacc6f2938..81181d3ccfb20 100644
--- a/cgmanifests/generate_cgmanifest.py
+++ b/cgmanifests/generate_cgmanifest.py
@@ -90,55 +90,6 @@ def add_github_dep(name, parsed_url):
git_deps[dep] = name
-with open(
- os.path.join(REPO_DIR, "tools", "ci_build", "github", "linux", "docker", "Dockerfile.manylinux2_28_cuda11"),
-) as f:
- for line in f:
- if not line.strip():
- package_name = None
- package_filename = None
- package_url = None
- if package_filename is None:
- m = re.match(r"RUN\s+export\s+(.+?)_ROOT=(\S+).*", line)
- if m is not None:
- package_name = m.group(1)
- package_filename = m.group(2)
- else:
- m = re.match(r"RUN\s+export\s+(.+?)_VERSION=(\S+).*", line)
- if m is not None:
- package_name = m.group(1)
- package_filename = m.group(2)
- elif package_url is None:
- m = re.match(r"(.+?)_DOWNLOAD_URL=(\S+)", line)
- if m is not None:
- package_url = m.group(2)
- if package_name == "LIBXCRYPT":
- package_url = m.group(2) + "/v" + package_filename + ".tar.gz"
- elif package_name == "CMAKE":
- package_url = m.group(2) + "/v" + package_filename + "/cmake-" + package_filename + ".tar.gz"
- else:
- package_url = m.group(2) + "/" + package_filename + ".tar.gz"
- parsed_url = urlparse(package_url)
- if parsed_url.hostname == "github.com":
- add_github_dep("manylinux dependency " + package_name, parsed_url)
- else:
- registration = {
- "Component": {
- "Type": "other",
- "other": {
- "Name": package_name.lower(),
- "Version": package_filename.split("-")[-1],
- "DownloadUrl": package_url,
- },
- "comments": "manylinux dependency",
- }
- }
- registrations.append(registration)
- package_name = None
- package_filename = None
- package_url = None
-
-
def normalize_path_separators(path):
return path.replace(os.path.sep, "/")
diff --git a/cgmanifests/generated/cgmanifest.json b/cgmanifests/generated/cgmanifest.json
index 6f1ca84e1a304..08ca90d7c3b7f 100644
--- a/cgmanifests/generated/cgmanifest.json
+++ b/cgmanifests/generated/cgmanifest.json
@@ -2,112 +2,6 @@
"$schema": "https://json.schemastore.org/component-detection-manifest.json",
"Version": 1,
"Registrations": [
- {
- "Component": {
- "Type": "other",
- "other": {
- "Name": "autoconf",
- "Version": "2.71",
- "DownloadUrl": "http://ftp.gnu.org/gnu/autoconf/autoconf-2.71.tar.gz"
- },
- "comments": "manylinux dependency"
- }
- },
- {
- "Component": {
- "Type": "other",
- "other": {
- "Name": "automake",
- "Version": "1.16.5",
- "DownloadUrl": "http://ftp.gnu.org/gnu/automake/automake-1.16.5.tar.gz"
- },
- "comments": "manylinux dependency"
- }
- },
- {
- "Component": {
- "Type": "other",
- "other": {
- "Name": "libtool",
- "Version": "2.4.7",
- "DownloadUrl": "http://ftp.gnu.org/gnu/libtool/libtool-2.4.7.tar.gz"
- },
- "comments": "manylinux dependency"
- }
- },
- {
- "Component": {
- "Type": "other",
- "other": {
- "Name": "git",
- "Version": "2.36.2",
- "DownloadUrl": "https://www.kernel.org/pub/software/scm/git/git-2.36.2.tar.gz"
- },
- "comments": "manylinux dependency"
- }
- },
- {
- "Component": {
- "Type": "other",
- "other": {
- "Name": "sqlite_autoconf",
- "Version": "3390200",
- "DownloadUrl": "https://www.sqlite.org/2022/sqlite-autoconf-3390200.tar.gz"
- },
- "comments": "manylinux dependency"
- }
- },
- {
- "Component": {
- "Type": "other",
- "other": {
- "Name": "openssl",
- "Version": "1.1.1q",
- "DownloadUrl": "https://www.openssl.org/source/openssl-1.1.1q.tar.gz"
- },
- "comments": "manylinux dependency"
- }
- },
- {
- "component": {
- "type": "git",
- "git": {
- "commitHash": "50cf2b6dd4fdf04309445f2eec8de7051d953abf",
- "repositoryUrl": "https://github.com/besser82/libxcrypt.git"
- },
- "comments": "manylinux dependency LIBXCRYPT"
- }
- },
- {
- "component": {
- "type": "git",
- "git": {
- "commitHash": "a896e3d066448b3530dbcaa48869fafefd738f57",
- "repositoryUrl": "https://github.com/emscripten-core/emsdk.git"
- },
- "comments": "git submodule at cmake/external/emsdk"
- }
- },
- {
- "component": {
- "type": "git",
- "git": {
- "commitHash": "7a2ed51a6b682a83e345ff49fc4cfd7ca47550db",
- "repositoryUrl": "https://github.com/google/libprotobuf-mutator.git"
- },
- "comments": "git submodule at cmake/external/libprotobuf-mutator"
- }
- },
- {
- "component": {
- "type": "git",
- "git": {
- "commitHash": "e2525550194ce3d8a2c4a3af451c9d9b3ae6650e",
- "repositoryUrl": "https://github.com/onnx/onnx.git"
- },
- "comments": "git submodule at cmake/external/onnx"
- }
- },
{
"component": {
"type": "git",
@@ -268,6 +162,16 @@
"comments": "mp11"
}
},
+ {
+ "component": {
+ "type": "git",
+ "git": {
+ "commitHash": "fdefbe85ed9c362b95b9b401cd19db068a76141f",
+ "repositoryUrl": "https://github.com/onnx/onnx.git"
+ },
+ "comments": "onnx"
+ }
+ },
{
"component": {
"type": "git",
diff --git a/cmake/deps.txt b/cmake/deps.txt
index 279b5ca649dba..7cf49f02333a4 100644
--- a/cmake/deps.txt
+++ b/cmake/deps.txt
@@ -24,7 +24,7 @@ microsoft_gsl;https://github.com/microsoft/GSL/archive/refs/tags/v4.0.0.zip;cf36
microsoft_wil;https://github.com/microsoft/wil/archive/refs/tags/v1.0.230629.1.zip;e4a542a323c070376f7c2d1973d0f7ddbc1d2fa5
mimalloc;https://github.com/microsoft/mimalloc/archive/refs/tags/v2.1.1.zip;d5ee7d34223d0567892db5179849939c8769dc41
mp11;https://github.com/boostorg/mp11/archive/refs/tags/boost-1.82.0.zip;9bc9e01dffb64d9e0773b2e44d2f22c51aace063
-onnx;https://github.com/onnx/onnx/archive/e2525550194ce3d8a2c4a3af451c9d9b3ae6650e.zip;782f23d788185887f520a90535513e244218e928
+onnx;https://github.com/onnx/onnx/archive/14303de049144035dfd94ace5f7a3b44773b1aad.zip;250eab9690392b248d75b56e605fb49eca373442
#use the commit of supporting all the plugins and TRT 8.6-GA (https://github.com/onnx/onnx-tensorrt/commit/0462dc31ae78f48744b6141ae376df1f96d3f459)
onnx_tensorrt;https://github.com/onnx/onnx-tensorrt/archive/0462dc31ae78f48744b6141ae376df1f96d3f459.zip;5ff086361956cceb81ed17453a1fd8db2aa4328d
protobuf;https://github.com/protocolbuffers/protobuf/archive/refs/tags/v21.12.zip;7cf2733949036c7d52fda017badcab093fe73bfa
@@ -44,4 +44,4 @@ tensorboard;https://github.com/tensorflow/tensorboard/archive/373eb09e4c5d2b3cc2
cutlass;https://github.com/NVIDIA/cutlass/archive/refs/tags/v3.0.0.zip;0f95b3c1fc1bd1175c4a90b2c9e39074d1bccefd
utf8_range;https://github.com/protocolbuffers/utf8_range/archive/72c943dea2b9240cd09efde15191e144bc7c7d38.zip;9925739c9debc0efa2adcb194d371a35b6a03156
extensions;https://github.com/microsoft/onnxruntime-extensions/archive/94142d8391c9791ec71c38336436319a2d4ac7a0.zip;4365ac5140338b4cb75a39944a4be276e3829b3c
-composable_kernel;https://github.com/ROCmSoftwarePlatform/composable_kernel/archive/d52ec01652b7d620386251db92455968d8d90bdc.zip;6b5ce8edf3625f8817086c194fbf94b664e1b0e0
\ No newline at end of file
+composable_kernel;https://github.com/ROCmSoftwarePlatform/composable_kernel/archive/d52ec01652b7d620386251db92455968d8d90bdc.zip;6b5ce8edf3625f8817086c194fbf94b664e1b0e0
diff --git a/cmake/deps_update_and_upload.py b/cmake/deps_update_and_upload.py
new file mode 100644
index 0000000000000..194d21435f0be
--- /dev/null
+++ b/cmake/deps_update_and_upload.py
@@ -0,0 +1,56 @@
+# If cmake/deps.txt is updated, run this script to re-download and upload the dependencies so that CI can use them.
+# Before running the script, increase the version number found at:
+# https://aiinfra.visualstudio.com/Lotus/_artifacts/feed/Lotus/UPack/onnxruntime_build_dependencies/versions
+# Run without --do-upload once to verify downloading. Use --do-upload when you are ready to publish.
+# python cmake/deps_update_and_upload.py --root-path C:/temp/onnxruntime_deps --version 1.0.82 --do-upload
+# Afterwards, update the version number in tools\ci_build\github\azure-pipelines\templates\download-deps.yml.
+import re
+import subprocess
+import os
+import argparse
+import tempfile
+
+parser = argparse.ArgumentParser(description="Update dependencies and publish to Azure Artifacts")
+parser.add_argument(
+ "--root-path", type=str, default=tempfile.gettempdir(), help="Target root path for downloaded files"
+)
+parser.add_argument("--version", type=str, default="1.0.82", help="Package version to publish")
+parser.add_argument("--do-upload", action="store_true", help="Upload the package to Azure Artifacts")
+args = parser.parse_args()
+
+with open("cmake/deps.txt") as file:
+ text = file.read()
+
+lines = [line for line in text.split("\n") if not line.startswith("#") and ";" in line]
+
+root_path = args.root_path
+
+for line in lines:
+ url = re.sub("^[^;]+?;https://([^;]+?);.*", r"https://\1", line)
+ filename = re.sub("^[^;]+?;https://([^;]+?);.*", r"\1", line)
+ full_path = os.path.join(root_path, filename)
+ subprocess.run(["curl", "-sSL", "--create-dirs", "-o", full_path, url])
+
+package_name = "onnxruntime_build_dependencies"
+version = args.version
+
+# Check if the user is logged in to Azure
+result = subprocess.run("az account show", shell=True, capture_output=True, text=True)
+if "No subscriptions found" in result.stderr:
+ # Prompt the user to log in to Azure
+ print("You are not logged in to Azure. Please log in to continue.")
+ subprocess.run("az login", shell=True)
+
+# Publish the package to Azure Artifacts only when --do-upload is specified; otherwise just print the command.
+
+cmd = f'az artifacts universal publish --organization https://dev.azure.com/onnxruntime --feed onnxruntime --name {package_name} --version {version} --description "onnxruntime build time dependencies" --path {root_path}'
+if args.do_upload:
+ subprocess.run(cmd, shell=True)
+else:
+ print("would have run: " + cmd)
+
+cmd = f'az artifacts universal publish --organization https://dev.azure.com/aiinfra --feed Lotus --name {package_name} --version {version} --description "onnxruntime build time dependencies" --path {root_path}'
+if args.do_upload:
+ subprocess.run(cmd, shell=True)
+else:
+ print("would have run: " + cmd)
diff --git a/cmake/patches/onnx/onnx.patch b/cmake/patches/onnx/onnx.patch
index 155d153019f85..a2d7672a3d48d 100644
--- a/cmake/patches/onnx/onnx.patch
+++ b/cmake/patches/onnx/onnx.patch
@@ -64,16 +64,3 @@ index 0aab3e26..0f859267 100644
+#endif
+
#endif // ! ONNX_ONNX_PB_H
-diff --git a/onnx/checker.cc b/onnx/checker.cc
-index 8fdaf037..1beb1b88 100644
---- a/onnx/checker.cc
-+++ b/onnx/checker.cc
-@@ -190,7 +190,7 @@ void check_tensor(const TensorProto& tensor, const CheckerContext& ctx) {
- }
- std::string data_path = path_join(ctx.get_model_dir(), relative_path);
- // use stat64 to check whether the file exists
--#ifdef __APPLE__
-+#if defined(__APPLE__) || defined(__wasm__)
- struct stat buffer; // APPLE does not have stat64
- if (stat((data_path).c_str(), &buffer) != 0) {
- #else
diff --git a/docs/OperatorKernels.md b/docs/OperatorKernels.md
index 33c187a28b62e..14b6b339c11f3 100644
--- a/docs/OperatorKernels.md
+++ b/docs/OperatorKernels.md
@@ -67,7 +67,8 @@ Do not modify directly.*
|||[11, 12]|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(string), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|||[4, 10]|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(string), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|ConcatFromSequence|*in* input_sequence:**S**
*out* concat_result:**T**|11+|**S** = seq(tensor(bfloat16)), seq(tensor(bool)), seq(tensor(double)), seq(tensor(float)), seq(tensor(float16)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(int8)), seq(tensor(string)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(uint8))|
-|ConstantOfShape|*in* input:**T1**
*out* output:**T2**|9+|**T1** = tensor(int64)
**T2** = tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
+|ConstantOfShape|*in* input:**T1**
*out* output:**T2**|20+|**T1** = tensor(int64)
**T2** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
+|||[9, 19]|**T1** = tensor(int64)
**T2** = tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|Conv|*in* X:**T**
*in* W:**T**
*in* B:**T**
*out* Y:**T**|11+|**T** = tensor(float)|
|||[1, 10]|**T** = tensor(float)|
|ConvInteger|*in* x:**T1**
*in* w:**T2**
*in* x_zero_point:**T1**
*in* w_zero_point:**T2**
*out* y:**T3**|10+|**T1** = tensor(uint8)
**T2** = tensor(uint8)
**T3** = tensor(int32)|
@@ -78,7 +79,7 @@ Do not modify directly.*
|Crop|*in* input:**T**
*out* output:**T**|1+|**T** = tensor(float)|
|CumSum|*in* x:**T**
*in* axis:**T2**
*out* y:**T**|14+|**T** = tensor(double), tensor(float), tensor(int32), tensor(int64)
**T2** = tensor(int32), tensor(int64)|
|||[11, 13]|**T** = tensor(double), tensor(float), tensor(int32), tensor(int64)
**T2** = tensor(int32), tensor(int64)|
-|DFT|*in* input:**T1**
*in* dft_length:**T2**
*out* output:**T1**|17+|**T1** = tensor(double), tensor(float)
**T2** = tensor(int32), tensor(int64)|
+|DFT|*in* input:**T1**
*in* dft_length:**T2**
*in* axis:**tensor(int64)**
*out* output:**T1**
or
*in* input:**T1**
*in* dft_length:**T2**
*out* output:**T1**|17+|**T1** = tensor(double), tensor(float)
**T2** = tensor(int32), tensor(int64)|
|DepthToSpace|*in* input:**T**
*out* output:**T**|13+|**T** = tensor(double), tensor(float)|
|||[11, 12]|**T** = tensor(double), tensor(float)|
|||[1, 10]|**T** = tensor(double), tensor(float)|
@@ -935,7 +936,7 @@ Do not modify directly.*
|Crop|*in* input:**T**
*out* output:**T**|1+|**T** = tensor(float), tensor(float16)|
|CumSum|*in* x:**T**
*in* axis:**T2**
*out* y:**T**|14+|**T** = tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
|||11+|**T** = tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
-|DFT|*in* input:**T1**
*in* dft_length:**T2**
*out* output:**T1**|17+|**T1** = tensor(float), tensor(float16)
**T2** = tensor(int64)|
+|DFT|*in* input:**T1**
*in* dft_length:**T2**
*in* axis:**tensor(int64)**
*out* output:**T1**
or
*in* input:**T1**
*in* dft_length:**T2**
*out* output:**T1**|17+|**T1** = tensor(float), tensor(float16)
**T2** = tensor(int64)|
|DepthToSpace|*in* input:**T**
*out* output:**T**|13+|**T** = tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|||11+|**T** = tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|||1+|**T** = tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
diff --git a/onnxruntime/core/optimizer/transpose_optimization/optimizer_api.h b/onnxruntime/core/optimizer/transpose_optimization/optimizer_api.h
index 40a03f24f7648..ec5c92f0c7b68 100644
--- a/onnxruntime/core/optimizer/transpose_optimization/optimizer_api.h
+++ b/onnxruntime/core/optimizer/transpose_optimization/optimizer_api.h
@@ -442,7 +442,7 @@ class GraphRef {
} // namespace api
constexpr int64_t kMinSupportedOpset = 7;
-constexpr int64_t kMaxSupportedOpset = 19;
+constexpr int64_t kMaxSupportedOpset = 20;
// enum of results that a CostCheckFn can return.
enum class CostCheckResult {
diff --git a/onnxruntime/core/providers/coreml/builders/impl/base_op_builder.h b/onnxruntime/core/providers/coreml/builders/impl/base_op_builder.h
index b142db86a7902..b4132d3b770ec 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/base_op_builder.h
+++ b/onnxruntime/core/providers/coreml/builders/impl/base_op_builder.h
@@ -51,7 +51,7 @@ class BaseOpBuilder : public IOpBuilder {
virtual bool HasSupportedInputsImpl(const Node& node, const logging::Logger& logger) const;
virtual int GetMinSupportedOpSet(const Node& /* node */) const { return 1; }
- virtual int GetMaxSupportedOpSet(const Node& /* node */) const { return 19; }
+ virtual int GetMaxSupportedOpSet(const Node& /* node */) const { return 20; }
private:
bool HasSupportedOpSet(const Node& node, const logging::Logger& logger) const;
diff --git a/onnxruntime/core/providers/cpu/cpu_execution_provider.cc b/onnxruntime/core/providers/cpu/cpu_execution_provider.cc
index 18010960e11c8..3d03abf5b7ebc 100644
--- a/onnxruntime/core/providers/cpu/cpu_execution_provider.cc
+++ b/onnxruntime/core/providers/cpu/cpu_execution_provider.cc
@@ -273,7 +273,7 @@ class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDoma
// Opset 9
class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 10, Compress);
-class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, ConstantOfShape);
+class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 19, ConstantOfShape);
class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 12, MeanVarianceNormalization);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 12, float, Greater);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 12, double, Greater);
@@ -958,6 +958,9 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain,
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 19, Scan);
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 19, Shape);
+// Opset 20
+class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 20, ConstantOfShape);
+
// !!PLEASE READ BELOW!! Following that, add new entries above this comment
/* *** IMPORTANT! ***
@@ -1332,7 +1335,7 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
// Opset 9
BuildKernelCreateInfo,
- BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, ConstantOfShape)>,
+ BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 19, ConstantOfShape)>,
BuildKernelCreateInfo,
BuildKernelCreateInfo,
BuildKernelCreateInfo,
BuildKernelCreateInfo,
+
+ // Opset 20
+ BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 20, ConstantOfShape)>,
};
for (auto& function_table_entry : function_table) {
diff --git a/onnxruntime/core/providers/cpu/generator/constant_of_shape.cc b/onnxruntime/core/providers/cpu/generator/constant_of_shape.cc
index 920db5ed34dd1..a93da12ccf595 100644
--- a/onnxruntime/core/providers/cpu/generator/constant_of_shape.cc
+++ b/onnxruntime/core/providers/cpu/generator/constant_of_shape.cc
@@ -11,11 +11,16 @@ ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPE_LIST_ALL_OPSETS(
kCpuExecutionProvider, kOnnxDomain, ConstantOfShape, Output, 0,
ConstantOfShapeDefaultOutputTypes);
+ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPE_LIST(
+ kCpuExecutionProvider, kOnnxDomain, ConstantOfShape, 20, Output, 0,
+ ConstantOfShapeDefaultOutputTypesOpset20);
+
// pytorch converter uses ConstantOfShape with int64 to create Pad input
// https://github.com/pytorch/pytorch/blob/044b519a80459f6787f6723c1c091a18b153d184/torch/onnx/symbolic_opset11.py#L449
ORT_SPECIFY_OP_KERNEL_ARG_REQUIRED_TYPES_ALL_OPSETS(
kCpuExecutionProvider, kOnnxDomain, ConstantOfShape, Output, 0,
int64_t);
+
} // namespace op_kernel_type_control
namespace {
@@ -24,6 +29,10 @@ using EnabledOutputTypes =
ORT_OP_KERNEL_ARG_ENABLED_TYPE_LIST_ALL_OPSETS(
kCpuExecutionProvider, kOnnxDomain, ConstantOfShape, Output, 0);
+using EnabledOutputTypesOpset20 =
+ ORT_OP_KERNEL_ARG_ENABLED_TYPE_LIST(
+ kCpuExecutionProvider, kOnnxDomain, ConstantOfShape, 20, Output, 0);
+
class ConstantOfShape final : public ConstantOfShapeBase, public OpKernel {
public:
explicit ConstantOfShape(const OpKernelInfo& info) : ConstantOfShapeBase(info), OpKernel(info) {}
@@ -66,13 +75,22 @@ Status ConstantOfShape::Compute(OpKernelContext* ctx) const {
} // namespace
-ONNX_CPU_OPERATOR_KERNEL(
+ONNX_CPU_OPERATOR_VERSIONED_KERNEL(
ConstantOfShape,
9,
+ 19,
KernelDefBuilder()
 .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("T2",
 BuildKernelDefConstraintsFromTypeList<EnabledOutputTypes>()),
ConstantOfShape);
+ONNX_CPU_OPERATOR_KERNEL(
+ ConstantOfShape,
+ 20,
+ KernelDefBuilder()
+ .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>())
+ .TypeConstraint("T2",
+ BuildKernelDefConstraintsFromTypeList<EnabledOutputTypesOpset20>()),
+ ConstantOfShape);
} // namespace onnxruntime
diff --git a/onnxruntime/core/providers/cpu/generator/constant_of_shape_base.h b/onnxruntime/core/providers/cpu/generator/constant_of_shape_base.h
index d96ff06e3d6d8..9aa73c714daea 100644
--- a/onnxruntime/core/providers/cpu/generator/constant_of_shape_base.h
+++ b/onnxruntime/core/providers/cpu/generator/constant_of_shape_base.h
@@ -23,6 +23,18 @@ using ConstantOfShapeDefaultOutputTypes =
uint8_t, uint16_t, uint32_t, uint64_t,
bool>;
+using ConstantOfShapeDefaultOutputTypesOpset20 =
+ TypeList<
+ BFloat16,
+ MLFloat16,
+ float, double,
+#if !defined(DISABLE_FLOAT8_TYPES)
+ Float8E4M3FN, Float8E4M3FNUZ, Float8E5M2, Float8E5M2FNUZ,
+#endif
+ int8_t, int16_t, int32_t, int64_t,
+ uint8_t, uint16_t, uint32_t, uint64_t,
+ bool>;
+
template
class ConstantOfShapeBase {
protected:
diff --git a/onnxruntime/core/providers/webnn/builders/impl/base_op_builder.h b/onnxruntime/core/providers/webnn/builders/impl/base_op_builder.h
index 301927d9c658f..01e4a3c60281f 100644
--- a/onnxruntime/core/providers/webnn/builders/impl/base_op_builder.h
+++ b/onnxruntime/core/providers/webnn/builders/impl/base_op_builder.h
@@ -46,7 +46,7 @@ class BaseOpBuilder : public IOpBuilder {
// We still set the mininal supported opset to 1 as we couldn't
// get the model opset version at this stage.
virtual int GetMinSupportedOpSet(const Node& /* node */) const { return 1; }
- virtual int GetMaxSupportedOpSet(const Node& /* node */) const { return 19; }
+ virtual int GetMaxSupportedOpSet(const Node& /* node */) const { return 20; }
private:
bool HasSupportedOpSet(const Node& node, const logging::Logger& logger) const;
diff --git a/onnxruntime/test/framework/function_test.cc b/onnxruntime/test/framework/function_test.cc
index e126979532644..6e745776ab6b0 100644
--- a/onnxruntime/test/framework/function_test.cc
+++ b/onnxruntime/test/framework/function_test.cc
@@ -6,6 +6,7 @@
#include "onnx/defs/parser.h"
#include "core/common/span_utils.h"
+#include "core/framework/float8.h"
#include "core/graph/model.h"
#include "core/providers/cpu/cpu_execution_provider.h"
#include "core/session/inference_session.h"
@@ -69,7 +70,9 @@ static void Check(const char* source,
float threshold = 0.001f;
for (size_t i = 0; i < size; ++i) {
- ASSERT_NEAR(data[i], output_values[i], threshold) << "at position i:" << i;
+ if (!std::isnan(data[i]) && !std::isnan(output_values[i])) {
+ ASSERT_NEAR(data[i], output_values[i], threshold) << "at position i:" << i;
+ }
}
}
@@ -389,25 +392,13 @@ TEST(FunctionTest, AttrSaturateNan) {
>
agraph (float[N] x) => (float[N] y)
{
- y0 = local.myfun (x)
- y1 = local.myfun (x)
- y = Add (y0, y1)
- }
-
- <
- opset_import: [ "" : 19 ],
- domain: "local"
- >
- myfun (x) => (y) {
- x2 = Constant ()
- x2_ = Cast(x2)
- x3 = CastLike(x2, x2_)
- x3_ = Cast(x3)
- y = Add (x, x3_)
+ x_E4M3FNUZ = Cast(x)
+ x_E4M3FNUZ_2 = CastLike(x, x_E4M3FNUZ) # NaN when OOR
+ y = Cast(x_E4M3FNUZ_2)
}
)";
- Check(code, "x", {1.0, 2.0, 1e6}, "y", {243.0, 245.0, 2000241}); // std::numeric_limits::quiet_NaN()});
+ Check(code, "x", {1.0, 2.0, 1e6}, "y", {1.0, 2.0, std::numeric_limits<float>::quiet_NaN()});
}
#endif
diff --git a/onnxruntime/test/framework/inference_session_test.cc b/onnxruntime/test/framework/inference_session_test.cc
index 077c6ff58e2da..2298e4afa6de0 100644
--- a/onnxruntime/test/framework/inference_session_test.cc
+++ b/onnxruntime/test/framework/inference_session_test.cc
@@ -2056,7 +2056,7 @@ TEST(InferenceSessionTests, TestStrictShapeInference) {
ASSERT_STATUS_OK(session_options.config_options.AddConfigEntry(kOrtSessionOptionsConfigStrictShapeTypeInference, "1"));
tester.Run(session_options, OpTester::ExpectResult::kExpectFailure,
- "Mismatch between number of source and target dimensions. Source=1 Target=2",
+ "Mismatch between number of inferred and declared dimensions. inferred=1 declared=2",
excluded_provider_types);
}
diff --git a/onnxruntime/test/optimizer/graph_transform_test.cc b/onnxruntime/test/optimizer/graph_transform_test.cc
index dce1f2d40e8b9..f6482fb865c9b 100755
--- a/onnxruntime/test/optimizer/graph_transform_test.cc
+++ b/onnxruntime/test/optimizer/graph_transform_test.cc
@@ -6280,7 +6280,7 @@ TEST_F(GraphTransformationTests, ConstantSharing_ShouldNotShareForGraphOutput) {
TEST_F(GraphTransformationTests, GatherToSplitFusion) {
auto build_test_case = [&](ModelTestBuilder& builder) {
auto* data_arg = builder.MakeInput({{54}});
- auto* shape_arg = builder.MakeInput({{1}});
+ auto* shape_arg = builder.MakeInput<int64_t>({{4}});
auto* reshape_out = builder.MakeIntermediate({{2, 3, 3, 3}});
auto* gather_index_1 = builder.MakeInitializer({}, {static_cast(0)});
auto* gather_index_2 = builder.MakeInitializer({}, {static_cast(1)});
@@ -6393,7 +6393,7 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion) {
TEST_F(GraphTransformationTests, GatherToSplitFusion_NoSqueeze) {
auto build_test_case = [&](ModelTestBuilder& builder) {
auto* data_arg = builder.MakeInput({{54}});
- auto* shape_arg = builder.MakeInput({{1}});
+ auto* shape_arg = builder.MakeInput<int64_t>({{4}});
auto* reshape_out = builder.MakeIntermediate({{2, 3, 3, 3}});
auto* gather_index_1 = builder.MakeInitializer({1}, {static_cast(0)});
auto* gather_index_2 = builder.MakeInitializer({1}, {static_cast(1)});
diff --git a/onnxruntime/test/providers/cpu/controlflow/scan_test.cc b/onnxruntime/test/providers/cpu/controlflow/scan_test.cc
index 6d8e05b93510a..8008fd129c19b 100644
--- a/onnxruntime/test/providers/cpu/controlflow/scan_test.cc
+++ b/onnxruntime/test/providers/cpu/controlflow/scan_test.cc
@@ -578,7 +578,7 @@ TEST(Scan9, DISABLED_BadShape) {
ShortSequenceOneInBatchOneLoopStateVar(
options,
"Node:concat Output:concat_out_1 [ShapeInferenceError] Mismatch between number of source and target dimensions. "
- "Source=2 Target=1");
+ "inferred=2 declared=1");
}
TEST(Scan8, ShortSequenceTwoInBatchOneLoopStateVar) {
diff --git a/onnxruntime/test/providers/cpu/nn/conv_fp16_test.cc b/onnxruntime/test/providers/cpu/nn/conv_fp16_test.cc
index c8343483b80a6..cb5fc8095982c 100644
--- a/onnxruntime/test/providers/cpu/nn/conv_fp16_test.cc
+++ b/onnxruntime/test/providers/cpu/nn/conv_fp16_test.cc
@@ -109,7 +109,7 @@ TEST(ConvFp16Test, Conv1D_Invalid_Input_Shape) {
TestConvFp16Op(attrs, {X, dummy_vals}, {X_shape, dummy_shape}, dummy_vals, dummy_shape, false,
OpTester::ExpectResult::kExpectFailure,
"Node:node1 Output:Y [ShapeInferenceError] Can't merge shape info. "
- "Both source and target dimension have values but they differ. Source=0 Target=2 Dimension=2",
+ "Both inferred and declared dimension have values but they differ. Inferred=0 Declared=2 Dimension=2",
-1); // use latest opset for shape inferencing errors
}
@@ -132,7 +132,7 @@ TEST(ConvFp16Test, Conv2D_Invalid_Input_Shape) {
TestConvFp16Op(attrs, {X, dummy_vals}, {X_shape, dummy_shape}, dummy_vals, dummy_shape, false,
OpTester::ExpectResult::kExpectFailure,
"Node:node1 Output:Y [ShapeInferenceError] Can't merge shape info. "
- "Both source and target dimension have values but they differ. Source=1 Target=2 Dimension=0",
+ "Both inferred and declared dimension have values but they differ. Inferred=1 Declared=2 Dimension=0",
-1); // use latest opset for shape inferencing errors
}
diff --git a/onnxruntime/test/providers/cpu/nn/conv_op_test.cc b/onnxruntime/test/providers/cpu/nn/conv_op_test.cc
index e01fd8c78e55f..5103aed50b152 100644
--- a/onnxruntime/test/providers/cpu/nn/conv_op_test.cc
+++ b/onnxruntime/test/providers/cpu/nn/conv_op_test.cc
@@ -249,7 +249,7 @@ TEST(ConvTest, Conv1D_Invalid_Input_Shape) {
TestConvOp(attrs, {X, dummy_vals}, {X_shape, dummy_shape}, dummy_vals, dummy_shape, false,
OpTester::ExpectResult::kExpectFailure,
"Node:node1 Output:Y [ShapeInferenceError] Can't merge shape info. "
- "Both source and target dimension have values but they differ. Source=0 Target=2 Dimension=2",
+ "Both inferred and declared dimension have values but they differ. Inferred=0 Declared=2 Dimension=2",
-1); // use latest opset for shape inferencing errors
}
@@ -272,7 +272,7 @@ TEST(ConvTest, Conv2D_Invalid_Input_Shape) {
TestConvOp(attrs, {X, dummy_vals}, {X_shape, dummy_shape}, dummy_vals, dummy_shape, false,
OpTester::ExpectResult::kExpectFailure,
"Node:node1 Output:Y [ShapeInferenceError] Can't merge shape info. "
- "Both source and target dimension have values but they differ. Source=1 Target=2 Dimension=0",
+ "Both inferred and declared dimension have values but they differ. Inferred=1 Declared=2 Dimension=0",
-1); // use latest opset for shape inferencing errors
}
diff --git a/onnxruntime/test/providers/cpu/nn/tfidfvectorizer_test.cc b/onnxruntime/test/providers/cpu/nn/tfidfvectorizer_test.cc
index a22253dbb74d4..379b892f39135 100644
--- a/onnxruntime/test/providers/cpu/nn/tfidfvectorizer_test.cc
+++ b/onnxruntime/test/providers/cpu/nn/tfidfvectorizer_test.cc
@@ -91,7 +91,7 @@ TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip0_Empty_Dim1Fail) {
test.Run(OpTester::ExpectResult::kExpectFailure,
"Can't merge shape info. "
- "Both source and target dimension have values but they differ. Source=7 Target=0 Dimension=0");
+ "Both inferred and declared dimension have values but they differ. Inferred=7 Declared=0 Dimension=0");
}
TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip0_Empty_Dim1Success) {
@@ -136,7 +136,7 @@ TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip0_Empty_Dim2) {
test.AddOutput("Y", out_dims, output);
test.Run(OpTester::ExpectResult::kExpectFailure,
- "Mismatch between number of source and target dimensions. Source=2 Target=1");
+ "Mismatch between number of inferred and declared dimensions. inferred=2 declared=1");
}
TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip01_Empty_Dim2) {
@@ -159,7 +159,7 @@ TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip01_Empty_Dim2) {
test.AddOutput("Y", out_dims, output);
test.Run(OpTester::ExpectResult::kExpectFailure,
- "Mismatch between number of source and target dimensions. Source=2 Target=1");
+ "Mismatch between number of inferred and declared dimensions. inferred=2 declared=1");
}
TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip0_Empty_Dim2N) {
diff --git a/onnxruntime/test/providers/cpu/tensor/transpose_test.cc b/onnxruntime/test/providers/cpu/tensor/transpose_test.cc
index c334e0c5ddcb6..0e7ac5ed2b2f0 100644
--- a/onnxruntime/test/providers/cpu/tensor/transpose_test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/transpose_test.cc
@@ -37,7 +37,7 @@ TEST(TransposeOpTest, PermRankDoesNotMatchTensorRank) {
// This failure comes from shape inference, because in this case it knows the input dims.
// But in the real world, the model can supply different input dims at runtime.
test.Run(OpTester::ExpectResult::kExpectFailure,
- "Node:node1 Output:Y [ShapeInferenceError] Mismatch between number of source and target dimensions. Source=3 Target=4");
+ "Node:node1 Output:Y [ShapeInferenceError] Mismatch between number of inferred and declared dimensions. inferred=3 declared=4");
}
// Some of the tests can't run on TensorrtExecutionProvider because of errors.
diff --git a/onnxruntime/test/python/quantization/op_test_utils.py b/onnxruntime/test/python/quantization/op_test_utils.py
index e94ac5c961583..f26b6297cdbda 100644
--- a/onnxruntime/test/python/quantization/op_test_utils.py
+++ b/onnxruntime/test/python/quantization/op_test_utils.py
@@ -279,6 +279,9 @@ def check_model_correctness(
ops_set = set(node.op_type for node in model_onnx.graph.node)
check_reference_evaluator = not (ops_set & {"EmbedLayerNormalization", "Conv", "Attention", "Transpose"})
+ with open(model_path_to_check, "rb") as f:
+ model_check = onnx.load(f)
+
if check_reference_evaluator and onnx_recent_enough:
ref = ReferenceEvaluator(model_path_origin)
ref_origin_results = ref.run(None, inputs)
@@ -289,7 +292,7 @@ def check_model_correctness(
output,
rtol=rtol,
atol=atol,
- err_msg=f"Model {model_path_to_check!r} failed for providers={providers!r}.",
+ err_msg=f"Model {model_path_origin!r} failed for providers={providers!r}.",
)
# Verifies the shapes in the quantized model.
@@ -301,40 +304,52 @@ def check_model_correctness(
expected_shapes[init.name] = tuple(init.dims)
checked = 0
f8_quantization = False
- with open(model_path_to_check, "rb") as f:
- model_check = onnx.load(f)
- for init in model_check.graph.initializer:
- if init.name.endswith("_quantized"):
- name = init.name.replace("_quantized", "")
- expected = expected_shapes[name]
- shape = tuple(init.dims)
- if not dynamic and expected != shape:
- raise AssertionError(
- f"Shape mismatch for initializer {init.name!r} from {init.name!r}, "
- f"shape={shape} != {expected} (expected)."
- )
- else:
- checked += 1
- if "zero_point" in init.name:
- dt = init.data_type
- f8_quantization = f8_quantization or dt in (
- TensorProto.FLOAT8E4M3FN,
- TensorProto.FLOAT8E4M3FNUZ,
- TensorProto.FLOAT8E5M2,
- TensorProto.FLOAT8E5M2FNUZ,
+ for init in model_check.graph.initializer:
+ if init.name.endswith("_quantized"):
+ name = init.name.replace("_quantized", "")
+ expected = expected_shapes[name]
+ shape = tuple(init.dims)
+ if not dynamic and expected != shape:
+ raise AssertionError(
+ f"Shape mismatch for initializer {init.name!r} from {init.name!r}, "
+ f"shape={shape} != {expected} (expected)."
)
- if checked == 0:
- raise AssertionError(
- f"Unable to check expected shape, expected_shapes={expected_shapes}, "
- f"names={[init.name for init in model_check.graph.initializer]}."
+ else:
+ checked += 1
+ if "zero_point" in init.name:
+ dt = init.data_type
+ f8_quantization = f8_quantization or dt in (
+ TensorProto.FLOAT8E4M3FN,
+ TensorProto.FLOAT8E4M3FNUZ,
+ TensorProto.FLOAT8E5M2,
+ TensorProto.FLOAT8E5M2FNUZ,
)
+ if checked == 0:
+ raise AssertionError(
+ f"Unable to check expected shape, expected_shapes={expected_shapes}, "
+ f"names={[init.name for init in model_check.graph.initializer]}."
+ )
if f8_quantization:
check_sign_f8_quantization(model_path_origin, model_path_to_check)
# Verifies the expected outputs.
if check_reference_evaluator and onnx_recent_enough:
+ reference_new_ops = [QGemm]
+ has_missing_reference_ops = any(
+ node.domain not in ["", "ai.onnx"]
+ and not any(
+ node.domain == new_node.op_domain and node.op_type == new_node.__name__
+ for new_node in reference_new_ops
+ )
+ for node in model_check.graph.node
+ )
+ if has_missing_reference_ops:
+            # Skip the test: the model contains ops the reference evaluator does not support.
+ testcase.skipTest(
+ f"Model {model_path_to_check!r} contains ops that are not supported by the reference evaluator."
+ )
# Needs pv.Version(onnx.__version__) >= pv.Version("1.16.0")
- ref = ReferenceEvaluator(model_path_to_check, new_ops=[QGemm])
+ ref = ReferenceEvaluator(model_check, new_ops=reference_new_ops)
target_results = ref.run(None, inputs)
testcase.assertEqual(len(origin_results), len(target_results), "result count are different")
for idx, ref_output in enumerate(origin_results):
diff --git a/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc b/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc
index 71a10f646a7c6..67391cb1a8219 100644
--- a/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc
+++ b/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc
@@ -233,7 +233,64 @@
"^test_resize_upsample_sizes_nearest_cuda",
"^test_resize_upsample_sizes_nearest_floor_align_corners_cuda",
"^test_resize_upsample_sizes_nearest_not_larger_cuda",
- "^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cuda"
+ "^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cuda",
+ // onnx 1.15 (opset 20) new and updated op tests
+ "^test_ai_onnx_ml_label_encoder_string_int",
+ "^test_ai_onnx_ml_label_encoder_string_int_no_default",
+ "^test_ai_onnx_ml_label_encoder_tensor_mapping",
+ "^test_ai_onnx_ml_label_encoder_tensor_value_only_mapping",
+ "^test_gridsample_aligncorners_true",
+ "^test_gridsample_bicubic_align_corners_0_additional_1",
+ "^test_gridsample_bicubic_align_corners_1_additional_1",
+ "^test_gridsample_bicubic",
+ "^test_gridsample_bilinear_align_corners_0_additional_1",
+ "^test_gridsample_bilinear_align_corners_1_additional_1",
+ "^test_gridsample_bilinear",
+ "^test_gridsample_border_padding",
+ "^test_gridsample",
+ "^test_gridsample_nearest_align_corners_0_additional_1",
+ "^test_gridsample_nearest_align_corners_1_additional_1",
+ "^test_gridsample_nearest",
+ "^test_gridsample_reflection_padding",
+ "^test_gridsample_volumetric_bilinear_align_corners_0",
+ "^test_gridsample_volumetric_bilinear_align_corners_1",
+ "^test_gridsample_volumetric_nearest_align_corners_0",
+ "^test_gridsample_volumetric_nearest_align_corners_1",
+ "^test_gridsample_zeros_padding",
+ "^test_image_decoder_decode_bmp_rgb",
+ "^test_image_decoder_decode_jpeg2k_rgb",
+ "^test_image_decoder_decode_jpeg_bgr",
+ "^test_image_decoder_decode_jpeg_grayscale",
+ "^test_image_decoder_decode_jpeg_rgb",
+ "^test_image_decoder_decode_png_rgb",
+ "^test_image_decoder_decode_pnm_rgb",
+ "^test_image_decoder_decode_tiff_rgb",
+ "^test_image_decoder_decode_webp_rgb",
+ "^test_regex_full_match_basic",
+ "^test_regex_full_match_email_domain",
+ "^test_regex_full_match_empty",
+ "^test_string_concat_broadcasting",
+ "^test_string_concat",
+ "^test_string_concat_empty_string",
+ "^test_string_concat_utf8",
+ "^test_string_concat_zero_dimensional",
+ "^test_string_split_basic",
+ "^test_string_split_consecutive_delimiters",
+ "^test_string_split_empty_string_delimiter",
+ "^test_string_split_empty_tensor",
+ "^test_string_split_maxsplit",
+ "^test_string_split_no_delimiter",
+ "^test_dft_axis",
+ "^test_dft",
+ "^test_dft_inverse",
+ "^test_isinf",
+ "^test_isinf_float16",
+ "^test_isinf_negative",
+ "^test_isinf_positive",
+ "^test_isnan",
+ "^test_isnan_float16",
+ "^test_reduce_max_bool_inputs",
+ "^test_reduce_min_bool_inputs"
],
"current_failing_tests_x86": [
"^test_vgg19",
@@ -316,7 +373,24 @@
"^test_layer_normalization_4d_axis_negative_1_expanded_ver18_cpu",
"^test_layer_normalization_4d_axis_negative_2_expanded_ver18_cpu",
"^test_layer_normalization_4d_axis_negative_3_expanded_ver18_cpu",
- "^test_layer_normalization_default_axis_expanded_ver18_cpu"
+ "^test_layer_normalization_default_axis_expanded_ver18_cpu",
+ // onnx 1.15 (opset 20) new and updated op tests (test_affine_grid_???_expanded utilizes ConstantOfShape so it needs to be skipped as well)
+ // https://dev.azure.com/onnxruntime/onnxruntime/_build/results?buildId=1139541&view=logs&j=249e9d58-0012-5814-27cf-6a201adbd9cf&t=bb33e81f-0527-50e0-0fd2-e94f509f0a82
+ // only supported with cpu provider
+ "^test_affine_grid_2d",
+ "^test_affine_grid_2d_align_corners",
+ "^test_affine_grid_2d_align_corners_expanded",
+ "^test_affine_grid_2d_expanded",
+ "^test_affine_grid_3d",
+ "^test_affine_grid_3d_align_corners",
+ "^test_affine_grid_3d_align_corners_expanded",
+ "^test_affine_grid_3d_expanded",
+ "^test_constantofshape_float_ones",
+ "^test_constantofshape_int_shape_zero",
+ "^test_constantofshape_int_zeros",
+ // https://dev.azure.com/onnxruntime/onnxruntime/_build/results?buildId=1141563&view=logs&j=a018b46d-e41a-509d-6581-c95fdaa42fcd&t=d61c1d37-f101-5d28-982f-e5931b720302
+ "^test_gelu_tanh_2_cpu",
+ "^test_gelu_tanh_2_expanded_cpu"
],
"current_failing_tests_NNAPI": [
"^test_maxpool_2d_uint8",
@@ -569,7 +643,22 @@
"^test_sequence_map_identity_1_sequence_cpu",
"^test_sequence_map_identity_1_sequence_expanded_cpu",
"^test_sequence_map_identity_2_sequences_cpu",
- "^test_sequence_map_identity_2_sequences_expanded_cpu"
+ "^test_sequence_map_identity_2_sequences_expanded_cpu",
+ // onnx 1.15 (opset 20) new and updated op tests (test_affine_grid_???_expanded utilizes ConstantOfShape so it needs to be skipped as well)
+ // https://dev.azure.com/onnxruntime/onnxruntime/_build/results?buildId=1139542&view=logs&j=3032dfba-5baf-5872-0871-2e69cb7f4b6a&t=f0d05deb-fc26-5aaf-e43e-7db2764c07da
+ // only supported with cpu provider
+ "^test_affine_grid_2d",
+ "^test_affine_grid_2d_align_corners",
+ "^test_affine_grid_2d_align_corners_expanded",
+ "^test_affine_grid_2d_expanded",
+ "^test_affine_grid_3d",
+ "^test_affine_grid_3d_align_corners",
+ "^test_affine_grid_3d_align_corners_expanded",
+ "^test_affine_grid_3d_expanded",
+ "^test_constantofshape_float_ones",
+ "^test_constantofshape_int_shape_zero",
+ "^test_constantofshape_int_zeros"
+
],
// ORT first supported opset 7, so models with nodes that require versions prior to opset 7 are not supported
"tests_with_pre_opset7_dependencies": [
diff --git a/onnxruntime/test/testdata/onnx_backend_test_series_overrides.jsonc b/onnxruntime/test/testdata/onnx_backend_test_series_overrides.jsonc
index caeea0a758ad9..07385ac9ade05 100644
--- a/onnxruntime/test/testdata/onnx_backend_test_series_overrides.jsonc
+++ b/onnxruntime/test/testdata/onnx_backend_test_series_overrides.jsonc
@@ -7,6 +7,9 @@
"test_dft": 1e-3,
"test_dft_axis": 1e-3,
"test_dft_inverse": 1e-3,
+ "test_dft_opset19": 1e-3,
+ "test_dft_axis_opset19": 1e-3,
+ "test_dft_inverse_opset19": 1e-3,
"test_stft": 1e-4,
"test_stft_with_window": 1e-4
},
diff --git a/orttraining/orttraining/core/graph/training_op_defs.cc b/orttraining/orttraining/core/graph/training_op_defs.cc
index 91b1df7b7cf2d..86d3cdee9ba98 100644
--- a/orttraining/orttraining/core/graph/training_op_defs.cc
+++ b/orttraining/orttraining/core/graph/training_op_defs.cc
@@ -4171,7 +4171,7 @@ Return true if all elements are true and false otherwise.
"T", OpSchema::Variadic,
/*is_homogeneous*/ false,
/*min_arity*/ 1)
- .TypeConstraint("T", OpSchema::all_tensor_types_with_bfloat(),
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(),
"Allow inputs and outputs to be any kind of tensor.");
#endif // ENABLE_TRITON
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 73e04e6b37c0b..1b5ca65cf8037 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -17,4 +17,4 @@ scikit-learn
scipy
sympy
wheel
-setuptools>=41.4.0
+setuptools>=61.0.0
diff --git a/requirements-training.txt b/requirements-training.txt
index 4b1be6cef9b7c..dbfd7305d1bec 100644
--- a/requirements-training.txt
+++ b/requirements-training.txt
@@ -6,4 +6,4 @@ onnx
packaging
protobuf
sympy
-setuptools>=41.4.0
+setuptools>=61.0.0
diff --git a/tools/ci_build/github/azure-pipelines/templates/download-deps.yml b/tools/ci_build/github/azure-pipelines/templates/download-deps.yml
index f17bc8de5739b..cf73691a5eecc 100644
--- a/tools/ci_build/github/azure-pipelines/templates/download-deps.yml
+++ b/tools/ci_build/github/azure-pipelines/templates/download-deps.yml
@@ -11,7 +11,7 @@ steps:
packageType: upack
feed: '/7424c8e4-5c62-490e-95c4-79446f31017c'
definition: '517c4f6f-5437-4392-a70d-4f15ec5be2f0'
- version: 1.0.81
+ version: 1.0.90
downloadPath: $(Build.BinariesDirectory)/deps
# The private ADO project
@@ -22,7 +22,7 @@ steps:
packageType: upack
feed: '/4c7631f5-24c0-4307-8822-1aa8f180c325'
definition: 'fd9dd5ad-b73e-4678-890e-edcf680dbc1a'
- version: 1.0.81
+ version: 1.0.90
downloadPath: $(Build.BinariesDirectory)/deps
# You can add more ADO accounts at here.
diff --git a/tools/ci_build/github/azure-pipelines/templates/jobs/win-ci-prebuild-steps.yml b/tools/ci_build/github/azure-pipelines/templates/jobs/win-ci-prebuild-steps.yml
index 8868e671a5fa5..1ba907fe30df6 100644
--- a/tools/ci_build/github/azure-pipelines/templates/jobs/win-ci-prebuild-steps.yml
+++ b/tools/ci_build/github/azure-pipelines/templates/jobs/win-ci-prebuild-steps.yml
@@ -31,7 +31,7 @@ steps:
architecture: ${{parameters.BuildArch}}
- script: |
- python -m pip install -q setuptools wheel numpy flatbuffers
+ python -m pip install --upgrade "setuptools>=61.0.0" wheel numpy flatbuffers
workingDirectory: '$(Build.BinariesDirectory)'
displayName: 'Install python modules'
diff --git a/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt b/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt
index 6b8003c01c24d..9eef51862c326 100644
--- a/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt
+++ b/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt
@@ -4,7 +4,7 @@ mypy
pytest
setuptools>=41.4.0
wheel
-git+http://github.com/onnx/onnx.git@e2525550194ce3d8a2c4a3af451c9d9b3ae6650e#egg=onnx
+git+http://github.com/onnx/onnx.git@ac3e58759463ff3a3089e3cd64fddbfad0f6724d#egg=onnx
protobuf==3.20.2
sympy==1.12
flatbuffers