diff --git a/cmake/onnxruntime_common.cmake b/cmake/onnxruntime_common.cmake
index 71a397176f3a3..0799ab9a6c79e 100644
--- a/cmake/onnxruntime_common.cmake
+++ b/cmake/onnxruntime_common.cmake
@@ -67,3 +67,18 @@ if(WIN32)
# Add Code Analysis properties to enable C++ Core checks. Have to do it via a props file include.
set_target_properties(onnxruntime_common PROPERTIES VS_USER_PROPS ${PROJECT_SOURCE_DIR}/EnableVisualStudioCodeAnalysis.props)
endif()
+
+# check if we need to link against librt on Linux
+include(CheckLibraryExists)
+include(CheckFunctionExists)
+if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux")
+ check_library_exists(rt clock_gettime "time.h" HAVE_CLOCK_GETTIME)
+
+ if (NOT HAVE_CLOCK_GETTIME)
+ set(CMAKE_EXTRA_INCLUDE_FILES time.h)
+ check_function_exists(clock_gettime HAVE_CLOCK_GETTIME)
+ set(CMAKE_EXTRA_INCLUDE_FILES)
+ else()
+ target_link_libraries(onnxruntime_common rt)
+ endif()
+endif()
diff --git a/cmake/onnxruntime_python.cmake b/cmake/onnxruntime_python.cmake
index 5a5ecc461d9ee..8ff4f6e802fd3 100644
--- a/cmake/onnxruntime_python.cmake
+++ b/cmake/onnxruntime_python.cmake
@@ -60,7 +60,7 @@ onnxruntime_add_include_to_target(onnxruntime_pybind11_state gsl)
if(APPLE)
set(ONNXRUNTIME_SO_LINK_FLAG "-Xlinker -exported_symbols_list ${ONNXRUNTIME_ROOT}/python/exported_symbols.lst")
elseif(UNIX)
- set(ONNXRUNTIME_SO_LINK_FLAG "-Xlinker --version-script=${ONNXRUNTIME_ROOT}/python/version_script.lds -Xlinker --no-undefined -Xlinker --gc-sections")
+ set(ONNXRUNTIME_SO_LINK_FLAG "-Xlinker --version-script=${ONNXRUNTIME_ROOT}/python/version_script.lds -Xlinker --gc-sections")
else()
set(ONNXRUNTIME_SO_LINK_FLAG "-DEF:${ONNXRUNTIME_ROOT}/python/pybind.def")
endif()
diff --git a/onnxruntime/python/_ld_preload.py b/onnxruntime/python/_ld_preload.py
new file mode 100644
index 0000000000000..a67f27f68da86
--- /dev/null
+++ b/onnxruntime/python/_ld_preload.py
@@ -0,0 +1,10 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+#--------------------------------------------------------------------------
+
+# This file can be modified by setup.py when building a manylinux2010 wheel
+# When modified, it will preload some libraries needed for the python C extension
+# Do not remove or move the following comment
+
+# LD_PRELOAD_BEGIN_MARK
diff --git a/onnxruntime/python/_pybind_state.py b/onnxruntime/python/_pybind_state.py
index 2feda322a331d..c48bb8bdd0778 100644
--- a/onnxruntime/python/_pybind_state.py
+++ b/onnxruntime/python/_pybind_state.py
@@ -5,6 +5,7 @@
import sys
import os
import warnings
+import onnxruntime.capi._ld_preload
try:
from onnxruntime.capi.onnxruntime_pybind11_state import * # noqa
diff --git a/rename_manylinux.sh b/rename_manylinux.sh
deleted file mode 100755
index 5634a5ef442e4..0000000000000
--- a/rename_manylinux.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License.
-
-# hack script to modify whl as manylinux whl
-whl=(*whl)
-renamed_whl=`echo $whl | sed --expression='s/linux/manylinux1/g'`
-basename=`echo $whl | awk -F'-cp3' '{print $1}'`
-unzip $whl
-sed -i 's/linux/manylinux1/g' ${basename}.dist-info/WHEEL
-# explicitly set file perms
-chmod 664 ${basename}.dist-info/*
-zip -r $renamed_whl ${basename}.data ${basename}.dist-info
diff --git a/setup.py b/setup.py
index 03413d8bcc3cf..03321cb39a3ee 100644
--- a/setup.py
+++ b/setup.py
@@ -3,9 +3,14 @@
# Licensed under the MIT License.
#--------------------------------------------------------------------------
-from setuptools import setup, find_packages
-from os import path, getcwd
+from setuptools import setup, find_packages, Extension
+from distutils import log as logger
+from distutils.command.build_ext import build_ext as _build_ext
+from glob import glob
+from os import path, getcwd, environ, remove
+from shutil import copyfile
import platform
+import subprocess
import sys
import datetime
@@ -38,12 +43,69 @@
nightly_build = True
sys.argv.remove('--nightly_build')
+is_manylinux2010 = False
+if environ.get('AUDITWHEEL_PLAT', None) == 'manylinux2010_x86_64':
+ is_manylinux2010 = True
+
+
+class build_ext(_build_ext):
+ def build_extension(self, ext):
+ dest_file = self.get_ext_fullpath(ext.name)
+ logger.info('copying %s -> %s', ext.sources[0], dest_file)
+ copyfile(ext.sources[0], dest_file)
+
+
try:
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
class bdist_wheel(_bdist_wheel):
def finalize_options(self):
_bdist_wheel.finalize_options(self)
- self.root_is_pure = False
+ if not is_manylinux2010:
+ self.root_is_pure = False
+
+ def _rewrite_ld_preload(self, to_preload):
+ with open('onnxruntime/capi/_ld_preload.py', 'rt') as f:
+ ld_preload = f.read().splitlines()
+ with open('onnxruntime/capi/_ld_preload.py', 'wt') as f:
+ for line in ld_preload:
+ f.write(line)
+ f.write('\n')
+ if 'LD_PRELOAD_BEGIN_MARK' in line:
+ break
+ if len(to_preload) > 0:
+ f.write('from ctypes import CDLL, RTLD_GLOBAL\n')
+ for library in to_preload:
+ f.write('_{} = CDLL("{}", mode=RTLD_GLOBAL)\n'.format(library.split('.')[0], library))
+
+ def run(self):
+ if is_manylinux2010:
+ source = 'onnxruntime/capi/onnxruntime_pybind11_state.so'
+ dest = 'onnxruntime/capi/onnxruntime_pybind11_state_manylinux2010.so'
+ logger.info('copying %s -> %s', source, dest)
+ copyfile(source, dest)
+ result = subprocess.run(['patchelf', '--print-needed', dest], check=True, stdout=subprocess.PIPE, universal_newlines=True)
+ cuda_dependencies = ['libcublas.so', 'libcudnn.so', 'libcudart.so']
+ to_preload = []
+ args = ['patchelf', '--debug']
+ for line in result.stdout.split('\n'):
+ for dependency in cuda_dependencies:
+ if dependency in line:
+ to_preload.append(line)
+ args.extend(['--remove-needed', line])
+ args.append(dest)
+ if len(to_preload) > 0:
+ subprocess.run(args, check=True, stdout=subprocess.PIPE)
+ self._rewrite_ld_preload(to_preload)
+ _bdist_wheel.run(self)
+ if is_manylinux2010:
+ file = glob(path.join(self.dist_dir, '*linux*.whl'))[0]
+ logger.info('repairing %s for manylinux2010', file)
+ try:
+ subprocess.run(['auditwheel', 'repair', '--plat', 'manylinux2010_x86_64', '-w', self.dist_dir, file], check=True, stdout=subprocess.PIPE)
+ finally:
+ logger.info('removing %s', file)
+ remove(file)
+
except ImportError:
bdist_wheel = None
@@ -57,7 +119,18 @@ def finalize_options(self):
else:
libs = ['onnxruntime_pybind11_state.pyd', 'mkldnn.dll', 'mklml.dll', 'libiomp5md.dll']
-data = [path.join('capi', x) for x in libs if path.isfile(path.join('onnxruntime', 'capi', x))]
+if is_manylinux2010:
+ data = []
+ ext_modules = [
+ Extension(
+ 'onnxruntime.capi.onnxruntime_pybind11_state',
+ ['onnxruntime/capi/onnxruntime_pybind11_state_manylinux2010.so'],
+ ),
+ ]
+else:
+ data = [path.join('capi', x) for x in libs if path.isfile(path.join('onnxruntime', 'capi', x))]
+ ext_modules = []
+
python_modules_list = list()
if '--use_openvino' in sys.argv:
@@ -98,7 +171,7 @@ def finalize_options(self):
long_description=long_description,
author='Microsoft Corporation',
author_email='onnx@microsoft.com',
- cmdclass={'bdist_wheel': bdist_wheel},
+ cmdclass={'bdist_wheel': bdist_wheel, 'build_ext': build_ext},
license="MIT License",
packages=['onnxruntime',
'onnxruntime.backend',
@@ -106,6 +179,7 @@ def finalize_options(self):
'onnxruntime.datasets',
'onnxruntime.tools',
],
+ ext_modules=ext_modules,
package_data={
'onnxruntime': data + examples + extra,
},
diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py
index c1f0a1629e35a..f20bdd682a818 100755
--- a/tools/ci_build/build.py
+++ b/tools/ci_build/build.py
@@ -683,33 +683,20 @@ def run_server_model_tests(build_dir, configs):
def build_python_wheel(source_dir, build_dir, configs, use_cuda, use_ngraph, use_tensorrt, use_openvino, nightly_build = False):
for config in configs:
cwd = get_config_build_dir(build_dir, config)
-
if is_windows():
cwd = os.path.join(cwd, config)
+ args = [sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel']
if nightly_build:
- if use_tensorrt:
- run_subprocess([sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel', '--use_tensorrt', '--nightly_build'], cwd=cwd)
- elif use_cuda:
- run_subprocess([sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel', '--use_cuda', '--nightly_build'], cwd=cwd)
- elif use_ngraph:
- run_subprocess([sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel', '--use_ngraph', '--nightly-build'], cwd=cwd)
- elif use_openvino:
- run_subprocess([sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel', '--use_openvino', '--nightly-build'], cwd=cwd)
- else:
- run_subprocess([sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel', '--nightly_build'], cwd=cwd)
- else:
- if use_tensorrt:
- run_subprocess([sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel', '--use_tensorrt'], cwd=cwd)
- elif use_cuda:
- run_subprocess([sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel', '--use_cuda'], cwd=cwd)
- elif use_ngraph:
- run_subprocess([sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel', '--use_ngraph'], cwd=cwd)
- elif use_openvino:
- run_subprocess([sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel', '--use_openvino'], cwd=cwd)
- else:
- run_subprocess([sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel'], cwd=cwd)
- if is_ubuntu_1604():
- run_subprocess([os.path.join(source_dir, 'rename_manylinux.sh')], cwd=cwd+'/dist')
+ args.append('--nightly_build')
+ if use_tensorrt:
+ args.append('--use_tensorrt')
+ elif use_cuda:
+ args.append('--use_cuda')
+ elif use_ngraph:
+ args.append('--use_ngraph')
+ elif use_openvino:
+ args.append('--use_openvino')
+ run_subprocess(args, cwd=cwd)
def build_protoc_for_windows_host(cmake_path, source_dir, build_dir):
if not is_windows():
diff --git a/tools/ci_build/github/azure-pipelines/azure-pipelines-py-packaging.yml b/tools/ci_build/github/azure-pipelines/azure-pipelines-py-packaging.yml
index de22b1ca27798..751c351a7804d 100644
--- a/tools/ci_build/github/azure-pipelines/azure-pipelines-py-packaging.yml
+++ b/tools/ci_build/github/azure-pipelines/azure-pipelines-py-packaging.yml
@@ -1,5 +1,5 @@
jobs:
-- job: Ubuntu1604_py_Wheels
+- job: Manylinux2010_py_Wheels
pool: Linux-CPU
strategy:
matrix:
@@ -32,13 +32,13 @@ jobs:
displayName: 'Run build script'
inputs:
scriptPath: 'tools/ci_build/github/linux/run_dockerbuild.sh'
- args: '-c Release -o ubuntu16.04 -d cpu -r $(Build.BinariesDirectory) -p $(python.version) -x "--use_openmp --build_wheel"'
+ args: '-c Release -o manylinux2010 -d cpu -r $(Build.BinariesDirectory) -p $(python.version) -x "--use_openmp --build_wheel"'
- task: CopyFiles@2
displayName: 'Copy Python Wheel to: $(Build.ArtifactStagingDirectory)'
inputs:
SourceFolder: '$(Build.BinariesDirectory)'
- Contents: 'Release/dist/*-manylinux1_x86_64.whl'
+ Contents: 'Release/dist/*-manylinux2010_x86_64.whl'
TargetFolder: '$(Build.ArtifactStagingDirectory)'
- task: PublishBuildArtifacts@1
@@ -51,7 +51,7 @@ jobs:
- template: templates/clean-agent-build-directory-step.yml
-- job: Ubuntu1604_py_GPU_Wheels
+- job: Manylinux2010_py_GPU_Wheels
pool: Linux-GPU
strategy:
matrix:
@@ -94,13 +94,13 @@ jobs:
displayName: 'Run build script'
inputs:
scriptPath: 'tools/ci_build/github/linux/run_dockerbuild.sh'
- args: '-c Release -o ubuntu16.04 -d gpu -c cuda9.1-cudnn7.1 -r $(Build.BinariesDirectory) -p $(python.version) -x "--use_openmp --build_wheel"'
+ args: '-c Release -o manylinux2010 -d gpu -c cuda10.1 -r $(Build.BinariesDirectory) -p $(python.version) -x "--use_openmp --build_wheel"'
- task: CopyFiles@2
displayName: 'Copy Python Wheel to: $(Build.ArtifactStagingDirectory)'
inputs:
SourceFolder: '$(Build.BinariesDirectory)'
- Contents: 'Release/dist/*-manylinux1_x86_64.whl'
+ Contents: 'Release/dist/*-manylinux2010_x86_64.whl'
TargetFolder: '$(Build.ArtifactStagingDirectory)'
- task: PublishBuildArtifacts@1
@@ -161,7 +161,7 @@ jobs:
pool: Win-GPU
variables:
buildDirectory: '$(Build.SourcesDirectory)\build'
- CUDA_VERSION: '9.1'
+ CUDA_VERSION: '10.0'
strategy:
matrix:
Python35:
@@ -178,12 +178,6 @@ jobs:
packageSpecs: 'python=$(python.version)'
cleanEnvironment: true
- - task: PowerShell@1
- displayName: 'Set CUDA path'
- inputs:
- scriptName: 'tools/ci_build/github/windows/set_cuda_path.ps1'
- arguments: '-CudaMsbuildPath C:\local\cudaMsbuildIntegration-9.1.85-windows10-x64-0 -CudaVersion $(CUDA_VERSION)'
-
- task: BatchScript@1
displayName: 'Setup VS2017 env vars'
inputs:
@@ -195,8 +189,8 @@ jobs:
displayName: 'Run build script'
inputs:
filename: 'build.bat'
- arguments: ' --use_cuda --cuda_home="C:\local\cuda-9.1.85-windows10-x64-0"
- --cudnn_home="C:\local\cudnn-9.1-windows10-x64-v7.1\cuda" --build_dir $(buildDirectory) --config Release --use_openmp --build_wheel'
+ arguments: ' --use_cuda --cuda_home="C:\local\cuda_10.0.130_win10"
+ --cudnn_home="C:\local\cudnn-10.0-windows10-x64-v7.3.1.20\cuda" --build_dir $(buildDirectory) --config Release --use_openmp --build_wheel'
workingFolder: "$(Build.SourcesDirectory)"
- task: CopyFiles@2
@@ -215,45 +209,3 @@ jobs:
displayName: 'Component Detection'
- template: templates/clean-agent-build-directory-step.yml
-
-- job: MacOS_py_Wheels
- pool:
- vmImage: 'macOS-10.13'
- strategy:
- matrix:
- Python35:
- python.version: '3.5'
- Python36:
- python.version: '3.6'
- Python37:
- python.version: '3.7'
- steps:
- - task: CondaEnvironment@1
- inputs:
- createCustomEnvironment: true
- environmentName: 'py$(python.version)'
- packageSpecs: 'python=$(python.version)'
- cleanEnvironment: true
-
- - script: |
- sudo python -m pip install numpy==1.15.0
- sudo xcode-select --switch /Applications/Xcode_10.app/Contents/Developer
- ./build.sh --config Release --skip_submodule_sync --parallel --use_openmp --build_wheel
- displayName: 'Command Line Script'
-
- - task: CopyFiles@2
- displayName: 'Copy Python Wheel to: $(Build.ArtifactStagingDirectory)'
- inputs:
- SourceFolder: '$(Build.SourcesDirectory)'
- Contents: '**/dist/*.whl'
- TargetFolder: '$(Build.ArtifactStagingDirectory)'
-
- - task: PublishBuildArtifacts@1
- displayName: 'Publish Artifact: ONNXRuntime python wheel'
- inputs:
- ArtifactName: onnxruntime
-
- - task: ms.vss-governance-buildtask.governance-build-task-component-detection.ComponentGovernanceComponentDetection@0
- displayName: 'Component Detection'
-
- - template: templates/clean-agent-build-directory-step.yml
diff --git a/tools/ci_build/github/azure-pipelines/c-api-packaging-pipelines.yml b/tools/ci_build/github/azure-pipelines/c-api-packaging-pipelines.yml
index 22296ee997273..52d7255b52b75 100644
--- a/tools/ci_build/github/azure-pipelines/c-api-packaging-pipelines.yml
+++ b/tools/ci_build/github/azure-pipelines/c-api-packaging-pipelines.yml
@@ -180,11 +180,4 @@ jobs:
artifactName: 'onnxruntime-win-$(buildArch)-gpu-$(OnnxRuntimeVersion)'
commitId: $(OnnxRuntimeGitCommitHash)
- - task: PowerShell@2
- displayName: 'Clean up Cuda Path 9.1'
- inputs:
- targetType: 'filePath'
- filePath: '$(Build.SourcesDirectory)/tools/ci_build/github/windows/clean_up_cuda_prop_files.ps1'
- arguments: '-CudaVersion 9.1'
-
- template: templates/clean-agent-build-directory-step.yml
diff --git a/tools/ci_build/github/linux/docker/Dockerfile.manylinux2010 b/tools/ci_build/github/linux/docker/Dockerfile.manylinux2010
new file mode 100644
index 0000000000000..17a3417bfd043
--- /dev/null
+++ b/tools/ci_build/github/linux/docker/Dockerfile.manylinux2010
@@ -0,0 +1,31 @@
+FROM quay.io/pypa/manylinux2010_x86_64:latest
+
+ARG PYTHON_VERSION=3.5
+
+ADD scripts/install_manylinux2010.sh /tmp/scripts/install_manylinux2010.sh
+RUN /tmp/scripts/install_manylinux2010.sh -p ${PYTHON_VERSION}
+ADD scripts/install_protobuf.sh /tmp/scripts/install_protobuf.sh
+RUN (source /opt/onnxruntime-python/bin/activate; pip install cmake && /tmp/scripts/install_protobuf.sh && pip uninstall -y cmake)
+ADD scripts /tmp/scripts
+RUN (source /opt/onnxruntime-python/bin/activate; /tmp/scripts/install_deps.sh)
+RUN rm -rf /tmp/scripts # remove the build scripts so they are not left in the final image
+
+RUN echo "#!/bin/bash" > /opt/entrypoint.sh && \
+ echo "set -e" >> /opt/entrypoint.sh && \
+ echo "source /opt/onnxruntime-python/bin/activate" >> /opt/entrypoint.sh && \
+ echo "exec \"\$@\"" >> /opt/entrypoint.sh
+RUN cat /opt/entrypoint.sh
+RUN chmod +x /opt/entrypoint.sh
+
+WORKDIR /root
+
+ENV LD_LIBRARY_PATH /usr/local/openblas/lib:$LD_LIBRARY_PATH
+
+ARG BUILD_UID=1000
+ARG BUILD_USER=onnxruntimedev
+WORKDIR /home/$BUILD_USER
+# --disabled-password
+RUN adduser --comment 'onnxruntime Build User' $BUILD_USER --uid $BUILD_UID
+USER $BUILD_USER
+
+ENTRYPOINT ["/opt/entrypoint.sh"]
diff --git a/tools/ci_build/github/linux/docker/Dockerfile.manylinux2010_gpu b/tools/ci_build/github/linux/docker/Dockerfile.manylinux2010_gpu
new file mode 100644
index 0000000000000..578c7b28657b0
--- /dev/null
+++ b/tools/ci_build/github/linux/docker/Dockerfile.manylinux2010_gpu
@@ -0,0 +1,66 @@
+FROM quay.io/pypa/manylinux2010_x86_64:latest
+
+ARG PYTHON_VERSION=3.5
+
+ADD scripts/install_manylinux2010.sh /tmp/scripts/install_manylinux2010.sh
+RUN /tmp/scripts/install_manylinux2010.sh -p ${PYTHON_VERSION}
+ADD scripts/install_protobuf.sh /tmp/scripts/install_protobuf.sh
+RUN (source /opt/onnxruntime-python/bin/activate; pip install cmake && /tmp/scripts/install_protobuf.sh && pip uninstall -y cmake)
+ADD scripts /tmp/scripts
+RUN (source /opt/onnxruntime-python/bin/activate; /tmp/scripts/install_deps.sh)
+RUN rm -rf /tmp/scripts # remove the build scripts so they are not left in the final image
+
+RUN echo "#!/bin/bash" > /opt/entrypoint.sh && \
+ echo "set -e" >> /opt/entrypoint.sh && \
+ echo "source /opt/onnxruntime-python/bin/activate" >> /opt/entrypoint.sh && \
+ echo "exec \"\$@\"" >> /opt/entrypoint.sh
+RUN cat /opt/entrypoint.sh
+RUN chmod +x /opt/entrypoint.sh
+
+RUN NVIDIA_GPGKEY_SUM=d1be581509378368edeec8c1eb2958702feedf3bc3d17011adbf24efacce4ab5 && \
+ curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel6/x86_64/7fa2af80.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \
+ echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c -
+
+COPY cuda_manylinux2010.repo /etc/yum.repos.d/cuda.repo
+
+ENV CUDA_VERSION 10.1.168
+ENV CUDA_PKG_VERSION 10-1-$CUDA_VERSION-1
+
+# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a
+RUN yum install -y \
+ cuda-cudart-$CUDA_PKG_VERSION \
+ cuda-compat-10-1 \
+ cuda-libraries-$CUDA_PKG_VERSION \
+ cuda-nvtx-$CUDA_PKG_VERSION \
+ cuda-libraries-dev-$CUDA_PKG_VERSION \
+ cuda-nvml-dev-$CUDA_PKG_VERSION \
+ cuda-minimal-build-$CUDA_PKG_VERSION \
+ cuda-command-line-tools-$CUDA_PKG_VERSION \
+ && \
+ ln -s cuda-10.1 /usr/local/cuda && \
+ rm -rf /var/cache/yum/*
+
+# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
+RUN CUDNN_DOWNLOAD_SUM=e956c6f9222fcb867a10449cfc76dee5cfd7c7531021d95fe9586d7e043b57d7 && \
+ curl -fsSL http://developer.download.nvidia.com/compute/redist/cudnn/v7.6.0/cudnn-10.1-linux-x64-v7.6.0.64.tgz -O && \
+ echo "$CUDNN_DOWNLOAD_SUM cudnn-10.1-linux-x64-v7.6.0.64.tgz" | sha256sum -c - && \
+ tar --no-same-owner -xzf cudnn-10.1-linux-x64-v7.6.0.64.tgz -C /usr/local && \
+ rm cudnn-10.1-linux-x64-v7.6.0.64.tgz && \
+ ldconfig
+
+ENV LD_LIBRARY_PATH /usr/local/openblas/lib:/usr/local/cuda/lib64/stubs:/usr/local/nvidia/lib:/usr/local/nvidia/lib64:$LD_LIBRARY_PATH
+ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
+
+# nvidia-container-runtime
+ENV NVIDIA_VISIBLE_DEVICES all
+ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
+ENV NVIDIA_REQUIRE_CUDA "cuda>=10.1 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=396,driver<397 brand=tesla,driver>=410,driver<411"
+
+ARG BUILD_UID=1000
+ARG BUILD_USER=onnxruntimedev
+WORKDIR /home/$BUILD_USER
+# --disabled-password
+RUN adduser --comment 'onnxruntime Build User' $BUILD_USER --uid $BUILD_UID
+USER $BUILD_USER
+
+ENTRYPOINT ["/opt/entrypoint.sh"]
diff --git a/tools/ci_build/github/linux/docker/cuda_manylinux2010.repo b/tools/ci_build/github/linux/docker/cuda_manylinux2010.repo
new file mode 100644
index 0000000000000..20972766acf7b
--- /dev/null
+++ b/tools/ci_build/github/linux/docker/cuda_manylinux2010.repo
@@ -0,0 +1,6 @@
+[cuda]
+name=cuda
+baseurl=http://developer.download.nvidia.com/compute/cuda/repos/rhel6/x86_64
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA
diff --git a/tools/ci_build/github/linux/docker/scripts/install_deps.sh b/tools/ci_build/github/linux/docker/scripts/install_deps.sh
index 9cfdd6a1bd90f..c128feef450a6 100755
--- a/tools/ci_build/github/linux/docker/scripts/install_deps.sh
+++ b/tools/ci_build/github/linux/docker/scripts/install_deps.sh
@@ -37,6 +37,9 @@ rm -rf /tmp/src
DISTRIBUTOR=$(lsb_release -i -s)
if [ "$DISTRIBUTOR" = "Ubuntu" ]; then
apt-get -y remove libprotobuf-dev protobuf-compiler
+elif [ "$AUDITWHEEL_PLAT" = "manylinux2010_x86_64" ]; then
+ # protobuf 2.x was never installed in this image, so there is nothing to uninstall
+ :
else
dnf remove -y protobuf-devel protobuf-compiler
fi
diff --git a/tools/ci_build/github/linux/docker/scripts/install_manylinux2010.sh b/tools/ci_build/github/linux/docker/scripts/install_manylinux2010.sh
new file mode 100755
index 0000000000000..664684bd00c45
--- /dev/null
+++ b/tools/ci_build/github/linux/docker/scripts/install_manylinux2010.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -e
+set -x
+
+while getopts p: parameter_Option
+do case "${parameter_Option}"
+in
+p) PYTHON_VER=${OPTARG};;
+esac
+done
+
+PYTHON_VER=${PYTHON_VER:=3.5}
+CPYTHON_VER=cp${PYTHON_VER//./}
+
+# need to install rpmforge in order to get aria2
+curl -fsSLo /tmp/rpmforge.rpm http://repository.it4i.cz/mirrors/repoforge/redhat/el6/en/x86_64/rpmforge/RPMS/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
+yum -y install /tmp/rpmforge.rpm
+rm -f /tmp/rpmforge.rpm
+
+yum -y install openblas-devel zlib-devel curl-devel expat-devel aria2 rsync redhat-lsb-core
+yum -y clean all
+
+/opt/python/${CPYTHON_VER}-${CPYTHON_VER}m/bin/python -m venv /opt/onnxruntime-python
+source /opt/onnxruntime-python/bin/activate
+if [ ! -f /opt/onnxruntime-python/bin/python${PYTHON_VER} ]; then
+ ln -s python /opt/onnxruntime-python/bin/python${PYTHON_VER}
+fi
+python -m pip install --upgrade --force-reinstall pip==19.1.1
+python -m pip install --upgrade --force-reinstall numpy==1.16.3
+python -m pip install --upgrade --force-reinstall requests==2.21.0
+python -m pip install --upgrade --force-reinstall wheel==0.31.1
+python -m pip install --upgrade --force-reinstall setuptools==41.0.1
+python -m pip install --upgrade --force-reinstall pytest==4.6.2
+
+ls -al /opt/onnxruntime-python/bin
+
+echo "#!/bin/sh" > /opt/entrypoint.sh
+echo "source /opt/onnxruntime-python/bin/activate" >> /opt/entrypoint.sh
+echo "exec \"\$@\"" >> /opt/entrypoint.sh
+
+mkdir -p $HOME/.aria2
+echo "ca-certificate=/opt/_internal/certs.pem" > $HOME/.aria2/aria2.conf
diff --git a/tools/ci_build/github/linux/docker/scripts/install_onnx.sh b/tools/ci_build/github/linux/docker/scripts/install_onnx.sh
index f06f000d8f7f0..36fb7fec10d03 100755
--- a/tools/ci_build/github/linux/docker/scripts/install_onnx.sh
+++ b/tools/ci_build/github/linux/docker/scripts/install_onnx.sh
@@ -1,10 +1,10 @@
#!/bin/bash
# The script is to generate all supported versions of onnx models which will be tested by onnx_test_runner
-# in the end of ci build pipeline. The purpose is to make sure latest onnxruntime has no regressions. Note
-# that the order of installation must be onnx123, onnx130, onnx141, onnx150 and onnxtip since we want
+# in the end of ci build pipeline. The purpose is to make sure latest onnxruntime has no regressions. Note
+# that the order of installation must be onnx123, onnx130, onnx141, onnx150 and onnxtip since we want
# to keep the tip of master on script exit for onnx backend test which is also a part of build pipeline.
-# One possible improvement here is to keep the models saved to some public storage instead of generating
+# One possible improvement here is to keep the models saved to some public storage instead of generating
# on the fly every time.
set -e
@@ -21,14 +21,14 @@ for v2t in ${version2tag[*]}; do
echo "first pass";
else
echo "deleting old onnx-${lastest_onnx_version}";
- /usr/bin/python${PYTHON_VER} -m pip uninstall -y onnx
+ python${PYTHON_VER} -m pip uninstall -y onnx
fi
lastest_onnx_version=$onnx_version
aria2c -q -d /tmp/src https://github.com/onnx/onnx/archive/$onnx_version.tar.gz
tar -xf /tmp/src/onnx-$onnx_version.tar.gz -C /tmp/src
cd /tmp/src/onnx-$onnx_version
git clone https://github.com/pybind/pybind11.git third_party/pybind11
- /usr/bin/python${PYTHON_VER} -m pip install .
+ python${PYTHON_VER} -m pip install .
mkdir -p /data/onnx/${onnx_tag}
backend-test-tools generate-data -o /data/onnx/$onnx_tag
done
diff --git a/tools/ci_build/github/linux/run_build.sh b/tools/ci_build/github/linux/run_build.sh
index 53c77ad40e867..44af48a2e8d27 100755
--- a/tools/ci_build/github/linux/run_build.sh
+++ b/tools/ci_build/github/linux/run_build.sh
@@ -21,13 +21,29 @@ if [ $BUILD_OS = "android" ]; then
make -j$(nproc)
else
COMMON_BUILD_ARGS="--skip_submodule_sync --enable_onnx_tests --parallel --build_shared_lib --use_openmp --cmake_path /usr/bin/cmake --ctest_path /usr/bin/ctest"
+ if [ $BUILD_OS = "manylinux2010" ]; then
+ # FindPython3 does not work on manylinux2010 image, define things manually
+ # ask python where to find includes
+ COMMON_BUILD_ARGS="${COMMON_BUILD_ARGS} --cmake_extra_defines PYTHON_INCLUDE_DIR=$(python3 -c 'import distutils.sysconfig; print(distutils.sysconfig.get_python_inc())')"
+ # Python does not provide a shared library on manylinux, use another library
+ COMMON_BUILD_ARGS="${COMMON_BUILD_ARGS} PYTHON_LIBRARY=/usr/lib64/librt.so"
+
+ fi
if [ $BUILD_DEVICE = "gpu" ]; then
- _CUDNN_VERSION=$(echo $CUDNN_VERSION | cut -d. -f1-2)
- python3 $SCRIPT_DIR/../../build.py --build_dir /build \
- --config Debug Release $COMMON_BUILD_ARGS \
- --use_cuda \
- --cuda_home /usr/local/cuda \
- --cudnn_home /usr/local/cudnn-$_CUDNN_VERSION/cuda $BUILD_EXTR_PAR
+ if [ $BUILD_OS = "manylinux2010" ]; then
+ python3 $SCRIPT_DIR/../../build.py --build_dir /build \
+ --config Debug Release $COMMON_BUILD_ARGS \
+ --use_cuda \
+ --cuda_home /usr/local/cuda \
+ --cudnn_home /usr/local/cuda $BUILD_EXTR_PAR
+ else
+ _CUDNN_VERSION=$(echo $CUDNN_VERSION | cut -d. -f1-2)
+ python3 $SCRIPT_DIR/../../build.py --build_dir /build \
+ --config Debug Release $COMMON_BUILD_ARGS \
+ --use_cuda \
+ --cuda_home /usr/local/cuda \
+ --cudnn_home /usr/local/cudnn-$_CUDNN_VERSION/cuda $BUILD_EXTR_PAR
+ fi
elif [ $BUILD_DEVICE = "tensorrt" ]; then
_CUDNN_VERSION=$(echo $CUDNN_VERSION | cut -d. -f1-2)
python3 $SCRIPT_DIR/../../build.py --build_dir /build \
diff --git a/tools/ci_build/github/linux/run_dockerbuild.sh b/tools/ci_build/github/linux/run_dockerbuild.sh
index cbd1d803067d2..8714055cbdd7f 100755
--- a/tools/ci_build/github/linux/run_dockerbuild.sh
+++ b/tools/ci_build/github/linux/run_dockerbuild.sh
@@ -8,7 +8,7 @@ CUDA_VER=cuda10.0-cudnn7.3
while getopts c:o:d:r:p:x:a:v: parameter_Option
do case "${parameter_Option}"
in
-#android, ubuntu16.04
+#android, ubuntu16.04, manylinux2010
o) BUILD_OS=${OPTARG};;
#cpu, gpu, tensorrt
d) BUILD_DEVICE=${OPTARG};;
@@ -22,7 +22,7 @@ c) CUDA_VER=${OPTARG};;
# x86 or other, only for ubuntu16.04 os
a) BUILD_ARCH=${OPTARG};;
# openvino version tag: 2018_R5, 2019_R1 (Default is 2018_R5)
-v) OPENVINO_VERSION=${OPTARG};;
+v) OPENVINO_VERSION=${OPTARG};;
esac
done
@@ -35,6 +35,15 @@ if [ $BUILD_OS = "android" ]; then
IMAGE="android"
DOCKER_FILE=Dockerfile.ubuntu_for_android
docker build -t "onnxruntime-$IMAGE" --build-arg BUILD_USER=onnxruntimedev --build-arg BUILD_UID=$(id -u) --build-arg PYTHON_VERSION=${PYTHON_VER} -f $DOCKER_FILE .
+elif [ $BUILD_OS = "manylinux2010" ]; then
+ if [ $BUILD_DEVICE = "gpu" ]; then
+ IMAGE="manylinux2010-cuda10.1"
+ DOCKER_FILE=Dockerfile.manylinux2010_gpu
+ else
+ IMAGE="manylinux2010"
+ DOCKER_FILE=Dockerfile.manylinux2010
+ fi
+ docker build -t "onnxruntime-$IMAGE" --build-arg BUILD_USER=onnxruntimedev --build-arg BUILD_UID=$(id -u) --build-arg PYTHON_VERSION=${PYTHON_VER} -f $DOCKER_FILE .
else
if [ $BUILD_DEVICE = "gpu" ]; then
IMAGE="ubuntu16.04-$CUDA_VER"
@@ -67,35 +76,26 @@ mkdir -p ~/.cache/onnxruntime
mkdir -p ~/.onnx
if [ -z "$NIGHTLY_BUILD" ]; then
-set NIGHTLY_BUILD=0
+ set NIGHTLY_BUILD=0
fi
if [ $BUILD_DEVICE = "cpu" ] || [ $BUILD_DEVICE = "ngraph" ] || [ $BUILD_DEVICE = "openvino" ]; then
- docker rm -f "onnxruntime-$BUILD_DEVICE" || true
- docker run -h $HOSTNAME \
- --name "onnxruntime-$BUILD_DEVICE" \
- --volume "$SOURCE_ROOT:/onnxruntime_src" \
- --volume "$BUILD_DIR:/build" \
- --volume "$HOME/.cache/onnxruntime:/home/onnxruntimedev/.cache/onnxruntime" \
- --volume "$HOME/.onnx:/home/onnxruntimedev/.onnx" \
- -e NIGHTLY_BUILD \
- "onnxruntime-$IMAGE" \
- /bin/bash /onnxruntime_src/tools/ci_build/github/linux/run_build.sh \
- -d $BUILD_DEVICE -x "$BUILD_EXTR_PAR" -o $BUILD_OS &
+ ONNX_DOCKER=docker
else
- docker rm -f "onnxruntime-$BUILD_DEVICE" || true
- nvidia-docker run --rm -h $HOSTNAME \
- --rm \
- --name "onnxruntime-$BUILD_DEVICE" \
- --volume "$SOURCE_ROOT:/onnxruntime_src" \
- --volume "$BUILD_DIR:/build" \
- --volume "$HOME/.cache/onnxruntime:/home/onnxruntimedev/.cache/onnxruntime" \
- --volume "$HOME/.onnx:/home/onnxruntimedev/.onnx" \
- -e NIGHTLY_BUILD \
- "onnxruntime-$IMAGE" \
- /bin/bash /onnxruntime_src/tools/ci_build/github/linux/run_build.sh \
- -d $BUILD_DEVICE -x "$BUILD_EXTR_PAR" -o $BUILD_OS &
+ ONNX_DOCKER=nvidia-docker
fi
+
+docker rm -f "onnxruntime-$BUILD_DEVICE" || true
+$ONNX_DOCKER run -h $HOSTNAME \
+ --name "onnxruntime-$BUILD_DEVICE" \
+ --volume "$SOURCE_ROOT:/onnxruntime_src" \
+ --volume "$BUILD_DIR:/build" \
+ --volume "$HOME/.cache/onnxruntime:/home/onnxruntimedev/.cache/onnxruntime" \
+ --volume "$HOME/.onnx:/home/onnxruntimedev/.onnx" \
+ -e NIGHTLY_BUILD \
+ "onnxruntime-$IMAGE" \
+ /bin/bash /onnxruntime_src/tools/ci_build/github/linux/run_build.sh \
+ -d $BUILD_DEVICE -x "$BUILD_EXTR_PAR" -o $BUILD_OS &
wait $!
EXIT_CODE=$?