diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 501c15530..d85baa045 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -163,17 +163,19 @@ jobs:
         include:
           - os: windows-2019
             device: gpu
+            python_architecture: 'x64'
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-win-x64-gpu-1.9.0.zip
-            cmake_additional_options: -DENABLE_CUDA=ON
             artifact_name: windows-x64-gpu

           - os: windows-2019
             device: cpu-x64
+            python_architecture: 'x64'
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-win-x64-1.9.0.zip
             artifact_name: windows-x64-cpu

           - os: windows-2019
             device: cpu-x86
+            python_architecture: 'x86'
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-win-x86-1.9.0.zip
             cmake_additional_options: -DCMAKE_GENERATOR_PLATFORM=Win32
             artifact_name: windows-x86-cpu
@@ -192,19 +194,21 @@
           - os: macos-10.15
             device: cpu-x64
+            python_architecture: 'x64'
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-osx-x64-1.9.0.tgz
             artifact_name: osx-x64-cpu

           - os: ubuntu-18.04
             device: gpu
+            python_architecture: 'x64'
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-linux-x64-gpu-1.9.0.tgz
-            cmake_additional_options: -DENABLE_CUDA=ON
             artifact_name: linux-x64-gpu
             cc_version: '8'
             cxx_version: '8'

           - os: ubuntu-18.04
             device: cpu-x64
+            python_architecture: 'x64'
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-linux-x64-1.9.0.tgz
             artifact_name: linux-x64-cpu
             cc_version: '8'
@@ -229,6 +233,14 @@ jobs:
     steps:
       - uses: actions/checkout@v2

+      - name: Setup Python
+        if: matrix.python_architecture != ''
+        id: setup-python
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.8
+          architecture: ${{ matrix.python_architecture }}
+
      - run: mkdir download

      # ONNX Runtime
@@ -282,6 +294,8 @@
      # Build
      - if: startsWith(matrix.os, 'windows')
        uses: ilammy/msvc-dev-cmd@v1
+        with:
+          arch: ${{ matrix.python_architecture }}

      - if: startsWith(matrix.os, 'mac')
        uses: jwlawson/actions-setup-cmake@v1.9
@@ -320,6 +334,13 @@
          # copy lib to core/lib/* and set rpath (linux)
          cmake --install .

+      - name: Unit test ${{ matrix.python_architecture }}
+        if: matrix.python_architecture != ''
+        shell: bash
+        run: |
+          pip install -r requirements.txt
+          python setup.py test
+
      - name: Organize artifact
        shell: bash
        run: |
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 93fdf01e4..360d4ebd7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,9 +1,6 @@
 cmake_minimum_required(VERSION 3.16)
 project(VoiceVoxCore)

-# TODO: build options
-set(ENABLE_CUDA OFF CACHE BOOL "use CUDA")
-
 # TODO: download onnxruntime
 set(ONNXRUNTIME_DIR "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime" CACHE PATH "Path to ONNX Runtime")
diff --git a/README.md b/README.md
index 4498cb839..653885f6e 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,7 @@
 ## 依存関係

 - ONNX Runtime v1.9.0/v1.9.1: https://github.com/microsoft/onnxruntime
+- CMake

 環境に対応した ONNX Runtime をダウンロードし、リポジトリに`onnxruntime`というディレクトリ名で展開します。
@@ -43,6 +44,20 @@ sudo apt install libgomp1
 #### ソースコードから実行

 ```bash
+# C++モジュールのビルド
+mkdir build
+cd build
+# もしダウンロードしたonnx runtimeが別のところにあるなら、以下のコマンドを
+# cmake .. -DONNXRUNTIME_DIR=(ダウンロードしたonnx runtimeのパス) に変更する。
+cmake ..
+cmake --build . --config Release
+cmake --install .
+cd ..
+
+# (省略可能) pythonモジュールのテスト
+python setup.py test
+
+# pythonモジュールのインストール
 pip install .

 cd example/python
diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt
index 66f562475..1eb889412 100644
--- a/core/CMakeLists.txt
+++ b/core/CMakeLists.txt
@@ -17,6 +17,7 @@ else()
   message(FATAL_ERROR "Unable to find ONNX Runtime. Use option -DONNXRUNTIME_DIR=...")
 endif()
 file(GLOB ONNXRUNTIME_LIBS
+  "${ONNXRUNTIME_DIR}/lib/*.dylib"
   "${ONNXRUNTIME_DIR}/lib/*.dll"
   "${ONNXRUNTIME_DIR}/lib/*.lib"
   "${ONNXRUNTIME_DIR}/lib/*.so"
@@ -39,11 +40,6 @@ target_link_libraries(core PUBLIC onnxruntime)

 # GCC 9.0以前ではstd::filesystemを使うためにリンクが必要 (https://gitlab.kitware.com/cmake/cmake/-/issues/17834)
 target_link_libraries(core PRIVATE $<$<AND:$<CXX_COMPILER_ID:GNU>,$<VERSION_LESS:$<CXX_COMPILER_VERSION>,9.0>>:stdc++fs>)

-# core.cpp内にUSE_CUDAをdefineする
-if(ENABLE_CUDA)
-  target_compile_definitions(core PRIVATE USE_CUDA)
-endif(ENABLE_CUDA)
-
 # cmake --installを行うとcoreライブラリ、onnxruntimeライブラリ、core.hがインストール先のlibフォルダにコピーされる
 install(TARGETS core
   ARCHIVE DESTINATION lib
diff --git a/core/_core.pxd b/core/_core.pxd
index 2194b2b3c..fc2d154f4 100644
--- a/core/_core.pxd
+++ b/core/_core.pxd
@@ -11,6 +11,8 @@ cdef extern from "core.h":

     const char *c_metas "metas" ()

+    const char *c_supported_devices "supported_devices" ()
+
     bool c_yukarin_s_forward "yukarin_s_forward" (
         int64_t length,
         int64_t *phoneme_list,
diff --git a/core/_core.pyx b/core/_core.pyx
index 0efe18d77..773bfa307 100644
--- a/core/_core.pyx
+++ b/core/_core.pyx
@@ -20,6 +20,9 @@ cpdef finalize():
 cpdef metas():
     return c_metas().decode()

+cpdef supported_devices():
+    return c_supported_devices().decode()
+
 cpdef numpy.ndarray[numpy.float32_t, ndim=1] yukarin_s_forward(
     int64_t length,
     numpy.ndarray[numpy.int64_t, ndim=1] phoneme_list,
diff --git a/core/src/core.cpp b/core/src/core.cpp
index 5e70e53c4..58d6ed308 100644
--- a/core/src/core.cpp
+++ b/core/src/core.cpp
@@ -30,6 +30,7 @@ constexpr std::array<int64_t, 1> speaker_shape{1};

 static std::string error_message;
 static bool initialized = false;
+static std::string supported_devices_str;

 bool open_models(const fs::path &yukarin_s_path, const fs::path &yukarin_sa_path, const fs::path &decode_path,
                  std::vector<unsigned char> &yukarin_s_model, std::vector<unsigned char> &yukarin_sa_model,
@@ -68,6 +69,23 @@ bool open_metas(const fs::path &metas_path, nlohmann::json &metas) {
   return true;
 }

+struct SupportedDevices {
+  bool cpu = true;
+  bool cuda = false;
+};
+NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(SupportedDevices, cpu, cuda);
+
+SupportedDevices get_supported_devices() {
+  SupportedDevices devices;
+  const auto providers = Ort::GetAvailableProviders();
+  for (const std::string &p : providers) {
+    if (p == "CUDAExecutionProvider") {
+      devices.cuda = true;
+    }
+  }
+  return devices;
+}
+
 struct Status {
   Status(const char *root_dir_path_utf8, bool use_gpu_)
       : root_dir_path(root_dir_path_utf8),
@@ -100,11 +118,10 @@ struct Status {
     Ort::SessionOptions session_options;
     yukarin_s = Ort::Session(env, yukarin_s_model.data(), yukarin_s_model.size(), session_options);
     yukarin_sa = Ort::Session(env, yukarin_sa_model.data(), yukarin_sa_model.size(), session_options);
-#ifdef USE_CUDA
     if (use_gpu) {
-      Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0));
+      const OrtCUDAProviderOptions cuda_options;
+      session_options.AppendExecutionProvider_CUDA(cuda_options);
     }
-#endif
     decode = Ort::Session(env, decode_model.data(), decode_model.size(), session_options);
     return true;
   }
@@ -142,12 +159,10 @@ bool validate_speaker_id(int64_t speaker_id) {

 bool initialize(const char *root_dir_path, bool use_gpu) {
   initialized = false;
-#ifndef USE_CUDA
-  if (use_gpu) {
+  if (use_gpu && !get_supported_devices().cuda) {
     error_message = GPU_NOT_SUPPORTED_ERR;
     return false;
   }
-#endif
   try {
     status = std::make_unique<Status>(root_dir_path, use_gpu);
     if (!status->load()) {
@@ -186,6 +201,13 @@ void finalize() {

 const char *metas() { return status->metas_str.c_str(); }

+const char *supported_devices() {
+  SupportedDevices devices = get_supported_devices();
+  nlohmann::json json = devices;
+  supported_devices_str = json.dump();
+  return supported_devices_str.c_str();
+}
+
 bool yukarin_s_forward(int64_t length, int64_t *phoneme_list, int64_t *speaker_id, float *output) {
   if (!initialized) {
     error_message = NOT_INITIALIZED_ERR;
diff --git a/core/src/core.h b/core/src/core.h
index ac25cdd36..eea727d4f 100644
--- a/core/src/core.h
+++ b/core/src/core.h
@@ -41,6 +41,14 @@ extern "C" VOICEVOX_CORE_API void finalize();
  */
 extern "C" VOICEVOX_CORE_API const char *metas();

+/**
+ * @fn
+ * 対応デバイス情報を取得する
+ * @brief cpu, cudaのうち、使用可能なデバイス情報を取得する
+ * @return 各デバイスが使用可能かどうかをboolで格納したjson形式の文字列
+ */
+extern "C" VOICEVOX_CORE_API const char *supported_devices();
+
 /**
  * @fn
  * 音素ごとの長さを求める
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 000000000..03432f9fe
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+Cython
+numpy
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 61dc720f0..3df56dfc2 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,6 @@
+# -*- coding: utf-8 -*-
+
 from setuptools import setup, Extension
-from subprocess import check_call, CalledProcessError
 import platform
 import os
 import sys
@@ -14,30 +15,21 @@ def get_version():
     with open(os.path.join(base_dir, 'VERSION.txt')) as f:
         return f.read().strip()

-def build_src():
-    """setupより前にC++モジュールのビルド"""
-    print('Building C++ modules...')
-
+if __name__ == '__main__':
     base_dir = os.path.dirname(os.path.abspath(__file__))
-    build_dir = os.path.join(base_dir, 'build')
-    os.makedirs(build_dir, exist_ok=True)

-    try:
-        check_call(['cmake', '..'], cwd=build_dir)
-        check_call(['cmake', '--build', '.', '--config', 'Release'], cwd=build_dir)
-        check_call(['cmake', '--install', '.'], cwd=build_dir)
-    except (CalledProcessError, KeyboardInterrupt) as e:
-        sys.exit(1)

-if __name__ == '__main__':
-    build_src()
+    # C++モジュールがすでにビルドされ、core/libに入っているか確認
+    assert os.path.exists(os.path.join(base_dir, 'core', 'lib', 'core.h')), 'C++モジュールがビルドされていません'

     # 追加ライブラリ(pythonライブラリからの相対パスで./lib/*)を読み込めるように設定
     if platform.system() == "Windows":
         # Windowsでは別途__init__.pyで明示的に読み込む
-        runtime_library_dirs = []
+        extra_link_args = []
+    elif platform.system() == "Darwin":
+        extra_link_args = ["-Wl,-rpath,@loader_path/lib"]
     else:
         # $ORIGINはpythonライブラリの読み込み時に自動的に自身のパスに展開される
-        runtime_library_dirs = ["$ORIGIN/lib"]
+        extra_link_args = ["-Wl,-rpath,$ORIGIN/lib"]

     ext_modules = [
         Extension(
@@ -47,11 +39,11 @@
             libraries=["core"],
             include_dirs=["core/lib"],
             library_dirs=["core/lib"],
-            runtime_library_dirs=runtime_library_dirs,
+            extra_link_args=extra_link_args,
         )
     ]

-    sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests'))
+    sys.path.append(os.path.join(base_dir, 'tests'))

     setup(
         name="core",
diff --git a/tests/core_test.py b/tests/core_test.py
index d66e47b1f..4708c85bc 100644
--- a/tests/core_test.py
+++ b/tests/core_test.py
@@ -39,5 +39,11 @@ def test_metas(self):
         core.finalize()
         self.assertEqual(metas, core_metas)

+    def test_supported_devices(self):
+        devices = json.loads(core.supported_devices())
+        for expected_device in ["cpu", "cuda"]:
+            self.assertIn(expected_device, devices)
+        self.assertTrue(devices["cpu"])
+
 if __name__ == '__main__':
     unittest.main()