Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

cmakeとsetup.pyを切り離す、テストの追加 #51

Merged
merged 14 commits into from
Dec 17, 2021
Merged
25 changes: 23 additions & 2 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -163,17 +163,19 @@ jobs:
include:
- os: windows-2019
device: gpu
python_architecture: 'x64'
onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-win-x64-gpu-1.9.0.zip
cmake_additional_options: -DENABLE_CUDA=ON
artifact_name: windows-x64-gpu

- os: windows-2019
device: cpu-x64
python_architecture: 'x64'
onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-win-x64-1.9.0.zip
artifact_name: windows-x64-cpu

- os: windows-2019
device: cpu-x86
python_architecture: 'x86'
onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-win-x86-1.9.0.zip
cmake_additional_options: -DCMAKE_GENERATOR_PLATFORM=Win32
artifact_name: windows-x86-cpu
Expand All @@ -192,19 +194,21 @@ jobs:

- os: macos-10.15
device: cpu-x64
python_architecture: 'x64'
onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-osx-x64-1.9.0.tgz
artifact_name: osx-x64-cpu

- os: ubuntu-18.04
device: gpu
python_architecture: 'x64'
onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-linux-x64-gpu-1.9.0.tgz
cmake_additional_options: -DENABLE_CUDA=ON
artifact_name: linux-x64-gpu
cc_version: '8'
cxx_version: '8'

- os: ubuntu-18.04
device: cpu-x64
python_architecture: 'x64'
onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-linux-x64-1.9.0.tgz
artifact_name: linux-x64-cpu
cc_version: '8'
Expand All @@ -229,6 +233,14 @@ jobs:
steps:
- uses: actions/checkout@v2

- name: Setup Python
if: matrix.python_architecture != ''
id: setup-python
uses: actions/setup-python@v2
with:
python-version: 3.8
architecture: ${{ matrix.python_architecture }}

- run: mkdir download

# ONNX Runtime
Expand Down Expand Up @@ -282,6 +294,8 @@ jobs:
# Build
- if: startsWith(matrix.os, 'windows')
uses: ilammy/msvc-dev-cmd@v1
with:
arch: ${{ matrix.python_architecture }}

- if: startsWith(matrix.os, 'mac')
uses: jwlawson/[email protected]
Expand Down Expand Up @@ -320,6 +334,13 @@ jobs:
# copy lib to core/lib/* and set rpath (linux)
cmake --install .

- name: Unit test ${{ matrix.python_architecture }}
if: matrix.python_architecture != ''
shell: bash
run: |
pip install -r requirements.txt
python setup.py test

- name: Organize artifact
shell: bash
run: |
Expand Down
3 changes: 0 additions & 3 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,9 +1,6 @@
cmake_minimum_required(VERSION 3.16)
project(VoiceVoxCore)

# TODO: build options
set(ENABLE_CUDA OFF CACHE BOOL "use CUDA")

# TODO: download onnxruntime
set(ONNXRUNTIME_DIR "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime" CACHE PATH "Path to ONNX Runtime")

Expand Down
15 changes: 15 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
## 依存関係

- ONNX Runtime v1.9.0/v1.9.1: https://github.com/microsoft/onnxruntime
- CMake

環境に対応した ONNX Runtime をダウンロードし、リポジトリに`onnxruntime`というディレクトリ名で展開します。

Expand Down Expand Up @@ -43,6 +44,20 @@ sudo apt install libgomp1
#### ソースコードから実行

```bash
# C++モジュールのビルド
mkdir build
cd build
# もしダウンロードしたonnx runtimeが別のところにあるなら、以下のコマンドを
# cmake .. -DONNXRUNTIME_DIR=(ダウンロードしたonnx runtimeのパス) に変更する。
cmake ..
cmake --build . --config Release
cmake --install .
cd ..

# (省略可能) pythonモジュールのテスト
python setup.py test

# pythonモジュールのインストール
pip install .

cd example/python
Expand Down
6 changes: 1 addition & 5 deletions core/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ else()
message(FATAL_ERROR "Unable to find ONNX Runtime. Use option -DONNXRUNTIME_DIR=...")
endif()
file(GLOB ONNXRUNTIME_LIBS
"${ONNXRUNTIME_DIR}/lib/*.dylib"
"${ONNXRUNTIME_DIR}/lib/*.dll"
"${ONNXRUNTIME_DIR}/lib/*.lib"
"${ONNXRUNTIME_DIR}/lib/*.so"
Expand All @@ -39,11 +40,6 @@ target_link_libraries(core PUBLIC onnxruntime)
# GCC 9.0以前ではstd::filesystemを使うためにリンクが必要 (https://gitlab.kitware.com/cmake/cmake/-/issues/17834)
target_link_libraries(core PRIVATE $<$<AND:$<CXX_COMPILER_ID:GNU>,$<VERSION_LESS:$<CXX_COMPILER_VERSION>,9.0>>:stdc++fs>)

# core.cpp内にUSE_CUDAをdefineする
if(ENABLE_CUDA)
target_compile_definitions(core PRIVATE USE_CUDA)
endif(ENABLE_CUDA)

# cmake --installを行うとcoreライブラリ、onnxruntimeライブラリ、core.hがインストール先のlibフォルダにコピーされる
install(TARGETS core
ARCHIVE DESTINATION lib
Expand Down
2 changes: 2 additions & 0 deletions core/_core.pxd
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ cdef extern from "core.h":

const char *c_metas "metas" ()

const char *c_supported_devices "supported_devices" ()

bool c_yukarin_s_forward "yukarin_s_forward" (
int64_t length,
int64_t *phoneme_list,
Expand Down
3 changes: 3 additions & 0 deletions core/_core.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@ cpdef finalize():
cpdef metas():
return c_metas().decode()

cpdef supported_devices():
    """Return a JSON string describing which devices are usable.

    Wraps the C core's ``supported_devices()`` and decodes the returned
    bytes to ``str``; keys are device names (cpu, cuda) mapped to booleans.
    """
    return c_supported_devices().decode()

cpdef numpy.ndarray[numpy.float32_t, ndim=1] yukarin_s_forward(
int64_t length,
numpy.ndarray[numpy.int64_t, ndim=1] phoneme_list,
Expand Down
34 changes: 28 additions & 6 deletions core/src/core.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ constexpr std::array<int64_t, 1> speaker_shape{1};

static std::string error_message;
static bool initialized = false;
static std::string supported_devices_str;
Oyaki122 marked this conversation as resolved.
Show resolved Hide resolved

bool open_models(const fs::path &yukarin_s_path, const fs::path &yukarin_sa_path, const fs::path &decode_path,
std::vector<unsigned char> &yukarin_s_model, std::vector<unsigned char> &yukarin_sa_model,
Expand Down Expand Up @@ -68,6 +69,23 @@ bool open_metas(const fs::path &metas_path, nlohmann::json &metas) {
return true;
}

// Availability flags for each execution device the core can use.
// CPU inference is always assumed possible; CUDA defaults to off and is
// enabled at runtime by get_supported_devices() below.
struct SupportedDevices {
  bool cpu = true;
  bool cuda = false;
};
// Generates to_json/from_json so SupportedDevices converts to/from
// nlohmann::json as {"cpu": ..., "cuda": ...}.
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(SupportedDevices, cpu, cuda);

// Probe ONNX Runtime for usable execution devices.
// CPU is always reported available (struct default); CUDA is reported only
// when the runtime lists "CUDAExecutionProvider" among its providers.
SupportedDevices get_supported_devices() {
  SupportedDevices result;
  for (const std::string &provider_name : Ort::GetAvailableProviders()) {
    if (provider_name == "CUDAExecutionProvider") {
      result.cuda = true;
      break;  // found it; no need to scan the remaining providers
    }
  }
  return result;
}

struct Status {
Status(const char *root_dir_path_utf8, bool use_gpu_)
: root_dir_path(root_dir_path_utf8),
Expand Down Expand Up @@ -100,11 +118,10 @@ struct Status {
Ort::SessionOptions session_options;
yukarin_s = Ort::Session(env, yukarin_s_model.data(), yukarin_s_model.size(), session_options);
yukarin_sa = Ort::Session(env, yukarin_sa_model.data(), yukarin_sa_model.size(), session_options);
#ifdef USE_CUDA
if (use_gpu) {
Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0));
const OrtCUDAProviderOptions cuda_options;
session_options.AppendExecutionProvider_CUDA(cuda_options);
}
#endif
decode = Ort::Session(env, decode_model.data(), decode_model.size(), session_options);
return true;
}
Expand Down Expand Up @@ -142,12 +159,10 @@ bool validate_speaker_id(int64_t speaker_id) {

bool initialize(const char *root_dir_path, bool use_gpu) {
initialized = false;
#ifndef USE_CUDA
if (use_gpu) {
if (use_gpu && !get_supported_devices().cuda) {
error_message = GPU_NOT_SUPPORTED_ERR;
return false;
}
#endif
try {
status = std::make_unique<Status>(root_dir_path, use_gpu);
if (!status->load()) {
Expand Down Expand Up @@ -186,6 +201,13 @@ void finalize() {

const char *metas() { return status->metas_str.c_str(); }

// C API: serialize the currently available devices as a JSON string,
// e.g. {"cpu":true,"cuda":false}.
const char *supported_devices() {
  SupportedDevices devices = get_supported_devices();
  nlohmann::json json = devices;
  // The result is kept in a file-static std::string so the returned
  // pointer stays valid after this function returns. The buffer is
  // overwritten on every call, and there is no synchronization — the
  // pointer is invalidated by the next call (not safe for concurrent
  // callers).
  supported_devices_str = json.dump();
  return supported_devices_str.c_str();
}

bool yukarin_s_forward(int64_t length, int64_t *phoneme_list, int64_t *speaker_id, float *output) {
if (!initialized) {
error_message = NOT_INITIALIZED_ERR;
Expand Down
8 changes: 8 additions & 0 deletions core/src/core.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,14 @@ extern "C" VOICEVOX_CORE_API void finalize();
*/
extern "C" VOICEVOX_CORE_API const char *metas();

/**
* @fn
* 対応デバイス情報を取得する
* @brief cpu, cudaのうち、使用可能なデバイス情報を取得する
* @return 各デバイスが使用可能かどうかをboolで格納したjson形式の文字列
*/
extern "C" VOICEVOX_CORE_API const char *supported_devices();

/**
* @fn
* 音素ごとの長さを求める
Expand Down
2 changes: 2 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
Cython
numpy
30 changes: 11 additions & 19 deletions setup.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-

from setuptools import setup, Extension
from subprocess import check_call, CalledProcessError
import platform
import os
import sys
Expand All @@ -14,30 +15,21 @@ def get_version():
with open(os.path.join(base_dir, 'VERSION.txt')) as f:
return f.read().strip()

def build_src():
"""setupより前にC++モジュールのビルド"""
print('Building C++ modules...')

if __name__ == '__main__':
base_dir = os.path.dirname(os.path.abspath(__file__))
build_dir = os.path.join(base_dir, 'build')
os.makedirs(build_dir, exist_ok=True)
try:
check_call(['cmake', '..'], cwd=build_dir)
check_call(['cmake', '--build', '.', '--config', 'Release'], cwd=build_dir)
check_call(['cmake', '--install', '.'], cwd=build_dir)
except (CalledProcessError, KeyboardInterrupt) as e:
sys.exit(1)

if __name__ == '__main__':
build_src()
# C++モジュールがすでにビルドされ、core/libに入っているか確認
assert os.path.exists(os.path.join(base_dir, 'core', 'lib', 'core.h')), 'C++モジュールがビルドされていません'

# 追加ライブラリ(pythonライブラリからの相対パスで./lib/*)を読み込めるように設定
if platform.system() == "Windows":
# Windowsでは別途__init__.pyで明示的に読み込む
runtime_library_dirs = []
extra_link_args = []
elif platform.system() == "Darwin":
extra_link_args = ["-Wl,-rpath,@loader_path/lib"]
else:
# $ORIGINはpythonライブラリの読み込み時に自動的に自身のパスに展開される
runtime_library_dirs = ["$ORIGIN/lib"]
extra_link_args = ["-Wl,-rpath,$ORIGIN/lib"]

ext_modules = [
Extension(
Expand All @@ -47,11 +39,11 @@ def build_src():
libraries=["core"],
include_dirs=["core/lib"],
library_dirs=["core/lib"],
runtime_library_dirs=runtime_library_dirs,
extra_link_args=extra_link_args,
)
]

sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests'))
sys.path.append(os.path.join(base_dir, 'tests'))

setup(
name="core",
Expand Down
6 changes: 6 additions & 0 deletions tests/core_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,5 +39,11 @@ def test_metas(self):
core.finalize()
self.assertEqual(metas, core_metas)

def test_supported_devices(self):
    """supported_devices() must return JSON with cpu/cuda keys, cpu truthy."""
    device_info = json.loads(core.supported_devices())
    for key in ("cpu", "cuda"):
        self.assertIn(key, device_info)
    # CPU inference must always be reported as available.
    self.assertTrue(device_info["cpu"])

if __name__ == '__main__':
unittest.main()