From c00bd164d56fd9b145530f1b96eedcfcf73c9f05 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Wed, 7 Dec 2022 18:32:18 +0000 Subject: [PATCH 1/2] feat: add LRS API PiperOrigin-RevId: 493606501 Source-Link: https://github.com/googleapis/googleapis/commit/a1b5429eec257c4f3a8425b20d4689d1df5812bc Source-Link: https://github.com/googleapis/googleapis-gen/commit/ea4e7802b7da7db3a2f422a5dbf4e3f4f8c6067d Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWNsb3VkLXRleHR0b3NwZWVjaC8uT3dsQm90LnlhbWwiLCJoIjoiZWE0ZTc4MDJiN2RhN2RiM2EyZjQyMmE1ZGJmNGUzZjRmOGM2MDY3ZCJ9 --- .../v1/.eslintignore | 7 + .../v1/.eslintrc.json | 3 + .../google-cloud-texttospeech/v1/.gitignore | 14 + .../google-cloud-texttospeech/v1/.jsdoc.js | 55 ++ .../google-cloud-texttospeech/v1/.mocharc.js | 33 + .../v1/.prettierrc.js | 22 + .../google-cloud-texttospeech/v1/README.md | 1 + .../v1/linkinator.config.json | 16 + .../google-cloud-texttospeech/v1/package.json | 65 ++ .../cloud/texttospeech/v1/cloud_tts.proto | 304 +++++++++ .../cloud/texttospeech/v1/cloud_tts_lrs.proto | 90 +++ ...metadata.google.cloud.texttospeech.v1.json | 159 +++++ .../v1/text_to_speech.list_voices.js | 67 ++ .../v1/text_to_speech.synthesize_speech.js | 71 ++ ..._audio_synthesize.synthesize_long_audio.js | 82 +++ .../google-cloud-texttospeech/v1/src/index.ts | 27 + .../v1/src/v1/gapic_metadata.json | 67 ++ .../v1/src/v1/index.ts | 20 + .../v1/src/v1/text_to_speech_client.ts | 513 +++++++++++++++ .../src/v1/text_to_speech_client_config.json | 36 + ..._to_speech_long_audio_synthesize_client.ts | 614 ++++++++++++++++++ ...h_long_audio_synthesize_client_config.json | 31 + ...eech_long_audio_synthesize_proto_list.json | 4 + .../v1/src/v1/text_to_speech_proto_list.json | 4 + .../system-test/fixtures/sample/src/index.js | 28 + .../system-test/fixtures/sample/src/index.ts | 38 ++ .../v1/system-test/install.ts | 49 ++ ...text_to_speech_long_audio_synthesize_v1.ts | 591 +++++++++++++++++ .../v1/test/gapic_text_to_speech_v1.ts | 349 ++++++++++ 
.../v1/tsconfig.json | 19 + .../v1/webpack.config.js | 64 ++ .../v1beta1/.eslintignore | 7 + .../v1beta1/.eslintrc.json | 3 + .../v1beta1/.gitignore | 14 + .../v1beta1/.jsdoc.js | 55 ++ .../v1beta1/.mocharc.js | 33 + .../v1beta1/.prettierrc.js | 22 + .../v1beta1/README.md | 1 + .../v1beta1/linkinator.config.json | 16 + .../v1beta1/package.json | 65 ++ .../texttospeech/v1beta1/cloud_tts.proto | 334 ++++++++++ .../texttospeech/v1beta1/cloud_tts_lrs.proto | 90 +++ ...ata.google.cloud.texttospeech.v1beta1.json | 163 +++++ .../v1beta1/text_to_speech.list_voices.js | 67 ++ .../text_to_speech.synthesize_speech.js | 75 +++ ..._audio_synthesize.synthesize_long_audio.js | 82 +++ .../v1beta1/src/index.ts | 27 + .../v1beta1/src/v1beta1/gapic_metadata.json | 67 ++ .../v1beta1/src/v1beta1/index.ts | 20 + .../src/v1beta1/text_to_speech_client.ts | 515 +++++++++++++++ .../v1beta1/text_to_speech_client_config.json | 36 + ..._to_speech_long_audio_synthesize_client.ts | 614 ++++++++++++++++++ ...h_long_audio_synthesize_client_config.json | 31 + ...eech_long_audio_synthesize_proto_list.json | 4 + .../v1beta1/text_to_speech_proto_list.json | 4 + .../system-test/fixtures/sample/src/index.js | 28 + .../system-test/fixtures/sample/src/index.ts | 38 ++ .../v1beta1/system-test/install.ts | 49 ++ ...to_speech_long_audio_synthesize_v1beta1.ts | 591 +++++++++++++++++ .../test/gapic_text_to_speech_v1beta1.ts | 349 ++++++++++ .../v1beta1/tsconfig.json | 19 + .../v1beta1/webpack.config.js | 64 ++ 62 files changed, 6926 insertions(+) create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.eslintignore create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.eslintrc.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.gitignore create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.jsdoc.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.mocharc.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.prettierrc.js create mode 
100644 owl-bot-staging/google-cloud-texttospeech/v1/README.md create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/linkinator.config.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/package.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts.proto create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.list_voices.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.synthesize_speech.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/index.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/gapic_metadata.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/index.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client_config.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client_config.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_proto_list.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_proto_list.json create mode 100644 
owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/system-test/install.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_long_audio_synthesize_v1.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_v1.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/tsconfig.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/webpack.config.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintignore create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintrc.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.gitignore create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.jsdoc.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.mocharc.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.prettierrc.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/README.md create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/linkinator.config.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/package.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.texttospeech.v1beta1.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.list_voices.js create mode 100644 
owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.synthesize_speech.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/index.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/index.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client_config.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client_config.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_proto_list.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_proto_list.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.js create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/install.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_long_audio_synthesize_v1beta1.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_v1beta1.ts create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/tsconfig.json create mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/webpack.config.js diff 
--git a/owl-bot-staging/google-cloud-texttospeech/v1/.eslintignore b/owl-bot-staging/google-cloud-texttospeech/v1/.eslintignore new file mode 100644 index 00000000000..cfc348ec4d1 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/.eslintignore @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.eslintrc.json b/owl-bot-staging/google-cloud-texttospeech/v1/.eslintrc.json new file mode 100644 index 00000000000..78215349546 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.gitignore b/owl-bot-staging/google-cloud-texttospeech/v1/.gitignore new file mode 100644 index 00000000000..5d32b23782f --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.jsdoc.js b/owl-bot-staging/google-cloud-texttospeech/v1/.jsdoc.js new file mode 100644 index 00000000000..929b3c59840 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/.jsdoc.js @@ -0,0 +1,55 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2022 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/text-to-speech', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.mocharc.js b/owl-bot-staging/google-cloud-texttospeech/v1/.mocharc.js new file mode 100644 index 00000000000..481c522b00f --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/.mocharc.js @@ -0,0 +1,33 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.prettierrc.js b/owl-bot-staging/google-cloud-texttospeech/v1/.prettierrc.js new file mode 100644 index 00000000000..494e147865d --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/.prettierrc.js @@ -0,0 +1,22 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/README.md b/owl-bot-staging/google-cloud-texttospeech/v1/README.md new file mode 100644 index 00000000000..3eaadd6a1c8 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/README.md @@ -0,0 +1 @@ +Texttospeech: Nodejs Client diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/linkinator.config.json b/owl-bot-staging/google-cloud-texttospeech/v1/linkinator.config.json new file mode 100644 index 00000000000..befd23c8633 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/linkinator.config.json @@ -0,0 +1,16 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" + ], + "silent": true, + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/package.json b/owl-bot-staging/google-cloud-texttospeech/v1/package.json new file mode 100644 index 00000000000..e0bd4130ffb --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/package.json @@ -0,0 +1,65 @@ +{ + "name": "@google-cloud/text-to-speech", + "version": "0.1.0", + "description": "Texttospeech client for Node.js", + "repository": "googleapis/nodejs-texttospeech", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google texttospeech", + "texttospeech", + "text to speech", + "text to speech long audio synthesize" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . 
&& cp -r protos build/ && minifyProtoJson", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "predocs-test": "npm run docs", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^3.5.2" + }, + "devDependencies": { + "@types/mocha": "^9.1.1", + "@types/node": "^16.11.62", + "@types/sinon": "^10.0.13", + "c8": "^7.12.0", + "gts": "^3.1.1", + "jsdoc": "^3.6.11", + "jsdoc-fresh": "^2.0.1", + "jsdoc-region-tag": "^2.0.1", + "linkinator": "^4.0.3", + "mocha": "^10.0.0", + "null-loader": "^4.0.1", + "pack-n-play": "^1.0.0-2", + "sinon": "^14.0.0", + "ts-loader": "^8.4.0", + "typescript": "^4.8.3", + "webpack": "^4.46.0", + "webpack-cli": "^4.10.0" + }, + "engines": { + "node": ">=v12" + } +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts.proto b/owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts.proto new file mode 100644 index 00000000000..b50d3698fb7 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts.proto @@ -0,0 +1,304 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.texttospeech.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.TextToSpeech.V1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/texttospeech/v1;texttospeech"; +option java_multiple_files = true; +option java_outer_classname = "TextToSpeechProto"; +option java_package = "com.google.cloud.texttospeech.v1"; +option php_namespace = "Google\\Cloud\\TextToSpeech\\V1"; +option ruby_package = "Google::Cloud::TextToSpeech::V1"; +option (google.api.resource_definition) = { + type: "automl.googleapis.com/Model" + pattern: "projects/{project}/locations/{location}/models/{model}" +}; + +// Service that implements Google Cloud Text-to-Speech API. +service TextToSpeech { + option (google.api.default_host) = "texttospeech.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Returns a list of Voice supported for synthesis. + rpc ListVoices(ListVoicesRequest) returns (ListVoicesResponse) { + option (google.api.http) = { + get: "/v1/voices" + }; + option (google.api.method_signature) = "language_code"; + } + + // Synthesizes speech synchronously: receive results after all text input + // has been processed. + rpc SynthesizeSpeech(SynthesizeSpeechRequest) + returns (SynthesizeSpeechResponse) { + option (google.api.http) = { + post: "/v1/text:synthesize" + body: "*" + }; + option (google.api.method_signature) = "input,voice,audio_config"; + } +} + +// Gender of the voice as described in +// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice). +enum SsmlVoiceGender { + // An unspecified gender. + // In VoiceSelectionParams, this means that the client doesn't care which + // gender the selected voice will have. 
In the Voice field of + // ListVoicesResponse, this may mean that the voice doesn't fit any of the + // other categories in this enum, or that the gender of the voice isn't known. + SSML_VOICE_GENDER_UNSPECIFIED = 0; + + // A male voice. + MALE = 1; + + // A female voice. + FEMALE = 2; + + // A gender-neutral voice. This voice is not yet supported. + NEUTRAL = 3; +} + +// Configuration to set up audio encoder. The encoding determines the output +// audio format that we'd like. +enum AudioEncoding { + // Not specified. Will return result + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. + AUDIO_ENCODING_UNSPECIFIED = 0; + + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + // Audio content returned as LINEAR16 also contains a WAV header. + LINEAR16 = 1; + + // MP3 audio at 32kbps. + MP3 = 2; + + // Opus encoded audio wrapped in an ogg container. The result will be a + // file which can be played natively on Android, and in browsers (at least + // Chrome and Firefox). The quality of the encoding is considerably higher + // than MP3 while using approximately the same bitrate. + OGG_OPUS = 3; + + // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. + // Audio content returned as MULAW also contains a WAV header. + MULAW = 5; + + // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law. + // Audio content returned as ALAW also contains a WAV header. + ALAW = 6; +} + +// The top-level message sent by the client for the `ListVoices` method. +message ListVoicesRequest { + // Optional. Recommended. + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + // If not specified, the API will return all supported voices. + // If specified, the ListVoices call will only return voices that can be used + // to synthesize this language_code. For example, if you specify `"en-NZ"`, + // all `"en-NZ"` voices will be returned. 
If you specify `"no"`, both + // `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be + // returned. + string language_code = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// The message returned to the client by the `ListVoices` method. +message ListVoicesResponse { + // The list of voices. + repeated Voice voices = 1; +} + +// Description of a voice supported by the TTS service. +message Voice { + // The languages that this voice supports, expressed as + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g. + // "en-US", "es-419", "cmn-tw"). + repeated string language_codes = 1; + + // The name of this voice. Each distinct voice has a unique name. + string name = 2; + + // The gender of this voice. + SsmlVoiceGender ssml_gender = 3; + + // The natural sample rate (in hertz) for this voice. + int32 natural_sample_rate_hertz = 4; +} + +// The top-level message sent by the client for the `SynthesizeSpeech` method. +message SynthesizeSpeechRequest { + // Required. The Synthesizer requires either plain text or SSML as input. + SynthesisInput input = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The desired voice of the synthesized audio. + VoiceSelectionParams voice = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The configuration of the synthesized audio. + AudioConfig audio_config = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Contains text input to be synthesized. Either `text` or `ssml` must be +// supplied. Supplying both or neither returns +// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. The +// input size is limited to 5000 bytes. +message SynthesisInput { + // The input source, which is either plain text or SSML. + oneof input_source { + // The raw text to be synthesized. + string text = 1; + + // The SSML document to be synthesized. The SSML document must be valid + // and well-formed. 
Otherwise the RPC will fail and return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. For + // more information, see + // [SSML](https://cloud.google.com/text-to-speech/docs/ssml). + string ssml = 2; + } +} + +// Description of which voice to use for a synthesis request. +message VoiceSelectionParams { + // Required. The language (and potentially also the region) of the voice + // expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) + // language tag, e.g. "en-US". This should not include a script tag (e.g. use + // "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred + // from the input provided in the SynthesisInput. The TTS service + // will use this parameter to help choose an appropriate voice. Note that + // the TTS service may choose a voice with a slightly different language code + // than the one selected; it may substitute a different region + // (e.g. using en-US rather than en-CA if there isn't a Canadian voice + // available), or even a different language, e.g. using "nb" (Norwegian + // Bokmal) instead of "no" (Norwegian)". + string language_code = 1 [(google.api.field_behavior) = REQUIRED]; + + // The name of the voice. If not set, the service will choose a + // voice based on the other parameters such as language_code and gender. + string name = 2; + + // The preferred gender of the voice. If not set, the service will + // choose a voice based on the other parameters such as language_code and + // name. Note that this is only a preference, not requirement; if a + // voice of the appropriate gender is not available, the synthesizer should + // substitute a voice with a different gender rather than failing the request. + SsmlVoiceGender ssml_gender = 3; + + // The configuration for a custom voice. If [CustomVoiceParams.model] is set, + // the service will choose the custom voice matching the specified + // configuration. 
+ CustomVoiceParams custom_voice = 4; +} + +// Description of audio data to be synthesized. +message AudioConfig { + // Required. The format of the audio byte stream. + AudioEncoding audio_encoding = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Input only. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is + // the normal native speed supported by the specific voice. 2.0 is twice as + // fast, and 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 + // speed. Any other values < 0.25 or > 4.0 will return an error. + double speaking_rate = 2 [ + (google.api.field_behavior) = INPUT_ONLY, + (google.api.field_behavior) = OPTIONAL + ]; + + // Optional. Input only. Speaking pitch, in the range [-20.0, 20.0]. 20 means + // increase 20 semitones from the original pitch. -20 means decrease 20 + // semitones from the original pitch. + double pitch = 3 [ + (google.api.field_behavior) = INPUT_ONLY, + (google.api.field_behavior) = OPTIONAL + ]; + + // Optional. Input only. Volume gain (in dB) of the normal native volume + // supported by the specific voice, in the range [-96.0, 16.0]. If unset, or + // set to a value of 0.0 (dB), will play at normal native signal amplitude. A + // value of -6.0 (dB) will play at approximately half the amplitude of the + // normal native signal amplitude. A value of +6.0 (dB) will play at + // approximately twice the amplitude of the normal native signal amplitude. + // Strongly recommend not to exceed +10 (dB) as there's usually no effective + // increase in loudness for any value greater than that. + double volume_gain_db = 4 [ + (google.api.field_behavior) = INPUT_ONLY, + (google.api.field_behavior) = OPTIONAL + ]; + + // Optional. The synthesis sample rate (in hertz) for this audio. 
When this is + // specified in SynthesizeSpeechRequest, if this is different from the voice's + // natural sample rate, then the synthesizer will honor this request by + // converting to the desired sample rate (which might result in worse audio + // quality), unless the specified sample rate is not supported for the + // encoding chosen, in which case it will fail the request and return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. + int32 sample_rate_hertz = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Input only. An identifier which selects 'audio effects' profiles + // that are applied on (post synthesized) text to speech. Effects are applied + // on top of each other in the order they are given. See + // [audio + // profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for + // current supported profile ids. + repeated string effects_profile_id = 6 [ + (google.api.field_behavior) = INPUT_ONLY, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Description of the custom voice to be synthesized. +message CustomVoiceParams { + // The usage of the synthesized audio. You must report your honest and + // correct usage of the service as it's regulated by contract and will cause + // significant difference in billing. + enum ReportedUsage { + // Request with reported usage unspecified will be rejected. + REPORTED_USAGE_UNSPECIFIED = 0; + + // For scenarios where the synthesized audio is not downloadable and can + // only be used once. For example, real-time request in IVR system. + REALTIME = 1; + + // For scenarios where the synthesized audio is downloadable and can be + // reused. For example, the synthesized audio is downloaded, stored in + // customer service system and played repeatedly. + OFFLINE = 2; + } + + // Required. The name of the AutoML model that synthesizes the custom voice. 
+ string model = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "automl.googleapis.com/Model" } + ]; + + // Optional. The usage of the synthesized audio to be reported. + ReportedUsage reported_usage = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// The message returned to the client by the `SynthesizeSpeech` method. +message SynthesizeSpeechResponse { + // The audio data bytes encoded as specified in the request, including the + // header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS). + // For LINEAR16 audio, we include the WAV header. Note: as + // with all bytes fields, protobuffers use a pure binary representation, + // whereas JSON representations use base64. + bytes audio_content = 1; +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto b/owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto new file mode 100644 index 00000000000..d90f587160c --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto @@ -0,0 +1,90 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.texttospeech.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/texttospeech/v1/cloud_tts.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/timestamp.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.TextToSpeech.V1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/texttospeech/v1;texttospeech"; +option java_multiple_files = true; +option java_outer_classname = "TextToSpeechLongAudioSynthesisProto"; +option java_package = "com.google.cloud.texttospeech.v1"; +option php_namespace = "Google\\Cloud\\TextToSpeech\\V1"; +option ruby_package = "Google::Cloud::TextToSpeech::V1"; + +// Service that implements Google Cloud Text-to-Speech API. +service TextToSpeechLongAudioSynthesize { + option (google.api.default_host) = "texttospeech.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Synthesizes long form text asynchronously. + rpc SynthesizeLongAudio(SynthesizeLongAudioRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/locations/*/voices/*}:SynthesizeLongAudio" + body: "*" + }; + option (google.longrunning.operation_info) = { + response_type: "SynthesizeLongAudioResponse" + metadata_type: "SynthesizeLongAudioMetadata" + }; + } +} + +// The top-level message sent by the client for the +// `SynthesizeLongAudio` method. +message SynthesizeLongAudioRequest { + // The resource states of the request in the form of + // `projects/*/locations/*/voices/*`. + string parent = 1; + + // Required. The Synthesizer requires either plain text or SSML as input. + SynthesisInput input = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The configuration of the synthesized audio. 
+ AudioConfig audio_config = 3 [(google.api.field_behavior) = REQUIRED]; + + // Specifies a Cloud Storage URI for the synthesis results. Must be + // specified in the format: `gs://bucket_name/object_name`, and the bucket + // must already exist. + string output_gcs_uri = 4; + + // The desired voice of the synthesized audio. + VoiceSelectionParams voice = 5; +} + +// The message returned to the client by the `SynthesizeLongAudio` method. +message SynthesizeLongAudioResponse {} + +// Metadata for response returned by the `SynthesizeLongAudio` method. +message SynthesizeLongAudioMetadata { + // Time when the request was received. + google.protobuf.Timestamp start_time = 1; + + // Time of the most recent processing update. + google.protobuf.Timestamp last_update_time = 2; + + // The progress of the most recent processing update in percentage, ie. 70.0%. + double progress_percentage = 3; +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json new file mode 100644 index 00000000000..9e72ec44856 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json @@ -0,0 +1,159 @@ +{ + "clientLibrary": { + "name": "nodejs-texttospeech", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.texttospeech.v1", + "version": "v1" + } + ] + }, + "snippets": [ + { + "regionTag": "texttospeech_v1_generated_TextToSpeech_ListVoices_async", + "title": "TextToSpeech listVoices Sample", + "origin": "API_DEFINITION", + "description": " Returns a list of Voice supported for synthesis.", + "canonical": true, + "file": "text_to_speech.list_voices.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 59, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": 
"ListVoices", + "fullName": "google.cloud.texttospeech.v1.TextToSpeech.ListVoices", + "async": true, + "parameters": [ + { + "name": "language_code", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.texttospeech.v1.ListVoicesResponse", + "client": { + "shortName": "TextToSpeechClient", + "fullName": "google.cloud.texttospeech.v1.TextToSpeechClient" + }, + "method": { + "shortName": "ListVoices", + "fullName": "google.cloud.texttospeech.v1.TextToSpeech.ListVoices", + "service": { + "shortName": "TextToSpeech", + "fullName": "google.cloud.texttospeech.v1.TextToSpeech" + } + } + } + }, + { + "regionTag": "texttospeech_v1_generated_TextToSpeech_SynthesizeSpeech_async", + "title": "TextToSpeech synthesizeSpeech Sample", + "origin": "API_DEFINITION", + "description": " Synthesizes speech synchronously: receive results after all text input has been processed.", + "canonical": true, + "file": "text_to_speech.synthesize_speech.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "SynthesizeSpeech", + "fullName": "google.cloud.texttospeech.v1.TextToSpeech.SynthesizeSpeech", + "async": true, + "parameters": [ + { + "name": "input", + "type": ".google.cloud.texttospeech.v1.SynthesisInput" + }, + { + "name": "voice", + "type": ".google.cloud.texttospeech.v1.VoiceSelectionParams" + }, + { + "name": "audio_config", + "type": ".google.cloud.texttospeech.v1.AudioConfig" + } + ], + "resultType": ".google.cloud.texttospeech.v1.SynthesizeSpeechResponse", + "client": { + "shortName": "TextToSpeechClient", + "fullName": "google.cloud.texttospeech.v1.TextToSpeechClient" + }, + "method": { + "shortName": "SynthesizeSpeech", + "fullName": "google.cloud.texttospeech.v1.TextToSpeech.SynthesizeSpeech", + "service": { + "shortName": "TextToSpeech", + "fullName": "google.cloud.texttospeech.v1.TextToSpeech" + } + } + } + }, + { + "regionTag": 
"texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async", + "title": "TextToSpeech synthesizeLongAudio Sample", + "origin": "API_DEFINITION", + "description": " Synthesizes long form text asynchronously.", + "canonical": true, + "file": "text_to_speech_long_audio_synthesize.synthesize_long_audio.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 74, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "SynthesizeLongAudio", + "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudio", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "input", + "type": ".google.cloud.texttospeech.v1.SynthesisInput" + }, + { + "name": "audio_config", + "type": ".google.cloud.texttospeech.v1.AudioConfig" + }, + { + "name": "output_gcs_uri", + "type": "TYPE_STRING" + }, + { + "name": "voice", + "type": ".google.cloud.texttospeech.v1.VoiceSelectionParams" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "TextToSpeechLongAudioSynthesizeClient", + "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesizeClient" + }, + "method": { + "shortName": "SynthesizeLongAudio", + "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudio", + "service": { + "shortName": "TextToSpeechLongAudioSynthesize", + "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize" + } + } + } + } + ] +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.list_voices.js b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.list_voices.js new file mode 100644 index 00000000000..93e3359b3bc --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.list_voices.js @@ -0,0 +1,67 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main() { + // [START texttospeech_v1_generated_TextToSpeech_ListVoices_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Optional. Recommended. + * BCP-47 (https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + * If not specified, the API will return all supported voices. + * If specified, the ListVoices call will only return voices that can be used + * to synthesize this language_code. For example, if you specify `"en-NZ"`, + * all `"en-NZ"` voices will be returned. If you specify `"no"`, both + * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be + * returned. 
+ */ + // const languageCode = 'abc123' + + // Imports the Texttospeech library + const {TextToSpeechClient} = require('@google-cloud/text-to-speech').v1; + + // Instantiates a client + const texttospeechClient = new TextToSpeechClient(); + + async function callListVoices() { + // Construct request + const request = { + }; + + // Run request + const response = await texttospeechClient.listVoices(request); + console.log(response); + } + + callListVoices(); + // [END texttospeech_v1_generated_TextToSpeech_ListVoices_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.synthesize_speech.js b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.synthesize_speech.js new file mode 100644 index 00000000000..e8a34543f95 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.synthesize_speech.js @@ -0,0 +1,71 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(input, voice, audioConfig) { + // [START texttospeech_v1_generated_TextToSpeech_SynthesizeSpeech_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The Synthesizer requires either plain text or SSML as input. + */ + // const input = {} + /** + * Required. The desired voice of the synthesized audio. + */ + // const voice = {} + /** + * Required. The configuration of the synthesized audio. + */ + // const audioConfig = {} + + // Imports the Texttospeech library + const {TextToSpeechClient} = require('@google-cloud/text-to-speech').v1; + + // Instantiates a client + const texttospeechClient = new TextToSpeechClient(); + + async function callSynthesizeSpeech() { + // Construct request + const request = { + input, + voice, + audioConfig, + }; + + // Run request + const response = await texttospeechClient.synthesizeSpeech(request); + console.log(response); + } + + callSynthesizeSpeech(); + // [END texttospeech_v1_generated_TextToSpeech_SynthesizeSpeech_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js new file mode 100644 index 00000000000..09eebcc9d52 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js @@ -0,0 +1,82 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(input, audioConfig) { + // [START texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * The resource states of the request in the form of + * `projects/* /locations/* /voices/*`. + */ + // const parent = 'abc123' + /** + * Required. The Synthesizer requires either plain text or SSML as input. + */ + // const input = {} + /** + * Required. The configuration of the synthesized audio. + */ + // const audioConfig = {} + /** + * Specifies a Cloud Storage URI for the synthesis results. Must be + * specified in the format: `gs://bucket_name/object_name`, and the bucket + * must already exist. + */ + // const outputGcsUri = 'abc123' + /** + * The desired voice of the synthesized audio. 
+ */ + // const voice = {} + + // Imports the Texttospeech library + const {TextToSpeechLongAudioSynthesizeClient} = require('@google-cloud/text-to-speech').v1; + + // Instantiates a client + const texttospeechClient = new TextToSpeechLongAudioSynthesizeClient(); + + async function callSynthesizeLongAudio() { + // Construct request + const request = { + input, + audioConfig, + }; + + // Run request + const [operation] = await texttospeechClient.synthesizeLongAudio(request); + const [response] = await operation.promise(); + console.log(response); + } + + callSynthesizeLongAudio(); + // [END texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1/src/index.ts new file mode 100644 index 00000000000..ce80840dd10 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/src/index.ts @@ -0,0 +1,27 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import * as v1 from './v1'; +const TextToSpeechClient = v1.TextToSpeechClient; +type TextToSpeechClient = v1.TextToSpeechClient; +const TextToSpeechLongAudioSynthesizeClient = v1.TextToSpeechLongAudioSynthesizeClient; +type TextToSpeechLongAudioSynthesizeClient = v1.TextToSpeechLongAudioSynthesizeClient; +export {v1, TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient}; +export default {v1, TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/gapic_metadata.json b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/gapic_metadata.json new file mode 100644 index 00000000000..2d09d513d5f --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/gapic_metadata.json @@ -0,0 +1,67 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "typescript", + "protoPackage": "google.cloud.texttospeech.v1", + "libraryPackage": "@google-cloud/text-to-speech", + "services": { + "TextToSpeech": { + "clients": { + "grpc": { + "libraryClient": "TextToSpeechClient", + "rpcs": { + "ListVoices": { + "methods": [ + "listVoices" + ] + }, + "SynthesizeSpeech": { + "methods": [ + "synthesizeSpeech" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "TextToSpeechClient", + "rpcs": { + "ListVoices": { + "methods": [ + "listVoices" + ] + }, + "SynthesizeSpeech": { + "methods": [ + "synthesizeSpeech" + ] + } + } + } + } + }, + "TextToSpeechLongAudioSynthesize": { + "clients": { + "grpc": { + "libraryClient": "TextToSpeechLongAudioSynthesizeClient", + "rpcs": { + "SynthesizeLongAudio": { + "methods": [ + "synthesizeLongAudio" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "TextToSpeechLongAudioSynthesizeClient", + "rpcs": { + "SynthesizeLongAudio": { + "methods": [ + "synthesizeLongAudio" + ] + } + } + } + } + } + } +} diff --git 
a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/index.ts new file mode 100644 index 00000000000..3cf28b93bc3 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/index.ts @@ -0,0 +1,20 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +export {TextToSpeechClient} from './text_to_speech_client'; +export {TextToSpeechLongAudioSynthesizeClient} from './text_to_speech_long_audio_synthesize_client'; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client.ts b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client.ts new file mode 100644 index 00000000000..3095c93c41d --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client.ts @@ -0,0 +1,513 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1/text_to_speech_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './text_to_speech_client_config.json'; +const version = require('../../../package.json').version; + +/** + * Service that implements Google Cloud Text-to-Speech API. 
+ * @class + * @memberof v1 + */ +export class TextToSpeechClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + pathTemplates: {[name: string]: gax.PathTemplate}; + textToSpeechStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of TextToSpeechClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. 
If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new TextToSpeechClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof TextToSpeechClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // Request numeric enum values if REST transport is used. 
+ opts.numericEnums = true; + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. 
+ this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. + this.pathTemplates = { + modelPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/models/{model}' + ), + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.texttospeech.v1.TextToSpeech', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.textToSpeechStub) { + return this.textToSpeechStub; + } + + // Put together the "service stub" for + // google.cloud.texttospeech.v1.TextToSpeech. + this.textToSpeechStub = this._gaxGrpc.createStub( + this._opts.fallback ? 
+ (this._protos as protobuf.Root).lookupService('google.cloud.texttospeech.v1.TextToSpeech') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.texttospeech.v1.TextToSpeech, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const textToSpeechStubMethods = + ['listVoices', 'synthesizeSpeech']; + for (const methodName of textToSpeechStubMethods) { + const callPromise = this.textToSpeechStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.textToSpeechStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'texttospeech.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'texttospeech.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. 
+ */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. + */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- +/** + * Returns a list of Voice supported for synthesis. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} [request.languageCode] + * Optional. Recommended. + * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + * If not specified, the API will return all supported voices. + * If specified, the ListVoices call will only return voices that can be used + * to synthesize this language_code. For example, if you specify `"en-NZ"`, + * all `"en-NZ"` voices will be returned. If you specify `"no"`, both + * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be + * returned. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [ListVoicesResponse]{@link google.cloud.texttospeech.v1.ListVoicesResponse}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/text_to_speech.list_voices.js + * region_tag:texttospeech_v1_generated_TextToSpeech_ListVoices_async + */ + listVoices( + request?: protos.google.cloud.texttospeech.v1.IListVoicesRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.texttospeech.v1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1.IListVoicesRequest|undefined, {}|undefined + ]>; + listVoices( + request: protos.google.cloud.texttospeech.v1.IListVoicesRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.texttospeech.v1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1.IListVoicesRequest|null|undefined, + {}|null|undefined>): void; + listVoices( + request: protos.google.cloud.texttospeech.v1.IListVoicesRequest, + callback: Callback< + protos.google.cloud.texttospeech.v1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1.IListVoicesRequest|null|undefined, + {}|null|undefined>): void; + listVoices( + request?: protos.google.cloud.texttospeech.v1.IListVoicesRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.texttospeech.v1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1.IListVoicesRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.texttospeech.v1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1.IListVoicesRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.texttospeech.v1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1.IListVoicesRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + this.initialize(); + return 
this.innerApiCalls.listVoices(request, options, callback); + } +/** + * Synthesizes speech synchronously: receive results after all text input + * has been processed. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.texttospeech.v1.SynthesisInput} request.input + * Required. The Synthesizer requires either plain text or SSML as input. + * @param {google.cloud.texttospeech.v1.VoiceSelectionParams} request.voice + * Required. The desired voice of the synthesized audio. + * @param {google.cloud.texttospeech.v1.AudioConfig} request.audioConfig + * Required. The configuration of the synthesized audio. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [SynthesizeSpeechResponse]{@link google.cloud.texttospeech.v1.SynthesizeSpeechResponse}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/text_to_speech.synthesize_speech.js + * region_tag:texttospeech_v1_generated_TextToSpeech_SynthesizeSpeech_async + */ + synthesizeSpeech( + request?: protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|undefined, {}|undefined + ]>; + synthesizeSpeech( + request: protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|null|undefined, + {}|null|undefined>): void; + synthesizeSpeech( + request: protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest, + callback: Callback< + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|null|undefined, + {}|null|undefined>): void; + synthesizeSpeech( + request?: protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + 
options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + this.initialize(); + return this.innerApiCalls.synthesizeSpeech(request, options, callback); + } + + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified model resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} model + * @returns {string} Resource name string. + */ + modelPath(project:string,location:string,model:string) { + return this.pathTemplates.modelPathTemplate.render({ + project: project, + location: location, + model: model, + }); + } + + /** + * Parse the project from Model resource. + * + * @param {string} modelName + * A fully-qualified path representing Model resource. + * @returns {string} A string representing the project. + */ + matchProjectFromModelName(modelName: string) { + return this.pathTemplates.modelPathTemplate.match(modelName).project; + } + + /** + * Parse the location from Model resource. + * + * @param {string} modelName + * A fully-qualified path representing Model resource. + * @returns {string} A string representing the location. + */ + matchLocationFromModelName(modelName: string) { + return this.pathTemplates.modelPathTemplate.match(modelName).location; + } + + /** + * Parse the model from Model resource. + * + * @param {string} modelName + * A fully-qualified path representing Model resource. + * @returns {string} A string representing the model. + */ + matchModelFromModelName(modelName: string) { + return this.pathTemplates.modelPathTemplate.match(modelName).model; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+ */
+  close(): Promise<void> {
+    if (this.textToSpeechStub && !this._terminated) {
+      return this.textToSpeechStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client_config.json b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client_config.json
new file mode 100644
index 00000000000..716efc6e558
--- /dev/null
+++ b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client_config.json
@@ -0,0 +1,36 @@
+{
+  "interfaces": {
+    "google.cloud.texttospeech.v1.TextToSpeech": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "ListVoices": {
+          "timeout_millis": 300000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        },
+        "SynthesizeSpeech": {
+          "timeout_millis": 300000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client.ts b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client.ts
new file mode 100644
index 00000000000..326aaafef56
--- /dev/null
+++ b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client.ts
@@ -0,0 +1,614 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1/text_to_speech_long_audio_synthesize_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './text_to_speech_long_audio_synthesize_client_config.json'; +const version = require('../../../package.json').version; + +/** + * Service that implements Google Cloud Text-to-Speech API. 
+ * @class + * @memberof v1 + */ +export class TextToSpeechLongAudioSynthesizeClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + operationsClient: gax.OperationsClient; + textToSpeechLongAudioSynthesizeStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of TextToSpeechLongAudioSynthesizeClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. 
If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new TextToSpeechLongAudioSynthesizeClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof TextToSpeechLongAudioSynthesizeClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // Request numeric enum values if REST transport is used. 
+ opts.numericEnums = true; + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. 
+ this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = []; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const synthesizeLongAudioResponse = protoFilesRoot.lookup( + '.google.cloud.texttospeech.v1.SynthesizeLongAudioResponse') as gax.protobuf.Type; + const synthesizeLongAudioMetadata = protoFilesRoot.lookup( + '.google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata') as gax.protobuf.Type; + + this.descriptors.longrunning = { + synthesizeLongAudio: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + synthesizeLongAudioResponse.decode.bind(synthesizeLongAudioResponse), + synthesizeLongAudioMetadata.decode.bind(synthesizeLongAudioMetadata)) + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. 
+ * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.textToSpeechLongAudioSynthesizeStub) { + return this.textToSpeechLongAudioSynthesizeStub; + } + + // Put together the "service stub" for + // google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize. + this.textToSpeechLongAudioSynthesizeStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+    const textToSpeechLongAudioSynthesizeStubMethods =
+        ['synthesizeLongAudio'];
+    for (const methodName of textToSpeechLongAudioSynthesizeStubMethods) {
+      const callPromise = this.textToSpeechLongAudioSynthesizeStub.then(
+        stub => (...args: Array<{}>) => {
+          if (this._terminated) {
+            return Promise.reject('The client has already been closed.');
+          }
+          const func = stub[methodName];
+          return func.apply(stub, args);
+        },
+        (err: Error|null|undefined) => () => {
+          throw err;
+        });
+
+      const descriptor =
+        this.descriptors.longrunning[methodName] ||
+        undefined;
+      const apiCall = this._gaxModule.createApiCall(
+        callPromise,
+        this._defaults[methodName],
+        descriptor,
+        this._opts.fallback
+      );
+
+      this.innerApiCalls[methodName] = apiCall;
+    }
+
+    return this.textToSpeechLongAudioSynthesizeStub;
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @returns {string} The DNS address for this service.
+   */
+  static get servicePath() {
+    return 'texttospeech.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service - same as servicePath(),
+   * exists for compatibility reasons.
+   * @returns {string} The DNS address for this service.
+   */
+  static get apiEndpoint() {
+    return 'texttospeech.googleapis.com';
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/cloud-platform'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+ */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+
+/**
+ * Synthesizes long form text asynchronously.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.parent
+ *   The resource states of the request in the form of
+ *   `projects/* /locations/* /voices/*`.
+ * @param {google.cloud.texttospeech.v1.SynthesisInput} request.input
+ *   Required. The Synthesizer requires either plain text or SSML as input.
+ * @param {google.cloud.texttospeech.v1.AudioConfig} request.audioConfig
+ *   Required. The configuration of the synthesized audio.
+ * @param {string} request.outputGcsUri
+ *   Specifies a Cloud Storage URI for the synthesis results. Must be
+ *   specified in the format: `gs://bucket_name/object_name`, and the bucket
+ *   must already exist.
+ * @param {google.cloud.texttospeech.v1.VoiceSelectionParams} request.voice
+ *   The desired voice of the synthesized audio.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing
+ *   a long running operation. Its `promise()` method returns a promise
+ *   you can `await` for.
+ *   Please see the
+ *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations)
+ *   for more details and examples.
+ * @example include:samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js
+ * region_tag:texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async
+ */
+  synthesizeLongAudio(
+      request?: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest,
+      options?: CallOptions):
+      Promise<[
+        LROperation<protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata>,
+        protos.google.longrunning.IOperation|undefined, {}|undefined
+      ]>;
+  synthesizeLongAudio(
+      request: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest,
+      options: CallOptions,
+      callback: Callback<
+          LROperation<protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>): void;
+  synthesizeLongAudio(
+      request: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest,
+      callback: Callback<
+          LROperation<protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>): void;
+  synthesizeLongAudio(
+      request?: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          LROperation<protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          LROperation<protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        LROperation<protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata>,
+        protos.google.longrunning.IOperation|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'parent': request.parent ?? 
'',
+    });
+    this.initialize();
+    return this.innerApiCalls.synthesizeLongAudio(request, options, callback);
+  }
+/**
+ * Check the status of the long running operation returned by `synthesizeLongAudio()`.
+ * @param {String} name
+ *   The operation name that will be passed.
+ * @returns {Promise} - The promise which resolves to an object.
+ *   The decoded operation object has result and metadata field to get information from.
+ *   Please see the
+ *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations)
+ *   for more details and examples.
+ * @example include:samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js
+ * region_tag:texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async
+ */
+  async checkSynthesizeLongAudioProgress(name: string): Promise<LROperation<protos.google.cloud.texttospeech.v1.SynthesizeLongAudioResponse, protos.google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata>>{
+    const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name});
+    const [operation] = await this.operationsClient.getOperation(request);
+    const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.synthesizeLongAudio, this._gaxModule.createDefaultBackoffSettings());
+    return decodeOperation as LROperation<protos.google.cloud.texttospeech.v1.SynthesizeLongAudioResponse, protos.google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata>;
+  }
+/**
+ * Gets the latest state of a long-running operation. Clients can use this
+ * method to poll the operation result at intervals as recommended by the API
+ * service.
+ *
+ * @param {Object} request - The request object that will be sent.
+ * @param {string} request.name - The name of the operation resource.
+ * @param {Object=} options
+ *   Optional parameters. You can override the default settings for this call,
+ *   e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link
+ *   https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the
+ *   details.
+ * @param {function(?Error, ?Object)=} callback
+ *   The function which will be called with the result of the API call.
+ * + * The second parameter to the callback is an object representing + * [google.longrunning.Operation]{@link + * external:"google.longrunning.Operation"}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [google.longrunning.Operation]{@link + * external:"google.longrunning.Operation"}. The promise has a method named + * "cancel" which cancels the ongoing API call. + * + * @example + * ``` + * const client = longrunning.operationsClient(); + * const name = ''; + * const [response] = await client.getOperation({name}); + * // doThingsWith(response) + * ``` + */ + getOperation( + request: protos.google.longrunning.GetOperationRequest, + options?: + | gax.CallOptions + | Callback< + protos.google.longrunning.Operation, + protos.google.longrunning.GetOperationRequest, + {} | null | undefined + >, + callback?: Callback< + protos.google.longrunning.Operation, + protos.google.longrunning.GetOperationRequest, + {} | null | undefined + > + ): Promise<[protos.google.longrunning.Operation]> { + return this.operationsClient.getOperation(request, options, callback); + } + /** + * Lists operations that match the specified filter in the request. If the + * server doesn't support this method, it returns `UNIMPLEMENTED`. Returns an iterable object. + * + * For-await-of syntax is used with the iterable to recursively get response element on-demand. + * + * @param {Object} request - The request object that will be sent. + * @param {string} request.name - The name of the operation collection. + * @param {string} request.filter - The standard list filter. + * @param {number=} request.pageSize - + * The maximum number of resources contained in the underlying API + * response. If page streaming is performed per-resource, this + * parameter does not affect the return value. If page streaming is + * performed per-page, this determines the maximum number of + * resources in a page. 
+ * @param {Object=} options
+ *   Optional parameters. You can override the default settings for this call,
+ *   e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link
+ *   https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the
+ *   details.
+ * @returns {Object}
+ *   An iterable Object that conforms to @link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols.
+ *
+ * @example
+ * ```
+ * const client = longrunning.operationsClient();
+ * for await (const response of client.listOperationsAsync(request));
+ * // doThingsWith(response)
+ * ```
+ */
+  listOperationsAsync(
+    request: protos.google.longrunning.ListOperationsRequest,
+    options?: gax.CallOptions
+  ): AsyncIterable<protos.google.longrunning.ListOperationsResponse> {
+    return this.operationsClient.listOperationsAsync(request, options);
+  }
+  /**
+   * Starts asynchronous cancellation on a long-running operation. The server
+   * makes a best effort to cancel the operation, but success is not
+   * guaranteed. If the server doesn't support this method, it returns
+   * `google.rpc.Code.UNIMPLEMENTED`. Clients can use
+   * {@link Operations.GetOperation} or
+   * other methods to check whether the cancellation succeeded or whether the
+   * operation completed despite cancellation. On successful cancellation,
+   * the operation is not deleted; instead, it becomes an operation with
+   * an {@link Operation.error} value with a {@link google.rpc.Status.code} of
+   * 1, corresponding to `Code.CANCELLED`.
+   *
+   * @param {Object} request - The request object that will be sent.
+   * @param {string} request.name - The name of the operation resource to be cancelled.
+   * @param {Object=} options
+   *   Optional parameters. You can override the default settings for this call,
+   *   e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link
+   *   https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the
+   *   details.
+   * @param {function(?Error)=} callback
+   *   The function which will be called with the result of the API call.
+   * @return {Promise} - The promise which resolves when API call finishes.
+   *   The promise has a method named "cancel" which cancels the ongoing API
+   *   call.
+   *
+   * @example
+   * ```
+   * const client = longrunning.operationsClient();
+   * await client.cancelOperation({name: ''});
+   * ```
+   */
+  cancelOperation(
+    request: protos.google.longrunning.CancelOperationRequest,
+    options?:
+      | gax.CallOptions
+      | Callback<
+          protos.google.protobuf.Empty,
+          protos.google.longrunning.CancelOperationRequest,
+          {} | undefined | null
+        >,
+    callback?: Callback<
+      protos.google.longrunning.CancelOperationRequest,
+      protos.google.protobuf.Empty,
+      {} | undefined | null
+    >
+  ): Promise<protos.google.protobuf.Empty> {
+    return this.operationsClient.cancelOperation(request, options, callback);
+  }
+
+  /**
+   * Deletes a long-running operation. This method indicates that the client is
+   * no longer interested in the operation result. It does not cancel the
+   * operation. If the server doesn't support this method, it returns
+   * `google.rpc.Code.UNIMPLEMENTED`.
+   *
+   * @param {Object} request - The request object that will be sent.
+   * @param {string} request.name - The name of the operation resource to be deleted.
+   * @param {Object=} options
+   *   Optional parameters. You can override the default settings for this call,
+   *   e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link
+   *   https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the
+   *   details.
+   * @param {function(?Error)=} callback
+   *   The function which will be called with the result of the API call.
+   * @return {Promise} - The promise which resolves when API call finishes.
+   *   The promise has a method named "cancel" which cancels the ongoing API
+   *   call.
+ * + * @example + * ``` + * const client = longrunning.operationsClient(); + * await client.deleteOperation({name: ''}); + * ``` + */ + deleteOperation( + request: protos.google.longrunning.DeleteOperationRequest, + options?: + | gax.CallOptions + | Callback< + protos.google.protobuf.Empty, + protos.google.longrunning.DeleteOperationRequest, + {} | null | undefined + >, + callback?: Callback< + protos.google.protobuf.Empty, + protos.google.longrunning.DeleteOperationRequest, + {} | null | undefined + > + ): Promise { + return this.operationsClient.deleteOperation(request, options, callback); + } + + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. + */ + close(): Promise { + if (this.textToSpeechLongAudioSynthesizeStub && !this._terminated) { + return this.textToSpeechLongAudioSynthesizeStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client_config.json b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client_config.json new file mode 100644 index 00000000000..155a14a844d --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client_config.json @@ -0,0 +1,31 @@ +{ + "interfaces": { + "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + 
"total_timeout_millis": 600000 + } + }, + "methods": { + "SynthesizeLongAudio": { + "timeout_millis": 5000000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_proto_list.json b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_proto_list.json new file mode 100644 index 00000000000..58814dcb836 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_proto_list.json @@ -0,0 +1,4 @@ +[ + "../../protos/google/cloud/texttospeech/v1/cloud_tts.proto", + "../../protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto" +] diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_proto_list.json b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_proto_list.json new file mode 100644 index 00000000000..58814dcb836 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_proto_list.json @@ -0,0 +1,4 @@ +[ + "../../protos/google/cloud/texttospeech/v1/cloud_tts.proto", + "../../protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto" +] diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.js new file mode 100644 index 00000000000..a08648fec57 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.js @@ -0,0 +1,28 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const texttospeech = require('@google-cloud/text-to-speech'); + +function main() { + const textToSpeechClient = new texttospeech.TextToSpeechClient(); + const textToSpeechLongAudioSynthesizeClient = new texttospeech.TextToSpeechLongAudioSynthesizeClient(); +} + +main(); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.ts new file mode 100644 index 00000000000..14b3691b2df --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.ts @@ -0,0 +1,38 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient} from '@google-cloud/text-to-speech'; + +// check that the client class type name can be used +function doStuffWithTextToSpeechClient(client: TextToSpeechClient) { + client.close(); +} +function doStuffWithTextToSpeechLongAudioSynthesizeClient(client: TextToSpeechLongAudioSynthesizeClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const textToSpeechClient = new TextToSpeechClient(); + doStuffWithTextToSpeechClient(textToSpeechClient); + // check that the client instance can be created + const textToSpeechLongAudioSynthesizeClient = new TextToSpeechLongAudioSynthesizeClient(); + doStuffWithTextToSpeechLongAudioSynthesizeClient(textToSpeechLongAudioSynthesizeClient); +} + +main(); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/system-test/install.ts b/owl-bot-staging/google-cloud-texttospeech/v1/system-test/install.ts new file mode 100644 index 00000000000..557a57558e1 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/system-test/install.ts @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {packNTest} from 'pack-n-play'; +import {readFileSync} from 'fs'; +import {describe, it} from 'mocha'; + +describe('📦 pack-n-play test', () => { + + it('TypeScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() + } + }; + await packNTest(options); + }); + + it('JavaScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'JavaScript user can use the library', + ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() + } + }; + await packNTest(options); + }); + +}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_long_audio_synthesize_v1.ts b/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_long_audio_synthesize_v1.ts new file mode 100644 index 00000000000..1cd8b48eaf5 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_long_audio_synthesize_v1.ts @@ -0,0 +1,591 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as texttospeechlongaudiosynthesizeModule from '../src'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? 
sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v1.TextToSpeechLongAudioSynthesizeClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports 
deferred initialization', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.textToSpeechLongAudioSynthesizeStub, undefined); + await client.initialize(); + assert(client.textToSpeechLongAudioSynthesizeStub); + }); + + it('has close method for the initialized client', done => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.textToSpeechLongAudioSynthesizeStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.textToSpeechLongAudioSynthesizeStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + 
client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('synthesizeLongAudio', () => { + it('invokes synthesizeLongAudio without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(expectedResponse); + const [operation] = await client.synthesizeLongAudio(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes synthesizeLongAudio without error using callback', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', 
private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.synthesizeLongAudio = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.synthesizeLongAudio( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes synthesizeLongAudio with call error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const 
expectedError = new Error('expected'); + client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.synthesizeLongAudio(request), expectedError); + const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes synthesizeLongAudio with LRO error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.synthesizeLongAudio(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkSynthesizeLongAudioProgress without error', async () => { + const client = new 
texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkSynthesizeLongAudioProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkSynthesizeLongAudioProgress with error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkSynthesizeLongAudioProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + describe('getOperation', () => { + it('invokes getOperation without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.GetOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + 
client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const response = await client.getOperation(request); + assert.deepStrictEqual(response, [expectedResponse]); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0).calledWith(request) + ); + }); + it('invokes getOperation without error using callback', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.GetOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + client.operationsClient.getOperation = sinon.stub().callsArgWith(2, null, expectedResponse); + const promise = new Promise((resolve, reject) => { + client.operationsClient.getOperation( + request, + undefined, + ( + err?: Error | null, + result?: operationsProtos.google.longrunning.Operation | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + it('invokes getOperation with error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.GetOperationRequest() + ); + const expectedError = new Error('expected'); + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(async () => {await client.getOperation(request)}, expectedError); + assert((client.operationsClient.getOperation as SinonStub) + 
.getCall(0).calledWith(request)); + }); + }); + describe('cancelOperation', () => { + it('invokes cancelOperation without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.CancelOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.cancelOperation = stubSimpleCall(expectedResponse); + const response = await client.cancelOperation(request); + assert.deepStrictEqual(response, [expectedResponse]); + assert((client.operationsClient.cancelOperation as SinonStub) + .getCall(0).calledWith(request) + ); + }); + it('invokes cancelOperation without error using callback', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.CancelOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.cancelOperation = sinon.stub().callsArgWith(2, null, expectedResponse); + const promise = new Promise((resolve, reject) => { + client.operationsClient.cancelOperation( + request, + undefined, + ( + err?: Error | null, + result?: protos.google.protobuf.Empty | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.operationsClient.cancelOperation as SinonStub) + .getCall(0)); + }); + it('invokes cancelOperation with error', async () => { + const client = new 
texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.CancelOperationRequest() + ); + const expectedError = new Error('expected'); + client.operationsClient.cancelOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(async () => {await client.cancelOperation(request)}, expectedError); + assert((client.operationsClient.cancelOperation as SinonStub) + .getCall(0).calledWith(request)); + }); + }); + describe('deleteOperation', () => { + it('invokes deleteOperation without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.DeleteOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.deleteOperation = stubSimpleCall(expectedResponse); + const response = await client.deleteOperation(request); + assert.deepStrictEqual(response, [expectedResponse]); + assert((client.operationsClient.deleteOperation as SinonStub) + .getCall(0).calledWith(request) + ); + }); + it('invokes deleteOperation without error using callback', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.DeleteOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.deleteOperation = sinon.stub().callsArgWith(2, null, 
expectedResponse); + const promise = new Promise((resolve, reject) => { + client.operationsClient.deleteOperation( + request, + undefined, + ( + err?: Error | null, + result?: protos.google.protobuf.Empty | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.operationsClient.deleteOperation as SinonStub) + .getCall(0)); + }); + it('invokes deleteOperation with error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.DeleteOperationRequest() + ); + const expectedError = new Error('expected'); + client.operationsClient.deleteOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(async () => {await client.deleteOperation(request)}, expectedError); + assert((client.operationsClient.deleteOperation as SinonStub) + .getCall(0).calledWith(request)); + }); + }); + describe('listOperationsAsync', () => { + it('uses async iteration with listOperations without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsRequest() + ); + const expectedResponse = [ + generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsResponse() + ), + generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsResponse() + ), + generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsResponse() + ), + ]; + client.operationsClient.descriptor.listOperations.asyncIterate = 
stubAsyncIterationCall(expectedResponse); + const responses: operationsProtos.google.longrunning.ListOperationsResponse[] = []; + const iterable = client.operationsClient.listOperationsAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.operationsClient.descriptor.listOperations.asyncIterate as SinonStub) + .getCall(0).args[1], request); + }); + it('uses async iteration with listOperations with error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsRequest() + ); + const expectedError = new Error('expected'); + client.operationsClient.descriptor.listOperations.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.operationsClient.listOperationsAsync(request); + await assert.rejects(async () => { + const responses: operationsProtos.google.longrunning.ListOperationsResponse[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.operationsClient.descriptor.listOperations.asyncIterate as SinonStub) + .getCall(0).args[1], request); + }); + }); +}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_v1.ts b/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_v1.ts new file mode 100644 index 00000000000..c74edffd0ec --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_v1.ts @@ -0,0 +1,349 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as texttospeechModule from '../src'; + +import {protobuf} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? 
sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +describe('v1.TextToSpeechClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = texttospeechModule.v1.TextToSpeechClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = texttospeechModule.v1.TextToSpeechClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = texttospeechModule.v1.TextToSpeechClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new texttospeechModule.v1.TextToSpeechClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new texttospeechModule.v1.TextToSpeechClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.textToSpeechStub, undefined); + await client.initialize(); + assert(client.textToSpeechStub); + }); + + it('has close method for the initialized client', done => { + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.textToSpeechStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + 
assert.strictEqual(client.textToSpeechStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('listVoices', () => { + it('invokes listVoices without error', async () => { + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.ListVoicesRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.ListVoicesResponse() + ); + client.innerApiCalls.listVoices = stubSimpleCall(expectedResponse); + const [response] = await client.listVoices(request); + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes listVoices without error using callback', async () => { + const client = new 
texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.ListVoicesRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.ListVoicesResponse() + ); + client.innerApiCalls.listVoices = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listVoices( + request, + (err?: Error|null, result?: protos.google.cloud.texttospeech.v1.IListVoicesResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes listVoices with error', async () => { + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.ListVoicesRequest() + ); + const expectedError = new Error('expected'); + client.innerApiCalls.listVoices = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listVoices(request), expectedError); + }); + + it('invokes listVoices with closed client', async () => { + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.ListVoicesRequest() + ); + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.listVoices(request), expectedError); + }); + }); + + describe('synthesizeSpeech', () => { + it('invokes synthesizeSpeech without error', async () => { + const client = new 
texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeSpeechRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeSpeechResponse() + ); + client.innerApiCalls.synthesizeSpeech = stubSimpleCall(expectedResponse); + const [response] = await client.synthesizeSpeech(request); + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes synthesizeSpeech without error using callback', async () => { + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeSpeechRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeSpeechResponse() + ); + client.innerApiCalls.synthesizeSpeech = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.synthesizeSpeech( + request, + (err?: Error|null, result?: protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes synthesizeSpeech with error', async () => { + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeSpeechRequest() + ); + const expectedError = new Error('expected'); + client.innerApiCalls.synthesizeSpeech = stubSimpleCall(undefined, 
expectedError); + await assert.rejects(client.synthesizeSpeech(request), expectedError); + }); + + it('invokes synthesizeSpeech with closed client', async () => { + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeSpeechRequest() + ); + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.synthesizeSpeech(request), expectedError); + }); + }); + + describe('Path templates', () => { + + describe('model', () => { + const fakePath = "/rendered/path/model"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + model: "modelValue", + }; + const client = new texttospeechModule.v1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.modelPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.modelPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('modelPath', () => { + const result = client.modelPath("projectValue", "locationValue", "modelValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.modelPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromModelName', () => { + const result = client.matchProjectFromModelName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.modelPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromModelName', () => { + const result = client.matchLocationFromModelName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.modelPathTemplate.match as SinonStub) + 
.getCall(-1).calledWith(fakePath)); + }); + + it('matchModelFromModelName', () => { + const result = client.matchModelFromModelName(fakePath); + assert.strictEqual(result, "modelValue"); + assert((client.pathTemplates.modelPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/tsconfig.json b/owl-bot-staging/google-cloud-texttospeech/v1/tsconfig.json new file mode 100644 index 00000000000..c78f1c884ef --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/webpack.config.js b/owl-bot-staging/google-cloud-texttospeech/v1/webpack.config.js new file mode 100644 index 00000000000..25f059a0979 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1/webpack.config.js @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'TextToSpeech', + filename: './text-to-speech.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintignore b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintignore new file mode 100644 index 00000000000..cfc348ec4d1 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintignore @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintrc.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintrc.json new file mode 100644 index 00000000000..78215349546 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.gitignore b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.gitignore new file mode 100644 index 00000000000..5d32b23782f --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js 
+system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.jsdoc.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.jsdoc.js new file mode 100644 index 00000000000..929b3c59840 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.jsdoc.js @@ -0,0 +1,55 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2022 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/text-to-speech', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.mocharc.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.mocharc.js new file mode 100644 index 00000000000..481c522b00f --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.mocharc.js @@ -0,0 +1,33 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.prettierrc.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.prettierrc.js new file mode 100644 index 00000000000..494e147865d --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.prettierrc.js @@ -0,0 +1,22 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/README.md b/owl-bot-staging/google-cloud-texttospeech/v1beta1/README.md new file mode 100644 index 00000000000..3eaadd6a1c8 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/README.md @@ -0,0 +1 @@ +Texttospeech: Nodejs Client diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/linkinator.config.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/linkinator.config.json new file mode 100644 index 00000000000..befd23c8633 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/linkinator.config.json @@ -0,0 +1,16 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" + ], + "silent": true, + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/package.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/package.json new file mode 100644 index 00000000000..e0bd4130ffb --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/package.json @@ -0,0 +1,65 @@ +{ + "name": "@google-cloud/text-to-speech", + "version": "0.1.0", + "description": "Texttospeech client for Node.js", + "repository": "googleapis/nodejs-texttospeech", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google texttospeech", + "texttospeech", + "text to speech", + "text to speech long audio synthesize" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . 
&& cp -r protos build/ && minifyProtoJson", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "predocs-test": "npm run docs", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^3.5.2" + }, + "devDependencies": { + "@types/mocha": "^9.1.1", + "@types/node": "^16.11.62", + "@types/sinon": "^10.0.13", + "c8": "^7.12.0", + "gts": "^3.1.1", + "jsdoc": "^3.6.11", + "jsdoc-fresh": "^2.0.1", + "jsdoc-region-tag": "^2.0.1", + "linkinator": "^4.0.3", + "mocha": "^10.0.0", + "null-loader": "^4.0.1", + "pack-n-play": "^1.0.0-2", + "sinon": "^14.0.0", + "ts-loader": "^8.4.0", + "typescript": "^4.8.3", + "webpack": "^4.46.0", + "webpack-cli": "^4.10.0" + }, + "engines": { + "node": ">=v12" + } +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto b/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto new file mode 100644 index 00000000000..63e5361dc0a --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto @@ -0,0 +1,334 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.texttospeech.v1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.TextToSpeech.V1Beta1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1;texttospeech"; +option java_multiple_files = true; +option java_outer_classname = "TextToSpeechProto"; +option java_package = "com.google.cloud.texttospeech.v1beta1"; +option php_namespace = "Google\\Cloud\\TextToSpeech\\V1beta1"; +option ruby_package = "Google::Cloud::TextToSpeech::V1beta1"; +option (google.api.resource_definition) = { + type: "automl.googleapis.com/Model" + pattern: "projects/{project}/locations/{location}/models/{model}" +}; + +// Service that implements Google Cloud Text-to-Speech API. +service TextToSpeech { + option (google.api.default_host) = "texttospeech.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Returns a list of Voice supported for synthesis. + rpc ListVoices(ListVoicesRequest) returns (ListVoicesResponse) { + option (google.api.http) = { + get: "/v1beta1/voices" + }; + option (google.api.method_signature) = "language_code"; + } + + // Synthesizes speech synchronously: receive results after all text input + // has been processed. + rpc SynthesizeSpeech(SynthesizeSpeechRequest) returns (SynthesizeSpeechResponse) { + option (google.api.http) = { + post: "/v1beta1/text:synthesize" + body: "*" + }; + option (google.api.method_signature) = "input,voice,audio_config"; + } +} + +// Gender of the voice as described in +// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice). +enum SsmlVoiceGender { + // An unspecified gender. + // In VoiceSelectionParams, this means that the client doesn't care which + // gender the selected voice will have. 
In the Voice field of + // ListVoicesResponse, this may mean that the voice doesn't fit any of the + // other categories in this enum, or that the gender of the voice isn't known. + SSML_VOICE_GENDER_UNSPECIFIED = 0; + + // A male voice. + MALE = 1; + + // A female voice. + FEMALE = 2; + + // A gender-neutral voice. This voice is not yet supported. + NEUTRAL = 3; +} + +// Configuration to set up audio encoder. The encoding determines the output +// audio format that we'd like. +enum AudioEncoding { + // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. + AUDIO_ENCODING_UNSPECIFIED = 0; + + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + // Audio content returned as LINEAR16 also contains a WAV header. + LINEAR16 = 1; + + // MP3 audio at 32kbps. + MP3 = 2; + + // MP3 at 64kbps. + MP3_64_KBPS = 4; + + // Opus encoded audio wrapped in an ogg container. The result will be a + // file which can be played natively on Android, and in browsers (at least + // Chrome and Firefox). The quality of the encoding is considerably higher + // than MP3 while using approximately the same bitrate. + OGG_OPUS = 3; + + // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. + // Audio content returned as MULAW also contains a WAV header. + MULAW = 5; + + // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law. + // Audio content returned as ALAW also contains a WAV header. + ALAW = 6; +} + +// The top-level message sent by the client for the `ListVoices` method. +message ListVoicesRequest { + // Optional. Recommended. + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + // If not specified, the API will return all supported voices. + // If specified, the ListVoices call will only return voices that can be used + // to synthesize this language_code. For example, if you specify `"en-NZ"`, + // all `"en-NZ"` voices will be returned. 
If you specify `"no"`, both + // `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be + // returned. + string language_code = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// The message returned to the client by the `ListVoices` method. +message ListVoicesResponse { + // The list of voices. + repeated Voice voices = 1; +} + +// Description of a voice supported by the TTS service. +message Voice { + // The languages that this voice supports, expressed as + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g. + // "en-US", "es-419", "cmn-tw"). + repeated string language_codes = 1; + + // The name of this voice. Each distinct voice has a unique name. + string name = 2; + + // The gender of this voice. + SsmlVoiceGender ssml_gender = 3; + + // The natural sample rate (in hertz) for this voice. + int32 natural_sample_rate_hertz = 4; +} + +// The top-level message sent by the client for the `SynthesizeSpeech` method. +message SynthesizeSpeechRequest { + // The type of timepoint information that is returned in the response. + enum TimepointType { + // Not specified. No timepoint information will be returned. + TIMEPOINT_TYPE_UNSPECIFIED = 0; + + // Timepoint information of `` tags in SSML input will be returned. + SSML_MARK = 1; + } + + // Required. The Synthesizer requires either plain text or SSML as input. + SynthesisInput input = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The desired voice of the synthesized audio. + VoiceSelectionParams voice = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The configuration of the synthesized audio. + AudioConfig audio_config = 3 [(google.api.field_behavior) = REQUIRED]; + + // Whether and what timepoints are returned in the response. + repeated TimepointType enable_time_pointing = 4; +} + +// Contains text input to be synthesized. Either `text` or `ssml` must be +// supplied. 
Supplying both or neither returns +// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. The input size is limited to 5000 +// bytes. +message SynthesisInput { + // The input source, which is either plain text or SSML. + oneof input_source { + // The raw text to be synthesized. + string text = 1; + + // The SSML document to be synthesized. The SSML document must be valid + // and well-formed. Otherwise the RPC will fail and return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. For more information, see + // [SSML](https://cloud.google.com/text-to-speech/docs/ssml). + string ssml = 2; + } +} + +// Description of which voice to use for a synthesis request. +message VoiceSelectionParams { + // Required. The language (and potentially also the region) of the voice expressed as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. + // "en-US". This should not include a script tag (e.g. use + // "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred + // from the input provided in the SynthesisInput. The TTS service + // will use this parameter to help choose an appropriate voice. Note that + // the TTS service may choose a voice with a slightly different language code + // than the one selected; it may substitute a different region + // (e.g. using en-US rather than en-CA if there isn't a Canadian voice + // available), or even a different language, e.g. using "nb" (Norwegian + // Bokmal) instead of "no" (Norwegian)". + string language_code = 1 [(google.api.field_behavior) = REQUIRED]; + + // The name of the voice. If not set, the service will choose a + // voice based on the other parameters such as language_code and gender. + string name = 2; + + // The preferred gender of the voice. If not set, the service will + // choose a voice based on the other parameters such as language_code and + // name. 
Note that this is only a preference, not requirement; if a + // voice of the appropriate gender is not available, the synthesizer should + // substitute a voice with a different gender rather than failing the request. + SsmlVoiceGender ssml_gender = 3; + + // The configuration for a custom voice. If [CustomVoiceParams.model] is set, + // the service will choose the custom voice matching the specified + // configuration. + CustomVoiceParams custom_voice = 4; +} + +// Description of audio data to be synthesized. +message AudioConfig { + // Required. The format of the audio byte stream. + AudioEncoding audio_encoding = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Input only. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is + // the normal native speed supported by the specific voice. 2.0 is twice as + // fast, and 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 + // speed. Any other values < 0.25 or > 4.0 will return an error. + double speaking_rate = 2 [ + (google.api.field_behavior) = INPUT_ONLY, + (google.api.field_behavior) = OPTIONAL + ]; + + // Optional. Input only. Speaking pitch, in the range [-20.0, 20.0]. 20 means + // increase 20 semitones from the original pitch. -20 means decrease 20 + // semitones from the original pitch. + double pitch = 3 [ + (google.api.field_behavior) = INPUT_ONLY, + (google.api.field_behavior) = OPTIONAL + ]; + + // Optional. Input only. Volume gain (in dB) of the normal native volume + // supported by the specific voice, in the range [-96.0, 16.0]. If unset, or + // set to a value of 0.0 (dB), will play at normal native signal amplitude. A + // value of -6.0 (dB) will play at approximately half the amplitude of the + // normal native signal amplitude. A value of +6.0 (dB) will play at + // approximately twice the amplitude of the normal native signal amplitude. + // Strongly recommend not to exceed +10 (dB) as there's usually no effective + // increase in loudness for any value greater than that. 
+ double volume_gain_db = 4 [ + (google.api.field_behavior) = INPUT_ONLY, + (google.api.field_behavior) = OPTIONAL + ]; + + // Optional. The synthesis sample rate (in hertz) for this audio. When this is + // specified in SynthesizeSpeechRequest, if this is different from the voice's + // natural sample rate, then the synthesizer will honor this request by + // converting to the desired sample rate (which might result in worse audio + // quality), unless the specified sample rate is not supported for the + // encoding chosen, in which case it will fail the request and return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. + int32 sample_rate_hertz = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Input only. An identifier which selects 'audio effects' profiles + // that are applied on (post synthesized) text to speech. Effects are applied + // on top of each other in the order they are given. See + // [audio + // profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for + // current supported profile ids. + repeated string effects_profile_id = 6 [ + (google.api.field_behavior) = INPUT_ONLY, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Description of the custom voice to be synthesized. +message CustomVoiceParams { + // The usage of the synthesized audio. You must report your honest and + // correct usage of the service as it's regulated by contract and will cause + // significant difference in billing. + enum ReportedUsage { + // Request with reported usage unspecified will be rejected. + REPORTED_USAGE_UNSPECIFIED = 0; + + // For scenarios where the synthesized audio is not downloadable and can + // only be used once. For example, real-time request in IVR system. + REALTIME = 1; + + // For scenarios where the synthesized audio is downloadable and can be + // reused. For example, the synthesized audio is downloaded, stored in + // customer service system and played repeatedly. 
+ OFFLINE = 2; + } + + // Required. The name of the AutoML model that synthesizes the custom voice. + string model = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; + + // Optional. The usage of the synthesized audio to be reported. + ReportedUsage reported_usage = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// The message returned to the client by the `SynthesizeSpeech` method. +message SynthesizeSpeechResponse { + // The audio data bytes encoded as specified in the request, including the + // header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS). + // For LINEAR16 audio, we include the WAV header. Note: as + // with all bytes fields, protobuffers use a pure binary representation, + // whereas JSON representations use base64. + bytes audio_content = 1; + + // A link between a position in the original request input and a corresponding + // time in the output audio. It's only supported via `` of SSML input. + repeated Timepoint timepoints = 2; + + // The audio metadata of `audio_content`. + AudioConfig audio_config = 4; +} + +// This contains a mapping between a certain point in the input text and a +// corresponding time in the output audio. +message Timepoint { + // Timepoint name as received from the client within `` tag. + string mark_name = 4; + + // Time offset in seconds from the start of the synthesized audio. 
+ double time_seconds = 3; +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto b/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto new file mode 100644 index 00000000000..9adcd6ae600 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto @@ -0,0 +1,90 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.texttospeech.v1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/texttospeech/v1beta1/cloud_tts.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/timestamp.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.TextToSpeech.V1Beta1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1;texttospeech"; +option java_multiple_files = true; +option java_outer_classname = "TextToSpeechLongAudioSynthesisProto"; +option java_package = "com.google.cloud.texttospeech.v1beta1"; +option php_namespace = "Google\\Cloud\\TextToSpeech\\V1beta1"; +option ruby_package = "Google::Cloud::TextToSpeech::V1beta1"; + +// Service that implements Google Cloud Text-to-Speech API. 
+service TextToSpeechLongAudioSynthesize { + option (google.api.default_host) = "texttospeech.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Synthesizes long form text asynchronously. + rpc SynthesizeLongAudio(SynthesizeLongAudioRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta1/{parent=projects/*/locations/*/voices/*}:SynthesizeLongAudio" + body: "*" + }; + option (google.longrunning.operation_info) = { + response_type: "SynthesizeLongAudioResponse" + metadata_type: "SynthesizeLongAudioMetadata" + }; + } +} + +// The top-level message sent by the client for the +// `SynthesizeLongAudio` method. +message SynthesizeLongAudioRequest { + // The resource states of the request in the form of + // projects/*/locations/*/voices/*. + string parent = 1; + + // Required. The Synthesizer requires either plain text or SSML as input. + SynthesisInput input = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The configuration of the synthesized audio. + AudioConfig audio_config = 3 [(google.api.field_behavior) = REQUIRED]; + + // Specifies a Cloud Storage URI for the synthesis results. Must be + // specified in the format: `gs://bucket_name/object_name`, and the bucket + // must already exist. + string output_gcs_uri = 4; + + // The desired voice of the synthesized audio. + VoiceSelectionParams voice = 5; +} + +// The message returned to the client by the `SynthesizeLongAudio` method. +message SynthesizeLongAudioResponse {} + +// Metadata for response returned by the `SynthesizeLongAudio` method. +message SynthesizeLongAudioMetadata { + // Time when the request was received. + google.protobuf.Timestamp start_time = 1; + + // Time of the most recent processing update. + google.protobuf.Timestamp last_update_time = 2; + + // The progress of the most recent processing update in percentage, ie. 70.0%. 
+ double progress_percentage = 3; +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.texttospeech.v1beta1.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.texttospeech.v1beta1.json new file mode 100644 index 00000000000..496c28f83c0 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.texttospeech.v1beta1.json @@ -0,0 +1,163 @@ +{ + "clientLibrary": { + "name": "nodejs-texttospeech", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.texttospeech.v1beta1", + "version": "v1beta1" + } + ] + }, + "snippets": [ + { + "regionTag": "texttospeech_v1beta1_generated_TextToSpeech_ListVoices_async", + "title": "TextToSpeech listVoices Sample", + "origin": "API_DEFINITION", + "description": " Returns a list of Voice supported for synthesis.", + "canonical": true, + "file": "text_to_speech.list_voices.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 59, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListVoices", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech.ListVoices", + "async": true, + "parameters": [ + { + "name": "language_code", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.texttospeech.v1beta1.ListVoicesResponse", + "client": { + "shortName": "TextToSpeechClient", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechClient" + }, + "method": { + "shortName": "ListVoices", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech.ListVoices", + "service": { + "shortName": "TextToSpeech", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech" + } + } + } + }, + { + "regionTag": "texttospeech_v1beta1_generated_TextToSpeech_SynthesizeSpeech_async", + "title": "TextToSpeech synthesizeSpeech Sample", + "origin": "API_DEFINITION", + 
"description": " Synthesizes speech synchronously: receive results after all text input has been processed.", + "canonical": true, + "file": "text_to_speech.synthesize_speech.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 67, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "SynthesizeSpeech", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech.SynthesizeSpeech", + "async": true, + "parameters": [ + { + "name": "input", + "type": ".google.cloud.texttospeech.v1beta1.SynthesisInput" + }, + { + "name": "voice", + "type": ".google.cloud.texttospeech.v1beta1.VoiceSelectionParams" + }, + { + "name": "audio_config", + "type": ".google.cloud.texttospeech.v1beta1.AudioConfig" + }, + { + "name": "enable_time_pointing", + "type": "TYPE_ENUM[]" + } + ], + "resultType": ".google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse", + "client": { + "shortName": "TextToSpeechClient", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechClient" + }, + "method": { + "shortName": "SynthesizeSpeech", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech.SynthesizeSpeech", + "service": { + "shortName": "TextToSpeech", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech" + } + } + } + }, + { + "regionTag": "texttospeech_v1beta1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async", + "title": "TextToSpeech synthesizeLongAudio Sample", + "origin": "API_DEFINITION", + "description": " Synthesizes long form text asynchronously.", + "canonical": true, + "file": "text_to_speech_long_audio_synthesize.synthesize_long_audio.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 74, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "SynthesizeLongAudio", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudio", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "input", + 
"type": ".google.cloud.texttospeech.v1beta1.SynthesisInput" + }, + { + "name": "audio_config", + "type": ".google.cloud.texttospeech.v1beta1.AudioConfig" + }, + { + "name": "output_gcs_uri", + "type": "TYPE_STRING" + }, + { + "name": "voice", + "type": ".google.cloud.texttospeech.v1beta1.VoiceSelectionParams" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "TextToSpeechLongAudioSynthesizeClient", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesizeClient" + }, + "method": { + "shortName": "SynthesizeLongAudio", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudio", + "service": { + "shortName": "TextToSpeechLongAudioSynthesize", + "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize" + } + } + } + } + ] +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.list_voices.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.list_voices.js new file mode 100644 index 00000000000..3410c0eddfc --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.list_voices.js @@ -0,0 +1,67 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main() { + // [START texttospeech_v1beta1_generated_TextToSpeech_ListVoices_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Optional. Recommended. + * BCP-47 (https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + * If not specified, the API will return all supported voices. + * If specified, the ListVoices call will only return voices that can be used + * to synthesize this language_code. For example, if you specify `"en-NZ"`, + * all `"en-NZ"` voices will be returned. If you specify `"no"`, both + * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be + * returned. 
+ */ + // const languageCode = 'abc123' + + // Imports the Texttospeech library + const {TextToSpeechClient} = require('@google-cloud/text-to-speech').v1beta1; + + // Instantiates a client + const texttospeechClient = new TextToSpeechClient(); + + async function callListVoices() { + // Construct request + const request = { + }; + + // Run request + const response = await texttospeechClient.listVoices(request); + console.log(response); + } + + callListVoices(); + // [END texttospeech_v1beta1_generated_TextToSpeech_ListVoices_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.synthesize_speech.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.synthesize_speech.js new file mode 100644 index 00000000000..5857dd63dde --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.synthesize_speech.js @@ -0,0 +1,75 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(input, voice, audioConfig) { + // [START texttospeech_v1beta1_generated_TextToSpeech_SynthesizeSpeech_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The Synthesizer requires either plain text or SSML as input. + */ + // const input = {} + /** + * Required. The desired voice of the synthesized audio. + */ + // const voice = {} + /** + * Required. The configuration of the synthesized audio. + */ + // const audioConfig = {} + /** + * Whether and what timepoints are returned in the response. + */ + // const enableTimePointing = 1234 + + // Imports the Texttospeech library + const {TextToSpeechClient} = require('@google-cloud/text-to-speech').v1beta1; + + // Instantiates a client + const texttospeechClient = new TextToSpeechClient(); + + async function callSynthesizeSpeech() { + // Construct request + const request = { + input, + voice, + audioConfig, + }; + + // Run request + const response = await texttospeechClient.synthesizeSpeech(request); + console.log(response); + } + + callSynthesizeSpeech(); + // [END texttospeech_v1beta1_generated_TextToSpeech_SynthesizeSpeech_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js new file mode 100644 index 00000000000..6c71d6fdceb --- /dev/null +++ 
b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js @@ -0,0 +1,82 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(input, audioConfig) { + // [START texttospeech_v1beta1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * The resource states of the request in the form of + * projects/* /locations/* /voices/*. + */ + // const parent = 'abc123' + /** + * Required. The Synthesizer requires either plain text or SSML as input. + */ + // const input = {} + /** + * Required. The configuration of the synthesized audio. + */ + // const audioConfig = {} + /** + * Specifies a Cloud Storage URI for the synthesis results. Must be + * specified in the format: `gs://bucket_name/object_name`, and the bucket + * must already exist. 
+ */ + // const outputGcsUri = 'abc123' + /** + * The desired voice of the synthesized audio. + */ + // const voice = {} + + // Imports the Texttospeech library + const {TextToSpeechLongAudioSynthesizeClient} = require('@google-cloud/text-to-speech').v1beta1; + + // Instantiates a client + const texttospeechClient = new TextToSpeechLongAudioSynthesizeClient(); + + async function callSynthesizeLongAudio() { + // Construct request + const request = { + input, + audioConfig, + }; + + // Run request + const [operation] = await texttospeechClient.synthesizeLongAudio(request); + const [response] = await operation.promise(); + console.log(response); + } + + callSynthesizeLongAudio(); + // [END texttospeech_v1beta1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/index.ts new file mode 100644 index 00000000000..c84882a6a53 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/index.ts @@ -0,0 +1,27 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v1beta1 from './v1beta1'; +const TextToSpeechClient = v1beta1.TextToSpeechClient; +type TextToSpeechClient = v1beta1.TextToSpeechClient; +const TextToSpeechLongAudioSynthesizeClient = v1beta1.TextToSpeechLongAudioSynthesizeClient; +type TextToSpeechLongAudioSynthesizeClient = v1beta1.TextToSpeechLongAudioSynthesizeClient; +export {v1beta1, TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient}; +export default {v1beta1, TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/gapic_metadata.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/gapic_metadata.json new file mode 100644 index 00000000000..9e8fec144a6 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/gapic_metadata.json @@ -0,0 +1,67 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "typescript", + "protoPackage": "google.cloud.texttospeech.v1beta1", + "libraryPackage": "@google-cloud/text-to-speech", + "services": { + "TextToSpeech": { + "clients": { + "grpc": { + "libraryClient": "TextToSpeechClient", + "rpcs": { + "ListVoices": { + "methods": [ + "listVoices" + ] + }, + "SynthesizeSpeech": { + "methods": [ + "synthesizeSpeech" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "TextToSpeechClient", + "rpcs": { + "ListVoices": { + "methods": [ + "listVoices" + ] + }, + "SynthesizeSpeech": { + "methods": [ + "synthesizeSpeech" + ] + } + } + } + } + }, + "TextToSpeechLongAudioSynthesize": { + "clients": { + "grpc": { + "libraryClient": "TextToSpeechLongAudioSynthesizeClient", + "rpcs": { + "SynthesizeLongAudio": { + "methods": [ + "synthesizeLongAudio" + ] + } + } + }, + 
"grpc-fallback": { + "libraryClient": "TextToSpeechLongAudioSynthesizeClient", + "rpcs": { + "SynthesizeLongAudio": { + "methods": [ + "synthesizeLongAudio" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/index.ts new file mode 100644 index 00000000000..3cf28b93bc3 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/index.ts @@ -0,0 +1,20 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +export {TextToSpeechClient} from './text_to_speech_client'; +export {TextToSpeechLongAudioSynthesizeClient} from './text_to_speech_long_audio_synthesize_client'; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client.ts new file mode 100644 index 00000000000..bc08778d65f --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client.ts @@ -0,0 +1,515 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1beta1/text_to_speech_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './text_to_speech_client_config.json'; +const version = require('../../../package.json').version; + +/** + * Service that implements Google Cloud Text-to-Speech API. 
+ * @class + * @memberof v1beta1 + */ +export class TextToSpeechClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + pathTemplates: {[name: string]: gax.PathTemplate}; + textToSpeechStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of TextToSpeechClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. 
If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new TextToSpeechClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof TextToSpeechClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // Request numeric enum values if REST transport is used. 
+ opts.numericEnums = true; + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. 
+ this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. + this.pathTemplates = { + modelPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/models/{model}' + ), + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.texttospeech.v1beta1.TextToSpeech', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.textToSpeechStub) { + return this.textToSpeechStub; + } + + // Put together the "service stub" for + // google.cloud.texttospeech.v1beta1.TextToSpeech. + this.textToSpeechStub = this._gaxGrpc.createStub( + this._opts.fallback ? 
+ (this._protos as protobuf.Root).lookupService('google.cloud.texttospeech.v1beta1.TextToSpeech') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.texttospeech.v1beta1.TextToSpeech, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const textToSpeechStubMethods = + ['listVoices', 'synthesizeSpeech']; + for (const methodName of textToSpeechStubMethods) { + const callPromise = this.textToSpeechStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.textToSpeechStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'texttospeech.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'texttospeech.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. 
+ */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. + */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- +/** + * Returns a list of Voice supported for synthesis. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} [request.languageCode] + * Optional. Recommended. + * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + * If not specified, the API will return all supported voices. + * If specified, the ListVoices call will only return voices that can be used + * to synthesize this language_code. For example, if you specify `"en-NZ"`, + * all `"en-NZ"` voices will be returned. If you specify `"no"`, both + * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be + * returned. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [ListVoicesResponse]{@link google.cloud.texttospeech.v1beta1.ListVoicesResponse}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1beta1/text_to_speech.list_voices.js + * region_tag:texttospeech_v1beta1_generated_TextToSpeech_ListVoices_async + */ + listVoices( + request?: protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|undefined, {}|undefined + ]>; + listVoices( + request: protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|null|undefined, + {}|null|undefined>): void; + listVoices( + request: protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest, + callback: Callback< + protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|null|undefined, + {}|null|undefined>): void; + listVoices( + request?: protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, + protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + 
options.otherArgs.headers = options.otherArgs.headers || {}; + this.initialize(); + return this.innerApiCalls.listVoices(request, options, callback); + } +/** + * Synthesizes speech synchronously: receive results after all text input + * has been processed. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.texttospeech.v1beta1.SynthesisInput} request.input + * Required. The Synthesizer requires either plain text or SSML as input. + * @param {google.cloud.texttospeech.v1beta1.VoiceSelectionParams} request.voice + * Required. The desired voice of the synthesized audio. + * @param {google.cloud.texttospeech.v1beta1.AudioConfig} request.audioConfig + * Required. The configuration of the synthesized audio. + * @param {number[]} request.enableTimePointing + * Whether and what timepoints are returned in the response. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [SynthesizeSpeechResponse]{@link google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1beta1/text_to_speech.synthesize_speech.js + * region_tag:texttospeech_v1beta1_generated_TextToSpeech_SynthesizeSpeech_async + */ + synthesizeSpeech( + request?: protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|undefined, {}|undefined + ]>; + synthesizeSpeech( + request: protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|null|undefined, + {}|null|undefined>): void; + synthesizeSpeech( + request: protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest, + callback: Callback< + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|null|undefined, + {}|null|undefined>): void; + synthesizeSpeech( + request?: protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, + protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else 
{ + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + this.initialize(); + return this.innerApiCalls.synthesizeSpeech(request, options, callback); + } + + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified model resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} model + * @returns {string} Resource name string. + */ + modelPath(project:string,location:string,model:string) { + return this.pathTemplates.modelPathTemplate.render({ + project: project, + location: location, + model: model, + }); + } + + /** + * Parse the project from Model resource. + * + * @param {string} modelName + * A fully-qualified path representing Model resource. + * @returns {string} A string representing the project. + */ + matchProjectFromModelName(modelName: string) { + return this.pathTemplates.modelPathTemplate.match(modelName).project; + } + + /** + * Parse the location from Model resource. + * + * @param {string} modelName + * A fully-qualified path representing Model resource. + * @returns {string} A string representing the location. + */ + matchLocationFromModelName(modelName: string) { + return this.pathTemplates.modelPathTemplate.match(modelName).location; + } + + /** + * Parse the model from Model resource. + * + * @param {string} modelName + * A fully-qualified path representing Model resource. + * @returns {string} A string representing the model. + */ + matchModelFromModelName(modelName: string) { + return this.pathTemplates.modelPathTemplate.match(modelName).model; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+ */ + close(): Promise { + if (this.textToSpeechStub && !this._terminated) { + return this.textToSpeechStub.then(stub => { + this._terminated = true; + stub.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client_config.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client_config.json new file mode 100644 index 00000000000..9c26e72b9e3 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client_config.json @@ -0,0 +1,36 @@ +{ + "interfaces": { + "google.cloud.texttospeech.v1beta1.TextToSpeech": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "ListVoices": { + "timeout_millis": 300000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "SynthesizeSpeech": { + "timeout_millis": 300000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client.ts new file mode 100644 index 00000000000..e13731b24df --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client.ts @@ -0,0 +1,614 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1beta1/text_to_speech_long_audio_synthesize_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './text_to_speech_long_audio_synthesize_client_config.json'; +const version = require('../../../package.json').version; + +/** + * Service that implements Google Cloud Text-to-Speech API. 
+ * @class + * @memberof v1beta1 + */ +export class TextToSpeechLongAudioSynthesizeClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + operationsClient: gax.OperationsClient; + textToSpeechLongAudioSynthesizeStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of TextToSpeechLongAudioSynthesizeClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. 
If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new TextToSpeechLongAudioSynthesizeClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof TextToSpeechLongAudioSynthesizeClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // Request numeric enum values if REST transport is used. 
+ opts.numericEnums = true; + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. 
+ this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = []; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const synthesizeLongAudioResponse = protoFilesRoot.lookup( + '.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioResponse') as gax.protobuf.Type; + const synthesizeLongAudioMetadata = protoFilesRoot.lookup( + '.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioMetadata') as gax.protobuf.Type; + + this.descriptors.longrunning = { + synthesizeLongAudio: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + synthesizeLongAudioResponse.decode.bind(synthesizeLongAudioResponse), + synthesizeLongAudioMetadata.decode.bind(synthesizeLongAudioMetadata)) + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. 
+ * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.textToSpeechLongAudioSynthesizeStub) { + return this.textToSpeechLongAudioSynthesizeStub; + } + + // Put together the "service stub" for + // google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize. + this.textToSpeechLongAudioSynthesizeStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+ const textToSpeechLongAudioSynthesizeStubMethods = + ['synthesizeLongAudio']; + for (const methodName of textToSpeechLongAudioSynthesizeStubMethods) { + const callPromise = this.textToSpeechLongAudioSynthesizeStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.longrunning[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.textToSpeechLongAudioSynthesizeStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'texttospeech.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'texttospeech.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. 
+ */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- + +/** + * Synthesizes long form text asynchronously. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * The resource states of the request in the form of + * projects/* /locations/* /voices/*. + * @param {google.cloud.texttospeech.v1beta1.SynthesisInput} request.input + * Required. The Synthesizer requires either plain text or SSML as input. + * @param {google.cloud.texttospeech.v1beta1.AudioConfig} request.audioConfig + * Required. The configuration of the synthesized audio. + * @param {string} request.outputGcsUri + * Specifies a Cloud Storage URI for the synthesis results. Must be + * specified in the format: `gs://bucket_name/object_name`, and the bucket + * must already exist. + * @param {google.cloud.texttospeech.v1beta1.VoiceSelectionParams} request.voice + * The desired voice of the synthesized audio. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js + * region_tag:texttospeech_v1beta1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async + */ + synthesizeLongAudio( + request?: protos.google.cloud.texttospeech.v1beta1.ISynthesizeLongAudioRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + synthesizeLongAudio( + request: protos.google.cloud.texttospeech.v1beta1.ISynthesizeLongAudioRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + synthesizeLongAudio( + request: protos.google.cloud.texttospeech.v1beta1.ISynthesizeLongAudioRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + synthesizeLongAudio( + request?: protos.google.cloud.texttospeech.v1beta1.ISynthesizeLongAudioRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.synthesizeLongAudio(request, options, callback); + } +/** + * Check the status of the long running operation returned by `synthesizeLongAudio()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js + * region_tag:texttospeech_v1beta1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async + */ + async checkSynthesizeLongAudioProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.synthesizeLongAudio, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } +/** + * Gets the latest state of a long-running operation. Clients can use this + * method to poll the operation result at intervals as recommended by the API + * service. + * + * @param {Object} request - The request object that will be sent. + * @param {string} request.name - The name of the operation resource. + * @param {Object=} options + * Optional parameters. You can override the default settings for this call, + * e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link + * https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the + * details. + * @param {function(?Error, ?Object)=} callback + * The function which will be called with the result of the API call. 
+ * + * The second parameter to the callback is an object representing + * [google.longrunning.Operation]{@link + * external:"google.longrunning.Operation"}. + * @return {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * [google.longrunning.Operation]{@link + * external:"google.longrunning.Operation"}. The promise has a method named + * "cancel" which cancels the ongoing API call. + * + * @example + * ``` + * const client = longrunning.operationsClient(); + * const name = ''; + * const [response] = await client.getOperation({name}); + * // doThingsWith(response) + * ``` + */ + getOperation( + request: protos.google.longrunning.GetOperationRequest, + options?: + | gax.CallOptions + | Callback< + protos.google.longrunning.Operation, + protos.google.longrunning.GetOperationRequest, + {} | null | undefined + >, + callback?: Callback< + protos.google.longrunning.Operation, + protos.google.longrunning.GetOperationRequest, + {} | null | undefined + > + ): Promise<[protos.google.longrunning.Operation]> { + return this.operationsClient.getOperation(request, options, callback); + } + /** + * Lists operations that match the specified filter in the request. If the + * server doesn't support this method, it returns `UNIMPLEMENTED`. Returns an iterable object. + * + * For-await-of syntax is used with the iterable to recursively get response element on-demand. + * + * @param {Object} request - The request object that will be sent. + * @param {string} request.name - The name of the operation collection. + * @param {string} request.filter - The standard list filter. + * @param {number=} request.pageSize - + * The maximum number of resources contained in the underlying API + * response. If page streaming is performed per-resource, this + * parameter does not affect the return value. If page streaming is + * performed per-page, this determines the maximum number of + * resources in a page. 
+ * @param {Object=} options + * Optional parameters. You can override the default settings for this call, + * e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link + * https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the + * details. + * @returns {Object} + * An iterable Object that conforms to @link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols. + * + * @example + * ``` + * const client = longrunning.operationsClient(); + * for await (const response of client.listOperationsAsync(request)); + * // doThingsWith(response) + * ``` + */ + listOperationsAsync( + request: protos.google.longrunning.ListOperationsRequest, + options?: gax.CallOptions + ): AsyncIterable { + return this.operationsClient.listOperationsAsync(request, options); + } + /** + * Starts asynchronous cancellation on a long-running operation. The server + * makes a best effort to cancel the operation, but success is not + * guaranteed. If the server doesn't support this method, it returns + * `google.rpc.Code.UNIMPLEMENTED`. Clients can use + * {@link Operations.GetOperation} or + * other methods to check whether the cancellation succeeded or whether the + * operation completed despite cancellation. On successful cancellation, + * the operation is not deleted; instead, it becomes an operation with + * an {@link Operation.error} value with a {@link google.rpc.Status.code} of + * 1, corresponding to `Code.CANCELLED`. + * + * @param {Object} request - The request object that will be sent. + * @param {string} request.name - The name of the operation resource to be cancelled. + * @param {Object=} options + * Optional parameters. You can override the default settings for this call, + * e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link + * https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the + * details. 
+ * @param {function(?Error)=} callback + * The function which will be called with the result of the API call. + * @return {Promise} - The promise which resolves when API call finishes. + * The promise has a method named "cancel" which cancels the ongoing API + * call. + * + * @example + * ``` + * const client = longrunning.operationsClient(); + * await client.cancelOperation({name: ''}); + * ``` + */ + cancelOperation( + request: protos.google.longrunning.CancelOperationRequest, + options?: + | gax.CallOptions + | Callback< + protos.google.protobuf.Empty, + protos.google.longrunning.CancelOperationRequest, + {} | undefined | null + >, + callback?: Callback< + protos.google.longrunning.CancelOperationRequest, + protos.google.protobuf.Empty, + {} | undefined | null + > + ): Promise { + return this.operationsClient.cancelOperation(request, options, callback); + } + + /** + * Deletes a long-running operation. This method indicates that the client is + * no longer interested in the operation result. It does not cancel the + * operation. If the server doesn't support this method, it returns + * `google.rpc.Code.UNIMPLEMENTED`. + * + * @param {Object} request - The request object that will be sent. + * @param {string} request.name - The name of the operation resource to be deleted. + * @param {Object=} options + * Optional parameters. You can override the default settings for this call, + * e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link + * https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the + * details. + * @param {function(?Error)=} callback + * The function which will be called with the result of the API call. + * @return {Promise} - The promise which resolves when API call finishes. + * The promise has a method named "cancel" which cancels the ongoing API + * call. 
+ * + * @example + * ``` + * const client = longrunning.operationsClient(); + * await client.deleteOperation({name: ''}); + * ``` + */ + deleteOperation( + request: protos.google.longrunning.DeleteOperationRequest, + options?: + | gax.CallOptions + | Callback< + protos.google.protobuf.Empty, + protos.google.longrunning.DeleteOperationRequest, + {} | null | undefined + >, + callback?: Callback< + protos.google.protobuf.Empty, + protos.google.longrunning.DeleteOperationRequest, + {} | null | undefined + > + ): Promise { + return this.operationsClient.deleteOperation(request, options, callback); + } + + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. + */ + close(): Promise { + if (this.textToSpeechLongAudioSynthesizeStub && !this._terminated) { + return this.textToSpeechLongAudioSynthesizeStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client_config.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client_config.json new file mode 100644 index 00000000000..a640bca5ce1 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client_config.json @@ -0,0 +1,31 @@ +{ + "interfaces": { + "google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + 
"max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "SynthesizeLongAudio": { + "timeout_millis": 5000000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_proto_list.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_proto_list.json new file mode 100644 index 00000000000..c7c739dbc73 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_proto_list.json @@ -0,0 +1,4 @@ +[ + "../../protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto", + "../../protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto" +] diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_proto_list.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_proto_list.json new file mode 100644 index 00000000000..c7c739dbc73 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_proto_list.json @@ -0,0 +1,4 @@ +[ + "../../protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto", + "../../protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto" +] diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.js new file mode 100644 index 00000000000..a08648fec57 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.js @@ -0,0 +1,28 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const texttospeech = require('@google-cloud/text-to-speech'); + +function main() { + const textToSpeechClient = new texttospeech.TextToSpeechClient(); + const textToSpeechLongAudioSynthesizeClient = new texttospeech.TextToSpeechLongAudioSynthesizeClient(); +} + +main(); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.ts new file mode 100644 index 00000000000..14b3691b2df --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.ts @@ -0,0 +1,38 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient} from '@google-cloud/text-to-speech'; + +// check that the client class type name can be used +function doStuffWithTextToSpeechClient(client: TextToSpeechClient) { + client.close(); +} +function doStuffWithTextToSpeechLongAudioSynthesizeClient(client: TextToSpeechLongAudioSynthesizeClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const textToSpeechClient = new TextToSpeechClient(); + doStuffWithTextToSpeechClient(textToSpeechClient); + // check that the client instance can be created + const textToSpeechLongAudioSynthesizeClient = new TextToSpeechLongAudioSynthesizeClient(); + doStuffWithTextToSpeechLongAudioSynthesizeClient(textToSpeechLongAudioSynthesizeClient); +} + +main(); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/install.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/install.ts new file mode 100644 index 00000000000..557a57558e1 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/install.ts @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {packNTest} from 'pack-n-play'; +import {readFileSync} from 'fs'; +import {describe, it} from 'mocha'; + +describe('📦 pack-n-play test', () => { + + it('TypeScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() + } + }; + await packNTest(options); + }); + + it('JavaScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'JavaScript user can use the library', + ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() + } + }; + await packNTest(options); + }); + +}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_long_audio_synthesize_v1beta1.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_long_audio_synthesize_v1beta1.ts new file mode 100644 index 00000000000..7e48b40b7bc --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_long_audio_synthesize_v1beta1.ts @@ -0,0 +1,591 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as texttospeechlongaudiosynthesizeModule from '../src'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? 
sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v1beta1.TextToSpeechLongAudioSynthesizeClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + fallback: true, + }); + assert(client); + }); + + it('has 
initialize method and supports deferred initialization', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.textToSpeechLongAudioSynthesizeStub, undefined); + await client.initialize(); + assert(client.textToSpeechLongAudioSynthesizeStub); + }); + + it('has close method for the initialized client', done => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.textToSpeechLongAudioSynthesizeStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.textToSpeechLongAudioSynthesizeStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', 
private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('synthesizeLongAudio', () => { + it('invokes synthesizeLongAudio without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(expectedResponse); + const [operation] = await client.synthesizeLongAudio(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes synthesizeLongAudio without error using callback', async () => { + const client = new 
texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.synthesizeLongAudio = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.synthesizeLongAudio( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes synthesizeLongAudio with call error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = + 
getTypeDefaultValue('.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.synthesizeLongAudio(request), expectedError); + const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes synthesizeLongAudio with LRO error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.synthesizeLongAudio(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) + 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkSynthesizeLongAudioProgress without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkSynthesizeLongAudioProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkSynthesizeLongAudioProgress with error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkSynthesizeLongAudioProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + describe('getOperation', () => { + it('invokes getOperation without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + 
client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.GetOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const response = await client.getOperation(request); + assert.deepStrictEqual(response, [expectedResponse]); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0).calledWith(request) + ); + }); + it('invokes getOperation without error using callback', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.GetOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + client.operationsClient.getOperation = sinon.stub().callsArgWith(2, null, expectedResponse); + const promise = new Promise((resolve, reject) => { + client.operationsClient.getOperation( + request, + undefined, + ( + err?: Error | null, + result?: operationsProtos.google.longrunning.Operation | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + it('invokes getOperation with error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.GetOperationRequest() + ); + const expectedError = new Error('expected'); + 
client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(async () => {await client.getOperation(request)}, expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0).calledWith(request)); + }); + }); + describe('cancelOperation', () => { + it('invokes cancelOperation without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.CancelOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.cancelOperation = stubSimpleCall(expectedResponse); + const response = await client.cancelOperation(request); + assert.deepStrictEqual(response, [expectedResponse]); + assert((client.operationsClient.cancelOperation as SinonStub) + .getCall(0).calledWith(request) + ); + }); + it('invokes cancelOperation without error using callback', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.CancelOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.cancelOperation = sinon.stub().callsArgWith(2, null, expectedResponse); + const promise = new Promise((resolve, reject) => { + client.operationsClient.cancelOperation( + request, + undefined, + ( + err?: Error | null, + result?: protos.google.protobuf.Empty | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + 
assert.deepStrictEqual(response, expectedResponse); + assert((client.operationsClient.cancelOperation as SinonStub) + .getCall(0)); + }); + it('invokes cancelOperation with error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.CancelOperationRequest() + ); + const expectedError = new Error('expected'); + client.operationsClient.cancelOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(async () => {await client.cancelOperation(request)}, expectedError); + assert((client.operationsClient.cancelOperation as SinonStub) + .getCall(0).calledWith(request)); + }); + }); + describe('deleteOperation', () => { + it('invokes deleteOperation without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.DeleteOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.deleteOperation = stubSimpleCall(expectedResponse); + const response = await client.deleteOperation(request); + assert.deepStrictEqual(response, [expectedResponse]); + assert((client.operationsClient.deleteOperation as SinonStub) + .getCall(0).calledWith(request) + ); + }); + it('invokes deleteOperation without error using callback', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new 
operationsProtos.google.longrunning.DeleteOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.deleteOperation = sinon.stub().callsArgWith(2, null, expectedResponse); + const promise = new Promise((resolve, reject) => { + client.operationsClient.deleteOperation( + request, + undefined, + ( + err?: Error | null, + result?: protos.google.protobuf.Empty | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.operationsClient.deleteOperation as SinonStub) + .getCall(0)); + }); + it('invokes deleteOperation with error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.DeleteOperationRequest() + ); + const expectedError = new Error('expected'); + client.operationsClient.deleteOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(async () => {await client.deleteOperation(request)}, expectedError); + assert((client.operationsClient.deleteOperation as SinonStub) + .getCall(0).calledWith(request)); + }); + }); + describe('listOperationsAsync', () => { + it('uses async iteration with listOperations without error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsRequest() + ); + const expectedResponse = [ + generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsResponse() + ), + generateSampleMessage( + new 
operationsProtos.google.longrunning.ListOperationsResponse() + ), + generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsResponse() + ), + ]; + client.operationsClient.descriptor.listOperations.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: operationsProtos.google.longrunning.ListOperationsResponse[] = []; + const iterable = client.operationsClient.listOperationsAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.operationsClient.descriptor.listOperations.asyncIterate as SinonStub) + .getCall(0).args[1], request); + }); + it('uses async iteration with listOperations with error', async () => { + const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsRequest() + ); + const expectedError = new Error('expected'); + client.operationsClient.descriptor.listOperations.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.operationsClient.listOperationsAsync(request); + await assert.rejects(async () => { + const responses: operationsProtos.google.longrunning.ListOperationsResponse[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.operationsClient.descriptor.listOperations.asyncIterate as SinonStub) + .getCall(0).args[1], request); + }); + }); +}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_v1beta1.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_v1beta1.ts new file mode 100644 index 00000000000..009d1c69428 --- /dev/null +++ 
b/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_v1beta1.ts @@ -0,0 +1,349 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as texttospeechModule from '../src'; + +import {protobuf} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof 
protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +describe('v1beta1.TextToSpeechClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = texttospeechModule.v1beta1.TextToSpeechClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = texttospeechModule.v1beta1.TextToSpeechClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = texttospeechModule.v1beta1.TextToSpeechClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.textToSpeechStub, undefined); + await client.initialize(); + assert(client.textToSpeechStub); + }); + + it('has close method for the initialized client', done => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.textToSpeechStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized 
client', done => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.textToSpeechStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('listVoices', () => { + it('invokes listVoices without error', async () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.ListVoicesRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.ListVoicesResponse() + ); + client.innerApiCalls.listVoices = stubSimpleCall(expectedResponse); + const 
[response] = await client.listVoices(request); + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes listVoices without error using callback', async () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.ListVoicesRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.ListVoicesResponse() + ); + client.innerApiCalls.listVoices = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listVoices( + request, + (err?: Error|null, result?: protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes listVoices with error', async () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.ListVoicesRequest() + ); + const expectedError = new Error('expected'); + client.innerApiCalls.listVoices = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listVoices(request), expectedError); + }); + + it('invokes listVoices with closed client', async () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.ListVoicesRequest() + ); + const expectedError = new Error('The client has already 
been closed.'); + client.close(); + await assert.rejects(client.listVoices(request), expectedError); + }); + }); + + describe('synthesizeSpeech', () => { + it('invokes synthesizeSpeech without error', async () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse() + ); + client.innerApiCalls.synthesizeSpeech = stubSimpleCall(expectedResponse); + const [response] = await client.synthesizeSpeech(request); + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes synthesizeSpeech without error using callback', async () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse() + ); + client.innerApiCalls.synthesizeSpeech = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.synthesizeSpeech( + request, + (err?: Error|null, result?: protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes synthesizeSpeech with error', async () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + 
projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechRequest() + ); + const expectedError = new Error('expected'); + client.innerApiCalls.synthesizeSpeech = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.synthesizeSpeech(request), expectedError); + }); + + it('invokes synthesizeSpeech with closed client', async () => { + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechRequest() + ); + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.synthesizeSpeech(request), expectedError); + }); + }); + + describe('Path templates', () => { + + describe('model', () => { + const fakePath = "/rendered/path/model"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + model: "modelValue", + }; + const client = new texttospeechModule.v1beta1.TextToSpeechClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.modelPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.modelPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('modelPath', () => { + const result = client.modelPath("projectValue", "locationValue", "modelValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.modelPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromModelName', () => { + const result = client.matchProjectFromModelName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.modelPathTemplate.match as 
SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromModelName', () => { + const result = client.matchLocationFromModelName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.modelPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchModelFromModelName', () => { + const result = client.matchModelFromModelName(fakePath); + assert.strictEqual(result, "modelValue"); + assert((client.pathTemplates.modelPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/tsconfig.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/tsconfig.json new file mode 100644 index 00000000000..c78f1c884ef --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/webpack.config.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/webpack.config.js new file mode 100644 index 00000000000..25f059a0979 --- /dev/null +++ b/owl-bot-staging/google-cloud-texttospeech/v1beta1/webpack.config.js @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'TextToSpeech', + filename: './text-to-speech.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; From 0f6eed885a9340443f530af924120041c099ca89 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Wed, 7 Dec 2022 18:36:31 +0000 Subject: [PATCH 2/2] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot=20po?= =?UTF-8?q?st-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- .../v1/.eslintignore | 7 - .../v1/.eslintrc.json | 3 - .../google-cloud-texttospeech/v1/.gitignore | 14 - .../google-cloud-texttospeech/v1/.jsdoc.js | 55 -- .../google-cloud-texttospeech/v1/.mocharc.js | 33 - .../v1/.prettierrc.js | 22 - .../google-cloud-texttospeech/v1/README.md | 1 - .../v1/linkinator.config.json | 16 - .../google-cloud-texttospeech/v1/package.json | 65 -- .../cloud/texttospeech/v1/cloud_tts.proto | 304 ------- ...metadata.google.cloud.texttospeech.v1.json | 159 ---- .../v1/text_to_speech.list_voices.js | 67 -- .../v1/text_to_speech.synthesize_speech.js | 71 -- 
.../google-cloud-texttospeech/v1/src/index.ts | 27 - .../v1/src/v1/gapic_metadata.json | 67 -- .../v1/src/v1/index.ts | 20 - .../v1/src/v1/text_to_speech_client.ts | 513 ----------- .../src/v1/text_to_speech_client_config.json | 36 - .../v1/src/v1/text_to_speech_proto_list.json | 4 - .../system-test/fixtures/sample/src/index.js | 28 - .../system-test/fixtures/sample/src/index.ts | 38 - .../v1/system-test/install.ts | 49 -- ...text_to_speech_long_audio_synthesize_v1.ts | 591 ------------- .../v1/test/gapic_text_to_speech_v1.ts | 349 -------- .../v1/tsconfig.json | 19 - .../v1/webpack.config.js | 64 -- .../v1beta1/.eslintignore | 7 - .../v1beta1/.eslintrc.json | 3 - .../v1beta1/.gitignore | 14 - .../v1beta1/.jsdoc.js | 55 -- .../v1beta1/.mocharc.js | 33 - .../v1beta1/.prettierrc.js | 22 - .../v1beta1/README.md | 1 - .../v1beta1/linkinator.config.json | 16 - .../v1beta1/package.json | 65 -- .../texttospeech/v1beta1/cloud_tts.proto | 334 ------- .../texttospeech/v1beta1/cloud_tts_lrs.proto | 90 -- ...ata.google.cloud.texttospeech.v1beta1.json | 163 ---- .../v1beta1/text_to_speech.list_voices.js | 67 -- .../text_to_speech.synthesize_speech.js | 75 -- ..._audio_synthesize.synthesize_long_audio.js | 82 -- .../v1beta1/src/index.ts | 27 - .../v1beta1/src/v1beta1/gapic_metadata.json | 67 -- .../v1beta1/src/v1beta1/index.ts | 20 - .../src/v1beta1/text_to_speech_client.ts | 515 ----------- .../v1beta1/text_to_speech_client_config.json | 36 - ..._to_speech_long_audio_synthesize_client.ts | 614 ------------- ...h_long_audio_synthesize_client_config.json | 31 - ...eech_long_audio_synthesize_proto_list.json | 4 - .../v1beta1/text_to_speech_proto_list.json | 4 - .../system-test/fixtures/sample/src/index.js | 28 - .../system-test/fixtures/sample/src/index.ts | 38 - .../v1beta1/system-test/install.ts | 49 -- ...to_speech_long_audio_synthesize_v1beta1.ts | 591 ------------- .../test/gapic_text_to_speech_v1beta1.ts | 349 -------- .../v1beta1/tsconfig.json | 19 - 
.../v1beta1/webpack.config.js | 64 -- packages/google-cloud-texttospeech/README.md | 1 + .../cloud/texttospeech/v1/cloud_tts.proto | 28 +- .../cloud/texttospeech/v1/cloud_tts_lrs.proto | 0 .../protos/protos.d.ts | 366 ++++++++ .../protos/protos.js | 814 ++++++++++++++++++ .../protos/protos.json | 83 +- .../samples/README.md | 18 + ...metadata.google.cloud.texttospeech.v1.json | 56 ++ ..._audio_synthesize.synthesize_long_audio.js | 0 .../google-cloud-texttospeech/src/index.ts | 13 +- .../src/v1/gapic_metadata.json | 24 + .../google-cloud-texttospeech/src/v1/index.ts | 1 + ..._to_speech_long_audio_synthesize_client.ts | 328 ++++--- ...h_long_audio_synthesize_client_config.json | 0 ...eech_long_audio_synthesize_proto_list.json | 0 .../src/v1/text_to_speech_proto_list.json | 3 +- .../system-test/fixtures/sample/src/index.js | 2 + .../system-test/fixtures/sample/src/index.ts | 16 +- ...text_to_speech_long_audio_synthesize_v1.ts | 785 +++++++++++++++++ 76 files changed, 2391 insertions(+), 6252 deletions(-) delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.eslintignore delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.eslintrc.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.gitignore delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.jsdoc.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.mocharc.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/.prettierrc.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/README.md delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/linkinator.config.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/package.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts.proto delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json delete mode 100644 
owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.list_voices.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.synthesize_speech.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/index.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/gapic_metadata.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/index.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client_config.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_proto_list.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/system-test/install.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_long_audio_synthesize_v1.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_v1.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/tsconfig.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1/webpack.config.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintignore delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintrc.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.gitignore delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.jsdoc.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.mocharc.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/.prettierrc.js delete mode 100644 
owl-bot-staging/google-cloud-texttospeech/v1beta1/README.md delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/linkinator.config.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/package.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.texttospeech.v1beta1.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.list_voices.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.synthesize_speech.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/index.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/index.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client_config.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client_config.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_proto_list.json delete mode 100644 
owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_proto_list.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.js delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/install.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_long_audio_synthesize_v1beta1.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_v1beta1.ts delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/tsconfig.json delete mode 100644 owl-bot-staging/google-cloud-texttospeech/v1beta1/webpack.config.js rename {owl-bot-staging/google-cloud-texttospeech/v1 => packages/google-cloud-texttospeech}/protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto (100%) rename {owl-bot-staging/google-cloud-texttospeech/v1 => packages/google-cloud-texttospeech}/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js (100%) rename {owl-bot-staging/google-cloud-texttospeech/v1 => packages/google-cloud-texttospeech}/src/v1/text_to_speech_long_audio_synthesize_client.ts (71%) rename {owl-bot-staging/google-cloud-texttospeech/v1 => packages/google-cloud-texttospeech}/src/v1/text_to_speech_long_audio_synthesize_client_config.json (100%) rename {owl-bot-staging/google-cloud-texttospeech/v1 => packages/google-cloud-texttospeech}/src/v1/text_to_speech_long_audio_synthesize_proto_list.json (100%) create mode 100644 packages/google-cloud-texttospeech/test/gapic_text_to_speech_long_audio_synthesize_v1.ts diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.eslintignore b/owl-bot-staging/google-cloud-texttospeech/v1/.eslintignore deleted file mode 100644 index cfc348ec4d1..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/.eslintignore +++ /dev/null @@ 
-1,7 +0,0 @@ -**/node_modules -**/.coverage -build/ -docs/ -protos/ -system-test/ -samples/generated/ diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.eslintrc.json b/owl-bot-staging/google-cloud-texttospeech/v1/.eslintrc.json deleted file mode 100644 index 78215349546..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/.eslintrc.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "./node_modules/gts" -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.gitignore b/owl-bot-staging/google-cloud-texttospeech/v1/.gitignore deleted file mode 100644 index 5d32b23782f..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -**/*.log -**/node_modules -.coverage -coverage -.nyc_output -docs/ -out/ -build/ -system-test/secrets.js -system-test/*key.json -*.lock -.DS_Store -package-lock.json -__pycache__ diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.jsdoc.js b/owl-bot-staging/google-cloud-texttospeech/v1/.jsdoc.js deleted file mode 100644 index 929b3c59840..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/.jsdoc.js +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -'use strict'; - -module.exports = { - opts: { - readme: './README.md', - package: './package.json', - template: './node_modules/jsdoc-fresh', - recurse: true, - verbose: true, - destination: './docs/' - }, - plugins: [ - 'plugins/markdown', - 'jsdoc-region-tag' - ], - source: { - excludePattern: '(^|\\/|\\\\)[._]', - include: [ - 'build/src', - 'protos' - ], - includePattern: '\\.js$' - }, - templates: { - copyright: 'Copyright 2022 Google LLC', - includeDate: false, - sourceFiles: false, - systemName: '@google-cloud/text-to-speech', - theme: 'lumen', - default: { - outputSourceFiles: false - } - }, - markdown: { - idInHeadings: true - } -}; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.mocharc.js b/owl-bot-staging/google-cloud-texttospeech/v1/.mocharc.js deleted file mode 100644 index 481c522b00f..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/.mocharc.js +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -const config = { - "enable-source-maps": true, - "throw-deprecation": true, - "timeout": 10000 -} -if (process.env.MOCHA_THROW_DEPRECATION === 'false') { - delete config['throw-deprecation']; -} -if (process.env.MOCHA_REPORTER) { - config.reporter = process.env.MOCHA_REPORTER; -} -if (process.env.MOCHA_REPORTER_OUTPUT) { - config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; -} -module.exports = config diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/.prettierrc.js b/owl-bot-staging/google-cloud-texttospeech/v1/.prettierrc.js deleted file mode 100644 index 494e147865d..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/.prettierrc.js +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - -module.exports = { - ...require('gts/.prettierrc.json') -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/README.md b/owl-bot-staging/google-cloud-texttospeech/v1/README.md deleted file mode 100644 index 3eaadd6a1c8..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/README.md +++ /dev/null @@ -1 +0,0 @@ -Texttospeech: Nodejs Client diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/linkinator.config.json b/owl-bot-staging/google-cloud-texttospeech/v1/linkinator.config.json deleted file mode 100644 index befd23c8633..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/linkinator.config.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "recurse": true, - "skip": [ - "https://codecov.io/gh/googleapis/", - "www.googleapis.com", - "img.shields.io", - "https://console.cloud.google.com/cloudshell", - "https://support.google.com" - ], - "silent": true, - "concurrency": 5, - "retry": true, - "retryErrors": true, - "retryErrorsCount": 5, - "retryErrorsJitter": 3000 -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/package.json b/owl-bot-staging/google-cloud-texttospeech/v1/package.json deleted file mode 100644 index e0bd4130ffb..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/package.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "@google-cloud/text-to-speech", - "version": "0.1.0", - "description": "Texttospeech client for Node.js", - "repository": "googleapis/nodejs-texttospeech", - "license": "Apache-2.0", - "author": "Google LLC", - "main": "build/src/index.js", - "files": [ - "build/src", - "build/protos" - ], - "keywords": [ - "google apis client", - "google api client", - "google apis", - "google api", - "google", - "google cloud platform", - "google cloud", - "cloud", - "google texttospeech", - "texttospeech", - "text to speech", - "text to speech long audio synthesize" - ], - "scripts": { - "clean": "gts clean", - "compile": "tsc -p . 
&& cp -r protos build/ && minifyProtoJson", - "compile-protos": "compileProtos src", - "docs": "jsdoc -c .jsdoc.js", - "predocs-test": "npm run docs", - "docs-test": "linkinator docs", - "fix": "gts fix", - "lint": "gts check", - "prepare": "npm run compile-protos && npm run compile", - "system-test": "c8 mocha build/system-test", - "test": "c8 mocha build/test" - }, - "dependencies": { - "google-gax": "^3.5.2" - }, - "devDependencies": { - "@types/mocha": "^9.1.1", - "@types/node": "^16.11.62", - "@types/sinon": "^10.0.13", - "c8": "^7.12.0", - "gts": "^3.1.1", - "jsdoc": "^3.6.11", - "jsdoc-fresh": "^2.0.1", - "jsdoc-region-tag": "^2.0.1", - "linkinator": "^4.0.3", - "mocha": "^10.0.0", - "null-loader": "^4.0.1", - "pack-n-play": "^1.0.0-2", - "sinon": "^14.0.0", - "ts-loader": "^8.4.0", - "typescript": "^4.8.3", - "webpack": "^4.46.0", - "webpack-cli": "^4.10.0" - }, - "engines": { - "node": ">=v12" - } -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts.proto b/owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts.proto deleted file mode 100644 index b50d3698fb7..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts.proto +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.cloud.texttospeech.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; - -option cc_enable_arenas = true; -option csharp_namespace = "Google.Cloud.TextToSpeech.V1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/texttospeech/v1;texttospeech"; -option java_multiple_files = true; -option java_outer_classname = "TextToSpeechProto"; -option java_package = "com.google.cloud.texttospeech.v1"; -option php_namespace = "Google\\Cloud\\TextToSpeech\\V1"; -option ruby_package = "Google::Cloud::TextToSpeech::V1"; -option (google.api.resource_definition) = { - type: "automl.googleapis.com/Model" - pattern: "projects/{project}/locations/{location}/models/{model}" -}; - -// Service that implements Google Cloud Text-to-Speech API. -service TextToSpeech { - option (google.api.default_host) = "texttospeech.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; - - // Returns a list of Voice supported for synthesis. - rpc ListVoices(ListVoicesRequest) returns (ListVoicesResponse) { - option (google.api.http) = { - get: "/v1/voices" - }; - option (google.api.method_signature) = "language_code"; - } - - // Synthesizes speech synchronously: receive results after all text input - // has been processed. - rpc SynthesizeSpeech(SynthesizeSpeechRequest) - returns (SynthesizeSpeechResponse) { - option (google.api.http) = { - post: "/v1/text:synthesize" - body: "*" - }; - option (google.api.method_signature) = "input,voice,audio_config"; - } -} - -// Gender of the voice as described in -// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice). -enum SsmlVoiceGender { - // An unspecified gender. - // In VoiceSelectionParams, this means that the client doesn't care which - // gender the selected voice will have. 
In the Voice field of - // ListVoicesResponse, this may mean that the voice doesn't fit any of the - // other categories in this enum, or that the gender of the voice isn't known. - SSML_VOICE_GENDER_UNSPECIFIED = 0; - - // A male voice. - MALE = 1; - - // A female voice. - FEMALE = 2; - - // A gender-neutral voice. This voice is not yet supported. - NEUTRAL = 3; -} - -// Configuration to set up audio encoder. The encoding determines the output -// audio format that we'd like. -enum AudioEncoding { - // Not specified. Will return result - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. - AUDIO_ENCODING_UNSPECIFIED = 0; - - // Uncompressed 16-bit signed little-endian samples (Linear PCM). - // Audio content returned as LINEAR16 also contains a WAV header. - LINEAR16 = 1; - - // MP3 audio at 32kbps. - MP3 = 2; - - // Opus encoded audio wrapped in an ogg container. The result will be a - // file which can be played natively on Android, and in browsers (at least - // Chrome and Firefox). The quality of the encoding is considerably higher - // than MP3 while using approximately the same bitrate. - OGG_OPUS = 3; - - // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. - // Audio content returned as MULAW also contains a WAV header. - MULAW = 5; - - // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law. - // Audio content returned as ALAW also contains a WAV header. - ALAW = 6; -} - -// The top-level message sent by the client for the `ListVoices` method. -message ListVoicesRequest { - // Optional. Recommended. - // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. - // If not specified, the API will return all supported voices. - // If specified, the ListVoices call will only return voices that can be used - // to synthesize this language_code. For example, if you specify `"en-NZ"`, - // all `"en-NZ"` voices will be returned. 
If you specify `"no"`, both - // `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be - // returned. - string language_code = 1 [(google.api.field_behavior) = OPTIONAL]; -} - -// The message returned to the client by the `ListVoices` method. -message ListVoicesResponse { - // The list of voices. - repeated Voice voices = 1; -} - -// Description of a voice supported by the TTS service. -message Voice { - // The languages that this voice supports, expressed as - // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g. - // "en-US", "es-419", "cmn-tw"). - repeated string language_codes = 1; - - // The name of this voice. Each distinct voice has a unique name. - string name = 2; - - // The gender of this voice. - SsmlVoiceGender ssml_gender = 3; - - // The natural sample rate (in hertz) for this voice. - int32 natural_sample_rate_hertz = 4; -} - -// The top-level message sent by the client for the `SynthesizeSpeech` method. -message SynthesizeSpeechRequest { - // Required. The Synthesizer requires either plain text or SSML as input. - SynthesisInput input = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The desired voice of the synthesized audio. - VoiceSelectionParams voice = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The configuration of the synthesized audio. - AudioConfig audio_config = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Contains text input to be synthesized. Either `text` or `ssml` must be -// supplied. Supplying both or neither returns -// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. The -// input size is limited to 5000 bytes. -message SynthesisInput { - // The input source, which is either plain text or SSML. - oneof input_source { - // The raw text to be synthesized. - string text = 1; - - // The SSML document to be synthesized. The SSML document must be valid - // and well-formed. 
Otherwise the RPC will fail and return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. For - // more information, see - // [SSML](https://cloud.google.com/text-to-speech/docs/ssml). - string ssml = 2; - } -} - -// Description of which voice to use for a synthesis request. -message VoiceSelectionParams { - // Required. The language (and potentially also the region) of the voice - // expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) - // language tag, e.g. "en-US". This should not include a script tag (e.g. use - // "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred - // from the input provided in the SynthesisInput. The TTS service - // will use this parameter to help choose an appropriate voice. Note that - // the TTS service may choose a voice with a slightly different language code - // than the one selected; it may substitute a different region - // (e.g. using en-US rather than en-CA if there isn't a Canadian voice - // available), or even a different language, e.g. using "nb" (Norwegian - // Bokmal) instead of "no" (Norwegian)". - string language_code = 1 [(google.api.field_behavior) = REQUIRED]; - - // The name of the voice. If not set, the service will choose a - // voice based on the other parameters such as language_code and gender. - string name = 2; - - // The preferred gender of the voice. If not set, the service will - // choose a voice based on the other parameters such as language_code and - // name. Note that this is only a preference, not requirement; if a - // voice of the appropriate gender is not available, the synthesizer should - // substitute a voice with a different gender rather than failing the request. - SsmlVoiceGender ssml_gender = 3; - - // The configuration for a custom voice. If [CustomVoiceParams.model] is set, - // the service will choose the custom voice matching the specified - // configuration. 
- CustomVoiceParams custom_voice = 4; -} - -// Description of audio data to be synthesized. -message AudioConfig { - // Required. The format of the audio byte stream. - AudioEncoding audio_encoding = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Input only. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is - // the normal native speed supported by the specific voice. 2.0 is twice as - // fast, and 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 - // speed. Any other values < 0.25 or > 4.0 will return an error. - double speaking_rate = 2 [ - (google.api.field_behavior) = INPUT_ONLY, - (google.api.field_behavior) = OPTIONAL - ]; - - // Optional. Input only. Speaking pitch, in the range [-20.0, 20.0]. 20 means - // increase 20 semitones from the original pitch. -20 means decrease 20 - // semitones from the original pitch. - double pitch = 3 [ - (google.api.field_behavior) = INPUT_ONLY, - (google.api.field_behavior) = OPTIONAL - ]; - - // Optional. Input only. Volume gain (in dB) of the normal native volume - // supported by the specific voice, in the range [-96.0, 16.0]. If unset, or - // set to a value of 0.0 (dB), will play at normal native signal amplitude. A - // value of -6.0 (dB) will play at approximately half the amplitude of the - // normal native signal amplitude. A value of +6.0 (dB) will play at - // approximately twice the amplitude of the normal native signal amplitude. - // Strongly recommend not to exceed +10 (dB) as there's usually no effective - // increase in loudness for any value greater than that. - double volume_gain_db = 4 [ - (google.api.field_behavior) = INPUT_ONLY, - (google.api.field_behavior) = OPTIONAL - ]; - - // Optional. The synthesis sample rate (in hertz) for this audio. 
When this is - // specified in SynthesizeSpeechRequest, if this is different from the voice's - // natural sample rate, then the synthesizer will honor this request by - // converting to the desired sample rate (which might result in worse audio - // quality), unless the specified sample rate is not supported for the - // encoding chosen, in which case it will fail the request and return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. - int32 sample_rate_hertz = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Input only. An identifier which selects 'audio effects' profiles - // that are applied on (post synthesized) text to speech. Effects are applied - // on top of each other in the order they are given. See - // [audio - // profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for - // current supported profile ids. - repeated string effects_profile_id = 6 [ - (google.api.field_behavior) = INPUT_ONLY, - (google.api.field_behavior) = OPTIONAL - ]; -} - -// Description of the custom voice to be synthesized. -message CustomVoiceParams { - // The usage of the synthesized audio. You must report your honest and - // correct usage of the service as it's regulated by contract and will cause - // significant difference in billing. - enum ReportedUsage { - // Request with reported usage unspecified will be rejected. - REPORTED_USAGE_UNSPECIFIED = 0; - - // For scenarios where the synthesized audio is not downloadable and can - // only be used once. For example, real-time request in IVR system. - REALTIME = 1; - - // For scenarios where the synthesized audio is downloadable and can be - // reused. For example, the synthesized audio is downloaded, stored in - // customer service system and played repeatedly. - OFFLINE = 2; - } - - // Required. The name of the AutoML model that synthesizes the custom voice. 
- string model = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "automl.googleapis.com/Model" } - ]; - - // Optional. The usage of the synthesized audio to be reported. - ReportedUsage reported_usage = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// The message returned to the client by the `SynthesizeSpeech` method. -message SynthesizeSpeechResponse { - // The audio data bytes encoded as specified in the request, including the - // header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS). - // For LINEAR16 audio, we include the WAV header. Note: as - // with all bytes fields, protobuffers use a pure binary representation, - // whereas JSON representations use base64. - bytes audio_content = 1; -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json deleted file mode 100644 index 9e72ec44856..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "clientLibrary": { - "name": "nodejs-texttospeech", - "version": "0.1.0", - "language": "TYPESCRIPT", - "apis": [ - { - "id": "google.cloud.texttospeech.v1", - "version": "v1" - } - ] - }, - "snippets": [ - { - "regionTag": "texttospeech_v1_generated_TextToSpeech_ListVoices_async", - "title": "TextToSpeech listVoices Sample", - "origin": "API_DEFINITION", - "description": " Returns a list of Voice supported for synthesis.", - "canonical": true, - "file": "text_to_speech.list_voices.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 59, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "ListVoices", - "fullName": "google.cloud.texttospeech.v1.TextToSpeech.ListVoices", - "async": true, - "parameters": [ - { - "name": 
"language_code", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.texttospeech.v1.ListVoicesResponse", - "client": { - "shortName": "TextToSpeechClient", - "fullName": "google.cloud.texttospeech.v1.TextToSpeechClient" - }, - "method": { - "shortName": "ListVoices", - "fullName": "google.cloud.texttospeech.v1.TextToSpeech.ListVoices", - "service": { - "shortName": "TextToSpeech", - "fullName": "google.cloud.texttospeech.v1.TextToSpeech" - } - } - } - }, - { - "regionTag": "texttospeech_v1_generated_TextToSpeech_SynthesizeSpeech_async", - "title": "TextToSpeech synthesizeSpeech Sample", - "origin": "API_DEFINITION", - "description": " Synthesizes speech synchronously: receive results after all text input has been processed.", - "canonical": true, - "file": "text_to_speech.synthesize_speech.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 63, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "SynthesizeSpeech", - "fullName": "google.cloud.texttospeech.v1.TextToSpeech.SynthesizeSpeech", - "async": true, - "parameters": [ - { - "name": "input", - "type": ".google.cloud.texttospeech.v1.SynthesisInput" - }, - { - "name": "voice", - "type": ".google.cloud.texttospeech.v1.VoiceSelectionParams" - }, - { - "name": "audio_config", - "type": ".google.cloud.texttospeech.v1.AudioConfig" - } - ], - "resultType": ".google.cloud.texttospeech.v1.SynthesizeSpeechResponse", - "client": { - "shortName": "TextToSpeechClient", - "fullName": "google.cloud.texttospeech.v1.TextToSpeechClient" - }, - "method": { - "shortName": "SynthesizeSpeech", - "fullName": "google.cloud.texttospeech.v1.TextToSpeech.SynthesizeSpeech", - "service": { - "shortName": "TextToSpeech", - "fullName": "google.cloud.texttospeech.v1.TextToSpeech" - } - } - } - }, - { - "regionTag": "texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async", - "title": "TextToSpeech synthesizeLongAudio Sample", - "origin": "API_DEFINITION", - 
"description": " Synthesizes long form text asynchronously.", - "canonical": true, - "file": "text_to_speech_long_audio_synthesize.synthesize_long_audio.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 74, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "SynthesizeLongAudio", - "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudio", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "input", - "type": ".google.cloud.texttospeech.v1.SynthesisInput" - }, - { - "name": "audio_config", - "type": ".google.cloud.texttospeech.v1.AudioConfig" - }, - { - "name": "output_gcs_uri", - "type": "TYPE_STRING" - }, - { - "name": "voice", - "type": ".google.cloud.texttospeech.v1.VoiceSelectionParams" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "TextToSpeechLongAudioSynthesizeClient", - "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesizeClient" - }, - "method": { - "shortName": "SynthesizeLongAudio", - "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudio", - "service": { - "shortName": "TextToSpeechLongAudioSynthesize", - "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize" - } - } - } - } - ] -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.list_voices.js b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.list_voices.js deleted file mode 100644 index 93e3359b3bc..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.list_voices.js +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main() { - // [START texttospeech_v1_generated_TextToSpeech_ListVoices_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Optional. Recommended. - * BCP-47 (https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. - * If not specified, the API will return all supported voices. - * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. For example, if you specify `"en-NZ"`, - * all `"en-NZ"` voices will be returned. If you specify `"no"`, both - * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be - * returned. 
- */ - // const languageCode = 'abc123' - - // Imports the Texttospeech library - const {TextToSpeechClient} = require('@google-cloud/text-to-speech').v1; - - // Instantiates a client - const texttospeechClient = new TextToSpeechClient(); - - async function callListVoices() { - // Construct request - const request = { - }; - - // Run request - const response = await texttospeechClient.listVoices(request); - console.log(response); - } - - callListVoices(); - // [END texttospeech_v1_generated_TextToSpeech_ListVoices_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.synthesize_speech.js b/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.synthesize_speech.js deleted file mode 100644 index e8a34543f95..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech.synthesize_speech.js +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - - -'use strict'; - -function main(input, voice, audioConfig) { - // [START texttospeech_v1_generated_TextToSpeech_SynthesizeSpeech_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The Synthesizer requires either plain text or SSML as input. - */ - // const input = {} - /** - * Required. The desired voice of the synthesized audio. - */ - // const voice = {} - /** - * Required. The configuration of the synthesized audio. - */ - // const audioConfig = {} - - // Imports the Texttospeech library - const {TextToSpeechClient} = require('@google-cloud/text-to-speech').v1; - - // Instantiates a client - const texttospeechClient = new TextToSpeechClient(); - - async function callSynthesizeSpeech() { - // Construct request - const request = { - input, - voice, - audioConfig, - }; - - // Run request - const response = await texttospeechClient.synthesizeSpeech(request); - console.log(response); - } - - callSynthesizeSpeech(); - // [END texttospeech_v1_generated_TextToSpeech_SynthesizeSpeech_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1/src/index.ts deleted file mode 100644 index ce80840dd10..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/src/index.ts +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as v1 from './v1'; -const TextToSpeechClient = v1.TextToSpeechClient; -type TextToSpeechClient = v1.TextToSpeechClient; -const TextToSpeechLongAudioSynthesizeClient = v1.TextToSpeechLongAudioSynthesizeClient; -type TextToSpeechLongAudioSynthesizeClient = v1.TextToSpeechLongAudioSynthesizeClient; -export {v1, TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient}; -export default {v1, TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient}; -import * as protos from '../protos/protos'; -export {protos} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/gapic_metadata.json b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/gapic_metadata.json deleted file mode 100644 index 2d09d513d5f..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/gapic_metadata.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "typescript", - "protoPackage": "google.cloud.texttospeech.v1", - "libraryPackage": "@google-cloud/text-to-speech", - "services": { - "TextToSpeech": { - "clients": { - "grpc": { - "libraryClient": "TextToSpeechClient", - "rpcs": { - "ListVoices": { - "methods": [ - "listVoices" - ] - }, - "SynthesizeSpeech": { - "methods": [ - "synthesizeSpeech" - ] - } - } - }, - "grpc-fallback": 
{ - "libraryClient": "TextToSpeechClient", - "rpcs": { - "ListVoices": { - "methods": [ - "listVoices" - ] - }, - "SynthesizeSpeech": { - "methods": [ - "synthesizeSpeech" - ] - } - } - } - } - }, - "TextToSpeechLongAudioSynthesize": { - "clients": { - "grpc": { - "libraryClient": "TextToSpeechLongAudioSynthesizeClient", - "rpcs": { - "SynthesizeLongAudio": { - "methods": [ - "synthesizeLongAudio" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "TextToSpeechLongAudioSynthesizeClient", - "rpcs": { - "SynthesizeLongAudio": { - "methods": [ - "synthesizeLongAudio" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/index.ts deleted file mode 100644 index 3cf28b93bc3..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/index.ts +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -export {TextToSpeechClient} from './text_to_speech_client'; -export {TextToSpeechLongAudioSynthesizeClient} from './text_to_speech_long_audio_synthesize_client'; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client.ts b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client.ts deleted file mode 100644 index 3095c93c41d..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client.ts +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import type * as gax from 'google-gax'; -import type {Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax'; - -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1/text_to_speech_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './text_to_speech_client_config.json'; -const version = require('../../../package.json').version; - -/** - * Service that implements Google Cloud Text-to-Speech API. 
- * @class - * @memberof v1 - */ -export class TextToSpeechClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - pathTemplates: {[name: string]: gax.PathTemplate}; - textToSpeechStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of TextToSpeechClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. 
If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you - * need to avoid loading the default gRPC version and want to use the fallback - * HTTP implementation. Load only fallback version and pass it to the constructor: - * ``` - * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC - * const client = new TextToSpeechClient({fallback: 'rest'}, gax); - * ``` - */ - constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof TextToSpeechClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // Request numeric enum values if REST transport is used. 
- opts.numericEnums = true; - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Load google-gax module synchronously if needed - if (!gaxInstance) { - gaxInstance = require('google-gax') as typeof gax; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. 
- this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // This API contains "path templates"; forward-slash-separated - // identifiers to uniquely identify resources within the API. - // Create useful helper objects for these. - this.pathTemplates = { - modelPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/models/{model}' - ), - }; - - // Put together the default options sent with requests. - this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.texttospeech.v1.TextToSpeech', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = this._gaxModule.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.textToSpeechStub) { - return this.textToSpeechStub; - } - - // Put together the "service stub" for - // google.cloud.texttospeech.v1.TextToSpeech. - this.textToSpeechStub = this._gaxGrpc.createStub( - this._opts.fallback ? 
- (this._protos as protobuf.Root).lookupService('google.cloud.texttospeech.v1.TextToSpeech') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.texttospeech.v1.TextToSpeech, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. - const textToSpeechStubMethods = - ['listVoices', 'synthesizeSpeech']; - for (const methodName of textToSpeechStubMethods) { - const callPromise = this.textToSpeechStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor, - this._opts.fallback - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.textToSpeechStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'texttospeech.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'texttospeech.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. 
- */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. - */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- -/** - * Returns a list of Voice supported for synthesis. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} [request.languageCode] - * Optional. Recommended. - * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. - * If not specified, the API will return all supported voices. - * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. For example, if you specify `"en-NZ"`, - * all `"en-NZ"` voices will be returned. If you specify `"no"`, both - * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be - * returned. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [ListVoicesResponse]{@link google.cloud.texttospeech.v1.ListVoicesResponse}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/text_to_speech.list_voices.js - * region_tag:texttospeech_v1_generated_TextToSpeech_ListVoices_async - */ - listVoices( - request?: protos.google.cloud.texttospeech.v1.IListVoicesRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.texttospeech.v1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1.IListVoicesRequest|undefined, {}|undefined - ]>; - listVoices( - request: protos.google.cloud.texttospeech.v1.IListVoicesRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.texttospeech.v1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1.IListVoicesRequest|null|undefined, - {}|null|undefined>): void; - listVoices( - request: protos.google.cloud.texttospeech.v1.IListVoicesRequest, - callback: Callback< - protos.google.cloud.texttospeech.v1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1.IListVoicesRequest|null|undefined, - {}|null|undefined>): void; - listVoices( - request?: protos.google.cloud.texttospeech.v1.IListVoicesRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.texttospeech.v1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1.IListVoicesRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.texttospeech.v1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1.IListVoicesRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.texttospeech.v1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1.IListVoicesRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - this.initialize(); - return 
this.innerApiCalls.listVoices(request, options, callback); - } -/** - * Synthesizes speech synchronously: receive results after all text input - * has been processed. - * - * @param {Object} request - * The request object that will be sent. - * @param {google.cloud.texttospeech.v1.SynthesisInput} request.input - * Required. The Synthesizer requires either plain text or SSML as input. - * @param {google.cloud.texttospeech.v1.VoiceSelectionParams} request.voice - * Required. The desired voice of the synthesized audio. - * @param {google.cloud.texttospeech.v1.AudioConfig} request.audioConfig - * Required. The configuration of the synthesized audio. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [SynthesizeSpeechResponse]{@link google.cloud.texttospeech.v1.SynthesizeSpeechResponse}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/text_to_speech.synthesize_speech.js - * region_tag:texttospeech_v1_generated_TextToSpeech_SynthesizeSpeech_async - */ - synthesizeSpeech( - request?: protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|undefined, {}|undefined - ]>; - synthesizeSpeech( - request: protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|null|undefined, - {}|null|undefined>): void; - synthesizeSpeech( - request: protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest, - callback: Callback< - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|null|undefined, - {}|null|undefined>): void; - synthesizeSpeech( - request?: protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1.ISynthesizeSpeechRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - 
options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - this.initialize(); - return this.innerApiCalls.synthesizeSpeech(request, options, callback); - } - - // -------------------- - // -- Path templates -- - // -------------------- - - /** - * Return a fully-qualified model resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} model - * @returns {string} Resource name string. - */ - modelPath(project:string,location:string,model:string) { - return this.pathTemplates.modelPathTemplate.render({ - project: project, - location: location, - model: model, - }); - } - - /** - * Parse the project from Model resource. - * - * @param {string} modelName - * A fully-qualified path representing Model resource. - * @returns {string} A string representing the project. - */ - matchProjectFromModelName(modelName: string) { - return this.pathTemplates.modelPathTemplate.match(modelName).project; - } - - /** - * Parse the location from Model resource. - * - * @param {string} modelName - * A fully-qualified path representing Model resource. - * @returns {string} A string representing the location. - */ - matchLocationFromModelName(modelName: string) { - return this.pathTemplates.modelPathTemplate.match(modelName).location; - } - - /** - * Parse the model from Model resource. - * - * @param {string} modelName - * A fully-qualified path representing Model resource. - * @returns {string} A string representing the model. - */ - matchModelFromModelName(modelName: string) { - return this.pathTemplates.modelPathTemplate.match(modelName).model; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.textToSpeechStub && !this._terminated) { - return this.textToSpeechStub.then(stub => { - this._terminated = true; - stub.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client_config.json b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client_config.json deleted file mode 100644 index 716efc6e558..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_client_config.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "interfaces": { - "google.cloud.texttospeech.v1.TextToSpeech": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "ListVoices": { - "timeout_millis": 300000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - }, - "SynthesizeSpeech": { - "timeout_millis": 300000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_proto_list.json b/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_proto_list.json deleted file mode 100644 index 58814dcb836..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_proto_list.json +++ /dev/null @@ -1,4 +0,0 @@ -[ - "../../protos/google/cloud/texttospeech/v1/cloud_tts.proto", - "../../protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto" -] diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.js 
deleted file mode 100644 index a08648fec57..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.js +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -/* eslint-disable node/no-missing-require, no-unused-vars */ -const texttospeech = require('@google-cloud/text-to-speech'); - -function main() { - const textToSpeechClient = new texttospeech.TextToSpeechClient(); - const textToSpeechLongAudioSynthesizeClient = new texttospeech.TextToSpeechLongAudioSynthesizeClient(); -} - -main(); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.ts deleted file mode 100644 index 14b3691b2df..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/system-test/fixtures/sample/src/index.ts +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient} from '@google-cloud/text-to-speech'; - -// check that the client class type name can be used -function doStuffWithTextToSpeechClient(client: TextToSpeechClient) { - client.close(); -} -function doStuffWithTextToSpeechLongAudioSynthesizeClient(client: TextToSpeechLongAudioSynthesizeClient) { - client.close(); -} - -function main() { - // check that the client instance can be created - const textToSpeechClient = new TextToSpeechClient(); - doStuffWithTextToSpeechClient(textToSpeechClient); - // check that the client instance can be created - const textToSpeechLongAudioSynthesizeClient = new TextToSpeechLongAudioSynthesizeClient(); - doStuffWithTextToSpeechLongAudioSynthesizeClient(textToSpeechLongAudioSynthesizeClient); -} - -main(); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/system-test/install.ts b/owl-bot-staging/google-cloud-texttospeech/v1/system-test/install.ts deleted file mode 100644 index 557a57558e1..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/system-test/install.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {packNTest} from 'pack-n-play'; -import {readFileSync} from 'fs'; -import {describe, it} from 'mocha'; - -describe('📦 pack-n-play test', () => { - - it('TypeScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'TypeScript user can use the type definitions', - ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() - } - }; - await packNTest(options); - }); - - it('JavaScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'JavaScript user can use the library', - ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() - } - }; - await packNTest(options); - }); - -}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_long_audio_synthesize_v1.ts b/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_long_audio_synthesize_v1.ts deleted file mode 100644 index 1cd8b48eaf5..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_long_audio_synthesize_v1.ts +++ /dev/null @@ -1,591 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import {describe, it} from 'mocha'; -import * as texttospeechlongaudiosynthesizeModule from '../src'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -// Dynamically loaded proto JSON is needed to get the type information -// to fill in default values for request objects -const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function getTypeDefaultValue(typeName: string, fields: string[]) { - let type = root.lookupType(typeName) as protobuf.Type; - for (const field of fields.slice(0, -1)) { - type = type.fields[field]?.resolvedType as protobuf.Type; - } - return type.fields[fields[fields.length - 1]]?.defaultValue; -} - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? 
sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { - let counter = 0; - const asyncIterable = { - [Symbol.asyncIterator]() { - return { - async next() { - if (error) { - return Promise.reject(error); - } - if (counter >= responses!.length) { - return Promise.resolve({done: true, value: undefined}); - } - return Promise.resolve({done: false, value: responses![counter++]}); - } - }; - } - }; - return sinon.stub().returns(asyncIterable); -} - -describe('v1.TextToSpeechLongAudioSynthesizeClient', () => { - describe('Common methods', () => { - it('has servicePath', () => { - const servicePath = texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new 
texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.textToSpeechLongAudioSynthesizeStub, undefined); - await client.initialize(); - assert(client.textToSpeechLongAudioSynthesizeStub); - }); - - it('has close method for the initialized client', done => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.textToSpeechLongAudioSynthesizeStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.textToSpeechLongAudioSynthesizeStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - 
assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - }); - - describe('synthesizeLongAudio', () => { - it('invokes synthesizeLongAudio without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(expectedResponse); - const [operation] = await client.synthesizeLongAudio(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes synthesizeLongAudio without error using callback', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.synthesizeLongAudio = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.synthesizeLongAudio( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes synthesizeLongAudio with call error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - 
}); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.synthesizeLongAudio(request), expectedError); - const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes synthesizeLongAudio with LRO error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.synthesizeLongAudio(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[0]; - 
assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkSynthesizeLongAudioProgress without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkSynthesizeLongAudioProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkSynthesizeLongAudioProgress with error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkSynthesizeLongAudioProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - describe('getOperation', () => { - it('invokes getOperation without error', async () => { - const client = new 
texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.GetOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const response = await client.getOperation(request); - assert.deepStrictEqual(response, [expectedResponse]); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0).calledWith(request) - ); - }); - it('invokes getOperation without error using callback', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.GetOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - client.operationsClient.getOperation = sinon.stub().callsArgWith(2, null, expectedResponse); - const promise = new Promise((resolve, reject) => { - client.operationsClient.getOperation( - request, - undefined, - ( - err?: Error | null, - result?: operationsProtos.google.longrunning.Operation | null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - it('invokes getOperation with error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const 
request = generateSampleMessage( - new operationsProtos.google.longrunning.GetOperationRequest() - ); - const expectedError = new Error('expected'); - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(async () => {await client.getOperation(request)}, expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0).calledWith(request)); - }); - }); - describe('cancelOperation', () => { - it('invokes cancelOperation without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.CancelOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.operationsClient.cancelOperation = stubSimpleCall(expectedResponse); - const response = await client.cancelOperation(request); - assert.deepStrictEqual(response, [expectedResponse]); - assert((client.operationsClient.cancelOperation as SinonStub) - .getCall(0).calledWith(request) - ); - }); - it('invokes cancelOperation without error using callback', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.CancelOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.operationsClient.cancelOperation = sinon.stub().callsArgWith(2, null, expectedResponse); - const promise = new Promise((resolve, reject) => { - client.operationsClient.cancelOperation( - request, - undefined, - ( - err?: Error | null, - result?: 
protos.google.protobuf.Empty | null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.operationsClient.cancelOperation as SinonStub) - .getCall(0)); - }); - it('invokes cancelOperation with error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.CancelOperationRequest() - ); - const expectedError = new Error('expected'); - client.operationsClient.cancelOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(async () => {await client.cancelOperation(request)}, expectedError); - assert((client.operationsClient.cancelOperation as SinonStub) - .getCall(0).calledWith(request)); - }); - }); - describe('deleteOperation', () => { - it('invokes deleteOperation without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.DeleteOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.operationsClient.deleteOperation = stubSimpleCall(expectedResponse); - const response = await client.deleteOperation(request); - assert.deepStrictEqual(response, [expectedResponse]); - assert((client.operationsClient.deleteOperation as SinonStub) - .getCall(0).calledWith(request) - ); - }); - it('invokes deleteOperation without error using callback', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - 
credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.DeleteOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.operationsClient.deleteOperation = sinon.stub().callsArgWith(2, null, expectedResponse); - const promise = new Promise((resolve, reject) => { - client.operationsClient.deleteOperation( - request, - undefined, - ( - err?: Error | null, - result?: protos.google.protobuf.Empty | null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.operationsClient.deleteOperation as SinonStub) - .getCall(0)); - }); - it('invokes deleteOperation with error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.DeleteOperationRequest() - ); - const expectedError = new Error('expected'); - client.operationsClient.deleteOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(async () => {await client.deleteOperation(request)}, expectedError); - assert((client.operationsClient.deleteOperation as SinonStub) - .getCall(0).calledWith(request)); - }); - }); - describe('listOperationsAsync', () => { - it('uses async iteration with listOperations without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.ListOperationsRequest() - ); - const expectedResponse = [ - 
generateSampleMessage( - new operationsProtos.google.longrunning.ListOperationsResponse() - ), - generateSampleMessage( - new operationsProtos.google.longrunning.ListOperationsResponse() - ), - generateSampleMessage( - new operationsProtos.google.longrunning.ListOperationsResponse() - ), - ]; - client.operationsClient.descriptor.listOperations.asyncIterate = stubAsyncIterationCall(expectedResponse); - const responses: operationsProtos.google.longrunning.ListOperationsResponse[] = []; - const iterable = client.operationsClient.listOperationsAsync(request); - for await (const resource of iterable) { - responses.push(resource!); - } - assert.deepStrictEqual(responses, expectedResponse); - assert.deepStrictEqual( - (client.operationsClient.descriptor.listOperations.asyncIterate as SinonStub) - .getCall(0).args[1], request); - }); - it('uses async iteration with listOperations with error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.ListOperationsRequest() - ); - const expectedError = new Error('expected'); - client.operationsClient.descriptor.listOperations.asyncIterate = stubAsyncIterationCall(undefined, expectedError); - const iterable = client.operationsClient.listOperationsAsync(request); - await assert.rejects(async () => { - const responses: operationsProtos.google.longrunning.ListOperationsResponse[] = []; - for await (const resource of iterable) { - responses.push(resource!); - } - }); - assert.deepStrictEqual( - (client.operationsClient.descriptor.listOperations.asyncIterate as SinonStub) - .getCall(0).args[1], request); - }); - }); -}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_v1.ts 
b/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_v1.ts deleted file mode 100644 index c74edffd0ec..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/test/gapic_text_to_speech_v1.ts +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import {describe, it} from 'mocha'; -import * as texttospeechModule from '../src'; - -import {protobuf} from 'google-gax'; - -// Dynamically loaded proto JSON is needed to get the type information -// to fill in default values for request objects -const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function getTypeDefaultValue(typeName: string, fields: string[]) { - let type = root.lookupType(typeName) as protobuf.Type; - for (const field of fields.slice(0, -1)) { - type = type.fields[field]?.resolvedType as protobuf.Type; - } - return type.fields[fields[fields.length - 1]]?.defaultValue; -} - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { - return error ? 
sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); -} - -describe('v1.TextToSpeechClient', () => { - describe('Common methods', () => { - it('has servicePath', () => { - const servicePath = texttospeechModule.v1.TextToSpeechClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = texttospeechModule.v1.TextToSpeechClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = texttospeechModule.v1.TextToSpeechClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new texttospeechModule.v1.TextToSpeechClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.textToSpeechStub, undefined); - await client.initialize(); - assert(client.textToSpeechStub); - }); - - it('has close method for the initialized client', done => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.textToSpeechStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.textToSpeechStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const 
fakeProjectId = 'fake-project-id'; - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - }); - - describe('listVoices', () => { - it('invokes listVoices without error', async () => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.ListVoicesRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.ListVoicesResponse() - ); - client.innerApiCalls.listVoices = stubSimpleCall(expectedResponse); - const [response] = await client.listVoices(request); - assert.deepStrictEqual(response, expectedResponse); - }); - - it('invokes listVoices without error using callback', async () => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = 
generateSampleMessage( - new protos.google.cloud.texttospeech.v1.ListVoicesRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.ListVoicesResponse() - ); - client.innerApiCalls.listVoices = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.listVoices( - request, - (err?: Error|null, result?: protos.google.cloud.texttospeech.v1.IListVoicesResponse|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - }); - - it('invokes listVoices with error', async () => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.ListVoicesRequest() - ); - const expectedError = new Error('expected'); - client.innerApiCalls.listVoices = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.listVoices(request), expectedError); - }); - - it('invokes listVoices with closed client', async () => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.ListVoicesRequest() - ); - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.listVoices(request), expectedError); - }); - }); - - describe('synthesizeSpeech', () => { - it('invokes synthesizeSpeech without error', async () => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = 
generateSampleMessage( - new protos.google.cloud.texttospeech.v1.SynthesizeSpeechRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.SynthesizeSpeechResponse() - ); - client.innerApiCalls.synthesizeSpeech = stubSimpleCall(expectedResponse); - const [response] = await client.synthesizeSpeech(request); - assert.deepStrictEqual(response, expectedResponse); - }); - - it('invokes synthesizeSpeech without error using callback', async () => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.SynthesizeSpeechRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.SynthesizeSpeechResponse() - ); - client.innerApiCalls.synthesizeSpeech = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.synthesizeSpeech( - request, - (err?: Error|null, result?: protos.google.cloud.texttospeech.v1.ISynthesizeSpeechResponse|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - }); - - it('invokes synthesizeSpeech with error', async () => { - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.SynthesizeSpeechRequest() - ); - const expectedError = new Error('expected'); - client.innerApiCalls.synthesizeSpeech = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.synthesizeSpeech(request), expectedError); - }); - - it('invokes synthesizeSpeech with closed client', async () => { - const 
client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1.SynthesizeSpeechRequest() - ); - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.synthesizeSpeech(request), expectedError); - }); - }); - - describe('Path templates', () => { - - describe('model', () => { - const fakePath = "/rendered/path/model"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - model: "modelValue", - }; - const client = new texttospeechModule.v1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.modelPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.modelPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('modelPath', () => { - const result = client.modelPath("projectValue", "locationValue", "modelValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.modelPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromModelName', () => { - const result = client.matchProjectFromModelName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.modelPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromModelName', () => { - const result = client.matchLocationFromModelName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.modelPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchModelFromModelName', () => { - const result = client.matchModelFromModelName(fakePath); - assert.strictEqual(result, "modelValue"); - 
assert((client.pathTemplates.modelPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - }); -}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/tsconfig.json b/owl-bot-staging/google-cloud-texttospeech/v1/tsconfig.json deleted file mode 100644 index c78f1c884ef..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/tsconfig.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "extends": "./node_modules/gts/tsconfig-google.json", - "compilerOptions": { - "rootDir": ".", - "outDir": "build", - "resolveJsonModule": true, - "lib": [ - "es2018", - "dom" - ] - }, - "include": [ - "src/*.ts", - "src/**/*.ts", - "test/*.ts", - "test/**/*.ts", - "system-test/*.ts" - ] -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/webpack.config.js b/owl-bot-staging/google-cloud-texttospeech/v1/webpack.config.js deleted file mode 100644 index 25f059a0979..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1/webpack.config.js +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -const path = require('path'); - -module.exports = { - entry: './src/index.ts', - output: { - library: 'TextToSpeech', - filename: './text-to-speech.js', - }, - node: { - child_process: 'empty', - fs: 'empty', - crypto: 'empty', - }, - resolve: { - alias: { - '../../../package.json': path.resolve(__dirname, 'package.json'), - }, - extensions: ['.js', '.json', '.ts'], - }, - module: { - rules: [ - { - test: /\.tsx?$/, - use: 'ts-loader', - exclude: /node_modules/ - }, - { - test: /node_modules[\\/]@grpc[\\/]grpc-js/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]grpc/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]retry-request/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]https?-proxy-agent/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]gtoken/, - use: 'null-loader' - }, - ], - }, - mode: 'production', -}; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintignore b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintignore deleted file mode 100644 index cfc348ec4d1..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintignore +++ /dev/null @@ -1,7 +0,0 @@ -**/node_modules -**/.coverage -build/ -docs/ -protos/ -system-test/ -samples/generated/ diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintrc.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintrc.json deleted file mode 100644 index 78215349546..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.eslintrc.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "./node_modules/gts" -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.gitignore b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.gitignore deleted file mode 100644 index 5d32b23782f..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -**/*.log -**/node_modules -.coverage -coverage -.nyc_output -docs/ -out/ -build/ -system-test/secrets.js 
-system-test/*key.json -*.lock -.DS_Store -package-lock.json -__pycache__ diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.jsdoc.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.jsdoc.js deleted file mode 100644 index 929b3c59840..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.jsdoc.js +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -'use strict'; - -module.exports = { - opts: { - readme: './README.md', - package: './package.json', - template: './node_modules/jsdoc-fresh', - recurse: true, - verbose: true, - destination: './docs/' - }, - plugins: [ - 'plugins/markdown', - 'jsdoc-region-tag' - ], - source: { - excludePattern: '(^|\\/|\\\\)[._]', - include: [ - 'build/src', - 'protos' - ], - includePattern: '\\.js$' - }, - templates: { - copyright: 'Copyright 2022 Google LLC', - includeDate: false, - sourceFiles: false, - systemName: '@google-cloud/text-to-speech', - theme: 'lumen', - default: { - outputSourceFiles: false - } - }, - markdown: { - idInHeadings: true - } -}; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.mocharc.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.mocharc.js deleted file mode 100644 index 481c522b00f..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.mocharc.js +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -const config = { - "enable-source-maps": true, - "throw-deprecation": true, - "timeout": 10000 -} -if (process.env.MOCHA_THROW_DEPRECATION === 'false') { - delete config['throw-deprecation']; -} -if (process.env.MOCHA_REPORTER) { - config.reporter = process.env.MOCHA_REPORTER; -} -if (process.env.MOCHA_REPORTER_OUTPUT) { - config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; -} -module.exports = config diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.prettierrc.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/.prettierrc.js deleted file mode 100644 index 494e147865d..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/.prettierrc.js +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - -module.exports = { - ...require('gts/.prettierrc.json') -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/README.md b/owl-bot-staging/google-cloud-texttospeech/v1beta1/README.md deleted file mode 100644 index 3eaadd6a1c8..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/README.md +++ /dev/null @@ -1 +0,0 @@ -Texttospeech: Nodejs Client diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/linkinator.config.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/linkinator.config.json deleted file mode 100644 index befd23c8633..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/linkinator.config.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "recurse": true, - "skip": [ - "https://codecov.io/gh/googleapis/", - "www.googleapis.com", - "img.shields.io", - "https://console.cloud.google.com/cloudshell", - "https://support.google.com" - ], - "silent": true, - "concurrency": 5, - "retry": true, - "retryErrors": true, - "retryErrorsCount": 5, - "retryErrorsJitter": 3000 -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/package.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/package.json deleted file mode 100644 index e0bd4130ffb..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/package.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "@google-cloud/text-to-speech", - "version": "0.1.0", - "description": "Texttospeech client for Node.js", - "repository": "googleapis/nodejs-texttospeech", - "license": "Apache-2.0", - "author": "Google LLC", - "main": "build/src/index.js", - "files": [ - "build/src", - "build/protos" - ], - "keywords": [ - "google apis client", - "google api client", - "google apis", - "google api", - "google", - "google cloud platform", - "google cloud", - "cloud", - "google texttospeech", - "texttospeech", - "text to speech", - "text to speech long audio synthesize" - ], - "scripts": { - "clean": "gts clean", - "compile": "tsc -p . 
&& cp -r protos build/ && minifyProtoJson", - "compile-protos": "compileProtos src", - "docs": "jsdoc -c .jsdoc.js", - "predocs-test": "npm run docs", - "docs-test": "linkinator docs", - "fix": "gts fix", - "lint": "gts check", - "prepare": "npm run compile-protos && npm run compile", - "system-test": "c8 mocha build/system-test", - "test": "c8 mocha build/test" - }, - "dependencies": { - "google-gax": "^3.5.2" - }, - "devDependencies": { - "@types/mocha": "^9.1.1", - "@types/node": "^16.11.62", - "@types/sinon": "^10.0.13", - "c8": "^7.12.0", - "gts": "^3.1.1", - "jsdoc": "^3.6.11", - "jsdoc-fresh": "^2.0.1", - "jsdoc-region-tag": "^2.0.1", - "linkinator": "^4.0.3", - "mocha": "^10.0.0", - "null-loader": "^4.0.1", - "pack-n-play": "^1.0.0-2", - "sinon": "^14.0.0", - "ts-loader": "^8.4.0", - "typescript": "^4.8.3", - "webpack": "^4.46.0", - "webpack-cli": "^4.10.0" - }, - "engines": { - "node": ">=v12" - } -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto b/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto deleted file mode 100644 index 63e5361dc0a..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.cloud.texttospeech.v1beta1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; - -option cc_enable_arenas = true; -option csharp_namespace = "Google.Cloud.TextToSpeech.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1;texttospeech"; -option java_multiple_files = true; -option java_outer_classname = "TextToSpeechProto"; -option java_package = "com.google.cloud.texttospeech.v1beta1"; -option php_namespace = "Google\\Cloud\\TextToSpeech\\V1beta1"; -option ruby_package = "Google::Cloud::TextToSpeech::V1beta1"; -option (google.api.resource_definition) = { - type: "automl.googleapis.com/Model" - pattern: "projects/{project}/locations/{location}/models/{model}" -}; - -// Service that implements Google Cloud Text-to-Speech API. -service TextToSpeech { - option (google.api.default_host) = "texttospeech.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; - - // Returns a list of Voice supported for synthesis. - rpc ListVoices(ListVoicesRequest) returns (ListVoicesResponse) { - option (google.api.http) = { - get: "/v1beta1/voices" - }; - option (google.api.method_signature) = "language_code"; - } - - // Synthesizes speech synchronously: receive results after all text input - // has been processed. - rpc SynthesizeSpeech(SynthesizeSpeechRequest) returns (SynthesizeSpeechResponse) { - option (google.api.http) = { - post: "/v1beta1/text:synthesize" - body: "*" - }; - option (google.api.method_signature) = "input,voice,audio_config"; - } -} - -// Gender of the voice as described in -// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice). -enum SsmlVoiceGender { - // An unspecified gender. - // In VoiceSelectionParams, this means that the client doesn't care which - // gender the selected voice will have. 
In the Voice field of - // ListVoicesResponse, this may mean that the voice doesn't fit any of the - // other categories in this enum, or that the gender of the voice isn't known. - SSML_VOICE_GENDER_UNSPECIFIED = 0; - - // A male voice. - MALE = 1; - - // A female voice. - FEMALE = 2; - - // A gender-neutral voice. This voice is not yet supported. - NEUTRAL = 3; -} - -// Configuration to set up audio encoder. The encoding determines the output -// audio format that we'd like. -enum AudioEncoding { - // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. - AUDIO_ENCODING_UNSPECIFIED = 0; - - // Uncompressed 16-bit signed little-endian samples (Linear PCM). - // Audio content returned as LINEAR16 also contains a WAV header. - LINEAR16 = 1; - - // MP3 audio at 32kbps. - MP3 = 2; - - // MP3 at 64kbps. - MP3_64_KBPS = 4; - - // Opus encoded audio wrapped in an ogg container. The result will be a - // file which can be played natively on Android, and in browsers (at least - // Chrome and Firefox). The quality of the encoding is considerably higher - // than MP3 while using approximately the same bitrate. - OGG_OPUS = 3; - - // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. - // Audio content returned as MULAW also contains a WAV header. - MULAW = 5; - - // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law. - // Audio content returned as ALAW also contains a WAV header. - ALAW = 6; -} - -// The top-level message sent by the client for the `ListVoices` method. -message ListVoicesRequest { - // Optional. Recommended. - // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. - // If not specified, the API will return all supported voices. - // If specified, the ListVoices call will only return voices that can be used - // to synthesize this language_code. For example, if you specify `"en-NZ"`, - // all `"en-NZ"` voices will be returned. 
If you specify `"no"`, both - // `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be - // returned. - string language_code = 1 [(google.api.field_behavior) = OPTIONAL]; -} - -// The message returned to the client by the `ListVoices` method. -message ListVoicesResponse { - // The list of voices. - repeated Voice voices = 1; -} - -// Description of a voice supported by the TTS service. -message Voice { - // The languages that this voice supports, expressed as - // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g. - // "en-US", "es-419", "cmn-tw"). - repeated string language_codes = 1; - - // The name of this voice. Each distinct voice has a unique name. - string name = 2; - - // The gender of this voice. - SsmlVoiceGender ssml_gender = 3; - - // The natural sample rate (in hertz) for this voice. - int32 natural_sample_rate_hertz = 4; -} - -// The top-level message sent by the client for the `SynthesizeSpeech` method. -message SynthesizeSpeechRequest { - // The type of timepoint information that is returned in the response. - enum TimepointType { - // Not specified. No timepoint information will be returned. - TIMEPOINT_TYPE_UNSPECIFIED = 0; - - // Timepoint information of `` tags in SSML input will be returned. - SSML_MARK = 1; - } - - // Required. The Synthesizer requires either plain text or SSML as input. - SynthesisInput input = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The desired voice of the synthesized audio. - VoiceSelectionParams voice = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The configuration of the synthesized audio. - AudioConfig audio_config = 3 [(google.api.field_behavior) = REQUIRED]; - - // Whether and what timepoints are returned in the response. - repeated TimepointType enable_time_pointing = 4; -} - -// Contains text input to be synthesized. Either `text` or `ssml` must be -// supplied. 
Supplying both or neither returns -// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. The input size is limited to 5000 -// bytes. -message SynthesisInput { - // The input source, which is either plain text or SSML. - oneof input_source { - // The raw text to be synthesized. - string text = 1; - - // The SSML document to be synthesized. The SSML document must be valid - // and well-formed. Otherwise the RPC will fail and return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. For more information, see - // [SSML](https://cloud.google.com/text-to-speech/docs/ssml). - string ssml = 2; - } -} - -// Description of which voice to use for a synthesis request. -message VoiceSelectionParams { - // Required. The language (and potentially also the region) of the voice expressed as a - // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. - // "en-US". This should not include a script tag (e.g. use - // "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred - // from the input provided in the SynthesisInput. The TTS service - // will use this parameter to help choose an appropriate voice. Note that - // the TTS service may choose a voice with a slightly different language code - // than the one selected; it may substitute a different region - // (e.g. using en-US rather than en-CA if there isn't a Canadian voice - // available), or even a different language, e.g. using "nb" (Norwegian - // Bokmal) instead of "no" (Norwegian)". - string language_code = 1 [(google.api.field_behavior) = REQUIRED]; - - // The name of the voice. If not set, the service will choose a - // voice based on the other parameters such as language_code and gender. - string name = 2; - - // The preferred gender of the voice. If not set, the service will - // choose a voice based on the other parameters such as language_code and - // name. 
Note that this is only a preference, not requirement; if a - // voice of the appropriate gender is not available, the synthesizer should - // substitute a voice with a different gender rather than failing the request. - SsmlVoiceGender ssml_gender = 3; - - // The configuration for a custom voice. If [CustomVoiceParams.model] is set, - // the service will choose the custom voice matching the specified - // configuration. - CustomVoiceParams custom_voice = 4; -} - -// Description of audio data to be synthesized. -message AudioConfig { - // Required. The format of the audio byte stream. - AudioEncoding audio_encoding = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Input only. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is - // the normal native speed supported by the specific voice. 2.0 is twice as - // fast, and 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 - // speed. Any other values < 0.25 or > 4.0 will return an error. - double speaking_rate = 2 [ - (google.api.field_behavior) = INPUT_ONLY, - (google.api.field_behavior) = OPTIONAL - ]; - - // Optional. Input only. Speaking pitch, in the range [-20.0, 20.0]. 20 means - // increase 20 semitones from the original pitch. -20 means decrease 20 - // semitones from the original pitch. - double pitch = 3 [ - (google.api.field_behavior) = INPUT_ONLY, - (google.api.field_behavior) = OPTIONAL - ]; - - // Optional. Input only. Volume gain (in dB) of the normal native volume - // supported by the specific voice, in the range [-96.0, 16.0]. If unset, or - // set to a value of 0.0 (dB), will play at normal native signal amplitude. A - // value of -6.0 (dB) will play at approximately half the amplitude of the - // normal native signal amplitude. A value of +6.0 (dB) will play at - // approximately twice the amplitude of the normal native signal amplitude. - // Strongly recommend not to exceed +10 (dB) as there's usually no effective - // increase in loudness for any value greater than that. 
- double volume_gain_db = 4 [ - (google.api.field_behavior) = INPUT_ONLY, - (google.api.field_behavior) = OPTIONAL - ]; - - // Optional. The synthesis sample rate (in hertz) for this audio. When this is - // specified in SynthesizeSpeechRequest, if this is different from the voice's - // natural sample rate, then the synthesizer will honor this request by - // converting to the desired sample rate (which might result in worse audio - // quality), unless the specified sample rate is not supported for the - // encoding chosen, in which case it will fail the request and return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. - int32 sample_rate_hertz = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Input only. An identifier which selects 'audio effects' profiles - // that are applied on (post synthesized) text to speech. Effects are applied - // on top of each other in the order they are given. See - // [audio - // profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for - // current supported profile ids. - repeated string effects_profile_id = 6 [ - (google.api.field_behavior) = INPUT_ONLY, - (google.api.field_behavior) = OPTIONAL - ]; -} - -// Description of the custom voice to be synthesized. -message CustomVoiceParams { - // The usage of the synthesized audio. You must report your honest and - // correct usage of the service as it's regulated by contract and will cause - // significant difference in billing. - enum ReportedUsage { - // Request with reported usage unspecified will be rejected. - REPORTED_USAGE_UNSPECIFIED = 0; - - // For scenarios where the synthesized audio is not downloadable and can - // only be used once. For example, real-time request in IVR system. - REALTIME = 1; - - // For scenarios where the synthesized audio is downloadable and can be - // reused. For example, the synthesized audio is downloaded, stored in - // customer service system and played repeatedly. 
- OFFLINE = 2; - } - - // Required. The name of the AutoML model that synthesizes the custom voice. - string model = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } - ]; - - // Optional. The usage of the synthesized audio to be reported. - ReportedUsage reported_usage = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// The message returned to the client by the `SynthesizeSpeech` method. -message SynthesizeSpeechResponse { - // The audio data bytes encoded as specified in the request, including the - // header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS). - // For LINEAR16 audio, we include the WAV header. Note: as - // with all bytes fields, protobuffers use a pure binary representation, - // whereas JSON representations use base64. - bytes audio_content = 1; - - // A link between a position in the original request input and a corresponding - // time in the output audio. It's only supported via `` of SSML input. - repeated Timepoint timepoints = 2; - - // The audio metadata of `audio_content`. - AudioConfig audio_config = 4; -} - -// This contains a mapping between a certain point in the input text and a -// corresponding time in the output audio. -message Timepoint { - // Timepoint name as received from the client within `` tag. - string mark_name = 4; - - // Time offset in seconds from the start of the synthesized audio. 
- double time_seconds = 3; -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto b/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto deleted file mode 100644 index 9adcd6ae600..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.texttospeech.v1beta1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/cloud/texttospeech/v1beta1/cloud_tts.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/timestamp.proto"; - -option cc_enable_arenas = true; -option csharp_namespace = "Google.Cloud.TextToSpeech.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1;texttospeech"; -option java_multiple_files = true; -option java_outer_classname = "TextToSpeechLongAudioSynthesisProto"; -option java_package = "com.google.cloud.texttospeech.v1beta1"; -option php_namespace = "Google\\Cloud\\TextToSpeech\\V1beta1"; -option ruby_package = "Google::Cloud::TextToSpeech::V1beta1"; - -// Service that implements Google Cloud Text-to-Speech API. 
-service TextToSpeechLongAudioSynthesize { - option (google.api.default_host) = "texttospeech.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; - - // Synthesizes long form text asynchronously. - rpc SynthesizeLongAudio(SynthesizeLongAudioRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta1/{parent=projects/*/locations/*/voices/*}:SynthesizeLongAudio" - body: "*" - }; - option (google.longrunning.operation_info) = { - response_type: "SynthesizeLongAudioResponse" - metadata_type: "SynthesizeLongAudioMetadata" - }; - } -} - -// The top-level message sent by the client for the -// `SynthesizeLongAudio` method. -message SynthesizeLongAudioRequest { - // The resource states of the request in the form of - // projects/*/locations/*/voices/*. - string parent = 1; - - // Required. The Synthesizer requires either plain text or SSML as input. - SynthesisInput input = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The configuration of the synthesized audio. - AudioConfig audio_config = 3 [(google.api.field_behavior) = REQUIRED]; - - // Specifies a Cloud Storage URI for the synthesis results. Must be - // specified in the format: `gs://bucket_name/object_name`, and the bucket - // must already exist. - string output_gcs_uri = 4; - - // The desired voice of the synthesized audio. - VoiceSelectionParams voice = 5; -} - -// The message returned to the client by the `SynthesizeLongAudio` method. -message SynthesizeLongAudioResponse {} - -// Metadata for response returned by the `SynthesizeLongAudio` method. -message SynthesizeLongAudioMetadata { - // Time when the request was received. - google.protobuf.Timestamp start_time = 1; - - // Time of the most recent processing update. - google.protobuf.Timestamp last_update_time = 2; - - // The progress of the most recent processing update in percentage, ie. 70.0%. 
- double progress_percentage = 3; -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.texttospeech.v1beta1.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.texttospeech.v1beta1.json deleted file mode 100644 index 496c28f83c0..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.texttospeech.v1beta1.json +++ /dev/null @@ -1,163 +0,0 @@ -{ - "clientLibrary": { - "name": "nodejs-texttospeech", - "version": "0.1.0", - "language": "TYPESCRIPT", - "apis": [ - { - "id": "google.cloud.texttospeech.v1beta1", - "version": "v1beta1" - } - ] - }, - "snippets": [ - { - "regionTag": "texttospeech_v1beta1_generated_TextToSpeech_ListVoices_async", - "title": "TextToSpeech listVoices Sample", - "origin": "API_DEFINITION", - "description": " Returns a list of Voice supported for synthesis.", - "canonical": true, - "file": "text_to_speech.list_voices.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 59, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "ListVoices", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech.ListVoices", - "async": true, - "parameters": [ - { - "name": "language_code", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.texttospeech.v1beta1.ListVoicesResponse", - "client": { - "shortName": "TextToSpeechClient", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechClient" - }, - "method": { - "shortName": "ListVoices", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech.ListVoices", - "service": { - "shortName": "TextToSpeech", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech" - } - } - } - }, - { - "regionTag": "texttospeech_v1beta1_generated_TextToSpeech_SynthesizeSpeech_async", - "title": "TextToSpeech synthesizeSpeech Sample", - "origin": "API_DEFINITION", - 
"description": " Synthesizes speech synchronously: receive results after all text input has been processed.", - "canonical": true, - "file": "text_to_speech.synthesize_speech.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 67, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "SynthesizeSpeech", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech.SynthesizeSpeech", - "async": true, - "parameters": [ - { - "name": "input", - "type": ".google.cloud.texttospeech.v1beta1.SynthesisInput" - }, - { - "name": "voice", - "type": ".google.cloud.texttospeech.v1beta1.VoiceSelectionParams" - }, - { - "name": "audio_config", - "type": ".google.cloud.texttospeech.v1beta1.AudioConfig" - }, - { - "name": "enable_time_pointing", - "type": "TYPE_ENUM[]" - } - ], - "resultType": ".google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse", - "client": { - "shortName": "TextToSpeechClient", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechClient" - }, - "method": { - "shortName": "SynthesizeSpeech", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech.SynthesizeSpeech", - "service": { - "shortName": "TextToSpeech", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeech" - } - } - } - }, - { - "regionTag": "texttospeech_v1beta1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async", - "title": "TextToSpeech synthesizeLongAudio Sample", - "origin": "API_DEFINITION", - "description": " Synthesizes long form text asynchronously.", - "canonical": true, - "file": "text_to_speech_long_audio_synthesize.synthesize_long_audio.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 74, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "SynthesizeLongAudio", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudio", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "input", - 
"type": ".google.cloud.texttospeech.v1beta1.SynthesisInput" - }, - { - "name": "audio_config", - "type": ".google.cloud.texttospeech.v1beta1.AudioConfig" - }, - { - "name": "output_gcs_uri", - "type": "TYPE_STRING" - }, - { - "name": "voice", - "type": ".google.cloud.texttospeech.v1beta1.VoiceSelectionParams" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "TextToSpeechLongAudioSynthesizeClient", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesizeClient" - }, - "method": { - "shortName": "SynthesizeLongAudio", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudio", - "service": { - "shortName": "TextToSpeechLongAudioSynthesize", - "fullName": "google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize" - } - } - } - } - ] -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.list_voices.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.list_voices.js deleted file mode 100644 index 3410c0eddfc..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.list_voices.js +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main() { - // [START texttospeech_v1beta1_generated_TextToSpeech_ListVoices_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Optional. Recommended. - * BCP-47 (https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. - * If not specified, the API will return all supported voices. - * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. For example, if you specify `"en-NZ"`, - * all `"en-NZ"` voices will be returned. If you specify `"no"`, both - * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be - * returned. 
- */ - // const languageCode = 'abc123' - - // Imports the Texttospeech library - const {TextToSpeechClient} = require('@google-cloud/text-to-speech').v1beta1; - - // Instantiates a client - const texttospeechClient = new TextToSpeechClient(); - - async function callListVoices() { - // Construct request - const request = { - }; - - // Run request - const response = await texttospeechClient.listVoices(request); - console.log(response); - } - - callListVoices(); - // [END texttospeech_v1beta1_generated_TextToSpeech_ListVoices_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.synthesize_speech.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.synthesize_speech.js deleted file mode 100644 index 5857dd63dde..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech.synthesize_speech.js +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - - -'use strict'; - -function main(input, voice, audioConfig) { - // [START texttospeech_v1beta1_generated_TextToSpeech_SynthesizeSpeech_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The Synthesizer requires either plain text or SSML as input. - */ - // const input = {} - /** - * Required. The desired voice of the synthesized audio. - */ - // const voice = {} - /** - * Required. The configuration of the synthesized audio. - */ - // const audioConfig = {} - /** - * Whether and what timepoints are returned in the response. - */ - // const enableTimePointing = 1234 - - // Imports the Texttospeech library - const {TextToSpeechClient} = require('@google-cloud/text-to-speech').v1beta1; - - // Instantiates a client - const texttospeechClient = new TextToSpeechClient(); - - async function callSynthesizeSpeech() { - // Construct request - const request = { - input, - voice, - audioConfig, - }; - - // Run request - const response = await texttospeechClient.synthesizeSpeech(request); - console.log(response); - } - - callSynthesizeSpeech(); - // [END texttospeech_v1beta1_generated_TextToSpeech_SynthesizeSpeech_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js deleted file mode 100644 index 6c71d6fdceb..00000000000 --- 
a/owl-bot-staging/google-cloud-texttospeech/v1beta1/samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(input, audioConfig) { - // [START texttospeech_v1beta1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * The resource states of the request in the form of - * projects/* /locations/* /voices/*. - */ - // const parent = 'abc123' - /** - * Required. The Synthesizer requires either plain text or SSML as input. - */ - // const input = {} - /** - * Required. The configuration of the synthesized audio. - */ - // const audioConfig = {} - /** - * Specifies a Cloud Storage URI for the synthesis results. Must be - * specified in the format: `gs://bucket_name/object_name`, and the bucket - * must already exist. 
- */ - // const outputGcsUri = 'abc123' - /** - * The desired voice of the synthesized audio. - */ - // const voice = {} - - // Imports the Texttospeech library - const {TextToSpeechLongAudioSynthesizeClient} = require('@google-cloud/text-to-speech').v1beta1; - - // Instantiates a client - const texttospeechClient = new TextToSpeechLongAudioSynthesizeClient(); - - async function callSynthesizeLongAudio() { - // Construct request - const request = { - input, - audioConfig, - }; - - // Run request - const [operation] = await texttospeechClient.synthesizeLongAudio(request); - const [response] = await operation.promise(); - console.log(response); - } - - callSynthesizeLongAudio(); - // [END texttospeech_v1beta1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/index.ts deleted file mode 100644 index c84882a6a53..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/index.ts +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as v1beta1 from './v1beta1'; -const TextToSpeechClient = v1beta1.TextToSpeechClient; -type TextToSpeechClient = v1beta1.TextToSpeechClient; -const TextToSpeechLongAudioSynthesizeClient = v1beta1.TextToSpeechLongAudioSynthesizeClient; -type TextToSpeechLongAudioSynthesizeClient = v1beta1.TextToSpeechLongAudioSynthesizeClient; -export {v1beta1, TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient}; -export default {v1beta1, TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient}; -import * as protos from '../protos/protos'; -export {protos} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/gapic_metadata.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/gapic_metadata.json deleted file mode 100644 index 9e8fec144a6..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/gapic_metadata.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "typescript", - "protoPackage": "google.cloud.texttospeech.v1beta1", - "libraryPackage": "@google-cloud/text-to-speech", - "services": { - "TextToSpeech": { - "clients": { - "grpc": { - "libraryClient": "TextToSpeechClient", - "rpcs": { - "ListVoices": { - "methods": [ - "listVoices" - ] - }, - "SynthesizeSpeech": { - "methods": [ - "synthesizeSpeech" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "TextToSpeechClient", - "rpcs": { - "ListVoices": { - "methods": [ - "listVoices" - ] - }, - "SynthesizeSpeech": { - "methods": [ - "synthesizeSpeech" - ] - } - } - } - } - }, - "TextToSpeechLongAudioSynthesize": { - "clients": { - "grpc": { - "libraryClient": "TextToSpeechLongAudioSynthesizeClient", - "rpcs": { - "SynthesizeLongAudio": { - "methods": [ - "synthesizeLongAudio" - ] - } - } - }, - 
"grpc-fallback": { - "libraryClient": "TextToSpeechLongAudioSynthesizeClient", - "rpcs": { - "SynthesizeLongAudio": { - "methods": [ - "synthesizeLongAudio" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/index.ts deleted file mode 100644 index 3cf28b93bc3..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/index.ts +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -export {TextToSpeechClient} from './text_to_speech_client'; -export {TextToSpeechLongAudioSynthesizeClient} from './text_to_speech_long_audio_synthesize_client'; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client.ts deleted file mode 100644 index bc08778d65f..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client.ts +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import type * as gax from 'google-gax'; -import type {Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax'; - -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1beta1/text_to_speech_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './text_to_speech_client_config.json'; -const version = require('../../../package.json').version; - -/** - * Service that implements Google Cloud Text-to-Speech API. 
- * @class - * @memberof v1beta1 - */ -export class TextToSpeechClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - pathTemplates: {[name: string]: gax.PathTemplate}; - textToSpeechStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of TextToSpeechClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. 
If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you - * need to avoid loading the default gRPC version and want to use the fallback - * HTTP implementation. Load only fallback version and pass it to the constructor: - * ``` - * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC - * const client = new TextToSpeechClient({fallback: 'rest'}, gax); - * ``` - */ - constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof TextToSpeechClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // Request numeric enum values if REST transport is used. 
- opts.numericEnums = true; - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Load google-gax module synchronously if needed - if (!gaxInstance) { - gaxInstance = require('google-gax') as typeof gax; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. 
- this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // This API contains "path templates"; forward-slash-separated - // identifiers to uniquely identify resources within the API. - // Create useful helper objects for these. - this.pathTemplates = { - modelPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/models/{model}' - ), - }; - - // Put together the default options sent with requests. - this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.texttospeech.v1beta1.TextToSpeech', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = this._gaxModule.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.textToSpeechStub) { - return this.textToSpeechStub; - } - - // Put together the "service stub" for - // google.cloud.texttospeech.v1beta1.TextToSpeech. - this.textToSpeechStub = this._gaxGrpc.createStub( - this._opts.fallback ? 
- (this._protos as protobuf.Root).lookupService('google.cloud.texttospeech.v1beta1.TextToSpeech') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.texttospeech.v1beta1.TextToSpeech, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. - const textToSpeechStubMethods = - ['listVoices', 'synthesizeSpeech']; - for (const methodName of textToSpeechStubMethods) { - const callPromise = this.textToSpeechStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor, - this._opts.fallback - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.textToSpeechStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'texttospeech.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'texttospeech.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. 
- */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. - */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- -/** - * Returns a list of Voice supported for synthesis. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} [request.languageCode] - * Optional. Recommended. - * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. - * If not specified, the API will return all supported voices. - * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. For example, if you specify `"en-NZ"`, - * all `"en-NZ"` voices will be returned. If you specify `"no"`, both - * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be - * returned. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [ListVoicesResponse]{@link google.cloud.texttospeech.v1beta1.ListVoicesResponse}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1beta1/text_to_speech.list_voices.js - * region_tag:texttospeech_v1beta1_generated_TextToSpeech_ListVoices_async - */ - listVoices( - request?: protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|undefined, {}|undefined - ]>; - listVoices( - request: protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|null|undefined, - {}|null|undefined>): void; - listVoices( - request: protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest, - callback: Callback< - protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|null|undefined, - {}|null|undefined>): void; - listVoices( - request?: protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse, - protos.google.cloud.texttospeech.v1beta1.IListVoicesRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - 
options.otherArgs.headers = options.otherArgs.headers || {}; - this.initialize(); - return this.innerApiCalls.listVoices(request, options, callback); - } -/** - * Synthesizes speech synchronously: receive results after all text input - * has been processed. - * - * @param {Object} request - * The request object that will be sent. - * @param {google.cloud.texttospeech.v1beta1.SynthesisInput} request.input - * Required. The Synthesizer requires either plain text or SSML as input. - * @param {google.cloud.texttospeech.v1beta1.VoiceSelectionParams} request.voice - * Required. The desired voice of the synthesized audio. - * @param {google.cloud.texttospeech.v1beta1.AudioConfig} request.audioConfig - * Required. The configuration of the synthesized audio. - * @param {number[]} request.enableTimePointing - * Whether and what timepoints are returned in the response. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [SynthesizeSpeechResponse]{@link google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1beta1/text_to_speech.synthesize_speech.js - * region_tag:texttospeech_v1beta1_generated_TextToSpeech_SynthesizeSpeech_async - */ - synthesizeSpeech( - request?: protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|undefined, {}|undefined - ]>; - synthesizeSpeech( - request: protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|null|undefined, - {}|null|undefined>): void; - synthesizeSpeech( - request: protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest, - callback: Callback< - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|null|undefined, - {}|null|undefined>): void; - synthesizeSpeech( - request?: protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse, - protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else 
{ - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - this.initialize(); - return this.innerApiCalls.synthesizeSpeech(request, options, callback); - } - - // -------------------- - // -- Path templates -- - // -------------------- - - /** - * Return a fully-qualified model resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} model - * @returns {string} Resource name string. - */ - modelPath(project:string,location:string,model:string) { - return this.pathTemplates.modelPathTemplate.render({ - project: project, - location: location, - model: model, - }); - } - - /** - * Parse the project from Model resource. - * - * @param {string} modelName - * A fully-qualified path representing Model resource. - * @returns {string} A string representing the project. - */ - matchProjectFromModelName(modelName: string) { - return this.pathTemplates.modelPathTemplate.match(modelName).project; - } - - /** - * Parse the location from Model resource. - * - * @param {string} modelName - * A fully-qualified path representing Model resource. - * @returns {string} A string representing the location. - */ - matchLocationFromModelName(modelName: string) { - return this.pathTemplates.modelPathTemplate.match(modelName).location; - } - - /** - * Parse the model from Model resource. - * - * @param {string} modelName - * A fully-qualified path representing Model resource. - * @returns {string} A string representing the model. - */ - matchModelFromModelName(modelName: string) { - return this.pathTemplates.modelPathTemplate.match(modelName).model; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.textToSpeechStub && !this._terminated) { - return this.textToSpeechStub.then(stub => { - this._terminated = true; - stub.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client_config.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client_config.json deleted file mode 100644 index 9c26e72b9e3..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_client_config.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "interfaces": { - "google.cloud.texttospeech.v1beta1.TextToSpeech": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "ListVoices": { - "timeout_millis": 300000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - }, - "SynthesizeSpeech": { - "timeout_millis": 300000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client.ts deleted file mode 100644 index e13731b24df..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client.ts +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import type * as gax from 'google-gax'; -import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; - -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1beta1/text_to_speech_long_audio_synthesize_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './text_to_speech_long_audio_synthesize_client_config.json'; -const version = require('../../../package.json').version; - -/** - * Service that implements Google Cloud Text-to-Speech API. 
- * @class - * @memberof v1beta1 - */ -export class TextToSpeechLongAudioSynthesizeClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - operationsClient: gax.OperationsClient; - textToSpeechLongAudioSynthesizeStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of TextToSpeechLongAudioSynthesizeClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. 
If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you - * need to avoid loading the default gRPC version and want to use the fallback - * HTTP implementation. Load only fallback version and pass it to the constructor: - * ``` - * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC - * const client = new TextToSpeechLongAudioSynthesizeClient({fallback: 'rest'}, gax); - * ``` - */ - constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof TextToSpeechLongAudioSynthesizeClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // Request numeric enum values if REST transport is used. 
- opts.numericEnums = true; - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Load google-gax module synchronously if needed - if (!gaxInstance) { - gaxInstance = require('google-gax') as typeof gax; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. 
- this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a - // an Operation object that allows for tracking of the operation, - // rather than holding a request open. - const lroOptions: GrpcClientOptions = { - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined - }; - if (opts.fallback === 'rest') { - lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = []; - } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); - const synthesizeLongAudioResponse = protoFilesRoot.lookup( - '.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioResponse') as gax.protobuf.Type; - const synthesizeLongAudioMetadata = protoFilesRoot.lookup( - '.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioMetadata') as gax.protobuf.Type; - - this.descriptors.longrunning = { - synthesizeLongAudio: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - synthesizeLongAudioResponse.decode.bind(synthesizeLongAudioResponse), - synthesizeLongAudioMetadata.decode.bind(synthesizeLongAudioMetadata)) - }; - - // Put together the default options sent with requests. - this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = this._gaxModule.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. 
- * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.textToSpeechLongAudioSynthesizeStub) { - return this.textToSpeechLongAudioSynthesizeStub; - } - - // Put together the "service stub" for - // google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize. - this.textToSpeechLongAudioSynthesizeStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
- const textToSpeechLongAudioSynthesizeStubMethods = - ['synthesizeLongAudio']; - for (const methodName of textToSpeechLongAudioSynthesizeStubMethods) { - const callPromise = this.textToSpeechLongAudioSynthesizeStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.longrunning[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor, - this._opts.fallback - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.textToSpeechLongAudioSynthesizeStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'texttospeech.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'texttospeech.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. 
- */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- - -/** - * Synthesizes long form text asynchronously. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * The resource states of the request in the form of - * projects/* /locations/* /voices/*. - * @param {google.cloud.texttospeech.v1beta1.SynthesisInput} request.input - * Required. The Synthesizer requires either plain text or SSML as input. - * @param {google.cloud.texttospeech.v1beta1.AudioConfig} request.audioConfig - * Required. The configuration of the synthesized audio. - * @param {string} request.outputGcsUri - * Specifies a Cloud Storage URI for the synthesis results. Must be - * specified in the format: `gs://bucket_name/object_name`, and the bucket - * must already exist. - * @param {google.cloud.texttospeech.v1beta1.VoiceSelectionParams} request.voice - * The desired voice of the synthesized audio. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js - * region_tag:texttospeech_v1beta1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async - */ - synthesizeLongAudio( - request?: protos.google.cloud.texttospeech.v1beta1.ISynthesizeLongAudioRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - synthesizeLongAudio( - request: protos.google.cloud.texttospeech.v1beta1.ISynthesizeLongAudioRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - synthesizeLongAudio( - request: protos.google.cloud.texttospeech.v1beta1.ISynthesizeLongAudioRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - synthesizeLongAudio( - request?: protos.google.cloud.texttospeech.v1beta1.ISynthesizeLongAudioRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.synthesizeLongAudio(request, options, callback); - } -/** - * Check the status of the long running operation returned by `synthesizeLongAudio()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js - * region_tag:texttospeech_v1beta1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async - */ - async checkSynthesizeLongAudioProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.synthesizeLongAudio, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } -/** - * Gets the latest state of a long-running operation. Clients can use this - * method to poll the operation result at intervals as recommended by the API - * service. - * - * @param {Object} request - The request object that will be sent. - * @param {string} request.name - The name of the operation resource. - * @param {Object=} options - * Optional parameters. You can override the default settings for this call, - * e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link - * https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the - * details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. 
- * - * The second parameter to the callback is an object representing - * [google.longrunning.Operation]{@link - * external:"google.longrunning.Operation"}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [google.longrunning.Operation]{@link - * external:"google.longrunning.Operation"}. The promise has a method named - * "cancel" which cancels the ongoing API call. - * - * @example - * ``` - * const client = longrunning.operationsClient(); - * const name = ''; - * const [response] = await client.getOperation({name}); - * // doThingsWith(response) - * ``` - */ - getOperation( - request: protos.google.longrunning.GetOperationRequest, - options?: - | gax.CallOptions - | Callback< - protos.google.longrunning.Operation, - protos.google.longrunning.GetOperationRequest, - {} | null | undefined - >, - callback?: Callback< - protos.google.longrunning.Operation, - protos.google.longrunning.GetOperationRequest, - {} | null | undefined - > - ): Promise<[protos.google.longrunning.Operation]> { - return this.operationsClient.getOperation(request, options, callback); - } - /** - * Lists operations that match the specified filter in the request. If the - * server doesn't support this method, it returns `UNIMPLEMENTED`. Returns an iterable object. - * - * For-await-of syntax is used with the iterable to recursively get response element on-demand. - * - * @param {Object} request - The request object that will be sent. - * @param {string} request.name - The name of the operation collection. - * @param {string} request.filter - The standard list filter. - * @param {number=} request.pageSize - - * The maximum number of resources contained in the underlying API - * response. If page streaming is performed per-resource, this - * parameter does not affect the return value. If page streaming is - * performed per-page, this determines the maximum number of - * resources in a page. 
- * @param {Object=} options - * Optional parameters. You can override the default settings for this call, - * e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link - * https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the - * details. - * @returns {Object} - * An iterable Object that conforms to @link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols. - * - * @example - * ``` - * const client = longrunning.operationsClient(); - * for await (const response of client.listOperationsAsync(request)); - * // doThingsWith(response) - * ``` - */ - listOperationsAsync( - request: protos.google.longrunning.ListOperationsRequest, - options?: gax.CallOptions - ): AsyncIterable { - return this.operationsClient.listOperationsAsync(request, options); - } - /** - * Starts asynchronous cancellation on a long-running operation. The server - * makes a best effort to cancel the operation, but success is not - * guaranteed. If the server doesn't support this method, it returns - * `google.rpc.Code.UNIMPLEMENTED`. Clients can use - * {@link Operations.GetOperation} or - * other methods to check whether the cancellation succeeded or whether the - * operation completed despite cancellation. On successful cancellation, - * the operation is not deleted; instead, it becomes an operation with - * an {@link Operation.error} value with a {@link google.rpc.Status.code} of - * 1, corresponding to `Code.CANCELLED`. - * - * @param {Object} request - The request object that will be sent. - * @param {string} request.name - The name of the operation resource to be cancelled. - * @param {Object=} options - * Optional parameters. You can override the default settings for this call, - * e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link - * https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the - * details. 
- * @param {function(?Error)=} callback - * The function which will be called with the result of the API call. - * @return {Promise} - The promise which resolves when API call finishes. - * The promise has a method named "cancel" which cancels the ongoing API - * call. - * - * @example - * ``` - * const client = longrunning.operationsClient(); - * await client.cancelOperation({name: ''}); - * ``` - */ - cancelOperation( - request: protos.google.longrunning.CancelOperationRequest, - options?: - | gax.CallOptions - | Callback< - protos.google.protobuf.Empty, - protos.google.longrunning.CancelOperationRequest, - {} | undefined | null - >, - callback?: Callback< - protos.google.longrunning.CancelOperationRequest, - protos.google.protobuf.Empty, - {} | undefined | null - > - ): Promise { - return this.operationsClient.cancelOperation(request, options, callback); - } - - /** - * Deletes a long-running operation. This method indicates that the client is - * no longer interested in the operation result. It does not cancel the - * operation. If the server doesn't support this method, it returns - * `google.rpc.Code.UNIMPLEMENTED`. - * - * @param {Object} request - The request object that will be sent. - * @param {string} request.name - The name of the operation resource to be deleted. - * @param {Object=} options - * Optional parameters. You can override the default settings for this call, - * e.g, timeout, retries, paginations, etc. See [gax.CallOptions]{@link - * https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the - * details. - * @param {function(?Error)=} callback - * The function which will be called with the result of the API call. - * @return {Promise} - The promise which resolves when API call finishes. - * The promise has a method named "cancel" which cancels the ongoing API - * call. 
- * - * @example - * ``` - * const client = longrunning.operationsClient(); - * await client.deleteOperation({name: ''}); - * ``` - */ - deleteOperation( - request: protos.google.longrunning.DeleteOperationRequest, - options?: - | gax.CallOptions - | Callback< - protos.google.protobuf.Empty, - protos.google.longrunning.DeleteOperationRequest, - {} | null | undefined - >, - callback?: Callback< - protos.google.protobuf.Empty, - protos.google.longrunning.DeleteOperationRequest, - {} | null | undefined - > - ): Promise { - return this.operationsClient.deleteOperation(request, options, callback); - } - - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. - */ - close(): Promise { - if (this.textToSpeechLongAudioSynthesizeStub && !this._terminated) { - return this.textToSpeechLongAudioSynthesizeStub.then(stub => { - this._terminated = true; - stub.close(); - this.operationsClient.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client_config.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client_config.json deleted file mode 100644 index a640bca5ce1..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_client_config.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "interfaces": { - "google.cloud.texttospeech.v1beta1.TextToSpeechLongAudioSynthesize": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - 
"max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "SynthesizeLongAudio": { - "timeout_millis": 5000000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_proto_list.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_proto_list.json deleted file mode 100644 index c7c739dbc73..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_long_audio_synthesize_proto_list.json +++ /dev/null @@ -1,4 +0,0 @@ -[ - "../../protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto", - "../../protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto" -] diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_proto_list.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_proto_list.json deleted file mode 100644 index c7c739dbc73..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/src/v1beta1/text_to_speech_proto_list.json +++ /dev/null @@ -1,4 +0,0 @@ -[ - "../../protos/google/cloud/texttospeech/v1beta1/cloud_tts.proto", - "../../protos/google/cloud/texttospeech/v1beta1/cloud_tts_lrs.proto" -] diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.js deleted file mode 100644 index a08648fec57..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.js +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -/* eslint-disable node/no-missing-require, no-unused-vars */ -const texttospeech = require('@google-cloud/text-to-speech'); - -function main() { - const textToSpeechClient = new texttospeech.TextToSpeechClient(); - const textToSpeechLongAudioSynthesizeClient = new texttospeech.TextToSpeechLongAudioSynthesizeClient(); -} - -main(); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.ts deleted file mode 100644 index 14b3691b2df..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/fixtures/sample/src/index.ts +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient} from '@google-cloud/text-to-speech'; - -// check that the client class type name can be used -function doStuffWithTextToSpeechClient(client: TextToSpeechClient) { - client.close(); -} -function doStuffWithTextToSpeechLongAudioSynthesizeClient(client: TextToSpeechLongAudioSynthesizeClient) { - client.close(); -} - -function main() { - // check that the client instance can be created - const textToSpeechClient = new TextToSpeechClient(); - doStuffWithTextToSpeechClient(textToSpeechClient); - // check that the client instance can be created - const textToSpeechLongAudioSynthesizeClient = new TextToSpeechLongAudioSynthesizeClient(); - doStuffWithTextToSpeechLongAudioSynthesizeClient(textToSpeechLongAudioSynthesizeClient); -} - -main(); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/install.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/install.ts deleted file mode 100644 index 557a57558e1..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/system-test/install.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {packNTest} from 'pack-n-play'; -import {readFileSync} from 'fs'; -import {describe, it} from 'mocha'; - -describe('📦 pack-n-play test', () => { - - it('TypeScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'TypeScript user can use the type definitions', - ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() - } - }; - await packNTest(options); - }); - - it('JavaScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'JavaScript user can use the library', - ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() - } - }; - await packNTest(options); - }); - -}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_long_audio_synthesize_v1beta1.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_long_audio_synthesize_v1beta1.ts deleted file mode 100644 index 7e48b40b7bc..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_long_audio_synthesize_v1beta1.ts +++ /dev/null @@ -1,591 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import {describe, it} from 'mocha'; -import * as texttospeechlongaudiosynthesizeModule from '../src'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -// Dynamically loaded proto JSON is needed to get the type information -// to fill in default values for request objects -const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function getTypeDefaultValue(typeName: string, fields: string[]) { - let type = root.lookupType(typeName) as protobuf.Type; - for (const field of fields.slice(0, -1)) { - type = type.fields[field]?.resolvedType as protobuf.Type; - } - return type.fields[fields[fields.length - 1]]?.defaultValue; -} - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? 
sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { - let counter = 0; - const asyncIterable = { - [Symbol.asyncIterator]() { - return { - async next() { - if (error) { - return Promise.reject(error); - } - if (counter >= responses!.length) { - return Promise.resolve({done: true, value: undefined}); - } - return Promise.resolve({done: false, value: responses![counter++]}); - } - }; - } - }; - return sinon.stub().returns(asyncIterable); -} - -describe('v1beta1.TextToSpeechLongAudioSynthesizeClient', () => { - describe('Common methods', () => { - it('has servicePath', () => { - const servicePath = texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - fallback: true, - }); - assert(client); - }); - - it('has 
initialize method and supports deferred initialization', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.textToSpeechLongAudioSynthesizeStub, undefined); - await client.initialize(); - assert(client.textToSpeechLongAudioSynthesizeStub); - }); - - it('has close method for the initialized client', done => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.textToSpeechLongAudioSynthesizeStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.textToSpeechLongAudioSynthesizeStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', 
private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - }); - - describe('synthesizeLongAudio', () => { - it('invokes synthesizeLongAudio without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(expectedResponse); - const [operation] = await client.synthesizeLongAudio(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes synthesizeLongAudio without error using callback', async () => { - const client = new 
texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.synthesizeLongAudio = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.synthesizeLongAudio( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes synthesizeLongAudio with call error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest() - ); - const defaultValue1 = - 
getTypeDefaultValue('.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.synthesizeLongAudio(request), expectedError); - const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes synthesizeLongAudio with LRO error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.texttospeech.v1beta1.SynthesizeLongAudioRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.synthesizeLongAudio(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.synthesizeLongAudio as SinonStub) - 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkSynthesizeLongAudioProgress without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkSynthesizeLongAudioProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkSynthesizeLongAudioProgress with error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkSynthesizeLongAudioProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - describe('getOperation', () => { - it('invokes getOperation without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - 
client.initialize(); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.GetOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const response = await client.getOperation(request); - assert.deepStrictEqual(response, [expectedResponse]); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0).calledWith(request) - ); - }); - it('invokes getOperation without error using callback', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.GetOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - client.operationsClient.getOperation = sinon.stub().callsArgWith(2, null, expectedResponse); - const promise = new Promise((resolve, reject) => { - client.operationsClient.getOperation( - request, - undefined, - ( - err?: Error | null, - result?: operationsProtos.google.longrunning.Operation | null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - it('invokes getOperation with error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.GetOperationRequest() - ); - const expectedError = new Error('expected'); - 
client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(async () => {await client.getOperation(request)}, expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0).calledWith(request)); - }); - }); - describe('cancelOperation', () => { - it('invokes cancelOperation without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.CancelOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.operationsClient.cancelOperation = stubSimpleCall(expectedResponse); - const response = await client.cancelOperation(request); - assert.deepStrictEqual(response, [expectedResponse]); - assert((client.operationsClient.cancelOperation as SinonStub) - .getCall(0).calledWith(request) - ); - }); - it('invokes cancelOperation without error using callback', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.CancelOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.operationsClient.cancelOperation = sinon.stub().callsArgWith(2, null, expectedResponse); - const promise = new Promise((resolve, reject) => { - client.operationsClient.cancelOperation( - request, - undefined, - ( - err?: Error | null, - result?: protos.google.protobuf.Empty | null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - 
assert.deepStrictEqual(response, expectedResponse); - assert((client.operationsClient.cancelOperation as SinonStub) - .getCall(0)); - }); - it('invokes cancelOperation with error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.CancelOperationRequest() - ); - const expectedError = new Error('expected'); - client.operationsClient.cancelOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(async () => {await client.cancelOperation(request)}, expectedError); - assert((client.operationsClient.cancelOperation as SinonStub) - .getCall(0).calledWith(request)); - }); - }); - describe('deleteOperation', () => { - it('invokes deleteOperation without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.DeleteOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.operationsClient.deleteOperation = stubSimpleCall(expectedResponse); - const response = await client.deleteOperation(request); - assert.deepStrictEqual(response, [expectedResponse]); - assert((client.operationsClient.deleteOperation as SinonStub) - .getCall(0).calledWith(request) - ); - }); - it('invokes deleteOperation without error using callback', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new 
operationsProtos.google.longrunning.DeleteOperationRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.operationsClient.deleteOperation = sinon.stub().callsArgWith(2, null, expectedResponse); - const promise = new Promise((resolve, reject) => { - client.operationsClient.deleteOperation( - request, - undefined, - ( - err?: Error | null, - result?: protos.google.protobuf.Empty | null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.operationsClient.deleteOperation as SinonStub) - .getCall(0)); - }); - it('invokes deleteOperation with error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.DeleteOperationRequest() - ); - const expectedError = new Error('expected'); - client.operationsClient.deleteOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(async () => {await client.deleteOperation(request)}, expectedError); - assert((client.operationsClient.deleteOperation as SinonStub) - .getCall(0).calledWith(request)); - }); - }); - describe('listOperationsAsync', () => { - it('uses async iteration with listOperations without error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.ListOperationsRequest() - ); - const expectedResponse = [ - generateSampleMessage( - new operationsProtos.google.longrunning.ListOperationsResponse() - ), - generateSampleMessage( - new 
operationsProtos.google.longrunning.ListOperationsResponse() - ), - generateSampleMessage( - new operationsProtos.google.longrunning.ListOperationsResponse() - ), - ]; - client.operationsClient.descriptor.listOperations.asyncIterate = stubAsyncIterationCall(expectedResponse); - const responses: operationsProtos.google.longrunning.ListOperationsResponse[] = []; - const iterable = client.operationsClient.listOperationsAsync(request); - for await (const resource of iterable) { - responses.push(resource!); - } - assert.deepStrictEqual(responses, expectedResponse); - assert.deepStrictEqual( - (client.operationsClient.descriptor.listOperations.asyncIterate as SinonStub) - .getCall(0).args[1], request); - }); - it('uses async iteration with listOperations with error', async () => { - const client = new texttospeechlongaudiosynthesizeModule.v1beta1.TextToSpeechLongAudioSynthesizeClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new operationsProtos.google.longrunning.ListOperationsRequest() - ); - const expectedError = new Error('expected'); - client.operationsClient.descriptor.listOperations.asyncIterate = stubAsyncIterationCall(undefined, expectedError); - const iterable = client.operationsClient.listOperationsAsync(request); - await assert.rejects(async () => { - const responses: operationsProtos.google.longrunning.ListOperationsResponse[] = []; - for await (const resource of iterable) { - responses.push(resource!); - } - }); - assert.deepStrictEqual( - (client.operationsClient.descriptor.listOperations.asyncIterate as SinonStub) - .getCall(0).args[1], request); - }); - }); -}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_v1beta1.ts b/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_v1beta1.ts deleted file mode 100644 index 009d1c69428..00000000000 --- 
a/owl-bot-staging/google-cloud-texttospeech/v1beta1/test/gapic_text_to_speech_v1beta1.ts +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import {describe, it} from 'mocha'; -import * as texttospeechModule from '../src'; - -import {protobuf} from 'google-gax'; - -// Dynamically loaded proto JSON is needed to get the type information -// to fill in default values for request objects -const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function getTypeDefaultValue(typeName: string, fields: string[]) { - let type = root.lookupType(typeName) as protobuf.Type; - for (const field of fields.slice(0, -1)) { - type = type.fields[field]?.resolvedType as protobuf.Type; - } - return type.fields[fields[fields.length - 1]]?.defaultValue; -} - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof 
protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { - return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); -} - -describe('v1beta1.TextToSpeechClient', () => { - describe('Common methods', () => { - it('has servicePath', () => { - const servicePath = texttospeechModule.v1beta1.TextToSpeechClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = texttospeechModule.v1beta1.TextToSpeechClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = texttospeechModule.v1beta1.TextToSpeechClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.textToSpeechStub, undefined); - await client.initialize(); - assert(client.textToSpeechStub); - }); - - it('has close method for the initialized client', done => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.textToSpeechStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized 
client', done => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.textToSpeechStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - }); - - describe('listVoices', () => { - it('invokes listVoices without error', async () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.ListVoicesRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.ListVoicesResponse() - ); - client.innerApiCalls.listVoices = stubSimpleCall(expectedResponse); - const 
[response] = await client.listVoices(request); - assert.deepStrictEqual(response, expectedResponse); - }); - - it('invokes listVoices without error using callback', async () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.ListVoicesRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.ListVoicesResponse() - ); - client.innerApiCalls.listVoices = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.listVoices( - request, - (err?: Error|null, result?: protos.google.cloud.texttospeech.v1beta1.IListVoicesResponse|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - }); - - it('invokes listVoices with error', async () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.ListVoicesRequest() - ); - const expectedError = new Error('expected'); - client.innerApiCalls.listVoices = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.listVoices(request), expectedError); - }); - - it('invokes listVoices with closed client', async () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.ListVoicesRequest() - ); - const expectedError = new Error('The client has already 
been closed.'); - client.close(); - await assert.rejects(client.listVoices(request), expectedError); - }); - }); - - describe('synthesizeSpeech', () => { - it('invokes synthesizeSpeech without error', async () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse() - ); - client.innerApiCalls.synthesizeSpeech = stubSimpleCall(expectedResponse); - const [response] = await client.synthesizeSpeech(request); - assert.deepStrictEqual(response, expectedResponse); - }); - - it('invokes synthesizeSpeech without error using callback', async () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechRequest() - ); - const expectedResponse = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse() - ); - client.innerApiCalls.synthesizeSpeech = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.synthesizeSpeech( - request, - (err?: Error|null, result?: protos.google.cloud.texttospeech.v1beta1.ISynthesizeSpeechResponse|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - }); - - it('invokes synthesizeSpeech with error', async () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - 
projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechRequest() - ); - const expectedError = new Error('expected'); - client.innerApiCalls.synthesizeSpeech = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.synthesizeSpeech(request), expectedError); - }); - - it('invokes synthesizeSpeech with closed client', async () => { - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.texttospeech.v1beta1.SynthesizeSpeechRequest() - ); - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.synthesizeSpeech(request), expectedError); - }); - }); - - describe('Path templates', () => { - - describe('model', () => { - const fakePath = "/rendered/path/model"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - model: "modelValue", - }; - const client = new texttospeechModule.v1beta1.TextToSpeechClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.modelPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.modelPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('modelPath', () => { - const result = client.modelPath("projectValue", "locationValue", "modelValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.modelPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromModelName', () => { - const result = client.matchProjectFromModelName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.modelPathTemplate.match as 
SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromModelName', () => { - const result = client.matchLocationFromModelName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.modelPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchModelFromModelName', () => { - const result = client.matchModelFromModelName(fakePath); - assert.strictEqual(result, "modelValue"); - assert((client.pathTemplates.modelPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - }); -}); diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/tsconfig.json b/owl-bot-staging/google-cloud-texttospeech/v1beta1/tsconfig.json deleted file mode 100644 index c78f1c884ef..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/tsconfig.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "extends": "./node_modules/gts/tsconfig-google.json", - "compilerOptions": { - "rootDir": ".", - "outDir": "build", - "resolveJsonModule": true, - "lib": [ - "es2018", - "dom" - ] - }, - "include": [ - "src/*.ts", - "src/**/*.ts", - "test/*.ts", - "test/**/*.ts", - "system-test/*.ts" - ] -} diff --git a/owl-bot-staging/google-cloud-texttospeech/v1beta1/webpack.config.js b/owl-bot-staging/google-cloud-texttospeech/v1beta1/webpack.config.js deleted file mode 100644 index 25f059a0979..00000000000 --- a/owl-bot-staging/google-cloud-texttospeech/v1beta1/webpack.config.js +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -const path = require('path'); - -module.exports = { - entry: './src/index.ts', - output: { - library: 'TextToSpeech', - filename: './text-to-speech.js', - }, - node: { - child_process: 'empty', - fs: 'empty', - crypto: 'empty', - }, - resolve: { - alias: { - '../../../package.json': path.resolve(__dirname, 'package.json'), - }, - extensions: ['.js', '.json', '.ts'], - }, - module: { - rules: [ - { - test: /\.tsx?$/, - use: 'ts-loader', - exclude: /node_modules/ - }, - { - test: /node_modules[\\/]@grpc[\\/]grpc-js/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]grpc/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]retry-request/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]https?-proxy-agent/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]gtoken/, - use: 'null-loader' - }, - ], - }, - mode: 'production', -}; diff --git a/packages/google-cloud-texttospeech/README.md b/packages/google-cloud-texttospeech/README.md index 4f9edbfe0a3..9d07755752b 100644 --- a/packages/google-cloud-texttospeech/README.md +++ b/packages/google-cloud-texttospeech/README.md @@ -99,6 +99,7 @@ Samples are in the [`samples/`](https://github.com/googleapis/google-cloud-node/ | --------------------------- | --------------------------------- | ------ | | Text_to_speech.list_voices | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech.list_voices.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech.list_voices.js,samples/README.md) | | Text_to_speech.synthesize_speech | [source 
code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech.synthesize_speech.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech.synthesize_speech.js,samples/README.md) | +| Text_to_speech_long_audio_synthesize.synthesize_long_audio | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js,samples/README.md) | | Text_to_speech.list_voices | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-texttospeech/samples/generated/v1beta1/text_to_speech.list_voices.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-texttospeech/samples/generated/v1beta1/text_to_speech.list_voices.js,samples/README.md) | | Text_to_speech.synthesize_speech | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-texttospeech/samples/generated/v1beta1/text_to_speech.synthesize_speech.js) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-texttospeech/samples/generated/v1beta1/text_to_speech.synthesize_speech.js,samples/README.md) | | Text_to_speech_long_audio_synthesize.synthesize_long_audio | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-texttospeech/samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-texttospeech/samples/generated/v1beta1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js,samples/README.md) | diff --git a/packages/google-cloud-texttospeech/protos/google/cloud/texttospeech/v1/cloud_tts.proto b/packages/google-cloud-texttospeech/protos/google/cloud/texttospeech/v1/cloud_tts.proto index 4c4ae7bd7db..b50d3698fb7 100644 --- a/packages/google-cloud-texttospeech/protos/google/cloud/texttospeech/v1/cloud_tts.proto +++ b/packages/google-cloud-texttospeech/protos/google/cloud/texttospeech/v1/cloud_tts.proto @@ -37,7 +37,8 @@ option (google.api.resource_definition) = { // Service that implements Google Cloud Text-to-Speech API. service TextToSpeech { option (google.api.default_host) = "texttospeech.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; // Returns a list of Voice supported for synthesis. rpc ListVoices(ListVoicesRequest) returns (ListVoicesResponse) { @@ -49,7 +50,8 @@ service TextToSpeech { // Synthesizes speech synchronously: receive results after all text input // has been processed. 
- rpc SynthesizeSpeech(SynthesizeSpeechRequest) returns (SynthesizeSpeechResponse) { + rpc SynthesizeSpeech(SynthesizeSpeechRequest) + returns (SynthesizeSpeechResponse) { option (google.api.http) = { post: "/v1/text:synthesize" body: "*" @@ -81,7 +83,8 @@ enum SsmlVoiceGender { // Configuration to set up audio encoder. The encoding determines the output // audio format that we'd like. enum AudioEncoding { - // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][]. + // Not specified. Will return result + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. AUDIO_ENCODING_UNSPECIFIED = 0; // Uncompressed 16-bit signed little-endian samples (Linear PCM). @@ -156,8 +159,8 @@ message SynthesizeSpeechRequest { // Contains text input to be synthesized. Either `text` or `ssml` must be // supplied. Supplying both or neither returns -// [google.rpc.Code.INVALID_ARGUMENT][]. The input size is limited to 5000 -// characters. +// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. The +// input size is limited to 5000 bytes. message SynthesisInput { // The input source, which is either plain text or SSML. oneof input_source { @@ -166,7 +169,8 @@ message SynthesisInput { // The SSML document to be synthesized. The SSML document must be valid // and well-formed. Otherwise the RPC will fail and return - // [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. For + // more information, see // [SSML](https://cloud.google.com/text-to-speech/docs/ssml). string ssml = 2; } @@ -174,9 +178,9 @@ message SynthesisInput { // Description of which voice to use for a synthesis request. message VoiceSelectionParams { - // Required. The language (and potentially also the region) of the voice expressed as a - // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. - // "en-US". This should not include a script tag (e.g. 
use + // Required. The language (and potentially also the region) of the voice + // expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) + // language tag, e.g. "en-US". This should not include a script tag (e.g. use // "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred // from the input provided in the SynthesisInput. The TTS service // will use this parameter to help choose an appropriate voice. Note that @@ -245,7 +249,7 @@ message AudioConfig { // converting to the desired sample rate (which might result in worse audio // quality), unless the specified sample rate is not supported for the // encoding chosen, in which case it will fail the request and return - // [google.rpc.Code.INVALID_ARGUMENT][]. + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. int32 sample_rate_hertz = 5 [(google.api.field_behavior) = OPTIONAL]; // Optional. Input only. An identifier which selects 'audio effects' profiles @@ -282,9 +286,7 @@ message CustomVoiceParams { // Required. The name of the AutoML model that synthesizes the custom voice. string model = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } + (google.api.resource_reference) = { type: "automl.googleapis.com/Model" } ]; // Optional. The usage of the synthesized audio to be reported. 
diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto b/packages/google-cloud-texttospeech/protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto similarity index 100% rename from owl-bot-staging/google-cloud-texttospeech/v1/protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto rename to packages/google-cloud-texttospeech/protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto diff --git a/packages/google-cloud-texttospeech/protos/protos.d.ts b/packages/google-cloud-texttospeech/protos/protos.d.ts index 9a2ca95c554..1a1b8295094 100644 --- a/packages/google-cloud-texttospeech/protos/protos.d.ts +++ b/packages/google-cloud-texttospeech/protos/protos.d.ts @@ -1085,6 +1085,372 @@ export namespace google { */ public static getTypeUrl(typeUrlPrefix?: string): string; } + + /** Represents a TextToSpeechLongAudioSynthesize */ + class TextToSpeechLongAudioSynthesize extends $protobuf.rpc.Service { + + /** + * Constructs a new TextToSpeechLongAudioSynthesize service. + * @param rpcImpl RPC implementation + * @param [requestDelimited=false] Whether requests are length-delimited + * @param [responseDelimited=false] Whether responses are length-delimited + */ + constructor(rpcImpl: $protobuf.RPCImpl, requestDelimited?: boolean, responseDelimited?: boolean); + + /** + * Creates new TextToSpeechLongAudioSynthesize service using the specified rpc implementation. + * @param rpcImpl RPC implementation + * @param [requestDelimited=false] Whether requests are length-delimited + * @param [responseDelimited=false] Whether responses are length-delimited + * @returns RPC service. Useful where requests and/or responses are streamed. + */ + public static create(rpcImpl: $protobuf.RPCImpl, requestDelimited?: boolean, responseDelimited?: boolean): TextToSpeechLongAudioSynthesize; + + /** + * Calls SynthesizeLongAudio. 
+ * @param request SynthesizeLongAudioRequest message or plain object + * @param callback Node-style callback called with the error, if any, and Operation + */ + public synthesizeLongAudio(request: google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, callback: google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudioCallback): void; + + /** + * Calls SynthesizeLongAudio. + * @param request SynthesizeLongAudioRequest message or plain object + * @returns Promise + */ + public synthesizeLongAudio(request: google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest): Promise; + } + + namespace TextToSpeechLongAudioSynthesize { + + /** + * Callback as used by {@link google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize|synthesizeLongAudio}. + * @param error Error, if any + * @param [response] Operation + */ + type SynthesizeLongAudioCallback = (error: (Error|null), response?: google.longrunning.Operation) => void; + } + + /** Properties of a SynthesizeLongAudioRequest. */ + interface ISynthesizeLongAudioRequest { + + /** SynthesizeLongAudioRequest parent */ + parent?: (string|null); + + /** SynthesizeLongAudioRequest input */ + input?: (google.cloud.texttospeech.v1.ISynthesisInput|null); + + /** SynthesizeLongAudioRequest audioConfig */ + audioConfig?: (google.cloud.texttospeech.v1.IAudioConfig|null); + + /** SynthesizeLongAudioRequest outputGcsUri */ + outputGcsUri?: (string|null); + + /** SynthesizeLongAudioRequest voice */ + voice?: (google.cloud.texttospeech.v1.IVoiceSelectionParams|null); + } + + /** Represents a SynthesizeLongAudioRequest. */ + class SynthesizeLongAudioRequest implements ISynthesizeLongAudioRequest { + + /** + * Constructs a new SynthesizeLongAudioRequest. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest); + + /** SynthesizeLongAudioRequest parent. */ + public parent: string; + + /** SynthesizeLongAudioRequest input. 
*/ + public input?: (google.cloud.texttospeech.v1.ISynthesisInput|null); + + /** SynthesizeLongAudioRequest audioConfig. */ + public audioConfig?: (google.cloud.texttospeech.v1.IAudioConfig|null); + + /** SynthesizeLongAudioRequest outputGcsUri. */ + public outputGcsUri: string; + + /** SynthesizeLongAudioRequest voice. */ + public voice?: (google.cloud.texttospeech.v1.IVoiceSelectionParams|null); + + /** + * Creates a new SynthesizeLongAudioRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns SynthesizeLongAudioRequest instance + */ + public static create(properties?: google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest): google.cloud.texttospeech.v1.SynthesizeLongAudioRequest; + + /** + * Encodes the specified SynthesizeLongAudioRequest message. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioRequest.verify|verify} messages. + * @param message SynthesizeLongAudioRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified SynthesizeLongAudioRequest message, length delimited. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioRequest.verify|verify} messages. + * @param message SynthesizeLongAudioRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a SynthesizeLongAudioRequest message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns SynthesizeLongAudioRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.texttospeech.v1.SynthesizeLongAudioRequest; + + /** + * Decodes a SynthesizeLongAudioRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns SynthesizeLongAudioRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.texttospeech.v1.SynthesizeLongAudioRequest; + + /** + * Verifies a SynthesizeLongAudioRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a SynthesizeLongAudioRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns SynthesizeLongAudioRequest + */ + public static fromObject(object: { [k: string]: any }): google.cloud.texttospeech.v1.SynthesizeLongAudioRequest; + + /** + * Creates a plain object from a SynthesizeLongAudioRequest message. Also converts values to other types if specified. + * @param message SynthesizeLongAudioRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.texttospeech.v1.SynthesizeLongAudioRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this SynthesizeLongAudioRequest to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for SynthesizeLongAudioRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a SynthesizeLongAudioResponse. */ + interface ISynthesizeLongAudioResponse { + } + + /** Represents a SynthesizeLongAudioResponse. */ + class SynthesizeLongAudioResponse implements ISynthesizeLongAudioResponse { + + /** + * Constructs a new SynthesizeLongAudioResponse. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse); + + /** + * Creates a new SynthesizeLongAudioResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns SynthesizeLongAudioResponse instance + */ + public static create(properties?: google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse): google.cloud.texttospeech.v1.SynthesizeLongAudioResponse; + + /** + * Encodes the specified SynthesizeLongAudioResponse message. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioResponse.verify|verify} messages. + * @param message SynthesizeLongAudioResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified SynthesizeLongAudioResponse message, length delimited. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioResponse.verify|verify} messages. 
+ * @param message SynthesizeLongAudioResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a SynthesizeLongAudioResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns SynthesizeLongAudioResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.texttospeech.v1.SynthesizeLongAudioResponse; + + /** + * Decodes a SynthesizeLongAudioResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns SynthesizeLongAudioResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.texttospeech.v1.SynthesizeLongAudioResponse; + + /** + * Verifies a SynthesizeLongAudioResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a SynthesizeLongAudioResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns SynthesizeLongAudioResponse + */ + public static fromObject(object: { [k: string]: any }): google.cloud.texttospeech.v1.SynthesizeLongAudioResponse; + + /** + * Creates a plain object from a SynthesizeLongAudioResponse message. Also converts values to other types if specified. 
+ * @param message SynthesizeLongAudioResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.texttospeech.v1.SynthesizeLongAudioResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this SynthesizeLongAudioResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for SynthesizeLongAudioResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a SynthesizeLongAudioMetadata. */ + interface ISynthesizeLongAudioMetadata { + + /** SynthesizeLongAudioMetadata startTime */ + startTime?: (google.protobuf.ITimestamp|null); + + /** SynthesizeLongAudioMetadata lastUpdateTime */ + lastUpdateTime?: (google.protobuf.ITimestamp|null); + + /** SynthesizeLongAudioMetadata progressPercentage */ + progressPercentage?: (number|null); + } + + /** Represents a SynthesizeLongAudioMetadata. */ + class SynthesizeLongAudioMetadata implements ISynthesizeLongAudioMetadata { + + /** + * Constructs a new SynthesizeLongAudioMetadata. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata); + + /** SynthesizeLongAudioMetadata startTime. */ + public startTime?: (google.protobuf.ITimestamp|null); + + /** SynthesizeLongAudioMetadata lastUpdateTime. */ + public lastUpdateTime?: (google.protobuf.ITimestamp|null); + + /** SynthesizeLongAudioMetadata progressPercentage. */ + public progressPercentage: number; + + /** + * Creates a new SynthesizeLongAudioMetadata instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns SynthesizeLongAudioMetadata instance + */ + public static create(properties?: google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata): google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata; + + /** + * Encodes the specified SynthesizeLongAudioMetadata message. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata.verify|verify} messages. + * @param message SynthesizeLongAudioMetadata message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified SynthesizeLongAudioMetadata message, length delimited. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata.verify|verify} messages. + * @param message SynthesizeLongAudioMetadata message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a SynthesizeLongAudioMetadata message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns SynthesizeLongAudioMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata; + + /** + * Decodes a SynthesizeLongAudioMetadata message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns SynthesizeLongAudioMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata; + + /** + * Verifies a SynthesizeLongAudioMetadata message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a SynthesizeLongAudioMetadata message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns SynthesizeLongAudioMetadata + */ + public static fromObject(object: { [k: string]: any }): google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata; + + /** + * Creates a plain object from a SynthesizeLongAudioMetadata message. Also converts values to other types if specified. + * @param message SynthesizeLongAudioMetadata + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this SynthesizeLongAudioMetadata to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for SynthesizeLongAudioMetadata + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } } /** Namespace v1beta1. 
*/ diff --git a/packages/google-cloud-texttospeech/protos/protos.js b/packages/google-cloud-texttospeech/protos/protos.js index 397e513b250..74488fbbbfa 100644 --- a/packages/google-cloud-texttospeech/protos/protos.js +++ b/packages/google-cloud-texttospeech/protos/protos.js @@ -2629,6 +2629,820 @@ return SynthesizeSpeechResponse; })(); + v1.TextToSpeechLongAudioSynthesize = (function() { + + /** + * Constructs a new TextToSpeechLongAudioSynthesize service. + * @memberof google.cloud.texttospeech.v1 + * @classdesc Represents a TextToSpeechLongAudioSynthesize + * @extends $protobuf.rpc.Service + * @constructor + * @param {$protobuf.RPCImpl} rpcImpl RPC implementation + * @param {boolean} [requestDelimited=false] Whether requests are length-delimited + * @param {boolean} [responseDelimited=false] Whether responses are length-delimited + */ + function TextToSpeechLongAudioSynthesize(rpcImpl, requestDelimited, responseDelimited) { + $protobuf.rpc.Service.call(this, rpcImpl, requestDelimited, responseDelimited); + } + + (TextToSpeechLongAudioSynthesize.prototype = Object.create($protobuf.rpc.Service.prototype)).constructor = TextToSpeechLongAudioSynthesize; + + /** + * Creates new TextToSpeechLongAudioSynthesize service using the specified rpc implementation. + * @function create + * @memberof google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize + * @static + * @param {$protobuf.RPCImpl} rpcImpl RPC implementation + * @param {boolean} [requestDelimited=false] Whether requests are length-delimited + * @param {boolean} [responseDelimited=false] Whether responses are length-delimited + * @returns {TextToSpeechLongAudioSynthesize} RPC service. Useful where requests and/or responses are streamed. 
+ */ + TextToSpeechLongAudioSynthesize.create = function create(rpcImpl, requestDelimited, responseDelimited) { + return new this(rpcImpl, requestDelimited, responseDelimited); + }; + + /** + * Callback as used by {@link google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize|synthesizeLongAudio}. + * @memberof google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize + * @typedef SynthesizeLongAudioCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.longrunning.Operation} [response] Operation + */ + + /** + * Calls SynthesizeLongAudio. + * @function synthesizeLongAudio + * @memberof google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize + * @instance + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest} request SynthesizeLongAudioRequest message or plain object + * @param {google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudioCallback} callback Node-style callback called with the error, if any, and Operation + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(TextToSpeechLongAudioSynthesize.prototype.synthesizeLongAudio = function synthesizeLongAudio(request, callback) { + return this.rpcCall(synthesizeLongAudio, $root.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest, $root.google.longrunning.Operation, request, callback); + }, "name", { value: "SynthesizeLongAudio" }); + + /** + * Calls SynthesizeLongAudio. + * @function synthesizeLongAudio + * @memberof google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize + * @instance + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest} request SynthesizeLongAudioRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + + return TextToSpeechLongAudioSynthesize; + })(); + + v1.SynthesizeLongAudioRequest = (function() { + + /** + * Properties of a SynthesizeLongAudioRequest. 
+ * @memberof google.cloud.texttospeech.v1 + * @interface ISynthesizeLongAudioRequest + * @property {string|null} [parent] SynthesizeLongAudioRequest parent + * @property {google.cloud.texttospeech.v1.ISynthesisInput|null} [input] SynthesizeLongAudioRequest input + * @property {google.cloud.texttospeech.v1.IAudioConfig|null} [audioConfig] SynthesizeLongAudioRequest audioConfig + * @property {string|null} [outputGcsUri] SynthesizeLongAudioRequest outputGcsUri + * @property {google.cloud.texttospeech.v1.IVoiceSelectionParams|null} [voice] SynthesizeLongAudioRequest voice + */ + + /** + * Constructs a new SynthesizeLongAudioRequest. + * @memberof google.cloud.texttospeech.v1 + * @classdesc Represents a SynthesizeLongAudioRequest. + * @implements ISynthesizeLongAudioRequest + * @constructor + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest=} [properties] Properties to set + */ + function SynthesizeLongAudioRequest(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * SynthesizeLongAudioRequest parent. + * @member {string} parent + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @instance + */ + SynthesizeLongAudioRequest.prototype.parent = ""; + + /** + * SynthesizeLongAudioRequest input. + * @member {google.cloud.texttospeech.v1.ISynthesisInput|null|undefined} input + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @instance + */ + SynthesizeLongAudioRequest.prototype.input = null; + + /** + * SynthesizeLongAudioRequest audioConfig. + * @member {google.cloud.texttospeech.v1.IAudioConfig|null|undefined} audioConfig + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @instance + */ + SynthesizeLongAudioRequest.prototype.audioConfig = null; + + /** + * SynthesizeLongAudioRequest outputGcsUri. 
+ * @member {string} outputGcsUri + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @instance + */ + SynthesizeLongAudioRequest.prototype.outputGcsUri = ""; + + /** + * SynthesizeLongAudioRequest voice. + * @member {google.cloud.texttospeech.v1.IVoiceSelectionParams|null|undefined} voice + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @instance + */ + SynthesizeLongAudioRequest.prototype.voice = null; + + /** + * Creates a new SynthesizeLongAudioRequest instance using the specified properties. + * @function create + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @static + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest=} [properties] Properties to set + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioRequest} SynthesizeLongAudioRequest instance + */ + SynthesizeLongAudioRequest.create = function create(properties) { + return new SynthesizeLongAudioRequest(properties); + }; + + /** + * Encodes the specified SynthesizeLongAudioRequest message. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioRequest.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @static + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest} message SynthesizeLongAudioRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SynthesizeLongAudioRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.parent != null && Object.hasOwnProperty.call(message, "parent")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.parent); + if (message.input != null && Object.hasOwnProperty.call(message, "input")) + $root.google.cloud.texttospeech.v1.SynthesisInput.encode(message.input, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.audioConfig != null && Object.hasOwnProperty.call(message, "audioConfig")) + $root.google.cloud.texttospeech.v1.AudioConfig.encode(message.audioConfig, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.outputGcsUri != null && Object.hasOwnProperty.call(message, "outputGcsUri")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.outputGcsUri); + if (message.voice != null && Object.hasOwnProperty.call(message, "voice")) + $root.google.cloud.texttospeech.v1.VoiceSelectionParams.encode(message.voice, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified SynthesizeLongAudioRequest message, length delimited. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @static + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest} message SynthesizeLongAudioRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SynthesizeLongAudioRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a SynthesizeLongAudioRequest message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioRequest} SynthesizeLongAudioRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SynthesizeLongAudioRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.parent = reader.string(); + break; + } + case 2: { + message.input = $root.google.cloud.texttospeech.v1.SynthesisInput.decode(reader, reader.uint32()); + break; + } + case 3: { + message.audioConfig = $root.google.cloud.texttospeech.v1.AudioConfig.decode(reader, reader.uint32()); + break; + } + case 4: { + message.outputGcsUri = reader.string(); + break; + } + case 5: { + message.voice = $root.google.cloud.texttospeech.v1.VoiceSelectionParams.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a SynthesizeLongAudioRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioRequest} SynthesizeLongAudioRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SynthesizeLongAudioRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a SynthesizeLongAudioRequest message. 
+ * @function verify + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + SynthesizeLongAudioRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.parent != null && message.hasOwnProperty("parent")) + if (!$util.isString(message.parent)) + return "parent: string expected"; + if (message.input != null && message.hasOwnProperty("input")) { + var error = $root.google.cloud.texttospeech.v1.SynthesisInput.verify(message.input); + if (error) + return "input." + error; + } + if (message.audioConfig != null && message.hasOwnProperty("audioConfig")) { + var error = $root.google.cloud.texttospeech.v1.AudioConfig.verify(message.audioConfig); + if (error) + return "audioConfig." + error; + } + if (message.outputGcsUri != null && message.hasOwnProperty("outputGcsUri")) + if (!$util.isString(message.outputGcsUri)) + return "outputGcsUri: string expected"; + if (message.voice != null && message.hasOwnProperty("voice")) { + var error = $root.google.cloud.texttospeech.v1.VoiceSelectionParams.verify(message.voice); + if (error) + return "voice." + error; + } + return null; + }; + + /** + * Creates a SynthesizeLongAudioRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioRequest} SynthesizeLongAudioRequest + */ + SynthesizeLongAudioRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest) + return object; + var message = new $root.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest(); + if (object.parent != null) + message.parent = String(object.parent); + if (object.input != null) { + if (typeof object.input !== "object") + throw TypeError(".google.cloud.texttospeech.v1.SynthesizeLongAudioRequest.input: object expected"); + message.input = $root.google.cloud.texttospeech.v1.SynthesisInput.fromObject(object.input); + } + if (object.audioConfig != null) { + if (typeof object.audioConfig !== "object") + throw TypeError(".google.cloud.texttospeech.v1.SynthesizeLongAudioRequest.audioConfig: object expected"); + message.audioConfig = $root.google.cloud.texttospeech.v1.AudioConfig.fromObject(object.audioConfig); + } + if (object.outputGcsUri != null) + message.outputGcsUri = String(object.outputGcsUri); + if (object.voice != null) { + if (typeof object.voice !== "object") + throw TypeError(".google.cloud.texttospeech.v1.SynthesizeLongAudioRequest.voice: object expected"); + message.voice = $root.google.cloud.texttospeech.v1.VoiceSelectionParams.fromObject(object.voice); + } + return message; + }; + + /** + * Creates a plain object from a SynthesizeLongAudioRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @static + * @param {google.cloud.texttospeech.v1.SynthesizeLongAudioRequest} message SynthesizeLongAudioRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + SynthesizeLongAudioRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.parent = ""; + object.input = null; + object.audioConfig = null; + object.outputGcsUri = ""; + object.voice = null; + } + if (message.parent != null && message.hasOwnProperty("parent")) + object.parent = message.parent; + if (message.input != null && message.hasOwnProperty("input")) + object.input = $root.google.cloud.texttospeech.v1.SynthesisInput.toObject(message.input, options); + if (message.audioConfig != null && message.hasOwnProperty("audioConfig")) + object.audioConfig = $root.google.cloud.texttospeech.v1.AudioConfig.toObject(message.audioConfig, options); + if (message.outputGcsUri != null && message.hasOwnProperty("outputGcsUri")) + object.outputGcsUri = message.outputGcsUri; + if (message.voice != null && message.hasOwnProperty("voice")) + object.voice = $root.google.cloud.texttospeech.v1.VoiceSelectionParams.toObject(message.voice, options); + return object; + }; + + /** + * Converts this SynthesizeLongAudioRequest to JSON. 
+ * @function toJSON + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @instance + * @returns {Object.} JSON object + */ + SynthesizeLongAudioRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for SynthesizeLongAudioRequest + * @function getTypeUrl + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + SynthesizeLongAudioRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.texttospeech.v1.SynthesizeLongAudioRequest"; + }; + + return SynthesizeLongAudioRequest; + })(); + + v1.SynthesizeLongAudioResponse = (function() { + + /** + * Properties of a SynthesizeLongAudioResponse. + * @memberof google.cloud.texttospeech.v1 + * @interface ISynthesizeLongAudioResponse + */ + + /** + * Constructs a new SynthesizeLongAudioResponse. + * @memberof google.cloud.texttospeech.v1 + * @classdesc Represents a SynthesizeLongAudioResponse. + * @implements ISynthesizeLongAudioResponse + * @constructor + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse=} [properties] Properties to set + */ + function SynthesizeLongAudioResponse(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new SynthesizeLongAudioResponse instance using the specified properties. 
+ * @function create + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioResponse + * @static + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse=} [properties] Properties to set + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioResponse} SynthesizeLongAudioResponse instance + */ + SynthesizeLongAudioResponse.create = function create(properties) { + return new SynthesizeLongAudioResponse(properties); + }; + + /** + * Encodes the specified SynthesizeLongAudioResponse message. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioResponse.verify|verify} messages. + * @function encode + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioResponse + * @static + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse} message SynthesizeLongAudioResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SynthesizeLongAudioResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified SynthesizeLongAudioResponse message, length delimited. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioResponse + * @static + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse} message SynthesizeLongAudioResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SynthesizeLongAudioResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a SynthesizeLongAudioResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioResponse} SynthesizeLongAudioResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SynthesizeLongAudioResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.texttospeech.v1.SynthesizeLongAudioResponse(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a SynthesizeLongAudioResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioResponse} SynthesizeLongAudioResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SynthesizeLongAudioResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a SynthesizeLongAudioResponse message. 
+ * @function verify + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + SynthesizeLongAudioResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a SynthesizeLongAudioResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioResponse + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioResponse} SynthesizeLongAudioResponse + */ + SynthesizeLongAudioResponse.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.texttospeech.v1.SynthesizeLongAudioResponse) + return object; + return new $root.google.cloud.texttospeech.v1.SynthesizeLongAudioResponse(); + }; + + /** + * Creates a plain object from a SynthesizeLongAudioResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioResponse + * @static + * @param {google.cloud.texttospeech.v1.SynthesizeLongAudioResponse} message SynthesizeLongAudioResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + SynthesizeLongAudioResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this SynthesizeLongAudioResponse to JSON. 
+ * @function toJSON + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioResponse + * @instance + * @returns {Object.} JSON object + */ + SynthesizeLongAudioResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for SynthesizeLongAudioResponse + * @function getTypeUrl + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + SynthesizeLongAudioResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.texttospeech.v1.SynthesizeLongAudioResponse"; + }; + + return SynthesizeLongAudioResponse; + })(); + + v1.SynthesizeLongAudioMetadata = (function() { + + /** + * Properties of a SynthesizeLongAudioMetadata. + * @memberof google.cloud.texttospeech.v1 + * @interface ISynthesizeLongAudioMetadata + * @property {google.protobuf.ITimestamp|null} [startTime] SynthesizeLongAudioMetadata startTime + * @property {google.protobuf.ITimestamp|null} [lastUpdateTime] SynthesizeLongAudioMetadata lastUpdateTime + * @property {number|null} [progressPercentage] SynthesizeLongAudioMetadata progressPercentage + */ + + /** + * Constructs a new SynthesizeLongAudioMetadata. + * @memberof google.cloud.texttospeech.v1 + * @classdesc Represents a SynthesizeLongAudioMetadata. 
+ * @implements ISynthesizeLongAudioMetadata + * @constructor + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata=} [properties] Properties to set + */ + function SynthesizeLongAudioMetadata(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * SynthesizeLongAudioMetadata startTime. + * @member {google.protobuf.ITimestamp|null|undefined} startTime + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @instance + */ + SynthesizeLongAudioMetadata.prototype.startTime = null; + + /** + * SynthesizeLongAudioMetadata lastUpdateTime. + * @member {google.protobuf.ITimestamp|null|undefined} lastUpdateTime + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @instance + */ + SynthesizeLongAudioMetadata.prototype.lastUpdateTime = null; + + /** + * SynthesizeLongAudioMetadata progressPercentage. + * @member {number} progressPercentage + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @instance + */ + SynthesizeLongAudioMetadata.prototype.progressPercentage = 0; + + /** + * Creates a new SynthesizeLongAudioMetadata instance using the specified properties. + * @function create + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @static + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata=} [properties] Properties to set + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata} SynthesizeLongAudioMetadata instance + */ + SynthesizeLongAudioMetadata.create = function create(properties) { + return new SynthesizeLongAudioMetadata(properties); + }; + + /** + * Encodes the specified SynthesizeLongAudioMetadata message. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @static + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata} message SynthesizeLongAudioMetadata message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SynthesizeLongAudioMetadata.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.startTime != null && Object.hasOwnProperty.call(message, "startTime")) + $root.google.protobuf.Timestamp.encode(message.startTime, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.lastUpdateTime != null && Object.hasOwnProperty.call(message, "lastUpdateTime")) + $root.google.protobuf.Timestamp.encode(message.lastUpdateTime, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.progressPercentage != null && Object.hasOwnProperty.call(message, "progressPercentage")) + writer.uint32(/* id 3, wireType 1 =*/25).double(message.progressPercentage); + return writer; + }; + + /** + * Encodes the specified SynthesizeLongAudioMetadata message, length delimited. Does not implicitly {@link google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @static + * @param {google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata} message SynthesizeLongAudioMetadata message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SynthesizeLongAudioMetadata.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a SynthesizeLongAudioMetadata message from the specified reader or buffer. 
+ * @function decode + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata} SynthesizeLongAudioMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SynthesizeLongAudioMetadata.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.startTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + break; + } + case 2: { + message.lastUpdateTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + break; + } + case 3: { + message.progressPercentage = reader.double(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a SynthesizeLongAudioMetadata message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata} SynthesizeLongAudioMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SynthesizeLongAudioMetadata.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a SynthesizeLongAudioMetadata message. + * @function verify + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + SynthesizeLongAudioMetadata.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.startTime != null && message.hasOwnProperty("startTime")) { + var error = $root.google.protobuf.Timestamp.verify(message.startTime); + if (error) + return "startTime." + error; + } + if (message.lastUpdateTime != null && message.hasOwnProperty("lastUpdateTime")) { + var error = $root.google.protobuf.Timestamp.verify(message.lastUpdateTime); + if (error) + return "lastUpdateTime." + error; + } + if (message.progressPercentage != null && message.hasOwnProperty("progressPercentage")) + if (typeof message.progressPercentage !== "number") + return "progressPercentage: number expected"; + return null; + }; + + /** + * Creates a SynthesizeLongAudioMetadata message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata} SynthesizeLongAudioMetadata + */ + SynthesizeLongAudioMetadata.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata) + return object; + var message = new $root.google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata(); + if (object.startTime != null) { + if (typeof object.startTime !== "object") + throw TypeError(".google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata.startTime: object expected"); + message.startTime = $root.google.protobuf.Timestamp.fromObject(object.startTime); + } + if (object.lastUpdateTime != null) { + if (typeof object.lastUpdateTime !== "object") + throw TypeError(".google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata.lastUpdateTime: object expected"); + message.lastUpdateTime = $root.google.protobuf.Timestamp.fromObject(object.lastUpdateTime); + } + if (object.progressPercentage != null) + message.progressPercentage = Number(object.progressPercentage); + return message; + }; + + /** + * Creates a plain object from a SynthesizeLongAudioMetadata message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @static + * @param {google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata} message SynthesizeLongAudioMetadata + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + SynthesizeLongAudioMetadata.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.startTime = null; + object.lastUpdateTime = null; + object.progressPercentage = 0; + } + if (message.startTime != null && message.hasOwnProperty("startTime")) + object.startTime = $root.google.protobuf.Timestamp.toObject(message.startTime, options); + if (message.lastUpdateTime != null && message.hasOwnProperty("lastUpdateTime")) + object.lastUpdateTime = $root.google.protobuf.Timestamp.toObject(message.lastUpdateTime, options); + if (message.progressPercentage != null && message.hasOwnProperty("progressPercentage")) + object.progressPercentage = options.json && !isFinite(message.progressPercentage) ? String(message.progressPercentage) : message.progressPercentage; + return object; + }; + + /** + * Converts this SynthesizeLongAudioMetadata to JSON. 
+ * @function toJSON + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @instance + * @returns {Object.} JSON object + */ + SynthesizeLongAudioMetadata.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for SynthesizeLongAudioMetadata + * @function getTypeUrl + * @memberof google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + SynthesizeLongAudioMetadata.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata"; + }; + + return SynthesizeLongAudioMetadata; + })(); + return v1; })(); diff --git a/packages/google-cloud-texttospeech/protos/protos.json b/packages/google-cloud-texttospeech/protos/protos.json index 1fd330d28e1..c49b9141911 100644 --- a/packages/google-cloud-texttospeech/protos/protos.json +++ b/packages/google-cloud-texttospeech/protos/protos.json @@ -12,7 +12,7 @@ "csharp_namespace": "Google.Cloud.TextToSpeech.V1", "go_package": "google.golang.org/genproto/googleapis/cloud/texttospeech/v1;texttospeech", "java_multiple_files": true, - "java_outer_classname": "TextToSpeechProto", + "java_outer_classname": "TextToSpeechLongAudioSynthesisProto", "java_package": "com.google.cloud.texttospeech.v1", "php_namespace": "Google\\Cloud\\TextToSpeech\\V1", "ruby_package": "Google::Cloud::TextToSpeech::V1", @@ -275,6 +275,87 @@ "id": 1 } } + }, + "TextToSpeechLongAudioSynthesize": { + "options": { + "(google.api.default_host)": "texttospeech.googleapis.com", + "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/cloud-platform" + }, + "methods": { + "SynthesizeLongAudio": { + "requestType": "SynthesizeLongAudioRequest", + 
"responseType": "google.longrunning.Operation", + "options": { + "(google.api.http).post": "/v1/{parent=projects/*/locations/*/voices/*}:SynthesizeLongAudio", + "(google.api.http).body": "*", + "(google.longrunning.operation_info).response_type": "SynthesizeLongAudioResponse", + "(google.longrunning.operation_info).metadata_type": "SynthesizeLongAudioMetadata" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{parent=projects/*/locations/*/voices/*}:SynthesizeLongAudio", + "body": "*" + } + }, + { + "(google.longrunning.operation_info)": { + "response_type": "SynthesizeLongAudioResponse", + "metadata_type": "SynthesizeLongAudioMetadata" + } + } + ] + } + } + }, + "SynthesizeLongAudioRequest": { + "fields": { + "parent": { + "type": "string", + "id": 1 + }, + "input": { + "type": "SynthesisInput", + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "audioConfig": { + "type": "AudioConfig", + "id": 3, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "outputGcsUri": { + "type": "string", + "id": 4 + }, + "voice": { + "type": "VoiceSelectionParams", + "id": 5 + } + } + }, + "SynthesizeLongAudioResponse": { + "fields": {} + }, + "SynthesizeLongAudioMetadata": { + "fields": { + "startTime": { + "type": "google.protobuf.Timestamp", + "id": 1 + }, + "lastUpdateTime": { + "type": "google.protobuf.Timestamp", + "id": 2 + }, + "progressPercentage": { + "type": "double", + "id": 3 + } + } } } }, diff --git a/packages/google-cloud-texttospeech/samples/README.md b/packages/google-cloud-texttospeech/samples/README.md index e87d0e3e737..d858e17fd76 100644 --- a/packages/google-cloud-texttospeech/samples/README.md +++ b/packages/google-cloud-texttospeech/samples/README.md @@ -14,6 +14,7 @@ * [Samples](#samples) * [Text_to_speech.list_voices](#text_to_speech.list_voices) * [Text_to_speech.synthesize_speech](#text_to_speech.synthesize_speech) + * 
[Text_to_speech_long_audio_synthesize.synthesize_long_audio](#text_to_speech_long_audio_synthesize.synthesize_long_audio) * [Text_to_speech.list_voices](#text_to_speech.list_voices) * [Text_to_speech.synthesize_speech](#text_to_speech.synthesize_speech) * [Text_to_speech_long_audio_synthesize.synthesize_long_audio](#text_to_speech_long_audio_synthesize.synthesize_long_audio) @@ -69,6 +70,23 @@ __Usage:__ +### Text_to_speech_long_audio_synthesize.synthesize_long_audio + +View the [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js). + +[![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js,samples/README.md) + +__Usage:__ + + +`node packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js` + + +----- + + + + ### Text_to_speech.list_voices View the [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-texttospeech/samples/generated/v1beta1/text_to_speech.list_voices.js). 
diff --git a/packages/google-cloud-texttospeech/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json b/packages/google-cloud-texttospeech/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json index 04ef8a5552e..62997b152ec 100644 --- a/packages/google-cloud-texttospeech/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json +++ b/packages/google-cloud-texttospeech/samples/generated/v1/snippet_metadata.google.cloud.texttospeech.v1.json @@ -98,6 +98,62 @@ } } } + }, + { + "regionTag": "texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async", + "title": "TextToSpeech synthesizeLongAudio Sample", + "origin": "API_DEFINITION", + "description": " Synthesizes long form text asynchronously.", + "canonical": true, + "file": "text_to_speech_long_audio_synthesize.synthesize_long_audio.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 74, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "SynthesizeLongAudio", + "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudio", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "input", + "type": ".google.cloud.texttospeech.v1.SynthesisInput" + }, + { + "name": "audio_config", + "type": ".google.cloud.texttospeech.v1.AudioConfig" + }, + { + "name": "output_gcs_uri", + "type": "TYPE_STRING" + }, + { + "name": "voice", + "type": ".google.cloud.texttospeech.v1.VoiceSelectionParams" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "TextToSpeechLongAudioSynthesizeClient", + "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesizeClient" + }, + "method": { + "shortName": "SynthesizeLongAudio", + "fullName": "google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize.SynthesizeLongAudio", + "service": { + "shortName": "TextToSpeechLongAudioSynthesize", + "fullName": 
"google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize" + } + } + } } ] } \ No newline at end of file diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js b/packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js similarity index 100% rename from owl-bot-staging/google-cloud-texttospeech/v1/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js rename to packages/google-cloud-texttospeech/samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js diff --git a/packages/google-cloud-texttospeech/src/index.ts b/packages/google-cloud-texttospeech/src/index.ts index fc64484d721..1f78201d044 100644 --- a/packages/google-cloud-texttospeech/src/index.ts +++ b/packages/google-cloud-texttospeech/src/index.ts @@ -21,8 +21,17 @@ import * as v1beta1 from './v1beta1'; const TextToSpeechClient = v1.TextToSpeechClient; type TextToSpeechClient = v1.TextToSpeechClient; +const TextToSpeechLongAudioSynthesizeClient = + v1.TextToSpeechLongAudioSynthesizeClient; +type TextToSpeechLongAudioSynthesizeClient = + v1.TextToSpeechLongAudioSynthesizeClient; -export {v1, v1beta1, TextToSpeechClient}; -export default {v1, v1beta1, TextToSpeechClient}; +export {v1, v1beta1, TextToSpeechClient, TextToSpeechLongAudioSynthesizeClient}; +export default { + v1, + v1beta1, + TextToSpeechClient, + TextToSpeechLongAudioSynthesizeClient, +}; import * as protos from '../protos/protos'; export {protos}; diff --git a/packages/google-cloud-texttospeech/src/v1/gapic_metadata.json b/packages/google-cloud-texttospeech/src/v1/gapic_metadata.json index 301ecb6d495..2d09d513d5f 100644 --- a/packages/google-cloud-texttospeech/src/v1/gapic_metadata.json +++ b/packages/google-cloud-texttospeech/src/v1/gapic_metadata.json @@ -38,6 +38,30 @@ } } } + }, + "TextToSpeechLongAudioSynthesize": { + "clients": { + "grpc": 
{ + "libraryClient": "TextToSpeechLongAudioSynthesizeClient", + "rpcs": { + "SynthesizeLongAudio": { + "methods": [ + "synthesizeLongAudio" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "TextToSpeechLongAudioSynthesizeClient", + "rpcs": { + "SynthesizeLongAudio": { + "methods": [ + "synthesizeLongAudio" + ] + } + } + } + } } } } diff --git a/packages/google-cloud-texttospeech/src/v1/index.ts b/packages/google-cloud-texttospeech/src/v1/index.ts index 65a0b58ddf9..3cf28b93bc3 100644 --- a/packages/google-cloud-texttospeech/src/v1/index.ts +++ b/packages/google-cloud-texttospeech/src/v1/index.ts @@ -17,3 +17,4 @@ // ** All changes to this file may be overwritten. ** export {TextToSpeechClient} from './text_to_speech_client'; +export {TextToSpeechLongAudioSynthesizeClient} from './text_to_speech_long_audio_synthesize_client'; diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client.ts b/packages/google-cloud-texttospeech/src/v1/text_to_speech_long_audio_synthesize_client.ts similarity index 71% rename from owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client.ts rename to packages/google-cloud-texttospeech/src/v1/text_to_speech_long_audio_synthesize_client.ts index 326aaafef56..1d47929462b 100644 --- a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client.ts +++ b/packages/google-cloud-texttospeech/src/v1/text_to_speech_long_audio_synthesize_client.ts @@ -18,7 +18,14 @@ /* global window */ import type * as gax from 'google-gax'; -import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; +import type { + Callback, + CallOptions, + Descriptors, + ClientOptions, + GrpcClientOptions, + LROperation, +} from 'google-gax'; import * as protos from '../../protos/protos'; import jsonProtos = require('../../protos/protos.json'); @@ -95,14 +102,23 @@ export class 
TextToSpeechLongAudioSynthesizeClient { * const client = new TextToSpeechLongAudioSynthesizeClient({fallback: 'rest'}, gax); * ``` */ - constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + constructor( + opts?: ClientOptions, + gaxInstance?: typeof gax | typeof gax.fallback + ) { // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof TextToSpeechLongAudioSynthesizeClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const staticMembers = this + .constructor as typeof TextToSpeechLongAudioSynthesizeClient; + const servicePath = + opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!( + opts?.servicePath || opts?.apiEndpoint + ); const port = opts?.port || staticMembers.port; const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + const fallback = + opts?.fallback ?? + (typeof window !== 'undefined' && typeof window?.fetch === 'function'); opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); // Request numeric enum values if REST transport is used. @@ -128,7 +144,7 @@ export class TextToSpeechLongAudioSynthesizeClient { this._opts = opts; // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + this.auth = this._gaxGrpc.auth as gax.GoogleAuth; // Set useJWTAccessWithScope on the auth object. this.auth.useJWTAccessWithScope = true; @@ -142,10 +158,7 @@ export class TextToSpeechLongAudioSynthesizeClient { } // Determine the client header string. 
- const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; + const clientHeader = [`gax/${this._gaxModule.version}`, `gapic/${version}`]; if (typeof process !== 'undefined' && 'versions' in process) { clientHeader.push(`gl-node/${process.versions.node}`); } else { @@ -153,7 +166,7 @@ export class TextToSpeechLongAudioSynthesizeClient { } if (!opts.fallback) { clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { + } else if (opts.fallback === 'rest') { clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); } if (opts.libName && opts.libVersion) { @@ -168,29 +181,37 @@ export class TextToSpeechLongAudioSynthesizeClient { // rather than holding a request open. const lroOptions: GrpcClientOptions = { auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined + grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined, }; if (opts.fallback === 'rest') { lroOptions.protoJson = protoFilesRoot; lroOptions.httpRules = []; } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + this.operationsClient = this._gaxModule + .lro(lroOptions) + .operationsClient(opts); const synthesizeLongAudioResponse = protoFilesRoot.lookup( - '.google.cloud.texttospeech.v1.SynthesizeLongAudioResponse') as gax.protobuf.Type; + '.google.cloud.texttospeech.v1.SynthesizeLongAudioResponse' + ) as gax.protobuf.Type; const synthesizeLongAudioMetadata = protoFilesRoot.lookup( - '.google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata') as gax.protobuf.Type; + '.google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata' + ) as gax.protobuf.Type; this.descriptors.longrunning = { synthesizeLongAudio: new this._gaxModule.LongrunningDescriptor( this.operationsClient, synthesizeLongAudioResponse.decode.bind(synthesizeLongAudioResponse), - synthesizeLongAudioMetadata.decode.bind(synthesizeLongAudioMetadata)) + synthesizeLongAudioMetadata.decode.bind(synthesizeLongAudioMetadata) 
+ ), }; // Put together the default options sent with requests. this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + 'google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize', + gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, + {'x-goog-api-client': clientHeader.join(' ')} + ); // Set up a dictionary of "inner API calls"; the core implementation // of calling the API is handled in `google-gax`, with this code @@ -221,32 +242,36 @@ export class TextToSpeechLongAudioSynthesizeClient { // Put together the "service stub" for // google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize. this.textToSpeechLongAudioSynthesizeStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + this._opts.fallback + ? (this._protos as protobuf.Root).lookupService( + 'google.cloud.texttospeech.v1.TextToSpeechLongAudioSynthesize' + ) + : // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.texttospeech.v1 + .TextToSpeechLongAudioSynthesize, + this._opts, + this._providedCustomServicePath + ) as Promise<{[method: string]: Function}>; // Iterate over each of the methods that the service provides // and create an API call method for each. 
- const textToSpeechLongAudioSynthesizeStubMethods = - ['synthesizeLongAudio']; + const textToSpeechLongAudioSynthesizeStubMethods = ['synthesizeLongAudio']; for (const methodName of textToSpeechLongAudioSynthesizeStubMethods) { const callPromise = this.textToSpeechLongAudioSynthesizeStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { + stub => + (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error | null | undefined) => () => { throw err; - }); + } + ); - const descriptor = - this.descriptors.longrunning[methodName] || - undefined; + const descriptor = this.descriptors.longrunning[methodName] || undefined; const apiCall = this._gaxModule.createApiCall( callPromise, this._defaults[methodName], @@ -291,9 +316,7 @@ export class TextToSpeechLongAudioSynthesizeClient { * @returns {string[]} List of default scopes. */ static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; + return ['https://www.googleapis.com/auth/cloud-platform']; } getProjectId(): Promise; @@ -302,8 +325,9 @@ export class TextToSpeechLongAudioSynthesizeClient { * Return the project ID used by this class. * @returns {Promise} A promise that resolves to string containing the project ID. */ - getProjectId(callback?: Callback): - Promise|void { + getProjectId( + callback?: Callback + ): Promise | void { if (callback) { this.auth.getProjectId(callback); return; @@ -315,109 +339,156 @@ export class TextToSpeechLongAudioSynthesizeClient { // -- Service calls -- // ------------------- -/** - * Synthesizes long form text asynchronously. - * - * @param {Object} request - * The request object that will be sent. 
- * @param {string} request.parent - * The resource states of the request in the form of - * `projects/* /locations/* /voices/*`. - * @param {google.cloud.texttospeech.v1.SynthesisInput} request.input - * Required. The Synthesizer requires either plain text or SSML as input. - * @param {google.cloud.texttospeech.v1.AudioConfig} request.audioConfig - * Required. The configuration of the synthesized audio. - * @param {string} request.outputGcsUri - * Specifies a Cloud Storage URI for the synthesis results. Must be - * specified in the format: `gs://bucket_name/object_name`, and the bucket - * must already exist. - * @param {google.cloud.texttospeech.v1.VoiceSelectionParams} request.voice - * The desired voice of the synthesized audio. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js - * region_tag:texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async - */ + /** + * Synthesizes long form text asynchronously. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * The resource states of the request in the form of + * `projects/* /locations/* /voices/*`. + * @param {google.cloud.texttospeech.v1.SynthesisInput} request.input + * Required. The Synthesizer requires either plain text or SSML as input. 
+ * @param {google.cloud.texttospeech.v1.AudioConfig} request.audioConfig + * Required. The configuration of the synthesized audio. + * @param {string} request.outputGcsUri + * Specifies a Cloud Storage URI for the synthesis results. Must be + * specified in the format: `gs://bucket_name/object_name`, and the bucket + * must already exist. + * @param {google.cloud.texttospeech.v1.VoiceSelectionParams} request.voice + * The desired voice of the synthesized audio. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js + * region_tag:texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async + */ synthesizeLongAudio( - request?: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; + request?: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, + options?: CallOptions + ): Promise< + [ + LROperation< + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata + >, + protos.google.longrunning.IOperation | undefined, + {} | undefined + ] + >; synthesizeLongAudio( - request: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; + request: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, + options: CallOptions, + callback: Callback< + LROperation< + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): void; synthesizeLongAudio( - request: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; + request: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, + callback: Callback< + LROperation< + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): void; synthesizeLongAudio( - request?: 
protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { + request?: protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioRequest, + optionsOrCallback?: + | CallOptions + | Callback< + LROperation< + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + >, + callback?: Callback< + LROperation< + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): Promise< + [ + LROperation< + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata + >, + protos.google.longrunning.IOperation | undefined, + {} | undefined + ] + > | void { request = request || {}; let options: CallOptions; if (typeof optionsOrCallback === 'function' && callback === undefined) { callback = optionsOrCallback; options = {}; - } - else { + } else { options = optionsOrCallback as CallOptions; } options = options || {}; options.otherArgs = options.otherArgs || {}; options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); + options.otherArgs.headers['x-goog-request-params'] = + this._gaxModule.routingHeader.fromParams({ + parent: request.parent ?? 
'', + }); this.initialize(); return this.innerApiCalls.synthesizeLongAudio(request, options, callback); } -/** - * Check the status of the long running operation returned by `synthesizeLongAudio()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js - * region_tag:texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async - */ - async checkSynthesizeLongAudioProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + /** + * Check the status of the long running operation returned by `synthesizeLongAudio()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/text_to_speech_long_audio_synthesize.synthesize_long_audio.js + * region_tag:texttospeech_v1_generated_TextToSpeechLongAudioSynthesize_SynthesizeLongAudio_async + */ + async checkSynthesizeLongAudioProgress( + name: string + ): Promise< + LROperation< + protos.google.cloud.texttospeech.v1.SynthesizeLongAudioResponse, + protos.google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + > + > { + const request = + new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest( + {name} + ); const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.synthesizeLongAudio, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; + const decodeOperation = new this._gaxModule.Operation( + operation, + this.descriptors.longrunning.synthesizeLongAudio, + this._gaxModule.createDefaultBackoffSettings() + ); + return decodeOperation as LROperation< + protos.google.cloud.texttospeech.v1.SynthesizeLongAudioResponse, + protos.google.cloud.texttospeech.v1.SynthesizeLongAudioMetadata + >; } -/** + /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. @@ -533,7 +604,7 @@ export class TextToSpeechLongAudioSynthesizeClient { * await client.cancelOperation({name: ''}); * ``` */ - cancelOperation( + cancelOperation( request: protos.google.longrunning.CancelOperationRequest, options?: | gax.CallOptions @@ -594,7 +665,6 @@ export class TextToSpeechLongAudioSynthesizeClient { return this.operationsClient.deleteOperation(request, options, callback); } - /** * Terminate the gRPC channel and close the client. 
* diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client_config.json b/packages/google-cloud-texttospeech/src/v1/text_to_speech_long_audio_synthesize_client_config.json similarity index 100% rename from owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_client_config.json rename to packages/google-cloud-texttospeech/src/v1/text_to_speech_long_audio_synthesize_client_config.json diff --git a/owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_proto_list.json b/packages/google-cloud-texttospeech/src/v1/text_to_speech_long_audio_synthesize_proto_list.json similarity index 100% rename from owl-bot-staging/google-cloud-texttospeech/v1/src/v1/text_to_speech_long_audio_synthesize_proto_list.json rename to packages/google-cloud-texttospeech/src/v1/text_to_speech_long_audio_synthesize_proto_list.json diff --git a/packages/google-cloud-texttospeech/src/v1/text_to_speech_proto_list.json b/packages/google-cloud-texttospeech/src/v1/text_to_speech_proto_list.json index a2b3f234f3f..58814dcb836 100644 --- a/packages/google-cloud-texttospeech/src/v1/text_to_speech_proto_list.json +++ b/packages/google-cloud-texttospeech/src/v1/text_to_speech_proto_list.json @@ -1,3 +1,4 @@ [ - "../../protos/google/cloud/texttospeech/v1/cloud_tts.proto" + "../../protos/google/cloud/texttospeech/v1/cloud_tts.proto", + "../../protos/google/cloud/texttospeech/v1/cloud_tts_lrs.proto" ] diff --git a/packages/google-cloud-texttospeech/system-test/fixtures/sample/src/index.js b/packages/google-cloud-texttospeech/system-test/fixtures/sample/src/index.js index b0689eba403..08d9ba33777 100644 --- a/packages/google-cloud-texttospeech/system-test/fixtures/sample/src/index.js +++ b/packages/google-cloud-texttospeech/system-test/fixtures/sample/src/index.js @@ -21,6 +21,8 @@ const texttospeech = require('@google-cloud/text-to-speech'); function main() { const textToSpeechClient = new 
texttospeech.TextToSpeechClient(); + const textToSpeechLongAudioSynthesizeClient = + new texttospeech.TextToSpeechLongAudioSynthesizeClient(); } main(); diff --git a/packages/google-cloud-texttospeech/system-test/fixtures/sample/src/index.ts b/packages/google-cloud-texttospeech/system-test/fixtures/sample/src/index.ts index 78c11ac7542..7e5f823b3f9 100644 --- a/packages/google-cloud-texttospeech/system-test/fixtures/sample/src/index.ts +++ b/packages/google-cloud-texttospeech/system-test/fixtures/sample/src/index.ts @@ -16,17 +16,31 @@ // ** https://github.com/googleapis/gapic-generator-typescript ** // ** All changes to this file may be overwritten. ** -import {TextToSpeechClient} from '@google-cloud/text-to-speech'; +import { + TextToSpeechClient, + TextToSpeechLongAudioSynthesizeClient, +} from '@google-cloud/text-to-speech'; // check that the client class type name can be used function doStuffWithTextToSpeechClient(client: TextToSpeechClient) { client.close(); } +function doStuffWithTextToSpeechLongAudioSynthesizeClient( + client: TextToSpeechLongAudioSynthesizeClient +) { + client.close(); +} function main() { // check that the client instance can be created const textToSpeechClient = new TextToSpeechClient(); doStuffWithTextToSpeechClient(textToSpeechClient); + // check that the client instance can be created + const textToSpeechLongAudioSynthesizeClient = + new TextToSpeechLongAudioSynthesizeClient(); + doStuffWithTextToSpeechLongAudioSynthesizeClient( + textToSpeechLongAudioSynthesizeClient + ); } main(); diff --git a/packages/google-cloud-texttospeech/test/gapic_text_to_speech_long_audio_synthesize_v1.ts b/packages/google-cloud-texttospeech/test/gapic_text_to_speech_long_audio_synthesize_v1.ts new file mode 100644 index 00000000000..3692502a32b --- /dev/null +++ b/packages/google-cloud-texttospeech/test/gapic_text_to_speech_long_audio_synthesize_v1.ts @@ -0,0 +1,785 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as texttospeechlongaudiosynthesizeModule from '../src'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON( + require('../protos/protos.json') +).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = ( + instance.constructor as typeof protobuf.Message + ).toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject( + filledObject + ) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error + ? 
sinon.stub().rejects(error) + : sinon.stub().resolves([response]); +} + +function stubLongRunningCall( + response?: ResponseType, + callError?: Error, + lroError?: Error +) { + const innerStub = lroError + ? sinon.stub().rejects(lroError) + : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError + ? sinon.stub().rejects(callError) + : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback( + response?: ResponseType, + callError?: Error, + lroError?: Error +) { + const innerStub = lroError + ? sinon.stub().rejects(lroError) + : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError + ? sinon.stub().callsArgWith(2, callError) + : sinon.stub().callsArgWith(2, null, mockOperation); +} + +function stubAsyncIterationCall( + responses?: ResponseType[], + error?: Error +) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + }, + }; + }, + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v1.TextToSpeechLongAudioSynthesizeClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = + texttospeechlongaudiosynthesizeModule.v1 + .TextToSpeechLongAudioSynthesizeClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = + texttospeechlongaudiosynthesizeModule.v1 + .TextToSpeechLongAudioSynthesizeClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = + texttospeechlongaudiosynthesizeModule.v1 + .TextToSpeechLongAudioSynthesizeClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no 
option', () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + fallback: true, + } + ); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + assert.strictEqual(client.textToSpeechLongAudioSynthesizeStub, undefined); + await client.initialize(); + assert(client.textToSpeechLongAudioSynthesizeStub); + }); + + it('has close method for the initialized client', done => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + assert(client.textToSpeechLongAudioSynthesizeStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + assert.strictEqual(client.textToSpeechLongAudioSynthesizeStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await 
client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.auth.getProjectId = sinon + .stub() + .callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error | null, projectId?: string | null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('synthesizeLongAudio', () => { + it('invokes synthesizeLongAudio without error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', + ['parent'] + ); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.synthesizeLongAudio = + stubLongRunningCall(expectedResponse); + const [operation] = await client.synthesizeLongAudio(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = ( + client.innerApiCalls.synthesizeLongAudio as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, 
request); + const actualHeaderRequestParams = ( + client.innerApiCalls.synthesizeLongAudio as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes synthesizeLongAudio without error using callback', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', + ['parent'] + ); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.synthesizeLongAudio = + stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.synthesizeLongAudio( + request, + ( + err?: Error | null, + result?: LROperation< + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata + > | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + } + ); + }); + const operation = (await promise) as LROperation< + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioResponse, + protos.google.cloud.texttospeech.v1.ISynthesizeLongAudioMetadata + >; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = ( + client.innerApiCalls.synthesizeLongAudio as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + 
client.innerApiCalls.synthesizeLongAudio as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes synthesizeLongAudio with call error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', + ['parent'] + ); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall( + undefined, + expectedError + ); + await assert.rejects(client.synthesizeLongAudio(request), expectedError); + const actualRequest = ( + client.innerApiCalls.synthesizeLongAudio as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.synthesizeLongAudio as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes synthesizeLongAudio with LRO error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.texttospeech.v1.SynthesizeLongAudioRequest', + ['parent'] + ); + 
request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.synthesizeLongAudio = stubLongRunningCall( + undefined, + undefined, + expectedError + ); + const [operation] = await client.synthesizeLongAudio(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = ( + client.innerApiCalls.synthesizeLongAudio as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.synthesizeLongAudio as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkSynthesizeLongAudioProgress without error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')}; + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkSynthesizeLongAudioProgress( + expectedResponse.name + ); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkSynthesizeLongAudioProgress with error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 
'bogus', + } + ); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall( + undefined, + expectedError + ); + await assert.rejects( + client.checkSynthesizeLongAudioProgress(''), + expectedError + ); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + }); + describe('getOperation', () => { + it('invokes getOperation without error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.GetOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const response = await client.getOperation(request); + assert.deepStrictEqual(response, [expectedResponse]); + assert( + (client.operationsClient.getOperation as SinonStub) + .getCall(0) + .calledWith(request) + ); + }); + it('invokes getOperation without error using callback', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.GetOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + client.operationsClient.getOperation = sinon + .stub() + .callsArgWith(2, null, expectedResponse); + const promise = new Promise((resolve, reject) => { + client.operationsClient.getOperation( + request, + undefined, + ( + err?: Error | null, + result?: operationsProtos.google.longrunning.Operation | null + ) => { 
+ if (err) { + reject(err); + } else { + resolve(result); + } + } + ); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + it('invokes getOperation with error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.GetOperationRequest() + ); + const expectedError = new Error('expected'); + client.operationsClient.getOperation = stubSimpleCall( + undefined, + expectedError + ); + await assert.rejects(async () => { + await client.getOperation(request); + }, expectedError); + assert( + (client.operationsClient.getOperation as SinonStub) + .getCall(0) + .calledWith(request) + ); + }); + }); + describe('cancelOperation', () => { + it('invokes cancelOperation without error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.CancelOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.cancelOperation = + stubSimpleCall(expectedResponse); + const response = await client.cancelOperation(request); + assert.deepStrictEqual(response, [expectedResponse]); + assert( + (client.operationsClient.cancelOperation as SinonStub) + .getCall(0) + .calledWith(request) + ); + }); + it('invokes cancelOperation without error using callback', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: 
{client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.CancelOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.cancelOperation = sinon + .stub() + .callsArgWith(2, null, expectedResponse); + const promise = new Promise((resolve, reject) => { + client.operationsClient.cancelOperation( + request, + undefined, + ( + err?: Error | null, + result?: protos.google.protobuf.Empty | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + } + ); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.operationsClient.cancelOperation as SinonStub).getCall(0)); + }); + it('invokes cancelOperation with error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.CancelOperationRequest() + ); + const expectedError = new Error('expected'); + client.operationsClient.cancelOperation = stubSimpleCall( + undefined, + expectedError + ); + await assert.rejects(async () => { + await client.cancelOperation(request); + }, expectedError); + assert( + (client.operationsClient.cancelOperation as SinonStub) + .getCall(0) + .calledWith(request) + ); + }); + }); + describe('deleteOperation', () => { + it('invokes deleteOperation without error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.DeleteOperationRequest() + 
); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.deleteOperation = + stubSimpleCall(expectedResponse); + const response = await client.deleteOperation(request); + assert.deepStrictEqual(response, [expectedResponse]); + assert( + (client.operationsClient.deleteOperation as SinonStub) + .getCall(0) + .calledWith(request) + ); + }); + it('invokes deleteOperation without error using callback', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.DeleteOperationRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.operationsClient.deleteOperation = sinon + .stub() + .callsArgWith(2, null, expectedResponse); + const promise = new Promise((resolve, reject) => { + client.operationsClient.deleteOperation( + request, + undefined, + ( + err?: Error | null, + result?: protos.google.protobuf.Empty | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + } + ); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.operationsClient.deleteOperation as SinonStub).getCall(0)); + }); + it('invokes deleteOperation with error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.DeleteOperationRequest() + ); + const expectedError = new Error('expected'); + client.operationsClient.deleteOperation = stubSimpleCall( + undefined, + expectedError + ); + await assert.rejects(async () => { + await 
client.deleteOperation(request); + }, expectedError); + assert( + (client.operationsClient.deleteOperation as SinonStub) + .getCall(0) + .calledWith(request) + ); + }); + }); + describe('listOperationsAsync', () => { + it('uses async iteration with listOperations without error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsRequest() + ); + const expectedResponse = [ + generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsResponse() + ), + generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsResponse() + ), + generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsResponse() + ), + ]; + client.operationsClient.descriptor.listOperations.asyncIterate = + stubAsyncIterationCall(expectedResponse); + const responses: operationsProtos.google.longrunning.ListOperationsResponse[] = + []; + const iterable = client.operationsClient.listOperationsAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + ( + client.operationsClient.descriptor.listOperations + .asyncIterate as SinonStub + ).getCall(0).args[1], + request + ); + }); + it('uses async iteration with listOperations with error', async () => { + const client = + new texttospeechlongaudiosynthesizeModule.v1.TextToSpeechLongAudioSynthesizeClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new operationsProtos.google.longrunning.ListOperationsRequest() + ); + const expectedError = new Error('expected'); + 
client.operationsClient.descriptor.listOperations.asyncIterate = + stubAsyncIterationCall(undefined, expectedError); + const iterable = client.operationsClient.listOperationsAsync(request); + await assert.rejects(async () => { + const responses: operationsProtos.google.longrunning.ListOperationsResponse[] = + []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + ( + client.operationsClient.descriptor.listOperations + .asyncIterate as SinonStub + ).getCall(0).args[1], + request + ); + }); + }); +});