diff --git a/.github/workflows/plugins.yml b/.github/workflows/plugins.yml
index 2517d3f289d..d908ec66566 100644
--- a/.github/workflows/plugins.yml
+++ b/.github/workflows/plugins.yml
@@ -440,6 +440,14 @@ jobs:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - uses: ./.github/actions/plugins/test
 
+  google-cloud-vertexai:
+    runs-on: ubuntu-latest
+    env:
+      PLUGINS: google-cloud-vertexai
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: ./.github/actions/plugins/test
+
   graphql:
     runs-on: ubuntu-latest
     env:
diff --git a/CODEOWNERS b/CODEOWNERS
index 1d3f2fd373b..1449aaf0b95 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -57,8 +57,10 @@
 /packages/dd-trace/test/llmobs/ @DataDog/ml-observability
 /packages/datadog-plugin-openai/ @DataDog/ml-observability
 /packages/datadog-plugin-langchain/ @DataDog/ml-observability
+/packages/datadog-plugin-google-cloud-vertexai/ @DataDog/ml-observability
 /packages/datadog-instrumentations/src/openai.js @DataDog/ml-observability
 /packages/datadog-instrumentations/src/langchain.js @DataDog/ml-observability
+/packages/datadog-instrumentations/src/google-cloud-vertexai.js @DataDog/ml-observability
 /packages/datadog-plugin-aws-sdk/src/services/bedrockruntime @DataDog/ml-observability
 /packages/datadog-plugin-aws-sdk/test/bedrockruntime.spec.js @DataDog/ml-observability
diff --git a/docs/test.ts b/docs/test.ts
index 7691b813810..d86ed36a53b 100644
--- a/docs/test.ts
+++ b/docs/test.ts
@@ -312,6 +312,7 @@ tracer.use('fetch');
 tracer.use('fetch', httpClientOptions);
 tracer.use('generic-pool');
 tracer.use('google-cloud-pubsub');
+tracer.use('google-cloud-vertexai');
 tracer.use('graphql');
 tracer.use('graphql', graphqlOptions);
 tracer.use('graphql', { variables: ['foo', 'bar'] });
diff --git a/index.d.ts b/index.d.ts
index a6bd13a18e1..e5d4435423a 100644
--- a/index.d.ts
+++ b/index.d.ts
@@ -165,6 +165,7 @@ interface Plugins {
   "fetch": tracer.plugins.fetch;
   "generic-pool": tracer.plugins.generic_pool;
   "google-cloud-pubsub": tracer.plugins.google_cloud_pubsub;
+  "google-cloud-vertexai": tracer.plugins.google_cloud_vertexai;
   "graphql": tracer.plugins.graphql;
   "grpc": tracer.plugins.grpc;
   "hapi": tracer.plugins.hapi;
@@ -1370,6 +1371,12 @@ declare namespace tracer {
    */
   interface google_cloud_pubsub extends Integration {}
 
+  /**
+   * This plugin automatically instruments the
+   * [@google-cloud/vertexai](https://github.com/googleapis/nodejs-vertexai) module.
+ */ + interface google_cloud_vertexai extends Integration {} + /** @hidden */ interface ExecutionArgs { schema: any, diff --git a/packages/datadog-instrumentations/src/google-cloud-vertexai.js b/packages/datadog-instrumentations/src/google-cloud-vertexai.js new file mode 100644 index 00000000000..af9c9440235 --- /dev/null +++ b/packages/datadog-instrumentations/src/google-cloud-vertexai.js @@ -0,0 +1,102 @@ +'use strict' + +const { addHook } = require('./helpers/instrument') +const shimmer = require('../../datadog-shimmer') + +const vertexaiTracingChannel = require('dc-polyfill').tracingChannel('apm:vertexai:request') + +function wrapGenerate (generate) { + return function (request) { + if (!vertexaiTracingChannel.start.hasSubscribers) { + return generate.apply(this, arguments) + } + + const ctx = { + request, + instance: this, + resource: [this.constructor.name, generate.name].join('.') + } + + return vertexaiTracingChannel.tracePromise(generate, ctx, this, ...arguments) + } +} + +function wrapGenerateStream (generateStream) { + return function (request) { + if (!vertexaiTracingChannel.start.hasSubscribers) { + return generateStream.apply(this, arguments) + } + + const ctx = { + request, + instance: this, + resource: [this.constructor.name, generateStream.name].join('.'), + stream: true + } + + return vertexaiTracingChannel.start.runStores(ctx, () => { + let streamingResult + try { + streamingResult = generateStream.apply(this, arguments) + } catch (e) { + finish(ctx, null, e, true) + throw e + } + + vertexaiTracingChannel.end.publish(ctx) + + return streamingResult.then(stream => { + stream.response.then(response => { + finish(ctx, response, null) + }).catch(e => { + finish(ctx, null, e) + throw e + }) + + return stream + }).catch(e => { + finish(ctx, null, e) + throw e + }) + }) + } +} + +function finish (ctx, response, err, publishEndEvent = false) { + if (err) { + ctx.error = err + vertexaiTracingChannel.error.publish(ctx) + } + + ctx.result = { response } + + if (publishEndEvent) vertexaiTracingChannel.end.publish(ctx) + + vertexaiTracingChannel.asyncEnd.publish(ctx) +} + +addHook({ + name: '@google-cloud/vertexai', + file: 'build/src/models/generative_models.js', + versions: ['>=1'] +}, exports => { + const GenerativeModel = exports.GenerativeModel + + shimmer.wrap(GenerativeModel.prototype, 'generateContent', wrapGenerate) + shimmer.wrap(GenerativeModel.prototype, 'generateContentStream', wrapGenerateStream) + + return exports +}) + +addHook({ + name: '@google-cloud/vertexai', + file: 'build/src/models/chat_session.js', + versions: ['>=1'] +}, exports => { + const ChatSession = exports.ChatSession + + shimmer.wrap(ChatSession.prototype, 'sendMessage', wrapGenerate) + shimmer.wrap(ChatSession.prototype, 'sendMessageStream', wrapGenerateStream) + + return exports +}) diff --git a/packages/datadog-instrumentations/src/helpers/hooks.js b/packages/datadog-instrumentations/src/helpers/hooks.js index 89df64d113f..5edfa9b5160 100644 --- a/packages/datadog-instrumentations/src/helpers/hooks.js +++ b/packages/datadog-instrumentations/src/helpers/hooks.js @@ -11,6 +11,7 @@ module.exports = { '@elastic/elasticsearch': () => require('../elasticsearch'), '@elastic/transport': () => require('../elasticsearch'), '@google-cloud/pubsub': () => require('../google-cloud-pubsub'), + '@google-cloud/vertexai': () => require('../google-cloud-vertexai'), '@graphql-tools/executor': () => require('../graphql'), '@grpc/grpc-js': () => require('../grpc'), '@hapi/hapi': () => require('../hapi'), diff --git 
a/packages/datadog-plugin-google-cloud-vertexai/src/index.js b/packages/datadog-plugin-google-cloud-vertexai/src/index.js new file mode 100644 index 00000000000..a8b2c1e3cfc --- /dev/null +++ b/packages/datadog-plugin-google-cloud-vertexai/src/index.js @@ -0,0 +1,195 @@ +'use strict' + +const { MEASURED } = require('../../../ext/tags') +const { storage } = require('../../datadog-core') +const TracingPlugin = require('../../dd-trace/src/plugins/tracing') +const makeUtilities = require('../../dd-trace/src/plugins/util/llm') + +class GoogleCloudVertexAIPlugin extends TracingPlugin { + static get id () { return 'google-cloud-vertexai' } + static get prefix () { + return 'tracing:apm:vertexai:request' + } + + constructor () { + super(...arguments) + + Object.assign(this, makeUtilities('vertexai', this._tracerConfig)) + } + + bindStart (ctx) { + const { instance, request, resource, stream } = ctx + + const tags = this.tagRequest(request, instance, stream) + + const span = this.startSpan('vertexai.request', { + service: this.config.service, + resource, + kind: 'client', + meta: { + [MEASURED]: 1, + ...tags + } + }, false) + + const store = storage('legacy').getStore() || {} + ctx.currentStore = { ...store, span } + + return ctx.currentStore + } + + asyncEnd (ctx) { + const span = ctx.currentStore?.span + if (!span) return + + const { result } = ctx + + const response = result?.response + if (response) { + const tags = this.tagResponse(response) + span.addTags(tags) + } + + span.finish() + } + + tagRequest (request, instance, stream) { + const model = extractModel(instance) + const tags = { + 'vertexai.request.model': model + } + + const history = instance.historyInternal + let contents = typeof request === 'string' || Array.isArray(request) ? request : request.contents + if (history) { + contents = [...history, ...(Array.isArray(contents) ? 
contents : [contents])] + } + + const generationConfig = instance.generationConfig || {} + for (const key of Object.keys(generationConfig)) { + const transformedKey = key.replace(/([a-z0-9])([A-Z])/g, '$1_$2').toLowerCase() + tags[`vertexai.request.generation_config.${transformedKey}`] = JSON.stringify(generationConfig[key]) + } + + if (stream) { + tags['vertexai.request.stream'] = true + } + + if (!this.isPromptCompletionSampled()) return tags + + const systemInstructions = extractSystemInstructions(instance) + + for (const [idx, systemInstruction] of systemInstructions.entries()) { + tags[`vertexai.request.system_instruction.${idx}.text`] = systemInstruction + } + + if (typeof contents === 'string') { + tags['vertexai.request.contents.0.text'] = contents + return tags + } + + for (const [contentIdx, content] of contents.entries()) { + this.tagRequestContent(tags, content, contentIdx) + } + + return tags + } + + tagRequestPart (part, tags, partIdx, contentIdx) { + tags[`vertexai.request.contents.${contentIdx}.parts.${partIdx}.text`] = this.normalize(part.text) + + const functionCall = part.functionCall + const functionResponse = part.functionResponse + + if (functionCall) { + tags[`vertexai.request.contents.${contentIdx}.parts.${partIdx}.function_call.name`] = functionCall.name + tags[`vertexai.request.contents.${contentIdx}.parts.${partIdx}.function_call.args`] = + this.normalize(JSON.stringify(functionCall.args)) + } + if (functionResponse) { + tags[`vertexai.request.contents.${contentIdx}.parts.${partIdx}.function_response.name`] = + functionResponse.name + tags[`vertexai.request.contents.${contentIdx}.parts.${partIdx}.function_response.response`] = + this.normalize(JSON.stringify(functionResponse.response)) + } + } + + tagRequestContent (tags, content, contentIdx) { + if (typeof content === 'string') { + tags[`vertexai.request.contents.${contentIdx}.text`] = this.normalize(content) + return + } + + if (content.text || content.functionCall || content.functionResponse) { + this.tagRequestPart(content, tags, 0, contentIdx) + return + } + + const { role, parts } = content + if (role) { + tags[`vertexai.request.contents.${contentIdx}.role`] = role + } + + for (const [partIdx, part] of parts.entries()) { + this.tagRequestPart(part, tags, partIdx, contentIdx) + } + } + + tagResponse (response) { + const tags = {} + + const candidates = response.candidates + for (const [candidateIdx, candidate] of candidates.entries()) { + const finishReason = candidate.finishReason + if (finishReason) { + tags[`vertexai.response.candidates.${candidateIdx}.finish_reason`] = finishReason + } + const candidateContent = candidate.content + const role = candidateContent.role + tags[`vertexai.response.candidates.${candidateIdx}.content.role`] = role + + if (!this.isPromptCompletionSampled()) continue + + const parts = candidateContent.parts + for (const [partIdx, part] of parts.entries()) { + const text = part.text + tags[`vertexai.response.candidates.${candidateIdx}.content.parts.${partIdx}.text`] = + this.normalize(String(text)) + + const functionCall = part.functionCall + if (!functionCall) continue + + tags[`vertexai.response.candidates.${candidateIdx}.content.parts.${partIdx}.function_call.name`] = + functionCall.name + tags[`vertexai.response.candidates.${candidateIdx}.content.parts.${partIdx}.function_call.args`] = + this.normalize(JSON.stringify(functionCall.args)) + } + } + + const tokenCounts = response.usageMetadata + if (tokenCounts) { + tags['vertexai.response.usage.prompt_tokens'] = 
tokenCounts.promptTokenCount
+      tags['vertexai.response.usage.completion_tokens'] = tokenCounts.candidatesTokenCount
+      tags['vertexai.response.usage.total_tokens'] = tokenCounts.totalTokenCount
+    }
+
+    return tags
+  }
+}
+
+function extractModel (instance) {
+  const model = instance.model || instance.resourcePath || instance.publisherModelEndpoint
+  return model?.split('/').pop()
+}
+
+function extractSystemInstructions (instance) {
+  // systemInstruction is either a string or a Content object
+  // Content objects have parts (Part[]) and a role
+  const systemInstruction = instance.systemInstruction
+  if (!systemInstruction) return []
+  if (typeof systemInstruction === 'string') return [systemInstruction]
+
+  return systemInstruction.parts?.map(part => part.text)
+}
+
+module.exports = GoogleCloudVertexAIPlugin
diff --git a/packages/datadog-plugin-google-cloud-vertexai/test/index.spec.js b/packages/datadog-plugin-google-cloud-vertexai/test/index.spec.js
new file mode 100644
index 00000000000..1a932d90339
--- /dev/null
+++ b/packages/datadog-plugin-google-cloud-vertexai/test/index.spec.js
@@ -0,0 +1,396 @@
+'use strict'
+
+const agent = require('../../dd-trace/test/plugins/agent')
+const sinon = require('sinon')
+const fs = require('node:fs')
+const path = require('node:path')
+
+/**
+ * @google-cloud/vertexai uses `fetch` to make requests to its API, which cannot
+ * be stubbed with `nock`. This helper stubs the global `fetch` function to
+ * return a canned response for a given scenario.
+ *
+ * @param {string} scenario the scenario to load
+ * @param {number} statusCode the status code to return. defaults to 200
+ * @param {boolean} stream whether to return a streamed response body. defaults to false
+ */
+function useScenario ({ scenario, statusCode = 200, stream = false }) {
+  let originalFetch
+
+  beforeEach(() => {
+    originalFetch = global.fetch
+    global.fetch = function () {
+      let body
+
+      if (statusCode !== 200) {
+        body = '{}'
+      } else if (stream) {
+        body = fs.createReadStream(path.join(__dirname, 'resources', `${scenario}.txt`))
+      } else {
+        const contents = require(`./resources/${scenario}.json`)
+        body = JSON.stringify(contents)
+      }
+
+      return new Response(body, {
+        status: statusCode,
+        headers: {
+          'Content-Type': 'application/json'
+        }
+      })
+    }
+  })
+
+  afterEach(() => {
+    global.fetch = originalFetch
+  })
+}
+
+function promiseState (p) {
+  const t = {}
+  return Promise.race([p, t])
+    .then(v => (v === t) ? 
'pending' : 'fulfilled', () => 'rejected') +} + +describe('Plugin', () => { + describe('google-cloud-vertexai', () => { + let model + let authStub + + withVersions('google-cloud-vertexai', '@google-cloud/vertexai', '>=1', version => { + before(() => { + return agent.load('google-cloud-vertexai') + }) + + before(() => { + const { VertexAI } = require(`../../../versions/@google-cloud/vertexai@${version}`).get() + + // stub credentials checking + const { GoogleAuth } = require(`../../../versions/@google-cloud/vertexai@${version}`) + .get('google-auth-library/build/src/auth/googleauth') + authStub = sinon.stub(GoogleAuth.prototype, 'getAccessToken').resolves({}) + + const client = new VertexAI({ + project: 'datadog-sandbox', + location: 'us-central1' + }) + + model = client.getGenerativeModel({ + model: 'gemini-1.5-flash-002', + systemInstruction: 'Please provide an answer', + generationConfig: { + maxOutputTokens: 50, + temperature: 1.0 + } + }) + }) + + after(() => { + authStub.restore() + return agent.close({ ritmReset: false }) + }) + + describe('generateContent', () => { + useScenario({ scenario: 'generate-content-single-response' }) + + it('makes a successful call', async () => { + const checkTraces = agent.use(traces => { + const span = traces[0][0] + + expect(span).to.have.property('name', 'vertexai.request') + expect(span).to.have.property('resource', 'GenerativeModel.generateContent') + expect(span.meta).to.have.property('span.kind', 'client') + + expect(span.meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002') + expect(span.meta).to.have.property('vertexai.request.contents.0.role', 'user') + expect(span.meta).to.have.property('vertexai.request.contents.0.parts.0.text', 'Hello, how are you?') + expect(span.meta).to.have.property('vertexai.response.candidates.0.finish_reason', 'STOP') + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.text', + 'Hello! How can I assist you today?') + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.role', 'model') + + expect(span.metrics).to.have.property('vertexai.response.usage.prompt_tokens', 35) + expect(span.metrics).to.have.property('vertexai.response.usage.completion_tokens', 2) + expect(span.metrics).to.have.property('vertexai.response.usage.total_tokens', 37) + + if (model.systemInstruction) { + expect(span.meta).to.have.property('vertexai.request.system_instruction.0.text', + 'Please provide an answer') + } + expect(span.meta).to.have.property('vertexai.request.generation_config.max_output_tokens', '50') + expect(span.meta).to.have.property('vertexai.request.generation_config.temperature', '1') + }) + + const { response } = await model.generateContent({ + contents: [{ role: 'user', parts: [{ text: 'Hello, how are you?' 
}] }] + }) + + expect(response).to.have.property('candidates') + + await checkTraces + }) + + it('makes a successful call with a string argument', async () => { + const checkTraces = agent.use(traces => { + expect(traces[0][0].meta).to.have.property('vertexai.request.contents.0.text', + 'Hello, how are you?') + }) + + const { response } = await model.generateContent('Hello, how are you?') + + expect(response).to.have.property('candidates') + + await checkTraces + }) + + describe('with tools', () => { + useScenario({ scenario: 'generate-content-single-response-with-tools' }) + + it('makes a successful call', async () => { + const checkTraces = agent.use(traces => { + const span = traces[0][0] + + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.text', 'undefined') + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.function_call.name', + 'add') + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.function_call.args', + JSON.stringify({ a: 2, b: 2 })) + }) + + await model.generateContent('what is 2 + 2?') + + await checkTraces + }) + }) + }) + + describe('generateContentStream', () => { + useScenario({ scenario: 'generate-content-stream-single-response', statusCode: 200, stream: true }) + + it('makes a successful call', async () => { + const checkTraces = agent.use(traces => { + const span = traces[0][0] + + expect(span).to.have.property('name', 'vertexai.request') + expect(span).to.have.property('resource', 'GenerativeModel.generateContentStream') + expect(span.meta).to.have.property('span.kind', 'client') + + expect(span.meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002') + expect(span.meta).to.have.property('vertexai.request.contents.0.text', 'Hello, how are you?') + expect(span.meta).to.have.property('vertexai.response.candidates.0.finish_reason', 'STOP') + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.text', + 'Hi, how are you doing today my friend?') + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.role', 'model') + + expect(span.metrics).to.have.property('vertexai.response.usage.prompt_tokens', 5) + expect(span.metrics).to.have.property('vertexai.response.usage.completion_tokens', 10) + expect(span.metrics).to.have.property('vertexai.response.usage.total_tokens', 15) + + if (model.systemInstruction) { + expect(span.meta).to.have.property('vertexai.request.system_instruction.0.text', + 'Please provide an answer') + } + expect(span.meta).to.have.property('vertexai.request.generation_config.max_output_tokens', '50') + expect(span.meta).to.have.property('vertexai.request.generation_config.temperature', '1') + + expect(span.metrics).to.have.property('vertexai.request.stream', 1) + }) + + const { stream, response } = await model.generateContentStream('Hello, how are you?') + + // check that response is a promise + expect(response).to.be.a('promise') + + const promState = await promiseState(response) + expect(promState).to.equal('pending') // we shouldn't have consumed the promise + + for await (const chunk of stream) { + expect(chunk).to.have.property('candidates') + } + + const result = await response + expect(result).to.have.property('candidates') + + await checkTraces + }) + }) + + describe('chatSession', () => { + describe('sendMessage', () => { + useScenario({ scenario: 'generate-content-single-response' }) + + it('makes a successful call', async () => { + const checkTraces = agent.use(traces => { + 
const span = traces[0][0] + + expect(span).to.have.property('name', 'vertexai.request') + expect(span).to.have.property('resource', 'ChatSession.sendMessage') + expect(span.meta).to.have.property('span.kind', 'client') + + expect(span.meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002') + + expect(span.meta).to.have.property('vertexai.request.contents.0.role', 'user') + expect(span.meta).to.have.property('vertexai.request.contents.0.parts.0.text', 'Foobar?') + expect(span.meta).to.have.property('vertexai.request.contents.1.role', 'model') + expect(span.meta).to.have.property('vertexai.request.contents.1.parts.0.text', 'Foobar!') + expect(span.meta).to.have.property('vertexai.request.contents.2.parts.0.text', 'Hello, how are you?') + + expect(span.meta).to.have.property('vertexai.response.candidates.0.finish_reason', 'STOP') + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.text', + 'Hello! How can I assist you today?') + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.role', 'model') + + expect(span.metrics).to.have.property('vertexai.response.usage.prompt_tokens', 35) + expect(span.metrics).to.have.property('vertexai.response.usage.completion_tokens', 2) + expect(span.metrics).to.have.property('vertexai.response.usage.total_tokens', 37) + + if (model.systemInstruction) { + expect(span.meta).to.have.property('vertexai.request.system_instruction.0.text', + 'Please provide an answer') + } + expect(span.meta).to.have.property('vertexai.request.generation_config.max_output_tokens', '50') + expect(span.meta).to.have.property('vertexai.request.generation_config.temperature', '1') + }) + + const chat = model.startChat({ + history: [ + { role: 'user', parts: [{ text: 'Foobar?' }] }, + { role: 'model', parts: [{ text: 'Foobar!' }] } + ] + }) + const { response } = await chat.sendMessage([{ text: 'Hello, how are you?' 
}]) + + expect(response).to.have.property('candidates') + + await checkTraces + }) + + it('tags a string input', async () => { + const checkTraces = agent.use(traces => { + expect(traces[0][0].meta).to.have.property('vertexai.request.contents.0.text', + 'Hello, how are you?') + }) + + const chat = model.startChat({}) + const { response } = await chat.sendMessage('Hello, how are you?') + + expect(response).to.have.property('candidates') + + await checkTraces + }) + + it('tags an array of string inputs', async () => { + const checkTraces = agent.use(traces => { + expect(traces[0][0].meta).to.have.property('vertexai.request.contents.0.text', + 'Hello, how are you?') + expect(traces[0][0].meta).to.have.property('vertexai.request.contents.1.text', + 'What should I do today?') + }) + + const chat = model.startChat({}) + const { response } = await chat.sendMessage(['Hello, how are you?', 'What should I do today?']) + + expect(response).to.have.property('candidates') + + await checkTraces + }) + }) + + describe('sendMessageStream', () => { + useScenario({ scenario: 'generate-content-stream-single-response', statusCode: 200, stream: true }) + + it('makes a successful call', async () => { + const checkTraces = agent.use(traces => { + const span = traces[0][0] + + expect(span).to.have.property('name', 'vertexai.request') + expect(span).to.have.property('resource', 'ChatSession.sendMessageStream') + expect(span.meta).to.have.property('span.kind', 'client') + + expect(span.meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002') + expect(span.meta).to.have.property('vertexai.request.contents.0.text', 'Hello, how are you?') + expect(span.meta).to.have.property('vertexai.response.candidates.0.finish_reason', 'STOP') + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.text', + 'Hi, how are you doing today my friend?') + expect(span.meta).to.have.property('vertexai.response.candidates.0.content.role', 'model') + + expect(span.metrics).to.have.property('vertexai.response.usage.prompt_tokens', 5) + expect(span.metrics).to.have.property('vertexai.response.usage.completion_tokens', 10) + expect(span.metrics).to.have.property('vertexai.response.usage.total_tokens', 15) + + if (model.systemInstruction) { + expect(span.meta).to.have.property('vertexai.request.system_instruction.0.text', + 'Please provide an answer') + } + expect(span.meta).to.have.property('vertexai.request.generation_config.max_output_tokens', '50') + expect(span.meta).to.have.property('vertexai.request.generation_config.temperature', '1') + + expect(span.metrics).to.have.property('vertexai.request.stream', 1) + }) + + const chat = model.startChat({}) + const { stream, response } = await chat.sendMessageStream('Hello, how are you?') + + // check that response is a promise + expect(response).to.be.a('promise') + + const promState = await promiseState(response) + expect(promState).to.equal('pending') // we shouldn't have consumed the promise + + for await (const chunk of stream) { + expect(chunk).to.have.property('candidates') + } + + const result = await response + expect(result).to.have.property('candidates') + + await checkTraces + }) + }) + }) + + describe('errors', () => { + describe('non-streamed', () => { + useScenario({ statusCode: 404 }) + + it('tags the error', async () => { + const checkTraces = agent.use(traces => { + expect(traces[0][0]).to.have.property('error', 1) + }) + + let errorPropagated = false + + try { + await model.generateContent('Hello, how are you?') + } catch { 
errorPropagated = true } + + expect(errorPropagated).to.be.true + + await checkTraces + }) + }) + + describe('streamed', () => { + useScenario({ scenario: 'malformed-stream', stream: true }) + + it('tags the error', async () => { + const checkTraces = agent.use(traces => { + expect(traces[0][0]).to.have.property('error', 1) + }) + + let errorPropagated = false + + try { + const { stream } = await model.generateContentStream('Hello, how are you?') + // eslint-disable-next-line no-unused-vars + for await (const _ of stream) { /* pass */ } + } catch { errorPropagated = true } + + expect(errorPropagated).to.be.true + + await checkTraces + }) + }) + }) + }) + }) +}) diff --git a/packages/datadog-plugin-google-cloud-vertexai/test/integration-test/client.spec.js b/packages/datadog-plugin-google-cloud-vertexai/test/integration-test/client.spec.js new file mode 100644 index 00000000000..4b4d5a83fc1 --- /dev/null +++ b/packages/datadog-plugin-google-cloud-vertexai/test/integration-test/client.spec.js @@ -0,0 +1,52 @@ +'use strict' + +const { + FakeAgent, + createSandbox, + checkSpansForServiceName, + spawnPluginIntegrationTestProc +} = require('../../../../integration-tests/helpers') +const { assert } = require('chai') + +describe('esm', () => { + let agent + let proc + let sandbox + + withVersions('google-cloud-vertexai', '@google-cloud/vertexai', '>=1', version => { + before(async function () { + this.timeout(20000) + sandbox = await createSandbox([ + `@google-cloud/vertexai@${version}`, + 'sinon' + ], false, [ + './packages/datadog-plugin-google-cloud-vertexai/test/integration-test/*' + ]) + }) + + after(async () => { + await sandbox.remove() + }) + + beforeEach(async () => { + agent = await new FakeAgent().start() + }) + + afterEach(async () => { + proc?.kill() + await agent.stop() + }) + + it('is instrumented', async () => { + const res = agent.assertMessageReceived(({ headers, payload }) => { + assert.propertyVal(headers, 'host', `127.0.0.1:${agent.port}`) + assert.isArray(payload) + assert.strictEqual(checkSpansForServiceName(payload, 'vertexai.request'), true) + }) + + proc = await spawnPluginIntegrationTestProc(sandbox.folder, 'server.mjs', agent.port) + + await res + }).timeout(20000) + }) +}) diff --git a/packages/datadog-plugin-google-cloud-vertexai/test/integration-test/server.mjs b/packages/datadog-plugin-google-cloud-vertexai/test/integration-test/server.mjs new file mode 100644 index 00000000000..38155344795 --- /dev/null +++ b/packages/datadog-plugin-google-cloud-vertexai/test/integration-test/server.mjs @@ -0,0 +1,62 @@ +import 'dd-trace/init.js' + +import { VertexAI } from '@google-cloud/vertexai' +import { GoogleAuth } from 'google-auth-library/build/src/auth/googleauth.js' + +import sinon from 'sinon' +const authStub = sinon.stub(GoogleAuth.prototype, 'getAccessToken').resolves({}) + +const originalFetch = global.fetch +global.fetch = async (url, options) => { + const responseBody = JSON.stringify({ + candidates: [{ + content: { + role: 'model', + parts: [{ text: 'Hello! How can I assist you today?' 
}] + }, + finishReason: 'STOP', + avgLogprobs: -0.0016951755387708545 + }], + usageMetadata: { + promptTokenCount: 35, + candidatesTokenCount: 2, + totalTokenCount: 37, + promptTokensDetails: [{ + modality: 'TEXT', tokenCount: 35 + }], + candidatesTokensDetails: [{ + modality: 'TEXT', + tokenCount: 2 + }] + }, + modelVersion: 'gemini-1.5-flash-002', + createTime: '2025-02-25T18:45:57.459163Z', + responseId: '5Q--Z5uDHLWX2PgP1eLSwAk' + }) + return new Response(responseBody, { + status: 200, + headers: { + 'Content-Type': 'application/json' + } + }) +} + +try { + const client = new VertexAI({ + project: 'datadog-sandbox', + location: 'us-central1' + }) + + const model = client.getGenerativeModel({ + model: 'gemini-1.5-flash-002', + generationConfig: { + maxOutputTokens: 100, + stopSequences: ['\n'] + } + }) + + await model.generateContent('Hi!') +} finally { + global.fetch = originalFetch + authStub.restore() +} diff --git a/packages/datadog-plugin-google-cloud-vertexai/test/resources/generate-content-single-response-with-tools.json b/packages/datadog-plugin-google-cloud-vertexai/test/resources/generate-content-single-response-with-tools.json new file mode 100644 index 00000000000..25a128c6a1b --- /dev/null +++ b/packages/datadog-plugin-google-cloud-vertexai/test/resources/generate-content-single-response-with-tools.json @@ -0,0 +1,43 @@ +{ + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "functionCall": { + "name": "add", + "args": { + "a": 2, + "b": 2 + } + } + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -0.00008383560149619977, + "index": 0 + } + ], + "usageMetadata": { + "promptTokenCount": 20, + "candidatesTokenCount": 3, + "totalTokenCount": 23, + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 20 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 3 + } + ] + }, + "modelVersion": "gemini-1.5-flash-002", + "createTime": "2025-03-06T15:03:24.661997Z", + "responseId": "PLnJZ-2zKKLB698PsoTcuAw" +} diff --git a/packages/datadog-plugin-google-cloud-vertexai/test/resources/generate-content-single-response.json b/packages/datadog-plugin-google-cloud-vertexai/test/resources/generate-content-single-response.json new file mode 100644 index 00000000000..f824cb4f438 --- /dev/null +++ b/packages/datadog-plugin-google-cloud-vertexai/test/resources/generate-content-single-response.json @@ -0,0 +1,25 @@ +{ + "candidates": [{ + "content": { + "role": "model", + "parts": [{ "text": "Hello! How can I assist you today?" 
}] + }, + "finishReason": "STOP", + "avgLogprobs": -0.0016951755387708545 + }], + "usageMetadata": { + "promptTokenCount": 35, + "candidatesTokenCount": 2, + "totalTokenCount": 37, + "promptTokensDetails": [{ + "modality": "TEXT", "tokenCount": 35 + }], + "candidatesTokensDetails": [{ + "modality": "TEXT", + "tokenCount": 2 + }] + }, + "modelVersion": "gemini-1.5-flash-002", + "createTime": "2025-02-25T18:45:57.459163Z", + "responseId": "5Q--Z5uDHLWX2PgP1eLSwAk" +} diff --git a/packages/datadog-plugin-google-cloud-vertexai/test/resources/generate-content-stream-single-response.txt b/packages/datadog-plugin-google-cloud-vertexai/test/resources/generate-content-stream-single-response.txt new file mode 100644 index 00000000000..f33990ed957 --- /dev/null +++ b/packages/datadog-plugin-google-cloud-vertexai/test/resources/generate-content-stream-single-response.txt @@ -0,0 +1,10 @@ +data: {"candidates": [{"content": {"role": "model","parts": [{"text": "Hi, how"}]}}],"usageMetadata": {},"modelVersion": "gemini-1.5-flash-002","createTime": "2025-03-05T16:59:45.373757Z","responseId": "AYPIZ_3nFtShmecPqaXa6As"} + +data: {"candidates": [{"content": {"role": "model","parts": [{"text": " are you"}]}}],"modelVersion": "gemini-1.5-flash-002","createTime": "2025-03-05T16:59:45.373757Z","responseId": "AYPIZ_3nFtShmecPqaXa6As"} + +data: {"candidates": [{"content": {"role": "model","parts": [{"text": " doing "}]}}],"modelVersion": "gemini-1.5-flash-002","createTime": "2025-03-05T16:59:45.373757Z","responseId": "AYPIZ_3nFtShmecPqaXa6As"} + +data: {"candidates": [{"content": {"role": "model","parts": [{"text": "today"}]}}],"modelVersion": "gemini-1.5-flash-002","createTime": "2025-03-05T16:59:45.373757Z","responseId": "AYPIZ_3nFtShmecPqaXa6As"} + +data: {"candidates": [{"content": {"role": "model","parts": [{"text": " my friend?"}]},"finishReason": "STOP"}],"usageMetadata": {"promptTokenCount": 5,"candidatesTokenCount": 10,"totalTokenCount": 15,"promptTokensDetails": [{"modality": "TEXT","tokenCount": 5}],"candidatesTokensDetails": [{"modality": "TEXT","tokenCount": 10}]},"modelVersion": "gemini-1.5-flash-002","createTime": "2025-03-05T16:59:45.373757Z","responseId": "AYPIZ_3nFtShmecPqaXa6As"} + diff --git a/packages/datadog-plugin-google-cloud-vertexai/test/resources/malformed-stream.txt b/packages/datadog-plugin-google-cloud-vertexai/test/resources/malformed-stream.txt new file mode 100644 index 00000000000..c6cac69265a --- /dev/null +++ b/packages/datadog-plugin-google-cloud-vertexai/test/resources/malformed-stream.txt @@ -0,0 +1 @@ +empty diff --git a/packages/dd-trace/src/config.js b/packages/dd-trace/src/config.js index 626d002791a..4f85b485a01 100644 --- a/packages/dd-trace/src/config.js +++ b/packages/dd-trace/src/config.js @@ -591,8 +591,10 @@ class Config { this._setValue(defaults, 'url', undefined) this._setValue(defaults, 'version', pkg.version) this._setValue(defaults, 'instrumentation_config_id', undefined) + this._setValue(defaults, 'aws.dynamoDb.tablePrimaryKeys', undefined) + this._setValue(defaults, 'vertexai.spanCharLimit', 128) + this._setValue(defaults, 'vertexai.spanPromptCompletionSampleRate', 1.0) this._setValue(defaults, 'trace.aws.addSpanPointers', true) - this._setValue(defaults, 'trace.dynamoDb.tablePrimaryKeys', undefined) } _applyLocalStableConfig () { @@ -760,6 +762,8 @@ class Config { DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH, DD_TRACING_ENABLED, DD_VERSION, + DD_VERTEXAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE, + DD_VERTEXAI_SPAN_CHAR_LIMIT, DD_TRACE_INFERRED_PROXY_SERVICES_ENABLED, 
OTEL_METRICS_EXPORTER, OTEL_PROPAGATORS, @@ -973,6 +977,12 @@ class Config { this._setBoolean(env, 'trace.aws.addSpanPointers', DD_TRACE_AWS_ADD_SPAN_POINTERS) this._setString(env, 'trace.dynamoDb.tablePrimaryKeys', DD_TRACE_DYNAMODB_TABLE_PRIMARY_KEYS) this._setArray(env, 'graphqlErrorExtensions', DD_TRACE_GRAPHQL_ERROR_EXTENSIONS) + this._setValue( + env, + 'vertexai.spanPromptCompletionSampleRate', + maybeFloat(DD_VERTEXAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE) + ) + this._setValue(env, 'vertexai.spanCharLimit', maybeInt(DD_VERTEXAI_SPAN_CHAR_LIMIT)) } _applyOptions (options) { diff --git a/packages/dd-trace/src/plugins/index.js b/packages/dd-trace/src/plugins/index.js index 67c68e4595f..1ccc1f91138 100644 --- a/packages/dd-trace/src/plugins/index.js +++ b/packages/dd-trace/src/plugins/index.js @@ -9,6 +9,7 @@ module.exports = { get '@elastic/elasticsearch' () { return require('../../../datadog-plugin-elasticsearch/src') }, get '@elastic/transport' () { return require('../../../datadog-plugin-elasticsearch/src') }, get '@google-cloud/pubsub' () { return require('../../../datadog-plugin-google-cloud-pubsub/src') }, + get '@google-cloud/vertexai' () { return require('../../../datadog-plugin-google-cloud-vertexai/src') }, get '@grpc/grpc-js' () { return require('../../../datadog-plugin-grpc/src') }, get '@hapi/hapi' () { return require('../../../datadog-plugin-hapi/src') }, get '@jest/core' () { return require('../../../datadog-plugin-jest/src') }, diff --git a/packages/dd-trace/test/config.spec.js b/packages/dd-trace/test/config.spec.js index f737f49984e..0cc413be238 100644 --- a/packages/dd-trace/test/config.spec.js +++ b/packages/dd-trace/test/config.spec.js @@ -396,7 +396,9 @@ describe('Config', () => { { name: 'traceId128BitLoggingEnabled', value: true, origin: 'default' }, { name: 'tracing', value: true, origin: 'default' }, { name: 'url', value: undefined, origin: 'default' }, - { name: 'version', value: '', origin: 'default' } + { name: 'version', value: '', origin: 'default' }, + { name: 'vertexai.spanCharLimit', value: 128, origin: 'default' }, + { name: 'vertexai.spanPromptCompletionSampleRate', value: 1.0, origin: 'default' } ]) }) @@ -534,6 +536,8 @@ describe('Config', () => { process.env.DD_TRACE_ENABLED = 'true' process.env.DD_GRPC_CLIENT_ERROR_STATUSES = '3,13,400-403' process.env.DD_GRPC_SERVER_ERROR_STATUSES = '3,13,400-403' + process.env.DD_VERTEXAI_SPAN_CHAR_LIMIT = 50 + process.env.DD_VERTEXAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE = 0.5 // required if we want to check updates to config.debug and config.logLevel which is fetched from logger reloadLoggerAndConfig() @@ -723,7 +727,9 @@ describe('Config', () => { { name: 'llmobs.mlApp', value: 'myMlApp', origin: 'env_var' }, { name: 'llmobs.agentlessEnabled', value: true, origin: 'env_var' }, { name: 'langchain.spanCharLimit', value: 50, origin: 'env_var' }, - { name: 'langchain.spanPromptCompletionSampleRate', value: 0.5, origin: 'env_var' } + { name: 'langchain.spanPromptCompletionSampleRate', value: 0.5, origin: 'env_var' }, + { name: 'vertexai.spanCharLimit', value: 50, origin: 'env_var' }, + { name: 'vertexai.spanPromptCompletionSampleRate', value: 0.5, origin: 'env_var' } ]) })
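
A minimal usage sketch of the integration this diff adds, assuming a standard dd-trace setup. The plugin id, span name, resource name, and `@google-cloud/vertexai` calls come from the plugin and tests above; the project id and prompt are placeholders:

'use strict'

// Initialize the tracer before loading @google-cloud/vertexai so the
// instrumentation hooks can wrap GenerativeModel and ChatSession
const tracer = require('dd-trace').init()
tracer.use('google-cloud-vertexai')

const { VertexAI } = require('@google-cloud/vertexai')

async function main () {
  const client = new VertexAI({ project: 'my-project', location: 'us-central1' })
  const model = client.getGenerativeModel({ model: 'gemini-1.5-flash-002' })

  // generateContent is wrapped by wrapGenerate, so this call produces a
  // client span named 'vertexai.request' with resource
  // 'GenerativeModel.generateContent'
  const { response } = await model.generateContent('Hello, how are you?')
  console.log(response.candidates[0].content.parts[0].text)
}

main()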
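The config changes above also register two new settings, `vertexai.spanCharLimit` (default 128) and `vertexai.spanPromptCompletionSampleRate` (default 1.0), read from the DD_VERTEXAI_SPAN_CHAR_LIMIT and DD_VERTEXAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE environment variables via maybeInt/maybeFloat. A sketch of overriding them, assuming (by analogy with the langchain settings they mirror) that the char limit truncates tagged prompt/completion text and the sample rate drives isPromptCompletionSampled():

// Must be set before dd-trace reads its configuration
process.env.DD_VERTEXAI_SPAN_CHAR_LIMIT = '50'
process.env.DD_VERTEXAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE = '0.5'

require('dd-trace').init()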