From 39dee8fc51df36eb59c6a248a90281e0b3e3cd63 Mon Sep 17 00:00:00 2001 From: Travis Wilson Date: Thu, 22 Jun 2023 18:15:13 -0700 Subject: [PATCH 1/9] Azure OpenAI: reuse LRO response model as status polling model --- .../OpenAI.Inference/models/images.tsp | 73 ++----------------- .../models/operations.common.tsp | 64 ++++++++++++++++ .../OpenAI.Inference/routes.tsp | 5 +- 3 files changed, 75 insertions(+), 67 deletions(-) create mode 100644 specification/cognitiveservices/OpenAI.Inference/models/operations.common.tsp diff --git a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp index c990a9ff2c24..e694be03b1ba 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp @@ -1,5 +1,6 @@ import "@azure-tools/typespec-azure-core"; import "@typespec/http"; +import "./operations.common.tsp"; using Azure.Core; using TypeSpec.Http; @@ -51,7 +52,11 @@ model ImageResponse { data: ImageLocation[]; } -@doc("The result of the operation if the operation succeeded.") +// Note: pending resolution of cross-language code emission behavior for long-running operations, image generation +// reuses its final operation response model as its status polling model. + +// @lroStatus +@doc("A polling status update or final response payload for an image operation.") model ImageOperationResponse { @doc("The ID of the operation.") id: string; @@ -66,72 +71,8 @@ model ImageOperationResponse { result?: ImageResponse; @doc("The status of the operation") - status: State; + status: AzureOpenAIOperationState; @doc("The error if the operation failed.") error?: Foundations.Error; } - -@lroStatus -@doc("The state of a job or item.") -enum State { - @doc("The operation was created and is queued to be processed in the future.") - notRunning, - - @doc("The operation has started to be processed.") - running, - - @lroSucceeded - @doc("The operation has successfully be processed and is ready for consumption.") - succeeded, - - @lroCanceled - @doc("The operation has been canceled and is incomplete.") - canceled, - - @lroFailed - @doc("The operation has completed processing with a failure and cannot be further consumed.") - failed, - - @doc("The entity has been deleted but may still be referenced by other entities predating the deletion.") - deleted, -} - -@doc("Provides status details for long running operations.") -model ImageOperationStatus { - @key("operationId") - @visibility("read", "query") - @doc("The unique ID of the operation.") - id: string; - - @doc("The status of the operation") - status: State; -} - -#suppress "@azure-tools/typespec-azure-core/long-running-polling-operation-required" "template" -@Foundations.Private.needsRoute -@post -@doc("Long running operation template to match Azure OpenAI .") -op OaiLongRunningRpcOperation< - TParams extends TypeSpec.Reflection.Model, - TResponse extends TypeSpec.Reflection.Model, - TStatusResult extends TypeSpec.Reflection.Model, - TStatusError = Foundations.Error, - Traits extends TypeSpec.Reflection.Model = {}, - TErrorResponse = Azure.Core.Foundations.ErrorResponse -> is Foundations.Operation< - TParams & - Azure.Core.Traits.Private.TraitProperties< - Traits, - Azure.Core.Traits.TraitLocation.Parameters - >, - Azure.Core.Foundations.AcceptedResponse & - TResponse & - Foundations.LongRunningStatusLocation & - Azure.Core.Traits.Private.TraitProperties< - Traits, - Azure.Core.Traits.TraitLocation.Response - 
>, - Traits, - TErrorResponse ->; diff --git a/specification/cognitiveservices/OpenAI.Inference/models/operations.common.tsp b/specification/cognitiveservices/OpenAI.Inference/models/operations.common.tsp new file mode 100644 index 000000000000..df0306801328 --- /dev/null +++ b/specification/cognitiveservices/OpenAI.Inference/models/operations.common.tsp @@ -0,0 +1,64 @@ +import "@azure-tools/typespec-azure-core"; +import "@typespec/http"; + +using Azure.Core; +using TypeSpec.Http; + +namespace Azure.OpenAI; + +@lroStatus +@doc("The state of a job or item.") +enum AzureOpenAIOperationState { + @doc("The operation was created and is queued to be processed in the future.") + notRunning, + + @doc("The operation has started to be processed.") + running, + + @lroSucceeded + @doc("The operation has successfully be processed and is ready for consumption.") + succeeded, + + @lroCanceled + @doc("The operation has been canceled and is incomplete.") + canceled, + + @lroFailed + @doc("The operation has completed processing with a failure and cannot be further consumed.") + failed, + + @doc(""" + The entity has been deleted but may still be referenced by other entities that originated before the deletion. + Note that this state is not used by some Azure OpenAI long-running operations and may not represent + a terminal state for those operations. + """) + deleted, +} + +#suppress "@azure-tools/typespec-azure-core/long-running-polling-operation-required" "template" +@Foundations.Private.needsRoute +@post +@doc("Long running operation template to match Azure OpenAI .") +op OaiLongRunningRpcOperation< + TParams extends TypeSpec.Reflection.Model, + TResponse extends TypeSpec.Reflection.Model, + TStatusResult extends TypeSpec.Reflection.Model, + TStatusError = Foundations.Error, + Traits extends TypeSpec.Reflection.Model = {}, + TErrorResponse = Azure.Core.Foundations.ErrorResponse +> is Foundations.Operation< + TParams & + Azure.Core.Traits.Private.TraitProperties< + Traits, + Azure.Core.Traits.TraitLocation.Parameters + >, + Azure.Core.Foundations.AcceptedResponse & + TResponse & + Foundations.LongRunningStatusLocation & + Azure.Core.Traits.Private.TraitProperties< + Traits, + Azure.Core.Traits.TraitLocation.Response + >, + Traits, + TErrorResponse +>; diff --git a/specification/cognitiveservices/OpenAI.Inference/routes.tsp b/specification/cognitiveservices/OpenAI.Inference/routes.tsp index 5a63fa8b0f2e..ae9502b52b22 100644 --- a/specification/cognitiveservices/OpenAI.Inference/routes.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/routes.tsp @@ -52,6 +52,9 @@ op getChatCompletions is ResourceAction< ChatCompletions >; +// Note: pending resolution of cross-language code emission behavior for long-running operations, image generation +// reuses its final operation response model as its status polling model. + #suppress "@azure-tools/typespec-azure-core/no-rpc-path-params" "Allowed because this is a non-standard status polling operation." 
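// For reference, a minimal sketch of how the long-running submit operation later in this file is wired back to
// this status polling operation via @pollingOperation (using the operation and model names as they exist at this
// point in the spec; a later commit in this series renames them):
//
//   @pollingOperation(getImageOperationStatus, { operationId: ResponseProperty<"id"> })
//   op startGenerateImage is OaiLongRunningRpcOperation<
//     ImageGenerationOptions,
//     ImageOperationResponse,
//     ImageOperationResponse
//   >;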
@doc("Returns the status of the images operation") @added(ServiceApiVersions.v2023_06_01_Preview) @@ -76,5 +79,5 @@ op getImageOperationStatus is RpcOperation< op startGenerateImage is OaiLongRunningRpcOperation< ImageGenerationOptions, ImageOperationResponse, - ImageOperationStatus + ImageOperationResponse >; From 332c398bf023b95449820c48469c919857f7798c Mon Sep 17 00:00:00 2001 From: Travis Wilson Date: Fri, 23 Jun 2023 16:50:32 -0700 Subject: [PATCH 2/9] incorporate unreachable type omission for limitation of breaking change triggers --- .../OpenAI.Inference/tspconfig.yaml | 1 + .../preview/2023-06-01-preview/generated.json | 70 ---- .../stable/2022-12-01/generated.json | 303 ------------------ .../stable/2023-05-15/generated.json | 112 ------- 4 files changed, 1 insertion(+), 485 deletions(-) diff --git a/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml b/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml index cbc2af3360ce..db615f381014 100644 --- a/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml +++ b/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml @@ -11,6 +11,7 @@ options: output-file: "{azure-resource-provider-folder}/AzureOpenAI/inference/{version-status}/{version}/generated.json" azure-resource-provider-folder: "data-plane" examples-directory: examples + omit-unreachable-types: true # Uncomment this line and add "@azure-tools/cadl-python" to your package.json to generate Python code # "@azure-tools/cadl-python": # "basic-setup-py": true diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json index bb88b34f7fb3..d99a3a95fcc3 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json @@ -674,36 +674,6 @@ "usage" ] }, - "CompletionsFinishReason": { - "type": "string", - "description": "Representation of the manner in which a completions response concluded.", - "enum": [ - "stop", - "length", - "content_filter" - ], - "x-ms-enum": { - "name": "CompletionsFinishReason", - "modelAsString": true, - "values": [ - { - "name": "stopped", - "value": "stop", - "description": "Completions ended normally and reached its end of token generation." - }, - { - "name": "tokenLimitReached", - "value": "length", - "description": "Completions exhausted available token limits before generation could complete." - }, - { - "name": "contentFiltered", - "value": "content_filter", - "description": "Completions generated a response that was identified as potentially sensitive per content\nmoderation policies." 
- } - ] - } - }, "CompletionsLogProbabilityModel": { "type": "object", "properties": { @@ -882,20 +852,6 @@ "total_tokens" ] }, - "Deployment": { - "type": "object", - "properties": { - "deploymentId": { - "type": "string", - "description": "deployment id of the deployed model", - "readOnly": true - } - }, - "description": "A specific deployment", - "required": [ - "deploymentId" - ] - }, "EmbeddingItem": { "type": "object", "properties": { @@ -1123,32 +1079,6 @@ } ] } - }, - "ServiceApiVersions": { - "type": "string", - "enum": [ - "2022-12-01", - "2023-05-15", - "2023-06-01-preview" - ], - "x-ms-enum": { - "name": "ServiceApiVersions", - "modelAsString": true, - "values": [ - { - "name": "v2022_12_01", - "value": "2022-12-01" - }, - { - "name": "v2023_05_15", - "value": "2023-05-15" - }, - { - "name": "v2023_06_01_Preview", - "value": "2023-06-01-preview" - } - ] - } } }, "parameters": { diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json index 028169686f31..5ce1d80666c5 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json @@ -225,239 +225,6 @@ }, "description": "An object containing more specific information about the error. As per Microsoft One API guidelines - https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses." }, - "AzureOpenAIOperationState": { - "type": "string", - "description": "The state of a job or item.", - "enum": [ - "notRunning", - "running", - "succeeded", - "canceled", - "failed" - ], - "x-ms-enum": { - "name": "AzureOpenAIOperationState", - "modelAsString": true, - "values": [ - { - "name": "notRunning", - "value": "notRunning", - "description": "The operation was created and is queued to be processed in the future." - }, - { - "name": "running", - "value": "running", - "description": "The operation has started to be processed." - }, - { - "name": "succeeded", - "value": "succeeded", - "description": "The operation has successfully be processed and is ready for consumption." - }, - { - "name": "canceled", - "value": "canceled", - "description": "The operation has been canceled and is incomplete." - }, - { - "name": "failed", - "value": "failed", - "description": "The operation has completed processing with a failure and cannot be further consumed." - } - ] - } - }, - "ChatChoice": { - "type": "object", - "properties": { - "message": { - "$ref": "#/definitions/ChatMessage", - "description": "The chat message for a given chat completions prompt." - }, - "index": { - "type": "integer", - "format": "int32", - "description": "The ordered index associated with this chat completions choice." - }, - "finish_reason": { - "x-typespec-name": "CompletionsFinishReason | null", - "description": "The reason that this chat completions choice completed its generated.", - "x-ms-client-name": "finishReason" - }, - "delta": { - "$ref": "#/definitions/ChatMessage", - "description": "The delta message content for a streaming response." 
- } - }, - "description": "The representation of a single prompt completion as part of an overall chat completions request.\nGenerally, `n` choices are generated per provided prompt with a default value of 1.\nToken limits and other settings may limit the number of choices generated.", - "required": [ - "index", - "finish_reason" - ] - }, - "ChatCompletions": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "A unique identifier associated with this chat completions response." - }, - "created": { - "type": "integer", - "format": "int32", - "description": "The first timestamp associated with generation activity for this completions response,\nrepresented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970." - }, - "choices": { - "type": "array", - "items": { - "$ref": "#/definitions/ChatChoice" - }, - "x-ms-identifiers": [], - "description": "The collection of completions choices associated with this completions response.\nGenerally, `n` choices are generated per provided prompt with a default value of 1.\nToken limits and other settings may limit the number of choices generated.", - "x-typespec-name": "ChatChoice[]" - }, - "usage": { - "$ref": "#/definitions/CompletionsUsage", - "description": "Usage information for tokens processed and generated as part of this completions operation." - } - }, - "description": "Representation of the response data from a chat completions request.\nCompletions support a wide variety of tasks and generate text that continues from or \"completes\"\nprovided prompt data.", - "required": [ - "id", - "created", - "choices", - "usage" - ] - }, - "ChatCompletionsOptions": { - "type": "object", - "properties": { - "messages": { - "type": "array", - "items": { - "$ref": "#/definitions/ChatMessage" - }, - "x-ms-identifiers": [], - "description": "The collection of context messages associated with this chat completions request.\nTypical usage begins with a chat message for the System role that provides instructions for\nthe behavior of the assistant, followed by alternating messages between the User and\nAssistant roles.", - "x-typespec-name": "ChatMessage[]" - }, - "max_tokens": { - "type": "integer", - "format": "int32", - "description": "The maximum number of tokens to generate.", - "x-ms-client-name": "maxTokens" - }, - "temperature": { - "type": "number", - "format": "float", - "description": "The sampling temperature to use that controls the apparent creativity of generated completions.\nHigher values will make output more random while lower values will make results more focused\nand deterministic.\nIt is not recommended to modify temperature and top_p for the same completions request as the\ninteraction of these two settings is difficult to predict." - }, - "top_p": { - "type": "number", - "format": "float", - "description": "An alternative to sampling with temperature called nucleus sampling. This value causes the\nmodel to consider the results of tokens with the provided probability mass. 
As an example, a\nvalue of 0.15 will cause only the tokens comprising the top 15% of probability mass to be\nconsidered.\nIt is not recommended to modify temperature and top_p for the same completions request as the\ninteraction of these two settings is difficult to predict.", - "x-ms-client-name": "topP" - }, - "logit_bias": { - "type": "object", - "additionalProperties": { - "type": "integer", - "format": "int32" - }, - "description": "A map between GPT token IDs and bias scores that influences the probability of specific tokens\nappearing in a completions response. Token IDs are computed via external tokenizer tools, while\nbias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to\na full ban or exclusive selection of a token, respectively. The exact behavior of a given bias\nscore varies by model.", - "x-typespec-name": "Record", - "x-ms-client-name": "logitBias" - }, - "user": { - "type": "string", - "description": "An identifier for the caller or end user of the operation. This may be used for tracking\nor rate-limiting purposes." - }, - "n": { - "type": "integer", - "format": "int32", - "description": "The number of chat completions choices that should be generated for a chat completions\nresponse.\nBecause this setting can generate many completions, it may quickly consume your token quota.\nUse carefully and ensure reasonable settings for max_tokens and stop." - }, - "stop": { - "type": "array", - "items": { - "type": "string" - }, - "description": "A collection of textual sequences that will end completions generation.", - "x-typespec-name": "string[]" - }, - "presence_penalty": { - "type": "number", - "format": "float", - "description": "A value that influences the probability of generated tokens appearing based on their existing\npresence in generated text.\nPositive values will make tokens less likely to appear when they already exist and increase the\nmodel's likelihood to output new topics.", - "x-ms-client-name": "presencePenalty" - }, - "frequency_penalty": { - "type": "number", - "format": "float", - "description": "A value that influences the probability of generated tokens appearing based on their cumulative\nfrequency in generated text.\nPositive values will make tokens less likely to appear as their frequency increases and\ndecrease the likelihood of the model repeating the same statements verbatim.", - "x-ms-client-name": "frequencyPenalty" - }, - "stream": { - "type": "boolean", - "description": "A value indicating whether chat completions should be streamed for this request." - }, - "model": { - "type": "string", - "description": "The model name to provide as part of this completions request.\nNot applicable to Azure OpenAI, where deployment information should be included in the Azure\nresource URI that's connected to." - } - }, - "description": "The configuration information for a chat completions request.\nCompletions support a wide variety of tasks and generate text that continues from or \"completes\"\nprovided prompt data.", - "required": [ - "messages" - ] - }, - "ChatMessage": { - "type": "object", - "properties": { - "role": { - "$ref": "#/definitions/ChatRole", - "description": "The role associated with this message payload." - }, - "content": { - "type": "string", - "description": "The text associated with this message payload." 
- } - }, - "description": "A single, role-attributed message within a chat completion interaction.", - "required": [ - "role" - ] - }, - "ChatRole": { - "type": "string", - "description": "A description of the intended purpose of a message within a chat completions interaction.", - "enum": [ - "system", - "assistant", - "user" - ], - "x-ms-enum": { - "name": "ChatRole", - "modelAsString": true, - "values": [ - { - "name": "system", - "value": "system", - "description": "The role that instructs or sets the behavior of the assistant." - }, - { - "name": "assistant", - "value": "assistant", - "description": "The role that provides responses to system-instructed, user-prompted input." - }, - { - "name": "user", - "value": "user", - "description": "The role that provides input for chat completions." - } - ] - } - }, "Choice": { "type": "object", "properties": { @@ -529,36 +296,6 @@ "usage" ] }, - "CompletionsFinishReason": { - "type": "string", - "description": "Representation of the manner in which a completions response concluded.", - "enum": [ - "stop", - "length", - "content_filter" - ], - "x-ms-enum": { - "name": "CompletionsFinishReason", - "modelAsString": true, - "values": [ - { - "name": "stopped", - "value": "stop", - "description": "Completions ended normally and reached its end of token generation." - }, - { - "name": "tokenLimitReached", - "value": "length", - "description": "Completions exhausted available token limits before generation could complete." - }, - { - "name": "contentFiltered", - "value": "content_filter", - "description": "Completions generated a response that was identified as potentially sensitive per content\nmoderation policies." - } - ] - } - }, "CompletionsLogProbabilityModel": { "type": "object", "properties": { @@ -737,20 +474,6 @@ "total_tokens" ] }, - "Deployment": { - "type": "object", - "properties": { - "deploymentId": { - "type": "string", - "description": "deployment id of the deployed model", - "readOnly": true - } - }, - "description": "A specific deployment", - "required": [ - "deploymentId" - ] - }, "EmbeddingItem": { "type": "object", "properties": { @@ -844,32 +567,6 @@ "prompt_tokens", "total_tokens" ] - }, - "ServiceApiVersions": { - "type": "string", - "enum": [ - "2022-12-01", - "2023-05-15", - "2023-06-01-preview" - ], - "x-ms-enum": { - "name": "ServiceApiVersions", - "modelAsString": true, - "values": [ - { - "name": "v2022_12_01", - "value": "2022-12-01" - }, - { - "name": "v2023_05_15", - "value": "2023-05-15" - }, - { - "name": "v2023_06_01_Preview", - "value": "2023-06-01-preview" - } - ] - } } }, "parameters": { diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json index cd8c3f2d8049..3ab19f89af3d 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json @@ -276,48 +276,6 @@ }, "description": "An object containing more specific information about the error. As per Microsoft One API guidelines - https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses." 
}, - "AzureOpenAIOperationState": { - "type": "string", - "description": "The state of a job or item.", - "enum": [ - "notRunning", - "running", - "succeeded", - "canceled", - "failed" - ], - "x-ms-enum": { - "name": "AzureOpenAIOperationState", - "modelAsString": true, - "values": [ - { - "name": "notRunning", - "value": "notRunning", - "description": "The operation was created and is queued to be processed in the future." - }, - { - "name": "running", - "value": "running", - "description": "The operation has started to be processed." - }, - { - "name": "succeeded", - "value": "succeeded", - "description": "The operation has successfully be processed and is ready for consumption." - }, - { - "name": "canceled", - "value": "canceled", - "description": "The operation has been canceled and is incomplete." - }, - { - "name": "failed", - "value": "failed", - "description": "The operation has completed processing with a failure and cannot be further consumed." - } - ] - } - }, "ChatChoice": { "type": "object", "properties": { @@ -580,36 +538,6 @@ "usage" ] }, - "CompletionsFinishReason": { - "type": "string", - "description": "Representation of the manner in which a completions response concluded.", - "enum": [ - "stop", - "length", - "content_filter" - ], - "x-ms-enum": { - "name": "CompletionsFinishReason", - "modelAsString": true, - "values": [ - { - "name": "stopped", - "value": "stop", - "description": "Completions ended normally and reached its end of token generation." - }, - { - "name": "tokenLimitReached", - "value": "length", - "description": "Completions exhausted available token limits before generation could complete." - }, - { - "name": "contentFiltered", - "value": "content_filter", - "description": "Completions generated a response that was identified as potentially sensitive per content\nmoderation policies." 
- } - ] - } - }, "CompletionsLogProbabilityModel": { "type": "object", "properties": { @@ -788,20 +716,6 @@ "total_tokens" ] }, - "Deployment": { - "type": "object", - "properties": { - "deploymentId": { - "type": "string", - "description": "deployment id of the deployed model", - "readOnly": true - } - }, - "description": "A specific deployment", - "required": [ - "deploymentId" - ] - }, "EmbeddingItem": { "type": "object", "properties": { @@ -895,32 +809,6 @@ "prompt_tokens", "total_tokens" ] - }, - "ServiceApiVersions": { - "type": "string", - "enum": [ - "2022-12-01", - "2023-05-15", - "2023-06-01-preview" - ], - "x-ms-enum": { - "name": "ServiceApiVersions", - "modelAsString": true, - "values": [ - { - "name": "v2022_12_01", - "value": "2022-12-01" - }, - { - "name": "v2023_05_15", - "value": "2023-05-15" - }, - { - "name": "v2023_06_01_Preview", - "value": "2023-06-01-preview" - } - ] - } } }, "parameters": { From 26f8706cde3af515d3880dc73b5393f50b19343c Mon Sep 17 00:00:00 2001 From: Travis Wilson Date: Mon, 26 Jun 2023 15:53:56 -0700 Subject: [PATCH 3/9] batch rename for new LRO tack --- .../OpenAI.Inference/client.tsp | 7 +- .../OpenAI.Inference/models/images.tsp | 6 +- .../OpenAI.Inference/routes.tsp | 12 +-- .../preview/2023-06-01-preview/generated.json | 94 +++++++++---------- 4 files changed, 56 insertions(+), 63 deletions(-) diff --git a/specification/cognitiveservices/OpenAI.Inference/client.tsp b/specification/cognitiveservices/OpenAI.Inference/client.tsp index 80c27985adf3..6143449eba14 100644 --- a/specification/cognitiveservices/OpenAI.Inference/client.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/client.tsp @@ -3,8 +3,11 @@ import "./main.tsp"; using Azure.ClientGenerator.Core; -// Azure-specific long-running operations should be treated as implementation details -@@internal(Azure.OpenAI.startGenerateImage); +// Azure-specific long-running operations should be treated as implementation details that are wrapped into +// appropriately merged public surface. 
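// The "@@" statements below use TypeSpec's augment-decorator syntax, which attaches a decorator to a declaration
// defined in another file. As a minimal sketch (assuming the operation shape declared in routes.tsp), the first
// @@internal below is equivalent to decorating the operation in place:
//
//   @internal
//   op beginAzureBatchImageGeneration is OaiLongRunningRpcOperation<
//     ImageGenerationOptions,
//     BatchImageGenerationOperationResponse,
//     BatchImageGenerationOperationResponse
//   >;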
+@@internal(Azure.OpenAI.beginAzureBatchImageGeneration); // Some models from client-internal LROs are still desired for custom public surface +@@include(Azure.OpenAI.BatchImageGenerationOperationResponse) @@include(Azure.OpenAI.ImageGenerationOptions); +@@include(Azure.OpenAI.ImageLocationResult) diff --git a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp index 8d18e5793a4e..067dc67088d7 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp @@ -50,7 +50,7 @@ model ImageLocation { @doc("The result of the operation if the operation succeeded.") @added(ServiceApiVersions.v2023_06_01_Preview) -model ImageResponse { +model ImageLocationResult { @doc("A timestamp when this job or item was created (in unix epochs).") created: int64; @@ -64,7 +64,7 @@ model ImageResponse { // @lroStatus @doc("A polling status update or final response payload for an image operation.") @added(ServiceApiVersions.v2023_06_01_Preview) -model ImageOperationResponse { +model BatchImageGenerationOperationResponse { @doc("The ID of the operation.") id: string; @@ -75,7 +75,7 @@ model ImageOperationResponse { expires?: int64; @doc("The result of the operation if the operation succeeded.") - result?: ImageResponse; + result?: ImageLocationResult; @doc("The status of the operation") status: AzureOpenAIOperationState; diff --git a/specification/cognitiveservices/OpenAI.Inference/routes.tsp b/specification/cognitiveservices/OpenAI.Inference/routes.tsp index ae9502b52b22..989654ca7e84 100644 --- a/specification/cognitiveservices/OpenAI.Inference/routes.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/routes.tsp @@ -59,11 +59,11 @@ op getChatCompletions is ResourceAction< @doc("Returns the status of the images operation") @added(ServiceApiVersions.v2023_06_01_Preview) @route("/operations/images/{operationId}") -op getImageOperationStatus is RpcOperation< +op getAzureBatchImageGenerationOperationStatus is RpcOperation< { @doc(".") @path operationId: string; }, - ImageOperationResponse + BatchImageGenerationOperationResponse >; #suppress "@azure-tools/typespec-azure-core/use-standard-operations" "" @@ -71,13 +71,13 @@ op getImageOperationStatus is RpcOperation< @added(ServiceApiVersions.v2023_06_01_Preview) @route("/images/generations:submit") @pollingOperation( - getImageOperationStatus, + getAzureBatchImageGenerationOperationStatus, { operationId: ResponseProperty<"id">, } ) -op startGenerateImage is OaiLongRunningRpcOperation< +op beginAzureBatchImageGeneration is OaiLongRunningRpcOperation< ImageGenerationOptions, - ImageOperationResponse, - ImageOperationResponse + BatchImageGenerationOperationResponse, + BatchImageGenerationOperationResponse >; diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json index d99a3a95fcc3..3de428e18622 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json @@ -214,7 +214,7 @@ }, "/images/generations:submit": { "post": { - "operationId": "StartGenerateImage", + "operationId": "BeginAzureBatchImageGeneration", "description": "Starts the generation of a batch 
of images from a text caption", "parameters": [ { @@ -240,7 +240,7 @@ } }, "schema": { - "$ref": "#/definitions/ImageOperationResponse" + "$ref": "#/definitions/BatchImageGenerationOperationResponse" } }, "default": { @@ -255,17 +255,12 @@ "$ref": "#/definitions/Azure.Core.Foundations.ErrorResponse" } } - }, - "x-ms-examples": { - "Starts the generation of a batch of images from a text caption": { - "$ref": "./examples/start_generate_image.json" - } } } }, "/operations/images/{operationId}": { "get": { - "operationId": "GetImageOperationStatus", + "operationId": "GetAzureBatchImageGenerationOperationStatus", "description": "Returns the status of the images operation", "parameters": [ { @@ -283,7 +278,7 @@ "200": { "description": "The request has succeeded.", "schema": { - "$ref": "#/definitions/ImageOperationResponse" + "$ref": "#/definitions/BatchImageGenerationOperationResponse" } }, "default": { @@ -298,11 +293,6 @@ "$ref": "#/definitions/Azure.Core.Foundations.ErrorResponse" } } - }, - "x-ms-examples": { - "Returns the status of the images operation": { - "$ref": "./examples/get_image_operation_status.json" - } } } } @@ -412,6 +402,43 @@ ] } }, + "BatchImageGenerationOperationResponse": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the operation." + }, + "created": { + "type": "integer", + "format": "int64", + "description": "A timestamp when this job or item was created (in unix epochs)." + }, + "expires": { + "type": "integer", + "format": "int64", + "description": "A timestamp when this operation and its associated images expire and will be deleted (in unix epochs)." + }, + "result": { + "$ref": "#/definitions/ImageLocationResult", + "description": "The result of the operation if the operation succeeded." + }, + "status": { + "$ref": "#/definitions/AzureOpenAIOperationState", + "description": "The status of the operation" + }, + "error": { + "$ref": "#/definitions/Azure.Core.Foundations.Error", + "description": "The error if the operation failed." + } + }, + "description": "A polling status update or final response payload for an image operation.", + "required": [ + "id", + "created", + "status" + ] + }, "ChatChoice": { "type": "object", "properties": { @@ -989,44 +1016,7 @@ }, "description": "The image url if successful, and an error otherwise." }, - "ImageOperationResponse": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The ID of the operation." - }, - "created": { - "type": "integer", - "format": "int64", - "description": "A timestamp when this job or item was created (in unix epochs)." - }, - "expires": { - "type": "integer", - "format": "int64", - "description": "A timestamp when this operation and its associated images expire and will be deleted (in unix epochs)." - }, - "result": { - "$ref": "#/definitions/ImageResponse", - "description": "The result of the operation if the operation succeeded." - }, - "status": { - "$ref": "#/definitions/AzureOpenAIOperationState", - "description": "The status of the operation" - }, - "error": { - "$ref": "#/definitions/Azure.Core.Foundations.Error", - "description": "The error if the operation failed." 
- } - }, - "description": "A polling status update or final response payload for an image operation.", - "required": [ - "id", - "created", - "status" - ] - }, - "ImageResponse": { + "ImageLocationResult": { "type": "object", "properties": { "created": { From 5cbbc2fc952a4e9072e52fbf0e180665e011ac52 Mon Sep 17 00:00:00 2001 From: Travis Wilson <35748617+trrwilson@users.noreply.github.com> Date: Tue, 27 Jun 2023 09:11:52 -0700 Subject: [PATCH 4/9] Updates after clarification on sync parity snap --- .../cognitiveservices/OpenAI.Inference/client.tsp | 10 +++++++--- .../OpenAI.Inference/models/images.tsp | 4 ++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/specification/cognitiveservices/OpenAI.Inference/client.tsp b/specification/cognitiveservices/OpenAI.Inference/client.tsp index 6143449eba14..d58bf32f0506 100644 --- a/specification/cognitiveservices/OpenAI.Inference/client.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/client.tsp @@ -6,8 +6,12 @@ using Azure.ClientGenerator.Core; // Azure-specific long-running operations should be treated as implementation details that are wrapped into // appropriately merged public surface. @@internal(Azure.OpenAI.beginAzureBatchImageGeneration); +@@internal(Azure.OpenAI.getAzureBatchImageGenerationOperationStatus); -// Some models from client-internal LROs are still desired for custom public surface -@@include(Azure.OpenAI.BatchImageGenerationOperationResponse) +// Models related to the top-level details of these LROs should also be internal. +@@internal(Azure.OpenAI.BatchImageGenerationOperationResponse); + +// Some models from client-internal LROs are still desired for custom public surface. @@include(Azure.OpenAI.ImageGenerationOptions); -@@include(Azure.OpenAI.ImageLocationResult) +@@include(Azure.OpenAI.ImageLocation); +@@include(Azure.OpenAI.ImageGenerations); diff --git a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp index 067dc67088d7..31aca2ad4c47 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp @@ -50,7 +50,7 @@ model ImageLocation { @doc("The result of the operation if the operation succeeded.") @added(ServiceApiVersions.v2023_06_01_Preview) -model ImageLocationResult { +model ImageGenerations { @doc("A timestamp when this job or item was created (in unix epochs).") created: int64; @@ -75,7 +75,7 @@ model BatchImageGenerationOperationResponse { expires?: int64; @doc("The result of the operation if the operation succeeded.") - result?: ImageLocationResult; + result?: ImageGenerations; @doc("The status of the operation") status: AzureOpenAIOperationState; From 98fdcba24417c3b8fa7182da96424c2fb3e6971f Mon Sep 17 00:00:00 2001 From: Travis Wilson Date: Tue, 27 Jun 2023 11:28:50 -0700 Subject: [PATCH 5/9] Update: adjustments after re-snap to plans for converged sync API --- .../OpenAI.Inference/client.tsp | 3 -- .../OpenAI.Inference/models/images.tsp | 5 ++-- .../OpenAI.Inference/tspconfig.yaml | 26 +++++----------- .../preview/2023-06-01-preview/generated.json | 30 ++++++++----------- 4 files changed, 23 insertions(+), 41 deletions(-) diff --git a/specification/cognitiveservices/OpenAI.Inference/client.tsp b/specification/cognitiveservices/OpenAI.Inference/client.tsp index d58bf32f0506..16e411ae37c7 100644 --- a/specification/cognitiveservices/OpenAI.Inference/client.tsp +++ 
b/specification/cognitiveservices/OpenAI.Inference/client.tsp @@ -8,9 +8,6 @@ using Azure.ClientGenerator.Core; @@internal(Azure.OpenAI.beginAzureBatchImageGeneration); @@internal(Azure.OpenAI.getAzureBatchImageGenerationOperationStatus); -// Models related to the top-level details of these LROs should also be internal. -@@internal(Azure.OpenAI.BatchImageGenerationOperationResponse); - // Some models from client-internal LROs are still desired for custom public surface. @@include(Azure.OpenAI.ImageGenerationOptions); @@include(Azure.OpenAI.ImageLocation); diff --git a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp index 31aca2ad4c47..fb87559b666d 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp @@ -29,6 +29,7 @@ model ImageGenerationOptions { prompt: string; @doc("The number of images to generate (defaults to 1).") + @projectedName("csharp", "ImageCount") n?: int32 = 1; @doc("The desired size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 (defaults to 1024x1024).") @@ -43,15 +44,13 @@ model ImageGenerationOptions { model ImageLocation { @doc("The URL that provides temporary access to download the generated image.") url?: url; - - @doc("The error if the operation failed.") - error?: Foundations.Error; } @doc("The result of the operation if the operation succeeded.") @added(ServiceApiVersions.v2023_06_01_Preview) model ImageGenerations { @doc("A timestamp when this job or item was created (in unix epochs).") + @projectedName("csharp", "InternalCreatedSecondsAfterUnixEpoch") created: int64; @doc("The images generated by the operator.") diff --git a/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml b/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml index db615f381014..b7e5e4d3a7f6 100644 --- a/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml +++ b/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml @@ -12,15 +12,6 @@ options: azure-resource-provider-folder: "data-plane" examples-directory: examples omit-unreachable-types: true - # Uncomment this line and add "@azure-tools/cadl-python" to your package.json to generate Python code - # "@azure-tools/cadl-python": - # "basic-setup-py": true - # "package-version": - # "package-name": - # "output-path": - # Uncomment this line and add "@azure-tools/cadl-java" to your package.json to generate Java code - # "@azure-tools/cadl-java": true - # Uncomment this line and add "@azure-tools/cadl-csharp" to your package.json to generate C# code "@azure-tools/typespec-csharp": package-dir: "Azure.AI.OpenAI" namespace: "Azure.AI.OpenAI" @@ -33,12 +24,11 @@ options: # partial-update: true # enable-sync-stack: true # generate-tests: false - # https://github.com/Azure/azure-rest-api-specs/issues/24498 - # "@azure-tools/typespec-ts": - # package-dir: "azure-ai-openai" - # emitter-output-dir: "{project-root}/generated" - # generateMetadata: false - # generateTest: false - # isModularLibrary: true - # packageDetails: - # name: "@azure/openai" + "@azure-tools/typespec-ts": + package-dir: "azure-ai-openai" + emitter-output-dir: "{project-root}/generated" + generateMetadata: false + generateTest: false + isModularLibrary: true + packageDetails: + name: "@azure/openai" diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json 
b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json index 3de428e18622..a0a78f31c4ae 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json @@ -420,7 +420,7 @@ "description": "A timestamp when this operation and its associated images expire and will be deleted (in unix epochs)." }, "result": { - "$ref": "#/definitions/ImageLocationResult", + "$ref": "#/definitions/ImageGenerations", "description": "The result of the operation if the operation succeeded." }, "status": { @@ -1001,22 +1001,7 @@ "prompt" ] }, - "ImageLocation": { - "type": "object", - "properties": { - "url": { - "type": "string", - "format": "uri", - "description": "The URL that provides temporary access to download the generated image." - }, - "error": { - "$ref": "#/definitions/Azure.Core.Foundations.Error", - "description": "The error if the operation failed." - } - }, - "description": "The image url if successful, and an error otherwise." - }, - "ImageLocationResult": { + "ImageGenerations": { "type": "object", "properties": { "created": { @@ -1040,6 +1025,17 @@ "data" ] }, + "ImageLocation": { + "type": "object", + "properties": { + "url": { + "type": "string", + "format": "uri", + "description": "The URL that provides temporary access to download the generated image." + } + }, + "description": "The image url if successful, and an error otherwise." + }, "ImageSize": { "type": "string", "description": "The desired size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.", From 4344311be54e1a97744e1ff54d08bc4a24995ff1 Mon Sep 17 00:00:00 2001 From: Travis Wilson Date: Tue, 27 Jun 2023 16:44:05 -0700 Subject: [PATCH 6/9] Adjustments based on feedback and .NET viability experiments --- .../OpenAI.Inference/models/images.tsp | 36 +++++++++++++-- .../preview/2023-06-01-preview/generated.json | 45 +++++++++++++------ 2 files changed, 64 insertions(+), 17 deletions(-) diff --git a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp index fb87559b666d..8b3a7b0c795f 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp @@ -22,6 +22,16 @@ enum ImageSize { Size1024x1024: "1024x1024", } +@doc("The format in which the generated images are returned.") +@added(ServiceApiVersions.v2023_06_01_Preview) +enum ImageGenerationResponseFormat { + @doc("Image generation response items should provide a URL from which the image may be retrieved.") + Url: "url", + + @doc("Image generation response items should provide image data as a base64-encoded string.") + Base64: "b64_json", +} + @doc("Represents the request data used to generate images.") @added(ServiceApiVersions.v2023_06_01_Preview) model ImageGenerationOptions { @@ -35,17 +45,36 @@ model ImageGenerationOptions { @doc("The desired size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 (defaults to 1024x1024).") size?: ImageSize = ImageSize.Size1024x1024; + @doc(""" + The format in which image generation response items should be presented. + Azure OpenAI only supports URL response items. 
+ """) + @projectedName("json", "response_format") + responseFormat?: ImageGenerationResponseFormat; + @doc("A unique identifier representing your end-user, which can help to monitor and detect abuse.") user?: string; } -@doc("The image url if successful, and an error otherwise.") +@doc("An image response item that provides a URL from which an image may be accessed.") @added(ServiceApiVersions.v2023_06_01_Preview) +@projectedName("csharp", "InternalImageLocation") model ImageLocation { @doc("The URL that provides temporary access to download the generated image.") - url?: url; + url: url; } +@doc("An image response item that directly represents the image data as a base64-encoded string.") +@added(ServiceApiVersions.v2023_06_01_Preview) +model ImagePayload { + @doc("The complete data for an image represented as a base64-encoded string.") + @projectedName("json", "b64_json") + base64Data: string; +} + +#suppress "@azure-tools/typespec-autorest/union-unsupported" "openapi v2 not required" +alias ImageResponseItem = ImageLocation | ImagePayload; + @doc("The result of the operation if the operation succeeded.") @added(ServiceApiVersions.v2023_06_01_Preview) model ImageGenerations { @@ -54,7 +83,8 @@ model ImageGenerations { created: int64; @doc("The images generated by the operator.") - data: ImageLocation[]; + @projectedName("csharp", "InternalEmittedImageResponseItems") + data: ImageResponseItem[]; } // Note: pending resolution of cross-language code emission behavior for long-running operations, image generation diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json index a0a78f31c4ae..dd934fe9db1f 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json @@ -991,6 +991,11 @@ "description": "The desired size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 (defaults to 1024x1024).", "default": "1024x1024" }, + "response_format": { + "$ref": "#/definitions/ImageGenerationResponseFormat", + "description": " The format in which image generation response items should be presented.\n Azure OpenAI only supports URL response items.", + "x-ms-client-name": "responseFormat" + }, "user": { "type": "string", "description": "A unique identifier representing your end-user, which can help to monitor and detect abuse." @@ -1001,6 +1006,30 @@ "prompt" ] }, + "ImageGenerationResponseFormat": { + "type": "string", + "description": "The format in which the generated images are returned.", + "enum": [ + "url", + "b64_json" + ], + "x-ms-enum": { + "name": "ImageGenerationResponseFormat", + "modelAsString": true, + "values": [ + { + "name": "Url", + "value": "url", + "description": "Image generation response items should provide a URL from which the image may be retrieved." + }, + { + "name": "Base64", + "value": "b64_json", + "description": "Image generation response items should provide image data as a base64-encoded string." 
+ } + ] + } + }, "ImageGenerations": { "type": "object", "properties": { @@ -1012,11 +1041,10 @@ "data": { "type": "array", "items": { - "$ref": "#/definitions/ImageLocation" + "x-typespec-name": "ImageLocation | ImagePayload" }, - "x-ms-identifiers": [], "description": "The images generated by the operator.", - "x-typespec-name": "ImageLocation[]" + "x-typespec-name": "ImageLocation | ImagePayload[]" } }, "description": "The result of the operation if the operation succeeded.", @@ -1025,17 +1053,6 @@ "data" ] }, - "ImageLocation": { - "type": "object", - "properties": { - "url": { - "type": "string", - "format": "uri", - "description": "The URL that provides temporary access to download the generated image." - } - }, - "description": "The image url if successful, and an error otherwise." - }, "ImageSize": { "type": "string", "description": "The desired size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.", From 14ca49a909ee8d2824a4eb9fbcce53d3e9348630 Mon Sep 17 00:00:00 2001 From: Travis Wilson <35748617+trrwilson@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:13:33 -0700 Subject: [PATCH 7/9] Example naming update (caught by CI) --- .../OpenAI.Inference/.gitignore | 2 + .../get_image_operation_status.json | 2 +- .../start_generate_image.json | 2 +- .../examples/get_image_operation_status.json | 2 +- .../examples/start_generate_image.json | 2 +- .../preview/2023-06-01-preview/generated.json | 42 +++++++++++++++++++ 6 files changed, 48 insertions(+), 4 deletions(-) create mode 100644 specification/cognitiveservices/OpenAI.Inference/.gitignore diff --git a/specification/cognitiveservices/OpenAI.Inference/.gitignore b/specification/cognitiveservices/OpenAI.Inference/.gitignore new file mode 100644 index 000000000000..44d6cda3266c --- /dev/null +++ b/specification/cognitiveservices/OpenAI.Inference/.gitignore @@ -0,0 +1,2 @@ +**/package.json +**/package-lock.json diff --git a/specification/cognitiveservices/OpenAI.Inference/examples/2023-06-01-preview/get_image_operation_status.json b/specification/cognitiveservices/OpenAI.Inference/examples/2023-06-01-preview/get_image_operation_status.json index c9923b9ae46f..b7b1e32b8bb6 100644 --- a/specification/cognitiveservices/OpenAI.Inference/examples/2023-06-01-preview/get_image_operation_status.json +++ b/specification/cognitiveservices/OpenAI.Inference/examples/2023-06-01-preview/get_image_operation_status.json @@ -1,5 +1,5 @@ { - "operationId": "GetImageOperationStatus", + "operationId": "GetAzureBatchImageGenerationOperationStatus", "title": "Returns the status of the images operation", "parameters": { "endpoint": "{endpoint}", diff --git a/specification/cognitiveservices/OpenAI.Inference/examples/2023-06-01-preview/start_generate_image.json b/specification/cognitiveservices/OpenAI.Inference/examples/2023-06-01-preview/start_generate_image.json index f8bac40e573f..40903f1667f0 100644 --- a/specification/cognitiveservices/OpenAI.Inference/examples/2023-06-01-preview/start_generate_image.json +++ b/specification/cognitiveservices/OpenAI.Inference/examples/2023-06-01-preview/start_generate_image.json @@ -1,5 +1,5 @@ { - "operationId": "StartGenerateImage", + "operationId": "BeginAzureBatchImageGeneration", "title": "Starts the generation of a batch of images from a text caption", "parameters": { "endpoint": "{endpoint}", diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/examples/get_image_operation_status.json 
b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/examples/get_image_operation_status.json index c9923b9ae46f..b7b1e32b8bb6 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/examples/get_image_operation_status.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/examples/get_image_operation_status.json @@ -1,5 +1,5 @@ { - "operationId": "GetImageOperationStatus", + "operationId": "GetAzureBatchImageGenerationOperationStatus", "title": "Returns the status of the images operation", "parameters": { "endpoint": "{endpoint}", diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/examples/start_generate_image.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/examples/start_generate_image.json index f8bac40e573f..40903f1667f0 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/examples/start_generate_image.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/examples/start_generate_image.json @@ -1,5 +1,5 @@ { - "operationId": "StartGenerateImage", + "operationId": "BeginAzureBatchImageGeneration", "title": "Starts the generation of a batch of images from a text caption", "parameters": { "endpoint": "{endpoint}", diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json index dd934fe9db1f..42137efb3996 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json @@ -212,6 +212,38 @@ } } }, + "/images/generations": { + "post": { + "operationId": "GetImageGenerations", + "parameters": [ + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "imageGenerationOptions": { + "$ref": "#/definitions/ImageGenerationOptions" + } + }, + "required": [ + "imageGenerationOptions" + ], + "x-typespec-name": "(anonymous model)" + }, + "required": true + } + ], + "responses": { + "200": { + "description": "The request has succeeded.", + "schema": { + "$ref": "#/definitions/ImageGenerations" + } + } + } + } + }, "/images/generations:submit": { "post": { "operationId": "BeginAzureBatchImageGeneration", @@ -255,6 +287,11 @@ "$ref": "#/definitions/Azure.Core.Foundations.ErrorResponse" } } + }, + "x-ms-examples": { + "Starts the generation of a batch of images from a text caption": { + "$ref": "./examples/start_generate_image.json" + } } } }, @@ -293,6 +330,11 @@ "$ref": "#/definitions/Azure.Core.Foundations.ErrorResponse" } } + }, + "x-ms-examples": { + "Returns the status of the images operation": { + "$ref": "./examples/get_image_operation_status.json" + } } } } From 1393b6e34d7370733e3e2236c4df686280a96f36 Mon Sep 17 00:00:00 2001 From: Travis Wilson <35748617+trrwilson@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:23:51 -0700 Subject: [PATCH 8/9] Example update --- .../preview/2023-06-01-preview/generated.json | 32 ------------------- 1 file changed, 32 deletions(-) diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json 
b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json index 42137efb3996..0404438d32fc 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json @@ -212,38 +212,6 @@ } } }, - "/images/generations": { - "post": { - "operationId": "GetImageGenerations", - "parameters": [ - { - "name": "body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "imageGenerationOptions": { - "$ref": "#/definitions/ImageGenerationOptions" - } - }, - "required": [ - "imageGenerationOptions" - ], - "x-typespec-name": "(anonymous model)" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "The request has succeeded.", - "schema": { - "$ref": "#/definitions/ImageGenerations" - } - } - } - } - }, "/images/generations:submit": { "post": { "operationId": "BeginAzureBatchImageGeneration", From 827a285cfc63c3e23f110ddcc7d3bd4709c5e093 Mon Sep 17 00:00:00 2001 From: Travis Wilson <35748617+trrwilson@users.noreply.github.com> Date: Wed, 28 Jun 2023 18:18:34 -0700 Subject: [PATCH 9/9] PR feedback and re-disabling ts (will treat separately) --- .../OpenAI.Inference/models/images.tsp | 6 ++---- .../OpenAI.Inference/tspconfig.yaml | 16 ++++++++-------- .../preview/2023-06-01-preview/generated.json | 8 ++------ 3 files changed, 12 insertions(+), 18 deletions(-) diff --git a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp index 8b3a7b0c795f..cc9dffd6601d 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp @@ -72,9 +72,6 @@ model ImagePayload { base64Data: string; } -#suppress "@azure-tools/typespec-autorest/union-unsupported" "openapi v2 not required" -alias ImageResponseItem = ImageLocation | ImagePayload; - @doc("The result of the operation if the operation succeeded.") @added(ServiceApiVersions.v2023_06_01_Preview) model ImageGenerations { @@ -82,9 +79,10 @@ model ImageGenerations { @projectedName("csharp", "InternalCreatedSecondsAfterUnixEpoch") created: int64; + #suppress "@azure-tools/typespec-autorest/union-unsupported" "openapi v2 not required" @doc("The images generated by the operator.") @projectedName("csharp", "InternalEmittedImageResponseItems") - data: ImageResponseItem[]; + data: ImageLocation[] | ImagePayload[]; } // Note: pending resolution of cross-language code emission behavior for long-running operations, image generation diff --git a/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml b/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml index b68feae29393..b3d1870826b6 100644 --- a/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml +++ b/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml @@ -23,11 +23,11 @@ options: partial-update: true enable-sync-stack: true generate-tests: false - "@azure-tools/typespec-ts": - package-dir: "azure-ai-openai" - emitter-output-dir: "{project-root}/generated" - generateMetadata: false - generateTest: false - isModularLibrary: true - packageDetails: - name: "@azure/openai" + # "@azure-tools/typespec-ts": + # package-dir: "azure-ai-openai" + # emitter-output-dir: "{project-root}/generated" + # generateMetadata: false + # generateTest: false + # isModularLibrary: true + # 
packageDetails: + # name: "@azure/openai" diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json index 0404438d32fc..2f4e00b9c8bd 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json @@ -1049,12 +1049,8 @@ "description": "A timestamp when this job or item was created (in unix epochs)." }, "data": { - "type": "array", - "items": { - "x-typespec-name": "ImageLocation | ImagePayload" - }, - "description": "The images generated by the operator.", - "x-typespec-name": "ImageLocation | ImagePayload[]" + "x-typespec-name": "ImageLocation[] | ImagePayload[]", + "description": "The images generated by the operator." } }, "description": "The result of the operation if the operation succeeded.",