release: 1.50.0 #1752

Merged 3 commits on Sep 26, 2024
.release-please-manifest.json (1 addition, 1 deletion)

@@ -1,3 +1,3 @@
 {
-    ".": "1.49.0"
+    ".": "1.50.0"
 }
CHANGELOG.md (13 additions)

@@ -1,5 +1,18 @@
 # Changelog

+## 1.50.0 (2024-09-26)
+
+Full Changelog: [v1.49.0...v1.50.0](https://github.com/openai/openai-python/compare/v1.49.0...v1.50.0)
+
+### Features
+
+* **structured outputs:** add support for accessing raw responses ([#1748](https://github.com/openai/openai-python/issues/1748)) ([0189e28](https://github.com/openai/openai-python/commit/0189e28b0b062a28b16343da0460a4f0f4e17a9a))
+
+
+### Chores
+
+* **pydantic v1:** exclude specific properties when rich printing ([#1751](https://github.com/openai/openai-python/issues/1751)) ([af535ce](https://github.com/openai/openai-python/commit/af535ce6a523eca39438f117a3e55f16064567a9))
+
 ## 1.49.0 (2024-09-26)

 Full Changelog: [v1.48.0...v1.49.0](https://github.com/openai/openai-python/compare/v1.48.0...v1.49.0)
pyproject.toml (1 addition, 1 deletion)

@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.49.0"
+version = "1.50.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
src/openai/_models.py (9 additions, 1 deletion)

@@ -2,14 +2,15 @@

 import os
 import inspect
-from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
+from typing import TYPE_CHECKING, Any, Type, Tuple, Union, Generic, TypeVar, Callable, Optional, cast
 from datetime import date, datetime
 from typing_extensions import (
     Unpack,
     Literal,
     ClassVar,
     Protocol,
     Required,
+    Sequence,
     ParamSpec,
     TypedDict,
     TypeGuard,
@@ -72,6 +73,8 @@

 P = ParamSpec("P")

+ReprArgs = Sequence[Tuple[Optional[str], Any]]
+

 @runtime_checkable
 class _ConfigProtocol(Protocol):
@@ -94,6 +97,11 @@ def model_fields_set(self) -> set[str]:
         class Config(pydantic.BaseConfig):  # pyright: ignore[reportDeprecated]
             extra: Any = pydantic.Extra.allow  # type: ignore

+        @override
+        def __repr_args__(self) -> ReprArgs:
+            # we don't want these attributes to be included when something like `rich.print` is used
+            return [arg for arg in super().__repr_args__() if arg[0] not in {"_request_id", "__exclude_fields__"}]
+
     if TYPE_CHECKING:
         _request_id: Optional[str] = None
         """The ID of the request, returned via the X-Request-ID header. Useful for debugging requests and reporting issues to OpenAI.
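This `__repr_args__` override applies only on the pydantic v1 code path (it sits alongside the deprecated `Config` class in the library's v1/v2 compatibility shim). Pydantic v1's `__rich_repr__` iterates `__repr_args__`, so the filter keeps internal attributes out of pretty-printed output. A sketch of the behavior, assuming `rich` is installed and pydantic v1 is in use; the model name is illustrative:

    import rich

    from openai import OpenAI

    client = OpenAI()
    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Say hi"}],
    )

    # With the filter above, `_request_id` and `__exclude_fields__` no longer
    # show up when rich pretty-prints the model...
    rich.print(completion)
    # ...but direct attribute access still works for debugging:
    print(completion._request_id)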
src/openai/_version.py (1 addition, 1 deletion)

@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 __title__ = "openai"
-__version__ = "1.49.0"  # x-release-please-version
+__version__ = "1.50.0"  # x-release-please-version
src/openai/resources/beta/chat/completions.py (179 additions, 67 deletions)

@@ -2,16 +2,21 @@

 from __future__ import annotations

-from typing import Dict, List, Union, Iterable, Optional
+from typing import Dict, List, Type, Union, Iterable, Optional, cast
 from functools import partial
 from typing_extensions import Literal

 import httpx

+from .... import _legacy_response
 from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
 from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
 from ...._streaming import Stream
+from ....types.chat import completion_create_params
+from ...._base_client import make_request_options
 from ....lib._parsing import (
     ResponseFormatT,
     validate_input_tools as _validate_input_tools,
@@ -20,6 +25,7 @@
 )
 from ....types.chat_model import ChatModel
 from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
+from ....types.chat.chat_completion import ChatCompletion
 from ....types.chat.chat_completion_chunk import ChatCompletionChunk
 from ....types.chat.parsed_chat_completion import ParsedChatCompletion
 from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
@@ -31,6 +37,25 @@


 class Completions(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> CompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return CompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return CompletionsWithStreamingResponse(self)
+
     def parse(
         self,
         *,
@@ -113,39 +138,55 @@ class MathResponse(BaseModel):
             **(extra_headers or {}),
         }

-        raw_completion = self._client.chat.completions.create(
-            messages=messages,
-            model=model,
-            response_format=_type_to_response_format(response_format),
-            frequency_penalty=frequency_penalty,
-            function_call=function_call,
-            functions=functions,
-            logit_bias=logit_bias,
-            logprobs=logprobs,
-            max_completion_tokens=max_completion_tokens,
-            max_tokens=max_tokens,
-            n=n,
-            parallel_tool_calls=parallel_tool_calls,
-            presence_penalty=presence_penalty,
-            seed=seed,
-            service_tier=service_tier,
-            stop=stop,
-            stream_options=stream_options,
-            temperature=temperature,
-            tool_choice=tool_choice,
-            tools=tools,
-            top_logprobs=top_logprobs,
-            top_p=top_p,
-            user=user,
-            extra_headers=extra_headers,
-            extra_query=extra_query,
-            extra_body=extra_body,
-            timeout=timeout,
-        )
-        return _parse_chat_completion(
-            response_format=response_format,
-            chat_completion=raw_completion,
-            input_tools=tools,
+        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
+            return _parse_chat_completion(
+                response_format=response_format,
+                chat_completion=raw_completion,
+                input_tools=tools,
+            )
+
+        return self._post(
+            "/chat/completions",
+            body=maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "frequency_penalty": frequency_penalty,
+                    "function_call": function_call,
+                    "functions": functions,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "presence_penalty": presence_penalty,
+                    "response_format": _type_to_response_format(response_format),
+                    "seed": seed,
+                    "service_tier": service_tier,
+                    "stop": stop,
+                    "stream": False,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                    "user": user,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
+            stream=False,
+        )

     def stream(
@@ -247,6 +288,25 @@ def stream(


 class AsyncCompletions(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncCompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncCompletionsWithStreamingResponse(self)
+
     async def parse(
         self,
         *,
@@ -329,39 +389,55 @@ class MathResponse(BaseModel):
             **(extra_headers or {}),
         }

-        raw_completion = await self._client.chat.completions.create(
-            messages=messages,
-            model=model,
-            response_format=_type_to_response_format(response_format),
-            frequency_penalty=frequency_penalty,
-            function_call=function_call,
-            functions=functions,
-            logit_bias=logit_bias,
-            logprobs=logprobs,
-            max_completion_tokens=max_completion_tokens,
-            max_tokens=max_tokens,
-            n=n,
-            parallel_tool_calls=parallel_tool_calls,
-            presence_penalty=presence_penalty,
-            seed=seed,
-            service_tier=service_tier,
-            stop=stop,
-            stream_options=stream_options,
-            temperature=temperature,
-            tool_choice=tool_choice,
-            tools=tools,
-            top_logprobs=top_logprobs,
-            top_p=top_p,
-            user=user,
-            extra_headers=extra_headers,
-            extra_query=extra_query,
-            extra_body=extra_body,
-            timeout=timeout,
-        )
-        return _parse_chat_completion(
-            response_format=response_format,
-            chat_completion=raw_completion,
-            input_tools=tools,
+        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
+            return _parse_chat_completion(
+                response_format=response_format,
+                chat_completion=raw_completion,
+                input_tools=tools,
+            )
+
+        return await self._post(
+            "/chat/completions",
+            body=await async_maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "frequency_penalty": frequency_penalty,
+                    "function_call": function_call,
+                    "functions": functions,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "presence_penalty": presence_penalty,
+                    "response_format": _type_to_response_format(response_format),
+                    "seed": seed,
+                    "service_tier": service_tier,
+                    "stop": stop,
+                    "stream": False,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                    "user": user,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
+            stream=False,
+        )

     def stream(
@@ -461,3 +537,39 @@ def stream(
             response_format=response_format,
             input_tools=tools,
         )
+
+
+class CompletionsWithRawResponse:
+    def __init__(self, completions: Completions) -> None:
+        self._completions = completions
+
+        self.parse = _legacy_response.to_raw_response_wrapper(
+            completions.parse,
+        )
+
+
+class AsyncCompletionsWithRawResponse:
+    def __init__(self, completions: AsyncCompletions) -> None:
+        self._completions = completions
+
+        self.parse = _legacy_response.async_to_raw_response_wrapper(
+            completions.parse,
+        )
+
+
+class CompletionsWithStreamingResponse:
+    def __init__(self, completions: Completions) -> None:
+        self._completions = completions
+
+        self.parse = to_streamed_response_wrapper(
+            completions.parse,
+        )
+
+
+class AsyncCompletionsWithStreamingResponse:
+    def __init__(self, completions: AsyncCompletions) -> None:
+        self._completions = completions
+
+        self.parse = async_to_streamed_response_wrapper(
+            completions.parse,
+        )
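The shape of the `parse` refactor is worth noting: instead of delegating to `client.chat.completions.create(...)` and transforming the result afterwards, `parse` now goes through the client's own `_post` with a `post_parser` hook, which is what lets the generic wrapper classes above intercept it like any other API method. A simplified sketch of the pattern; `WireCompletion` and `transport_post` are hypothetical stand-ins, not the library's internals:

    import json
    from dataclasses import dataclass
    from typing import Any, Callable, TypeVar

    R = TypeVar("R")


    @dataclass
    class WireCompletion:
        # stand-in for the plain ChatCompletion deserialized off the wire
        content: str


    def transport_post(body: dict[str, Any], *, post_parser: Callable[[WireCompletion], R]) -> R:
        # stand-in for the client transport: deserialize the response body,
        # then run the per-call hook before returning to the caller
        wire = WireCompletion(content='{"final_answer": "19"}')
        return post_parser(wire)


    def parse(body: dict[str, Any]) -> dict[str, Any]:
        def parser(raw: WireCompletion) -> dict[str, Any]:
            # upgrade step, mirroring ChatCompletion -> ParsedChatCompletion above
            return json.loads(raw.content)

        return transport_post(body, post_parser=parser)


    print(parse({"model": "example"}))  # {'final_answer': '19'}

Because parsing now happens inside the transport call, a raw-response wrapper can bypass the hook and hand back the HTTP response instead, which is exactly what `.with_raw_response` relies on.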
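For completeness, the streaming counterpart registered above works as a context manager that defers reading the body. A usage sketch under the same assumptions as the earlier example (illustrative model name and schema):

    from pydantic import BaseModel

    from openai import OpenAI

    client = OpenAI()


    class MathResponse(BaseModel):
        final_answer: str


    # Unlike `.with_raw_response`, the body is not read eagerly; it is
    # fetched when `.parse()` (or an iterator method) is called in the block.
    with client.beta.chat.completions.with_streaming_response.parse(
        model="gpt-4o-2024-08-06",
        messages=[{"role": "user", "content": "What is 9 + 10?"}],
        response_format=MathResponse,
    ) as response:
        print(response.headers.get("x-request-id"))
        completion = response.parse()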