Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Updates dependencies and fixes errors and warnings #330

Merged
merged 5 commits into from
Feb 19, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
352 changes: 193 additions & 159 deletions assistants/codespace-assistant/uv.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion assistants/explorer-assistant/assistant/response/model.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from typing import Any, Protocol, Sequence

from assistant_extensions.ai_clients.model import CompletionMessage
from attr import dataclass
from llm_client.model import CompletionMessage
from semantic_workbench_api_model.workbench_model import (
MessageType,
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,9 @@
from typing import Any, Awaitable, Callable, Sequence

import deepmerge
from assistant_extensions.ai_clients.model import CompletionMessage
from assistant_extensions.artifacts import ArtifactsExtension
from assistant_extensions.attachments import AttachmentsExtension
from llm_client.model import CompletionMessage
from semantic_workbench_api_model.workbench_model import (
ConversationMessage,
ConversationParticipant,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from anthropic import NotGiven
from anthropic.types import Message, MessageParam, TextBlock, ToolUseBlock
from assistant_extensions.ai_clients.config import AnthropicClientConfigModel
from assistant_extensions.ai_clients.model import CompletionMessage
from llm_client.model import CompletionMessage
from semantic_workbench_api_model.workbench_model import (
MessageType,
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@
import deepmerge
import openai_client
from assistant_extensions.ai_clients.config import AzureOpenAIClientConfigModel, OpenAIClientConfigModel
from assistant_extensions.ai_clients.model import CompletionMessage
from assistant_extensions.artifacts import ArtifactsExtension
from llm_client.model import CompletionMessage
from openai.types.chat import (
ChatCompletion,
ChatCompletionDeveloperMessageParam,
Expand Down
454 changes: 237 additions & 217 deletions assistants/explorer-assistant/uv.lock

Large diffs are not rendered by default.

2,181 changes: 1,179 additions & 1,002 deletions assistants/guided-conversation-assistant/uv.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion assistants/prospector-assistant/assistant/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,10 @@

import deepmerge
import openai_client
from assistant_extensions.ai_clients.model import CompletionMessageImageContent
from assistant_extensions.attachments import AttachmentsExtension
from content_safety.evaluators import CombinedContentSafetyEvaluator
from openai.types.chat import ChatCompletionMessageParam
from llm_client.model import CompletionMessageImageContent
from pydantic import BaseModel, ConfigDict
from semantic_workbench_api_model.workbench_model import (
AssistantStateEvent,
Expand Down
119 changes: 69 additions & 50 deletions assistants/prospector-assistant/uv.lock

Large diffs are not rendered by default.

1,010 changes: 510 additions & 500 deletions assistants/skill-assistant/uv.lock

Large diffs are not rendered by default.

1,598 changes: 802 additions & 796 deletions examples/python/python-02-simple-chatbot/uv.lock

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,11 @@
from enum import StrEnum
from typing import Annotated, Any, Literal

import google.generativeai as genai
import openai
import openai_client
from anthropic import AsyncAnthropic
from content_safety.evaluators import CombinedContentSafetyEvaluatorConfig
from google import genai
from pydantic import BaseModel, ConfigDict, Field
from semantic_workbench_assistant.config import ConfigSecretStr, UISchema

Expand Down Expand Up @@ -180,9 +180,8 @@ class GeminiServiceConfig(ServiceConfig):
Field(title="Gemini Model", description="The Gemini model to use for generating responses."),
] = "gemini-1.5-pro"

def new_client(self, **kwargs) -> genai.GenerativeModel:
genai.configure(api_key=self.gemini_api_key)
return genai.GenerativeModel(self.gemini_model)
def new_client(self, **kwargs) -> genai.Client:
    """Return a google-genai ``Client`` authenticated with this config's Gemini API key.

    NOTE(review): unlike the legacy ``google-generativeai`` API this replaces
    (which bound a model via ``genai.GenerativeModel``), the new client is
    model-agnostic — callers presumably pass ``self.gemini_model`` per request;
    confirm against call sites. ``**kwargs`` is accepted for interface
    compatibility but is not forwarded to ``genai.Client``.
    """
    return genai.Client(api_key=self.gemini_api_key)


# endregion
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

import anthropic
import deepmerge
import google.generativeai as genai
from google.genai.types import Content
from openai.types.chat import (
ChatCompletion,
ChatCompletionAssistantMessageParam,
Expand All @@ -16,7 +16,7 @@
ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion import Choice
from pydantic import BaseModel
from pydantic import BaseModel, ConfigDict

from .config import (
AnthropicServiceConfig,
Expand All @@ -42,8 +42,7 @@ class GenerateResponseResult(BaseModel):


class ModelAdapter(BaseModel):
class Config:
arbitrary_types_allowed = True
model_config = ConfigDict(arbitrary_types_allowed=True)

@abstractmethod
def _format_messages(self, messages: List[Message]) -> Any:
Expand Down Expand Up @@ -230,7 +229,7 @@ async def generate_response(
#


GeminiFormattedMessages: TypeAlias = Iterable[genai.types.StrictContentType]
GeminiFormattedMessages: TypeAlias = Iterable[Content]


class GeminiAdapter(ModelAdapter):
Expand Down Expand Up @@ -265,10 +264,12 @@ async def generate_response(
}

try:
model = service_config.new_client()
chat = model.start_chat(history=list(formatted_messages)[:-1])
response = await chat.send_message_async(list(formatted_messages)[-1])
deepmerge.always_merger.merge(metadata, {"debug": {"response": response.to_dict()}})
client = service_config.new_client()
chat = client.aio.chats.create(model=service_config.gemini_model, history=list(formatted_messages)[:-1])
latest_message = list(formatted_messages)[-1]
message = (latest_message.parts or [""])[0]
response = await chat.send_message(message)
deepmerge.always_merger.merge(metadata, {"debug": {"response": response.model_dump(mode="json")}})
return GenerateResponseResult(
response=response.text,
metadata=metadata,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ readme = "README.md"
requires-python = ">=3.11"
dependencies = [
"anthropic>=0.34.2",
"google-generativeai>=0.8.1",
"google-genai>=1.2.0",
"openai>=1.61.0",
"tiktoken>=0.8.0",
"semantic-workbench-assistant>=0.1.0",
Expand Down
Loading