From 996cce32d1884ac382fcd0e9323b1c29aa26efd6 Mon Sep 17 00:00:00 2001 From: Peter Kirkham Date: Tue, 14 Jan 2025 12:05:39 -0800 Subject: [PATCH] feat: privacy_mode --- llm_observability_examples.py | 4 +++- posthog/__init__.py | 2 ++ posthog/ai/openai/openai.py | 10 +++++----- posthog/ai/openai/openai_async.py | 10 +++++----- posthog/ai/utils.py | 13 +++++++++---- 5 files changed, 24 insertions(+), 15 deletions(-) diff --git a/llm_observability_examples.py b/llm_observability_examples.py index 2e92032..35caa64 100644 --- a/llm_observability_examples.py +++ b/llm_observability_examples.py @@ -9,10 +9,12 @@ posthog.personal_api_key = os.getenv("POSTHOG_PERSONAL_API_KEY", "your-personal-api-key") posthog.host = os.getenv("POSTHOG_HOST", "http://localhost:8000") # Or https://app.posthog.com posthog.debug = True +# set this to False to include prompt and output content in the captured events +posthog.privacy_mode = True openai_client = OpenAI( api_key=os.getenv("OPENAI_API_KEY", "your-openai-api-key"), - posthog_client=posthog, + posthog_client=posthog ) async_openai_client = AsyncOpenAI( diff --git a/posthog/__init__.py b/posthog/__init__.py index fa56e8b..dff0dfa 100644 --- a/posthog/__init__.py +++ b/posthog/__init__.py @@ -26,6 +26,8 @@ exception_autocapture_integrations = [] # type: List[Integrations] # Used to determine in app paths for exception autocapture.
Defaults to the current working directory project_root = None # type: Optional[str] +# Used by the AI observability feature: when enabled, prompts and outputs are not captured, only usage and metadata +privacy_mode = False # type: bool default_client = None # type: Optional[Client] diff --git a/posthog/ai/openai/openai.py b/posthog/ai/openai/openai.py index ca49a6c..6896e31 100644 --- a/posthog/ai/openai/openai.py +++ b/posthog/ai/openai/openai.py @@ -8,7 +8,7 @@ except ImportError: raise ModuleNotFoundError("Please install the OpenAI SDK to use this feature: 'pip install openai'") -from posthog.ai.utils import call_llm_and_track_usage, get_model_params +from posthog.ai.utils import call_llm_and_track_usage, get_model_params, with_privacy_mode from posthog.client import Client as PostHogClient @@ -142,15 +142,15 @@ def _capture_streaming_event( "$ai_provider": "openai", "$ai_model": kwargs.get("model"), "$ai_model_parameters": get_model_params(kwargs), - "$ai_input": kwargs.get("messages"), - "$ai_output": { + "$ai_input": with_privacy_mode(self._client._ph_client, kwargs.get("messages")), + "$ai_output": with_privacy_mode(self._client._ph_client, { "choices": [ { "content": output, "role": "assistant", } ] - }, + }), "$ai_http_status": 200, "$ai_input_tokens": usage_stats.get("prompt_tokens", 0), "$ai_output_tokens": usage_stats.get("completion_tokens", 0), @@ -214,7 +214,7 @@ def create( event_properties = { "$ai_provider": "openai", "$ai_model": kwargs.get("model"), - "$ai_input": kwargs.get("input"), + "$ai_input": with_privacy_mode(self._client._ph_client, kwargs.get("input")), "$ai_http_status": 200, "$ai_input_tokens": usage_stats.get("prompt_tokens", 0), "$ai_latency": latency, diff --git a/posthog/ai/openai/openai_async.py b/posthog/ai/openai/openai_async.py index 4f8bf34..094fcfd 100644 --- a/posthog/ai/openai/openai_async.py +++ b/posthog/ai/openai/openai_async.py @@ -8,7 +8,7 @@ except ImportError: raise ModuleNotFoundError("Please install the OpenAI SDK to use this
feature: 'pip install openai'") -from posthog.ai.utils import call_llm_and_track_usage_async, get_model_params +from posthog.ai.utils import call_llm_and_track_usage_async, get_model_params, with_privacy_mode from posthog.client import Client as PostHogClient @@ -141,15 +141,15 @@ def _capture_streaming_event( "$ai_provider": "openai", "$ai_model": kwargs.get("model"), "$ai_model_parameters": get_model_params(kwargs), - "$ai_input": kwargs.get("messages"), - "$ai_output": { + "$ai_input": with_privacy_mode(self._client._ph_client, kwargs.get("messages")), + "$ai_output": with_privacy_mode(self._client._ph_client, { "choices": [ { "content": output, "role": "assistant", } ] - }, + }), "$ai_http_status": 200, "$ai_input_tokens": usage_stats.get("prompt_tokens", 0), "$ai_output_tokens": usage_stats.get("completion_tokens", 0), @@ -213,7 +213,7 @@ async def create( event_properties = { "$ai_provider": "openai", "$ai_model": kwargs.get("model"), - "$ai_input": kwargs.get("input"), + "$ai_input": with_privacy_mode(self._client._ph_client, kwargs.get("input")), "$ai_http_status": 200, "$ai_input_tokens": usage_stats.get("prompt_tokens", 0), "$ai_latency": latency, diff --git a/posthog/ai/utils.py b/posthog/ai/utils.py index 2cdb9e2..555804d 100644 --- a/posthog/ai/utils.py +++ b/posthog/ai/utils.py @@ -86,8 +86,8 @@ def call_llm_and_track_usage( "$ai_provider": "openai", "$ai_model": kwargs.get("model"), "$ai_model_parameters": get_model_params(kwargs), - "$ai_input": kwargs.get("messages"), - "$ai_output": format_response(response), + "$ai_input": with_privacy_mode(ph_client, kwargs.get("messages")), + "$ai_output": with_privacy_mode(ph_client, format_response(response)), "$ai_http_status": http_status, "$ai_input_tokens": input_tokens, "$ai_output_tokens": output_tokens, @@ -150,8 +150,8 @@ async def call_llm_and_track_usage_async( "$ai_provider": "openai", "$ai_model": kwargs.get("model"), "$ai_model_parameters": get_model_params(kwargs), - "$ai_input": 
kwargs.get("messages"), - "$ai_output": format_response(response), + "$ai_input": with_privacy_mode(ph_client, kwargs.get("messages")), + "$ai_output": with_privacy_mode(ph_client, format_response(response)), "$ai_http_status": http_status, "$ai_input_tokens": input_tokens, "$ai_output_tokens": output_tokens, @@ -176,3 +176,8 @@ async def call_llm_and_track_usage_async( raise error return response + +def with_privacy_mode(ph_client: PostHogClient, value: Any): + if ph_client.privacy_mode: + return None + return value