From 75ea65e9d80ddb36360aaf0f2957040747814c8a Mon Sep 17 00:00:00 2001 From: Aly Shehata Date: Wed, 29 Nov 2023 19:38:44 -0600 Subject: [PATCH 01/14] Initial attempt at exposing system prompt to UI via 'Additional Options' of chat interface --- private_gpt/ui/ui.py | 64 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 58 insertions(+), 6 deletions(-) diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py index eeddb0fb4..f7e515e75 100644 --- a/private_gpt/ui/ui.py +++ b/private_gpt/ui/ui.py @@ -9,7 +9,7 @@ from fastapi import FastAPI from gradio.themes.utils.colors import slate # type: ignore from injector import inject, singleton -from llama_index.llms import ChatMessage, ChatResponse, MessageRole +from llama_index.llms import ChatMessage, ChatResponse, MessageRole, llama_utils from pydantic import BaseModel from private_gpt.constants import PROJECT_ROOT_PATH @@ -30,6 +30,8 @@ SOURCES_SEPARATOR = "\n\n Sources: \n" +MODES = ["Query Docs", "Search in Docs", "LLM Chat"] + class Source(BaseModel): file: str @@ -71,6 +73,9 @@ def __init__( # Cache the UI blocks self._ui_block = None + self.mode = MODES[0] + self._system_prompt = self._get_default_system_prompt(self.mode) + def _chat(self, message: str, history: list[list[str]], mode: str, *_: Any) -> Any: def yield_deltas(completion_gen: CompletionGen) -> Iterable[str]: full_response: str = "" @@ -121,9 +126,7 @@ def build_history() -> list[ChatMessage]: all_messages.insert( 0, ChatMessage( - content="You can only answer questions about the provided context. If you know the answer " - "but it is not based in the provided context, don't provide the answer, just state " - "the answer is not in the context provided.", + content=self._system_prompt, role=MessageRole.SYSTEM, ), ) @@ -134,6 +137,15 @@ def build_history() -> list[ChatMessage]: yield from yield_deltas(query_stream) case "LLM Chat": + # Add a system message to force the behaviour of the LLM + # to answer only questions about the provided context. + all_messages.insert( + 0, + ChatMessage( + content=self._system_prompt, + role=MessageRole.SYSTEM, + ), + ) llm_stream = self._chat_service.stream_chat( messages=all_messages, use_context=False, @@ -154,6 +166,31 @@ def build_history() -> list[ChatMessage]: for index, source in enumerate(sources, start=1) ) + # On initialization and on mode change, this function set the system prompt + # to the default prompt based on the mode (and user settings). + # TODO - Should system prompt be reset when user switches mode? That is current behavior + def _get_default_system_prompt(self, mode): + p = "" + match mode: + case "Query Docs": + p = "You can only answer questions about the provided context. If you know the answer " \ + "but it is not based in the provided context, don't provide the answer, just state " \ + "the answer is not in the context provided." 
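+            # For LLM-only chat, fall back to llama_index's built-in default
+            # system prompt when nothing is configured in settings.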
+ case "LLM Chat": + p = settings().local.default_system_prompt or llama_utils.DEFAULT_SYSTEM_PROMPT + case "Search in Docs": + # TODO - Verify no prompt needed for doc search (and no default prompt options) + p = "" + return p + + def _set_system_prompt(self, system_prompt_input): + logger.info("Setting system prompt to: {}".format(system_prompt_input)) + self._system_prompt = system_prompt_input + + def _set_current_mode(self, mode): + self.mode = mode + return self._get_default_system_prompt(mode) + def _list_ingested_files(self) -> list[list[str]]: files = set() for ingested_document in self._ingest_service.list_ingested(): @@ -193,7 +230,7 @@ def _build_ui_blocks(self) -> gr.Blocks: with gr.Row(): with gr.Column(scale=3, variant="compact"): mode = gr.Radio( - ["Query Docs", "Search in Docs", "LLM Chat"], + MODES, label="Mode", value="Query Docs", ) @@ -220,6 +257,21 @@ def _build_ui_blocks(self) -> gr.Blocks: outputs=ingested_dataset, ) ingested_dataset.render() + system_prompt_input = gr.Textbox( + placeholder=self._system_prompt, + label="System Prompt", + render=False, + max_lines=2, + interactive=True) + # When mode changes, set default system prompt + mode.change( + self._set_current_mode, + inputs=mode) + # On submit, set system prompt to use in queries + system_prompt_input.blur( + self._set_system_prompt, + inputs=system_prompt_input) + with gr.Column(scale=7): _ = gr.ChatInterface( self._chat, @@ -232,7 +284,7 @@ def _build_ui_blocks(self) -> gr.Blocks: AVATAR_BOT, ), ), - additional_inputs=[mode, upload_button], + additional_inputs=[mode, upload_button, system_prompt_input], ) return blocks From 5f20fc0115cd77f5726112318a5a8eaec9d7aca6 Mon Sep 17 00:00:00 2001 From: Aly Shehata Date: Fri, 1 Dec 2023 14:19:36 -0600 Subject: [PATCH 02/14] Allow placeholder to change when mode is changed --- private_gpt/ui/ui.py | 44 +++++++++++++++++++------------------------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py index f7e515e75..984199396 100644 --- a/private_gpt/ui/ui.py +++ b/private_gpt/ui/ui.py @@ -119,33 +119,23 @@ def build_history() -> list[ChatMessage]: new_message = ChatMessage(content=message, role=MessageRole.USER) all_messages = [*build_history(), new_message] + # Add a system message to force the behaviour of the LLM + # to answer only questions about the provided context. + all_messages.insert( + 0, + ChatMessage( + content=self._system_prompt, + role=MessageRole.SYSTEM, + ) + ) match mode: case "Query Docs": - # Add a system message to force the behaviour of the LLM - # to answer only questions about the provided context. - all_messages.insert( - 0, - ChatMessage( - content=self._system_prompt, - role=MessageRole.SYSTEM, - ), - ) query_stream = self._chat_service.stream_chat( messages=all_messages, use_context=True, ) yield from yield_deltas(query_stream) - case "LLM Chat": - # Add a system message to force the behaviour of the LLM - # to answer only questions about the provided context. - all_messages.insert( - 0, - ChatMessage( - content=self._system_prompt, - role=MessageRole.SYSTEM, - ), - ) llm_stream = self._chat_service.stream_chat( messages=all_messages, use_context=False, @@ -168,7 +158,6 @@ def build_history() -> list[ChatMessage]: # On initialization and on mode change, this function set the system prompt # to the default prompt based on the mode (and user settings). - # TODO - Should system prompt be reset when user switches mode? 
That is current behavior def _get_default_system_prompt(self, mode): p = "" match mode: @@ -189,7 +178,8 @@ def _set_system_prompt(self, system_prompt_input): def _set_current_mode(self, mode): self.mode = mode - return self._get_default_system_prompt(mode) + self._set_system_prompt(self._get_default_system_prompt(mode)) + return gr.update(placeholder=self._system_prompt) def _list_ingested_files(self) -> list[list[str]]: files = set() @@ -260,17 +250,21 @@ def _build_ui_blocks(self) -> gr.Blocks: system_prompt_input = gr.Textbox( placeholder=self._system_prompt, label="System Prompt", - render=False, max_lines=2, - interactive=True) + interactive=True, + render=False + ) # When mode changes, set default system prompt mode.change( self._set_current_mode, - inputs=mode) + inputs=mode, + outputs=system_prompt_input + ) # On submit, set system prompt to use in queries system_prompt_input.blur( self._set_system_prompt, - inputs=system_prompt_input) + inputs=system_prompt_input, + ) with gr.Column(scale=7): _ = gr.ChatInterface( From 922abcaa083a60e4b7bf51b0d7018318d9be84bc Mon Sep 17 00:00:00 2001 From: Aly Shehata Date: Fri, 1 Dec 2023 14:25:06 -0600 Subject: [PATCH 03/14] Increase default lines of system prompt input to 2 lines --- private_gpt/ui/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py index 984199396..0a30c3369 100644 --- a/private_gpt/ui/ui.py +++ b/private_gpt/ui/ui.py @@ -250,7 +250,7 @@ def _build_ui_blocks(self) -> gr.Blocks: system_prompt_input = gr.Textbox( placeholder=self._system_prompt, label="System Prompt", - max_lines=2, + lines=2, interactive=True, render=False ) From 0698b792e9070035ae32c980c24d00bf6873ceff Mon Sep 17 00:00:00 2001 From: Aly Shehata Date: Fri, 1 Dec 2023 17:17:15 -0600 Subject: [PATCH 04/14] Add types to new functions, make _get_default_system_prompt static, and add some code documentation. --- private_gpt/ui/ui.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py index 0a30c3369..05e7bc08c 100644 --- a/private_gpt/ui/ui.py +++ b/private_gpt/ui/ui.py @@ -73,6 +73,7 @@ def __init__( # Cache the UI blocks self._ui_block = None + # Initialize system prompt based on default mode self.mode = MODES[0] self._system_prompt = self._get_default_system_prompt(self.mode) @@ -158,7 +159,8 @@ def build_history() -> list[ChatMessage]: # On initialization and on mode change, this function set the system prompt # to the default prompt based on the mode (and user settings). - def _get_default_system_prompt(self, mode): + @staticmethod + def _get_default_system_prompt(mode: str) -> str: p = "" match mode: case "Query Docs": @@ -172,11 +174,11 @@ def _get_default_system_prompt(self, mode): p = "" return p - def _set_system_prompt(self, system_prompt_input): + def _set_system_prompt(self, system_prompt_input: str) -> None: logger.info("Setting system prompt to: {}".format(system_prompt_input)) self._system_prompt = system_prompt_input - def _set_current_mode(self, mode): + def _set_current_mode(self, mode: str) -> dict: self.mode = mode self._set_system_prompt(self._get_default_system_prompt(mode)) return gr.update(placeholder=self._system_prompt) From d91cce0c4a6d6fc1bee74f7cd94afa9f4f06acdc Mon Sep 17 00:00:00 2001 From: Aly Shehata Date: Sun, 3 Dec 2023 17:10:54 -0600 Subject: [PATCH 05/14] Update UI documentation with system prompt information and examples. 
Update minor comment in ui.py
---
 fern/docs/pages/manual/ui.mdx | 27 +++++++++++++++++++++++++--
 private_gpt/ui/ui.py          |  2 +-
 2 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/fern/docs/pages/manual/ui.mdx b/fern/docs/pages/manual/ui.mdx
index ddc4d04e4..9da47e2ca 100644
--- a/fern/docs/pages/manual/ui.mdx
+++ b/fern/docs/pages/manual/ui.mdx
@@ -35,5 +35,28 @@ database* section in the documentation.
 
 Normal chat interface, self-explanatory ;)
 
-You can check the actual prompt being passed to the LLM by looking at the logs of
-the server. We'll add better observability in future releases.
\ No newline at end of file
+#### System Prompt
+You can view and change the system prompt being passed to the LLM by clicking "Additional Inputs"
+in the chat interface. The system prompt is also logged on the server.
+
+By default, the `Query Docs` mode uses a predefined system prompt. The `LLM Chat` mode attempts to use
+the optional settings value `local.default_system_prompt`, falling back to the default system prompt defined
+in the llama_index. If no system prompt is entered, the UI will display the default system prompt being used
+for the active mode.
+
+##### System Prompt Examples:
+
+The system prompt can effectively provide your chat specialized roles, and results tailored to the prompt
+you have given the model. Examples of system prompts can be found
+[here](https://www.w3schools.com/gen_ai/chatgpt-3-5/chatgpt-3-5_roles.php).
+
+* You are <x>. You have all the knowledge and personality of <x>. Answer as if you were Shakespeare using
+their manner of speaking and vocabulary.
+  * Example: You are Shakespeare. You have all the knowledge and personality of Shakespeare.
+    Answer as if you were Shakespeare using their manner of speaking and vocabulary.
+* You are an expert (at) <role>. Answer all questions using your expertise on <specific domain topic>.
+  * Example: You are an expert software engineer. Answer all questions using your expertise on Python.
+* You are a <role> bot, respond with <response criteria> needed. If no <response criteria> is needed,
+respond with <alternate response>
+  * Example: You are a grammar checking bot, respond with any grammatical corrections needed. If no corrections
+    are needed, respond with "verified".
\ No newline at end of file
diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py
index 05e7bc08c..cb20a184b 100644
--- a/private_gpt/ui/ui.py
+++ b/private_gpt/ui/ui.py
@@ -262,7 +262,7 @@ def _build_ui_blocks(self) -> gr.Blocks:
                             inputs=mode,
                             outputs=system_prompt_input
                         )
-                        # On submit, set system prompt to use in queries
+                        # On blur, set system prompt to use in queries
                         system_prompt_input.blur(
                             self._set_system_prompt,
                             inputs=system_prompt_input,
                         )

From 2a2e2439ded3bcfef6d53a9b5c9a0d25921e194a Mon Sep 17 00:00:00 2001
From: Aly Shehata
Date: Sun, 3 Dec 2023 17:18:19 -0600
Subject: [PATCH 06/14] Update UI documentation with minor edits for clarity.

---
 fern/docs/pages/manual/ui.mdx | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/fern/docs/pages/manual/ui.mdx b/fern/docs/pages/manual/ui.mdx
index 9da47e2ca..04887dd64 100644
--- a/fern/docs/pages/manual/ui.mdx
+++ b/fern/docs/pages/manual/ui.mdx
@@ -46,10 +46,12 @@
 
 ##### System Prompt Examples:
 
-The system prompt can effectively provide your chat specialized roles, and results tailored to the prompt
+The system prompt can effectively provide your chat bot specialized roles, and results tailored to the prompt
 you have given the model. Examples of system prompts can be found
 [here](https://www.w3schools.com/gen_ai/chatgpt-3-5/chatgpt-3-5_roles.php).
+Some interesting examples to try include:
+
 * You are <x>. You have all the knowledge and personality of <x>. Answer as if you were Shakespeare using
 their manner of speaking and vocabulary.

From 9cea043679da27f5bdeccd7103cff92986321f70 Mon Sep 17 00:00:00 2001
From: Aly Shehata
Date: Mon, 4 Dec 2023 13:42:49 -0600
Subject: [PATCH 07/14] Disable prompt entry for modes that do not support
 system prompts. Only add system prompt as a system message if prompt is
 defined. Add new settings fields "default_query_system_prompt" and
 "default_chat_system_prompt". Updated documentation with new settings and
 minor corrections.

---
 fern/docs/pages/manual/ui.mdx               | 11 ++++---
 private_gpt/components/llm/llm_component.py |  2 +-
 private_gpt/settings/settings.py            | 19 +++++++----
 private_gpt/ui/ui.py                        | 36 ++++++++++++---------
 settings.yaml                               |  9 ++++++
 5 files changed, 51 insertions(+), 26 deletions(-)

diff --git a/fern/docs/pages/manual/ui.mdx b/fern/docs/pages/manual/ui.mdx
index 04887dd64..8a089af00 100644
--- a/fern/docs/pages/manual/ui.mdx
+++ b/fern/docs/pages/manual/ui.mdx
@@ -39,9 +39,12 @@ Normal chat interface, self-explanatory ;)
 You can view and change the system prompt being passed to the LLM by clicking "Additional Inputs"
 in the chat interface. The system prompt is also logged on the server.
 
-By default, the `Query Docs` mode uses a predefined system prompt. The `LLM Chat` mode attempts to use
-the optional settings value `local.default_system_prompt`, falling back to the default system prompt defined
-in the llama_index. If no system prompt is entered, the UI will display the default system prompt being used
+By default, the `Query Docs` mode uses the setting value `local.default_query_system_prompt`.
+
+The `LLM Chat` mode attempts to use the optional settings value `local.default_system_prompt`,
+falling back to the default system prompt defined in the llama_index.
+
+If no system prompt is entered, the UI will display the default system prompt being used
 for the active mode.
 
 ##### System Prompt Examples:
@@ -52,7 +55,7 @@ you have given the model. Examples of system prompts can be found
 
 Some interesting examples to try include:
 
-* You are <x>. You have all the knowledge and personality of <x>. Answer as if you were Shakespeare using
+* You are <x>. You have all the knowledge and personality of <x>. Answer as if you were <x> using
 their manner of speaking and vocabulary.

diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py
index cfe3a737a..d5ee3cf7e 100644
--- a/private_gpt/components/llm/llm_component.py
+++ b/private_gpt/components/llm/llm_component.py
@@ -25,7 +25,7 @@ def __init__(self, settings: Settings) -> None:
 
         prompt_style_cls = get_prompt_style(settings.local.prompt_style)
         prompt_style = prompt_style_cls(
-            default_system_prompt=settings.local.default_system_prompt
+            default_system_prompt=settings.local.default_chat_system_prompt
         )
 
         self.llm = LlamaCPP(
diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py
index 125396c3e..b8c93bcbf 100644
--- a/private_gpt/settings/settings.py
+++ b/private_gpt/settings/settings.py
@@ -15,7 +15,7 @@ class CorsSettings(BaseModel):
     enabled: bool = Field(
         description="Flag indicating if CORS headers are set or not."
- "If set to True, the CORS headers will be set to allow all origins, methods and headers.", + "If set to True, the CORS headers will be set to allow all origins, methods and headers.", default=False, ) allow_credentials: bool = Field( @@ -54,8 +54,8 @@ class AuthSettings(BaseModel): ) secret: str = Field( description="The secret to be used for authentication. " - "It can be any non-blank string. For HTTP basic authentication, " - "this value should be the whole 'Authorization' header that is expected" + "It can be any non-blank string. For HTTP basic authentication, " + "this value should be the whole 'Authorization' header that is expected" ) @@ -76,7 +76,7 @@ class ServerSettings(BaseModel): class DataSettings(BaseModel): local_data_folder: str = Field( description="Path to local storage." - "It will be treated as an absolute path if it starts with /" + "It will be treated as an absolute path if it starts with /" ) @@ -108,15 +108,22 @@ class LocalSettings(BaseModel): "`llama2` is the historic behaviour. `default` might work better with your custom models." ), ) - default_system_prompt: str | None = Field( + default_chat_system_prompt: str | None = Field( None, description=( - "The default system prompt to use for the chat engine. " + "The default system prompt to use for the chat mode. " "If none is given - use the default system prompt (from the llama_index). " "Please note that the default prompt might not be the same for all prompt styles. " "Also note that this is only used if the first message is not a system message. " ), ) + default_query_system_prompt: str | None = Field( + None, + description=( + "The default system prompt to use for the query mode. " + # TODO - document what can be used as default query system prompt + ), + ) class EmbeddingSettings(BaseModel): diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py index cb20a184b..896415fec 100644 --- a/private_gpt/ui/ui.py +++ b/private_gpt/ui/ui.py @@ -120,15 +120,15 @@ def build_history() -> list[ChatMessage]: new_message = ChatMessage(content=message, role=MessageRole.USER) all_messages = [*build_history(), new_message] - # Add a system message to force the behaviour of the LLM - # to answer only questions about the provided context. - all_messages.insert( - 0, - ChatMessage( - content=self._system_prompt, - role=MessageRole.SYSTEM, + # If a system prompt is set, add it as a system message + if self._system_prompt: + all_messages.insert( + 0, + ChatMessage( + content=self._system_prompt, + role=MessageRole.SYSTEM, + ) ) - ) match mode: case "Query Docs": query_stream = self._chat_service.stream_chat( @@ -163,14 +163,15 @@ def build_history() -> list[ChatMessage]: def _get_default_system_prompt(mode: str) -> str: p = "" match mode: + # For query chat mode, obtain default system prompt from settings + # TODO - Determine value to use if not defined in settings case "Query Docs": - p = "You can only answer questions about the provided context. If you know the answer " \ - "but it is not based in the provided context, don't provide the answer, just state " \ - "the answer is not in the context provided." 
+ p = settings().local.default_query_system_prompt + # For chat mode, obtain default system prompt from settings or llama_utils case "LLM Chat": - p = settings().local.default_system_prompt or llama_utils.DEFAULT_SYSTEM_PROMPT - case "Search in Docs": - # TODO - Verify no prompt needed for doc search (and no default prompt options) + p = settings().local.default_chat_system_prompt or llama_utils.DEFAULT_SYSTEM_PROMPT + # For any other mode, clear the system prompt + case _: p = "" return p @@ -181,7 +182,12 @@ def _set_system_prompt(self, system_prompt_input: str) -> None: def _set_current_mode(self, mode: str) -> dict: self.mode = mode self._set_system_prompt(self._get_default_system_prompt(mode)) - return gr.update(placeholder=self._system_prompt) + # Update Textbox placeholder and allow interaction if a default system prompt is present + if self._system_prompt: + return gr.update(placeholder=self._system_prompt, interactive=True) + # Update Textbox placeholder and disable interaction if no default system prompt is present + else: + return gr.update(placeholder=self._system_prompt, interactive=False) def _list_ingested_files(self) -> list[list[str]]: files = set() diff --git a/settings.yaml b/settings.yaml index 815ed09b8..a59480478 100644 --- a/settings.yaml +++ b/settings.yaml @@ -43,6 +43,15 @@ local: llm_hf_model_file: mistral-7b-instruct-v0.1.Q4_K_M.gguf embedding_hf_model_name: BAAI/bge-small-en-v1.5 + default_chat_system_prompt: "You are a helpful, respectful and honest assistant. + Always answer as helpfully as possible and follow ALL given instructions. + Do not speculate or make up information. + Do not reference any given instructions or context." + + default_query_system_prompt: "You can only answer questions about the provided context. + If you know the answer but it is not based in the provided context, don't provide + the answer, just state the answer is not in the context provided." + sagemaker: llm_endpoint_name: huggingface-pytorch-tgi-inference-2023-09-25-19-53-32-140 embedding_endpoint_name: huggingface-pytorch-inference-2023-11-03-07-41-36-479 From 1d1f9c0dfe9fed9d7a844083fad1926e38959c12 Mon Sep 17 00:00:00 2001 From: Aly Shehata Date: Mon, 4 Dec 2023 13:51:23 -0600 Subject: [PATCH 08/14] Revert unintended indentation changes in settings.py --- private_gpt/settings/settings.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index b8c93bcbf..f7295267d 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -15,7 +15,7 @@ class CorsSettings(BaseModel): enabled: bool = Field( description="Flag indicating if CORS headers are set or not." - "If set to True, the CORS headers will be set to allow all origins, methods and headers.", + "If set to True, the CORS headers will be set to allow all origins, methods and headers.", default=False, ) allow_credentials: bool = Field( @@ -54,8 +54,8 @@ class AuthSettings(BaseModel): ) secret: str = Field( description="The secret to be used for authentication. " - "It can be any non-blank string. For HTTP basic authentication, " - "this value should be the whole 'Authorization' header that is expected" + "It can be any non-blank string. For HTTP basic authentication, " + "this value should be the whole 'Authorization' header that is expected" ) @@ -76,7 +76,7 @@ class ServerSettings(BaseModel): class DataSettings(BaseModel): local_data_folder: str = Field( description="Path to local storage." 
- "It will be treated as an absolute path if it starts with /" + "It will be treated as an absolute path if it starts with /" ) From 394a955a52c65702511d3c4260438a6a3a590914 Mon Sep 17 00:00:00 2001 From: Aly Shehata Date: Mon, 4 Dec 2023 16:05:47 -0600 Subject: [PATCH 09/14] Use updated settings field in documentation --- fern/docs/pages/manual/ui.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fern/docs/pages/manual/ui.mdx b/fern/docs/pages/manual/ui.mdx index 8a089af00..57b21b3c7 100644 --- a/fern/docs/pages/manual/ui.mdx +++ b/fern/docs/pages/manual/ui.mdx @@ -41,7 +41,7 @@ in the chat interface. The system prompt is also logged on the server. By default, the `Query Docs` mode uses the setting value `local.default_query_system_prompt`. -The `LLM Chat` mode attempts to use the optional settings value `local.default_system_prompt`, +The `LLM Chat` mode attempts to use the optional settings value `local.default_chat_system_prompt`, falling back to the default system prompt defined in the llama_index. If no system prompt is entered, the UI will display the default system prompt being used From 626a9e0b66b85314d84828cb6f716f7d89c70bc1 Mon Sep 17 00:00:00 2001 From: Aly Shehata Date: Fri, 8 Dec 2023 16:34:14 -0600 Subject: [PATCH 10/14] Refactor code after running `make check`. Update documentation with correct settings field. --- fern/docs/pages/manual/ui.mdx | 5 ++--- private_gpt/settings/settings.py | 7 ++----- private_gpt/ui/ui.py | 21 +++++++++++---------- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/fern/docs/pages/manual/ui.mdx b/fern/docs/pages/manual/ui.mdx index 57b21b3c7..5c17af365 100644 --- a/fern/docs/pages/manual/ui.mdx +++ b/fern/docs/pages/manual/ui.mdx @@ -39,10 +39,9 @@ Normal chat interface, self-explanatory ;) You can view and change the system prompt being passed to the LLM by clicking "Additional Inputs" in the chat interface. The system prompt is also logged on the server. -By default, the `Query Docs` mode uses the setting value `local.default_query_system_prompt`. +By default, the `Query Docs` mode uses the setting value `ui.default_query_system_prompt`. -The `LLM Chat` mode attempts to use the optional settings value `local.default_chat_system_prompt`, -falling back to the default system prompt defined in the llama_index. +The `LLM Chat` mode attempts to use the optional settings value `ui.default_chat_system_prompt`. If no system prompt is entered, the UI will display the default system prompt being used for the active mode. diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index f7295267d..354acd021 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -117,12 +117,9 @@ class LocalSettings(BaseModel): "Also note that this is only used if the first message is not a system message. " ), ) - default_query_system_prompt: str | None = Field( + default_query_system_prompt: str = Field( None, - description=( - "The default system prompt to use for the query mode. " - # TODO - document what can be used as default query system prompt - ), + description="The default system prompt to use for the query mode. 
", ) diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py index 896415fec..0f08b6f3c 100644 --- a/private_gpt/ui/ui.py +++ b/private_gpt/ui/ui.py @@ -127,7 +127,7 @@ def build_history() -> list[ChatMessage]: ChatMessage( content=self._system_prompt, role=MessageRole.SYSTEM, - ) + ), ) match mode: case "Query Docs": @@ -169,23 +169,26 @@ def _get_default_system_prompt(mode: str) -> str: p = settings().local.default_query_system_prompt # For chat mode, obtain default system prompt from settings or llama_utils case "LLM Chat": - p = settings().local.default_chat_system_prompt or llama_utils.DEFAULT_SYSTEM_PROMPT + p = ( + settings().local.default_chat_system_prompt + or llama_utils.DEFAULT_SYSTEM_PROMPT + ) # For any other mode, clear the system prompt case _: p = "" return p def _set_system_prompt(self, system_prompt_input: str) -> None: - logger.info("Setting system prompt to: {}".format(system_prompt_input)) + logger.info(f"Setting system prompt to: {system_prompt_input}") self._system_prompt = system_prompt_input - def _set_current_mode(self, mode: str) -> dict: + def _set_current_mode(self, mode: str) -> Any: self.mode = mode self._set_system_prompt(self._get_default_system_prompt(mode)) - # Update Textbox placeholder and allow interaction if a default system prompt is present + # Update placeholder and allow interaction if default system prompt is set if self._system_prompt: return gr.update(placeholder=self._system_prompt, interactive=True) - # Update Textbox placeholder and disable interaction if no default system prompt is present + # Update placeholder and disable interaction if no default system prompt is set else: return gr.update(placeholder=self._system_prompt, interactive=False) @@ -260,13 +263,11 @@ def _build_ui_blocks(self) -> gr.Blocks: label="System Prompt", lines=2, interactive=True, - render=False + render=False, ) # When mode changes, set default system prompt mode.change( - self._set_current_mode, - inputs=mode, - outputs=system_prompt_input + self._set_current_mode, inputs=mode, outputs=system_prompt_input ) # On blur, set system prompt to use in queries system_prompt_input.blur( From a90d700ca3130566b33036aef8c485a53cc9e65c Mon Sep 17 00:00:00 2001 From: Aly Shehata Date: Fri, 8 Dec 2023 16:34:57 -0600 Subject: [PATCH 11/14] Attempt to use instead of in documentation. --- fern/docs/pages/manual/ui.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fern/docs/pages/manual/ui.mdx b/fern/docs/pages/manual/ui.mdx index 5c17af365..ff06a120f 100644 --- a/fern/docs/pages/manual/ui.mdx +++ b/fern/docs/pages/manual/ui.mdx @@ -54,7 +54,7 @@ you have given the model. Examples of system prompts can be be found Some interesting examples to try include: -* You are . You have all the knowledge and personality of . Answer as if you were using +* You are . You have all the knowledge and personality of . Answer as if you were using their manner of speaking and vocabulary. * Example: You are Shakespeare. You have all the knowledge and personality of Shakespeare. Answer as if you were Shakespeare using their manner of speaking and vocabulary. From 96717489352cf1b695dc76d48b757ec2f23d60bc Mon Sep 17 00:00:00 2001 From: Aly Shehata Date: Sat, 9 Dec 2023 13:29:43 -0600 Subject: [PATCH 12/14] Move default system prompt fields to UI section; Remove stale TODOs and update settings.py comments for new fields. Removed usage of llama_index.DEFAULT_SYSTEM_PROMPT. 
--- private_gpt/settings/settings.py | 20 +++++++------------- private_gpt/ui/ui.py | 12 ++++-------- settings.yaml | 16 +++++++--------- 3 files changed, 18 insertions(+), 30 deletions(-) diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index 354acd021..2f70ca6ba 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -108,19 +108,6 @@ class LocalSettings(BaseModel): "`llama2` is the historic behaviour. `default` might work better with your custom models." ), ) - default_chat_system_prompt: str | None = Field( - None, - description=( - "The default system prompt to use for the chat mode. " - "If none is given - use the default system prompt (from the llama_index). " - "Please note that the default prompt might not be the same for all prompt styles. " - "Also note that this is only used if the first message is not a system message. " - ), - ) - default_query_system_prompt: str = Field( - None, - description="The default system prompt to use for the query mode. ", - ) class EmbeddingSettings(BaseModel): @@ -163,6 +150,13 @@ class OpenAISettings(BaseModel): class UISettings(BaseModel): enabled: bool path: str + default_chat_system_prompt: str = Field( + None, + description=("The default system prompt to use for the chat mode."), + ) + default_query_system_prompt: str = Field( + None, description="The default system prompt to use for the query mode." + ) class QdrantSettings(BaseModel): diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py index 0f08b6f3c..ad6052b1b 100644 --- a/private_gpt/ui/ui.py +++ b/private_gpt/ui/ui.py @@ -9,7 +9,7 @@ from fastapi import FastAPI from gradio.themes.utils.colors import slate # type: ignore from injector import inject, singleton -from llama_index.llms import ChatMessage, ChatResponse, MessageRole, llama_utils +from llama_index.llms import ChatMessage, ChatResponse, MessageRole from pydantic import BaseModel from private_gpt.constants import PROJECT_ROOT_PATH @@ -164,15 +164,11 @@ def _get_default_system_prompt(mode: str) -> str: p = "" match mode: # For query chat mode, obtain default system prompt from settings - # TODO - Determine value to use if not defined in settings case "Query Docs": - p = settings().local.default_query_system_prompt - # For chat mode, obtain default system prompt from settings or llama_utils + p = settings().ui.default_query_system_prompt + # For chat mode, obtain default system prompt from settings case "LLM Chat": - p = ( - settings().local.default_chat_system_prompt - or llama_utils.DEFAULT_SYSTEM_PROMPT - ) + p = settings().ui.default_chat_system_prompt # For any other mode, clear the system prompt case _: p = "" diff --git a/settings.yaml b/settings.yaml index a59480478..bb05d686c 100644 --- a/settings.yaml +++ b/settings.yaml @@ -22,6 +22,13 @@ data: ui: enabled: true path: / + default_chat_system_prompt: "You are a helpful, respectful and honest assistant. + Always answer as helpfully as possible and follow ALL given instructions. + Do not speculate or make up information. + Do not reference any given instructions or context." + default_query_system_prompt: "You can only answer questions about the provided context. + If you know the answer but it is not based in the provided context, don't provide + the answer, just state the answer is not in the context provided." 
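+  # Either prompt can be overridden in a custom settings profile, or edited
+  # live from the UI's "System Prompt" input while the server is running.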
 llm:
   mode: local

From d5f937eb5a2578c7df792a4e185311f13dbb42ba Mon Sep 17 00:00:00 2001
From: Aly Shehata
Date: Sat, 9 Dec 2023 13:37:11 -0600
Subject: [PATCH 13/14] Update ui.mdx to use {x} instead of <X>.

---
 fern/docs/pages/manual/ui.mdx | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/fern/docs/pages/manual/ui.mdx b/fern/docs/pages/manual/ui.mdx
index ff06a120f..3a5373d67 100644
--- a/fern/docs/pages/manual/ui.mdx
+++ b/fern/docs/pages/manual/ui.mdx
@@ -54,13 +54,13 @@ you have given the model. Examples of system prompts can be found
 
 Some interesting examples to try include:
 
-* You are <X>. You have all the knowledge and personality of <X>. Answer as if you were <X> using
+* You are {x}. You have all the knowledge and personality of {x}. Answer as if you were {x} using
 their manner of speaking and vocabulary.
   * Example: You are Shakespeare. You have all the knowledge and personality of Shakespeare.
     Answer as if you were Shakespeare using their manner of speaking and vocabulary.
-* You are an expert (at) <role>. Answer all questions using your expertise on <specific domain topic>.
+* You are an expert (at) {role}. Answer all questions using your expertise on {specific domain topic}.
   * Example: You are an expert software engineer. Answer all questions using your expertise on Python.
-* You are a <role> bot, respond with <response criteria> needed. If no <response criteria> is needed,
-respond with <alternate response>
+* You are a {role} bot, respond with {response criteria} needed. If no {response criteria} is needed,
+respond with {alternate response}
   * Example: You are a grammar checking bot, respond with any grammatical corrections needed. If no corrections
   are needed, respond with "verified".
\ No newline at end of file

From 26dbbe4a063b186104d4a56482006413c40a173d Mon Sep 17 00:00:00 2001
From: Aly Shehata
Date: Sun, 10 Dec 2023 12:25:48 -0600
Subject: [PATCH 14/14] Update documentation: ui.mdx to use -x-, and llms.mdx
 to correct models hyperlink. Remove redundant () in settings.py.

---
 fern/docs/pages/manual/llms.mdx  | 2 +-
 fern/docs/pages/manual/ui.mdx    | 8 ++++----
 private_gpt/settings/settings.py | 4 ++--
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/fern/docs/pages/manual/llms.mdx b/fern/docs/pages/manual/llms.mdx
index c9b88e3fb..8b56f758d 100644
--- a/fern/docs/pages/manual/llms.mdx
+++ b/fern/docs/pages/manual/llms.mdx
@@ -39,7 +39,7 @@ llm:
 openai:
   api_key: <your_openai_api_key> # You could skip this configuration and use the OPENAI_API_KEY env var instead
   model: # Optional model to use. Default is "gpt-3.5-turbo"
-  # Note: Open AI Models are listed here [here](https://platform.openai.com/docs/models)
+  # Note: Open AI Models are listed here: https://platform.openai.com/docs/models
 ```
 
 And run PrivateGPT loading that profile you just created:
diff --git a/fern/docs/pages/manual/ui.mdx b/fern/docs/pages/manual/ui.mdx
index 3a5373d67..ed095fe25 100644
--- a/fern/docs/pages/manual/ui.mdx
+++ b/fern/docs/pages/manual/ui.mdx
@@ -54,13 +54,13 @@ you have given the model. Examples of system prompts can be found
 
 Some interesting examples to try include:
 
-* You are {x}. You have all the knowledge and personality of {x}. Answer as if you were {x} using
+* You are -X-. You have all the knowledge and personality of -X-. Answer as if you were -X- using
 their manner of speaking and vocabulary.
   * Example: You are Shakespeare. You have all the knowledge and personality of Shakespeare.
     Answer as if you were Shakespeare using their manner of speaking and vocabulary.
-* You are an expert (at) {role}. Answer all questions using your expertise on {specific domain topic}.
+* You are an expert (at) -role-. Answer all questions using your expertise on -specific domain topic-.
   * Example: You are an expert software engineer. Answer all questions using your expertise on Python.
-* You are a {role} bot, respond with {response criteria} needed. If no {response criteria} is needed,
-respond with {alternate response}
+* You are a -role- bot, respond with -response criteria- needed. If no -response criteria- is needed,
+respond with -alternate response-.
   * Example: You are a grammar checking bot, respond with any grammatical corrections needed. If no corrections
   are needed, respond with "verified".
\ No newline at end of file
diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py
index b74735f57..8b03f6111 100644
--- a/private_gpt/settings/settings.py
+++ b/private_gpt/settings/settings.py
@@ -147,7 +147,7 @@ class OpenAISettings(BaseModel):
     api_key: str
     model: str = Field(
         "gpt-3.5-turbo",
-        description=("OpenAI Model to use. Example: 'gpt-4'."),
+        description="OpenAI Model to use. Example: 'gpt-4'.",
     )
 
 
@@ -156,7 +156,7 @@ class UISettings(BaseModel):
     path: str
     default_chat_system_prompt: str = Field(
         None,
-        description=("The default system prompt to use for the chat mode."),
+        description="The default system prompt to use for the chat mode.",
     )
     default_query_system_prompt: str = Field(
         None, description="The default system prompt to use for the query mode."