diff --git a/README.md b/README.md
index fb96fc0a9..0af665e99 100755
--- a/README.md
+++ b/README.md
@@ -6,6 +6,14 @@ This release includes model weights and starting code for pretrained and fine-tu
 
 This repository is intended as a minimal example to load [Llama 2](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/) models and run inference. For more detailed examples leveraging HuggingFace, see [llama-recipes](https://github.com/facebookresearch/llama-recipes/).
 
+## System Prompt Update
+
+### Observed Issue
+We received feedback from the community on our prompt template and we are providing an update to reduce the false refusal rates seen. False refusals occur when the model incorrectly refuses to answer a question that it should, for example due to overly broad instructions to be cautious in how it provides responses.
+
+### Updated approach
+Based on evaluation and analysis, we recommend the removal of the system prompt as the default setting. Pull request [#626](https://github.com/facebookresearch/llama/pull/626) removes the system prompt as the default option, but still provides an example to help enable experimentation for those using it.
+
 ## Download
 
 ⚠️ **7/18: We're aware of people encountering a number of download issues today. Anyone still encountering issues should remove all local files, re-clone the repository, and [request a new download link](https://ai.meta.com/resources/models-and-libraries/llama-downloads/). It's critical to do all of these in case you have local corrupt files. When you receive the email, copy *only* the link text - it should begin with https://download.llamameta.net and not with https://l.facebook.com, which will give errors.**
diff --git a/example_chat_completion.py b/example_chat_completion.py
index 5043bc5f9..02583d955 100644
--- a/example_chat_completion.py
+++ b/example_chat_completion.py
@@ -14,7 +14,7 @@ def main(
     temperature: float = 0.6,
     top_p: float = 0.9,
     max_seq_len: int = 512,
-    max_batch_size: int = 4,
+    max_batch_size: int = 8,
     max_gen_len: Optional[int] = None,
 ):
     generator = Llama.build(
@@ -52,6 +52,16 @@ def main(
             },
             {"role": "user", "content": "How to go from Beijing to NY?"},
         ],
+        [
+            {
+                "role": "system",
+                "content": """\
+You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
+
+If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
+            },
+            {"role": "user", "content": "Write a brief birthday message to John"},
+        ],
     ]
     results = generator.chat_completion(
         dialogs,  # type: ignore
diff --git a/llama/generation.py b/llama/generation.py
index 1f37856ef..200aa0ced 100755
--- a/llama/generation.py
+++ b/llama/generation.py
@@ -43,10 +43,6 @@ class ChatPrediction(TypedDict, total=False):
 B_INST, E_INST = "[INST]", "[/INST]"
 B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
 
-DEFAULT_SYSTEM_PROMPT = """\
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
-
-If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
 
 
 class Llama:
@@ -222,22 +218,16 @@ def chat_completion(
             max_gen_len = self.model.params.max_seq_len - 1
         prompt_tokens = []
         for dialog in dialogs:
-            if dialog[0]["role"] != "system":
+            if dialog[0]["role"] == "system":
                 dialog = [
                     {
-                        "role": "system",
-                        "content": DEFAULT_SYSTEM_PROMPT,
+                        "role": dialog[1]["role"],
+                        "content": B_SYS
+                        + dialog[0]["content"]
+                        + E_SYS
+                        + dialog[1]["content"],
                     }
-                ] + dialog
-            dialog = [
-                {
-                    "role": dialog[1]["role"],
-                    "content": B_SYS
-                    + dialog[0]["content"]
-                    + E_SYS
-                    + dialog[1]["content"],
-                }
-            ] + dialog[2:]
+                ] + dialog[2:]
             assert all([msg["role"] == "user" for msg in dialog[::2]]) and all(
                 [msg["role"] == "assistant" for msg in dialog[1::2]]
             ), (
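After this patch, `chat_completion` no longer injects `DEFAULT_SYSTEM_PROMPT`: a dialog only gets a system prompt if the caller places a `"role": "system"` message first, in which case it is wrapped in `B_SYS`/`E_SYS` and folded into the following user turn. The sketch below shows what a caller looks like under the new behavior; the checkpoint and tokenizer paths and the short system prompt text are placeholder values, and like the bundled example scripts it is meant to be launched with `torchrun`.

```python
from llama import Llama  # assumes the llama package from this repository is installed

# Placeholder paths; point these at your downloaded Llama 2 chat checkpoint.
CKPT_DIR = "llama-2-7b-chat/"
TOKENIZER_PATH = "tokenizer.model"


def main():
    generator = Llama.build(
        ckpt_dir=CKPT_DIR,
        tokenizer_path=TOKENIZER_PATH,
        max_seq_len=512,
        max_batch_size=8,
    )

    dialogs = [
        # No system message: the model now sees only the user turn,
        # instead of getting the old default system prompt prepended.
        [{"role": "user", "content": "Write a brief birthday message to John"}],
        # Explicit system message (placeholder text): chat_completion wraps it
        # in B_SYS/E_SYS and merges it into the first user turn.
        [
            {"role": "system", "content": "Answer in one short sentence."},
            {"role": "user", "content": "Write a brief birthday message to John"},
        ],
    ]

    results = generator.chat_completion(
        dialogs,  # type: ignore
        max_gen_len=None,
        temperature=0.6,
        top_p=0.9,
    )

    for dialog, result in zip(dialogs, results):
        print(result["generation"]["role"], ":", result["generation"]["content"])


if __name__ == "__main__":
    # Run with e.g.: torchrun --nproc_per_node 1 this_script.py
    main()
```

The point of the change is visible in the first dialog: previously it would have been silently prefixed with the long default system prompt, whereas now any cautionary instructions are opt-in and fully controlled by the caller.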