diff --git a/.gitignore b/.gitignore
index f1d43c6419..07cd6d5e2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,33 @@
+# See https://help.github.com/ignore-files/ for more about ignoring files.
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# Distribution / packaging
+dist/
+build/
+*.egg-info/
+*.egg
+
+# Virtual environments
+venv/
+ENV/
+
+# IDE-specific files
+.vscode/
+
+# Compiled Python modules
+*.pyc
+*.pyo
+*.pyd
+
+# macOS specific files
+.DS_Store
+
+# Windows specific files
+Thumbs.db
+
+# this application's specific files
 archive
diff --git a/ai.py b/ai.py
index dc332cf6eb..8769180abb 100644
--- a/ai.py
+++ b/ai.py
@@ -1,4 +1,3 @@
-
 import openai
 
 
@@ -8,15 +7,15 @@ def __init__(self, **kwargs):
 
     def start(self, system, user):
         messages = [
-                {"role": "system", "content": system},
-                {"role": "user", "content": user},
-                ]
+            {"role": "system", "content": system},
+            {"role": "user", "content": user},
+        ]
         return self.next(messages)
 
     def fsystem(self, msg):
         return {"role": "system", "content": msg}
-    
+
     def fuser(self, msg):
         return {"role": "user", "content": msg}
 
 
@@ -25,9 +24,7 @@ def next(self, messages: list[dict[str, str]], prompt=None):
             messages = messages + [{"role": "user", "content": prompt}]
 
         response = openai.ChatCompletion.create(
-            messages=messages,
-            stream=True,
-            **self.kwargs
+            messages=messages, stream=True, **self.kwargs
         )
 
         chat = []
@@ -36,4 +33,4 @@
             msg = delta.get('content', '')
             print(msg, end="")
             chat.append(msg)
-        return messages + [{"role": "assistant", "content": "".join(chat)}]
\ No newline at end of file
+        return messages + [{"role": "assistant", "content": "".join(chat)}]
diff --git a/chat_to_files.py b/chat_to_files.py
index 6a3364314f..7a247a610c 100644
--- a/chat_to_files.py
+++ b/chat_to_files.py
@@ -1,7 +1,7 @@
 import re
 
 
-def parse_chat(chat):# -> List[Tuple[str, str]]:
+def parse_chat(chat):  # -> List[Tuple[str, str]]:
     # Get all ``` blocks
     regex = r"```(.*?)```"
 
@@ -15,7 +15,7 @@ def parse_chat(chat):# -> List[Tuple[str, str]]:
         code = "\n".join(code)
         # Add the file to the list
         files.append((path, code))
-    
+
     return files
 
 
@@ -24,4 +24,4 @@ def to_files(chat, workspace):
     files = parse_chat(chat)
 
     for file_name, file_content in files:
-        workspace[file_name] = file_content
\ No newline at end of file
+        workspace[file_name] = file_content
diff --git a/db.py b/db.py
index bb7a895d51..96d806d7cb 100644
--- a/db.py
+++ b/db.py
@@ -1,6 +1,6 @@
+import os
 from dataclasses import dataclass
-import os
 from pathlib import Path
 
 
@@ -25,4 +25,4 @@ class DBs:
     logs: DB
     identity: DB
     input: DB
-    workspace: DB
\ No newline at end of file
+    workspace: DB
diff --git a/main.py b/main.py
index 66a7d9f54a..ffd246fe55 100644
--- a/main.py
+++ b/main.py
@@ -1,14 +1,11 @@
 import json
-import os
 import pathlib
-from typing import Optional
-import openai
-from chat_to_files import to_files
-from ai import AI
-from steps import STEPS
-from db import DB, DBs
+
 import typer
 
+from ai import AI
+from db import DB, DBs
+from steps import STEPS
 
 
 app = typer.Typer()
@@ -16,11 +13,13 @@
 
 @app.command()
 def chat(
     project_path: str = typer.Argument(None, help="path"),
-    run_prefix: str = typer.Option("", help="run prefix, if you want to run multiple variants of the same project and later compare them"),
+    run_prefix: str = typer.Option(
+        "",
+        help="run prefix, if you want to run multiple variants of the same project and later compare them",
+    ),
     model: str = "gpt-4",
     temperature: float = 0.1,
 ):
-
     if project_path is None:
         project_path = str(pathlib.Path(__file__).parent / "example")
@@ -41,7 +40,6 @@ def chat(
         identity=DB(pathlib.Path(__file__).parent / "identity"),
     )
 
-
     for step in STEPS:
         messages = step(ai, dbs)
         dbs.logs[step.__name__] = json.dumps(messages)
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000..be74db43b9
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,59 @@
+# https://beta.ruff.rs/docs/configuration/#using-rufftoml
+[tool.ruff]
+select = ["F", "E", "W", "I001"]
+line-length = 90
+show-fixes = false
+target-version = "py311"
+task-tags = ["TODO", "FIXME"]
+exclude = [
+    ".bzr",
+    ".direnv",
+    ".eggs",
+    ".git",
+    ".ruff_cache",
+    ".svn",
+    ".tox",
+    ".venv",
+    "__pypackages__",
+    "_build",
+    "buck-out",
+    "build",
+    "dist",
+    "node_modules",
+    "venv",
+]
+
+[tool.ruff.isort]
+known-first-party = []
+known-third-party = []
+section-order = [
+    "future",
+    "standard-library",
+    "third-party",
+    "first-party",
+    "local-folder",
+]
+combine-as-imports = true
+split-on-trailing-comma = false
+lines-between-types = 1
+
+# https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html
+[tool.black]
+line-length = 90
+target-version = ["py311"]
+include = '\.pyi?$'
+exclude = '''
+(
+  /(
+    \.direnv
+    | \.eggs
+    | \.git
+    | \.tox
+    | \.venv
+    | _build
+    | build
+    | dist
+    | venv
+  )/
+)
+'''
diff --git a/requirements.txt b/requirements.txt
index 02c40f6710..cffa528beb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,4 @@
-openai
-typer
+black==23.3.0
+openai==0.27.8
+ruff==0.0.272
+typer==0.9.0
diff --git a/scripts/rerun_edited_message_logs.py b/scripts/rerun_edited_message_logs.py
index 40e227efb1..e0ff9bb3bd 100644
--- a/scripts/rerun_edited_message_logs.py
+++ b/scripts/rerun_edited_message_logs.py
@@ -17,7 +17,6 @@ def chat(
     temperature: float = 0.1,
     max_tokens: int = 4096,
 ):
-
     ai = AI(
         model=model,
         temperature=temperature,
diff --git a/steps.py b/steps.py
index a5028f4b7e..08c73f620c 100644
--- a/steps.py
+++ b/steps.py
@@ -1,20 +1,28 @@
+import json
+
 from ai import AI
 from chat_to_files import to_files
 from db import DBs
-import json
 
 
 def setup_sys_prompt(dbs):
     return dbs.identity['setup'] + '\nUseful to know:\n' + dbs.identity['philosophy']
 
+
 def run(ai: AI, dbs: DBs):
     '''Run the AI on the main prompt and save the results'''
-    messages = ai.start(setup_sys_prompt(dbs), dbs.input['main_prompt'])
+    messages = ai.start(
+        setup_sys_prompt(dbs),
+        dbs.input['main_prompt'],
+    )
     to_files(messages[-1]['content'], dbs.workspace)
     return messages
 
+
def clarify(ai: AI, dbs: DBs):
-    '''Ask the user if they want to clarify anything and save the results to the workspace'''
+    '''
+    Ask the user if they want to clarify anything and save the results to the workspace
+    '''
     messages = [ai.fsystem(dbs.identity['qa'])]
     user = dbs.input['main_prompt']
     while True:
@@ -31,35 +39,30 @@ def clarify(ai: AI, dbs: DBs):
             break
 
         user += (
-          '\n\n'
-          'Is anything else unclear? If yes, only answer in the form:\n'
+            '\n\n'
+            'Is anything else unclear? If yes, only answer in the form:\n'
             '{remaining unclear areas} remaining questions.\n'
             '{Next question}\n'
             'If everything is sufficiently clear, only answer "no".'
-          )
+        )
 
     print()
     return messages
 
+
 def run_clarified(ai: AI, dbs: DBs):
     # get the messages from previous step
     messages = json.loads(dbs.logs[clarify.__name__])
-    messages = (
-        [
-            ai.fsystem(setup_sys_prompt(dbs)),
-        ] +
-        messages[1:]
-    )
+    messages = [
+        ai.fsystem(setup_sys_prompt(dbs)),
+    ] + messages[1:]
     messages = ai.next(messages, dbs.identity['use_qa'])
     to_files(messages[-1]['content'], dbs.workspace)
     return messages
 
 
-STEPS=[
-    clarify,
-    run_clarified
-]
+STEPS = [clarify, run_clarified]
 
 # Future steps that can be added:
 # improve_files,