From 984379fe71d294136d7d52bfb4d6c8ed053aed1e Mon Sep 17 00:00:00 2001 From: Sander Hilven Date: Fri, 1 Sep 2023 09:53:17 +0200 Subject: [PATCH 1/7] Added Azure OpenAI endpoint. Tested and confirmed working. --- pilot/.env.example | 5 +++++ pilot/utils/llm_connection.py | 42 +++++++++++++++++++++++++++-------- 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/pilot/.env.example b/pilot/.env.example index d53210e..5041cce 100644 --- a/pilot/.env.example +++ b/pilot/.env.example @@ -1,4 +1,9 @@ +#OPENAI or AZURE +ENDPOINT=OPENAI OPENAI_API_KEY= +AZURE_API_KEY= +AZURE_ENDPOINT= +AZURE_MODEL_NAME= DB_NAME=gpt-pilot DB_HOST=localhost DB_PORT=5432 diff --git a/pilot/utils/llm_connection.py b/pilot/utils/llm_connection.py index c191d8e..cace0d5 100644 --- a/pilot/utils/llm_connection.py +++ b/pilot/utils/llm_connection.py @@ -46,8 +46,15 @@ def get_tokens_in_messages(messages: List[str]) -> int: tokenized_messages = [tokenizer.encode(message['content']) for message in messages] return sum(len(tokens) for tokens in tokenized_messages) - -def num_tokens_from_functions(functions, model="gpt-4"): +# Check if the ENDPOINT is AZURE +endpoint = os.getenv('ENDPOINT') +if endpoint == 'AZURE': + # If yes, get the model name from .ENV file + model = os.getenv('AZURE_MODEL_NAME') +else: + model="gpt-4" + +def num_tokens_from_functions(functions, model=model): """Return the number of tokens used by a list of functions.""" encoding = tiktoken.get_encoding("cl100k_base") @@ -94,7 +101,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO raise ValueError(f'Too many tokens in messages: {tokens_in_messages}. 
Please try a different test.') gpt_data = { - 'model': 'gpt-4', + 'model': model, 'n': 1, 'max_tokens': min(4096, MAX_GPT_MODEL_TOKENS - tokens_in_messages), 'temperature': 1, @@ -172,15 +179,32 @@ def stream_gpt_completion(data, req_type): # spinner = spinner_start(colored("Waiting for OpenAI API response...", 'yellow')) # print(colored("Stream response from OpenAI:", 'yellow')) api_key = os.getenv("OPENAI_API_KEY") + azure_api_key = os.getenv('AZURE_API_KEY') + headers = {'Content-Type': 'application/json', 'api-key': azure_api_key} + openai_endpoint = 'https://api.openai.com/v1/chat/completions' logger.info(f'Request data: {data}') - response = requests.post( - 'https://api.openai.com/v1/chat/completions', - headers={'Content-Type': 'application/json', 'Authorization': 'Bearer ' + api_key}, - json=data, - stream=True - ) + # Check if the ENDPOINT is AZURE + if endpoint == 'AZURE': + # If yes, get the AZURE_ENDPOINT from .ENV file + azure_endpoint = os.getenv('AZURE_ENDPOINT') + + # Send the request to the Azure endpoint + response = requests.post( + azure_endpoint + '/openai/deployments/GPT-4/chat/completions?api-version=2023-05-15', + headers=headers, + json=data, + stream=True + ) + else: + # If not, send the request to the OpenAI endpoint + response = requests.post( + openai_endpoint, + headers=headers, + json=data, + stream=True + ) # Log the response status code and message logger.info(f'Response status code: {response.status_code}') From 660047a071ec685a2217188a3b4d6c07f900dc6b Mon Sep 17 00:00:00 2001 From: Sander Hilven Date: Fri, 1 Sep 2023 09:56:39 +0200 Subject: [PATCH 2/7] Hardcoded model in endpoint URL, now fixed. 
--- pilot/utils/llm_connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pilot/utils/llm_connection.py b/pilot/utils/llm_connection.py index cace0d5..baeda37 100644 --- a/pilot/utils/llm_connection.py +++ b/pilot/utils/llm_connection.py @@ -192,7 +192,7 @@ def stream_gpt_completion(data, req_type): # Send the request to the Azure endpoint response = requests.post( - azure_endpoint + '/openai/deployments/GPT-4/chat/completions?api-version=2023-05-15', + azure_endpoint + '/openai/deployments/' + model + '/chat/completions?api-version=2023-05-15', headers=headers, json=data, stream=True From a4d520763f33a2c097abd94b58fa9fae9848b0ad Mon Sep 17 00:00:00 2001 From: Sander Hilven Date: Fri, 1 Sep 2023 10:34:12 +0200 Subject: [PATCH 3/7] Added model selection to .env and update readme --- README.md | 2 +- pilot/.env.example | 3 ++- pilot/utils/llm_connection.py | 8 ++------ 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 32fe06e..6c798a0 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ Obviously, it still can't create any production-ready app but the general concep 5. `pip install -r requirements.txt` (install the dependencies) 6. `cd pilot` 7. `mv .env.example .env` (create the .env file) -8. Add your OpenAI API key and the database info to the `.env` file +8. Add your environment (OpenAI/Azure), your API key and the database info to the `.env` file 9. `python db_init.py` (initialize the database) 10. 
`python main.py` (start GPT Pilot) diff --git a/pilot/.env.example b/pilot/.env.example index 5041cce..da30616 100644 --- a/pilot/.env.example +++ b/pilot/.env.example @@ -3,7 +3,8 @@ ENDPOINT=OPENAI OPENAI_API_KEY= AZURE_API_KEY= AZURE_ENDPOINT= -AZURE_MODEL_NAME= +#In case of Azure endpoint, change this to your deployed model name +MODEL_NAME=gpt-4 DB_NAME=gpt-pilot DB_HOST=localhost DB_PORT=5432 diff --git a/pilot/utils/llm_connection.py b/pilot/utils/llm_connection.py index baeda37..c53f909 100644 --- a/pilot/utils/llm_connection.py +++ b/pilot/utils/llm_connection.py @@ -46,13 +46,9 @@ def get_tokens_in_messages(messages: List[str]) -> int: tokenized_messages = [tokenizer.encode(message['content']) for message in messages] return sum(len(tokens) for tokens in tokenized_messages) -# Check if the ENDPOINT is AZURE +#get endpoint and model name from .ENV file +model = os.getenv('MODEL_NAME') endpoint = os.getenv('ENDPOINT') -if endpoint == 'AZURE': - # If yes, get the model name from .ENV file - model = os.getenv('AZURE_MODEL_NAME') -else: - model="gpt-4" def num_tokens_from_functions(functions, model=model): """Return the number of tokens used by a list of functions.""" From 34a53972093355992324b9c38f8720257d6139eb Mon Sep 17 00:00:00 2001 From: Sander Hilven Date: Fri, 1 Sep 2023 10:41:39 +0200 Subject: [PATCH 4/7] Added MAX_TOKENS to the .env as a variable --- pilot/.env.example | 1 + pilot/const/llm.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pilot/.env.example b/pilot/.env.example index da30616..e7b661c 100644 --- a/pilot/.env.example +++ b/pilot/.env.example @@ -5,6 +5,7 @@ AZURE_API_KEY= AZURE_ENDPOINT= #In case of Azure endpoint, change this to your deployed model name MODEL_NAME=gpt-4 +MAX_TOKENS=8192 DB_NAME=gpt-pilot DB_HOST=localhost DB_PORT=5432 diff --git a/pilot/const/llm.py b/pilot/const/llm.py index ca42267..8ae8ab8 100644 --- a/pilot/const/llm.py +++ b/pilot/const/llm.py @@ -1,4 +1,5 @@ +import os 
+MAX_GPT_MODEL_TOKENS = int(os.getenv('MAX_TOKENS')) MIN_TOKENS_FOR_GPT_RESPONSE = 600 -MAX_GPT_MODEL_TOKENS = 8192 MAX_QUESTIONS = 5 -END_RESPONSE = "EVERYTHING_CLEAR" +END_RESPONSE = "EVERYTHING_CLEAR" \ No newline at end of file From af6a972cba580e76c69c5746882ad0bcc406941a Mon Sep 17 00:00:00 2001 From: Dani Acosta Date: Tue, 5 Sep 2023 00:15:21 +0200 Subject: [PATCH 5/7] Add OPENAI_MODEL env var Adds a env variable OPENAI_MODEL to be able to use different models to GPT-4 --- pilot/utils/llm_connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pilot/utils/llm_connection.py b/pilot/utils/llm_connection.py index c191d8e..58f4b6a 100644 --- a/pilot/utils/llm_connection.py +++ b/pilot/utils/llm_connection.py @@ -94,7 +94,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO raise ValueError(f'Too many tokens in messages: {tokens_in_messages}. Please try a different test.') gpt_data = { - 'model': 'gpt-4', + 'model': os.getenv('OPENAI_MODEL', 'gpt-4'), 'n': 1, 'max_tokens': min(4096, MAX_GPT_MODEL_TOKENS - tokens_in_messages), 'temperature': 1, From a9ead6ecbbfa62a8aa1cf9d6f05a49d423f1a8af Mon Sep 17 00:00:00 2001 From: Zvonimir Sabljic Date: Tue, 5 Sep 2023 22:50:48 +0200 Subject: [PATCH 6/7] Fix to enable regular OpenAI access --- pilot/utils/llm_connection.py | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/pilot/utils/llm_connection.py b/pilot/utils/llm_connection.py index c53f909..8fce916 100644 --- a/pilot/utils/llm_connection.py +++ b/pilot/utils/llm_connection.py @@ -174,33 +174,25 @@ def stream_gpt_completion(data, req_type): # spinner = spinner_start(colored("Waiting for OpenAI API response...", 'yellow')) # print(colored("Stream response from OpenAI:", 'yellow')) - api_key = os.getenv("OPENAI_API_KEY") - azure_api_key = os.getenv('AZURE_API_KEY') - headers = {'Content-Type': 'application/json', 'api-key': azure_api_key} - openai_endpoint = 
'https://api.openai.com/v1/chat/completions' logger.info(f'Request data: {data}') # Check if the ENDPOINT is AZURE if endpoint == 'AZURE': # If yes, get the AZURE_ENDPOINT from .ENV file - azure_endpoint = os.getenv('AZURE_ENDPOINT') - - # Send the request to the Azure endpoint - response = requests.post( - azure_endpoint + '/openai/deployments/' + model + '/chat/completions?api-version=2023-05-15', - headers=headers, - json=data, - stream=True - ) + endpoint_url = os.getenv('AZURE_ENDPOINT') + '/openai/deployments/' + model + '/chat/completions?api-version=2023-05-15' + headers = {'Content-Type': 'application/json', 'api-key': os.getenv('AZURE_API_KEY')} else: # If not, send the request to the OpenAI endpoint - response = requests.post( - openai_endpoint, - headers=headers, - json=data, - stream=True - ) + headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + os.getenv("OPENAI_API_KEY")} + endpoint_url = 'https://api.openai.com/v1/chat/completions' + + response = requests.post( + endpoint_url, + headers=headers, + json=data, + stream=True + ) # Log the response status code and message logger.info(f'Response status code: {response.status_code}') From f91da2b5eb097795ed60a439e601dea8d0894455 Mon Sep 17 00:00:00 2001 From: Zvonimir Sabljic Date: Tue, 5 Sep 2023 23:13:09 +0200 Subject: [PATCH 7/7] Revert "Added back the functionality for tech lead to break down the project and the developer to code task by task" This reverts commit 990eb0d1823ee499249033fc8e4a23ab5c279ed4. 
--- pilot/const/common.py | 2 +- pilot/helpers/Project.py | 10 +++---- pilot/helpers/agents/Developer.py | 29 +++++-------------- pilot/prompts/development/plan.prompt | 8 +---- .../prompts/development/task/breakdown.prompt | 13 ++------- 5 files changed, 18 insertions(+), 44 deletions(-) diff --git a/pilot/const/common.py b/pilot/const/common.py index 05269cb..6e39416 100644 --- a/pilot/const/common.py +++ b/pilot/const/common.py @@ -11,8 +11,8 @@ STEPS = [ 'user_stories', 'user_tasks', 'architecture', - 'environment_setup', 'development_planning', + 'environment_setup', 'coding' ] diff --git a/pilot/helpers/Project.py b/pilot/helpers/Project.py index 9e605e2..9aa92a1 100644 --- a/pilot/helpers/Project.py +++ b/pilot/helpers/Project.py @@ -76,11 +76,8 @@ class Project: self.architect = Architect(self) self.architecture = self.architect.get_architecture() - self.developer = Developer(self) - self.developer.set_up_environment(); - - self.tech_lead = TechLead(self) - self.development_plan = self.tech_lead.create_development_plan() + # self.tech_lead = TechLead(self) + # self.development_plan = self.tech_lead.create_development_plan() # TODO move to constructor eventually if 'skip_until_dev_step' in self.args: @@ -94,6 +91,9 @@ class Project: self.save_files_snapshot(self.skip_until_dev_step) # TODO END + self.developer = Developer(self) + self.developer.set_up_environment(); + self.developer.start_coding() def get_directory_tree(self, with_descriptions=False): diff --git a/pilot/helpers/agents/Developer.py b/pilot/helpers/agents/Developer.py index 534e9bf..a0074bc 100644 --- a/pilot/helpers/agents/Developer.py +++ b/pilot/helpers/agents/Developer.py @@ -30,16 +30,13 @@ class Developer(Agent): print(colored(f"Ok, great, now, let's start with the actual development...\n", "green", attrs=['bold'])) logger.info(f"Starting to create the actual code...") - for i, dev_task in enumerate(self.project.development_plan): - self.implement_task(i, dev_task) + 
self.implement_task() # DEVELOPMENT END logger.info('The app is DONE!!! Yay...you can use it now.') - def implement_task(self, i, development_task=None): - print(colored(f'Implementing task #{i + 1}: ', 'green', attrs=['bold']) + colored(f' {development_task["description"]}\n', 'green')); - + def implement_task(self): convo_dev_task = AgentConvo(self) task_description = convo_dev_task.send_message('development/task/breakdown.prompt', { "name": self.project.args['name'], @@ -50,16 +47,13 @@ class Developer(Agent): "technologies": self.project.architecture, "array_of_objects_to_string": array_of_objects_to_string, "directory_tree": self.project.get_directory_tree(True), - "current_task_index": i, - "development_tasks": self.project.development_plan, - "files": self.project.get_all_coded_files(), }) task_steps = convo_dev_task.send_message('development/parse_task.prompt', {}, IMPLEMENT_TASK) convo_dev_task.remove_last_x_messages(2) - self.execute_task(convo_dev_task, task_steps, development_task=development_task, continue_development=True) + self.execute_task(convo_dev_task, task_steps, continue_development=True) - def execute_task(self, convo, task_steps, test_command=None, reset_convo=True, test_after_code_changes=True, continue_development=False, development_task=None): + def execute_task(self, convo, task_steps, test_command=None, reset_convo=True, test_after_code_changes=True, continue_development=False): function_uuid = str(uuid.uuid4()) convo.save_branch(function_uuid) @@ -123,21 +117,14 @@ class Developer(Agent): if self.run_command.endswith('`'): self.run_command = self.run_command[:-1] - if development_task is not None: - convo.remove_last_x_messages(2) - detailed_user_review_goal = convo.send_message('development/define_user_review_goal.prompt', {}) - if continue_development: - continue_description = detailed_user_review_goal if detailed_user_review_goal is not None else None - self.continue_development(convo, continue_description) + 
self.continue_development(convo) - def continue_development(self, iteration_convo, continue_description=''): + def continue_development(self, iteration_convo): while True: - user_description = ('Here is a description of what should be working: \n\n' + colored(continue_description, 'blue', attrs=['bold']) + '\n') if continue_description != '' else '' - user_description = 'Can you check if the app works please? ' + user_description + '\nIf you want to run the app, ' + colored('just type "r" and press ENTER and that will run `' + self.run_command + '`', 'yellow', attrs=['bold']) - continue_description = '' + # TODO add description about how can the user check if the app works user_feedback = self.project.ask_for_human_intervention( - user_description, + 'Can you check if the app works?\nIf you want to run the app, ' + colored('just type "r" and press ENTER', 'yellow', attrs=['bold']), cbs={ 'r': lambda: run_command_until_success(self.run_command, None, iteration_convo, force=True) }) if user_feedback == 'continue': diff --git a/pilot/prompts/development/plan.prompt b/pilot/prompts/development/plan.prompt index 99cf96e..40cfbd2 100644 --- a/pilot/prompts/development/plan.prompt +++ b/pilot/prompts/development/plan.prompt @@ -34,10 +34,4 @@ Here are the technologies that you need to use for this project: {% endfor %} ``` -OK, now, you need to create code to have this app fully working but before we go into the coding part, I want you to split the development process of creating this app into smaller tasks so that it is easier to debug and make the app work. Each smaller task of this project has to be a whole that can be reviewed by a developer to make sure we're on a right track to create this app completely. However, it cannot be split into tasks that are too small as well. - -Each task needs to be related only to the development of this app and nothing else - once the app is fully working, that is it. 
There shouldn't be a task for deployment, writing documentation, or anything that is not writing the actual code. Think task by task and create the least number of tasks that are relevant for this specific app. - -For each task, there must be a way for human developer to check if the task is done or not. Write how should the developer check if the task is done. - -Now, based on the app's description, user stories and user tasks, and the technologies that you need to use, think task by task and write up the entire plan for the development. Start from the project setup and specify each task until the moment when the entire app should be fully working. For each task, write a description and a user-review goal. \ No newline at end of file +Now, based on the app's description, user stories and user tasks, and the technologies that you need to use, think step by step and write up the entire plan for the development. Start from the project setup and specify each step until the moment when the entire app should be fully working. For each step, write a description, a programmatic goal, and a user-review goal. 
\ No newline at end of file diff --git a/pilot/prompts/development/task/breakdown.prompt b/pilot/prompts/development/task/breakdown.prompt index d9f41a0..1509b57 100644 --- a/pilot/prompts/development/task/breakdown.prompt +++ b/pilot/prompts/development/task/breakdown.prompt @@ -31,19 +31,12 @@ So far, this code has been implemented {% endfor %} {% endif %} -We've broken the development of this app down to these tasks: -```{% for task in development_tasks %} -- {{ task['description'] }}{% endfor %} -``` - -You are currently working on this task with the following description: {{ development_tasks[current_task_index]['description'] }} -After all the code is finished, a human developer will check it works this way - {{ development_tasks[current_task_index]['user_review_goal'] }} - Now, tell me all the code that needs to be written to implement this app and have it fully working and all commands that need to be run to implement this app. +This should be a simple version of the app so you don't need to aim to provide a production ready code but rather something that a developer can run locally and play with the implementation. Do not leave any parts of the code to be written afterwards. Make sure that all the code you provide is working and does as outlined in the description area above. + !IMPORTANT! Remember, I'm currently in an empty folder where I will start writing files that you tell me. Tell me how can I test the app to see if it's working or not. You do not need to make any automated tests work. -DO NOT specify commands to create any folders or files, they will be created automatically - just specify the relative path to each file that needs to be written. -Never use the port 5000 to run the app, it's reserved. +DO NOT specify commands to create any folders or files, they will be created automatically - just specify the relative path to each file that needs to be written \ No newline at end of file