diff --git a/README.md b/README.md
index c3ea724..b75596e 100644
--- a/README.md
+++ b/README.md
@@ -52,7 +52,7 @@ After you have Python and PostgreSQL installed, follow these steps:
 5. `pip install -r requirements.txt` (install the dependencies)
 6. `cd pilot`
 7. `mv .env.example .env` (create the .env file)
-8. Add your OpenAI API key and the PostgreSQL database info to the `.env` file
+8. Add your environment (OpenAI/Azure), your API key and the PostgreSQL database info to the `.env` file
 9. `python db_init.py` (initialize the database)
 10. `python main.py` (start GPT Pilot)
diff --git a/pilot/.env.example b/pilot/.env.example
index d53210e..e7b661c 100644
--- a/pilot/.env.example
+++ b/pilot/.env.example
@@ -1,4 +1,11 @@
+#OPENAI or AZURE
+ENDPOINT=OPENAI
 OPENAI_API_KEY=
+AZURE_API_KEY=
+AZURE_ENDPOINT=
+#In case of Azure endpoint, change this to your deployed model name
+MODEL_NAME=gpt-4
+MAX_TOKENS=8192
 DB_NAME=gpt-pilot
 DB_HOST=localhost
 DB_PORT=5432
diff --git a/pilot/const/common.py b/pilot/const/common.py
index 05269cb..6e39416 100644
--- a/pilot/const/common.py
+++ b/pilot/const/common.py
@@ -11,8 +11,8 @@ STEPS = [
     'user_stories',
     'user_tasks',
     'architecture',
-    'environment_setup',
     'development_planning',
+    'environment_setup',
     'coding'
 ]
diff --git a/pilot/const/llm.py b/pilot/const/llm.py
index ca42267..8ae8ab8 100644
--- a/pilot/const/llm.py
+++ b/pilot/const/llm.py
@@ -1,4 +1,5 @@
+import os
+MAX_GPT_MODEL_TOKENS = int(os.getenv('MAX_TOKENS'))
 MIN_TOKENS_FOR_GPT_RESPONSE = 600
-MAX_GPT_MODEL_TOKENS = 8192
 MAX_QUESTIONS = 5
-END_RESPONSE = "EVERYTHING_CLEAR"
+END_RESPONSE = "EVERYTHING_CLEAR"
\ No newline at end of file
diff --git a/pilot/helpers/Project.py b/pilot/helpers/Project.py
index 9e605e2..9aa92a1 100644
--- a/pilot/helpers/Project.py
+++ b/pilot/helpers/Project.py
@@ -76,11 +76,8 @@ class Project:
         self.architect = Architect(self)
         self.architecture = self.architect.get_architecture()

-        self.developer = Developer(self)
-        self.developer.set_up_environment();
-
-        self.tech_lead = TechLead(self)
-        self.development_plan = self.tech_lead.create_development_plan()
+        # self.tech_lead = TechLead(self)
+        # self.development_plan = self.tech_lead.create_development_plan()

         # TODO move to constructor eventually
         if 'skip_until_dev_step' in self.args:
@@ -94,6 +91,9 @@ class Project:
                 self.save_files_snapshot(self.skip_until_dev_step)
         # TODO END

+        self.developer = Developer(self)
+        self.developer.set_up_environment();
+
         self.developer.start_coding()

     def get_directory_tree(self, with_descriptions=False):
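Note on the new `MAX_TOKENS` variable: `llm.py` now reads it with `int(os.getenv('MAX_TOKENS'))`, which raises a `TypeError` at import time if the variable is missing from `.env`. A minimal defensive sketch, not part of the patch; the `8192` fallback simply mirrors the hard-coded value this change removes:

```python
import os

# Sketch only: fall back to the previous hard-coded limit when MAX_TOKENS is unset,
# instead of letting int(None) raise a TypeError when the module is imported.
MAX_GPT_MODEL_TOKENS = int(os.getenv('MAX_TOKENS', '8192'))
```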
diff --git a/pilot/helpers/agents/Developer.py b/pilot/helpers/agents/Developer.py
index 534e9bf..a0074bc 100644
--- a/pilot/helpers/agents/Developer.py
+++ b/pilot/helpers/agents/Developer.py
@@ -30,16 +30,13 @@ class Developer(Agent):
         print(colored(f"Ok, great, now, let's start with the actual development...\n", "green", attrs=['bold']))
         logger.info(f"Starting to create the actual code...")

-        for i, dev_task in enumerate(self.project.development_plan):
-            self.implement_task(i, dev_task)
+        self.implement_task()

         # DEVELOPMENT END

         logger.info('The app is DONE!!! Yay...you can use it now.')

-    def implement_task(self, i, development_task=None):
-        print(colored(f'Implementing task #{i + 1}: ', 'green', attrs=['bold']) + colored(f' {development_task["description"]}\n', 'green'));
-
+    def implement_task(self):
         convo_dev_task = AgentConvo(self)
         task_description = convo_dev_task.send_message('development/task/breakdown.prompt', {
             "name": self.project.args['name'],
@@ -50,16 +47,13 @@ class Developer(Agent):
             "technologies": self.project.architecture,
             "array_of_objects_to_string": array_of_objects_to_string,
             "directory_tree": self.project.get_directory_tree(True),
-            "current_task_index": i,
-            "development_tasks": self.project.development_plan,
-            "files": self.project.get_all_coded_files(),
         })

         task_steps = convo_dev_task.send_message('development/parse_task.prompt', {}, IMPLEMENT_TASK)
         convo_dev_task.remove_last_x_messages(2)

-        self.execute_task(convo_dev_task, task_steps, development_task=development_task, continue_development=True)
+        self.execute_task(convo_dev_task, task_steps, continue_development=True)

-    def execute_task(self, convo, task_steps, test_command=None, reset_convo=True, test_after_code_changes=True, continue_development=False, development_task=None):
+    def execute_task(self, convo, task_steps, test_command=None, reset_convo=True, test_after_code_changes=True, continue_development=False):
         function_uuid = str(uuid.uuid4())
         convo.save_branch(function_uuid)
@@ -123,21 +117,14 @@ class Developer(Agent):
         if self.run_command.endswith('`'):
             self.run_command = self.run_command[:-1]

-        if development_task is not None:
-            convo.remove_last_x_messages(2)
-            detailed_user_review_goal = convo.send_message('development/define_user_review_goal.prompt', {})
-
         if continue_development:
-            continue_description = detailed_user_review_goal if detailed_user_review_goal is not None else None
-            self.continue_development(convo, continue_description)
+            self.continue_development(convo)

-    def continue_development(self, iteration_convo, continue_description=''):
+    def continue_development(self, iteration_convo):
         while True:
-            user_description = ('Here is a description of what should be working: \n\n' + colored(continue_description, 'blue', attrs=['bold']) + '\n') if continue_description != '' else ''
-            user_description = 'Can you check if the app works please? ' + user_description + '\nIf you want to run the app, ' + colored('just type "r" and press ENTER and that will run `' + self.run_command + '`', 'yellow', attrs=['bold'])
-            continue_description = ''
+            # TODO add description about how can the user check if the app works
             user_feedback = self.project.ask_for_human_intervention(
-                user_description,
+                'Can you check if the app works?\nIf you want to run the app, ' + colored('just type "r" and press ENTER', 'yellow', attrs=['bold']),
                 cbs={ 'r': lambda: run_command_until_success(self.run_command, None, iteration_convo, force=True) })

             if user_feedback == 'continue':
diff --git a/pilot/prompts/development/plan.prompt b/pilot/prompts/development/plan.prompt
index 99cf96e..40cfbd2 100644
--- a/pilot/prompts/development/plan.prompt
+++ b/pilot/prompts/development/plan.prompt
@@ -34,10 +34,4 @@ Here are the technologies that you need to use for this project:
 {% endfor %}
 ```

-OK, now, you need to create code to have this app fully working but before we go into the coding part, I want you to split the development process of creating this app into smaller tasks so that it is easier to debug and make the app work. Each smaller task of this project has to be a whole that can be reviewed by a developer to make sure we're on a right track to create this app completely. However, it cannot be split into tasks that are too small as well.
-
-Each task needs to be related only to the development of this app and nothing else - once the app is fully working, that is it. There shouldn't be a task for deployment, writing documentation, or anything that is not writing the actual code. Think task by task and create the least number of tasks that are relevant for this specific app.
-
-For each task, there must be a way for human developer to check if the task is done or not. Write how should the developer check if the task is done.
-
-Now, based on the app's description, user stories and user tasks, and the technologies that you need to use, think task by task and write up the entire plan for the development. Start from the project setup and specify each task until the moment when the entire app should be fully working. For each task, write a description and a user-review goal.
\ No newline at end of file
+Now, based on the app's description, user stories and user tasks, and the technologies that you need to use, think step by step and write up the entire plan for the development. Start from the project setup and specify each step until the moment when the entire app should be fully working. For each step, write a description, a programmatic goal, and a user-review goal.
\ No newline at end of file
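The reworked `continue_development()` now just asks the user to test the app and wires a single `'r'` callback that reruns the stored run command. A self-contained sketch of that callback pattern, using a hypothetical stand-in rather than the project's actual `ask_for_human_intervention`:

```python
# Hypothetical stand-in for the callback-driven prompt used above (not project code).
def ask_for_human_intervention(message, cbs):
    while True:
        answer = input(message + '\n> ').strip()
        if answer in cbs:
            cbs[answer]()      # e.g. 'r' reruns the app's run command
        else:
            return answer      # anything else is returned as the user's feedback


# Usage mirroring continue_development() after this change:
feedback = ask_for_human_intervention(
    'Can you check if the app works?\nIf you want to run the app, just type "r" and press ENTER',
    cbs={'r': lambda: print('(would run the stored run_command here)')})
```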
diff --git a/pilot/prompts/development/task/breakdown.prompt b/pilot/prompts/development/task/breakdown.prompt
index d9f41a0..1509b57 100644
--- a/pilot/prompts/development/task/breakdown.prompt
+++ b/pilot/prompts/development/task/breakdown.prompt
@@ -31,19 +31,12 @@ So far, this code has been implemented
 {% endfor %}
 {% endif %}

-We've broken the development of this app down to these tasks:
-```{% for task in development_tasks %}
-- {{ task['description'] }}{% endfor %}
-```
-
-You are currently working on this task with the following description: {{ development_tasks[current_task_index]['description'] }}
-After all the code is finished, a human developer will check it works this way - {{ development_tasks[current_task_index]['user_review_goal'] }}
-
 Now, tell me all the code that needs to be written to implement this app and have it fully working and all commands that need to be run to implement this app.
+This should be a simple version of the app so you don't need to aim to provide a production ready code but rather something that a developer can run locally and play with the implementation. Do not leave any parts of the code to be written afterwards. Make sure that all the code you provide is working and does as outlined in the description area above.
+
 !IMPORTANT!
 Remember, I'm currently in an empty folder where I will start writing files that you tell me.

 Tell me how can I test the app to see if it's working or not. You do not need to make any automated tests work.

-DO NOT specify commands to create any folders or files, they will be created automatically - just specify the relative path to each file that needs to be written.
-Never use the port 5000 to run the app, it's reserved.
+DO NOT specify commands to create any folders or files, they will be created automatically - just specify the relative path to each file that needs to be written
\ No newline at end of file
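The prompt files are Jinja-style templates, and this hunk removes every reference to `development_tasks` and `current_task_index` from `breakdown.prompt`, matching the trimmed `send_message()` context in `Developer.py`. A self-contained way to sanity-check the trimmed template, assuming it is run from the repository root and that the prompts are plain Jinja2 (illustrative helper, not part of the patch):

```python
from jinja2 import Environment, FileSystemLoader, meta

# Parse the trimmed template and list the variables it still references,
# confirming the ones removed by this patch are gone.
env = Environment(loader=FileSystemLoader('pilot/prompts'))
source = env.loader.get_source(env, 'development/task/breakdown.prompt')[0]
referenced = meta.find_undeclared_variables(env.parse(source))

print(sorted(referenced))
assert 'development_tasks' not in referenced
assert 'current_task_index' not in referenced
```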
diff --git a/pilot/utils/llm_connection.py b/pilot/utils/llm_connection.py
index c191d8e..369af7c 100644
--- a/pilot/utils/llm_connection.py
+++ b/pilot/utils/llm_connection.py
@@ -46,8 +46,11 @@ def get_tokens_in_messages(messages: List[str]) -> int:
     tokenized_messages = [tokenizer.encode(message['content']) for message in messages]
     return sum(len(tokens) for tokens in tokenized_messages)

+#get endpoint and model name from .ENV file
+model = os.getenv('MODEL_NAME')
+endpoint = os.getenv('ENDPOINT')

-def num_tokens_from_functions(functions, model="gpt-4"):
+def num_tokens_from_functions(functions, model=model):
     """Return the number of tokens used by a list of functions."""
     encoding = tiktoken.get_encoding("cl100k_base")
@@ -94,7 +97,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO
         raise ValueError(f'Too many tokens in messages: {tokens_in_messages}. Please try a different test.')

     gpt_data = {
-        'model': 'gpt-4',
+        'model': os.getenv('OPENAI_MODEL', 'gpt-4'),
         'n': 1,
         'max_tokens': min(4096, MAX_GPT_MODEL_TOKENS - tokens_in_messages),
         'temperature': 1,
@@ -171,13 +174,22 @@ def stream_gpt_completion(data, req_type):
     # spinner = spinner_start(colored("Waiting for OpenAI API response...", 'yellow'))
     # print(colored("Stream response from OpenAI:", 'yellow'))

-    api_key = os.getenv("OPENAI_API_KEY")
     logger.info(f'Request data: {data}')

+    # Check if the ENDPOINT is AZURE
+    if endpoint == 'AZURE':
+        # If yes, get the AZURE_ENDPOINT from .ENV file
+        endpoint_url = os.getenv('AZURE_ENDPOINT') + '/openai/deployments/' + model + '/chat/completions?api-version=2023-05-15'
+        headers = {'Content-Type': 'application/json', 'api-key': os.getenv('AZURE_API_KEY')}
+    else:
+        # If not, send the request to the OpenAI endpoint
+        headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + os.getenv("OPENAI_API_KEY")}
+        endpoint_url = 'https://api.openai.com/v1/chat/completions'
+
     response = requests.post(
-        'https://api.openai.com/v1/chat/completions',
-        headers={'Content-Type': 'application/json', 'Authorization': 'Bearer ' + api_key},
+        endpoint_url,
+        headers=headers,
         json=data,
         stream=True
     )
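The endpoint/header selection added above can also be read as a small standalone helper; the sketch below assumes the same `.env` variables as the patch and is not a function that exists in the codebase. With hypothetical values such as `AZURE_ENDPOINT=https://my-resource.openai.azure.com` and `MODEL_NAME=my-gpt4-deployment`, the Azure branch yields `https://my-resource.openai.azure.com/openai/deployments/my-gpt4-deployment/chat/completions?api-version=2023-05-15`.

```python
import os

def resolve_chat_endpoint():
    """Sketch of the endpoint selection this patch adds: returns (url, headers)."""
    model = os.getenv('MODEL_NAME', 'gpt-4')
    if os.getenv('ENDPOINT') == 'AZURE':
        # Azure OpenAI routes requests per deployment and authenticates with an api-key header.
        url = (os.getenv('AZURE_ENDPOINT')
               + '/openai/deployments/' + model
               + '/chat/completions?api-version=2023-05-15')
        headers = {'Content-Type': 'application/json',
                   'api-key': os.getenv('AZURE_API_KEY')}
    else:
        # Default: the public OpenAI chat completions endpoint with a Bearer token.
        url = 'https://api.openai.com/v1/chat/completions'
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer ' + os.getenv('OPENAI_API_KEY')}
    return url, headers
```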