diff --git a/pilot/helpers/AgentConvo.py b/pilot/helpers/AgentConvo.py
index b8c5081..b5bac8e 100644
--- a/pilot/helpers/AgentConvo.py
+++ b/pilot/helpers/AgentConvo.py
@@ -124,7 +124,9 @@ class AgentConvo:
     def log_message(self, content):
         print_msg = capitalize_first_word_with_underscores(self.high_level_step)
         if self.log_to_user:
-            print(colored("Dev step ", 'yellow') + colored(self.agent.project.checkpoints['last_development_step'], 'yellow', attrs=['bold']) + f"\n{content}\n")
+            if self.agent.project.checkpoints['last_development_step'] is not None:
+                print(colored("\nDev step ", 'yellow') + colored(self.agent.project.checkpoints['last_development_step'], 'yellow', attrs=['bold']) + '\n', end='')
+            print(f"\n{content}\n")
         logger.info(f"{print_msg}: {content}\n")
 
     def to_playground(self):
diff --git a/pilot/helpers/Project.py b/pilot/helpers/Project.py
index 6eddb5e..2a4283c 100644
--- a/pilot/helpers/Project.py
+++ b/pilot/helpers/Project.py
@@ -181,14 +181,14 @@ class Project:
         delete_unconnected_steps_from(self.checkpoints['last_user_input'], 'previous_step')
 
     def ask_for_human_intervention(self, message, description=None, cbs={}):
-        print(colored(message, "yellow"))
+        print(colored(message, "yellow", attrs=['bold']))
         if description is not None:
             print(description)
 
         answer = ''
         while answer != 'continue':
             answer = styled_text(
                 self,
-                'Once you are ready, type "continue" to continue.',
+                'If something is wrong, tell me or type "continue" to continue.',
             )
             if answer in cbs:
diff --git a/pilot/helpers/agents/Developer.py b/pilot/helpers/agents/Developer.py
index 06ee8ba..cb451e4 100644
--- a/pilot/helpers/agents/Developer.py
+++ b/pilot/helpers/agents/Developer.py
@@ -26,7 +26,7 @@ class Developer(Agent):
         self.project.skip_steps = False if ('skip_until_dev_step' in self.project.args and self.project.args['skip_until_dev_step'] == '0') else True
 
         # DEVELOPMENT
-        print(colored(f"Ok, great, now, let's start with the actual development...\n", "green"))
+        print(colored(f"Ok, great, now, let's start with the actual development...\n", "green", attrs=['bold']))
         logger.info(f"Starting to create the actual code...")
 
         self.implement_task()
@@ -88,7 +88,7 @@ class Developer(Agent):
                 # TODO end
             elif step['type'] == 'human_intervention':
-                user_feedback = self.project.ask_for_human_intervention('I need your help! Can you try debugging this yourself and let me take over afterwards? Here are the details about the issue:', step['human_intervention_description'])
+                user_feedback = self.project.ask_for_human_intervention('I need human intervention:', step['human_intervention_description'])
 
                 if user_feedback is not None and user_feedback != 'continue':
                     debug(convo, user_input=user_feedback, issue_description=step['human_intervention_description'])
 
diff --git a/pilot/helpers/agents/ProductOwner.py b/pilot/helpers/agents/ProductOwner.py
index c6acceb..d592fb3 100644
--- a/pilot/helpers/agents/ProductOwner.py
+++ b/pilot/helpers/agents/ProductOwner.py
@@ -43,6 +43,7 @@ class ProductOwner(Agent):
             self.project,
             generate_messages_from_description(main_prompt, self.project.args['app_type'], self.project.args['name']))
 
+        print(colored('Project Summary:\n', 'green', attrs=['bold']))
         high_level_summary = convo_project_description.send_message('utils/summary.prompt',
             {'conversation': '\n'.join([f"{msg['role']}: {msg['content']}" for msg in high_level_messages])})
 
@@ -71,7 +72,7 @@ class ProductOwner(Agent):
             return step['user_stories']
 
         # USER STORIES
-        msg = f"Generating USER STORIES...\n"
+        msg = f"User Stories:\n"
         print(colored(msg, "green", attrs=['bold']))
         logger.info(msg)
 
@@ -105,7 +106,7 @@ class ProductOwner(Agent):
            return step['user_tasks']
 
        # USER TASKS
-        msg = f"Generating USER TASKS...\n"
+        msg = f"User Tasks:\n"
        print(colored(msg, "green", attrs=['bold']))
        logger.info(msg)
 
diff --git a/pilot/helpers/cli.py b/pilot/helpers/cli.py
index c34d8a1..c250a78 100644
--- a/pilot/helpers/cli.py
+++ b/pilot/helpers/cli.py
@@ -49,7 +49,8 @@ def execute_command(project, command, timeout=None, force=False):
     timeout = min(max(timeout, MIN_COMMAND_RUN_TIME), MAX_COMMAND_RUN_TIME)
 
     if not force:
-        print(colored(f'Can i execute the command: `') + colored(command, 'white', attrs=['bold']) + colored(f'` with {timeout}ms timeout?'))
+        print(colored(f'\n--------- EXECUTE COMMAND ----------', 'yellow', attrs=['bold']))
+        print(colored(f'Can I execute the command: `') + colored(command, 'yellow', attrs=['bold']) + colored(f'` with {timeout}ms timeout?'))
 
         answer = styled_text(
             project,
diff --git a/pilot/prompts/prompts.py b/pilot/prompts/prompts.py
index f2fe8cd..c667268 100644
--- a/pilot/prompts/prompts.py
+++ b/pilot/prompts/prompts.py
@@ -75,7 +75,7 @@ def get_additional_info_from_openai(project, messages):
         if response is not None:
             if response['text'].strip() == END_RESPONSE:
-                print(response['text'])
+                print(response['text'] + '\n')
                 return messages
 
             # Ask the question to the user
 
@@ -109,9 +109,7 @@ def get_additional_info_from_user(project, messages, role):
 
         if answer.lower() == '':
             break
         response = create_gpt_chat_completion(
-            generate_messages_from_custom_conversation(role, [get_prompt('utils/update.prompt'), message, answer],
-                'user'),
-            'additional_info')
+            generate_messages_from_custom_conversation(role, [get_prompt('utils/update.prompt'), message, answer], 'user'), 'additional_info')
 
         message = response
diff --git a/pilot/utils/arguments.py b/pilot/utils/arguments.py
index 2546aca..8590373 100644
--- a/pilot/utils/arguments.py
+++ b/pilot/utils/arguments.py
@@ -49,6 +49,8 @@ def get_arguments():
     if 'step' not in arguments:
         arguments['step'] = None
 
+    print(colored('\n------------------ STARTING NEW PROJECT ----------------------', 'green', attrs=['bold']))
     print(f"If you wish to continue with this project in future run:")
-    print(colored(f'python main.py app_id={arguments["app_id"]}', 'yellow', attrs=['bold']))
+    print(colored(f'python main.py app_id={arguments["app_id"]}', 'green', attrs=['bold']))
+    print(colored('--------------------------------------------------------------\n', 'green', attrs=['bold']))
     return arguments
diff --git a/pilot/utils/llm_connection.py b/pilot/utils/llm_connection.py
index c35b2c0..c191d8e 100644
--- a/pilot/utils/llm_connection.py
+++ b/pilot/utils/llm_connection.py
@@ -74,8 +74,8 @@ def num_tokens_from_functions(functions, model="gpt-4"):
                     for o in v['enum']:
                         function_tokens += 3
                         function_tokens += len(encoding.encode(o))
-                else:
-                    print(f"Warning: not supported field {field}")
+                # else:
+                #     print(f"Warning: not supported field {field}")
 
         function_tokens += 11
 
@@ -170,7 +170,7 @@ def stream_gpt_completion(data, req_type):
             return result_data
 
     # spinner = spinner_start(colored("Waiting for OpenAI API response...", 'yellow'))
-    print(colored("Waiting for OpenAI API response...", 'yellow'))
+    # print(colored("Stream response from OpenAI:", 'yellow'))
 
     api_key = os.getenv("OPENAI_API_KEY")
     logger.info(f'Request data: {data}')
diff --git a/pilot/utils/questionary.py b/pilot/utils/questionary.py
index d81f279..a9a9506 100644
--- a/pilot/utils/questionary.py
+++ b/pilot/utils/questionary.py
@@ -33,4 +33,7 @@ def styled_text(project, question):
     }
     response = questionary.text(question, **config).unsafe_ask() # .ask() is included here
     user_input = save_user_input(project, question, response)
+
+    print('\n\n', end='')
+
     return response