mirror of https://github.com/OMGeeky/gpt-pilot.git (synced 2026-01-19 00:44:04 +01:00)
Made logging look nicer
@@ -124,7 +124,9 @@ class AgentConvo:
     def log_message(self, content):
         print_msg = capitalize_first_word_with_underscores(self.high_level_step)
         if self.log_to_user:
-            print(colored("Dev step ", 'yellow') + colored(self.agent.project.checkpoints['last_development_step'], 'yellow', attrs=['bold']) + f"\n{content}\n")
+            if self.agent.project.checkpoints['last_development_step'] is not None:
+                print(colored("\nDev step ", 'yellow') + colored(self.agent.project.checkpoints['last_development_step'], 'yellow', attrs=['bold']) + '\n', end='')
+            print(f"\n{content}\n")
         logger.info(f"{print_msg}: {content}\n")
 
     def to_playground(self):
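
(Context for the recurring colored(..., attrs=['bold']) calls throughout this commit: a minimal standalone sketch of the termcolor pattern, assuming only that the termcolor package is installed; the step number and message below are made-up example values, not taken from the repository.)

    # Standalone sketch of the colored + attrs=['bold'] pattern used above.
    # Requires: pip install termcolor
    from termcolor import colored

    dev_step = 42                              # hypothetical checkpoint value
    content = "Implementing the login form"    # hypothetical log content

    if dev_step is not None:
        print(colored("\nDev step ", 'yellow') + colored(str(dev_step), 'yellow', attrs=['bold']) + '\n', end='')
    print(f"\n{content}\n")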
@@ -181,14 +181,14 @@ class Project:
         delete_unconnected_steps_from(self.checkpoints['last_user_input'], 'previous_step')
 
     def ask_for_human_intervention(self, message, description=None, cbs={}):
-        print(colored(message, "yellow"))
+        print(colored(message, "yellow", attrs=['bold']))
         if description is not None:
             print(description)
         answer = ''
         while answer != 'continue':
             answer = styled_text(
                 self,
-                'Once you are ready, type "continue" to continue.',
+                'If something is wrong, tell me or type "continue" to continue.',
             )
 
             if answer in cbs:
@@ -26,7 +26,7 @@ class Developer(Agent):
         self.project.skip_steps = False if ('skip_until_dev_step' in self.project.args and self.project.args['skip_until_dev_step'] == '0') else True
 
         # DEVELOPMENT
-        print(colored(f"Ok, great, now, let's start with the actual development...\n", "green"))
+        print(colored(f"Ok, great, now, let's start with the actual development...\n", "green", attrs=['bold']))
         logger.info(f"Starting to create the actual code...")
 
         self.implement_task()
@@ -88,7 +88,7 @@ class Developer(Agent):
                 # TODO end
 
             elif step['type'] == 'human_intervention':
-                user_feedback = self.project.ask_for_human_intervention('I need your help! Can you try debugging this yourself and let me take over afterwards? Here are the details about the issue:', step['human_intervention_description'])
+                user_feedback = self.project.ask_for_human_intervention('I need human intervention:', step['human_intervention_description'])
                 if user_feedback is not None and user_feedback != 'continue':
                     debug(convo, user_input=user_feedback, issue_description=step['human_intervention_description'])
 
@@ -43,6 +43,7 @@ class ProductOwner(Agent):
             self.project,
             generate_messages_from_description(main_prompt, self.project.args['app_type'], self.project.args['name']))
 
+        print(colored('Project Summary:\n', 'green', attrs=['bold']))
         high_level_summary = convo_project_description.send_message('utils/summary.prompt',
             {'conversation': '\n'.join([f"{msg['role']}: {msg['content']}" for msg in high_level_messages])})
 
@@ -71,7 +72,7 @@ class ProductOwner(Agent):
             return step['user_stories']
 
         # USER STORIES
-        msg = f"Generating USER STORIES...\n"
+        msg = f"User Stories:\n"
         print(colored(msg, "green", attrs=['bold']))
         logger.info(msg)
 
@@ -105,7 +106,7 @@ class ProductOwner(Agent):
             return step['user_tasks']
 
         # USER TASKS
-        msg = f"Generating USER TASKS...\n"
+        msg = f"User Tasks:\n"
         print(colored(msg, "green", attrs=['bold']))
         logger.info(msg)
 
@@ -49,7 +49,8 @@ def execute_command(project, command, timeout=None, force=False):
         timeout = min(max(timeout, MIN_COMMAND_RUN_TIME), MAX_COMMAND_RUN_TIME)
 
     if not force:
-        print(colored(f'Can i execute the command: `') + colored(command, 'white', attrs=['bold']) + colored(f'` with {timeout}ms timeout?'))
+        print(colored(f'\n--------- EXECUTE COMMAND ----------', 'yellow', attrs=['bold']))
+        print(colored(f'Can i execute the command: `') + colored(command, 'yellow', attrs=['bold']) + colored(f'` with {timeout}ms timeout?'))
 
         answer = styled_text(
             project,
@@ -75,7 +75,7 @@ def get_additional_info_from_openai(project, messages):
 
         if response is not None:
             if response['text'].strip() == END_RESPONSE:
-                print(response['text'])
+                print(response['text'] + '\n')
                 return messages
 
             # Ask the question to the user
@@ -109,9 +109,7 @@ def get_additional_info_from_user(project, messages, role):
         if answer.lower() == '':
             break
         response = create_gpt_chat_completion(
-            generate_messages_from_custom_conversation(role, [get_prompt('utils/update.prompt'), message, answer],
-                                                       'user'),
-            'additional_info')
+            generate_messages_from_custom_conversation(role, [get_prompt('utils/update.prompt'), message, answer], 'user'), 'additional_info')
 
         message = response
 
@@ -49,6 +49,8 @@ def get_arguments():
     if 'step' not in arguments:
         arguments['step'] = None
 
+    print(colored('\n------------------ STARTING NEW PROJECT ----------------------', 'green', attrs=['bold']))
     print(f"If you wish to continue with this project in future run:")
-    print(colored(f'python main.py app_id={arguments["app_id"]}', 'yellow', attrs=['bold']))
+    print(colored(f'python main.py app_id={arguments["app_id"]}', 'green', attrs=['bold']))
+    print(colored('--------------------------------------------------------------\n', 'green', attrs=['bold']))
     return arguments
@@ -74,8 +74,8 @@ def num_tokens_from_functions(functions, model="gpt-4"):
                             for o in v['enum']:
                                 function_tokens += 3
                                 function_tokens += len(encoding.encode(o))
-                        else:
-                            print(f"Warning: not supported field {field}")
+                        # else:
+                        #     print(f"Warning: not supported field {field}")
                 function_tokens += 11
 
     num_tokens += function_tokens
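
(The function above estimates prompt tokens for OpenAI function definitions; as a point of reference, here is a minimal sketch of the tiktoken primitive it builds on, assuming the tiktoken package is installed; the sample strings are illustrative only.)

    # Sketch of the tiktoken calls that num_tokens_from_functions relies on.
    # Requires: pip install tiktoken
    import tiktoken

    encoding = tiktoken.get_encoding("cl100k_base")      # tokenizer for GPT-3.5/GPT-4 era models
    for sample in ["run_command", "ls -la"]:             # hypothetical function name / enum value
        print(sample, "->", len(encoding.encode(sample)), "tokens")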
@@ -170,7 +170,7 @@ def stream_gpt_completion(data, req_type):
         return result_data
 
-    # spinner = spinner_start(colored("Waiting for OpenAI API response...", 'yellow'))
+    print(colored("Waiting for OpenAI API response...", 'yellow'))
     # print(colored("Stream response from OpenAI:", 'yellow'))
     api_key = os.getenv("OPENAI_API_KEY")
 
     logger.info(f'Request data: {data}')
@@ -33,4 +33,7 @@ def styled_text(project, question):
     }
     response = questionary.text(question, **config).unsafe_ask()  # .ask() is included here
     user_input = save_user_input(project, question, response)
+
+    print('\n\n', end='')
+
     return response
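
(styled_text, touched in the last hunk, is a thin wrapper around questionary; a minimal sketch of that call follows, assuming the questionary package is installed; the question text is illustrative, not the project's actual prompt or config.)

    # Sketch of the questionary call wrapped by styled_text.
    # Requires: pip install questionary
    import questionary

    # unsafe_ask() raises KeyboardInterrupt on Ctrl-C instead of returning None.
    response = questionary.text("Once you are ready, type 'continue' to continue.").unsafe_ask()
    print('\n\n', end='')   # the extra spacing this commit adds after every answer
    print(response)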