mirror of https://github.com/OMGeeky/gpt-pilot.git
synced 2026-01-06 11:19:33 +01:00
improved logging
@@ -29,7 +29,10 @@ class AgentConvo:
         self.high_level_step = self.agent.project.current_step

         # add system message
-        self.messages.append(get_sys_message(self.agent.role))
+        system_message = get_sys_message(self.agent.role)
+        logger.info('\n>>>>>>>>>> System Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',
+                    system_message['content'])
+        logger.info('\n>>>>>>>>>> System Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', system_message['content'])
+        self.messages.append(system_message)

     def send_message(self, prompt_path=None, prompt_data=None, function_calls: FunctionCallSet = None):
         """

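The added calls use the standard library's lazy %-style formatting: the prompt text is passed as an argument rather than interpolated into the message, so the banner string is only built if a handler actually consumes the record. A minimal sketch of the idiom (the logger name and prompt text are illustrative, not from the project):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('agent_convo_demo')  # illustrative name

    system_prompt = 'You are a software developer...'  # stand-in content

    # %s is substituted by the logging machinery when the record is emitted;
    # an f-string here would pay the formatting cost even when the level is filtered out.
    logger.info('\n>>>>>>>>>> System Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',
                system_prompt)
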
@@ -103,6 +106,7 @@ class AgentConvo:

         # TODO we need to specify the response when there is a function called
         # TODO maybe we can have a specific function that creates the GPT response from the function call
+        logger.info('\n>>>>>>>>>> Assistant Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', message_content)
         self.messages.append({"role": "assistant", "content": message_content})
         self.log_message(message_content)

@@ -133,6 +137,7 @@ class AgentConvo:
         if user_message == "":
             accepted_messages.append(response)

+        logger.info('\n>>>>>>>>>> User Message >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', user_message)
         self.messages.append({"role": "user", "content": user_message})
         response = self.send_message(None, None, function_calls)

@@ -202,4 +207,5 @@ class AgentConvo:
     def construct_and_add_message_from_prompt(self, prompt_path, prompt_data):
         if prompt_path is not None and prompt_data is not None:
             prompt = get_prompt(prompt_path, prompt_data)
+            logger.info('\n>>>>>>>>>> User Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', prompt)
             self.messages.append({"role": "user", "content": prompt})

@@ -338,8 +338,7 @@ class Project:
         if description is not None:
             question += '\n' + '-' * 100 + '\n' + white_bold(description) + '\n' + '-' * 100 + '\n'

-        if convo is not None:
-            reset_branch_id = convo.save_branch()
+        reset_branch_id = None if convo is None else convo.save_branch()

         while answer != 'continue':
             answer = ask_user(self, question,

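Beyond compactness, the conditional expression guarantees that reset_branch_id is bound on both paths; in the old form the name was only assigned when convo was not None, so any later unconditional read (if nothing else initialised it) would raise UnboundLocalError. A minimal reproduction of the hazard, with a stand-in convo object:

    class FakeConvo:
        def save_branch(self):
            return 'branch-id'  # stand-in value

    def old_style(convo):
        if convo is not None:
            reset_branch_id = convo.save_branch()
        return reset_branch_id  # UnboundLocalError when convo is None

    def new_style(convo):
        reset_branch_id = None if convo is None else convo.save_branch()
        return reset_branch_id  # always bound

    assert new_style(None) is None
    assert new_style(FakeConvo()) == 'branch-id'
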
@@ -124,9 +124,13 @@ def execute_command(project, command, timeout=None, process_name: str = None, fo
         exit_code (int): The exit code of the process.
     """
     if timeout is not None:
-        if timeout < 1000:
-            timeout *= 1000
-        timeout = min(max(timeout, MIN_COMMAND_RUN_TIME), MAX_COMMAND_RUN_TIME)
+        if timeout == 0:
+            timeout = None
+        else:
+            if timeout < 1000:
+                timeout *= 1000
+
+            timeout = min(max(timeout, MIN_COMMAND_RUN_TIME), MAX_COMMAND_RUN_TIME)

     if not force:
         print(yellow_bold(f'\n--------- EXECUTE COMMAND ----------'))

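The reworked guard gives timeout == 0 a distinct meaning (run with no timeout at all), while other small values are still interpreted as seconds, scaled to milliseconds, and clamped into the allowed window. Factored out as a standalone sketch for illustration; the two clamp constants here are stand-ins, not the project's real values:

    # Stand-in values; the real constants live in the project's config.
    MIN_COMMAND_RUN_TIME = 2000    # ms, assumed
    MAX_COMMAND_RUN_TIME = 60000   # ms, assumed

    def normalize_timeout(timeout):
        """Mirror the new guard: 0 disables the timeout entirely; values
        below 1000 are assumed to be seconds and scaled to milliseconds;
        the result is clamped into [MIN, MAX]."""
        if timeout is None or timeout == 0:
            return None
        if timeout < 1000:
            timeout *= 1000
        return min(max(timeout, MIN_COMMAND_RUN_TIME), MAX_COMMAND_RUN_TIME)

    assert normalize_timeout(0) is None
    assert normalize_timeout(5) == 5000                       # 5 s -> 5000 ms
    assert normalize_timeout(10_000_000) == MAX_COMMAND_RUN_TIME  # clamped
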
@@ -229,6 +233,7 @@ def execute_command(project, command, timeout=None, process_name: str = None, fo
                 logger.error('CLI ERROR: ' + stderr_line)

         if process_name is not None:
+            logger.info(f'Process {process_name} running as pid: {process.pid}')
             break

     except (KeyboardInterrupt, TimeoutError) as e:

@@ -317,7 +322,10 @@ def execute_command_and_check_cli_response(command, timeout, convo):
     cli_response, llm_response, exit_code = execute_command(convo.agent.project, command, timeout=timeout)
     if llm_response is None:
         llm_response = convo.send_message('dev_ops/ran_command.prompt',
-            { 'cli_response': cli_response, 'command': command })
+            {
+                'cli_response': cli_response,
+                'command': command
+            })
     return cli_response, llm_response

@@ -193,12 +193,16 @@ def generate_messages_from_custom_conversation(role, messages, start_role='user'
         ... ]
     """
     # messages is list of strings
-    result = [get_sys_message(role)]
+    system_message = get_sys_message(role)
+    result = [system_message]
+    logger.info(f'\n>>>>>>>>>> {role} Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', system_message['content'])

     for i, message in enumerate(messages):
         if i % 2 == 0:
             result.append({"role": start_role, "content": message})
+            logger.info(f'\n>>>>>>>>>> {start_role} Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', message)
         else:
             result.append({"role": "assistant" if start_role == "user" else "user", "content": message})
+            logger.info('\n>>>>>>>>>> Assistant Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', message)

     return result

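For reference, the function above expands a flat list of strings into an alternating conversation, prepending the system message for the role and logging each entry. A hypothetical call and its shape (input strings and role name invented for illustration):

    messages = ["What does this error mean?",    # index 0 -> start_role ('user')
                "It means the port is in use.",  # index 1 -> 'assistant'
                "How do I free it?"]             # index 2 -> 'user' again

    # generate_messages_from_custom_conversation('full_stack_developer', messages)
    # ~> [ <system message for the role>,
    #      {"role": "user",      "content": "What does this error mean?"},
    #      {"role": "assistant", "content": "It means the port is in use."},
    #      {"role": "user",      "content": "How do I free it?"} ]
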
@@ -11,7 +11,7 @@ from jsonschema import validate, ValidationError
 from utils.style import red
 from typing import List
 from const.llm import MIN_TOKENS_FOR_GPT_RESPONSE, MAX_GPT_MODEL_TOKENS
-from logger.logger import logger
+from logger.logger import logger, logging
 from helpers.exceptions import TokenLimitError, ApiKeyNotDefinedError
 from utils.utils import fix_json, get_prompt
 from utils.function_calling import add_function_calls_to_request, FunctionCallSet, FunctionType

@@ -156,7 +156,7 @@ def retry_on_exception(func):
             if 'function_buffer' in args[0]:
                 del args[0]['function_buffer']

-    def retry_wrapper(*args, **kwargs):
+    def wrapper(*args, **kwargs):
         while True:
             try:
                 # spinner_stop(spinner)

@@ -237,7 +237,7 @@ def retry_on_exception(func):
             if user_message != '':
                 return {}

-    return retry_wrapper
+    return wrapper


 @retry_on_exception

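The rename keeps the inner closure and the return statement consistent; the overall shape is the standard decorator pattern. A minimal sketch of that pattern — the retried exception type and the functools.wraps call are illustrative, since the diff does not show how the project handles errors:

    import functools

    def retry_on_exception(func):
        @functools.wraps(func)        # preserves func's name/docstring (assumed nicety)
        def wrapper(*args, **kwargs):
            while True:
                try:
                    return func(*args, **kwargs)
                except ValueError:    # illustrative; the real code inspects API errors
                    continue          # retry until func succeeds
        return wrapper

    @retry_on_exception
    def flaky_call():
        ...
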
@@ -291,8 +291,9 @@ def stream_gpt_completion(data, req_type, project):
     model = os.getenv('MODEL_NAME', 'gpt-4')
     endpoint = os.getenv('ENDPOINT')

-    logger.info(f'> Request model: {model} ({data["model"]})\n'
-                + '\n'.join([f"{message['role']}: {message['content']}" for message in data['messages']]))
+    logger.info(f'> Request model: {model} ({data["model"]} in data)')
+    if logger.isEnabledFor(logging.DEBUG):
+        logger.debug('\n'.join([f"{message['role']}: {message['content']}" for message in data['messages']]))

     if endpoint == 'AZURE':
         # If yes, get the AZURE_ENDPOINT from .ENV file

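Guarding the dump with logger.isEnabledFor(logging.DEBUG) (the logging name arrives via the amended import shown earlier) means the '\n'.join over every message body is only paid when DEBUG output is actually enabled; the INFO line above it stays cheap. A small demonstration of the saving, with illustrative names and data:

    import logging

    logging.basicConfig(level=logging.INFO)  # DEBUG is disabled here
    logger = logging.getLogger('llm_demo')   # illustrative name

    messages = [{"role": "user", "content": "x" * 1_000_000}]  # stand-in payload

    # Without the guard, the join below would build a ~1 MB string
    # just so the logger could throw the record away.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('\n'.join(f"{m['role']}: {m['content']}" for m in messages))
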
@@ -401,7 +402,7 @@ def stream_gpt_completion(data, req_type, project):
     # logger.info(f'Response via function call: {function_calls["arguments"]}')
     # function_calls['arguments'] = load_data_to_json(function_calls['arguments'])
     # return return_result({'function_calls': function_calls}, lines_printed)
-    logger.info(f'< Response message: {gpt_response}')
+    logger.info('<<<<<<<<<< LLM Response <<<<<<<<<<\n%s\n<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<', gpt_response)

     if expecting_json:
         gpt_response = clean_json_response(gpt_response)