Improved debugging process and enabled splitting of app development into tasks and then into steps

- split step implementation into different functions
- standardized the return value in the implementation process to { "success": True }
- added propagation of errors back to recursion level 0 via TooDeepRecursionError and TokenLimitError
- created a new Debugger class and moved debugging logic into it
This commit is contained in:
Zvonimir Sabljic
2023-09-12 21:32:56 +02:00
parent 19ac692509
commit 151aa051e2
5 changed files with 205 additions and 154 deletions

View File

@@ -123,7 +123,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO
# Check if the error message is related to token limit
if "context_length_exceeded" in error_message.lower():
raise Exception(f'Too many tokens in the request. Please try to continue the project with some previous development step.')
raise TokenLimitError(tokens_in_messages + min_tokens, MAX_GPT_MODEL_TOKENS)
else:
print('The request to OpenAI API failed. Here is the error message:')
print(e)
@@ -153,7 +153,7 @@ def retry_on_exception(func):
# If the specific error "context_length_exceeded" is present, simply return without retry
if "context_length_exceeded" in err_str:
raise Exception("context_length_exceeded")
raise TokenLimitError(tokens_in_messages + min_tokens, MAX_GPT_MODEL_TOKENS)
print(colored(f'There was a problem with request to openai API:', 'red'))
print(err_str)