enable retrying request for extension if API call fails
@@ -70,7 +70,7 @@ class AgentConvo:
         else:
             # if we don't, get the response from LLM
             try:
-                response = create_gpt_chat_completion(self.messages, self.high_level_step, function_calls=function_calls)
+                response = create_gpt_chat_completion(self.messages, self.high_level_step, self.agent.project, function_calls=function_calls)
             except TokenLimitError as e:
                 save_development_step(self.agent.project, prompt_path, prompt_data, self.messages, '', str(e))
                 raise e
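Note: the only change in this hunk is threading the project object into create_gpt_chat_completion. A sketch of the resulting call chain, using only names that appear in this diff (the enclosing AgentConvo method name is not shown in the hunk header, so it is omitted):

    # AgentConvo (method not named in the hunk header)
    #   -> create_gpt_chat_completion(self.messages, self.high_level_step, self.agent.project, ...)
    #     -> stream_gpt_completion(gpt_data, req_type, project)   # decorated with @retry_on_exception
    #       -> retry_on_exception reads the project from the call's positional
    #          arguments and uses it to ask the user whether to retry via styled_text()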
@@ -86,10 +86,10 @@ def get_additional_info_from_openai(project, messages):
     while not is_complete:
         # Obtain clarifications using the OpenAI API
         # { 'text': new_code }
-        response = create_gpt_chat_completion(messages, 'additional_info')
+        response = create_gpt_chat_completion(messages, 'additional_info', project)

         if response is not None:
-            if response['text'].strip() == END_RESPONSE:
+            if response['text'] and response['text'].strip() == END_RESPONSE:
                 # print(response['text'] + '\n')
                 return messages

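Note: the added truthiness check guards against a response whose 'text' value is None (or an empty string). A minimal illustration of the failure mode it fixes:

    response = {'text': None}

    # Old check: crashes before the comparison is ever made.
    # response['text'].strip()  ->  AttributeError: 'NoneType' object has no attribute 'strip'

    # New check: `and` short-circuits on the falsy value, so .strip() is only
    # reached when 'text' is a non-empty string.
    if response['text'] and response['text'].strip() == END_RESPONSE:
        pass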
@@ -132,7 +132,9 @@ def get_additional_info_from_user(project, messages, role):
             break
         response = create_gpt_chat_completion(
             generate_messages_from_custom_conversation(role, [get_prompt('utils/update.prompt'), message, answer], 'user'),
-            'additional_info')
+            'additional_info',
+            project
+        )

         message = response

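Note: generate_messages_from_custom_conversation is not part of this diff, so the following is only a hypothetical sketch of the message list it is assumed to build from [get_prompt('utils/update.prompt'), message, answer], with 'user' as the starting side:

    # Hypothetical output shape (assumption, not from this commit):
    [
        {'role': 'system', 'content': '<system prompt for `role`>'},
        {'role': 'user', 'content': get_prompt('utils/update.prompt')},
        {'role': 'assistant', 'content': message},
        {'role': 'user', 'content': answer},
    ]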
@@ -5,7 +5,7 @@ import sys
 import time
 import json
 import tiktoken
-import questionary
+from prompt_toolkit.styles import Style

 from jsonschema import validate, ValidationError
 from utils.style import red
@@ -15,6 +15,7 @@ from logger.logger import logger
 from helpers.exceptions.TokenLimitError import TokenLimitError
 from utils.utils import fix_json, get_prompt
 from utils.function_calling import add_function_calls_to_request, FunctionCallSet, FunctionType
+from utils.questionary import styled_text


 def get_tokens_in_messages(messages: List[str]) -> int:
@@ -58,7 +59,7 @@ def num_tokens_from_functions(functions):
     return num_tokens


-def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TOKENS_FOR_GPT_RESPONSE,
+def create_gpt_chat_completion(messages: List[dict], req_type, project,
                                function_calls: FunctionCallSet = None):
     """
     Called from:
@@ -69,7 +70,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO
     "Please check this message and say what needs to be changed... {message}"
     :param messages: [{ "role": "system"|"assistant"|"user", "content": string }, ... ]
     :param req_type: 'project_description' etc. See common.STEPS
-    :param min_tokens: defaults to 600
+    :param project: project
     :param function_calls: (optional) {'definitions': [{ 'name': str }, ...]}
         see `IMPLEMENT_CHANGES` etc. in `pilot/const/function_calls.py`
     :return: {'text': new_code}
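Note: this is a breaking signature change: the min_tokens keyword (its default, MIN_TOKENS_FOR_GPT_RESPONSE, was documented as 600) is dropped, and project becomes a required third positional argument, so every call site has to change, as the other hunks in this diff do:

    # Before and after, taken from the hunks above:
    response = create_gpt_chat_completion(messages, 'additional_info')           # old
    response = create_gpt_chat_completion(messages, 'additional_info', project)  # new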
@@ -99,7 +100,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO
     add_function_calls_to_request(gpt_data, function_calls)

     try:
-        response = stream_gpt_completion(gpt_data, req_type)
+        response = stream_gpt_completion(gpt_data, req_type, project)
         return response
     except TokenLimitError as e:
         raise e
@@ -186,12 +187,15 @@ def retry_on_exception(func):
             print(err_str)
             logger.error(f'There was a problem with request to openai API: {err_str}')

-            user_message = questionary.text(
+            project = args[2]
+            user_message = styled_text(
+                project,
                 "Do you want to try make the same request again? If yes, just press ENTER. Otherwise, type 'no'.",
-                style=questionary.Style([
-                    ('question', 'fg:red'),
-                    ('answer', 'fg:orange')
-                ])).ask()
+                style=Style.from_dict({
+                    'question': '#FF0000 bold',
+                    'answer': '#FF910A bold'
+                })
+            )

             # TODO: take user's input into consideration - send to LLM?
             # https://github.com/Pythagora-io/gpt-pilot/issues/122
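Note: only the lines shown in this hunk are from the commit; the following is a minimal sketch of how the whole retry_on_exception decorator is assumed to fit together (the retry loop, the exception filter, and the 'no' handling are assumptions, not part of this diff):

    import functools

    from prompt_toolkit.styles import Style
    from logger.logger import logger
    from utils.questionary import styled_text

    def retry_on_exception(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            while True:
                try:
                    return func(*args, **kwargs)
                except Exception as e:  # assumption: the real code matches specific API errors
                    err_str = str(e)
                    print(err_str)
                    logger.error(f'There was a problem with request to openai API: {err_str}')

                    # The wrapped function is stream_gpt_completion(data, req_type, project),
                    # so the project is always the third positional argument.
                    project = args[2]
                    user_message = styled_text(
                        project,
                        "Do you want to try make the same request again? If yes, just press ENTER. Otherwise, type 'no'.",
                        style=Style.from_dict({
                            'question': '#FF0000 bold',
                            'answer': '#FF910A bold'
                        })
                    )
                    if user_message.strip().lower() == 'no':  # assumption: any 'no' aborts the retry loop
                        raise
        return wrapper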
@@ -202,11 +206,12 @@ def retry_on_exception(func):


 @retry_on_exception
-def stream_gpt_completion(data, req_type):
+def stream_gpt_completion(data, req_type, project):
     """
     Called from create_gpt_chat_completion()
     :param data:
     :param req_type: 'project_description' etc. See common.STEPS
+    :param project: NEEDED FOR WRAPPER FUNCTION retry_on_exception
     :return: {'text': str} or {'function_calls': {'name': str, arguments: '{...}'}}
     """

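Note: project is unused inside stream_gpt_completion itself; as the new docstring line says, it exists only so the retry_on_exception wrapper can reach it positionally:

    # stream_gpt_completion(gpt_data, req_type, project)
    #   args[0] -> gpt_data  (the request payload)
    #   args[1] -> req_type
    #   args[2] -> project   # read by retry_on_exception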
@@ -23,7 +23,7 @@ def styled_select(*args, **kwargs):
     return questionary.select(*args, **kwargs).unsafe_ask()  # .ask() is included here


-def styled_text(project, question, ignore_user_input_count=False):
+def styled_text(project, question, ignore_user_input_count=False, style=None):
     if not ignore_user_input_count:
         project.user_inputs_count += 1
     user_input = get_saved_user_input(project, question)
@@ -36,7 +36,7 @@ def styled_text(project, question, ignore_user_input_count=False):

     if project.ipc_client_instance is None or project.ipc_client_instance.client is None:
         config = {
-            'style': custom_style,
+            'style': style if style is not None else custom_style,
         }
         question = remove_ansi_codes(question)  # Colorama and questionary are not compatible and styling doesn't work
         response = questionary.text(question, **config).unsafe_ask()  # .ask() is included here
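Note: a minimal usage sketch of the new optional style parameter (the Style values mirror the ones used in retry_on_exception above); omitting style keeps the previous custom_style behaviour:

    from prompt_toolkit.styles import Style

    answer = styled_text(
        project,
        "Do you want to try make the same request again? If yes, just press ENTER. Otherwise, type 'no'.",
        style=Style.from_dict({
            'question': '#FF0000 bold',
            'answer': '#FF910A bold'
        })
    )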