Refactored questionary and implemented saving and restoring user input

Zvonimir Sabljic
2023-08-05 09:36:08 +02:00
parent 3bcfa4a70e
commit f89ce318e3
9 changed files with 74 additions and 14 deletions

View File

@@ -18,6 +18,7 @@ from database.models.environment_setup import EnvironmentSetup
 from database.models.development import Development
 from database.models.file_snapshot import FileSnapshot
 from database.models.command_runs import CommandRuns
+from database.models.user_inputs import UserInputs


 def save_user(user_id, email, password):
@@ -210,6 +211,24 @@ def get_command_run_from_hash_id(project, command):
     }
     return get_db_model_from_hash_id(data_to_hash, CommandRuns, project.args['app_id'])


+def save_user_input(project, query, user_input):
+    hash_data_args = {
+        'query': query,
+        'user_inputs_count': project.user_inputs_count,
+    }
+    data_fields = {
+        'query': query,
+        'user_input': user_input,
+    }
+    return hash_and_save_step(UserInputs, project.args['app_id'], hash_data_args, data_fields, "Saved User Input")
+
+
+def get_user_input_from_hash_id(project, query):
+    data_to_hash = {
+        'query': query,
+        'user_inputs_count': project.user_inputs_count
+    }
+    return get_db_model_from_hash_id(data_to_hash, UserInputs, project.args['app_id'])
+
+
 def get_development_step_from_hash_id(app_id, prompt_path, prompt_data, llm_req_num):
     hash_id = hash_data({
@@ -239,6 +258,7 @@ def create_tables():
         Development,
         FileSnapshot,
         CommandRuns,
+        UserInputs,
     ])
@@ -256,7 +276,8 @@ def drop_tables():
         EnvironmentSetup,
         Development,
         FileSnapshot,
-        CommandRuns
+        CommandRuns,
+        UserInputs,
     ]:
         database.execute_sql(f'DROP TABLE IF EXISTS "{table._meta.table_name}" CASCADE')
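Note: hash_and_save_step, get_db_model_from_hash_id and hash_data already exist in this file and are not shown in the diff. As a rough sketch of the hash-then-lookup pattern the two new functions rely on (the bodies below are assumptions for illustration, not the project's actual helpers):

import hashlib
import json

def hash_step_data(data):
    # Illustrative stand-in for hash_data(): serialize deterministically, then hash.
    return hashlib.sha256(json.dumps(data, sort_keys=True).encode()).hexdigest()

def lookup_step(model, app_id, data_to_hash):
    # Illustrative stand-in for get_db_model_from_hash_id(): return the row saved
    # under the same (app, hash_id) pair on an earlier run, or None if there is none.
    hash_id = hash_step_data(data_to_hash)
    return model.get_or_none((model.app == app_id) & (model.hash_id == hash_id))

When no matching row exists, the caller falls through to asking the user again, mirroring how shell command output is already cached via CommandRuns.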

View File

@@ -0,0 +1,18 @@
+from peewee import *
+
+from database.models.components.base_models import BaseModel
+from database.models.app import App
+
+
+class UserInputs(BaseModel):
+    id = AutoField()
+    app = ForeignKeyField(App)
+    hash_id = CharField(null=False)
+    query = TextField(null=True)
+    user_input = TextField(null=True)
+
+    class Meta:
+        db_table = 'user_inputs'
+        indexes = (
+            (('app', 'hash_id'), True),
+        )
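The composite index on (app, hash_id) is unique (the trailing True), so each hashed prompt position within an app maps to at most one stored answer. A hedged peewee usage sketch (store_or_fetch, app_obj and hash_id are illustrative placeholders; the real rows are written through hash_and_save_step rather than directly):

from database.models.user_inputs import UserInputs

def store_or_fetch(app_obj, hash_id, query, answer):
    # get_or_create honours the unique (app, hash_id) index: a replayed run gets
    # the existing row back instead of inserting a duplicate answer.
    row, created = UserInputs.get_or_create(
        app=app_obj,
        hash_id=hash_id,
        defaults={'query': query, 'user_input': answer},
    )
    return row.user_input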

View File

@@ -80,7 +80,7 @@ class AgentConvo:
         # Continue conversation until GPT response equals END_RESPONSE
         while response != END_RESPONSE:
             print(colored("Do you want to add anything else? If not, just press ENTER.", 'yellow'))
-            user_message = ask_user(response, False)
+            user_message = ask_user(self.agent.project, response, False)

             if user_message == "":
                 accepted_messages.append(response)

View File

@@ -18,6 +18,7 @@ class Project:
         self.args = args
         self.llm_req_num = 0
         self.command_runs_count = 0
+        self.user_inputs_count = 0
         self.skip_steps = False if ('skip_until_dev_step' in args and args['skip_until_dev_step'] == '0') else True
         self.skip_until_dev_step = args['skip_until_dev_step'] if 'skip_until_dev_step' in args else None

         # TODO make flexible
@@ -108,5 +109,6 @@ class Project:
         answer = ''
         while answer != 'continue':
             answer = styled_text(
+                self,
                 'Once you are ready, type "continue" to continue.',
             )
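user_inputs_count mirrors the existing command_runs_count: styled_text increments it before hashing, so the Nth prompt of a replayed run produces the same lookup key as the Nth prompt of the original run, provided the question text has not changed. A toy illustration of that position-based key (this is not the project's hash_data):

def next_input_key(project, question):
    # Mirrors the order of operations in styled_text: bump the counter first,
    # then build the dict that gets hashed into hash_id.
    project.user_inputs_count += 1
    return {'query': question, 'user_inputs_count': project.user_inputs_count}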

View File

@@ -37,7 +37,7 @@ class Architect(Agent):
             'app_type': self.project.args['app_type']}, ARCHITECTURE)

         if self.project.args.get('advanced', False):
-            architecture = get_additional_info_from_user(architecture, 'architect')
+            architecture = get_additional_info_from_user(self.project, architecture, 'architect')

         logger.info(f"Final architecture: {architecture}")

View File

@@ -30,9 +30,10 @@ class ProductOwner(Agent):
         save_app(self.project.args)

-        main_prompt = ask_for_main_app_definition()
+        main_prompt = ask_for_main_app_definition(self.project)

         high_level_messages = get_additional_info_from_openai(
+            self.project,
             generate_messages_from_description(main_prompt, self.project.args['app_type']))

         high_level_summary = convo_project_description.send_message('utils/summary.prompt',

View File

@@ -49,6 +49,7 @@ def execute_command(project, command, timeout=5000):
         return command_run.cli_response

     answer = styled_text(
+        project,
         'If yes, just press ENTER and if not, please paste the output of running this command here and press ENTER'
     )

View File

@@ -36,8 +36,9 @@ def ask_for_app_type():
     return answer


-def ask_for_main_app_definition():
+def ask_for_main_app_definition(project):
     description = styled_text(
+        project,
         "Describe your app in as many details as possible."
     )
@@ -47,6 +48,7 @@ def ask_for_main_app_definition():
     while True:
         confirmation = styled_text(
+            project,
             "Do you want to add anything else? If not, just press ENTER."
         )
@@ -62,9 +64,9 @@ def ask_for_main_app_definition():
     return description


-def ask_user(question, require_some_input=True):
+def ask_user(project, question, require_some_input=True):
     while True:
-        answer = styled_text(question)
+        answer = styled_text(project, question)

         if answer is None:
             print("Exiting application.")
@@ -77,7 +79,7 @@ def ask_user(question, require_some_input=True):
     return answer


-def get_additional_info_from_openai(messages):
+def get_additional_info_from_openai(project, messages):
     is_complete = False
     while not is_complete:
         # Obtain clarifications using the OpenAI API
@@ -89,7 +91,7 @@ def get_additional_info_from_openai(messages):
             return messages

         # Ask the question to the user
-        answer = ask_user(response['text'])
+        answer = ask_user(project, response['text'])

         # Add the answer to the messages
         messages.append({'role': 'assistant', 'content': response['text']})
@@ -103,7 +105,7 @@ def get_additional_info_from_openai(messages):
 # TODO refactor this to comply with AgentConvo class
-def get_additional_info_from_user(messages, role):
+def get_additional_info_from_user(project, messages, role):
     # TODO process with agent convo
     updated_messages = []
@@ -115,7 +117,7 @@ def get_additional_info_from_user(messages, role):
         print(colored(
             f"Please check this message and say what needs to be changed. If everything is ok just press ENTER",
             "yellow"))
-        answer = ask_user(message, False)
+        answer = ask_user(project, message, False)
         if answer.lower() == '':
             break

     response = create_gpt_chat_completion(

View File

@@ -1,5 +1,8 @@
 from prompt_toolkit.styles import Style
 import questionary
+from termcolor import colored
+
+from database.database import save_user_input, get_user_input_from_hash_id

 custom_style = Style.from_dict({
     'question': '#FFFFFF bold',  # the color and style of the question
@@ -15,6 +18,18 @@ def styled_select(*args, **kwargs):
     return questionary.select(*args, **kwargs).ask()  # .ask() is included here


-def styled_text(*args, **kwargs):
-    kwargs["style"] = custom_style  # Set style here
-    return questionary.text(*args, **kwargs).ask()  # .ask() is included here
+def styled_text(project, question):
+    project.user_inputs_count += 1
+    user_input = get_user_input_from_hash_id(project, question)
+
+    if user_input is not None and project.skip_steps:
+        # if we do, use it
+        print(colored(f'Restoring user input id {user_input.id}: ', 'yellow'), end='')
+        print(colored(f'{user_input.user_input}', 'yellow', attrs=['bold']))
+        return user_input.user_input
+
+    config = {
+        'style': custom_style,
+    }
+
+    response = questionary.text(question, **config).ask()  # .ask() is included here
+    user_input = save_user_input(project, question, response)
+
+    return response
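Taken together, the reworked styled_text gives every prompt replay semantics: bump the counter, look for a previously saved answer, return it when skip_steps is set, otherwise ask the user and persist the new answer. A minimal, self-contained sketch of that control flow with the database and questionary calls stubbed out (all names below are illustrative, not the project's API):

class FakeProject:
    def __init__(self, saved=None, skip_steps=False):
        self.user_inputs_count = 0
        self.saved = saved or {}      # {(question, position): previously saved answer}
        self.skip_steps = skip_steps

def styled_text_flow(project, question, ask=input):
    project.user_inputs_count += 1
    key = (question, project.user_inputs_count)
    previous = project.saved.get(key)
    if previous is not None and project.skip_steps:
        return previous               # restore the answer from the earlier run
    response = ask(question + ' ')
    project.saved[key] = response     # persist it so a future replay can skip this prompt
    return response

# A replayed run with skip_steps=True never reaches the ask() call:
replay = FakeProject(saved={('Describe your app:', 1): 'A todo list app'}, skip_steps=True)
print(styled_text_flow(replay, 'Describe your app:'))   # prints: A todo list app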