Merge pull request #82 from nalbion/feature/test_CodeMonkey

Feature/test code monkey
Authored by LeonOstrez on 2023-09-12 08:09:12 +02:00, committed via GitHub
5 changed files with 146 additions and 4 deletions

View File

@@ -231,10 +231,8 @@ def save_development_step(project, prompt_path, prompt_data, messages, llm_respo
    development_step = hash_and_save_step(DevelopmentSteps, project.args['app_id'], hash_data_args, data_fields, "Saved Development Step")
    project.checkpoints['last_development_step'] = development_step
    project.save_files_snapshot(development_step.id)
    return development_step
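
For context, returning the saved step lets callers act on it directly. A minimal hedged sketch of such a caller (variable names illustrative, not from this diff):

    # Hypothetical caller; assumes save_development_step is imported as in helpers.AgentConvo
    development_step = save_development_step(project, prompt_path, prompt_data, messages, response)
    if development_step is not None:
        print(f'Saved development step {development_step.id}')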

View File

@@ -168,7 +168,7 @@ class Project:
        Save a file.

        Args:
            data (dict): File data.
                data: { name: 'hello.py', path: 'path/to/hello.py', content: 'print("Hello!")' }
        """
        # TODO fix this in prompts
        if ' ' in data['name'] or '.' not in data['name']:
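
A usage sketch built directly from the dict shape the docstring documents (the file name and path are illustrative):

    # Sketch: the data shape documented above, passed to save_file()
    project.save_file({
        'name': 'hello.py',
        'path': 'path/to/hello.py',
        'content': 'print("Hello!")'
    })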

View File

@@ -0,0 +1,120 @@
import re
import os
from unittest.mock import patch, Mock, MagicMock
from dotenv import load_dotenv
load_dotenv()

from .CodeMonkey import CodeMonkey
from .Developer import Developer
from database.models.files import File
from helpers.Project import Project, update_file, clear_directory
from helpers.AgentConvo import AgentConvo

SEND_TO_LLM = False     # True: send real requests to the LLM instead of using the mocked convo
WRITE_TO_FILE = False   # True: let save_file() actually write to the workspace


def mock_terminal_size():
    mock_size = Mock()
    mock_size.columns = 80  # or whatever width you want
    return mock_size


class TestCodeMonkey:
    def setup_method(self):
        name = 'TestDeveloper'
        self.project = Project({
                'app_id': 'test-developer',
                'name': name,
                'app_type': ''
            },
            name=name,
            architecture=[],
            user_stories=[],
            current_step='coding',
        )
        self.project.root_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                              '../../../workspace/TestDeveloper'))
        self.project.technologies = []
        self.project.app = None
        self.developer = Developer(self.project)
        self.codeMonkey = CodeMonkey(self.project, developer=self.developer)

    # Note: stacked @patch decorators are applied bottom-up, so the mocks are
    # injected in that order; @patch('os.get_terminal_size', ...) supplies its
    # own replacement and injects nothing.
    @patch('helpers.AgentConvo.get_development_step_from_hash_id', return_value=None)
    @patch('helpers.AgentConvo.save_development_step', return_value=None)
    @patch('os.get_terminal_size', mock_terminal_size)
    @patch.object(File, 'insert')
    def test_implement_code_changes(self, mock_file_insert, mock_save_dev, mock_get_dev):
        # Given
        code_changes_description = "Write the word 'Washington' to a .txt file"

        if SEND_TO_LLM:
            convo = AgentConvo(self.codeMonkey)
        else:
            convo = MagicMock()
            mock_responses = [
                [],
                [{
                    'content': 'Washington',
                    'description': "A new .txt file with the word 'Washington' in it.",
                    'name': 'washington.txt',
                    'path': 'washington.txt'
                }]
            ]
            convo.send_message.side_effect = mock_responses

        if WRITE_TO_FILE:
            self.codeMonkey.implement_code_changes(convo, code_changes_description)
        else:
            # don't write the file, just capture what would have been saved
            with patch.object(Project, 'save_file') as mock_save_file:
                # When
                self.codeMonkey.implement_code_changes(convo, code_changes_description)

                # Then
                mock_save_file.assert_called_once()
                called_data = mock_save_file.call_args[0][0]
                assert re.match(r'\w+\.txt$', called_data['name'])
                assert (called_data['path'] == '/' or called_data['path'] == called_data['name'])
                assert called_data['content'] == 'Washington'

    @patch('helpers.AgentConvo.get_development_step_from_hash_id', return_value=None)
    @patch('helpers.AgentConvo.save_development_step', return_value=None)
    @patch('os.get_terminal_size', mock_terminal_size)
    @patch.object(File, 'insert')
    def test_implement_code_changes_with_read(self, mock_file_insert, mock_save_dev, mock_get_dev):
        # Given
        code_changes_description = "Read the file called file_to_read.txt and write its content to a file called output.txt"
        workspace = self.project.root_path
        update_file(os.path.join(workspace, 'file_to_read.txt'), 'Hello World!\n')

        if SEND_TO_LLM:
            convo = AgentConvo(self.codeMonkey)
        else:
            convo = MagicMock()
            mock_responses = [
                ['file_to_read.txt', 'output.txt'],
                [{
                    'content': 'Hello World!\n',
                    'description': 'This file is the output file. The content of file_to_read.txt is copied into this file.',
                    'name': 'output.txt',
                    'path': 'output.txt'
                }]
            ]
            convo.send_message.side_effect = mock_responses

        if WRITE_TO_FILE:
            self.codeMonkey.implement_code_changes(convo, code_changes_description)
        else:
            with patch.object(Project, 'save_file') as mock_save_file:
                # When
                self.codeMonkey.implement_code_changes(convo, code_changes_description)

                # Then
                clear_directory(workspace)
                mock_save_file.assert_called_once()
                called_data = mock_save_file.call_args[0][0]
                assert called_data['name'] == 'output.txt'
                assert (called_data['path'] == '/' or called_data['path'] == called_data['name'])
                assert called_data['content'] == 'Hello World!\n'
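
One detail worth noting: when `side_effect` is given a list, MagicMock returns one element per call, so the first `send_message` call yields the files-to-read list and the second yields the file payload. A self-contained illustration, separate from the project's code:

    from unittest.mock import MagicMock

    convo = MagicMock()
    convo.send_message.side_effect = [[], [{'name': 'washington.txt'}]]

    assert convo.send_message() == []                            # first call
    assert convo.send_message() == [{'name': 'washington.txt'}]  # second call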

View File

@@ -1 +1 @@
- Ok, now, take your previous message and convert it to actionable items. An item might be a code change or a command run. When you need to change code, make sure that you put the entire content of the file in the value of `content` key even though you will likely copy and paste the most of the previous messsage.
+ Ok, now, take your previous message and convert it to actionable items. An item might be a code change or a command run. When you need to change code, make sure that you put the entire content of the file in the value of `content` key even though you will likely copy and paste the most of the previous message.

View File

@@ -91,6 +91,22 @@ def num_tokens_from_functions(functions, model=model):
def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TOKENS_FOR_GPT_RESPONSE,
                               function_calls=None):
    """
    Called from:
      - AgentConvo.send_message() - these calls often have `function_calls`,
            usually from `pilot/const/function_calls.py`
      - convo.continuous_conversation()
      - prompts.get_additional_info_from_openai()
      - prompts.get_additional_info_from_user() after the user responds to each
            "Please check this message and say what needs to be changed... {message}"

    :param messages: [{ "role": "system"|"assistant"|"user", "content": string }, ... ]
    :param req_type: 'project_description' etc. See common.STEPS
    :param min_tokens: defaults to 600
    :param function_calls: (optional) {'definitions': [{ 'name': str }, ...]}
        see `IMPLEMENT_CHANGES` etc. in `pilot/const/function_calls.py`
    :return: {'text': new_code}
        or if `function_calls` param provided
        {'function_calls': {'name': str, 'arguments': {...}}}
    """
    gpt_data = {
        'model': os.getenv('MODEL_NAME', 'gpt-4'),
        'n': 1,
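
A hedged sketch of how a caller might consume the two documented return shapes (the message content and req_type value here are illustrative):

    # Sketch only: handling both return shapes documented above
    response = create_gpt_chat_completion(
        [{'role': 'user', 'content': 'Create a hello-world script'}],
        'coding',  # illustrative req_type
        function_calls=IMPLEMENT_CHANGES)  # from pilot/const/function_calls.py

    if 'function_calls' in response:
        name = response['function_calls']['name']
        arguments = response['function_calls']['arguments']
    else:
        new_code = response['text']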
@@ -111,6 +127,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO
                del gpt_data[key]

    if function_calls is not None:
        # Advise the LLM of the JSON response schema we are expecting
        gpt_data['functions'] = function_calls['definitions']
        if len(function_calls['definitions']) > 1:
            gpt_data['function_call'] = 'auto'
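
For reference, each entry in `function_calls['definitions']` follows the OpenAI function-schema shape. A hypothetical definition (the real ones live in `pilot/const/function_calls.py`):

    # Hypothetical schema, for illustration only
    function_calls = {'definitions': [{
        'name': 'save_file',
        'description': 'Save a single file to the workspace',
        'parameters': {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'path': {'type': 'string'},
                'content': {'type': 'string'},
            },
            'required': ['name', 'path', 'content'],
        },
    }]}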
@@ -182,6 +199,12 @@ def retry_on_exception(func):
@retry_on_exception
def stream_gpt_completion(data, req_type):
    """
    Called from create_gpt_chat_completion()
    :param data:
    :param req_type: 'project_description' etc. See common.STEPS
    :return: {'text': str} or {'function_calls': {'name': str, 'arguments': '{...}'}}
    """
    terminal_width = os.get_terminal_size().columns
    lines_printed = 2
    buffer = ""  # A buffer to accumulate incoming data
@@ -258,6 +281,7 @@ def stream_gpt_completion(data, req_type):
                logger.error(f'Unable to decode line: {line}')
                continue  # skip to the next line

            # handle the streaming response
            if 'function_call' in json_line:
                if 'name' in json_line['function_call']:
                    function_calls['name'] = json_line['function_call']['name']
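
The hunk ends mid-branch; the conventional continuation (assumed here, not shown in this diff) accumulates the streamed `arguments` fragments the same way:

    # Assumed continuation: streamed function arguments arrive in fragments
    if 'arguments' in json_line['function_call']:
        function_calls['arguments'] += json_line['function_call']['arguments']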