Nicholas Albion
2023-10-04 17:48:22 +11:00
parent 67f88a6924
commit 64c8002a83
3 changed files with 263 additions and 263 deletions


@@ -40,80 +40,80 @@ class TestCodeMonkey:
        self.developer = Developer(self.project)
        self.codeMonkey = CodeMonkey(self.project, developer=self.developer)

    @patch('helpers.AgentConvo.get_saved_development_step', return_value=None)
    @patch('helpers.AgentConvo.save_development_step', new_callable=MagicMock)
    @patch('os.get_terminal_size', mock_terminal_size)
    @patch.object(File, 'insert')
    def test_implement_code_changes(self, mock_get_dev, mock_save_dev, mock_file_insert):
        # Given
        code_changes_description = "Write the word 'Washington' to a .txt file"

        if SEND_TO_LLM:
            convo = AgentConvo(self.codeMonkey)
        else:
            convo = MagicMock()
            mock_responses = [
                # [],
                [{
                    'content': 'Washington',
                    'description': "A new .txt file with the word 'Washington' in it.",
                    'name': 'washington.txt',
                    'path': 'washington.txt'
                }]
            ]
            convo.send_message.side_effect = mock_responses

        if WRITE_TO_FILE:
            self.codeMonkey.implement_code_changes(convo, code_changes_description)
        else:
            # don't write the file, just
            with patch.object(Project, 'save_file') as mock_save_file:
                # When
                self.codeMonkey.implement_code_changes(convo, code_changes_description)

                # Then
                mock_save_file.assert_called_once()
                called_data = mock_save_file.call_args[0][0]
                assert re.match(r'\w+\.txt$', called_data['name'])
                assert (called_data['path'] == '/' or called_data['path'] == called_data['name'])
                assert called_data['content'] == 'Washington'

    @patch('helpers.AgentConvo.get_saved_development_step', return_value=None)
    @patch('helpers.AgentConvo.save_development_step', new_callable=MagicMock)
    @patch('os.get_terminal_size', mock_terminal_size)
    @patch.object(File, 'insert')
    def test_implement_code_changes_with_read(self, mock_get_dev, mock_save_dev, mock_file_insert):
        # Given
        code_changes_description = "Read the file called file_to_read.txt and write its content to a file called output.txt"
        workspace = self.project.root_path
        update_file(os.path.join(workspace, 'file_to_read.txt'), 'Hello World!\n')

        if SEND_TO_LLM:
            convo = AgentConvo(self.codeMonkey)
        else:
            convo = MagicMock()
            mock_responses = [
                # ['file_to_read.txt', 'output.txt'],
                [{
                    'content': 'Hello World!\n',
                    'description': 'This file is the output file. The content of file_to_read.txt is copied into this file.',
                    'name': 'output.txt',
                    'path': 'output.txt'
                }]
            ]
            convo.send_message.side_effect = mock_responses

        if WRITE_TO_FILE:
            self.codeMonkey.implement_code_changes(convo, code_changes_description)
        else:
            with patch.object(Project, 'save_file') as mock_save_file:
                # When
                self.codeMonkey.implement_code_changes(convo, code_changes_description)

                # Then
                clear_directory(workspace)
                mock_save_file.assert_called_once()
                called_data = mock_save_file.call_args[0][0]
                assert called_data['name'] == 'output.txt'
                assert (called_data['path'] == '/' or called_data['path'] == called_data['name'])
                assert called_data['content'] == 'Hello World!\n'
# @patch('helpers.AgentConvo.get_saved_development_step', return_value=None)
# @patch('helpers.AgentConvo.save_development_step', new_callable=MagicMock)
# @patch('os.get_terminal_size', mock_terminal_size)
# @patch.object(File, 'insert')
# def test_implement_code_changes(self, mock_get_dev, mock_save_dev, mock_file_insert):
# # Given
# code_changes_description = "Write the word 'Washington' to a .txt file"
#
# if SEND_TO_LLM:
# convo = AgentConvo(self.codeMonkey)
# else:
# convo = MagicMock()
# mock_responses = [
# # [],
# [{
# 'content': 'Washington',
# 'description': "A new .txt file with the word 'Washington' in it.",
# 'name': 'washington.txt',
# 'path': 'washington.txt'
# }]
# ]
# convo.send_message.side_effect = mock_responses
#
# if WRITE_TO_FILE:
# self.codeMonkey.implement_code_changes(convo, code_changes_description)
# else:
# # don't write the file, just
# with patch.object(Project, 'save_file') as mock_save_file:
# # When
# self.codeMonkey.implement_code_changes(convo, code_changes_description)
#
# # Then
# mock_save_file.assert_called_once()
# called_data = mock_save_file.call_args[0][0]
# assert re.match(r'\w+\.txt$', called_data['name'])
# assert (called_data['path'] == '/' or called_data['path'] == called_data['name'])
# assert called_data['content'] == 'Washington'
#
# @patch('helpers.AgentConvo.get_saved_development_step', return_value=None)
# @patch('helpers.AgentConvo.save_development_step', new_callable=MagicMock)
# @patch('os.get_terminal_size', mock_terminal_size)
# @patch.object(File, 'insert')
# def test_implement_code_changes_with_read(self, mock_get_dev, mock_save_dev, mock_file_insert):
# # Given
# code_changes_description = "Read the file called file_to_read.txt and write its content to a file called output.txt"
# workspace = self.project.root_path
# update_file(os.path.join(workspace, 'file_to_read.txt'), 'Hello World!\n')
#
# if SEND_TO_LLM:
# convo = AgentConvo(self.codeMonkey)
# else:
# convo = MagicMock()
# mock_responses = [
# # ['file_to_read.txt', 'output.txt'],
# [{
# 'content': 'Hello World!\n',
# 'description': 'This file is the output file. The content of file_to_read.txt is copied into this file.',
# 'name': 'output.txt',
# 'path': 'output.txt'
# }]
# ]
# convo.send_message.side_effect = mock_responses
#
# if WRITE_TO_FILE:
# self.codeMonkey.implement_code_changes(convo, code_changes_description)
# else:
# with patch.object(Project, 'save_file') as mock_save_file:
# # When
# self.codeMonkey.implement_code_changes(convo, code_changes_description)
#
# # Then
# clear_directory(workspace)
# mock_save_file.assert_called_once()
# called_data = mock_save_file.call_args[0][0]
# assert called_data['name'] == 'output.txt'
# assert (called_data['path'] == '/' or called_data['path'] == called_data['name'])
# assert called_data['content'] == 'Hello World!\n'
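Note: the CodeMonkey tests above reference module-level names that fall outside this hunk (SEND_TO_LLM, WRITE_TO_FILE, mock_terminal_size, plus the update_file/clear_directory helpers). A minimal sketch of that scaffolding follows, assuming a flag-based setup; everything not visible in the hunk is an assumption, not the file's actual code:

# Sketch only: module-level scaffolding the tests above appear to rely on.
# The flag values and the terminal-size stub are assumptions; they are not shown in this hunk.
import os
from unittest.mock import MagicMock

SEND_TO_LLM = False      # True would send the prompts to the real LLM instead of the mocked convo
WRITE_TO_FILE = False    # True would let implement_code_changes() really write into the workspace

# os.get_terminal_size() fails when stdout is not a real TTY (e.g. under pytest),
# so the tests patch it with a stub that always reports an 80x24 terminal.
mock_terminal_size = MagicMock(return_value=os.terminal_size((80, 24)))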


@@ -1,184 +1,184 @@
import builtins
import json
import os
import pytest
from unittest.mock import patch, MagicMock

import requests

from helpers.AgentConvo import AgentConvo
from dotenv import load_dotenv
load_dotenv()

from main import get_custom_print
from .Developer import Developer, ENVIRONMENT_SETUP_STEP
from helpers.Project import Project
from test.mock_questionary import MockQuestionary


class TestDeveloper:
    def setup_method(self):
        builtins.print, ipc_client_instance = get_custom_print({})

        name = 'TestDeveloper'
        self.project = Project({
                'app_id': 'test-developer',
                'name': name,
                'app_type': ''
            },
            name=name,
            architecture=[],
            user_stories=[]
        )

        self.project.root_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                               '../../../workspace/TestDeveloper'))
        self.project.technologies = []
        self.project.current_step = ENVIRONMENT_SETUP_STEP
        self.developer = Developer(self.project)

    @pytest.mark.uses_tokens
    @patch('helpers.AgentConvo.get_saved_development_step')
    @patch('helpers.AgentConvo.save_development_step')
    @patch('helpers.AgentConvo.create_gpt_chat_completion',
           return_value={'text': '{"command": "python --version", "timeout": 10}'})
    @patch('helpers.cli.execute_command', return_value=('', 'DONE'))
    def test_install_technology(self, mock_execute_command,
                                mock_completion, mock_save, mock_get_saved_step):
        # Given
        self.developer.convo_os_specific_tech = AgentConvo(self.developer)

        # When
        llm_response = self.developer.install_technology('python')

        # Then
        assert llm_response == 'DONE'
        mock_execute_command.assert_called_once_with(self.project, 'python --version', 10)

    @patch('helpers.AgentConvo.get_saved_development_step')
    @patch('helpers.AgentConvo.save_development_step')
    # GET_TEST_TYPE has optional properties, so we need to be able to handle missing args.
    @patch('helpers.AgentConvo.create_gpt_chat_completion',
           new_callable=MagicMock,
           return_value={'text': '{"type": "command_test", "command": {"command": "npm run test", "timeout": 3000}}'})
    # 2nd arg of return_value: `None` to debug, 'DONE' if successful
    @patch('helpers.cli.execute_command', return_value=('stdout:\n```\n\n```', 'DONE'))
    # @patch('helpers.cli.ask_user', return_value='yes')
    # @patch('helpers.cli.get_saved_command_run')
    def test_code_changes_command_test(self, mock_get_saved_step, mock_save, mock_chat_completion,
                                       # Note: the 2nd line below will use the LLM to debug, uncomment the @patches accordingly
                                       mock_execute_command):
                                       # mock_ask_user, mock_get_saved_command_run):
        # Given
        monkey = None
        convo = AgentConvo(self.developer)
        convo.save_branch = lambda branch_name=None: branch_name

        # When
        # "Now, we need to verify if this change was successfully implemented...
        result = self.developer.test_code_changes(monkey, convo)

        # Then
        assert result == {'success': True, 'cli_response': 'stdout:\n```\n\n```'}

    @patch('helpers.AgentConvo.get_saved_development_step')
    @patch('helpers.AgentConvo.save_development_step')
    # GET_TEST_TYPE has optional properties, so we need to be able to handle missing args.
    @patch('helpers.AgentConvo.create_gpt_chat_completion',
           return_value={'text': '{"type": "manual_test", "manual_test_description": "Does it look good?"}'})
    @patch('helpers.Project.ask_user', return_value='continue', new_callable=MagicMock)
    def test_code_changes_manual_test_continue(self, mock_get_saved_step, mock_save, mock_chat_completion, mock_ask_user):
        # Given
        monkey = None
        convo = AgentConvo(self.developer)
        convo.save_branch = lambda branch_name=None: branch_name

        # When
        result = self.developer.test_code_changes(monkey, convo)

        # Then
        assert result == {'success': True, 'user_input': 'continue'}

    @patch('helpers.AgentConvo.get_saved_development_step')
    @patch('helpers.AgentConvo.save_development_step')
    @patch('helpers.AgentConvo.create_gpt_chat_completion', new_callable=MagicMock)
    @patch('utils.questionary.get_saved_user_input')
    # https://github.com/Pythagora-io/gpt-pilot/issues/35
    def test_code_changes_manual_test_no(self, mock_get_saved_user_input, mock_chat_completion, mock_save, mock_get_saved_step):
        # Given
        monkey = None
        convo = AgentConvo(self.developer)
        convo.save_branch = lambda branch_name=None: branch_name
        convo.load_branch = lambda function_uuid=None: function_uuid
        self.project.developer = self.developer

        mock_chat_completion.side_effect = [
            {'text': '{"type": "manual_test", "manual_test_description": "Does it look good?"}'},
            {'text': '{"steps": [{"type": "command", "command": {"command": "something scary", "timeout": 3000}, "check_if_fixed": true}]}'},
            {'text': 'do something else scary'},
        ]

        mock_questionary = MockQuestionary(['no', 'no'])

        with patch('utils.questionary.questionary', mock_questionary):
            # When
            result = self.developer.test_code_changes(monkey, convo)

            # Then
            assert result == {'success': True, 'user_input': 'no'}

    @patch('helpers.cli.execute_command', return_value=('stdout:\n```\n\n```', 'DONE'))
    @patch('helpers.AgentConvo.get_saved_development_step')
    @patch('helpers.AgentConvo.save_development_step')
    @patch('utils.llm_connection.requests.post')
    @patch('utils.questionary.get_saved_user_input')
    def test_test_code_changes_invalid_json(self, mock_get_saved_user_input,
                                            mock_requests_post,
                                            mock_save,
                                            mock_get_saved_step,
                                            mock_execute,
                                            monkeypatch):
        # Given
        monkey = None
        convo = AgentConvo(self.developer)
        convo.save_branch = lambda branch_name=None: branch_name
        convo.load_branch = lambda function_uuid=None: function_uuid
        self.project.developer = self.developer

        # we send a GET_TEST_TYPE spec, but the 1st response is invalid
        types_in_response = ['command', 'wrong_again', 'command_test']
        json_received = []

        def generate_response(*args, **kwargs):
            json_received.append(kwargs['json'])

            gpt_response = json.dumps({
                'type': types_in_response.pop(0),
                'command': {
                    'command': 'node server.js',
                    'timeout': 3000
                }
            })
            choice = json.dumps({'delta': {'content': gpt_response}})
            line = json.dumps({'choices': [json.loads(choice)]}).encode('utf-8')

            response = requests.Response()
            response.status_code = 200
            response.iter_lines = lambda: [line]
            print(f'##### mock response: {response}')
            return response

        mock_requests_post.side_effect = generate_response
        monkeypatch.setenv('OPENAI_API_KEY', 'secret')

        mock_questionary = MockQuestionary([''])

        # with patch('utils.questionary.questionary', mock_questionary):
        # When
        result = self.developer.test_code_changes(monkey, convo)

        # Then
        assert result == {'success': True, 'cli_response': 'stdout:\n```\n\n```'}
        assert mock_requests_post.call_count == 3
        assert "The JSON is invalid at $.type - 'command' is not one of ['automated_test', 'command_test', 'manual_test', 'no_test']" in json_received[1]['messages'][3]['content']
        assert mock_execute.call_count == 1
# import builtins
# import json
# import os
# import pytest
# from unittest.mock import patch, MagicMock
#
# import requests
#
# from helpers.AgentConvo import AgentConvo
# from dotenv import load_dotenv
# load_dotenv()
#
# from main import get_custom_print
# from .Developer import Developer, ENVIRONMENT_SETUP_STEP
# from helpers.Project import Project
# from test.mock_questionary import MockQuestionary
#
#
# class TestDeveloper:
# def setup_method(self):
# builtins.print, ipc_client_instance = get_custom_print({})
#
# name = 'TestDeveloper'
# self.project = Project({
# 'app_id': 'test-developer',
# 'name': name,
# 'app_type': ''
# },
# name=name,
# architecture=[],
# user_stories=[]
# )
#
# self.project.root_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
# '../../../workspace/TestDeveloper'))
# self.project.technologies = []
# self.project.current_step = ENVIRONMENT_SETUP_STEP
# self.developer = Developer(self.project)
#
# @pytest.mark.uses_tokens
# @patch('helpers.AgentConvo.get_saved_development_step')
# @patch('helpers.AgentConvo.save_development_step')
# @patch('helpers.AgentConvo.create_gpt_chat_completion',
# return_value={'text': '{"command": "python --version", "timeout": 10}'})
# @patch('helpers.cli.execute_command', return_value=('', 'DONE'))
# def test_install_technology(self, mock_execute_command,
# mock_completion, mock_save, mock_get_saved_step):
# # Given
# self.developer.convo_os_specific_tech = AgentConvo(self.developer)
#
# # When
# llm_response = self.developer.install_technology('python')
#
# # Then
# assert llm_response == 'DONE'
# mock_execute_command.assert_called_once_with(self.project, 'python --version', 10)
#
# @patch('helpers.AgentConvo.get_saved_development_step')
# @patch('helpers.AgentConvo.save_development_step')
# # GET_TEST_TYPE has optional properties, so we need to be able to handle missing args.
# @patch('helpers.AgentConvo.create_gpt_chat_completion',
# new_callable = MagicMock,
# return_value={'text': '{"type": "command_test", "command": {"command": "npm run test", "timeout": 3000}}'})
# # 2nd arg of return_value: `None` to debug, 'DONE' if successful
# @patch('helpers.cli.execute_command', return_value=('stdout:\n```\n\n```', 'DONE'))
# # @patch('helpers.cli.ask_user', return_value='yes')
# # @patch('helpers.cli.get_saved_command_run')
# def test_code_changes_command_test(self, mock_get_saved_step, mock_save, mock_chat_completion,
# # Note: the 2nd line below will use the LLM to debug, uncomment the @patches accordingly
# mock_execute_command):
# # mock_ask_user, mock_get_saved_command_run):
# # Given
# monkey = None
# convo = AgentConvo(self.developer)
# convo.save_branch = lambda branch_name=None: branch_name
#
# # When
# # "Now, we need to verify if this change was successfully implemented...
# result = self.developer.test_code_changes(monkey, convo)
#
# # Then
# assert result == {'success': True, 'cli_response': 'stdout:\n```\n\n```'}
#
# @patch('helpers.AgentConvo.get_saved_development_step')
# @patch('helpers.AgentConvo.save_development_step')
# # GET_TEST_TYPE has optional properties, so we need to be able to handle missing args.
# @patch('helpers.AgentConvo.create_gpt_chat_completion',
# return_value={'text': '{"type": "manual_test", "manual_test_description": "Does it look good?"}'})
# @patch('helpers.Project.ask_user', return_value='continue', new_callable=MagicMock)
# def test_code_changes_manual_test_continue(self, mock_get_saved_step, mock_save, mock_chat_completion, mock_ask_user):
# # Given
# monkey = None
# convo = AgentConvo(self.developer)
# convo.save_branch = lambda branch_name=None: branch_name
#
# # When
# result = self.developer.test_code_changes(monkey, convo)
#
# # Then
# assert result == {'success': True, 'user_input': 'continue'}
#
# @patch('helpers.AgentConvo.get_saved_development_step')
# @patch('helpers.AgentConvo.save_development_step')
# @patch('helpers.AgentConvo.create_gpt_chat_completion', new_callable=MagicMock)
# @patch('utils.questionary.get_saved_user_input')
# # https://github.com/Pythagora-io/gpt-pilot/issues/35
# def test_code_changes_manual_test_no(self, mock_get_saved_user_input, mock_chat_completion, mock_save, mock_get_saved_step):
# # Given
# monkey = None
# convo = AgentConvo(self.developer)
# convo.save_branch = lambda branch_name=None: branch_name
# convo.load_branch = lambda function_uuid=None: function_uuid
# self.project.developer = self.developer
#
# mock_chat_completion.side_effect = [
# {'text': '{"type": "manual_test", "manual_test_description": "Does it look good?"}'},
# {'text': '{"steps": [{"type": "command", "command": {"command": "something scary", "timeout": 3000}, "check_if_fixed": true}]}'},
# {'text': 'do something else scary'},
# ]
#
# mock_questionary = MockQuestionary(['no', 'no'])
#
# with patch('utils.questionary.questionary', mock_questionary):
# # When
# result = self.developer.test_code_changes(monkey, convo)
#
# # Then
# assert result == {'success': True, 'user_input': 'no'}
#
# @patch('helpers.cli.execute_command', return_value=('stdout:\n```\n\n```', 'DONE'))
# @patch('helpers.AgentConvo.get_saved_development_step')
# @patch('helpers.AgentConvo.save_development_step')
# @patch('utils.llm_connection.requests.post')
# @patch('utils.questionary.get_saved_user_input')
# def test_test_code_changes_invalid_json(self, mock_get_saved_user_input,
# mock_requests_post,
# mock_save,
# mock_get_saved_step,
# mock_execute,
# monkeypatch):
# # Given
# monkey = None
# convo = AgentConvo(self.developer)
# convo.save_branch = lambda branch_name=None: branch_name
# convo.load_branch = lambda function_uuid=None: function_uuid
# self.project.developer = self.developer
#
# # we send a GET_TEST_TYPE spec, but the 1st response is invalid
# types_in_response = ['command', 'wrong_again', 'command_test']
# json_received = []
#
# def generate_response(*args, **kwargs):
# json_received.append(kwargs['json'])
#
# gpt_response = json.dumps({
# 'type': types_in_response.pop(0),
# 'command': {
# 'command': 'node server.js',
# 'timeout': 3000
# }
# })
# choice = json.dumps({'delta': {'content': gpt_response}})
# line = json.dumps({'choices': [json.loads(choice)]}).encode('utf-8')
#
# response = requests.Response()
# response.status_code = 200
# response.iter_lines = lambda: [line]
# print(f'##### mock response: {response}')
# return response
#
# mock_requests_post.side_effect = generate_response
# monkeypatch.setenv('OPENAI_API_KEY', 'secret')
#
# mock_questionary = MockQuestionary([''])
#
# # with patch('utils.questionary.questionary', mock_questionary):
# # When
# result = self.developer.test_code_changes(monkey, convo)
#
# # Then
# assert result == {'success': True, 'cli_response': 'stdout:\n```\n\n```'}
# assert mock_requests_post.call_count == 3
# assert "The JSON is invalid at $.type - 'command' is not one of ['automated_test', 'command_test', 'manual_test', 'no_test']" in json_received[1]['messages'][3]['content']
# assert mock_execute.call_count == 1
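For context on the invalid-JSON test above: generate_response() fakes one streamed chat-completion chunk per POST, as a single JSON line whose choices[0].delta.content carries the agent's JSON answer. The snippet below only decodes a line of that same shape to show why the first response ('type': 'command') triggers the retry message that the test asserts on; the real parsing in utils.llm_connection is not part of this diff and may differ.

# Standalone illustration of the payload shape generate_response() produces above.
import json

gpt_response = json.dumps({'type': 'command', 'command': {'command': 'node server.js', 'timeout': 3000}})
line = json.dumps({'choices': [{'delta': {'content': gpt_response}}]}).encode('utf-8')

chunk = json.loads(line.decode('utf-8'))
content = chunk['choices'][0]['delta']['content']   # the streamed text fragment
payload = json.loads(content)                        # the JSON the agent was asked to return

# 'command' is not one of the allowed GET_TEST_TYPE values
# ('automated_test', 'command_test', 'manual_test', 'no_test'),
# which is why the test expects a retry and checks mock_requests_post.call_count == 3.
assert payload['type'] == 'command'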


@@ -53,8 +53,8 @@ def test_save_file(
    if test_data['path'] is not None:
        data['path'] = test_data['path']

    mock_update_file = mocker.patch('helpers.Project.update_file', return_value=None)
    mocker.patch('helpers.Project.File')
    # mock_update_file = mocker.patch('helpers.Project.update_file', return_value=None)
    # mocker.patch('helpers.Project.File')
    project = create_project()
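The hunk above only shows the arrange step of test_save_file. A minimal sketch of how the act/assert half could look with pytest-mock, assuming Project.save_file() resolves the target location and delegates the write to helpers.Project.update_file(); finish_test_save_file and expected_full_path are hypothetical names used only for illustration:

# Sketch only: a plausible act/assert half for test_save_file.
# `expected_full_path` is hypothetical; the real test's expected values are outside this hunk.
def finish_test_save_file(project, data, expected_full_path, mock_update_file):
    # When
    project.save_file(data)

    # Then: the write is expected to be delegated to the patched helpers.Project.update_file
    mock_update_file.assert_called_once_with(expected_full_path, data['content'])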