Don't send max_tokens to the OpenAI API so we can use as much context as possible

This commit is contained in:
Zvonimir Sabljic
2023-09-12 21:28:01 +02:00
parent 9c06143af0
commit 19ac692509

View File

@@ -100,7 +100,6 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO
gpt_data = {
'model': os.getenv('OPENAI_MODEL', 'gpt-4'),
'n': 1,
'max_tokens': min(MAX_GPT_MODEL_TOKENS - tokens_in_messages, 2048),
'temperature': 1,
'top_p': 1,
'presence_penalty': 0,