From 74cbe334213c3c15f791bd71ad7d8a1a65418657 Mon Sep 17 00:00:00 2001 From: Jay Date: Tue, 12 Sep 2023 00:01:13 +0530 Subject: [PATCH] Add check when the response is empty { "id": "", "object": "", "created": 0, "model": "", "prompt_annotations": [ { "prompt_index": 0, "content_filter_results": { "hate": { "filtered": false, "severity": "safe" }, "self_harm": { "filtered": false, "severity": "safe" }, "sexual": { "filtered": false, "severity": "safe" }, "violence": { "filtered": false, "severity": "safe" } } } ], "choices": [], "usage": null } In newer model versions, the choices array can sometimes be empty, as in the example response above. --- pilot/utils/llm_connection.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pilot/utils/llm_connection.py b/pilot/utils/llm_connection.py index 72167f2..3019e8e 100644 --- a/pilot/utils/llm_connection.py +++ b/pilot/utils/llm_connection.py @@ -239,6 +239,10 @@ def stream_gpt_completion(data, req_type): try: json_line = json.loads(line) + + if len(json_line['choices']) == 0: + continue + if 'error' in json_line: logger.error(f'Error in LLM response: {json_line}') raise ValueError(f'Error in LLM response: {json_line["error"]["message"]}')