Mirror of https://github.com/OMGeeky/gpt-pilot.git, synced 2026-02-23 15:49:50 +01:00
Merge pull request #44 from Pythagora-io/sander110419-main
Sander110419 main
@@ -52,7 +52,7 @@ After you have Python and PostgreSQL installed, follow these steps:
 5. `pip install -r requirements.txt` (install the dependencies)
 6. `cd pilot`
 7. `mv .env.example .env` (create the .env file)
-8. Add your OpenAI API key and the PostgreSQL database info to the `.env` file
+8. Add your environment (OpenAI/Azure), your API key and the PostgreSQL database info to the `.env` file
 9. `python db_init.py` (initialize the database)
 10. `python main.py` (start GPT Pilot)
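
The new step 8 means the `.env` file now needs more than just an OpenAI key. As a quick sanity check before running `db_init.py`, something like the following could confirm the template was filled in. This is a hypothetical helper, not part of this commit; it assumes `python-dotenv` is available to load the `.env` file:

```python
# check_env.py -- hypothetical pre-flight check, not part of this commit.
# Confirms the .env file has the keys introduced by this change.
import os

from dotenv import load_dotenv  # assumes python-dotenv is installed

load_dotenv()

REQUIRED = ['ENDPOINT', 'MODEL_NAME', 'MAX_TOKENS', 'DB_NAME', 'DB_HOST', 'DB_PORT']
missing = [key for key in REQUIRED if not os.getenv(key)]
if missing:
    raise SystemExit(f"Missing .env values: {', '.join(missing)}")
print('.env looks complete')
```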
@@ -1,4 +1,11 @@
+#OPENAI or AZURE
+ENDPOINT=OPENAI
 OPENAI_API_KEY=
+AZURE_API_KEY=
+AZURE_ENDPOINT=
+#In case of Azure endpoint, change this to your deployed model name
+MODEL_NAME=gpt-4
+MAX_TOKENS=8192
 DB_NAME=gpt-pilot
 DB_HOST=localhost
 DB_PORT=5432
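
The template defaults to OpenAI. For an Azure OpenAI deployment, the same file would be filled in roughly like this. All values below are placeholders, and per the comment in the template, `MODEL_NAME` must be the *deployment* name in Azure, not the base model name:

```
ENDPOINT=AZURE
AZURE_API_KEY=<your-azure-openai-key>
AZURE_ENDPOINT=https://<your-resource>.openai.azure.com
MODEL_NAME=<your-gpt-4-deployment-name>
MAX_TOKENS=8192
```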
@@ -1,4 +1,5 @@
+import os
+MAX_GPT_MODEL_TOKENS = int(os.getenv('MAX_TOKENS'))
 MIN_TOKENS_FOR_GPT_RESPONSE = 600
-MAX_GPT_MODEL_TOKENS = 8192
 MAX_QUESTIONS = 5
 END_RESPONSE = "EVERYTHING_CLEAR"
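
One consequence of sourcing the limit from the environment: `os.getenv('MAX_TOKENS')` returns `None` when the variable is unset, so `int(None)` raises a `TypeError` at import time. A defensive variant, a sketch rather than what this commit does, would keep the old value as a fallback:

```python
import os

# Fall back to the previous hard-coded limit when MAX_TOKENS is absent,
# so a stale .env file fails soft instead of crashing at import time.
MAX_GPT_MODEL_TOKENS = int(os.getenv('MAX_TOKENS', '8192'))
```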
@@ -46,8 +46,11 @@ def get_tokens_in_messages(messages: List[str]) -> int:
     tokenized_messages = [tokenizer.encode(message['content']) for message in messages]
     return sum(len(tokens) for tokens in tokenized_messages)
 
+# get endpoint and model name from the .env file
+model = os.getenv('MODEL_NAME')
+endpoint = os.getenv('ENDPOINT')
 
-def num_tokens_from_functions(functions, model="gpt-4"):
+def num_tokens_from_functions(functions, model=model):
     """Return the number of tokens used by a list of functions."""
     encoding = tiktoken.get_encoding("cl100k_base")
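
Note that `model = os.getenv('MODEL_NAME')` runs once at module load, so the default argument of `num_tokens_from_functions` is frozen to whatever `MODEL_NAME` held at import time. A usage sketch, with a hypothetical function schema in the OpenAI function-calling format:

```python
# Hypothetical schema, used only to illustrate the call; the `model`
# argument now defaults from MODEL_NAME instead of a hard-coded "gpt-4".
functions = [{
    'name': 'get_weather',
    'description': 'Get the current weather for a city',
    'parameters': {
        'type': 'object',
        'properties': {'city': {'type': 'string'}},
        'required': ['city'],
    },
}]

print(num_tokens_from_functions(functions))  # token cost of the schema
```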
@@ -171,13 +174,22 @@ def stream_gpt_completion(data, req_type):
 
     # spinner = spinner_start(colored("Waiting for OpenAI API response...", 'yellow'))
     # print(colored("Stream response from OpenAI:", 'yellow'))
     api_key = os.getenv("OPENAI_API_KEY")
 
     logger.info(f'Request data: {data}')
 
+    # Check if the ENDPOINT is AZURE
+    if endpoint == 'AZURE':
+        # If yes, get the AZURE_ENDPOINT from the .env file
+        endpoint_url = os.getenv('AZURE_ENDPOINT') + '/openai/deployments/' + model + '/chat/completions?api-version=2023-05-15'
+        headers = {'Content-Type': 'application/json', 'api-key': os.getenv('AZURE_API_KEY')}
+    else:
+        # If not, send the request to the OpenAI endpoint
+        headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + os.getenv("OPENAI_API_KEY")}
+        endpoint_url = 'https://api.openai.com/v1/chat/completions'
     response = requests.post(
-        'https://api.openai.com/v1/chat/completions',
-        headers={'Content-Type': 'application/json', 'Authorization': 'Bearer ' + api_key},
+        endpoint_url,
+        headers=headers,
         json=data,
         stream=True
     )
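
To make the Azure branch concrete, here is the URL it builds for hypothetical `.env` values (placeholders, not real endpoints):

```python
# Worked example of the Azure URL construction above, with placeholder values.
azure_endpoint = 'https://my-resource.openai.azure.com'   # AZURE_ENDPOINT
model = 'my-gpt4-deployment'                              # MODEL_NAME

url = azure_endpoint + '/openai/deployments/' + model + '/chat/completions?api-version=2023-05-15'
print(url)
# https://my-resource.openai.azure.com/openai/deployments/my-gpt4-deployment/chat/completions?api-version=2023-05-15
```

The two branches also differ in authentication, which is why `headers` is built inside the conditional: Azure expects the key in an `api-key` header, while OpenAI expects `Authorization: Bearer <key>`.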