This repository has been archived by the owner on May 17, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 2
/
ai_handler.py
55 lines (49 loc) · 1.92 KB
/
ai_handler.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import requests
import os
import openai
import logging
# Configure the root logger: append to app.log using "name - LEVEL - message" lines.
# NOTE(review): basicConfig runs at import time — a module-level side effect that any
# importer of this module inherits; it is a no-op if handlers are already configured.
logging.basicConfig(filename='app.log', filemode='a', format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def generate_response(email_data, model, max_tokens, prompt_template, local_server, system_prompt):
    """Generate a draft reply to an email via a local LLM server or the OpenAI API.

    The backend is chosen at call time: if the USE_LOCAL environment variable is
    'true' (case-insensitive), a POST is sent to ``local_server`` (assumed to be
    an OpenAI-compatible chat-completions endpoint); otherwise the OpenAI API is
    used with the key from the OPENAI_API_KEY environment variable.

    Args:
        email_data: dict with at least 'Body', 'To', 'From', 'Subject' and
            'ThreadId' keys (the incoming email being replied to).
        model: model identifier passed to the chat-completion backend.
        max_tokens: accepted but currently unused — kept for interface
            compatibility with callers.  # TODO(review): wire into the requests
        prompt_template: format string containing an ``{email_body}`` placeholder.
        local_server: URL of the local OpenAI-compatible server.
        system_prompt: content of the system message.

    Returns:
        dict with 'To', 'From', 'Subject', 'Body' (the generated reply) and
        'ThreadId' on success, or None if any error occurred (the error is
        logged with its traceback).
    """
    prompt = prompt_template.format(email_body=email_data['Body'])
    try:
        if os.getenv('USE_LOCAL', 'false').lower() == 'true':
            # Send a POST request to the local server. A timeout prevents the
            # call from hanging forever if the server is unresponsive.
            response = requests.post(
                local_server,
                json={
                    "model": model,
                    "messages": [
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": prompt}
                    ]
                },
                timeout=120,
            )
            # Fail fast on HTTP errors instead of raising a confusing
            # KeyError when indexing into an error-response body below.
            response.raise_for_status()
            # Extract the generated response
            response_content = response.json()["choices"][0]["message"]["content"]
        else:
            # Use OpenAI API
            openai.api_key = os.getenv('OPENAI_API_KEY')
            response = openai.ChatCompletion.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": prompt}
                ]
            )
            # Extract the generated response
            response_content = response['choices'][0]['message']['content']
        # Prepare the response data; ThreadId is preserved so mail clients can
        # group the draft into the original conversation.
        return {
            'To': email_data['To'],
            'From': email_data['From'],
            'Subject': email_data['Subject'],
            'Body': response_content,
            'ThreadId': email_data['ThreadId']  # Add the ThreadId
        }
    except Exception as e:
        # logging.exception records the full traceback, not just the message,
        # which logging.error with an f-string would have discarded.
        logging.exception(f'An error occurred: {e}')
        return None