app.py
import os
import json

import requests
import chainlit as cl
from dotenv import load_dotenv
from langchain.vectorstores import Vectara
from sentence_transformers import CrossEncoder

load_dotenv()
# Input your API keys in .env (VECTARA_API_KEY is the assumed variable name;
# the corpus id is kept at 2 as in the original).
vectara_customer_id = os.getenv("VECTARA_CUSTOMER_ID")
vectara_corpus_id = 2
vectara_api_key = os.getenv("VECTARA_API_KEY")

vectara_instance = Vectara(
    vectara_customer_id=vectara_customer_id,
    vectara_corpus_id=vectara_corpus_id,
    vectara_api_key=vectara_api_key,
)
config = {
    "api_key": str(vectara_api_key),
    "customer_id": str(vectara_customer_id),
    "corpus_id": str(vectara_corpus_id),
    "lambda_val": 0.025,
    "top_k": 10,
}
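
# HHEM: Vectara's hallucination evaluation cross-encoder. As described on its
# model card, it scores the factual consistency between a source passage and a
# generated summary (a higher score generally means more consistent).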
model = CrossEncoder('vectara/hallucination_evaluation_model')

@cl.on_message
async def main(message):
    corpus_key = [
        {
            "customerId": config["customer_id"],
            "corpusId": config["corpus_id"],
            "lexicalInterpolationConfig": {"lambda": config["lambda_val"]},
        }
    ]
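
    # Build the Vectara /v1/query request body: a single query that asks for
    # two sentences of context around each hit and a generative summary of the
    # top results (field names follow Vectara's REST query API).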
    data = {
        "query": [
            {
                "query": message,
                "start": 0,
                "numResults": config["top_k"],
                "contextConfig": {
                    "sentencesBefore": 2,
                    "sentencesAfter": 2,
                },
                "corpusKey": corpus_key,
                "summary": [
                    {
                        "responseLang": "eng",
                        "maxSummarizedResults": 5,
                    }
                ],
            }
        ]
    }
    headers = {
        "x-api-key": config["api_key"],
        "customer-id": config["customer_id"],
        "Content-Type": "application/json",
    }
    response = requests.post(
        url="https://api.vectara.io/v1/query",
        headers=headers,
        data=json.dumps(data),
    )
    if response.status_code != 200:
        print(
            "Query failed "
            f"(code {response.status_code}, reason {response.reason}, details "
            f"{response.text})"
        )
        return []

    result = response.json()
    responses = result["responseSet"][0]["response"]
    documents = result["responseSet"][0]["document"]
    summary = result["responseSet"][0]["summary"][0]["text"]

    res = [[r["text"], r["score"]] for r in responses]
    texts = [r[0] for r in res[:5]]
    scores = [model.predict([text, summary]) for text in texts]
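
    # Each predict() call above scores one (passage, summary) pair with HHEM
    # and returns a single factual-consistency score for that pair.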
    text_elements = []
    source_names = []
    docs = vectara_instance.similarity_search(message)
    for source_idx, source_doc in enumerate(docs[:5]):
        source_name = f"Source {source_idx + 1}"
        text_elements.append(
            cl.Text(content=source_doc.page_content, name=source_name)
        )
    source_names = [text_el.name for text_el in text_elements]

    ans = f"{summary}\n Sources: {', '.join(source_names)} \n HHEM Scores: {scores}"
    await cl.Message(content=ans, author="Assistant", elements=text_elements).send()
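
# Assuming this is a Chainlit app (implied by the on_message handler and the
# Text/Message elements), it can be launched locally with:
#   chainlit run app.py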