-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #4 from labrijisaad/dev
Created the App Logic.
- Loading branch information
Showing 13 changed files with 488 additions and 46 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -4,9 +4,11 @@ on: | |
push: | ||
branches: | ||
- main | ||
- dev | ||
pull_request: | ||
branches: | ||
- main | ||
- dev | ||
|
||
jobs: | ||
code-quality-check: | ||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,59 @@ | ||
from src.pipelines.query_pipeline import QueryPipeline | ||
from src.utils.utils import load_models_config, load_credentials | ||
|
||
|
||
def main():
    """Build the semantic database from scratch, then answer one user query.

    Flow: load credentials/config -> build + persist FAISS index ->
    retrieve similar docs -> infer expertise area -> query the LLM,
    printing the incurred API cost after each billable step.
    """
    # Load OpenAI API Key and Model Configurations
    creds = load_credentials("secrets/credentials.yml")
    api_key = creds["OPENAI_CREDENTIALS"]
    config = load_models_config("config/models_config.yml")

    # Initialize the QueryPipeline
    pipeline = QueryPipeline(api_key, config)

    # Set up the semantic database (example path and model)
    setup_cost = pipeline.setup_semantic_database(
        markdown_path="data/raw/mock_markdown.md",
        embedding_model="text-embedding-3-small",
        save_index=True,
        index_path="data/processed/faiss_index.bin",
    )
    print(f"Total cost for setting up the semantic database: ${setup_cost}")

    # Example query
    user_query = input("Enter your query: ")

    # Find similar documents
    similar_docs = pipeline.find_similar_documents(
        query_text=user_query, num_results=3
    )

    # Determine expertise area and prepare the prompt
    enhanced_prompt, expertise_cost = pipeline.determine_expertise_and_prepare_prompt(
        user_query=user_query,
        similar_docs=similar_docs,
        inference_model="gpt-3.5-turbo-0125",
        max_completion_tokens=150,
        temperature=0.2,
    )
    print(f"Cost for determining expertise area: ${expertise_cost}")

    # Query the model for a response
    answer, answer_cost = pipeline.query_model_for_response(
        context_enhanced_prompt=enhanced_prompt,
        max_completion_tokens=1500,
        temperature=0.7,
    )
    print(f"Cost for querying the model for a response: ${answer_cost}")

    # Output the response
    print("--------\nContextual Prompt:\n--------")
    print(enhanced_prompt)
    print("--------\nResponse:\n--------")
    print(answer)


if __name__ == "__main__":
    main()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,55 @@ | ||
from src.pipelines.query_pipeline import QueryPipeline | ||
from src.utils.utils import load_models_config, load_credentials | ||
|
||
|
||
def main():
    """Answer one user query against a previously built semantic database.

    Unlike the setup script, this loads an existing FAISS index from disk
    instead of rebuilding it, then runs the same retrieve -> expertise ->
    respond flow, printing the API cost of each billable step.
    """
    # Load OpenAI API Key and Model Configurations
    creds = load_credentials("secrets/credentials.yml")
    api_key = creds["OPENAI_CREDENTIALS"]
    config = load_models_config("config/models_config.yml")

    # Initialize the QueryPipeline
    pipeline = QueryPipeline(api_key, config)

    # Set the model
    pipeline.set_model("text-embedding-3-small")

    # Load the semantic database FAISS index
    pipeline.load_faiss_index("data/processed/faiss_index.bin")

    # Example query
    user_query = input("Enter your query: ")

    # Proceed with the rest of the querying process
    similar_docs = pipeline.find_similar_documents(
        query_text=user_query, num_results=3
    )

    enhanced_prompt, expertise_cost = pipeline.determine_expertise_and_prepare_prompt(
        user_query=user_query,
        similar_docs=similar_docs,
        inference_model="gpt-3.5-turbo-0125",
        max_completion_tokens=150,
        temperature=0.2,
    )
    print(f"Cost for determining expertise area: ${expertise_cost}")

    answer, answer_cost = pipeline.query_model_for_response(
        context_enhanced_prompt=enhanced_prompt,
        max_completion_tokens=1500,
        temperature=0.7,
    )
    print(f"Cost for querying the model for a response: ${answer_cost}")

    # Output the response
    print("--------\nContextual Prompt:\n--------")
    print(enhanced_prompt)
    print("--------\nResponse:\n--------")
    print(answer)


if __name__ == "__main__":
    main()
This file was deleted.
Oops, something went wrong.
Binary file not shown.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
File renamed without changes.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,86 @@ | ||
import requests | ||
|
||
|
||
class ModelInferenceManager:
    """Thin wrapper around the OpenAI chat-completions HTTP API.

    Holds the API key plus a pricing config, and exposes helpers to select a
    model, issue a completion request, compute the dollar cost of a call from
    its token usage, and build the project's prompt templates.
    """

    def __init__(self, api_key, models_config):
        """Store credentials and pricing config; no model is selected yet.

        Args:
            api_key: OpenAI API key used as a Bearer token.
            models_config: dict with a "models" list of groups, each holding
                "variants" with "model", "input_price_per_token" and
                "output_price_per_token" keys (see set_model()).
        """
        self.api_key = api_key
        self.models_config = models_config
        # Populated by set_model(); query_openai() refuses to run until then.
        self.model = None
        self.input_token_price = None
        self.output_token_price = None

    def set_model(self, model_name):
        """Select the active model and cache its per-token prices.

        Raises:
            ValueError: if model_name is not present in models_config.
        """
        for group in self.models_config["models"]:
            for variant in group["variants"]:
                if variant["model"] == model_name:
                    self.model = model_name
                    self.input_token_price = variant["input_price_per_token"]
                    self.output_token_price = variant["output_price_per_token"]
                    return
        raise ValueError(f"Model {model_name} not found in configuration.")

    def query_openai(self, prompt_text, max_completion_tokens=100, temperature=0.7):
        """Send a single-user-message chat completion request.

        Returns:
            (content, usage) on success, where usage is the API's token-usage
            dict; (error_message, None) on HTTP or connection failure.

        Raises:
            ValueError: if no model has been selected via set_model().
        """
        if not self.model:
            raise ValueError(
                "Model not set. Please use set_model() to set a model before querying."
            )
        url = "https://api.openai.com/v1/chat/completions"
        headers = {"Authorization": f"Bearer {self.api_key}"}
        payload = {
            "model": self.model,
            "messages": [{"role": "user", "content": prompt_text}],
            "max_tokens": max_completion_tokens,
            "temperature": temperature,
        }

        try:
            response = requests.post(url, headers=headers, json=payload, timeout=60)
            if response.status_code == 200:
                data = response.json()
                content = data["choices"][0]["message"]["content"]
                usage = data["usage"]
                return content, usage
            # FIX: the error body is not guaranteed to be JSON (proxies,
            # gateways); the original unguarded response.json() could raise
            # instead of returning the documented (message, None) tuple.
            try:
                detail = (
                    response.json()
                    .get("error", {})
                    .get("message", "An unspecified error occurred")
                )
            except ValueError:
                detail = "An unspecified error occurred"
            return f"HTTP Error {response.status_code}: {detail}", None
        except requests.RequestException as e:
            return f"Connection error: {e}", None

    def calculate_cost(self, usage):
        """Return the dollar cost of a call given its usage dict, or None.

        Assumes set_model() has been called so token prices are loaded.
        """
        if not usage:
            return None
        return (usage["prompt_tokens"] * self.input_token_price) + (
            usage["completion_tokens"] * self.output_token_price
        )

    def determine_expertise_area(
        self, user_question, max_completion_tokens, temperature
    ):
        """Ask the model which expertise area(s) the question belongs to.

        Returns:
            (expertise_csv, usage) — usage is the token-usage dict, or the
            fallback error string when the API call yielded no usage data.
        """
        prompt_text = f"""Based on the question provided, identify the relevant expertise area(s). Return your answer in the format:
            'expertise1, expertise2, ...'. Provide only the expertise areas as a comma-separated list, no additional explanations are needed.
            Here is the user Question:
            {user_question}
            """
        response, usage = self.query_openai(
            prompt_text, max_completion_tokens, temperature
        )
        # FIX: the original tested `response`, which is always truthy (even
        # error messages are non-empty strings), so the fallback branch was
        # unreachable. The sentinel must key off the missing usage data.
        return response.strip(), (
            usage if usage is not None else "Error determining expertise area."
        )

    def prepare_prompt_for_llm(self, expertise_area, user_question, context_documents):
        """Build the context-enhanced prompt for the final LLM call.

        Embeds each document verbatim, numbered from 1, between triple-quote
        fences so the model can cite them.
        """
        prompt = (
            f"You are an expert in '{expertise_area}'. A user has asked for help with the following question: "
            f"'{user_question}'. Please provide insights using only the information from the provided documents. "
            "If certain aspects are ambiguous or the documents do not fully address the question, please make educated inferences based on your expertise.\n\n"
            "Here are the documents provided:\n\n"
        )
        for i, document in enumerate(context_documents, start=1):
            prompt += f'Document {i}:\n"""\n{document}\n"""\n\n'
        prompt += "Given your expertise and the information provided in these documents, synthesize the key insights to craft a detailed and relevant response to the above question.\n\n"
        prompt += "Start your response below:\n\n"
        return prompt
Oops, something went wrong.