From d8a699ca7261650e6e29233f4a23b255e7a28f2f Mon Sep 17 00:00:00 2001
From: alex
Date: Mon, 4 Mar 2024 11:32:41 -0600
Subject: [PATCH] implement langsmith logging

---
 src/main/app/backend/tools/llm.py | 8 ++++++++
 src/main/app/dockerfile           | 3 +++
 src/main/app/requirements.txt     | 1 +
 3 files changed, 12 insertions(+)

diff --git a/src/main/app/backend/tools/llm.py b/src/main/app/backend/tools/llm.py
index 0d053cdb..8403b709 100644
--- a/src/main/app/backend/tools/llm.py
+++ b/src/main/app/backend/tools/llm.py
@@ -3,6 +3,7 @@
 import openai
 from langchain_community.chat_models import ChatVertexAI, AzureChatOpenAI
+from langchain_openai import OpenAI
 # from langchain.chains import ConversationChain
 import pandas as pd
 from pydantic import BaseModel
 
@@ -12,6 +13,7 @@
 from tools.secret_manager import SecretManager
 
 sm = SecretManager()
+os.environ["LANGCHAIN_API_KEY"] = sm.access_secret_version("langsmith_api_key")
 
 class LLM(BaseModel):
     """
@@ -59,6 +61,9 @@ def _init_llm(self, llm_type: str, temperature: float):
                     deployment_name = sm.access_secret_version('gpt4_8k_name'),
                     model_name = 'gpt-4',
                     temperature=temperature) # default is 0.7
+                # self.llm_instance = OpenAI(api_key=sm.access_secret_version("openai_key_dan"),
+                #                            model="gpt-4",
+                #                            temperature=temperature)
             case "GPT-4 32k":
                 # Tokens per Minute Rate Limit (thousands): 30
                 # Rate limit (Tokens per minute): 30000
@@ -69,6 +74,9 @@ def _init_llm(self, llm_type: str, temperature: float):
                     deployment_name = sm.access_secret_version('gpt4_32k_name'),
                     model_name = 'gpt-4-32k',
                     temperature=temperature) # default is 0.7
+                # self.llm_instance = OpenAI(api_key=sm.access_secret_version("openai_key_dan"),
+                #                            model="gpt-4-32k",
+                #                            temperature=temperature)
             case _:
                 raise ValueError("Please provide a valid LLM type.")
 
diff --git a/src/main/app/dockerfile b/src/main/app/dockerfile
index 821dcdbf..bed528cd 100644
--- a/src/main/app/dockerfile
+++ b/src/main/app/dockerfile
@@ -15,6 +15,9 @@
 RUN pip install -U pip
 RUN pip3 install -r requirements.txt
 ENV PORT=8080
+ENV LANGCHAIN_TRACING_V2="true"
+ENV LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
+ENV LANGCHAIN_PROJECT="agent-neo"
 
 # HEALTHCHECK CMD curl --fail http://localhost:8080/_stcore/health
 RUN find /usr/local/lib/python3.11/site-packages/streamlit -type f \( -iname \*.py -o -iname \*.js \) -print0 | xargs -0 sed -i 's/healthz/health-check/g'
diff --git a/src/main/app/requirements.txt b/src/main/app/requirements.txt
index 110dd24c..02e01ce1 100644
--- a/src/main/app/requirements.txt
+++ b/src/main/app/requirements.txt
@@ -8,6 +8,7 @@ google-cloud-storage==2.10.0
 gcloud==0.18.3
 graphdatascience==1.7
 langchain==0.0.311
+langchain-openai==0.0.8
 langcodes==3.3.0
 langsmith==0.0.43
 neo4j==5.8.0
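
Verification note: a minimal sketch, not part of the patch, of how the new tracing setup could
be smoke-tested once the container is running with the env vars above. It assumes the
langsmith_api_key secret exists in Secret Manager and that llm.py has been imported so
LANGCHAIN_API_KEY is populated; the list_runs filter arguments are an assumption against the
pinned langsmith SDK (langsmith==0.0.43), not something taken from this repo.

    import os
    from langsmith import Client  # SDK already pinned in requirements.txt

    # Set by the dockerfile at build time; LANGCHAIN_API_KEY is injected by
    # llm.py from Secret Manager at import time.
    assert os.environ.get("LANGCHAIN_TRACING_V2") == "true"
    assert os.environ.get("LANGCHAIN_PROJECT") == "agent-neo"

    # The client reads LANGCHAIN_API_KEY / LANGCHAIN_ENDPOINT from the environment.
    client = Client()

    # List recent traced runs for the project (assumed call signature; adjust if
    # the installed langsmith version differs).
    for run in client.list_runs(project_name="agent-neo"):
        print(run.name, run.run_type)
        break

If runs show up under the "agent-neo" project in LangSmith after exercising the app, the
logging wiring in this patch is working end to end.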