From c8c30b020275f8fc4b2f0f2669cb96668db16796 Mon Sep 17 00:00:00 2001
From: Jeff Kayne <43336277+jeffkayne@users.noreply.github.com>
Date: Wed, 15 Nov 2023 10:07:06 +0100
Subject: [PATCH] update openai api

---
 examples/streamlit/llm_app.py     | 17 ++++++++---------
 examples/streamlit/llm_chatbot.py | 12 ++++++------
 2 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/examples/streamlit/llm_app.py b/examples/streamlit/llm_app.py
index 8d79bdcb..b1357e24 100644
--- a/examples/streamlit/llm_app.py
+++ b/examples/streamlit/llm_app.py
@@ -1,6 +1,6 @@
-import openai
 import streamlit as st
-from trubrics_utils import trubrics_config, trubrics_successful_feedback
+from openai import OpenAI
+from trubrics_utils import trubrics_config
 
 from trubrics.integrations.streamlit import FeedbackCollector
 
@@ -36,17 +36,19 @@
     help="Consult https://platform.openai.com/docs/models/gpt-3-5 for model info.",
 )
 
-openai.api_key = st.secrets.get("OPENAI_API_KEY")
-if openai.api_key is None:
+openai_api_key = st.secrets.get("OPENAI_API_KEY")
+if openai_api_key is None:
     st.info("Please add your OpenAI API key to continue.")
     st.stop()
 
+client = OpenAI(api_key=openai_api_key)
+
 prompt = st.text_area(label="Prompt", label_visibility="collapsed", placeholder="What would you like to know?")
 
 button = st.button(f"Ask {model}")
 if button:
-    response = openai.ChatCompletion.create(model=model, messages=[{"role": "user", "content": prompt}])
-    response_text = response.choices[0].message["content"]
+    response = client.chat.completions.create(model=model, messages=[{"role": "user", "content": prompt}])
+    response_text = response.choices[0].message.content
     st.session_state.logged_prompt = collector.log_prompt(
         config_model={"model": model}, prompt=prompt, generation=response_text, tags=["llm_app.py"], user_id=email
     )
@@ -67,6 +69,3 @@
         key=f"feedback_{st.session_state.feedback_key}",  # overwrite with new key
         user_id=email,
     )
-
-    if feedback:
-        trubrics_successful_feedback(feedback)
diff --git a/examples/streamlit/llm_chatbot.py b/examples/streamlit/llm_chatbot.py
index 23f4e825..5a8fd43c 100644
--- a/examples/streamlit/llm_chatbot.py
+++ b/examples/streamlit/llm_chatbot.py
@@ -1,7 +1,7 @@
 import uuid
 
-import openai
 import streamlit as st
+from openai import OpenAI
 from trubrics_utils import trubrics_config
 
 from trubrics.integrations.streamlit import FeedbackCollector
@@ -78,18 +78,18 @@ def init_trubrics(email, password):
         st.info("Please add your OpenAI API key to continue.")
         st.stop()
     else:
-        openai.api_key = openai_api_key
+        client = OpenAI(api_key=openai_api_key)
 
     with st.chat_message("assistant"):
         if stream:
             message_placeholder = st.empty()
             generation = ""
-            for response in openai.ChatCompletion.create(model=model, messages=messages, stream=True):
-                generation += response.choices[0].delta.get("content", "")
+            for part in client.chat.completions.create(model=model, messages=messages, stream=True):
+                generation += part.choices[0].delta.content or ""
                 message_placeholder.markdown(generation + "▌")
             message_placeholder.markdown(generation)
         else:
-            response = openai.ChatCompletion.create(model=model, messages=messages)
+            response = client.chat.completions.create(model=model, messages=messages)
             generation = response.choices[0].message.content
             st.write(generation)
 
@@ -103,4 +103,4 @@ def init_trubrics(email, password):
     )
     st.session_state.prompt_ids.append(logged_prompt.id)
     messages.append({"role": "assistant", "content": generation})
-    st.experimental_rerun()  # force rerun of app, to load last feedback component
+    st.rerun()  # force rerun of app, to load last feedback component
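
For reference, a minimal standalone sketch of the client-based calls this patch migrates to, assuming openai>=1.0; the model name and the environment-variable lookup are illustrative assumptions, not taken from the patch:

# Minimal sketch of the openai>=1.0 client API used in the patch above.
# Assumptions: "gpt-3.5-turbo" and the OPENAI_API_KEY env var are illustrative only.
import os

from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

# Non-streaming call: replaces openai.ChatCompletion.create; the message is
# now an object, so .content replaces ["content"].
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)

# Streaming call: each chunk exposes .delta.content, which can be None,
# hence the `or ""` used in llm_chatbot.py.
stream = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for part in stream:
    print(part.choices[0].delta.content or "", end="")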