Merge pull request #13 from CJCrafter/completions
Completions
CJCrafter authored Apr 3, 2023
2 parents 7f38791 + 953eedf commit 8db59dd
Showing 19 changed files with 1,240 additions and 307 deletions.
451 changes: 339 additions & 112 deletions src/main/kotlin/com/cjcrafter/openai/OpenAI.kt

Large diffs are not rendered by default.

86 changes: 86 additions & 0 deletions src/main/kotlin/com/cjcrafter/openai/OpenAICallback.kt
@@ -0,0 +1,86 @@
package com.cjcrafter.openai

import com.cjcrafter.openai.exception.OpenAIError
import com.cjcrafter.openai.exception.WrappedIOError
import com.google.gson.JsonObject
import com.google.gson.JsonParseException
import com.google.gson.JsonParser
import okhttp3.Call
import okhttp3.Callback
import okhttp3.Response
import java.io.IOException
import java.util.function.Consumer

internal class OpenAICallback(
private val isStream: Boolean,
private val onFailure: Consumer<OpenAIError>,
private val onResponse: Consumer<JsonObject>
) : Callback {

override fun onFailure(call: Call, e: IOException) {
onFailure.accept(WrappedIOError(e))
}

override fun onResponse(call: Call, response: Response) {
onResponse(response)
}

fun onResponse(response: Response) {
if (isStream) {
handleStream(response)
return
}

val rootObject = JsonParser.parseString(response.body!!.string()).asJsonObject

// Sometimes OpenAI will respond with an error code for malformed
// requests, timeouts, rate limits, etc. We need to let the dev
// know that an error occurred.
if (rootObject.has("error")) {
onFailure.accept(OpenAIError.fromJson(rootObject.get("error").asJsonObject))
return
}

onResponse.accept(rootObject)
}

private fun handleStream(response: Response) {
response.body?.source()?.use { source ->

while (!source.exhausted()) {
var jsonResponse = source.readUtf8Line()

// Our data is separated by empty lines; skip them. The final
// line is always "data: [DONE]"; skip that too.
if (jsonResponse.isNullOrEmpty() || jsonResponse == "data: [DONE]")
continue

// The OpenAI API returns a JSON string, but each streamed line is
// prefixed with "data: " (which is not valid JSON). To parse it into
// a JsonObject, we have to strip away that prefix first.
if (jsonResponse.startsWith("data: "))
jsonResponse = jsonResponse.substring("data: ".length)

lateinit var rootObject: JsonObject
try {
rootObject = JsonParser.parseString(jsonResponse).asJsonObject
} catch (ex: JsonParseException) {
println(jsonResponse)
ex.printStackTrace()
continue
}

// Sometimes OpenAI will respond with an error code for malformed
// requests, timeouts, rate limits, etc. We need to let the dev
// know that an error occurred.
if (rootObject.has("error")) {
onFailure.accept(OpenAIError.fromJson(rootObject.get("error").asJsonObject))
continue
}

// Developer defined code to run
onResponse.accept(rootObject)
}
}
}
}
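For context, a rough sketch of how a request might be wired to this callback with OkHttp. `OpenAICallback` is internal, so this mirrors what `OpenAI.kt` itself would do (that file's diff is not rendered above); the endpoint URL and request body are illustrative, and the `Consumer` wrappers are needed because the constructor takes `java.util.function.Consumer` rather than Kotlin function types.

```kotlin
package com.cjcrafter.openai

import com.cjcrafter.openai.exception.OpenAIError
import com.google.gson.JsonObject
import okhttp3.MediaType.Companion.toMediaType
import okhttp3.OkHttpClient
import okhttp3.Request
import okhttp3.RequestBody.Companion.toRequestBody
import java.util.function.Consumer

fun enqueueExample(apiKey: String) {
    val client = OkHttpClient()
    val json = """{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]}"""
    val request = Request.Builder()
        .url("https://api.openai.com/v1/chat/completions") // illustrative endpoint
        .header("Authorization", "Bearer $apiKey")
        .post(json.toRequestBody("application/json".toMediaType()))
        .build()

    // Non-streaming: the callback delivers either the parsed root JsonObject
    // or an OpenAIError (IOExceptions arrive wrapped as WrappedIOError).
    val callback = OpenAICallback(
        isStream = false,
        onFailure = Consumer { error: OpenAIError -> System.err.println("Request failed: $error") },
        onResponse = Consumer { root: JsonObject -> println(root) }
    )
    client.newCall(request).enqueue(callback)
}
```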
2 changes: 1 addition & 1 deletion src/main/kotlin/com/cjcrafter/openai/chat/ChatChoice.kt
@@ -5,7 +5,7 @@ import com.google.gson.JsonObject
import com.google.gson.annotations.SerializedName

/**
* The OpenAI API returns a list of [ChatChoice]. Each chat choice has a
* The OpenAI API returns a list of `ChatChoice`. Each choice has a
* generated message ([ChatChoice.message]) and a finish reason
* ([ChatChoice.finishReason]). For most use cases, you only need the generated
* message.
7 changes: 7 additions & 0 deletions src/main/kotlin/com/cjcrafter/openai/chat/ChatChoiceChunk.kt
@@ -37,6 +37,13 @@ data class ChatChoiceChunk(
message.content += delta
finishReason = if (json["finish_reason"].isJsonNull) null else FinishReason.valueOf(json["finish_reason"].asString.uppercase())
}

/**
* Returns `true` if this message chunk is complete. Once complete, no more
* tokens will be generated, and [ChatChoiceChunk.message] will contain the
* complete message.
*/
fun isFinished() = finishReason != null
}

/*
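A brief sketch of how `isFinished()` might be used by a consumer of streamed chunks. How the chunks are delivered is up to `OpenAI#streamChatCompletion` (named in the deprecation note below but not rendered in this diff), so this handler relies only on the members shown above: `message`, `finishReason`, and `isFinished()`.

```kotlin
import com.cjcrafter.openai.chat.ChatChoiceChunk

// Invoked once per streamed chunk, however the caller receives them (e.g. from
// OpenAI#streamChatCompletion). Prints the assembled message once generation stops.
fun handleChunk(choice: ChatChoiceChunk) {
    if (choice.isFinished()) {
        // Once finished, ChatChoiceChunk.message contains the complete message.
        println(choice.message.content)
        println("Finish reason: ${choice.finishReason}")
    }
}
```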
31 changes: 15 additions & 16 deletions src/main/kotlin/com/cjcrafter/openai/chat/ChatRequest.kt
@@ -6,7 +6,7 @@ import com.google.gson.annotations.SerializedName
* [ChatRequest] holds the configurable options that can be sent to the OpenAI
* Chat API. For most use cases, you only need to set [model] and [messages].
* For more detailed descriptions for each option, refer to the
* [Chat Wiki](https://platform.openai.com/docs/api-reference/chat)
* [Chat Wiki](https://platform.openai.com/docs/api-reference/chat).
*
* [messages] stores **ALL** previous messages from the conversation. It is
* **YOUR RESPONSIBILITY** to store and update this list for your conversations
@@ -49,7 +49,7 @@ data class ChatRequest @JvmOverloads constructor(
var temperature: Float? = null,
@field:SerializedName("top_p") var topP: Float? = null,
var n: Int? = null,
@Deprecated("Use ChatBot#streamResponse") var stream: Boolean? = null,
@Deprecated("Use OpenAI#streamChatCompletion") var stream: Boolean? = null,
var stop: String? = null,
@field:SerializedName("max_tokens") var maxTokens: Int? = null,
@field:SerializedName("presence_penalty") var presencePenalty: Float? = null,
@@ -58,20 +58,8 @@
var user: String? = null
) {

companion object {

/**
* A static method that provides a new [Builder] instance for the
* [ChatRequest] class.
*
* @return a new [Builder] instance for creating a [ChatRequest] object.
*/
@JvmStatic
fun builder(): Builder = Builder()
}

/**
* [Builder] is a helper class to build a [ChatRequest] instance with a fluent API.
* [Builder] is a helper class to build a [ChatRequest] instance with a stable API.
* It provides methods for setting the properties of the [ChatRequest] object.
* The [build] method returns a new [ChatRequest] instance with the specified properties.
*
Expand All @@ -80,7 +68,6 @@ data class ChatRequest @JvmOverloads constructor(
* val chatRequest = ChatRequest.builder()
* .model("gpt-3.5-turbo")
* .messages(mutableListOf("Be as helpful as possible".toSystemMessage()))
* .temperature(0.7f)
* .build()
* ```
*
@@ -222,4 +209,16 @@
)
}
}

companion object {

/**
* A static method that provides a new [Builder] instance for the
* [ChatRequest] class.
*
* @return a new [Builder] instance for creating a [ChatRequest] object.
*/
@JvmStatic
fun builder(): Builder = Builder()
}
}
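The relocated companion object keeps `ChatRequest.builder()` available as a static factory for Java callers, while Kotlin callers can also use the `@JvmOverloads` constructor with named arguments. A small sketch of both; the import location of the `toSystemMessage()` extension (used in the KDoc example above) is an assumption here.

```kotlin
import com.cjcrafter.openai.chat.ChatRequest
import com.cjcrafter.openai.chat.ChatMessage.Companion.toSystemMessage // assumed import location

// Kotlin: named arguments on the data class constructor.
val fromConstructor = ChatRequest(
    model = "gpt-3.5-turbo",
    messages = mutableListOf("Be as helpful as possible".toSystemMessage())
)

// Java-friendly: the fluent builder exposed through the companion object.
val fromBuilder = ChatRequest.builder()
    .model("gpt-3.5-turbo")
    .messages(mutableListOf("Be as helpful as possible".toSystemMessage()))
    .build()
```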
2 changes: 1 addition & 1 deletion src/main/kotlin/com/cjcrafter/openai/chat/ChatResponse.kt
@@ -7,7 +7,7 @@ import java.time.ZonedDateTime
import java.util.*

/**
* The [ChatResponse] contains all the data returned by the OpenAI Chat API.
* The `ChatResponse` contains all the data returned by the OpenAI Chat API.
* For most use cases, [ChatResponse.get] (passing 0 to the index argument) is
* all you need.
*
28 changes: 28 additions & 0 deletions src/main/kotlin/com/cjcrafter/openai/completions/CompletionChoice.kt
@@ -0,0 +1,28 @@
package com.cjcrafter.openai.completions

import com.cjcrafter.openai.FinishReason
import com.google.gson.annotations.SerializedName

/**
* The OpenAI API returns a list of `CompletionChoice`. Each choice has its
* generated text ([CompletionChoice.text]) and a finish reason
* ([CompletionChoice.finishReason]). For most use cases, you only need the
* generated text.
*
* By default, only 1 choice is generated (since [CompletionRequest.n] == 1).
* When you increase `n` or provide a list of prompts (called batching),
* there will be multiple choices.
*
* @property text The generated text.
* @property index The index of this choice in the returned list. This is 0 for most use cases.
* @property logprobs List of logarithmic probabilities for each token in the generated text.
* @property finishReason The reason the bot stopped generating tokens.
* @constructor Creates a completion choice, for internal usage.
* @see FinishReason
*/
data class CompletionChoice(
val text: String,
val index: Int,
val logprobs: List<Float>?,
@field:SerializedName("finish_reason") val finishReason: FinishReason
)
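As the KDoc notes, raising `n` or batching prompts produces several choices. The response type that wraps them is not part of this file, so the sketch below just works on a `List<CompletionChoice>`:

```kotlin
import com.cjcrafter.openai.completions.CompletionChoice

// Most callers only want the text of the first (often the only) choice.
fun firstText(choices: List<CompletionChoice>): String = choices[0].text

// With n > 1 or batched prompts, each generation has its own entry, ordered by index.
fun allTexts(choices: List<CompletionChoice>): List<String> =
    choices.sortedBy { it.index }.map { it.text }
```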
35 changes: 35 additions & 0 deletions src/main/kotlin/com/cjcrafter/openai/completions/CompletionChoiceChunk.kt
@@ -0,0 +1,35 @@
package com.cjcrafter.openai.completions

import com.cjcrafter.openai.FinishReason
import com.cjcrafter.openai.chat.ChatChoiceChunk
import com.google.gson.annotations.SerializedName

/**
* The OpenAI API streams a list of `CompletionChoiceChunk`. Each chunk carries
* the newly generated tokens ([CompletionChoiceChunk.text]) and, on the final
* chunk, a finish reason ([CompletionChoiceChunk.finishReason]). For most use
* cases, you only need the generated text.
*
* By default, only 1 choice is generated (since [CompletionRequest.n] == 1).
* When you increase `n` or provide a list of prompts (called batching),
* there will be multiple choices.
*
* @property text The tokens generated in this chunk.
* @property index The index of this choice in the returned list. This is 0 for most use cases.
* @property logprobs List of logarithmic probabilities for each token in the generated text.
* @property finishReason The reason the bot stopped generating tokens.
* @constructor Creates a completion choice chunk, for internal usage.
* @see FinishReason
*/
data class CompletionChoiceChunk(
val text: String,
val index: Int,
val logprobs: List<Float>?,
@field:SerializedName("finish_reason") val finishReason: FinishReason?
) {
/**
* Returns `true` if this message chunk is complete. Once complete, no more
* tokens will be generated.
*/
fun isFinished() = finishReason != null
}
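Unlike `ChatChoiceChunk`, which folds each delta into its `message`, a `CompletionChoiceChunk` only carries the tokens produced in that chunk, so a streaming caller accumulates the text itself. A sketch that relies only on the members shown above; how the chunks are delivered is left to the caller, since the streaming entry point for completions is not rendered in this diff.

```kotlin
import com.cjcrafter.openai.completions.CompletionChoiceChunk

// Collects streamed completion text until a chunk reports a finish reason.
class CompletionCollector {
    private val text = StringBuilder()

    fun onChunk(choice: CompletionChoiceChunk) {
        text.append(choice.text)
        if (choice.isFinished())
            println("Finished (${choice.finishReason}): $text")
    }
}
```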