- Notifications
You must be signed in to change notification settings - Fork 1.6k
/
Copy pathchat_stream.go
106 lines (93 loc) · 3.77 KB
/
chat_stream.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
package openai
import (
"context"
"net/http"
)
typeChatCompletionStreamChoiceDeltastruct {
Contentstring`json:"content,omitempty"`
Rolestring`json:"role,omitempty"`
FunctionCall*FunctionCall`json:"function_call,omitempty"`
ToolCalls []ToolCall`json:"tool_calls,omitempty"`
Refusalstring`json:"refusal,omitempty"`
}
typeChatCompletionStreamChoiceLogprobsstruct {
Content []ChatCompletionTokenLogprob`json:"content,omitempty"`
Refusal []ChatCompletionTokenLogprob`json:"refusal,omitempty"`
}
typeChatCompletionTokenLogprobstruct {
Tokenstring`json:"token"`
Bytes []int64`json:"bytes,omitempty"`
Logprobfloat64`json:"logprob,omitempty"`
TopLogprobs []ChatCompletionTokenLogprobTopLogprob`json:"top_logprobs"`
}
// ChatCompletionTokenLogprobTopLogprob is one of the most likely alternative
// tokens at a given position, with its log probability and byte representation.
type ChatCompletionTokenLogprobTopLogprob struct {
	Token   string  `json:"token"`
	Bytes   []int64 `json:"bytes"`
	Logprob float64 `json:"logprob"`
}
typeChatCompletionStreamChoicestruct {
Indexint`json:"index"`
DeltaChatCompletionStreamChoiceDelta`json:"delta"`
Logprobs*ChatCompletionStreamChoiceLogprobs`json:"logprobs,omitempty"`
FinishReasonFinishReason`json:"finish_reason"`
ContentFilterResultsContentFilterResults`json:"content_filter_results,omitempty"`
}
typePromptFilterResultstruct {
Indexint`json:"index"`
ContentFilterResultsContentFilterResults`json:"content_filter_results,omitempty"`
}
typeChatCompletionStreamResponsestruct {
IDstring`json:"id"`
Objectstring`json:"object"`
Createdint64`json:"created"`
Modelstring`json:"model"`
Choices []ChatCompletionStreamChoice`json:"choices"`
SystemFingerprintstring`json:"system_fingerprint"`
PromptAnnotations []PromptAnnotation`json:"prompt_annotations,omitempty"`
PromptFilterResults []PromptFilterResult`json:"prompt_filter_results,omitempty"`
// An optional field that will only be present when you set stream_options: {"include_usage": true} in your request.
// When present, it contains a null value except for the last chunk which contains the token usage statistics
// for the entire request.
Usage*Usage`json:"usage,omitempty"`
}
// ChatCompletionStream
// Note: Perhaps it is more elegant to abstract Stream using generics.
typeChatCompletionStreamstruct {
*streamReader[ChatCompletionStreamResponse]
}
// CreateChatCompletionStream — API call to create a chat completion w/ streaming
// support. It sets whether to stream back partial progress. If set, tokens will be
// sent as data-only server-sent events as they become available, with the
// stream terminated by a data: [DONE] message.
func (c*Client) CreateChatCompletionStream(
ctx context.Context,
requestChatCompletionRequest,
) (stream*ChatCompletionStream, errerror) {
urlSuffix:=chatCompletionsSuffix
if!checkEndpointSupportsModel(urlSuffix, request.Model) {
err=ErrChatCompletionInvalidModel
return
}
request.Stream=true
reasoningValidator:=NewReasoningValidator()
iferr=reasoningValidator.Validate(request); err!=nil {
return
}
req, err:=c.newRequest(
ctx,
http.MethodPost,
c.fullURL(urlSuffix, withModel(request.Model)),
withBody(request),
)
iferr!=nil {
returnnil, err
}
resp, err:=sendRequestStream[ChatCompletionStreamResponse](c, req)
iferr!=nil {
return
}
stream=&ChatCompletionStream{
streamReader: resp,
}
return
}