- Notifications
You must be signed in to change notification settings - Fork 129
/
Copy path: audiotranslation.go
119 lines (106 loc) · 4.65 KB
/
audiotranslation.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
package openai
import (
"bytes"
"context"
"io"
"mime/multipart"
"net/http"
"github.com/openai/openai-go/internal/apiform"
"github.com/openai/openai-go/internal/apijson"
"github.com/openai/openai-go/internal/requestconfig"
"github.com/openai/openai-go/option"
"github.com/openai/openai-go/packages/param"
"github.com/openai/openai-go/packages/resp"
)
// AudioTranslationService contains methods and other services that help with
// interacting with the openai API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly, and instead use
// the [NewAudioTranslationService] method instead.
typeAudioTranslationServicestruct {
Options []option.RequestOption
}
// NewAudioTranslationService generates a new service that applies the given
// options to each request. These options are applied after the parent client's
// options (if there is one), and before any request-specific options.
funcNewAudioTranslationService(opts...option.RequestOption) (rAudioTranslationService) {
r=AudioTranslationService{}
r.Options=opts
return
}
// Translates audio into English.
func (r*AudioTranslationService) New(ctx context.Context, bodyAudioTranslationNewParams, opts...option.RequestOption) (res*Translation, errerror) {
opts=append(r.Options[:], opts...)
path:="audio/translations"
err=requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
return
}
typeTranslationstruct {
Textstring`json:"text,required"`
// Metadata for the response, check the presence of optional fields with the
// [resp.Field.IsPresent] method.
JSONstruct {
Text resp.Field
ExtraFieldsmap[string]resp.Field
rawstring
} `json:"-"`
}
// Returns the unmodified JSON received from the API
func (rTranslation) RawJSON() string { returnr.JSON.raw }
// UnmarshalJSON decodes the API response into r, populating the JSON metadata
// fields alongside the regular ones.
func (r *Translation) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
typeAudioTranslationNewParamsstruct {
// The audio file object (not file name) translate, in one of these formats: flac,
// mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
File io.Reader`json:"file,required" format:"binary"`
// ID of the model to use. Only `whisper-1` (which is powered by our open source
// Whisper V2 model) is currently available.
ModelAudioModel`json:"model,omitzero,required"`
// An optional text to guide the model's style or continue a previous audio
// segment. The
// [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
// should be in English.
Prompt param.Opt[string] `json:"prompt,omitzero"`
// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
// output more random, while lower values like 0.2 will make it more focused and
// deterministic. If set to 0, the model will use
// [log probability](https://en.wikipedia.org/wiki/Log_probability) to
// automatically increase the temperature until certain thresholds are hit.
Temperature param.Opt[float64] `json:"temperature,omitzero"`
// The format of the output, in one of these options: `json`, `text`, `srt`,
// `verbose_json`, or `vtt`.
//
// Any of "json", "text", "srt", "verbose_json", "vtt".
ResponseFormatAudioTranslationNewParamsResponseFormat`json:"response_format,omitzero"`
paramObj
}
// IsPresent returns true if the field's value is not omitted and not the JSON
// "null". To check if this field is omitted, use [param.IsOmitted].
func (fAudioTranslationNewParams) IsPresent() bool { return!param.IsOmitted(f) &&!f.IsNull() }
func (rAudioTranslationNewParams) MarshalMultipart() (data []byte, contentTypestring, errerror) {
buf:=bytes.NewBuffer(nil)
writer:=multipart.NewWriter(buf)
err=apiform.MarshalRoot(r, writer)
iferr!=nil {
writer.Close()
returnnil, "", err
}
err=writer.Close()
iferr!=nil {
returnnil, "", err
}
returnbuf.Bytes(), writer.FormDataContentType(), nil
}
// AudioTranslationNewParamsResponseFormat is the format of the output, in one
// of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
type AudioTranslationNewParamsResponseFormat string

// Accepted values for AudioTranslationNewParamsResponseFormat.
const (
	AudioTranslationNewParamsResponseFormatJSON        AudioTranslationNewParamsResponseFormat = "json"
	AudioTranslationNewParamsResponseFormatText        AudioTranslationNewParamsResponseFormat = "text"
	AudioTranslationNewParamsResponseFormatSRT         AudioTranslationNewParamsResponseFormat = "srt"
	AudioTranslationNewParamsResponseFormatVerboseJSON AudioTranslationNewParamsResponseFormat = "verbose_json"
	AudioTranslationNewParamsResponseFormatVTT         AudioTranslationNewParamsResponseFormat = "vtt"
)