/*
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Concurrent;

namespace Firebase.VertexAI {

/// <summary>
/// The entry point for all Firebase AI SDK functionality.
/// </summary>
public class FirebaseAI {

  /// <summary>
  /// Defines which backend AI service is being used, provided to `FirebaseAI.GetInstance`.
  /// </summary>
  public readonly struct Backend {
    /// <summary>
    /// Intended for internal use only.
    /// Defines the possible types of backend providers.
    /// </summary>
    internal enum InternalProvider {
      GoogleAI,
      VertexAI
    }

    /// <summary>
    /// Intended for internal use only.
    /// The backend provider being used.
    /// </summary>
    internal InternalProvider Provider { get; }

    /// <summary>
    /// Intended for internal use only.
    /// The region identifier used by the Vertex AI backend.
    /// </summary>
    internal string Location { get; }

    private Backend(InternalProvider provider, string location = null) {
      Provider = provider;
      Location = location;
    }

    /// <summary>
    /// The Google AI backend service configuration.
    /// </summary>
    public static Backend GoogleAI() {
      return new Backend(InternalProvider.GoogleAI);
    }

    /// <summary>
    /// The Vertex AI backend service configuration.
    /// </summary>
    /// <param name="location">The region identifier, defaulting to `us-central1`; see [Vertex AI
    /// regions](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions)
    /// for a list of supported regions.</param>
    public static Backend VertexAI(string location = "us-central1") {
      if (string.IsNullOrWhiteSpace(location) || location.Contains("/")) {
        throw new VertexAIInvalidLocationException(location);
      }

      return new Backend(InternalProvider.VertexAI, location);
    }

    public override readonly string ToString() {
      return $"FirebaseAIBackend|{Provider}|{Location}";
    }
  }
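
  // Usage sketch (illustrative comment only, not compiled): picking a backend
  // configuration. `Backend.GoogleAI()` takes no arguments, while `Backend.VertexAI()`
  // accepts an optional region and throws `VertexAIInvalidLocationException` for empty
  // or slash-containing values.
  //
  //   FirebaseAI.Backend googleBackend = FirebaseAI.Backend.GoogleAI();
  //   FirebaseAI.Backend vertexBackend = FirebaseAI.Backend.VertexAI(location: "us-central1");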

  private static readonly ConcurrentDictionary<string, FirebaseAI> _instances = new();

  private readonly FirebaseApp _firebaseApp;
  private readonly Backend _backend;

  private FirebaseAI(FirebaseApp firebaseApp, Backend backend) {
    _firebaseApp = firebaseApp;
    _backend = backend;
  }

  /// <summary>
  /// Returns a `FirebaseAI` instance with the default `FirebaseApp` and GoogleAI Backend.
  /// </summary>
  public static FirebaseAI DefaultInstance => GetInstance();

  /// <summary>
  /// Returns a `FirebaseAI` instance with the default `FirebaseApp` and the given Backend.
  /// </summary>
  /// <param name="backend">The backend AI service to use.</param>
  /// <returns>A configured instance of `FirebaseAI`.</returns>
  public static FirebaseAI GetInstance(Backend? backend = null) {
    return GetInstance(FirebaseApp.DefaultInstance, backend);
  }

  /// <summary>
  /// Returns a `FirebaseAI` instance with the given `FirebaseApp` and Backend.
  /// </summary>
  /// <param name="app">The custom `FirebaseApp` used for initialization.</param>
  /// <param name="backend">The backend AI service to use.</param>
  /// <returns>A configured instance of `FirebaseAI`.</returns>
  public static FirebaseAI GetInstance(FirebaseApp app, Backend? backend = null) {
    if (app == null) {
      throw new ArgumentNullException(nameof(app));
    }
    Backend resolvedBackend = backend ?? Backend.GoogleAI();

    // FirebaseAI instances are keyed by a combination of the app name and backend.
    string key = $"{app.Name}::{resolvedBackend}";
    if (_instances.ContainsKey(key)) {
      return _instances[key];
    }

    return _instances.GetOrAdd(key, _ => new FirebaseAI(app, resolvedBackend));
  }
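
  // Usage sketch (illustrative comment only, not compiled): instances are cached per
  // app-name/backend pair, so repeated calls with the same arguments return the same object.
  //
  //   FirebaseAI googleAI = FirebaseAI.GetInstance();  // Defaults to Backend.GoogleAI().
  //   FirebaseAI vertexAI = FirebaseAI.GetInstance(FirebaseApp.DefaultInstance,
  //                                                FirebaseAI.Backend.VertexAI());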

  /// <summary>
  /// Initializes a generative model with the given parameters.
  ///
  /// - Note: Refer to [Gemini models](https://firebase.google.com/docs/vertex-ai/gemini-models) for
  ///   guidance on choosing an appropriate model for your use case.
  /// </summary>
  /// <param name="modelName">The name of the model to use, for example `"gemini-2.0-flash"`; see
  /// [available model names
  /// ](https://firebase.google.com/docs/vertex-ai/gemini-models#available-model-names) for a
  /// list of supported model names.</param>
  /// <param name="generationConfig">The content generation parameters your model should use.</param>
  /// <param name="safetySettings">A value describing what types of harmful content your model should allow.</param>
  /// <param name="tools">A list of `Tool` objects that the model may use to generate the next response.</param>
  /// <param name="toolConfig">Tool configuration for any `Tool` specified in the request.</param>
  /// <param name="systemInstruction">Instructions that direct the model to behave a certain way;
  /// currently only text content is supported.</param>
  /// <param name="requestOptions">Configuration parameters for sending requests to the backend.</param>
  /// <returns>The initialized `GenerativeModel` instance.</returns>
  public GenerativeModel GetGenerativeModel(
      string modelName,
      GenerationConfig? generationConfig = null,
      SafetySetting[] safetySettings = null,
      Tool[] tools = null,
      ToolConfig? toolConfig = null,
      ModelContent? systemInstruction = null,
      RequestOptions? requestOptions = null) {
    return new GenerativeModel(_firebaseApp, _backend, modelName,
                               generationConfig, safetySettings, tools,
                               toolConfig, systemInstruction, requestOptions);
  }
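
  // Usage sketch (illustrative comment only, not compiled): creating a model from the
  // default instance. Only `GetGenerativeModel` is defined in this file; the members of
  // `GenerativeModel` itself (e.g. a content-generation call) live elsewhere in the SDK.
  //
  //   var model = FirebaseAI.DefaultInstance.GetGenerativeModel("gemini-2.0-flash");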

  /// <summary>
  /// Initializes a `LiveGenerativeModel` for real-time interaction.
  ///
  /// - Note: Refer to [Gemini models](https://firebase.google.com/docs/vertex-ai/gemini-models) for
  ///   guidance on choosing an appropriate model for your use case.
  ///
  /// - Note: Currently only supports the VertexAI backend.
  /// </summary>
  /// <param name="modelName">The name of the model to use, for example `"gemini-2.0-flash-live-preview-04-09"`; see
  /// [available model names
  /// ](https://firebase.google.com/docs/vertex-ai/gemini-models#available-model-names) for a
  /// list of supported model names.</param>
  /// <param name="liveGenerationConfig">The content generation parameters your model should use.</param>
  /// <param name="tools">A list of `Tool` objects that the model may use to generate the next response.</param>
  /// <param name="systemInstruction">Instructions that direct the model to behave a certain way.</param>
  /// <param name="requestOptions">Configuration parameters for sending requests to the backend.</param>
  /// <returns>The initialized `LiveGenerativeModel` instance.</returns>
  public LiveGenerativeModel GetLiveModel(
      string modelName,
      LiveGenerationConfig? liveGenerationConfig = null,
      Tool[] tools = null,
      ModelContent? systemInstruction = null,
      RequestOptions? requestOptions = null) {
    if (_backend.Provider != Backend.InternalProvider.VertexAI) {
      throw new NotSupportedException(
          "LiveGenerativeModel is currently only supported with the VertexAI backend.");
    }

    return new LiveGenerativeModel(_firebaseApp, _backend, modelName,
                                   liveGenerationConfig, tools,
                                   systemInstruction, requestOptions);
  }
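
  // Usage sketch (illustrative comment only, not compiled): the live model requires the
  // Vertex AI backend; requesting it from a GoogleAI-backed instance throws
  // NotSupportedException.
  //
  //   var vertexAI = FirebaseAI.GetInstance(FirebaseAI.Backend.VertexAI());
  //   var liveModel = vertexAI.GetLiveModel("gemini-2.0-flash-live-preview-04-09");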
}
}