@@ -26,16 +26,50 @@ public struct SafetyRating: Equatable, Hashable, Sendable {

   /// The model-generated probability that the content falls under the specified harm ``category``.
   ///
-  /// See ``HarmProbability`` for a list of possible values.
+  /// See ``HarmProbability`` for a list of possible values. This is a discretized representation
+  /// of the ``probabilityScore``.
   ///
   /// > Important: This does not indicate the severity of harm for a piece of content.
   public let probability: HarmProbability

+  /// The confidence score that the response is associated with the corresponding harm ``category``.
+  ///
+  /// The probability safety score is a confidence score between 0.0 and 1.0, rounded to one decimal
+  /// place; it is discretized into a ``HarmProbability`` in ``probability``. See [probability
+  /// scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
+  /// in the Google Cloud documentation for more details.
+  public let probabilityScore: Float
+
+  /// The severity reflects the magnitude of how harmful a model response might be.
+  ///
+  /// See ``HarmSeverity`` for a list of possible values. This is a discretized representation of
+  /// the ``severityScore``.
+  public let severity: HarmSeverity
+
+  /// The severity score is the magnitude of how harmful a model response might be.
+  ///
+  /// The severity score ranges from 0.0 to 1.0, rounded to one decimal place; it is discretized
+  /// into a ``HarmSeverity`` in ``severity``. See [severity scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
+  /// in the Google Cloud documentation for more details.
+  public let severityScore: Float
+
+  /// If true, the response was blocked.
+  public let blocked: Bool
+
   /// Initializes a new `SafetyRating` instance with the given category and probability.
   /// Use this initializer for SwiftUI previews or tests.
-  public init(category: HarmCategory, probability: HarmProbability) {
+  public init(category: HarmCategory,
+              probability: HarmProbability,
+              probabilityScore: Float,
+              severity: HarmSeverity,
+              severityScore: Float,
+              blocked: Bool) {
     self.category = category
     self.probability = probability
+    self.probabilityScore = probabilityScore
+    self.severity = severity
+    self.severityScore = severityScore
+    self.blocked = blocked
   }

   /// The probability that a given model output falls under a harmful content category.
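A rough sketch of how the expanded initializer might be used to build fixture values for SwiftUI previews or tests. The `.harassment` and `.low` static members are assumed to exist on `HarmCategory` and `HarmProbability` (they are not part of this diff); only `HarmSeverity.negligible` is added here.

```swift
// Hypothetical fixture for a preview or unit test, using the new initializer.
let rating = SafetyRating(
  category: .harassment,     // assumed static member of HarmCategory
  probability: .low,         // discretized form of probabilityScore
  probabilityScore: 0.3,     // confidence score in 0.0...1.0
  severity: .negligible,     // discretized form of severityScore
  severityScore: 0.1,        // severity magnitude in 0.0...1.0
  blocked: false             // only true when the response was blocked
)
```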
@@ -74,6 +108,37 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
     static let unrecognizedValueMessageCode =
       VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
   }
+
+  /// The magnitude of how harmful a model response might be for the respective ``HarmCategory``.
+  public struct HarmSeverity: DecodableProtoEnum, Hashable, Sendable {
+    enum Kind: String {
+      case negligible = "HARM_SEVERITY_NEGLIGIBLE"
+      case low = "HARM_SEVERITY_LOW"
+      case medium = "HARM_SEVERITY_MEDIUM"
+      case high = "HARM_SEVERITY_HIGH"
+    }
+
+    /// Negligible level of harm severity.
+    public static let negligible = HarmSeverity(kind: .negligible)
+
+    /// Low level of harm severity.
+    public static let low = HarmSeverity(kind: .low)
+
+    /// Medium level of harm severity.
+    public static let medium = HarmSeverity(kind: .medium)
+
+    /// High level of harm severity.
+    public static let high = HarmSeverity(kind: .high)
+
+    /// Returns the raw string representation of the `HarmSeverity` value.
+    ///
+    /// > Note: This value directly corresponds to the values in the [REST
+    /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#HarmSeverity).
+    public let rawValue: String
+
+    static let unrecognizedValueMessageCode =
+      VertexLog.MessageCode.generateContentResponseUnrecognizedHarmSeverity
+  }
 }

 /// A type used to specify a threshold for harmful content, beyond which the model will return a
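With this hunk, `severity`/`severityScore` describe how harmful a flagged response is, while `probability`/`probabilityScore` describe how likely it is to be harmful. A hedged sketch of consuming the new fields follows; the helper function is hypothetical and not part of this change.

```swift
// Hypothetical helper: surface ratings that are high severity or caused a block.
func logHighSeverityRatings(_ ratings: [SafetyRating]) {
  for rating in ratings where rating.severity == .high || rating.blocked {
    print("\(rating.category.rawValue): severity score \(rating.severityScore), blocked: \(rating.blocked)")
  }
}
```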
@@ -164,7 +229,31 @@ public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
 // MARK: - Codable Conformances

 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
-extension SafetyRating: Decodable {}
+extension SafetyRating: Decodable {
+  enum CodingKeys: CodingKey {
+    case category
+    case probability
+    case probabilityScore
+    case severity
+    case severityScore
+    case blocked
+  }
+
+  public init(from decoder: any Decoder) throws {
+    let container = try decoder.container(keyedBy: CodingKeys.self)
+    category = try container.decode(HarmCategory.self, forKey: .category)
+    probability = try container.decode(HarmProbability.self, forKey: .probability)
+
+    // The following 3 fields are only omitted in our test data.
+    probabilityScore = try container.decodeIfPresent(Float.self, forKey: .probabilityScore) ?? 0.0
+    severity = try container.decodeIfPresent(HarmSeverity.self, forKey: .severity) ??
+      HarmSeverity(rawValue: "HARM_SEVERITY_UNSPECIFIED")
+    severityScore = try container.decodeIfPresent(Float.self, forKey: .severityScore) ?? 0.0
+
+    // The blocked field is only included when true.
+    blocked = try container.decodeIfPresent(Bool.self, forKey: .blocked) ?? false
+  }
+}

 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 extension SafetySetting.HarmBlockThreshold: Encodable {}
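A minimal sketch of the decoding fallbacks introduced above, assuming a hand-written JSON payload in the REST response shape (the field values are illustrative only): score, severity, and blocked fields missing from the payload decode to defaults instead of throwing.

```swift
import Foundation

// Illustrative payload; the score, severity, and blocked fields are omitted on purpose.
let json = Data("""
{
  "category": "HARM_CATEGORY_HARASSMENT",
  "probability": "NEGLIGIBLE"
}
""".utf8)

do {
  let rating = try JSONDecoder().decode(SafetyRating.self, from: json)
  print(rating.probabilityScore)  // 0.0 (omitted from the payload)
  print(rating.severity.rawValue) // "HARM_SEVERITY_UNSPECIFIED" (omitted)
  print(rating.blocked)           // false (only sent when true)
} catch {
  print("Failed to decode SafetyRating: \(error)")
}
```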