# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from typing_extensions import Literal

from pydantic import Field as FieldInfo

from .._models import BaseModel

__all__ = ["Moderation", "Categories", "CategoryAppliedInputTypes", "CategoryScores"]


class Categories(BaseModel):
    harassment: bool
    """
    Content that expresses, incites, or promotes harassing language towards any
    target.
    """

    harassment_threatening: bool = FieldInfo(alias="harassment/threatening")
    """
    Harassment content that also includes violence or serious harm towards any
    target.
    """

    hate: bool
    """
    Content that expresses, incites, or promotes hate based on race, gender,
    ethnicity, religion, nationality, sexual orientation, disability status, or
    caste. Hateful content aimed at non-protected groups (e.g., chess players) is
    harassment.
    """

    hate_threatening: bool = FieldInfo(alias="hate/threatening")
    """
    Hateful content that also includes violence or serious harm towards the targeted
    group based on race, gender, ethnicity, religion, nationality, sexual
    orientation, disability status, or caste.
    """

    illicit: Optional[bool] = None
    """
    Content that includes instructions or advice that facilitate the planning or
    execution of wrongdoing, or that gives advice or instruction on how to commit
    illicit acts. For example, "how to shoplift" would fit this category.
    """

    illicit_violent: Optional[bool] = FieldInfo(alias="illicit/violent", default=None)
    """
    Content that includes instructions or advice that facilitate the planning or
    execution of wrongdoing that also includes violence, or that gives advice or
    instruction on the procurement of any weapon.
    """

    self_harm: bool = FieldInfo(alias="self-harm")
    """
    Content that promotes, encourages, or depicts acts of self-harm, such as
    suicide, cutting, and eating disorders.
    """

    self_harm_instructions: bool = FieldInfo(alias="self-harm/instructions")
    """
    Content that encourages performing acts of self-harm, such as suicide, cutting,
    and eating disorders, or that gives instructions or advice on how to commit such
    acts.
    """

    self_harm_intent: bool = FieldInfo(alias="self-harm/intent")
    """
    Content where the speaker expresses that they are engaging or intend to engage
    in acts of self-harm, such as suicide, cutting, and eating disorders.
    """

    sexual: bool
    """
    Content meant to arouse sexual excitement, such as the description of sexual
    activity, or that promotes sexual services (excluding sex education and
    wellness).
    """

    sexual_minors: bool = FieldInfo(alias="sexual/minors")
    """Sexual content that includes an individual who is under 18 years old."""

    violence: bool
    """Content that depicts death, violence, or physical injury."""

    violence_graphic: bool = FieldInfo(alias="violence/graphic")
    """Content that depicts death, violence, or physical injury in graphic detail."""


class CategoryAppliedInputTypes(BaseModel):
    harassment: List[Literal["text"]]
    """The applied input type(s) for the category 'harassment'."""

    harassment_threatening: List[Literal["text"]] = FieldInfo(alias="harassment/threatening")
    """The applied input type(s) for the category 'harassment/threatening'."""

    hate: List[Literal["text"]]
    """The applied input type(s) for the category 'hate'."""

    hate_threatening: List[Literal["text"]] = FieldInfo(alias="hate/threatening")
    """The applied input type(s) for the category 'hate/threatening'."""

    illicit: List[Literal["text"]]
    """The applied input type(s) for the category 'illicit'."""

    illicit_violent: List[Literal["text"]] = FieldInfo(alias="illicit/violent")
    """The applied input type(s) for the category 'illicit/violent'."""

    self_harm: List[Literal["text", "image"]] = FieldInfo(alias="self-harm")
    """The applied input type(s) for the category 'self-harm'."""

    self_harm_instructions: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/instructions")
    """The applied input type(s) for the category 'self-harm/instructions'."""

    self_harm_intent: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/intent")
    """The applied input type(s) for the category 'self-harm/intent'."""

    sexual: List[Literal["text", "image"]]
    """The applied input type(s) for the category 'sexual'."""

    sexual_minors: List[Literal["text"]] = FieldInfo(alias="sexual/minors")
    """The applied input type(s) for the category 'sexual/minors'."""

    violence: List[Literal["text", "image"]]
    """The applied input type(s) for the category 'violence'."""

    violence_graphic: List[Literal["text", "image"]] = FieldInfo(alias="violence/graphic")
    """The applied input type(s) for the category 'violence/graphic'."""


class CategoryScores(BaseModel):
    harassment: float
    """The score for the category 'harassment'."""

    harassment_threatening: float = FieldInfo(alias="harassment/threatening")
    """The score for the category 'harassment/threatening'."""

    hate: float
    """The score for the category 'hate'."""

    hate_threatening: float = FieldInfo(alias="hate/threatening")
    """The score for the category 'hate/threatening'."""

    illicit: float
    """The score for the category 'illicit'."""

    illicit_violent: float = FieldInfo(alias="illicit/violent")
    """The score for the category 'illicit/violent'."""

    self_harm: float = FieldInfo(alias="self-harm")
    """The score for the category 'self-harm'."""

    self_harm_instructions: float = FieldInfo(alias="self-harm/instructions")
    """The score for the category 'self-harm/instructions'."""

    self_harm_intent: float = FieldInfo(alias="self-harm/intent")
    """The score for the category 'self-harm/intent'."""

    sexual: float
    """The score for the category 'sexual'."""

    sexual_minors: float = FieldInfo(alias="sexual/minors")
    """The score for the category 'sexual/minors'."""

    violence: float
    """The score for the category 'violence'."""

    violence_graphic: float = FieldInfo(alias="violence/graphic")
    """The score for the category 'violence/graphic'."""


class Moderation(BaseModel):
    categories: Categories
    """A list of the categories, and whether they are flagged or not."""

    category_applied_input_types: CategoryAppliedInputTypes
    """
    A list of the categories along with the input type(s) that the score applies to.
    """

    category_scores: CategoryScores
    """A list of the categories along with their scores as predicted by the model."""

    flagged: bool
    """Whether any of the below categories are flagged."""
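

if __name__ == "__main__":
    # Minimal usage sketch, not part of the generated definitions above. It
    # assumes a pydantic v2 runtime so that ``model_validate``/``model_dump``
    # are available on ``BaseModel``, and the payload values below are
    # illustrative placeholders rather than real moderation output. The point
    # is to show how the aliased API keys (e.g. "harassment/threatening",
    # "self-harm") map onto the snake_case attributes defined on ``Categories``;
    # in typical SDK usage these models are returned by the moderations
    # endpoint rather than built by hand.
    payload = {
        "harassment": False,
        "harassment/threatening": False,
        "hate": False,
        "hate/threatening": False,
        "self-harm": False,
        "self-harm/instructions": False,
        "self-harm/intent": False,
        "sexual": False,
        "sexual/minors": False,
        "violence": True,
        "violence/graphic": False,
    }
    categories = Categories.model_validate(payload)

    # Read the result back through the snake_case attribute names; fields not
    # present in the payload (``illicit``, ``illicit_violent``) default to None.
    flagged_names = [name for name, value in categories.model_dump().items() if value]
    print(flagged_names)  # -> ['violence']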