# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union, Iterable

import httpx

from .. import _legacy_response
from ..types import moderation_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._base_client import make_request_options
from ..types.moderation_model import ModerationModel
from ..types.moderation_create_response import ModerationCreateResponse
from ..types.moderation_multi_modal_input_param import ModerationMultiModalInputParam

__all__ = ["Moderations", "AsyncModerations"]


class Moderations(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> ModerationsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ModerationsWithRawResponse(self)
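
    # Illustrative usage (a sketch; assumes a configured `openai.OpenAI` client
    # named `client`): the raw HTTP response can be inspected before parsing.
    #
    #   response = client.moderations.with_raw_response.create(input="...text to classify...")
    #   print(response.headers)          # raw response headers
    #   moderation = response.parse()    # parsed ModerationCreateResponse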

    @cached_property
    def with_streaming_response(self) -> ModerationsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ModerationsWithStreamingResponse(self)
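
    # Illustrative usage (a sketch; assumes a configured `openai.OpenAI` client
    # named `client`): the body is only read inside the context manager.
    #
    #   with client.moderations.with_streaming_response.create(input="...text to classify...") as response:
    #       print(response.headers)
    #       moderation = response.parse()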

    def create(
        self,
        *,
        input: Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]],
        model: Union[str, ModerationModel] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ModerationCreateResponse:
        """Classifies if text and/or image inputs are potentially harmful.

        Learn more in
        the [moderation guide](https://platform.openai.com/docs/guides/moderation).

        Args:
          input: Input (or inputs) to classify. Can be a single string, an array of strings, or
              an array of multi-modal input objects similar to other models.

          model: The content moderation model you would like to use. Learn more in
              [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
              learn about available models
              [here](https://platform.openai.com/docs/models#moderation).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/moderations",
            body=maybe_transform(
                {
                    "input": input,
                    "model": model,
                },
                moderation_create_params.ModerationCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ModerationCreateResponse,
        )
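
    # Illustrative usage (a sketch; assumes a configured `openai.OpenAI` client named
    # `client`; "omni-moderation-latest" stands in for any documented moderation model):
    #
    #   result = client.moderations.create(
    #       model="omni-moderation-latest",
    #       input="...text to classify...",
    #   )
    #   print(result.results[0].flagged)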


class AsyncModerations(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncModerationsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncModerationsWithRawResponse(self)
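
    # Illustrative usage (a sketch; mirrors the sync example above, assuming a configured
    # `openai.AsyncOpenAI` client named `client` inside an async function):
    #
    #   response = await client.moderations.with_raw_response.create(input="...text to classify...")
    #   moderation = response.parse()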

    @cached_property
    def with_streaming_response(self) -> AsyncModerationsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncModerationsWithStreamingResponse(self)
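
    # Illustrative usage (a sketch; assumes a configured `openai.AsyncOpenAI` client
    # named `client` inside an async function):
    #
    #   async with client.moderations.with_streaming_response.create(input="...text to classify...") as response:
    #       moderation = await response.parse()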

    async def create(
        self,
        *,
        input: Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]],
        model: Union[str, ModerationModel] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ModerationCreateResponse:
        """Classifies if text and/or image inputs are potentially harmful.

        Learn more in
        the [moderation guide](https://platform.openai.com/docs/guides/moderation).

        Args:
          input: Input (or inputs) to classify. Can be a single string, an array of strings, or
              an array of multi-modal input objects similar to other models.

          model: The content moderation model you would like to use. Learn more in
              [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
              learn about available models
              [here](https://platform.openai.com/docs/models#moderation).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/moderations",
            body=await async_maybe_transform(
                {
                    "input": input,
                    "model": model,
                },
                moderation_create_params.ModerationCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ModerationCreateResponse,
        )
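
    # Illustrative usage (a sketch; assumes a configured `openai.AsyncOpenAI` client named
    # `client` inside an async function; "omni-moderation-latest" stands in for any
    # documented moderation model):
    #
    #   result = await client.moderations.create(
    #       model="omni-moderation-latest",
    #       input="...text to classify...",
    #   )
    #   print(result.results[0].flagged)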


class ModerationsWithRawResponse:
    def __init__(self, moderations: Moderations) -> None:
        self._moderations = moderations

        self.create = _legacy_response.to_raw_response_wrapper(
            moderations.create,
        )


class AsyncModerationsWithRawResponse:
    def __init__(self, moderations: AsyncModerations) -> None:
        self._moderations = moderations

        self.create = _legacy_response.async_to_raw_response_wrapper(
            moderations.create,
        )


class ModerationsWithStreamingResponse:
    def __init__(self, moderations: Moderations) -> None:
        self._moderations = moderations

        self.create = to_streamed_response_wrapper(
            moderations.create,
        )


class AsyncModerationsWithStreamingResponse:
    def __init__(self, moderations: AsyncModerations) -> None:
        self._moderations = moderations

        self.create = async_to_streamed_response_wrapper(
            moderations.create,
        )