from __future__ import annotations

from typing import Dict, List, Type, Union, Iterable, Optional, cast
from functools import partial
from typing_extensions import Literal

import httpx

from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._streaming import Stream
from ....types.chat import ChatCompletionReasoningEffort, completion_create_params
from ...._base_client import make_request_options
from ....lib._parsing import (
    ResponseFormatT,
    validate_input_tools as _validate_input_tools,
    parse_chat_completion as _parse_chat_completion,
    type_to_response_format_param as _type_to_response_format,
)
from ....types.chat_model import ChatModel
from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
from ....types.chat.chat_completion import ChatCompletion
from ....types.chat.chat_completion_chunk import ChatCompletionChunk
from ....types.chat.parsed_chat_completion import ParsedChatCompletion
from ....types.chat.chat_completion_modality import ChatCompletionModality
from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return CompletionsWithStreamingResponse(self)

    def parse(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ParsedChatCompletion[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.

        You can pass a pydantic model to this method and it will automatically convert the model
        into a JSON schema, send it to the API and parse the response content back into the given model.

        This method will also automatically parse `function` tool calls (see the sketch below) if:
        - You use the `openai.pydantic_function_tool()` helper method
        - You mark your tool schema with `"strict": True`
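
        For example, tool-call parsing with `openai.pydantic_function_tool()` might
        look like the sketch below (`GetWeather` is a hypothetical model used purely
        for illustration):

        ```py
        from pydantic import BaseModel

        import openai


        class GetWeather(BaseModel):
            city: str


        client = openai.OpenAI()
        completion = client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[{"role": "user", "content": "What's the weather in Paris?"}],
            tools=[openai.pydantic_function_tool(GetWeather)],
        )

        message = completion.choices[0].message
        if message.tool_calls:
            # the JSON arguments are parsed back into a `GetWeather` instance
            print(message.tool_calls[0].function.parsed_arguments)
        ```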

        Example usage:
        ```py
        from pydantic import BaseModel
        from openai import OpenAI


        class Step(BaseModel):
            explanation: str
            output: str


        class MathResponse(BaseModel):
            steps: List[Step]
            final_answer: str


        client = OpenAI()
        completion = client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "You are a helpful math tutor."},
                {"role": "user", "content": "solve 8x + 31 = 2"},
            ],
            response_format=MathResponse,
        )

        message = completion.choices[0].message
        if message.parsed:
            print(message.parsed.steps)
            print("answer: ", message.parsed.final_answer)
        ```
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
            **(extra_headers or {}),
        }

        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
            return _parse_chat_completion(
                response_format=response_format,
                chat_completion=raw_completion,
                input_tools=tools,
            )

        return self._post(
            "/chat/completions",
            body=maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "reasoning_effort": reasoning_effort,
                    "response_format": _type_to_response_format(response_format),
                    "seed": seed,
                    "service_tier": service_tier,
                    "stop": stop,
                    "store": store,
                    "stream": False,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
            # in the `parser` function above
            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
            stream=False,
        )

    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        with client.beta.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```
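
        Because the `stream` instance accumulates each delta as it is consumed, you can
        also ask it for the final completion once iteration has finished (a sketch;
        uses the `get_final_completion()` helper described in `helpers.md`, and assumes
        `client` is an `OpenAI()` instance):

        ```py
        with client.beta.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            for event in stream:
                ...

            completion = stream.get_final_completion()
            print(completion.choices[0].message.content)
        ```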

        When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)` is an iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response will be closed, however the `stream` instance is still available outside
        the context manager.
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
            **(extra_headers or {}),
        }

        api_request: partial[Stream[ChatCompletionChunk]] = partial(
            self._client.chat.completions.create,
            messages=messages,
            model=model,
            audio=audio,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_completion_tokens=max_completion_tokens,
            max_tokens=max_tokens,
            metadata=metadata,
            modalities=modalities,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            prediction=prediction,
            presence_penalty=presence_penalty,
            reasoning_effort=reasoning_effort,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            store=store,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return ChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCompletionsWithStreamingResponse(self)

    async def parse(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ParsedChatCompletion[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.

        You can pass a pydantic model to this method and it will automatically convert the model
        into a JSON schema, send it to the API and parse the response content back into the given model.

        This method will also automatically parse `function` tool calls (see the sketch below) if:
        - You use the `openai.pydantic_function_tool()` helper method
        - You mark your tool schema with `"strict": True`
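
        For example, a hand-written tool schema marked with `"strict": True` is parsed
        as well (a sketch; the exact type of `parsed_arguments` depends on how the tool
        was defined, here it is a plain dict):

        ```py
        # `client` is assumed to be an `AsyncOpenAI()` instance
        completion = await client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[{"role": "user", "content": "What's the weather in Paris?"}],
            tools=[
                {
                    "type": "function",
                    "function": {
                        "name": "get_weather",
                        "strict": True,
                        "parameters": {
                            "type": "object",
                            "properties": {"city": {"type": "string"}},
                            "required": ["city"],
                            "additionalProperties": False,
                        },
                    },
                }
            ],
        )

        message = completion.choices[0].message
        if message.tool_calls:
            print(message.tool_calls[0].function.parsed_arguments)
        ```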

        Example usage:
        ```py
        from pydantic import BaseModel
        from openai import AsyncOpenAI


        class Step(BaseModel):
            explanation: str
            output: str


        class MathResponse(BaseModel):
            steps: List[Step]
            final_answer: str


        client = AsyncOpenAI()
        completion = await client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "You are a helpful math tutor."},
                {"role": "user", "content": "solve 8x + 31 = 2"},
            ],
            response_format=MathResponse,
        )

        message = completion.choices[0].message
        if message.parsed:
            print(message.parsed.steps)
            print("answer: ", message.parsed.final_answer)
        ```
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
            **(extra_headers or {}),
        }

        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
            return _parse_chat_completion(
                response_format=response_format,
                chat_completion=raw_completion,
                input_tools=tools,
            )

        return await self._post(
            "/chat/completions",
            body=await async_maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "reasoning_effort": reasoning_effort,
                    "response_format": _type_to_response_format(response_format),
                    "seed": seed,
                    "service_tier": service_tier,
                    "stop": stop,
                    "store": store,
                    "stream": False,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
            # in the `parser` function above
            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
            stream=False,
        )

    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        async with client.beta.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            async for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```
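
        As with the synchronous helper, the accumulated final completion is available
        from the `stream` instance once iteration has finished (a sketch; uses the
        `get_final_completion()` helper described in `helpers.md`, and assumes `client`
        is an `AsyncOpenAI()` instance):

        ```py
        async with client.beta.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            async for event in stream:
                ...

            completion = await stream.get_final_completion()
            print(completion.choices[0].message.content)
        ```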

        When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)` is an async iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response will be closed, however the `stream` instance is still available outside
        the context manager.
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
            **(extra_headers or {}),
        }

        api_request = self._client.chat.completions.create(
            messages=messages,
            model=model,
            audio=audio,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_completion_tokens=max_completion_tokens,
            max_tokens=max_tokens,
            metadata=metadata,
            modalities=modalities,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            prediction=prediction,
            presence_penalty=presence_penalty,
            reasoning_effort=reasoning_effort,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            store=store,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return AsyncChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.parse = _legacy_response.to_raw_response_wrapper(
            completions.parse,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.parse = _legacy_response.async_to_raw_response_wrapper(
            completions.parse,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.parse = to_streamed_response_wrapper(
            completions.parse,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.parse = async_to_streamed_response_wrapper(
            completions.parse,
        )