
from __future__ import annotations

from typing import List
from typing_extensions import Literal, overload

import httpx

from .. import _legacy_response
from ..types import completion_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import is_given, required_args, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._constants import DEFAULT_TIMEOUT
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.completion import Completion
from ..types.model_param import ModelParam
from ..types.metadata_param import MetadataParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | NotGiven = NOT_GIVEN,
        stop_sequences: List[str] | NotGiven = NOT_GIVEN,
        stream: Literal[False] | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        top_k: int | NotGiven = NOT_GIVEN,
        top_p: float | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.anthropic.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
              our guide to
              [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
              details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.anthropic.com/en/api/streaming) for details.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

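    # A minimal, non-streaming usage sketch for the overload above. This is an
    # illustration rather than part of the generated module: the `client`
    # variable and the `claude-2.1` model id are assumptions.
    #
    #     from anthropic import Anthropic
    #
    #     client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment
    #
    #     completion = client.completions.create(
    #         model="claude-2.1",
    #         max_tokens_to_sample=256,
    #         prompt="\n\nHuman: Why is the sky blue?\n\nAssistant:",
    #     )
    #     print(completion.completion)  # the generated text
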
    @overload
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: Literal[True],
        metadata: MetadataParam | NotGiven = NOT_GIVEN,
        stop_sequences: List[str] | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        top_k: int | NotGiven = NOT_GIVEN,
        top_p: float | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[Completion]:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.anthropic.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
              our guide to
              [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
              details.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.anthropic.com/en/api/streaming) for details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

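    # A streaming usage sketch for the overload above (same assumed `client`
    # as in the sketch further up). With `stream=True` the call returns a
    # `Stream[Completion]`, and each event carries an incremental piece of the
    # generated text:
    #
    #     stream = client.completions.create(
    #         model="claude-2.1",
    #         max_tokens_to_sample=256,
    #         prompt="\n\nHuman: Hello\n\nAssistant:",
    #         stream=True,
    #     )
    #     for event in stream:
    #         print(event.completion, end="", flush=True)
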
    @overload
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: bool,
        metadata: MetadataParam | NotGiven = NOT_GIVEN,
        stop_sequences: List[str] | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        top_k: int | NotGiven = NOT_GIVEN,
        top_p: float | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        """[Legacy] Create a Text Completion."""
        ...

    @required_args(["max_tokens_to_sample", "model", "prompt"], ["max_tokens_to_sample", "model", "prompt", "stream"])
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | NotGiven = NOT_GIVEN,
        stop_sequences: List[str] | NotGiven = NOT_GIVEN,
        stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        top_k: int | NotGiven = NOT_GIVEN,
        top_p: float | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
            # Legacy completions fall back to a long 600 s timeout when neither
            # the request nor the client overrides the default.
            timeout = 600
        return self._post(
            "/v1/complete",
            body=maybe_transform(
                {
                    "max_tokens_to_sample": max_tokens_to_sample,
                    "model": model,
                    "prompt": prompt,
                    "metadata": metadata,
                    "stop_sequences": stop_sequences,
                    "stream": stream,
                    "temperature": temperature,
                    "top_k": top_k,
                    "top_p": top_p,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=Stream[Completion],
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return AsyncCompletionsWithStreamingResponse(self)

    @overload
    async def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | NotGiven = NOT_GIVEN,
        stop_sequences: List[str] | NotGiven = NOT_GIVEN,
        stream: Literal[False] | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        top_k: int | NotGiven = NOT_GIVEN,
        top_p: float | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion:
        """[Legacy] Create a Text Completion."""
        ...

    @overload
    async def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: Literal[True],
        metadata: MetadataParam | NotGiven = NOT_GIVEN,
        stop_sequences: List[str] | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        top_k: int | NotGiven = NOT_GIVEN,
        top_p: float | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncStream[Completion]:
        """[Legacy] Create a Text Completion."""
        ...

    @overload
    async def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: bool,
        metadata: MetadataParam | NotGiven = NOT_GIVEN,
        stop_sequences: List[str] | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        top_k: int | NotGiven = NOT_GIVEN,
        top_p: float | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        """[Legacy] Create a Text Completion."""
        ...

    @required_args(["max_tokens_to_sample", "model", "prompt"], ["max_tokens_to_sample", "model", "prompt", "stream"])
    async def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | NotGiven = NOT_GIVEN,
        stop_sequences: List[str] | NotGiven = NOT_GIVEN,
        stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        top_k: int | NotGiven = NOT_GIVEN,
        top_p: float | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
            timeout = 600
        return await self._post(
            "/v1/complete",
            body=await async_maybe_transform(
                {
                    "max_tokens_to_sample": max_tokens_to_sample,
                    "model": model,
                    "prompt": prompt,
                    "metadata": metadata,
                    "stop_sequences": stop_sequences,
                    "stream": stream,
                    "temperature": temperature,
                    "top_k": top_k,
                    "top_p": top_p,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = to_streamed_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )
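
# Usage sketches for the async resource and the response wrappers above. The
# `client` names are assumptions for illustration, not part of this module.
#
# Async, with streaming — awaiting the call yields an `AsyncStream[Completion]`:
#
#     import asyncio
#
#     from anthropic import AsyncAnthropic
#
#     async def main() -> None:
#         client = AsyncAnthropic()
#         stream = await client.completions.create(
#             model="claude-2.1",
#             max_tokens_to_sample=256,
#             prompt="\n\nHuman: Hello\n\nAssistant:",
#             stream=True,
#         )
#         async for event in stream:
#             print(event.completion, end="", flush=True)
#
#     asyncio.run(main())
#
# Raw-response access via the wrapper classes (headers first, parse later),
# assuming a synchronous `Anthropic` client named `client`:
#
#     response = client.completions.with_raw_response.create(
#         model="claude-2.1",
#         max_tokens_to_sample=256,
#         prompt="\n\nHuman: Hi\n\nAssistant:",
#     )
#     print(response.headers.get("request-id"))
#     completion = response.parse()  # -> Completion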