"""
This file is used to calculate the cost of the Gemini API.

Handles the context caching for Gemini API.
"""

from typing import Tuple

from litellm.litellm_core_utils.llm_cost_calc.utils import generic_cost_per_token
from litellm.types.utils import Usage


def cost_per_token(model: str, usage: Usage) -> Tuple[float, float]:
    """
    Calculates the cost per token for a given model, prompt tokens, and completion tokens.

    Follows the same logic as Anthropic's cost per token calculation.
    """
    return generic_cost_per_token(
        model=model, usage=usage, custom_llm_provider="gemini"
    )