import copy
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

from huggingface_hub.utils import logging, yaml_dump


logger = logging.get_logger(__name__)


@dataclass
class EvalResult:
    """
    Flattened representation of individual evaluation results found in model-index of Model Cards.

    For more information on the model-index spec, see https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1.

    Args:
        task_type (`str`):
            The task identifier. Example: "image-classification".
        dataset_type (`str`):
            The dataset identifier. Example: "common_voice". Use dataset id from https://hf.co/datasets.
        dataset_name (`str`):
            A pretty name for the dataset. Example: "Common Voice (French)".
        metric_type (`str`):
            The metric identifier. Example: "wer". Use metric id from https://hf.co/metrics.
        metric_value (`Any`):
            The metric value. Example: 0.9 or "20.0 ± 1.2".
        task_name (`str`, *optional*):
            A pretty name for the task. Example: "Speech Recognition".
        dataset_config (`str`, *optional*):
            The name of the dataset configuration used in `load_dataset()`.
            Example: fr in `load_dataset("common_voice", "fr")`. See the `datasets` docs for more info:
            https://hf.co/docs/datasets/package_reference/loading_methods#datasets.load_dataset.name
        dataset_split (`str`, *optional*):
            The split used in `load_dataset()`. Example: "test".
        dataset_revision (`str`, *optional*):
            The revision (AKA Git Sha) of the dataset used in `load_dataset()`.
            Example: 5503434ddd753f426f4b38109466949a1217c2bb
        dataset_args (`Dict[str, Any]`, *optional*):
            The arguments passed during `Metric.compute()`. Example for `bleu`: `{"max_order": 4}`
        metric_name (`str`, *optional*):
            A pretty name for the metric. Example: "Test WER".
        metric_config (`str`, *optional*):
            The name of the metric configuration used in `load_metric()`.
            Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`.
            See the `datasets` docs for more info: https://huggingface.co/docs/datasets/v2.1.0/en/loading#load-configurations
        metric_args (`Dict[str, Any]`, *optional*):
            The arguments passed during `Metric.compute()`. Example for `bleu`: max_order: 4
        verified (`bool`, *optional*):
            Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
        verify_token (`str`, *optional*):
            A JSON Web Token that is used to verify whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not.
        source_name (`str`, *optional*):
            The name of the source of the evaluation result. Example: "Open LLM Leaderboard".
        source_url (`str`, *optional*):
            The URL of the source of the evaluation result. Example: "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard".
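
    Example:
        A minimal, illustrative instantiation (only the required fields are set; the
        `unique_identifier` value shown is a sketch of this dataclass's behavior):

        ```python
        >>> from huggingface_hub import EvalResult
        >>> result = EvalResult(
        ...     task_type="image-classification",
        ...     dataset_type="beans",
        ...     dataset_name="Beans",
        ...     metric_type="accuracy",
        ...     metric_value=0.9,
        ... )
        >>> result.unique_identifier
        ('image-classification', 'beans', None, None, None)

        ```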
    """

    # Required

    # The task identifier. Example: "image-classification".
    task_type: str

    # The dataset identifier. Example: "common_voice". Use dataset id from https://hf.co/datasets.
    dataset_type: str

    # A pretty name for the dataset. Example: "Common Voice (French)".
    dataset_name: str

    # The metric identifier. Example: "wer". Use metric id from https://hf.co/metrics.
    metric_type: str

    # The metric value. Example: 0.9 or "20.0 ± 1.2".
    metric_value: Any

    # Optional

    # A pretty name for the task. Example: "Speech Recognition".
    task_name: Optional[str] = None

    # The name of the dataset configuration used in `load_dataset()`.
    # Example: fr in `load_dataset("common_voice", "fr")`.
    dataset_config: Optional[str] = None

    # The split used in `load_dataset()`. Example: "test".
    dataset_split: Optional[str] = None

    # The revision (AKA Git Sha) of the dataset used in `load_dataset()`.
    dataset_revision: Optional[str] = None

    # The arguments passed during `Metric.compute()`.
    dataset_args: Optional[Dict[str, Any]] = None

    # A pretty name for the metric. Example: "Test WER".
    metric_name: Optional[str] = None

    # The name of the metric configuration used in `load_metric()`.
    # Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`.
    metric_config: Optional[str] = None

    # The arguments passed during `Metric.compute()`. Example for `bleu`: `{"max_order": 4}`.
    metric_args: Optional[Dict[str, Any]] = None

    # Indicates whether the metrics originate from Hugging Face's evaluation service.
    # Automatically computed by Hugging Face, do not set.
    verified: Optional[bool] = None

    # A JSON Web Token used to verify whether the metrics originate from Hugging Face's evaluation service.
    verify_token: Optional[str] = None

    # The name of the source of the evaluation result. Example: "Open LLM Leaderboard".
    source_name: Optional[str] = None

    # The URL of the source of the evaluation result.
    source_url: Optional[str] = None

    @property
    def unique_identifier(self) -> tuple:
        """Returns a tuple that uniquely identifies this evaluation."""
        return (
            self.task_type,
            self.dataset_type,
            self.dataset_config,
            self.dataset_split,
            self.dataset_revision,
        )

    def is_equal_except_value(self, other: "EvalResult") -> bool:
        """
        Return True if `self` and `other` describe exactly the same metric but with a
        different value.
        """
        for key, _ in self.__dict__.items():
            if key == "metric_value":
                continue
            # `verify_token` is derived from `metric_value`, so two results that only
            # differ in value (and hence in token) still describe the same metric.
            if key != "verify_token" and getattr(self, key) != getattr(other, key):
                return False
        return True

    def __post_init__(self) -> None:
        if self.source_name is not None and self.source_url is None:
            raise ValueError("If `source_name` is provided, `source_url` must also be provided.")


class CardData:
    """Structure containing metadata from a RepoCard.

    [`CardData`] is the parent class of [`ModelCardData`] and [`DatasetCardData`].

    Metadata can be exported as a dictionary or YAML. Export can be customized to alter the representation of the data
    (example: flatten evaluation results). `CardData` behaves as a dictionary (can get, pop, set values) but does not
    inherit from `dict` to allow this export step.
    """

    def __init__(self, ignore_metadata_errors: bool = False, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        """Converts CardData to a dict.

        Returns:
            `dict`: CardData represented as a dictionary ready to be dumped to a YAML
            block for inclusion in a README.md file.
        """
        data_dict = copy.deepcopy(self.__dict__)
        self._to_dict(data_dict)
        return {key: value for key, value in data_dict.items() if value is not None}

    def _to_dict(self, data_dict):
        """Use this method in child classes to alter the dict representation of the data. Alter the dict in-place.

        Args:
            data_dict (`dict`): The raw dict representation of the card data.
        """
        pass

    def to_yaml(self, line_break=None, original_order: Optional[List[str]] = None) -> str:
        """Dumps CardData to a YAML block for inclusion in a README.md file.

        Args:
            line_break (str, *optional*):
                The line break to use when dumping to yaml.

        Returns:
            `str`: CardData represented as a YAML block.
        """
        if original_order:
            self.__dict__ = {
                k: self.__dict__[k]
                for k in original_order + list(set(self.__dict__.keys()) - set(original_order))
                if k in self.__dict__
            }
        return yaml_dump(self.to_dict(), sort_keys=False, line_break=line_break).strip()

    def __repr__(self):
        return repr(self.__dict__)

    def __str__(self):
        return self.to_yaml()

    def get(self, key: str, default: Any = None) -> Any:
        """Get value for a given metadata key."""
        return self.__dict__.get(key, default)

    def pop(self, key: str, default: Any = None) -> Any:
        """Pop value for a given metadata key."""
        return self.__dict__.pop(key, default)

    def __getitem__(self, key: str) -> Any:
        """Get value for a given metadata key."""
        return self.__dict__[key]

    def __setitem__(self, key: str, value: Any) -> None:
        """Set value for a given metadata key."""
        self.__dict__[key] = value

    def __contains__(self, key: str) -> bool:
        """Check if a given metadata key is set."""
        return key in self.__dict__

    def __len__(self) -> int:
        """Return the number of metadata keys set."""
        return len(self.__dict__)


class ModelCardData(CardData):
    """Model Card Metadata that is used by Hugging Face Hub when included at the top of your README.md
    Args:
        base_model (`str` or `List[str]`, *optional*):
            The identifier of the base model from which the model derives. This is applicable for example if your model is a
            fine-tune or adapter of an existing model. The value must be the ID of a model on the Hub (or a list of IDs
            if your model derives from multiple models). Defaults to None.
        datasets (`Union[str, List[str]]`, *optional*):
            Dataset or list of datasets that were used to train this model. Should be a dataset ID
            found on https://hf.co/datasets. Defaults to None.
        eval_results (`Union[List[EvalResult], EvalResult]`, *optional*):
            List of `huggingface_hub.EvalResult` that define evaluation results of the model. If provided,
            `model_name` is used as a name on PapersWithCode's leaderboards. Defaults to `None`.
        language (`Union[str, List[str]]`, *optional*):
            Language of model's training data or metadata. It must be an ISO 639-1, 639-2 or
            639-3 code (two/three letters), or a special value like "code", "multilingual". Defaults to `None`.
        library_name (`str`, *optional*):
            Name of library used by this model. Example: keras or any library from
            https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/model-libraries.ts.
            Defaults to None.
        license (`str`, *optional*):
            License of this model. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses. Defaults to None.
        license_name (`str`, *optional*):
            Name of the license of this model. Defaults to None. To be used in conjunction with `license_link`.
            Common licenses (Apache-2.0, MIT, CC-BY-SA-4.0) do not need a name. In that case, use `license` instead.
        license_link (`str`, *optional*):
            Link to the license of this model. Defaults to None. To be used in conjunction with `license_name`.
            Common licenses (Apache-2.0, MIT, CC-BY-SA-4.0) do not need a link. In that case, use `license` instead.
        metrics (`List[str]`, *optional*):
            List of metrics used to evaluate this model. Should be a metric name that can be found
            at https://hf.co/metrics. Example: 'accuracy'. Defaults to None.
        model_name (`str`, *optional*):
            A name for this model. It is used along with
            `eval_results` to construct the `model-index` within the card's metadata. The name
            you supply here is what will be used on PapersWithCode's leaderboards. If None is provided
            then the repo name is used as a default. Defaults to None.
        pipeline_tag (`str`, *optional*):
            The pipeline tag associated with the model. Example: "text-classification".
        tags (`List[str]`, *optional*):
            List of tags to add to your model that can be used when filtering on the Hugging
            Face Hub. Defaults to None.
        ignore_metadata_errors (`bool`):
            If True, errors while parsing the metadata section will be ignored. Some information might be lost during
            the process. Use it at your own risk.
        kwargs (`dict`, *optional*):
            Additional metadata that will be added to the model card. Defaults to None.

    Example:
        ```python
        >>> from huggingface_hub import ModelCardData
        >>> card_data = ModelCardData(
        ...     language="en",
        ...     license="mit",
        ...     library_name="timm",
        ...     tags=['image-classification', 'resnet'],
        ... )
        >>> card_data.to_dict()
        {'language': 'en', 'license': 'mit', 'library_name': 'timm', 'tags': ['image-classification', 'resnet']}

        ```
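
        A second, minimal sketch showing how `eval_results` together with `model_name`
        are flattened into a `model-index` on export (values are illustrative):

        ```python
        >>> from huggingface_hub import EvalResult, ModelCardData
        >>> card_data = ModelCardData(
        ...     model_name="my-cool-model",
        ...     eval_results=[
        ...         EvalResult(
        ...             task_type="image-classification",
        ...             dataset_type="beans",
        ...             dataset_name="Beans",
        ...             metric_type="accuracy",
        ...             metric_value=0.9,
        ...         )
        ...     ],
        ... )
        >>> card_data.to_dict()
        {'model-index': [{'name': 'my-cool-model', 'results': [{'task': {'type': 'image-classification'}, 'dataset': {'name': 'Beans', 'type': 'beans'}, 'metrics': [{'type': 'accuracy', 'value': 0.9}]}]}]}

        ```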
    """

    def __init__(
        self,
        base_model: Optional[Union[str, List[str]]] = None,
        datasets: Optional[Union[str, List[str]]] = None,
        eval_results: Optional[List[EvalResult]] = None,
        language: Optional[Union[str, List[str]]] = None,
        library_name: Optional[str] = None,
        license: Optional[str] = None,
        license_name: Optional[str] = None,
        license_link: Optional[str] = None,
        metrics: Optional[List[str]] = None,
        model_name: Optional[str] = None,
        pipeline_tag: Optional[str] = None,
        tags: Optional[List[str]] = None,
        ignore_metadata_errors: bool = False,
        **kwargs,
    ):
        self.base_model = base_model
        self.datasets = datasets
        self.eval_results = eval_results
        self.language = language
        self.library_name = library_name
        self.license = license
        self.license_name = license_name
        self.license_link = license_link
        self.metrics = metrics
        self.model_name = model_name
        self.pipeline_tag = pipeline_tag
        self.tags = _to_unique_list(tags)

        # Eval results may also arrive as a raw `model-index` (e.g. parsed from a
        # README); convert them to `EvalResult` objects if so.
        model_index = kwargs.pop("model-index", None)
        if model_index:
            try:
                model_name, eval_results = model_index_to_eval_results(model_index)
                self.model_name = model_name
                self.eval_results = eval_results
            except (KeyError, TypeError) as error:
                if ignore_metadata_errors:
                    logger.warning("Invalid model-index. Not loading eval results into CardData.")
                else:
                    raise ValueError(
                        f"Invalid `model_index` in metadata cannot be parsed: {error.__class__} {error}. Pass"
                        " `ignore_metadata_errors=True` to ignore this error while loading a Model Card. Warning:"
                        " some information will be lost. Use it at your own risk."
                    )

        super().__init__(**kwargs)

        if self.eval_results:
            if isinstance(self.eval_results, EvalResult):
                self.eval_results = [self.eval_results]
            if self.model_name is None:
                raise ValueError("Passing `eval_results` requires `model_name` to be set.")

    def _to_dict(self, data_dict):
        """Format the internal data dict. In this case, we convert eval results to a valid model index."""
        if self.eval_results is not None:
            data_dict["model-index"] = eval_results_to_model_index(self.model_name, self.eval_results)
            del data_dict["eval_results"], data_dict["model_name"]


class DatasetCardData(CardData):
    """Dataset Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    Args:
        language (`List[str]`, *optional*):
            Language of dataset's data or metadata. It must be an ISO 639-1, 639-2 or
            639-3 code (two/three letters), or a special value like "code", "multilingual".
        license (`Union[str, List[str]]`, *optional*):
            License(s) of this dataset. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses.
        annotations_creators (`Union[str, List[str]]`, *optional*):
            How the annotations for the dataset were created.
            Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'no-annotation', 'other'.
        language_creators (`Union[str, List[str]]`, *optional*):
            How the text-based data in the dataset was created.
            Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'other'.
        multilinguality (`Union[str, List[str]]`, *optional*):
            Whether the dataset is multilingual.
            Options are: 'monolingual', 'multilingual', 'translation', 'other'.
        size_categories (`Union[str, List[str]]`, *optional*):
            The number of examples in the dataset. Options are: 'n<1K', '1K<n<10K', '10K<n<100K',
            '100K<n<1M', '1M<n<10M', '10M<n<100M', '100M<n<1B', '1B<n<10B', '10B<n<100B', '100B<n<1T', 'n>1T', and 'other'.
        source_datasets (`List[str]`, *optional*):
            Indicates whether the dataset is an original dataset or extended from another existing dataset.
            Options are: 'original' and 'extended'.
        task_categories (`Union[str, List[str]]`, *optional*):
            What categories of task does the dataset support?
        task_ids (`Union[str, List[str]]`, *optional*):
            What specific tasks does the dataset support?
        paperswithcode_id (`str`, *optional*):
            ID of the dataset on PapersWithCode.
        pretty_name (`str`, *optional*):
            A more human-readable name for the dataset. (ex. "Cats vs. Dogs")
        train_eval_index (`Dict`, *optional*):
            A dictionary that describes the necessary spec for doing evaluation on the Hub.
            If not provided, it will be gathered from the 'train-eval-index' key of the kwargs.
        config_names (`Union[str, List[str]]`, *optional*):
            A list of the available dataset configs for the dataset.
    """

    def __init__(
        self,
        language: Optional[Union[str, List[str]]] = None,
        license: Optional[Union[str, List[str]]] = None,
        annotations_creators: Optional[Union[str, List[str]]] = None,
        language_creators: Optional[Union[str, List[str]]] = None,
        multilinguality: Optional[Union[str, List[str]]] = None,
        size_categories: Optional[Union[str, List[str]]] = None,
        source_datasets: Optional[List[str]] = None,
        task_categories: Optional[Union[str, List[str]]] = None,
        task_ids: Optional[Union[str, List[str]]] = None,
        paperswithcode_id: Optional[str] = None,
        pretty_name: Optional[str] = None,
        train_eval_index: Optional[Dict] = None,
        config_names: Optional[Union[str, List[str]]] = None,
        ignore_metadata_errors: bool = False,
        **kwargs,
    ):
        self.annotations_creators = annotations_creators
        self.language_creators = language_creators
        self.language = language
        self.license = license
        self.multilinguality = multilinguality
        self.size_categories = size_categories
        self.source_datasets = source_datasets
        self.task_categories = task_categories
        self.task_ids = task_ids
        self.paperswithcode_id = paperswithcode_id
        self.pretty_name = pretty_name
        self.config_names = config_names

        # If not provided explicitly, the train/eval spec is gathered from the
        # 'train-eval-index' key of the raw kwargs.
        self.train_eval_index = train_eval_index or kwargs.pop("train-eval-index", None)
        super().__init__(**kwargs)

    def _to_dict(self, data_dict):
        data_dict["train-eval-index"] = data_dict.pop("train_eval_index")


class SpaceCardData(CardData):
    """Space Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    To get an exhaustive reference of Spaces configuration, please visit https://huggingface.co/docs/hub/spaces-config-reference#spaces-configuration-reference.

    Args:
        title (`str`, *optional*):
            Title of the Space.
        sdk (`str`, *optional*):
            SDK of the Space (one of `gradio`, `streamlit`, `docker`, or `static`).
        sdk_version (`str`, *optional*):
            Version of the used SDK (if Gradio/Streamlit sdk).
        python_version (`str`, *optional*):
            Python version used in the Space (if Gradio/Streamlit sdk).
        app_file (`str`, *optional*):
            Path to your main application file (which contains either gradio or streamlit Python code, or static html code).
            Path is relative to the root of the repository.
        app_port (`int`, *optional*):
            Port on which your application is running. Used only if sdk is `docker`.
        license (`str`, *optional*):
            License of this model. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses.
        duplicated_from (`str`, *optional*):
            ID of the original Space if this is a duplicated Space.
        models (`List[str]`, *optional*):
            List of models related to this Space. Should be a model ID found on https://hf.co/models.
        datasets (`List[str]`, *optional*):
            List of datasets related to this Space. Should be a dataset ID found on https://hf.co/datasets.
        tags (`List[str]`, *optional*):
            List of tags to add to your Space that can be used when filtering on the Hub.
        ignore_metadata_errors (`bool`):
            If True, errors while parsing the metadata section will be ignored. Some information might be lost during
            the process. Use it at your own risk.
        kwargs (`dict`, *optional*):
            Additional metadata that will be added to the space card.

    Example:
        ```python
        >>> from huggingface_hub import SpaceCardData
        >>> card_data = SpaceCardData(
        ...     title="Dreambooth Training",
        ...     license="mit",
        ...     sdk="gradio",
        ...     duplicated_from="multimodalart/dreambooth-training"
        ... )
        >>> card_data.to_dict()
        {'title': 'Dreambooth Training', 'sdk': 'gradio', 'license': 'mit', 'duplicated_from': 'multimodalart/dreambooth-training'}
        ```
    """

    def __init__(
        self,
        title: Optional[str] = None,
        sdk: Optional[str] = None,
        sdk_version: Optional[str] = None,
        python_version: Optional[str] = None,
        app_file: Optional[str] = None,
        app_port: Optional[int] = None,
        license: Optional[str] = None,
        duplicated_from: Optional[str] = None,
        models: Optional[List[str]] = None,
        datasets: Optional[List[str]] = None,
        tags: Optional[List[str]] = None,
        ignore_metadata_errors: bool = False,
        **kwargs,
    ):
        self.title = title
        self.sdk = sdk
        self.sdk_version = sdk_version
        self.python_version = python_version
        self.app_file = app_file
        self.app_port = app_port
        self.license = license
        self.duplicated_from = duplicated_from
        self.models = models
        self.datasets = datasets
        self.tags = _to_unique_list(tags)
        super().__init__(**kwargs)


def model_index_to_eval_results(model_index: List[Dict[str, Any]]) -> Tuple[str, List[EvalResult]]:
    """Takes in a model index and returns the model name and a list of `huggingface_hub.EvalResult` objects.

    A detailed spec of the model index can be found here:
    https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1

    Args:
        model_index (`List[Dict[str, Any]]`):
            A model index data structure, likely coming from a README.md file on the
            Hugging Face Hub.

    Returns:
        model_name (`str`):
            The name of the model as found in the model index. This is used as the
            identifier for the model on leaderboards like PapersWithCode.
        eval_results (`List[EvalResult]`):
            A list of `huggingface_hub.EvalResult` objects containing the metrics
            reported in the provided model_index.

    Example:
        ```python
        >>> from huggingface_hub.repocard_data import model_index_to_eval_results
        >>> # Define a minimal model index
        >>> model_index = [
        ...     {
        ...         "name": "my-cool-model",
        ...         "results": [
        ...             {
        ...                 "task": {
        ...                     "type": "image-classification"
        ...                 },
        ...                 "dataset": {
        ...                     "type": "beans",
        ...                     "name": "Beans"
        ...                 },
        ...                 "metrics": [
        ...                     {
        ...                         "type": "accuracy",
        ...                         "value": 0.9
        ...                     }
        ...                 ]
        ...             }
        ...         ]
        ...     }
        ... ]
        >>> model_name, eval_results = model_index_to_eval_results(model_index)
        >>> model_name
        'my-cool-model'
        >>> eval_results[0].task_type
        'image-classification'
        >>> eval_results[0].metric_type
        'accuracy'

        ```
    """
    eval_results = []
    for elem in model_index:
        name = elem["name"]
        results = elem["results"]
        for result in results:
            # Task and dataset metadata are shared by all metrics of this result.
            task_type = result["task"]["type"]
            task_name = result["task"].get("name")
            dataset_type = result["dataset"]["type"]
            dataset_name = result["dataset"]["name"]
            dataset_config = result["dataset"].get("config")
            dataset_split = result["dataset"].get("split")
            dataset_revision = result["dataset"].get("revision")
            dataset_args = result["dataset"].get("args")
            source_name = result.get("source", {}).get("name")
            source_url = result.get("source", {}).get("url")

            # Each metric becomes its own flattened `EvalResult`.
            for metric in result["metrics"]:
                metric_type = metric["type"]
                metric_value = metric["value"]
                metric_name = metric.get("name")
                metric_args = metric.get("args")
                metric_config = metric.get("config")
                verified = metric.get("verified")
                verify_token = metric.get("verifyToken")

                eval_result = EvalResult(
                    task_type=task_type,  # Required
                    dataset_type=dataset_type,  # Required
                    dataset_name=dataset_name,  # Required
                    metric_type=metric_type,  # Required
                    metric_value=metric_value,  # Required
                    task_name=task_name,
                    dataset_config=dataset_config,
                    dataset_split=dataset_split,
                    dataset_revision=dataset_revision,
                    dataset_args=dataset_args,
                    metric_name=metric_name,
                    metric_args=metric_args,
                    metric_config=metric_config,
                    verified=verified,
                    verify_token=verify_token,
                    source_name=source_name,
                    source_url=source_url,
                )
                eval_results.append(eval_result)
    return name, eval_results


def _remove_none(obj):
    """
    Recursively remove `None` values from a dict. Borrowed from: https://stackoverflow.com/a/20558778
    """
    if isinstance(obj, (list, tuple, set)):
        return type(obj)(_remove_none(x) for x in obj if x is not None)
    elif isinstance(obj, dict):
        return type(obj)(
            (_remove_none(k), _remove_none(v)) for k, v in obj.items() if k is not None and v is not None
        )
    else:
        return obj


def eval_results_to_model_index(model_name: str, eval_results: List[EvalResult]) -> List[Dict[str, Any]]:
    """Takes in given model name and list of `huggingface_hub.EvalResult` and returns a
    valid model-index that will be compatible with the format expected by the
    Hugging Face Hub.

    Args:
        model_name (`str`):
            Name of the model (ex. "my-cool-model"). This is used as the identifier
            for the model on leaderboards like PapersWithCode.
        eval_results (`List[EvalResult]`):
            List of `huggingface_hub.EvalResult` objects containing the metrics to be
            reported in the model-index.

    Returns:
        model_index (`List[Dict[str, Any]]`): The eval_results converted to a model-index.

    Example:
        ```python
        >>> from huggingface_hub.repocard_data import eval_results_to_model_index, EvalResult
        >>> # Define minimal eval_results
        >>> eval_results = [
        ...     EvalResult(
        ...         task_type="image-classification",  # Required
        ...         dataset_type="beans",  # Required
        ...         dataset_name="Beans",  # Required
        ...         metric_type="accuracy",  # Required
        ...         metric_value=0.9,  # Required
        ...     )
        ... ]
        >>> eval_results_to_model_index("my-cool-model", eval_results)
        [{'name': 'my-cool-model', 'results': [{'task': {'type': 'image-classification'}, 'dataset': {'name': 'Beans', 'type': 'beans'}, 'metrics': [{'type': 'accuracy', 'value': 0.9}]}]}]

        ```
    """
    # Metrics are reported on a unique task-and-dataset basis. Here, we make a map
    # of those pairs and the associated `EvalResult` instances.
    task_and_ds_types_map: Dict[Any, List[EvalResult]] = defaultdict(list)
    for eval_result in eval_results:
        task_and_ds_types_map[eval_result.unique_identifier].append(eval_result)

    # Use the map from above to generate the model index data.
    model_index_data = []
    for results in task_and_ds_types_map.values():
        # All items in `results` share the same task/dataset metadata; only the
        # metrics differ.
        sample_result = results[0]
        data = {
            "task": {
                "type": sample_result.task_type,
                "name": sample_result.task_name,
            },
            "dataset": {
                "name": sample_result.dataset_name,
                "type": sample_result.dataset_type,
                "config": sample_result.dataset_config,
                "split": sample_result.dataset_split,
                "revision": sample_result.dataset_revision,
                "args": sample_result.dataset_args,
            },
            "metrics": [
                {
                    "type": result.metric_type,
                    "value": result.metric_value,
                    "name": result.metric_name,
                    "config": result.metric_config,
                    "args": result.metric_args,
                    "verified": result.verified,
                    "verifyToken": result.verify_token,
                }
                for result in results
            ],
        }
        if sample_result.source_url is not None:
            source = {"url": sample_result.source_url}
            if sample_result.source_name is not None:
                source["name"] = sample_result.source_name
            data["source"] = source
        model_index_data.append(data)

    # The model index itself is a list with a single entry: the model name plus all
    # of its results.
    model_index = [
        {
            "name": model_name,
            "results": model_index_data,
        }
    ]
    return _remove_none(model_index)


def _to_unique_list(tags: Optional[List[str]]) -> Optional[List[str]]:
    if tags is None:
        return tags
    unique_tags = []  # make tags unique while preserving their order
    for tag in tags:
        if tag not in unique_tags:
            unique_tags.append(tag)
    return unique_tags