"""Factory function to build auto-model classes."""

import copy
import importlib
import json
import os
import warnings
from collections import OrderedDict
from collections.abc import Iterator
from typing import Any, TypeVar, Union

from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...utils import (
    CONFIG_NAME,
    cached_file,
    copy_func,
    extract_commit_hash,
    find_adapter_config_file,
    is_peft_available,
    is_torch_available,
    logging,
    requires_backends,
)
from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings


if is_torch_available():
    from ...generation import GenerationMixin


logger = logging.get_logger(__name__)

_T = TypeVar("_T")
# A mapping value is a single model class or a tuple of model classes.
_LazyAutoMappingValue = Union[type[Any], tuple[type[Any], ...]]


CLASS_DOCSTRING = """
    This is a generic model class that will be instantiated as one of the model classes of the library when created
    with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class
    method.

    This class cannot be instantiated directly using `__init__()` (throws an error).
"""

FROM_CONFIG_DOCSTRING = """
        Instantiates one of the model classes of the library from a configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights.

        Args:
            config ([`PretrainedConfig`]):
                The model class to instantiate is selected based on the configuration class:

                List options
            attn_implementation (`str`, *optional*):
                The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation.

        Examples:

        ```python
        >>> from transformers import AutoConfig, BaseAutoModelClass

        >>> # Download configuration from huggingface.co and cache.
        >>> config = AutoConfig.from_pretrained("checkpoint_placeholder")
        >>> model = BaseAutoModelClass.from_config(config)
        ```
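        If the checkpoint's architecture supports it, the attention implementation described above can be selected at
        instantiation time. A minimal sketch (assuming SDPA is available for this model and torch version):

        ```python
        >>> # Request a specific attention implementation while building the model from its config
        >>> model = BaseAutoModelClass.from_config(config, attn_implementation="sdpa")
        ```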
"""

FROM_PRETRAINED_TORCH_DOCSTRING = """
        Instantiate one of the model classes of the library from a pretrained model.

        The model class to instantiate is selected based on the `model_type` property of the config object (either
        passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
        falling back to using pattern matching on `pretrained_model_name_or_path`:

        List options

        The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are
        deactivated). To train the model, you should first set it back in training mode with `model.train()`.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *TensorFlow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint into a
                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
            model_args (additional positional arguments, *optional*):
                Will be passed along to the underlying model `__init__()` method.
            config ([`PretrainedConfig`], *optional*):
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            state_dict (*dict[str, torch.Tensor]*, *optional*):
                A state dictionary to use instead of a state dictionary loaded from saved weights file.

                This option can be used if you want to create a model from a pretrained configuration but load your own
                weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
                [`~PreTrainedModel.from_pretrained`] is not a simpler option.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_tf (`bool`, *optional*, defaults to `False`):
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            trust_remote_code (`bool`, *optional*, defaults to `False`):
                Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
                should only be set to `True` for repositories you trust and in which you have read the code, as it will
                execute code present on the Hub on your local machine.
            code_revision (`str`, *optional*, defaults to `"main"`):
                The specific revision to use for the code on the Hub, if the code lives in a different repository than
                the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
                system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
                allowed by git.
            kwargs (additional keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and instantiate the model (e.g.,
                `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        Examples:

        ```python
        >>> from transformers import AutoConfig, BaseAutoModelClass

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")

        >>> # Update configuration during loading
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
        >>> model.config.output_attentions
        True

        >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
        >>> config = AutoConfig.from_pretrained("./tf_model/shortcut_placeholder_tf_model_config.json")
        >>> model = BaseAutoModelClass.from_pretrained(
        ...     "./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index", from_tf=True, config=config
        ... )
        ```
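        The Hub-related arguments documented above compose in a single call. A minimal sketch (assuming the requested
        revision exists for this checkpoint):

        ```python
        >>> # Pin an exact model revision and keep remote code execution disabled
        >>> model = BaseAutoModelClass.from_pretrained(
        ...     "checkpoint_placeholder", revision="main", trust_remote_code=False
        ... )
        ```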
"""

FROM_PRETRAINED_TF_DOCSTRING = """
        Instantiate one of the model classes of the library from a pretrained model.

        The model class to instantiate is selected based on the `model_type` property of the config object (either
        passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
        falling back to using pattern matching on `pretrained_model_name_or_path`:

        List options

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this
                      case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
                      argument. This loading path is slower than converting the PyTorch model into a TensorFlow model
                      using the provided conversion scripts and loading the TensorFlow model afterwards.
            model_args (additional positional arguments, *optional*):
                Will be passed along to the underlying model `__init__()` method.
            config ([`PretrainedConfig`], *optional*):
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_pt (`bool`, *optional*, defaults to `False`):
                Load the model weights from a PyTorch checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            trust_remote_code (`bool`, *optional*, defaults to `False`):
                Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
                should only be set to `True` for repositories you trust and in which you have read the code, as it will
                execute code present on the Hub on your local machine.
            code_revision (`str`, *optional*, defaults to `"main"`):
                The specific revision to use for the code on the Hub, if the code lives in a different repository than
                the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
                system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
                allowed by git.
            kwargs (additional keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and instantiate the model (e.g.,
                `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        Examples:

        ```python
        >>> from transformers import AutoConfig, BaseAutoModelClass

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")

        >>> # Update configuration during loading
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
        >>> model.config.output_attentions
        True

        >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
        >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
        >>> model = BaseAutoModelClass.from_pretrained(
        ...     "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
        ... )
        ```
"""

FROM_PRETRAINED_FLAX_DOCSTRING = ""  # original text not preserved here; referenced by `auto_class_update` below


def _get_model_class(config, model_mapping):
    supported_models = model_mapping[type(config)]
    if not isinstance(supported_models, (list, tuple)):
        return supported_models

    name_to_model = {model.__name__: model for model in supported_models}
    architectures = getattr(config, "architectures", [])
    for arch in architectures:
        if arch in name_to_model:
            return name_to_model[arch]
        elif f"TF{arch}" in name_to_model:
            return name_to_model[f"TF{arch}"]
        elif f"Flax{arch}" in name_to_model:
            return name_to_model[f"Flax{arch}"]

    # If no architecture in the config matches the supported models, the first element of the tuple is the default
    # model class.
    return supported_models[0]
class _BaseAutoModelClass:
    # Base class for auto models.
    _model_mapping = None

    def __init__(self, *args, **kwargs):
        raise OSError(
            f"{self.__class__.__name__} is designed to be instantiated using the "
            f"`{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
            f"`{self.__class__.__name__}.from_config(config)` methods."
        )

    @classmethod
    def from_config(cls, config, **kwargs):
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
        has_local_code = type(config) in cls._model_mapping.keys()
        if has_remote_code:
            class_ref = config.auto_map[cls.__name__]
            upstream_repo = class_ref.split("--")[0] if "--" in class_ref else None
            trust_remote_code = resolve_trust_remote_code(
                trust_remote_code, config._name_or_path, has_local_code, has_remote_code, upstream_repo=upstream_repo
            )

        if has_remote_code and trust_remote_code:
            if "--" in class_ref:
                repo_id, class_ref = class_ref.split("--")
            else:
                repo_id = config.name_or_path
            model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs)
            cls.register(config.__class__, model_class, exist_ok=True)
            model_class.register_for_auto_class(auto_class=cls)
            _ = kwargs.pop("code_revision", None)
            model_class = add_generation_mixin_to_remote_model(model_class)
            return model_class._from_config(config, **kwargs)
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            return model_class._from_config(config, **kwargs)

        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )

    @classmethod
    def _prepare_config_for_auto_class(cls, config: PretrainedConfig) -> PretrainedConfig:
        """Additional autoclass-specific config post-loading manipulation. May be overridden in subclasses."""
        return config

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        hub_kwargs_names = [
            "cache_dir",
            "force_download",
            "local_files_only",
            "proxies",
            "resume_download",
            "revision",
            "subfolder",
            "use_auth_token",
            "token",
        ]
        hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs}
        code_revision = kwargs.pop("code_revision", None)
        commit_hash = kwargs.pop("_commit_hash", None)
        adapter_kwargs = kwargs.pop("adapter_kwargs", None)

        token = hub_kwargs.pop("token", None)
        use_auth_token = hub_kwargs.pop("use_auth_token", None)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. "
                "Please use `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token
        if token is not None:
            hub_kwargs["token"] = token

        if commit_hash is None:
            if not isinstance(config, PretrainedConfig):
                # Resolve the config file first (if present) to get the commit hash as early as possible.
                resolved_config_file = cached_file(
                    pretrained_model_name_or_path,
                    CONFIG_NAME,
                    _raise_exceptions_for_gated_repo=False,
                    _raise_exceptions_for_missing_entries=False,
                    _raise_exceptions_for_connection_errors=False,
                    **hub_kwargs,
                )
                commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
            else:
                commit_hash = getattr(config, "_commit_hash", None)

        if is_peft_available():
            if adapter_kwargs is None:
                adapter_kwargs = {}
                if token is not None:
                    adapter_kwargs["token"] = token
            maybe_adapter_path = find_adapter_config_file(
                pretrained_model_name_or_path, _commit_hash=commit_hash, **adapter_kwargs
            )
            if maybe_adapter_path is not None:
                with open(maybe_adapter_path, "r", encoding="utf-8") as f:
                    adapter_config = json.load(f)
                    adapter_kwargs["_adapter_model_path"] = pretrained_model_name_or_path
                    pretrained_model_name_or_path = adapter_config["base_model_name_or_path"]

        if not isinstance(config, PretrainedConfig):
            kwargs_orig = copy.deepcopy(kwargs)
            # `torch_dtype="auto"` and `quantization_config` are meant for the model, not for the config object.
            if kwargs.get("torch_dtype", None) == "auto":
                _ = kwargs.pop("torch_dtype")
            if kwargs.get("quantization_config", None) is not None:
                _ = kwargs.pop("quantization_config")
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path,
                return_unused_kwargs=True,
                trust_remote_code=trust_remote_code,
                code_revision=code_revision,
                _commit_hash=commit_hash,
                **hub_kwargs,
                **kwargs,
            )
            if kwargs_orig.get("torch_dtype", None) == "auto":
                kwargs["torch_dtype"] = "auto"
            if kwargs_orig.get("quantization_config", None) is not None:
                kwargs["quantization_config"] = kwargs_orig["quantization_config"]

        has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
        has_local_code = type(config) in cls._model_mapping.keys()
        upstream_repo = None
        if has_remote_code:
            class_ref = config.auto_map[cls.__name__]
            if "--" in class_ref:
                upstream_repo = class_ref.split("--")[0]
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo=upstream_repo
        )
        kwargs["adapter_kwargs"] = adapter_kwargs

        if has_remote_code and trust_remote_code:
            model_class = get_class_from_dynamic_module(
                class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs
            )
            _ = hub_kwargs.pop("code_revision", None)
            cls.register(config.__class__, model_class, exist_ok=True)
            model_class.register_for_auto_class(auto_class=cls)
            model_class = add_generation_mixin_to_remote_model(model_class)
            return model_class.from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
            )
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            # If the selected class is the text model of a composite config, narrow the config accordingly.
            if model_class.config_class == config.sub_configs.get("text_config", None):
                config = config.get_text_config()
            return model_class.from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
            )
        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )

    @classmethod
    def register(cls, config_class, model_class, exist_ok=False):
        """
        Register a new model for this class.

        Args:
            config_class ([`PretrainedConfig`]):
                The configuration corresponding to the model to register.
            model_class ([`PreTrainedModel`]):
                The model to register.
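
        Example (a sketch, using the concrete `AutoModel` class and hypothetical `NewModelConfig`/`NewModel` classes):

        ```python
        >>> AutoModel.register(NewModelConfig, NewModel)
        ```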
        r   zThe model class you are passing has a `config_class` attribute that is not consistent with the config class you passed (model has z and you passed z!. Fix one of those so they match!r<   N)rM   r   r   rV   rN   rR   )rX   r   r]   r=   r   r   r   rR   d  s   z_BaseAutoModelClass.registerr0   NF)r   
__module____qualname__rN   r6   classmethodr_   r   r`   r   strosPathLiker   rR   r   r   r   r   r/     s    

& r/   c                       s4   e Zd ZdZe fddZe fddZ  ZS )_BaseAutoBackboneClassNc           
         s   t | ddg ddlm} |d| }|dd urtd|dd	r)td
|d|j}|d|j}|d|j}|d|j	}	||||||	d}t
 j|fi |S )Nvisiontimmr   )TimmBackboneConfigr)   out_featuresz0Cannot specify `out_features` for timm backbonesoutput_loading_infoFz@Cannot specify `output_loading_info=True` when loading from timmnum_channelsfeatures_onlyuse_pretrained_backboneout_indices)backboner   r   r   r   )r   models.timm_backboner   rL   r}   rV   r   r   r   r   superr_   )
rX   ra   r   r5   r   r)   r   r   r   r   r2   r   r   #_load_timm_backbone_from_pretrained|  s&   z:_BaseAutoBackboneClass._load_timm_backbone_from_pretrainedc                    sB   | dd}|r| j|g|R i |S t j|g|R i |S )Nuse_timm_backboneF)rL   r   r   r   )rX   ra   r   r5   r   r   r   r   r     s   z&_BaseAutoBackboneClass.from_pretrained)r   r   r   rN   r   r   r   __classcell__r   r   r   r   r   x  s    r    head_docc                 C   s,   t |dkr| dd| dS | ddS )Nr   z(one of the model classes of the library z0one of the model classes of the library (with a z head) z-one of the base model classes of the library )lenreplace)	docstringr   r   r   r   insert_head_doc  s   
r   google-bert/bert-base-casedcheckpoint_for_examplec                 C   s  | j }| j}tt|d}|d|| _ttj}tt	|d}|d|}|d|}||_t
|j dd|}t|| _|drCt}n
|drKt}nt}ttj}	t||d}|d|}|d|}|dd	 d
d }
|d|
}||	_t
|j |	}	t|	| _| S )N)r   BaseAutoModelClasscheckpoint_placeholderF)use_model_typesr"   r#   /-r   shortcut_placeholder)rN   r   r   CLASS_DOCSTRINGr   __doc__r   r/   r_   FROM_CONFIG_DOCSTRINGr   r   
startswithFROM_PRETRAINED_TF_DOCSTRINGFROM_PRETRAINED_FLAX_DOCSTRINGFROM_PRETRAINED_TORCH_DOCSTRINGr   rO   )rX   r   r   r*   rl   class_docstringr_   from_config_docstringfrom_pretrained_docstringr   shortcutr   r   r   auto_class_update  s4   





r   c                 C   s<   g }|   D ]}t|ttfr|t|7 }q|| q|S rF   )valuesr%   r&   r'   append)r*   resultr   r   r   r   
get_values  s   r   c                    s   |d u rd S t |trt fdd|D S t |r t |S td} |krEzt||W S  tyD   td| d  d| dw td| d| d)	Nc                 3   s    | ]}t  |V  qd S rF   )getattribute_from_module)r   amoduler   r   rI     s    z+getattribute_from_module.<locals>.<genexpr>transformerszCould not find z neither in z nor in !z in )r%   r'   rM   r(   	importlibimport_moduler   rV   )r   attrtransformers_moduler   r   r   r     s   



r   c                 C   s   dt | jvr	| S dt | jv r| S t| dodt t| dv}t| do-dt t| dv}|s2|r@t| j| tfi | j}|S | S )a  
    Adds `GenerationMixin` to the inheritance of `model_class`, if `model_class` is a PyTorch model.

    This function is used for backwards compatibility purposes: in v4.45, we've started a deprecation cycle to make
    `PreTrainedModel` stop inheriting from `GenerationMixin`. Without this function, older models dynamically loaded
    from the Hub may not have the `generate` method after we remove the inheritance.
    ztorch.nn.modules.module.Moduler   generateprepare_inputs_for_generation)	r   __mro__	__bases__rM   r(   r$   r   r   __dict__)r]   has_custom_generate_in_classhas_custom_prepare_inputs!model_class_with_generation_mixinr   r   r   rT     s    	rT   c                   @   s   e Zd ZdZd!ddZdefddZdee de	fd	d
Z
dd Zdeee  fddZdee dedee	ef fddZdefddZdee	 fddZdeeee e	f  fddZdeee  fddZdedefddZd"dee de	ddfdd ZdS )#_LazyAutoMappinga  
    " A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.

    Args:
        - config_mapping: The map model type to config class
        - model_mapping: The map model type to model (or tokenizer) class
    r0   Nc                 C   s8   || _ dd | D | _|| _| | j_i | _i | _d S )Nc                 S   s   i | ]\}}||qS r   r   r   kvr   r   r   r      s    z-_LazyAutoMapping.__init__.<locals>.<dictcomp>)_config_mappingitems_reverse_config_mappingrN   _extra_content_modules)r3   config_mappingr*   r   r   r   r6     s   
z_LazyAutoMapping.__init__c                 C   s,   t | j | j }t|t| j S rF   )setr   keysintersectionrN   r   r   )r3   common_keysr   r   r   __len__  s   z_LazyAutoMapping.__len__keyc                    s    | j v r
| j   S | j j }|| jv r | j| }| ||S  fdd| j D }|D ]}|| jv rB| j| }| ||  S q.t )Nc                    s   g | ]\}}| j kr|qS r   r   r   r   r   r   
<listcomp>*  s    z0_LazyAutoMapping.__getitem__.<locals>.<listcomp>)r   r   r   rN   _load_attr_from_moduler   r   KeyError)r3   r   
model_type
model_namemodel_typesmtyper   r   r   __getitem__!  s   





z_LazyAutoMapping.__getitem__c                 C   s:   t |}|| jvrtd| d| j|< t| j| |S )NrK   ztransformers.models)r   r   r   r   r   )r3   r   r   module_namer   r   r   r   1  s   
z'_LazyAutoMapping._load_attr_from_modulec                    *    fdd j  D }|t j  S )Nc                    &   g | ]\}}| j v r ||qS r   )rN   r   r   r   rl   r3   r   r   r   8  
    

z)_LazyAutoMapping.keys.<locals>.<listcomp>)r   r   r&   r   r   )r3   mapping_keysr   r   r   r   7     
z_LazyAutoMapping.keysdefaultc                 C   s$   z|  |W S  ty   | Y S w rF   )r   r   )r3   r   r   r   r   r   r}   ?  s
   z_LazyAutoMapping.getc                 C      t |  S rF   )boolr   r   r   r   r   __bool__E     z_LazyAutoMapping.__bool__c                    r   )Nc                    r   r   )r   r   r   r   r   r   r   I  r   z+_LazyAutoMapping.values.<locals>.<listcomp>)rN   r   r&   r   r   )r3   mapping_valuesr   r   r   r   H  r   z_LazyAutoMapping.valuesc                    s&    fdd j D }|t j  S )Nc                    s:   g | ]}| j v r | j |  | j| fqS r   )r   r   rN   )r   r   r   r   r   r   Q  s    
z*_LazyAutoMapping.items.<locals>.<listcomp>)rN   r&   r   r   )r3   mapping_itemsr   r   r   r   P  s   
z_LazyAutoMapping.itemsc                 C   r   rF   )iterr   r   r   r   r   __iter__[  r  z_LazyAutoMapping.__iter__itemc                 C   s>   || j v rdS t|dr|j| jvrdS | j|j }|| jv S )NTr   F)r   rM   r   r   rN   )r3   r  r   r   r   r   __contains__^  s   

z_LazyAutoMapping.__contains__Fvaluec                 C   sN   t |dr |j| jv r | j|j }|| jv r |s td| d|| j|< dS )z7
        Register a new model in this mapping.
        r   'z*' is already used by a Transformers model.N)rM   r   r   rN   rV   r   )r3   r   r	  r=   r   r   r   r   rR   f  s
   z_LazyAutoMapping.registerr   r   )r   r   r   r   r6   intr   r$   r   _LazyAutoMappingValuer   r   r&   r   r   r   r}   r   r  r   r'   r   r   r  r  rR   r   r   r   r   r     s    
" r   )r   )r   r   ):r   r   r   r   r   r~   collectionsr   collections.abcr   typingr   r   r   configuration_utilsr   dynamic_module_utilsr	   r
   utilsr   r   r   r   r   r   r   r   r   configuration_autor   r   r   
generationr   
get_loggerr   loggerr   r'   r$   r  r   r   r   r   r   r.   r/   r   r   r   r   r   r   rT   r   __all__r   r   r   r   <module>   sD   ,
$lcd \'# 
f
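# A minimal sketch (not part of this module) of how the pieces above are typically combined elsewhere in the library
# to build a concrete auto class; `CONFIG_MAPPING_NAMES` and `MODEL_MAPPING_NAMES` stand in for the real name tables
# defined in `configuration_auto.py` and `modeling_auto.py`.
#
#     MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES)
#
#     class AutoModel(_BaseAutoModelClass):
#         _model_mapping = MODEL_MAPPING
#
#     AutoModel = auto_class_update(AutoModel)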