From 9f132659a002280736b5f728f31b513f5dcbb584 Mon Sep 17 00:00:00 2001
From: Lysandre
Date: Wed, 18 Dec 2024 16:13:40 +0100
Subject: [PATCH] [test_all] Applies the rest of the init refactor except to
 modular files

---
 [per-file diffstat omitted: 1,020 files, almost all model __init__.py files and their configuration/modeling/tokenization/processing modules under src/transformers/models/, plus utils/check_repo.py]
 1020 files changed, 5353 insertions(+), 13575 deletions(-)
 create mode 100644 src/transformers/models/mt5/tokenization_mt5.py
 create mode 100644 src/transformers/models/mt5/tokenization_mt5_fast.py
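All of the per-model hunks below follow the same shape, shown in full for clvp: the hand-maintained _import_structure dict and its is_torch_available() try/except guards are deleted, each submodule declares its public names in __all__, and the package __init__.py delegates to _LazyModule with an import structure computed by define_import_structure from the files themselves. As a rough sketch of the idea only — assumed behavior for illustration; the real define_import_structure in src/transformers/utils/import_utils.py also records which backends (torch/tf/flax) each module needs so that _LazyModule can skip modules whose dependencies are not installed:

import ast
from pathlib import Path


def sketch_define_import_structure(init_file: str) -> dict:
    """Toy illustration: map each sibling module to the names in its __all__."""
    structure = {}
    for path in Path(init_file).parent.glob("*.py"):
        if path.name == "__init__.py":
            continue
        tree = ast.parse(path.read_text(encoding="utf-8"))
        for node in tree.body:
            # A top-level `__all__ = [...]` assignment defines the public API.
            if isinstance(node, ast.Assign) and any(
                isinstance(target, ast.Name) and target.id == "__all__"
                for target in node.targets
            ):
                structure[path.stem] = list(ast.literal_eval(node.value))
    return structure

Run against the clvp files in this patch, the sketch would yield {"configuration_clvp": ["ClvpConfig", "ClvpDecoderConfig", "ClvpEncoderConfig"], "tokenization_clvp": ["ClvpTokenizer"], ...} — exactly the information the deleted _import_structure dicts used to encode by hand.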
50 +----- .../models/yoso/configuration_yoso.py | 3 + src/transformers/models/yoso/modeling_yoso.py | 12 ++ src/transformers/models/zamba/__init__.py | 42 +---- .../models/zamba/configuration_zamba.py | 3 + .../models/zamba/modeling_zamba.py | 3 + src/transformers/models/zoedepth/__init__.py | 53 +----- .../models/zoedepth/configuration_zoedepth.py | 3 + .../zoedepth/image_processing_zoedepth.py | 3 + .../models/zoedepth/modeling_zoedepth.py | 3 + utils/check_repo.py | 2 +- 1020 files changed, 5353 insertions(+), 13575 deletions(-) create mode 100644 src/transformers/models/mt5/tokenization_mt5.py create mode 100644 src/transformers/models/mt5/tokenization_mt5_fast.py diff --git a/src/transformers/models/audio_spectrogram_transformer/__init__.py b/src/transformers/models/audio_spectrogram_transformer/__init__.py index 3fe10d60c03a92..618fceef70d389 100644 --- a/src/transformers/models/audio_spectrogram_transformer/__init__.py +++ b/src/transformers/models/audio_spectrogram_transformer/__init__.py @@ -19,7 +19,6 @@ if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import * - from .convert_audio_spectrogram_transformer_original_to_pytorch import * from .feature_extraction_audio_spectrogram_transformer import * from .modeling_audio_spectrogram_transformer import * else: diff --git a/src/transformers/models/bark/__init__.py b/src/transformers/models/bark/__init__.py index 6c21cf99976a15..c5296fc47fc423 100644 --- a/src/transformers/models/bark/__init__.py +++ b/src/transformers/models/bark/__init__.py @@ -19,8 +19,6 @@ if TYPE_CHECKING: from .configuration_bark import * - from .convert_suno_to_hf import * - from .generation_configuration_bark import * from .modeling_bark import * from .processing_bark import * else: diff --git a/src/transformers/models/bart/__init__.py b/src/transformers/models/bart/__init__.py index 11c3f4863f46a1..8f4c713f4698d7 100644 --- a/src/transformers/models/bart/__init__.py +++ b/src/transformers/models/bart/__init__.py @@ -19,7 +19,6 @@ if TYPE_CHECKING: from .configuration_bart import * - from .convert_bart_original_pytorch_checkpoint_to_pytorch import * from .modeling_bart import * from .modeling_flax_bart import * from .modeling_tf_bart import * diff --git a/src/transformers/models/beit/__init__.py b/src/transformers/models/beit/__init__.py index 0fc8919c7ea19a..44f838a9a5d97f 100644 --- a/src/transformers/models/beit/__init__.py +++ b/src/transformers/models/beit/__init__.py @@ -19,7 +19,6 @@ if TYPE_CHECKING: from .configuration_beit import * - from .convert_beit_unilm_to_pytorch import * from .feature_extraction_beit import * from .image_processing_beit import * from .modeling_beit import * diff --git a/src/transformers/models/bert/__init__.py b/src/transformers/models/bert/__init__.py index 3ed12a889321e6..2ef22794dde26e 100644 --- a/src/transformers/models/bert/__init__.py +++ b/src/transformers/models/bert/__init__.py @@ -19,10 +19,6 @@ if TYPE_CHECKING: from .configuration_bert import * - from .convert_bert_original_tf2_checkpoint_to_pytorch import * - from .convert_bert_original_tf_checkpoint_to_pytorch import * - from .convert_bert_pytorch_checkpoint_to_original_tf import * - from .convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch import * from .modeling_bert import * from .modeling_flax_bert import * from .modeling_tf_bert import * diff --git a/src/transformers/models/big_bird/__init__.py b/src/transformers/models/big_bird/__init__.py index b89712ab5ab49f..87419e69e5c7f0 100644 --- 
--- a/src/transformers/models/big_bird/__init__.py
+++ b/src/transformers/models/big_bird/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_big_bird import *
-    from .convert_bigbird_original_tf_checkpoint_to_pytorch import *
     from .modeling_big_bird import *
     from .modeling_flax_big_bird import *
     from .tokenization_big_bird import *
diff --git a/src/transformers/models/bigbird_pegasus/__init__.py b/src/transformers/models/bigbird_pegasus/__init__.py
index 8684d999d85cb4..d203b2a578f9a4 100644
--- a/src/transformers/models/bigbird_pegasus/__init__.py
+++ b/src/transformers/models/bigbird_pegasus/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_bigbird_pegasus import *
-    from .convert_bigbird_pegasus_tf_to_pytorch import *
     from .modeling_bigbird_pegasus import *
 else:
     import sys
diff --git a/src/transformers/models/biogpt/__init__.py b/src/transformers/models/biogpt/__init__.py
index 27773fb642459c..641cdb592117b4 100644
--- a/src/transformers/models/biogpt/__init__.py
+++ b/src/transformers/models/biogpt/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_biogpt import *
-    from .convert_biogpt_original_pytorch_checkpoint_to_pytorch import *
     from .modeling_biogpt import *
     from .tokenization_biogpt import *
 else:
diff --git a/src/transformers/models/bit/__init__.py b/src/transformers/models/bit/__init__.py
index f46988ca2d8f88..3b6ba91b032f8c 100644
--- a/src/transformers/models/bit/__init__.py
+++ b/src/transformers/models/bit/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_bit import *
-    from .convert_bit_to_pytorch import *
     from .image_processing_bit import *
     from .modeling_bit import *
 else:
diff --git a/src/transformers/models/blenderbot/__init__.py b/src/transformers/models/blenderbot/__init__.py
index d1180bd200d45c..76ece6853b381b 100644
--- a/src/transformers/models/blenderbot/__init__.py
+++ b/src/transformers/models/blenderbot/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_blenderbot import *
-    from .convert_blenderbot_original_pytorch_checkpoint_to_pytorch import *
     from .modeling_blenderbot import *
     from .modeling_flax_blenderbot import *
     from .modeling_tf_blenderbot import *
diff --git a/src/transformers/models/blip/__init__.py b/src/transformers/models/blip/__init__.py
index b3b604b24307ce..5443a3f6747aaa 100644
--- a/src/transformers/models/blip/__init__.py
+++ b/src/transformers/models/blip/__init__.py
@@ -19,12 +19,9 @@
 if TYPE_CHECKING:
     from .configuration_blip import *
-    from .convert_blip_original_pytorch_to_hf import *
     from .image_processing_blip import *
     from .modeling_blip import *
-    from .modeling_blip_text import *
     from .modeling_tf_blip import *
-    from .modeling_tf_blip_text import *
     from .processing_blip import *
 else:
     import sys
diff --git a/src/transformers/models/blip_2/__init__.py b/src/transformers/models/blip_2/__init__.py
index 1014e8c88102c9..0717e81fca606e 100644
--- a/src/transformers/models/blip_2/__init__.py
+++ b/src/transformers/models/blip_2/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_blip_2 import *
-    from .convert_blip_2_original_to_pytorch import *
     from .modeling_blip_2 import *
     from .processing_blip_2 import *
 else:
diff --git a/src/transformers/models/bloom/__init__.py b/src/transformers/models/bloom/__init__.py
index 012bbbc15c25d6..72d1d6e6ca4724 100644
--- a/src/transformers/models/bloom/__init__.py
+++ b/src/transformers/models/bloom/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_bloom import *
-    from .convert_bloom_original_checkpoint_to_pytorch import *
     from .modeling_bloom import *
     from .modeling_flax_bloom import *
     from .tokenization_bloom_fast import *
diff --git a/src/transformers/models/bros/__init__.py b/src/transformers/models/bros/__init__.py
index 54e429863ec85b..2178cfd03a4059 100644
--- a/src/transformers/models/bros/__init__.py
+++ b/src/transformers/models/bros/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_bros import *
-    from .convert_bros_to_pytorch import *
     from .modeling_bros import *
     from .processing_bros import *
 else:
diff --git a/src/transformers/models/byt5/__init__.py b/src/transformers/models/byt5/__init__.py
index c4243d1970d31d..cb726942b0f161 100644
--- a/src/transformers/models/byt5/__init__.py
+++ b/src/transformers/models/byt5/__init__.py
@@ -18,7 +18,6 @@
 if TYPE_CHECKING:
-    from .convert_byt5_original_tf_checkpoint_to_pytorch import *
     from .tokenization_byt5 import *
 else:
     import sys
diff --git a/src/transformers/models/canine/__init__.py b/src/transformers/models/canine/__init__.py
index 5f9611153bbd40..bb00d8fd882703 100644
--- a/src/transformers/models/canine/__init__.py
+++ b/src/transformers/models/canine/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_canine import *
-    from .convert_canine_original_tf_checkpoint_to_pytorch import *
     from .modeling_canine import *
     from .tokenization_canine import *
 else:
diff --git a/src/transformers/models/chameleon/__init__.py b/src/transformers/models/chameleon/__init__.py
index ad00f5cd3dab3d..4332161036d516 100644
--- a/src/transformers/models/chameleon/__init__.py
+++ b/src/transformers/models/chameleon/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_chameleon import *
-    from .convert_chameleon_weights_to_hf import *
     from .image_processing_chameleon import *
     from .modeling_chameleon import *
     from .processing_chameleon import *
diff --git a/src/transformers/models/chinese_clip/__init__.py b/src/transformers/models/chinese_clip/__init__.py
index 8770bde94ecf3a..fc1f002a16ad95 100644
--- a/src/transformers/models/chinese_clip/__init__.py
+++ b/src/transformers/models/chinese_clip/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_chinese_clip import *
-    from .convert_chinese_clip_original_pytorch_to_hf import *
     from .feature_extraction_chinese_clip import *
     from .image_processing_chinese_clip import *
     from .modeling_chinese_clip import *
diff --git a/src/transformers/models/clap/__init__.py b/src/transformers/models/clap/__init__.py
index aa2a04536f5d9e..6d54ee86aecef2 100644
--- a/src/transformers/models/clap/__init__.py
+++ b/src/transformers/models/clap/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_clap import *
-    from .convert_clap_original_pytorch_to_hf import *
     from .feature_extraction_clap import *
     from .modeling_clap import *
     from .processing_clap import *
diff --git a/src/transformers/models/clip/__init__.py b/src/transformers/models/clip/__init__.py
index 3bc3eff946f60f..f2c43e0b51d63b 100644
--- a/src/transformers/models/clip/__init__.py
+++ b/src/transformers/models/clip/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_clip import *
-    from .convert_clip_original_pytorch_to_hf import *
     from .feature_extraction_clip import *
     from .image_processing_clip import *
     from .modeling_clip import *
diff --git a/src/transformers/models/clipseg/__init__.py b/src/transformers/models/clipseg/__init__.py
index 77b338e8fea31c..55b38987fd0a3e 100644
--- a/src/transformers/models/clipseg/__init__.py
+++ b/src/transformers/models/clipseg/__init__.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from .configuration_clipseg import *
-    from .convert_clipseg_original_pytorch_to_hf import *
     from .modeling_clipseg import *
     from .processing_clipseg import *
 else:
diff --git a/src/transformers/models/clvp/__init__.py b/src/transformers/models/clvp/__init__.py
index 6ef4bc60e32148..986e185ff7771e 100644
--- a/src/transformers/models/clvp/__init__.py
+++ b/src/transformers/models/clvp/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,67 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_clvp": [
-        "ClvpConfig",
-        "ClvpDecoderConfig",
-        "ClvpEncoderConfig",
-    ],
-    "feature_extraction_clvp": ["ClvpFeatureExtractor"],
-    "processing_clvp": ["ClvpProcessor"],
-    "tokenization_clvp": ["ClvpTokenizer"],
-}
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_clvp"] = [
-        "ClvpModelForConditionalGeneration",
-        "ClvpForCausalLM",
-        "ClvpModel",
-        "ClvpPreTrainedModel",
-        "ClvpEncoder",
-        "ClvpDecoder",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_clvp import (
-        ClvpConfig,
-        ClvpDecoderConfig,
-        ClvpEncoderConfig,
-    )
-    from .feature_extraction_clvp import ClvpFeatureExtractor
-    from .processing_clvp import ClvpProcessor
-    from .tokenization_clvp import ClvpTokenizer
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_clvp import (
-            ClvpDecoder,
-            ClvpEncoder,
-            ClvpForCausalLM,
-            ClvpModel,
-            ClvpModelForConditionalGeneration,
-            ClvpPreTrainedModel,
-        )
-
+    from .configuration_clvp import *
+    from .feature_extraction_clvp import *
+    from .modeling_clvp import *
+    from .processing_clvp import *
+    from .tokenization_clvp import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/clvp/configuration_clvp.py b/src/transformers/models/clvp/configuration_clvp.py
index 8fd0e150801a66..cffc962eb322aa 100644
--- a/src/transformers/models/clvp/configuration_clvp.py
+++ b/src/transformers/models/clvp/configuration_clvp.py
@@ -438,3 +438,6 @@ def from_sub_model_configs(
             decoder_config=decoder_config.to_dict(),
             **kwargs,
         )
+
+
+__all__ = ["ClvpConfig", "ClvpDecoderConfig", "ClvpEncoderConfig"]
diff --git a/src/transformers/models/clvp/feature_extraction_clvp.py b/src/transformers/models/clvp/feature_extraction_clvp.py
index cb85b17a7f1775..2dbda430bb25e1 100644
--- a/src/transformers/models/clvp/feature_extraction_clvp.py
+++ b/src/transformers/models/clvp/feature_extraction_clvp.py
@@ -236,3 +236,6 @@ def __call__(
         padded_inputs["input_features"] = input_features

         return padded_inputs.convert_to_tensors(return_tensors)
+
+
+__all__ = ["ClvpFeatureExtractor"]
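The clvp rewrite above is the template applied to every model in this patch: each submodule now declares an explicit __all__, and the package __init__.py collapses to a _LazyModule driven by define_import_structure. A minimal sketch of the idea, assuming the helper derives the mapping from each sibling module's __all__ (the real implementation in utils/import_utils.py may differ):

import ast
from pathlib import Path


def define_import_structure_sketch(init_file):
    # Hypothetical stand-in for transformers.utils.import_utils.define_import_structure:
    # map each sibling module to the names its __all__ exports, i.e. the same
    # mapping the hand-written _import_structure dicts used to hold.
    structure = {}
    for module_file in Path(init_file).parent.glob("*.py"):
        if module_file.name == "__init__.py":
            continue
        tree = ast.parse(module_file.read_text(encoding="utf-8"))
        for node in tree.body:
            is_all = isinstance(node, ast.Assign) and any(
                isinstance(t, ast.Name) and t.id == "__all__" for t in node.targets
            )
            if is_all and isinstance(node.value, (ast.List, ast.Tuple)):
                structure[module_file.stem] = [ast.literal_eval(e) for e in node.value.elts]
    return structure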
diff --git a/src/transformers/models/clvp/modeling_clvp.py b/src/transformers/models/clvp/modeling_clvp.py
index a94667481540bd..844ca354cd1079 100644
--- a/src/transformers/models/clvp/modeling_clvp.py
+++ b/src/transformers/models/clvp/modeling_clvp.py
@@ -2021,3 +2021,13 @@ def generate(
             text_encoder_hidden_states=text_outputs.hidden_states,
             speech_encoder_hidden_states=speech_outputs.hidden_states,
         )
+
+
+__all__ = [
+    "ClvpModelForConditionalGeneration",
+    "ClvpForCausalLM",
+    "ClvpModel",
+    "ClvpPreTrainedModel",
+    "ClvpEncoder",
+    "ClvpDecoder",
+]
diff --git a/src/transformers/models/clvp/processing_clvp.py b/src/transformers/models/clvp/processing_clvp.py
index 4e015cea1f8475..3f4d54f259032f 100644
--- a/src/transformers/models/clvp/processing_clvp.py
+++ b/src/transformers/models/clvp/processing_clvp.py
@@ -88,3 +88,6 @@ def decode(self, *args, **kwargs):
         the docstring of this method for more information.
         """
         return self.tokenizer.decode(*args, **kwargs)
+
+
+__all__ = ["ClvpProcessor"]
diff --git a/src/transformers/models/clvp/tokenization_clvp.py b/src/transformers/models/clvp/tokenization_clvp.py
index d77564f718a53b..85ae1d6991ebc8 100644
--- a/src/transformers/models/clvp/tokenization_clvp.py
+++ b/src/transformers/models/clvp/tokenization_clvp.py
@@ -362,3 +362,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
                 index += 1

         return vocab_file, merge_file
+
+
+__all__ = ["ClvpTokenizer"]
diff --git a/src/transformers/models/code_llama/__init__.py b/src/transformers/models/code_llama/__init__.py
index 8c99c023419bbf..b65c4bddb4b0cd 100644
--- a/src/transformers/models/code_llama/__init__.py
+++ b/src/transformers/models/code_llama/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 MetaAI and The HuggingFace Inc. team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,45 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-_import_structure = {}
-
-try:
-    if not is_sentencepiece_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_code_llama"] = ["CodeLlamaTokenizer"]
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_code_llama_fast"] = ["CodeLlamaTokenizerFast"]
-

 if TYPE_CHECKING:
-    try:
-        if not is_sentencepiece_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_code_llama import CodeLlamaTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_code_llama_fast import CodeLlamaTokenizerFast
-
+    from .tokenization_code_llama import *
+    from .tokenization_code_llama_fast import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/code_llama/tokenization_code_llama.py b/src/transformers/models/code_llama/tokenization_code_llama.py
index cc906687874ce0..43386ecdaee4dc 100644
--- a/src/transformers/models/code_llama/tokenization_code_llama.py
+++ b/src/transformers/models/code_llama/tokenization_code_llama.py
@@ -447,3 +447,6 @@ def __setstate__(self, d):
         self.__dict__ = d
         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
         self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+
+__all__ = ["CodeLlamaTokenizer"]
diff --git a/src/transformers/models/code_llama/tokenization_code_llama_fast.py b/src/transformers/models/code_llama/tokenization_code_llama_fast.py
index b832348d07af4d..3bc831cdd6a15b 100644
--- a/src/transformers/models/code_llama/tokenization_code_llama_fast.py
+++ b/src/transformers/models/code_llama/tokenization_code_llama_fast.py
@@ -376,3 +376,6 @@ def build_inputs_with_special_tokens(
         if token_ids_1 is None:
             return self.bos_token_id + token_ids_0 + self.eos_token_id
         return self.bos_token_id + token_ids_0 + token_ids_1 + self.eos_token_id
+
+
+__all__ = ["CodeLlamaTokenizerFast"]
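Note what the code_llama rewrite drops: the is_sentencepiece_available()/is_tokenizers_available() ladders are gone from the package __init__. The sketch below shows one way a missing optional backend can still fail loudly, by checking inside the module that owns the symbol; requires_backends is a real transformers utility, but whether the refactor relies on it at exactly this point is an assumption:

from transformers.utils import requires_backends


class IllustrativeSentencePieceTokenizer:
    # Hypothetical class, not the real CodeLlamaTokenizer body: the backend
    # check runs when the object is used, not when the package is imported.
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])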
diff --git a/src/transformers/models/codegen/__init__.py b/src/transformers/models/codegen/__init__.py
index 7d4cb05adb20e9..ea2d9af11150f5 100644
--- a/src/transformers/models/codegen/__init__.py
+++ b/src/transformers/models/codegen/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,59 +13,17 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-_import_structure = {
-    "configuration_codegen": ["CodeGenConfig", "CodeGenOnnxConfig"],
-    "tokenization_codegen": ["CodeGenTokenizer"],
-}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_codegen_fast"] = ["CodeGenTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_codegen"] = [
-        "CodeGenForCausalLM",
-        "CodeGenModel",
-        "CodeGenPreTrainedModel",
-    ]
-

 if TYPE_CHECKING:
-    from .configuration_codegen import CodeGenConfig, CodeGenOnnxConfig
-    from .tokenization_codegen import CodeGenTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_codegen_fast import CodeGenTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_codegen import (
-            CodeGenForCausalLM,
-            CodeGenModel,
-            CodeGenPreTrainedModel,
-        )
-
+    from .configuration_codegen import *
+    from .modeling_codegen import *
+    from .tokenization_codegen import *
+    from .tokenization_codegen_fast import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/codegen/configuration_codegen.py b/src/transformers/models/codegen/configuration_codegen.py
index cf69001480c5f9..6de483cb79449a 100644
--- a/src/transformers/models/codegen/configuration_codegen.py
+++ b/src/transformers/models/codegen/configuration_codegen.py
@@ -225,3 +225,6 @@ def generate_dummy_inputs(
     @property
     def default_onnx_opset(self) -> int:
         return 13
+
+
+__all__ = ["CodeGenConfig", "CodeGenOnnxConfig"]
diff --git a/src/transformers/models/codegen/modeling_codegen.py b/src/transformers/models/codegen/modeling_codegen.py
index 616c93a46e4f4a..f64850628fea59 100644
--- a/src/transformers/models/codegen/modeling_codegen.py
+++ b/src/transformers/models/codegen/modeling_codegen.py
@@ -809,3 +809,6 @@ def _reorder_cache(
             tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
             for layer_past in past_key_values
         )
+
+
+__all__ = ["CodeGenForCausalLM", "CodeGenModel", "CodeGenPreTrainedModel"]
diff --git a/src/transformers/models/codegen/tokenization_codegen.py b/src/transformers/models/codegen/tokenization_codegen.py
index f3f765d273a35f..2b584e83b1b9ef 100644
--- a/src/transformers/models/codegen/tokenization_codegen.py
+++ b/src/transformers/models/codegen/tokenization_codegen.py
@@ -414,3 +414,6 @@ def find_re(string, pattern, start_pos):
             return completion[: min(terminals_pos)]
         else:
             return completion
+
+
+__all__ = ["CodeGenTokenizer"]
diff --git a/src/transformers/models/codegen/tokenization_codegen_fast.py b/src/transformers/models/codegen/tokenization_codegen_fast.py
index 9fdf2ec38ed3ed..fcfe1d2795b44a 100644
--- a/src/transformers/models/codegen/tokenization_codegen_fast.py
+++ b/src/transformers/models/codegen/tokenization_codegen_fast.py
@@ -270,3 +270,6 @@ def find_re(string, pattern, start_pos):
             return completion[: min(terminals_pos)]
         else:
             return completion
+
+
+__all__ = ["CodeGenTokenizerFast"]
diff --git a/src/transformers/models/cohere/__init__.py b/src/transformers/models/cohere/__init__.py
index f92e8b68a50a72..ad2d57500c4432 100644
--- a/src/transformers/models/cohere/__init__.py
+++ b/src/transformers/models/cohere/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2024 Cohere and The HuggingFace Inc. team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,65 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_sentencepiece_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_cohere": ["CohereConfig"],
-}
-
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_cohere_fast"] = ["CohereTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_cohere"] = [
-        "CohereForCausalLM",
-        "CohereModel",
-        "CoherePreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_cohere import CohereConfig
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_cohere_fast import CohereTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_cohere import (
-            CohereForCausalLM,
-            CohereModel,
-            CoherePreTrainedModel,
-        )
-
+    from .configuration_cohere import *
+    from .modeling_cohere import *
+    from .tokenization_cohere_fast import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/cohere/configuration_cohere.py b/src/transformers/models/cohere/configuration_cohere.py
index 3c1237e5113789..9312e2764d33de 100644
--- a/src/transformers/models/cohere/configuration_cohere.py
+++ b/src/transformers/models/cohere/configuration_cohere.py
@@ -198,3 +198,6 @@ def __init__(
             tie_word_embeddings=tie_word_embeddings,
             **kwargs,
         )
+
+
+__all__ = ["CohereConfig"]
diff --git a/src/transformers/models/cohere/modeling_cohere.py b/src/transformers/models/cohere/modeling_cohere.py
index b9a235ed500c0c..12de4d0ebe347c 100644
--- a/src/transformers/models/cohere/modeling_cohere.py
+++ b/src/transformers/models/cohere/modeling_cohere.py
@@ -1161,3 +1161,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]
diff --git a/src/transformers/models/cohere/tokenization_cohere_fast.py b/src/transformers/models/cohere/tokenization_cohere_fast.py
index bac665b473c57b..e99df5c609c895 100644
--- a/src/transformers/models/cohere/tokenization_cohere_fast.py
+++ b/src/transformers/models/cohere/tokenization_cohere_fast.py
@@ -510,3 +510,6 @@ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
             output = output + bos_token_id + token_ids_1 + eos_token_id

         return output
+
+
+__all__ = ["CohereTokenizerFast"]
diff --git a/src/transformers/models/conditional_detr/__init__.py b/src/transformers/models/conditional_detr/__init__.py
index c7d5c5261d6e67..46bd017d2b6b19 100644
--- a/src/transformers/models/conditional_detr/__init__.py
+++ b/src/transformers/models/conditional_detr/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,71 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
-
-
-_import_structure = {
-    "configuration_conditional_detr": [
-        "ConditionalDetrConfig",
-        "ConditionalDetrOnnxConfig",
-    ]
-}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
-    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_conditional_detr"] = [
-        "ConditionalDetrForObjectDetection",
-        "ConditionalDetrForSegmentation",
-        "ConditionalDetrModel",
-        "ConditionalDetrPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_conditional_detr import (
-        ConditionalDetrConfig,
-        ConditionalDetrOnnxConfig,
-    )
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
-        from .image_processing_conditional_detr import ConditionalDetrImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_conditional_detr import (
-            ConditionalDetrForObjectDetection,
-            ConditionalDetrForSegmentation,
-            ConditionalDetrModel,
-            ConditionalDetrPreTrainedModel,
-        )
-
+    from .configuration_conditional_detr import *
+    from .feature_extraction_conditional_detr import *
+    from .image_processing_conditional_detr import *
+    from .modeling_conditional_detr import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/conditional_detr/configuration_conditional_detr.py b/src/transformers/models/conditional_detr/configuration_conditional_detr.py
index 64364c653dd964..8dae72edff0896 100644
--- a/src/transformers/models/conditional_detr/configuration_conditional_detr.py
+++ b/src/transformers/models/conditional_detr/configuration_conditional_detr.py
@@ -273,3 +273,6 @@ def atol_for_validation(self) -> float:
     @property
     def default_onnx_opset(self) -> int:
         return 12
+
+
+__all__ = ["ConditionalDetrConfig", "ConditionalDetrOnnxConfig"]
diff --git a/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py b/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py
index bfdec373f865c5..8fe92eec42f6a1 100644
--- a/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py
+++ b/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py
@@ -41,3 +41,6 @@ def __init__(self, *args, **kwargs) -> None:
             FutureWarning,
         )
         super().__init__(*args, **kwargs)
+
+
+__all__ = ["ConditionalDetrFeatureExtractor"]
diff --git a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
index c7bc27207bd30d..effb8c3b058168 100644
--- a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
+++ b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
@@ -1851,3 +1851,6 @@ def post_process_panoptic_segmentation(
             results.append({"segmentation": segmentation, "segments_info": segments})

         return results
+
+
+__all__ = ["ConditionalDetrImageProcessor"]
diff --git a/src/transformers/models/conditional_detr/modeling_conditional_detr.py b/src/transformers/models/conditional_detr/modeling_conditional_detr.py
index d633b92547d7db..0aa4b2afa6bf96 100644
--- a/src/transformers/models/conditional_detr/modeling_conditional_detr.py
+++ b/src/transformers/models/conditional_detr/modeling_conditional_detr.py
@@ -2105,3 +2105,11 @@ def forward(self, q, k, mask: Optional[Tensor] = None):
         weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size())
         weights = self.dropout(weights)
         return weights
+
+
+__all__ = [
+    "ConditionalDetrForObjectDetection",
+    "ConditionalDetrForSegmentation",
+    "ConditionalDetrModel",
+    "ConditionalDetrPreTrainedModel",
+]
diff --git a/src/transformers/models/convbert/__init__.py b/src/transformers/models/convbert/__init__.py
index 15c6bb51767af1..670a7d6f47647f 100644
--- a/src/transformers/models/convbert/__init__.py
+++ b/src/transformers/models/convbert/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,114 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_tf_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_convbert": ["ConvBertConfig", "ConvBertOnnxConfig"],
-    "tokenization_convbert": ["ConvBertTokenizer"],
-}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_convbert"] = [
-        "ConvBertForMaskedLM",
-        "ConvBertForMultipleChoice",
-        "ConvBertForQuestionAnswering",
-        "ConvBertForSequenceClassification",
-        "ConvBertForTokenClassification",
-        "ConvBertLayer",
-        "ConvBertModel",
-        "ConvBertPreTrainedModel",
-        "load_tf_weights_in_convbert",
-    ]
-
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_convbert"] = [
-        "TFConvBertForMaskedLM",
-        "TFConvBertForMultipleChoice",
-        "TFConvBertForQuestionAnswering",
-        "TFConvBertForSequenceClassification",
-        "TFConvBertForTokenClassification",
-        "TFConvBertLayer",
-        "TFConvBertModel",
-        "TFConvBertPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_convbert import ConvBertConfig, ConvBertOnnxConfig
-    from .tokenization_convbert import ConvBertTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_convbert_fast import ConvBertTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_convbert import (
-            ConvBertForMaskedLM,
-            ConvBertForMultipleChoice,
-            ConvBertForQuestionAnswering,
-            ConvBertForSequenceClassification,
-            ConvBertForTokenClassification,
-            ConvBertLayer,
-            ConvBertModel,
-            ConvBertPreTrainedModel,
-            load_tf_weights_in_convbert,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_convbert import (
-            TFConvBertForMaskedLM,
-            TFConvBertForMultipleChoice,
-            TFConvBertForQuestionAnswering,
-            TFConvBertForSequenceClassification,
-            TFConvBertForTokenClassification,
-            TFConvBertLayer,
-            TFConvBertModel,
-            TFConvBertPreTrainedModel,
-        )
-
-
+    from .configuration_convbert import *
+    from .modeling_convbert import *
+    from .modeling_tf_convbert import *
+    from .tokenization_convbert import *
+    from .tokenization_convbert_fast import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/convbert/configuration_convbert.py b/src/transformers/models/convbert/configuration_convbert.py
index 2c6b544568b7bf..558ef5638cd4fe 100644
--- a/src/transformers/models/convbert/configuration_convbert.py
+++ b/src/transformers/models/convbert/configuration_convbert.py
@@ -155,3 +155,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
                 ("token_type_ids", dynamic_axis),
             ]
         )
+
+
+__all__ = ["ConvBertConfig", "ConvBertOnnxConfig"]
diff --git a/src/transformers/models/convbert/modeling_convbert.py b/src/transformers/models/convbert/modeling_convbert.py
index b92ff686edec5d..19eacfe2ac6a9e 100755
--- a/src/transformers/models/convbert/modeling_convbert.py
+++ b/src/transformers/models/convbert/modeling_convbert.py
@@ -1331,3 +1331,16 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "ConvBertForMaskedLM",
+    "ConvBertForMultipleChoice",
+    "ConvBertForQuestionAnswering",
+    "ConvBertForSequenceClassification",
+    "ConvBertForTokenClassification",
+    "ConvBertLayer",
+    "ConvBertModel",
+    "ConvBertPreTrainedModel",
+    "load_tf_weights_in_convbert",
+]
diff --git a/src/transformers/models/convbert/modeling_tf_convbert.py b/src/transformers/models/convbert/modeling_tf_convbert.py
index 95be5a56e19523..9b2696a7e2b009 100644
--- a/src/transformers/models/convbert/modeling_tf_convbert.py
+++ b/src/transformers/models/convbert/modeling_tf_convbert.py
@@ -1462,3 +1462,15 @@ def build(self, input_shape=None):
         if getattr(self, "qa_outputs", None) is not None:
             with tf.name_scope(self.qa_outputs.name):
                 self.qa_outputs.build([None, None, self.config.hidden_size])
+
+
+__all__ = [
+    "TFConvBertForMaskedLM",
+    "TFConvBertForMultipleChoice",
+    "TFConvBertForQuestionAnswering",
+    "TFConvBertForSequenceClassification",
+    "TFConvBertForTokenClassification",
+    "TFConvBertLayer",
+    "TFConvBertModel",
+    "TFConvBertPreTrainedModel",
+]
diff --git a/src/transformers/models/convbert/tokenization_convbert.py b/src/transformers/models/convbert/tokenization_convbert.py
index 10bbc096bf54d3..c2d68428389c5f 100644
--- a/src/transformers/models/convbert/tokenization_convbert.py
+++ b/src/transformers/models/convbert/tokenization_convbert.py
@@ -507,3 +507,6 @@ def tokenize(self, text):
             else:
                 output_tokens.extend(sub_tokens)
         return output_tokens
+
+
+__all__ = ["ConvBertTokenizer"]
diff --git a/src/transformers/models/convbert/tokenization_convbert_fast.py b/src/transformers/models/convbert/tokenization_convbert_fast.py
index e9c47c2b04bc9c..59262d976e97a9 100644
--- a/src/transformers/models/convbert/tokenization_convbert_fast.py
+++ b/src/transformers/models/convbert/tokenization_convbert_fast.py
@@ -171,3 +171,6 @@ def create_token_type_ids_from_sequences(
     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
         files = self._tokenizer.model.save(save_directory, name=filename_prefix)
         return tuple(files)
+
+
+__all__ = ["ConvBertTokenizerFast"]
diff --git a/src/transformers/models/convnext/__init__.py b/src/transformers/models/convnext/__init__.py
index 4e9a90bd4deb33..796b9a48926fe6 100644
--- a/src/transformers/models/convnext/__init__.py
+++ b/src/transformers/models/convnext/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,86 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_tf_available,
-    is_torch_available,
-    is_vision_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-_import_structure = {"configuration_convnext": ["ConvNextConfig", "ConvNextOnnxConfig"]}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
-    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_convnext"] = [
-        "ConvNextForImageClassification",
-        "ConvNextModel",
-        "ConvNextPreTrainedModel",
-        "ConvNextBackbone",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_convnext"] = [
-        "TFConvNextForImageClassification",
-        "TFConvNextModel",
-        "TFConvNextPreTrainedModel",
-    ]
-

 if TYPE_CHECKING:
-    from .configuration_convnext import ConvNextConfig, ConvNextOnnxConfig
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .feature_extraction_convnext import ConvNextFeatureExtractor
-        from .image_processing_convnext import ConvNextImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_convnext import (
-            ConvNextBackbone,
-            ConvNextForImageClassification,
-            ConvNextModel,
-            ConvNextPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
-
-
+    from .configuration_convnext import *
+    from .feature_extraction_convnext import *
+    from .image_processing_convnext import *
+    from .modeling_convnext import *
+    from .modeling_tf_convnext import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/convnext/configuration_convnext.py b/src/transformers/models/convnext/configuration_convnext.py
index b4fe1e60e872cd..9f9ed3bfd469df 100644
--- a/src/transformers/models/convnext/configuration_convnext.py
+++ b/src/transformers/models/convnext/configuration_convnext.py
@@ -137,3 +137,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
     @property
     def atol_for_validation(self) -> float:
         return 1e-5
+
+
+__all__ = ["ConvNextConfig", "ConvNextOnnxConfig"]
diff --git a/src/transformers/models/convnext/feature_extraction_convnext.py b/src/transformers/models/convnext/feature_extraction_convnext.py
index 92b8a8f4fba82f..6b2208e5b1134f 100644
--- a/src/transformers/models/convnext/feature_extraction_convnext.py
+++ b/src/transformers/models/convnext/feature_extraction_convnext.py
@@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None:
             FutureWarning,
         )
         super().__init__(*args, **kwargs)
+
+
+__all__ = ["ConvNextFeatureExtractor"]
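One behavioral detail in the ConvNext hunk above: the old __init__ called _LazyModule without module_spec, while the rewrite passes module_spec=__spec__ like the other models. Assuming _LazyModule forwards that spec to the module's __spec__ (an assumption about its internals), the lazy stand-in keeps the metadata importlib tooling consults:

import importlib

m = importlib.import_module("transformers.models.convnext")
# A populated __spec__ is what importlib.reload() and origin introspection rely on.
print(m.__spec__)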
diff --git a/src/transformers/models/convnext/image_processing_convnext.py b/src/transformers/models/convnext/image_processing_convnext.py
index aaabc677f182b4..90fc0bb1ff179b 100644
--- a/src/transformers/models/convnext/image_processing_convnext.py
+++ b/src/transformers/models/convnext/image_processing_convnext.py
@@ -318,3 +318,6 @@ def preprocess(
         data = {"pixel_values": images}

         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["ConvNextImageProcessor"]
diff --git a/src/transformers/models/convnext/modeling_convnext.py b/src/transformers/models/convnext/modeling_convnext.py
index a0deaf96d5d124..155f466ac4ae68 100755
--- a/src/transformers/models/convnext/modeling_convnext.py
+++ b/src/transformers/models/convnext/modeling_convnext.py
@@ -546,3 +546,6 @@ def forward(
             hidden_states=hidden_states if output_hidden_states else None,
             attentions=None,
         )
+
+
+__all__ = ["ConvNextForImageClassification", "ConvNextModel", "ConvNextPreTrainedModel", "ConvNextBackbone"]
diff --git a/src/transformers/models/convnext/modeling_tf_convnext.py b/src/transformers/models/convnext/modeling_tf_convnext.py
index 0e348a838a9a90..af1bae81db495f 100644
--- a/src/transformers/models/convnext/modeling_tf_convnext.py
+++ b/src/transformers/models/convnext/modeling_tf_convnext.py
@@ -664,3 +664,6 @@ def build(self, input_shape=None):
         if hasattr(self.classifier, "name"):
             with tf.name_scope(self.classifier.name):
                 self.classifier.build([None, None, self.config.hidden_sizes[-1]])
+
+
+__all__ = ["TFConvNextForImageClassification", "TFConvNextModel", "TFConvNextPreTrainedModel"]
diff --git a/src/transformers/models/convnextv2/__init__.py b/src/transformers/models/convnextv2/__init__.py
index 5505868c14a4f4..0fd1293963b233 100644
--- a/src/transformers/models/convnextv2/__init__.py
+++ b/src/transformers/models/convnextv2/__init__.py
@@ -1,8 +1,4 @@
-# flake8: noqa
-# There's no way to ignore "F401 '...' imported but unused" warnings in this
-# module, but to preserve other warnings. So, don't check this module at all.
-
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,73 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-# rely on isort to merge the imports
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-    is_tf_available,
-)
-
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-_import_structure = {"configuration_convnextv2": ["ConvNextV2Config"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_convnextv2"] = [
-        "ConvNextV2ForImageClassification",
-        "ConvNextV2Model",
-        "ConvNextV2PreTrainedModel",
-        "ConvNextV2Backbone",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_convnextv2"] = [
-        "TFConvNextV2ForImageClassification",
-        "TFConvNextV2Model",
-        "TFConvNextV2PreTrainedModel",
-    ]

 if TYPE_CHECKING:
-    from .configuration_convnextv2 import (
-        ConvNextV2Config,
-    )
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_convnextv2 import (
-            ConvNextV2Backbone,
-            ConvNextV2ForImageClassification,
-            ConvNextV2Model,
-            ConvNextV2PreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_convnextv2 import (
-            TFConvNextV2ForImageClassification,
-            TFConvNextV2Model,
-            TFConvNextV2PreTrainedModel,
-        )
-
+    from .configuration_convnextv2 import *
+    from .modeling_convnextv2 import *
+    from .modeling_tf_convnextv2 import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/convnextv2/configuration_convnextv2.py b/src/transformers/models/convnextv2/configuration_convnextv2.py
index ec42b033b3ef8a..60b631a340c051 100644
--- a/src/transformers/models/convnextv2/configuration_convnextv2.py
+++ b/src/transformers/models/convnextv2/configuration_convnextv2.py
@@ -113,3 +113,6 @@ def __init__(
         self._out_features, self._out_indices = get_aligned_output_features_output_indices(
             out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
         )
+
+
+__all__ = ["ConvNextV2Config"]
diff --git a/src/transformers/models/convnextv2/modeling_convnextv2.py b/src/transformers/models/convnextv2/modeling_convnextv2.py
index df13a5ea6b6b13..c0490eead21c88 100644
--- a/src/transformers/models/convnextv2/modeling_convnextv2.py
+++ b/src/transformers/models/convnextv2/modeling_convnextv2.py
@@ -569,3 +569,6 @@ def forward(
             hidden_states=hidden_states if output_hidden_states else None,
             attentions=None,
         )
+
+
+__all__ = ["ConvNextV2ForImageClassification", "ConvNextV2Model", "ConvNextV2PreTrainedModel", "ConvNextV2Backbone"]
diff --git a/src/transformers/models/convnextv2/modeling_tf_convnextv2.py b/src/transformers/models/convnextv2/modeling_tf_convnextv2.py
index d8b1416334723a..c27ba2da453039 100644
--- a/src/transformers/models/convnextv2/modeling_tf_convnextv2.py
+++ b/src/transformers/models/convnextv2/modeling_tf_convnextv2.py
@@ -678,3 +678,6 @@ def build(self, input_shape=None):
         if getattr(self, "classifier", None) is not None:
             with tf.name_scope(self.classifier.name):
                 self.classifier.build([None, None, self.config.hidden_sizes[-1]])
+
+
+__all__ = ["TFConvNextV2ForImageClassification", "TFConvNextV2Model", "TFConvNextV2PreTrainedModel"]
diff --git a/src/transformers/models/cpm/__init__.py b/src/transformers/models/cpm/__init__.py
index be6b0f66898ecb..aaf4524671fdfe 100644
--- a/src/transformers/models/cpm/__init__.py
+++ b/src/transformers/models/cpm/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,49 +11,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
-
-
-_import_structure = {}
-
-try:
-    if not is_sentencepiece_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_cpm"] = ["CpmTokenizer"]
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_cpm_fast"] = ["CpmTokenizerFast"]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    try:
-        if not is_sentencepiece_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_cpm import CpmTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_cpm_fast import CpmTokenizerFast
-
+    from .tokenization_cpm import *
+    from .tokenization_cpm_fast import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/cpm/tokenization_cpm.py b/src/transformers/models/cpm/tokenization_cpm.py
index c92afb7eb6d205..884068f1a15710 100644
--- a/src/transformers/models/cpm/tokenization_cpm.py
+++ b/src/transformers/models/cpm/tokenization_cpm.py
@@ -343,3 +343,6 @@ def _decode(self, *args, **kwargs):
         text = super()._decode(*args, **kwargs)
         text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
         return text
+
+
+__all__ = ["CpmTokenizer"]
diff --git a/src/transformers/models/cpm/tokenization_cpm_fast.py b/src/transformers/models/cpm/tokenization_cpm_fast.py
index 3dcf624843c5d5..ef933e084ddb2b 100644
--- a/src/transformers/models/cpm/tokenization_cpm_fast.py
+++ b/src/transformers/models/cpm/tokenization_cpm_fast.py
@@ -236,3 +236,6 @@ def _decode(self, *args, **kwargs):
         text = super()._decode(*args, **kwargs)
         text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
         return text
+
+
+__all__ = ["CpmTokenizerFast"]
diff --git a/src/transformers/models/cpmant/__init__.py b/src/transformers/models/cpmant/__init__.py
index 61db942a4f66bd..d92eea75693e72 100644
--- a/src/transformers/models/cpmant/__init__.py
+++ b/src/transformers/models/cpmant/__init__.py
@@ -1,8 +1,4 @@
-# flake8: noqa
-# There's no way to ignore "F401 '...' imported but unused" warnings in this
-# module, but to preserve other warnings. So, don't check this module at all.
-
-# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,46 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-# rely on isort to merge the imports
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
-
-
-_import_structure = {
-    "configuration_cpmant": ["CpmAntConfig"],
-    "tokenization_cpmant": ["CpmAntTokenizer"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_cpmant"] = [
-        "CpmAntForCausalLM",
-        "CpmAntModel",
-        "CpmAntPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_cpmant import CpmAntConfig
-    from .tokenization_cpmant import CpmAntTokenizer
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_cpmant import (
-            CpmAntForCausalLM,
-            CpmAntModel,
-            CpmAntPreTrainedModel,
-        )
-
-
+    from .configuration_cpmant import *
+    from .modeling_cpmant import *
+    from .tokenization_cpmant import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/cpmant/configuration_cpmant.py b/src/transformers/models/cpmant/configuration_cpmant.py
index 155811913a954c..c3368d67af7ab7 100644
--- a/src/transformers/models/cpmant/configuration_cpmant.py
+++ b/src/transformers/models/cpmant/configuration_cpmant.py
@@ -117,3 +117,6 @@ def __init__(
         self.use_cache = use_cache
         self.vocab_size = vocab_size
         self.init_std = init_std
+
+
+__all__ = ["CpmAntConfig"]
diff --git a/src/transformers/models/cpmant/modeling_cpmant.py b/src/transformers/models/cpmant/modeling_cpmant.py
index 5507c8082f1c05..df0aebe3cbf850 100755
--- a/src/transformers/models/cpmant/modeling_cpmant.py
+++ b/src/transformers/models/cpmant/modeling_cpmant.py
@@ -855,3 +855,6 @@ def _reorder_cache(self, past_key_values, beam_idx):
                 key_value_layer[0] = key_value_layer[0][beam_idx]
                 key_value_layer[1] = key_value_layer[1][beam_idx]
         return past_key_values
+
+
+__all__ = ["CpmAntForCausalLM", "CpmAntModel", "CpmAntPreTrainedModel"]
diff --git a/src/transformers/models/cpmant/tokenization_cpmant.py b/src/transformers/models/cpmant/tokenization_cpmant.py
index 094a14ffce069f..2da1d6286c5e8b 100644
--- a/src/transformers/models/cpmant/tokenization_cpmant.py
+++ b/src/transformers/models/cpmant/tokenization_cpmant.py
@@ -265,3 +265,6 @@ def get_special_tokens_mask(
         if token_ids_1 is not None:
             return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
         return [1] + ([0] * len(token_ids_0))
+
+
+__all__ = ["CpmAntTokenizer"]
diff --git a/src/transformers/models/ctrl/__init__.py b/src/transformers/models/ctrl/__init__.py
index f64cced4e28bfe..ea62163babef93 100644
--- a/src/transformers/models/ctrl/__init__.py
+++ b/src/transformers/models/ctrl/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,75 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
-
-
-_import_structure = {
-    "configuration_ctrl": ["CTRLConfig"],
-    "tokenization_ctrl": ["CTRLTokenizer"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_ctrl"] = [
-        "CTRLForSequenceClassification",
-        "CTRLLMHeadModel",
-        "CTRLModel",
-        "CTRLPreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_ctrl"] = [
-        "TFCTRLForSequenceClassification",
-        "TFCTRLLMHeadModel",
-        "TFCTRLModel",
-        "TFCTRLPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_ctrl import CTRLConfig
-    from .tokenization_ctrl import CTRLTokenizer
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_ctrl import (
-            CTRLForSequenceClassification,
-            CTRLLMHeadModel,
-            CTRLModel,
-            CTRLPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_ctrl import (
-            TFCTRLForSequenceClassification,
-            TFCTRLLMHeadModel,
-            TFCTRLModel,
-            TFCTRLPreTrainedModel,
-        )
-
+    from .configuration_ctrl import *
+    from .modeling_ctrl import *
+    from .modeling_tf_ctrl import *
+    from .tokenization_ctrl import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/ctrl/configuration_ctrl.py b/src/transformers/models/ctrl/configuration_ctrl.py
index adea61cd67fb23..7a812f0b55650f 100644
--- a/src/transformers/models/ctrl/configuration_ctrl.py
+++ b/src/transformers/models/ctrl/configuration_ctrl.py
@@ -111,3 +111,6 @@ def __init__(
         self.use_cache = use_cache

         super().__init__(**kwargs)
+
+
+__all__ = ["CTRLConfig"]
diff --git a/src/transformers/models/ctrl/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py
index 1d382a81141ff1..10c325dbee838b 100644
--- a/src/transformers/models/ctrl/modeling_ctrl.py
+++ b/src/transformers/models/ctrl/modeling_ctrl.py
@@ -839,3 +839,6 @@ def forward(
             hidden_states=transformer_outputs.hidden_states,
             attentions=transformer_outputs.attentions,
         )
+
+
+__all__ = ["CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel"]
diff --git a/src/transformers/models/ctrl/modeling_tf_ctrl.py b/src/transformers/models/ctrl/modeling_tf_ctrl.py
index 3feecf9a205fd7..26609cc671649a 100644
--- a/src/transformers/models/ctrl/modeling_tf_ctrl.py
+++ b/src/transformers/models/ctrl/modeling_tf_ctrl.py
@@ -926,3 +926,6 @@ def build(self, input_shape=None):
         if getattr(self, "transformer", None) is not None:
             with tf.name_scope(self.transformer.name):
                 self.transformer.build(None)
+
+
+__all__ = ["TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel"]
diff --git a/src/transformers/models/ctrl/tokenization_ctrl.py b/src/transformers/models/ctrl/tokenization_ctrl.py
index 5305f2b231b82b..66dae2b05fa620 100644
--- a/src/transformers/models/ctrl/tokenization_ctrl.py
+++ b/src/transformers/models/ctrl/tokenization_ctrl.py
@@ -246,3 +246,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
     #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
     #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
     #     return ''.join(tokens_generated_so_far)
+
+
+__all__ = ["CTRLTokenizer"]
diff --git a/src/transformers/models/cvt/__init__.py b/src/transformers/models/cvt/__init__.py
index 7018b41d58e8b2..756aded9e6ad29 100644
--- a/src/transformers/models/cvt/__init__.py
+++ b/src/transformers/models/cvt/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,65 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-_import_structure = {"configuration_cvt": ["CvtConfig"]}
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_cvt"] = [
-        "CvtForImageClassification",
-        "CvtModel",
-        "CvtPreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_cvt"] = [
-        "TFCvtForImageClassification",
-        "TFCvtModel",
-        "TFCvtPreTrainedModel",
-    ]
-

 if TYPE_CHECKING:
-    from .configuration_cvt import CvtConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_cvt import (
-            CvtForImageClassification,
-            CvtModel,
-            CvtPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_cvt import (
-            TFCvtForImageClassification,
-            TFCvtModel,
-            TFCvtPreTrainedModel,
-        )
-
-
+    from .configuration_cvt import *
+    from .modeling_cvt import *
+    from .modeling_tf_cvt import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/cvt/configuration_cvt.py b/src/transformers/models/cvt/configuration_cvt.py
index a966701cee6447..38cba6874f6860 100644
--- a/src/transformers/models/cvt/configuration_cvt.py
+++ b/src/transformers/models/cvt/configuration_cvt.py
@@ -141,3 +141,6 @@ def __init__(
         self.stride_q = stride_q
         self.initializer_range = initializer_range
         self.layer_norm_eps = layer_norm_eps
+
+
+__all__ = ["CvtConfig"]
diff --git a/src/transformers/models/cvt/modeling_cvt.py b/src/transformers/models/cvt/modeling_cvt.py
index 796382444427ea..cd68b391ba1ff4 100644
--- a/src/transformers/models/cvt/modeling_cvt.py
+++ b/src/transformers/models/cvt/modeling_cvt.py
@@ -720,3 +720,6 @@ def forward(
             return ((loss,) + output) if loss is not None else output

         return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
+
+
+__all__ = ["CvtForImageClassification", "CvtModel", "CvtPreTrainedModel"]
diff --git a/src/transformers/models/cvt/modeling_tf_cvt.py b/src/transformers/models/cvt/modeling_tf_cvt.py
index 617fc99733e05c..fa9a4d9a3a4450 100644
--- a/src/transformers/models/cvt/modeling_tf_cvt.py
+++ b/src/transformers/models/cvt/modeling_tf_cvt.py
@@ -1091,3 +1091,6 @@ def build(self, input_shape=None):
         if hasattr(self.classifier, "name"):
             with tf.name_scope(self.classifier.name):
                 self.classifier.build([None, None, self.config.embed_dim[-1]])
+
+
+__all__ = ["TFCvtForImageClassification", "TFCvtModel", "TFCvtPreTrainedModel"]
diff --git a/src/transformers/models/dac/__init__.py b/src/transformers/models/dac/__init__.py
index f72339abef6dcc..40f84ccb59be91 100644
--- a/src/transformers/models/dac/__init__.py
+++ b/src/transformers/models/dac/__init__.py
@@ -1,5 +1,4 @@
-# coding=utf-8
-# Copyright 2024 Descript and The HuggingFace Inc. team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,47 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-_import_structure = {
-    "configuration_dac": ["DacConfig"],
-    "feature_extraction_dac": ["DacFeatureExtractor"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_dac"] = [
-        "DacModel",
-        "DacPreTrainedModel",
-    ]
-

 if TYPE_CHECKING:
-    from .configuration_dac import (
-        DacConfig,
-    )
-    from .feature_extraction_dac import DacFeatureExtractor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_dac import (
-            DacModel,
-            DacPreTrainedModel,
-        )
-
+    from .configuration_dac import *
+    from .feature_extraction_dac import *
+    from .modeling_dac import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/dac/configuration_dac.py b/src/transformers/models/dac/configuration_dac.py
index 21586341c37861..dbbefd3ccb59d7 100644
--- a/src/transformers/models/dac/configuration_dac.py
+++ b/src/transformers/models/dac/configuration_dac.py
@@ -109,3 +109,6 @@ def __init__(
     def frame_rate(self) -> int:
         hop_length = np.prod(self.upsampling_ratios)
         return math.ceil(self.sampling_rate / hop_length)
+
+
+__all__ = ["DacConfig"]
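The frame_rate property in the DacConfig hunk above is a small computation worth unpacking: the product of the upsampling ratios is the hop length in samples, and the frame rate is the sampling rate divided by that hop, rounded up. With illustrative numbers (not necessarily the DacConfig defaults):

import math

import numpy as np

sampling_rate = 16000
upsampling_ratios = [2, 4, 8, 8]

hop_length = np.prod(upsampling_ratios)  # 2 * 4 * 8 * 8 = 512 samples per frame
frame_rate = math.ceil(sampling_rate / hop_length)
print(frame_rate)  # ceil(16000 / 512) = ceil(31.25) = 32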
b/src/transformers/models/dac/feature_extraction_dac.py index 9bbf0b60302498..c22a7603f0598d 100644 --- a/src/transformers/models/dac/feature_extraction_dac.py +++ b/src/transformers/models/dac/feature_extraction_dac.py @@ -168,3 +168,6 @@ def __call__( padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs + + +__all__ = ["DacFeatureExtractor"] diff --git a/src/transformers/models/dac/modeling_dac.py b/src/transformers/models/dac/modeling_dac.py index f465ee77faa745..f05f5d35bf34a4 100644 --- a/src/transformers/models/dac/modeling_dac.py +++ b/src/transformers/models/dac/modeling_dac.py @@ -719,3 +719,6 @@ def forward( return (loss, audio_values, quantized_representation, audio_codes, projected_latents) return DacOutput(loss, audio_values, quantized_representation, audio_codes, projected_latents) + + +__all__ = ["DacModel", "DacPreTrainedModel"] diff --git a/src/transformers/models/data2vec/__init__.py b/src/transformers/models/data2vec/__init__.py index 525068db59832c..7000ac3d353bf4 100644 --- a/src/transformers/models/data2vec/__init__.py +++ b/src/transformers/models/data2vec/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,115 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_data2vec_audio": ["Data2VecAudioConfig"], - "configuration_data2vec_text": [ - "Data2VecTextConfig", - "Data2VecTextOnnxConfig", - ], - "configuration_data2vec_vision": [ - "Data2VecVisionConfig", - "Data2VecVisionOnnxConfig", - ], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_data2vec_audio"] = [ - "Data2VecAudioForAudioFrameClassification", - "Data2VecAudioForCTC", - "Data2VecAudioForSequenceClassification", - "Data2VecAudioForXVector", - "Data2VecAudioModel", - "Data2VecAudioPreTrainedModel", - ] - _import_structure["modeling_data2vec_text"] = [ - "Data2VecTextForCausalLM", - "Data2VecTextForMaskedLM", - "Data2VecTextForMultipleChoice", - "Data2VecTextForQuestionAnswering", - "Data2VecTextForSequenceClassification", - "Data2VecTextForTokenClassification", - "Data2VecTextModel", - "Data2VecTextPreTrainedModel", - ] - _import_structure["modeling_data2vec_vision"] = [ - "Data2VecVisionForImageClassification", - "Data2VecVisionForMaskedImageModeling", - "Data2VecVisionForSemanticSegmentation", - "Data2VecVisionModel", - "Data2VecVisionPreTrainedModel", - ] - -if is_tf_available(): - _import_structure["modeling_tf_data2vec_vision"] = [ - "TFData2VecVisionForImageClassification", - "TFData2VecVisionForSemanticSegmentation", - "TFData2VecVisionModel", - "TFData2VecVisionPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_data2vec_audio import Data2VecAudioConfig - from .configuration_data2vec_text import ( - Data2VecTextConfig, - Data2VecTextOnnxConfig, - ) - from .configuration_data2vec_vision import ( - 
-        Data2VecVisionConfig,
-        Data2VecVisionOnnxConfig,
-    )
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_data2vec_audio import (
-            Data2VecAudioForAudioFrameClassification,
-            Data2VecAudioForCTC,
-            Data2VecAudioForSequenceClassification,
-            Data2VecAudioForXVector,
-            Data2VecAudioModel,
-            Data2VecAudioPreTrainedModel,
-        )
-        from .modeling_data2vec_text import (
-            Data2VecTextForCausalLM,
-            Data2VecTextForMaskedLM,
-            Data2VecTextForMultipleChoice,
-            Data2VecTextForQuestionAnswering,
-            Data2VecTextForSequenceClassification,
-            Data2VecTextForTokenClassification,
-            Data2VecTextModel,
-            Data2VecTextPreTrainedModel,
-        )
-        from .modeling_data2vec_vision import (
-            Data2VecVisionForImageClassification,
-            Data2VecVisionForMaskedImageModeling,
-            Data2VecVisionForSemanticSegmentation,
-            Data2VecVisionModel,
-            Data2VecVisionPreTrainedModel,
-        )
-    if is_tf_available():
-        from .modeling_tf_data2vec_vision import (
-            TFData2VecVisionForImageClassification,
-            TFData2VecVisionForSemanticSegmentation,
-            TFData2VecVisionModel,
-            TFData2VecVisionPreTrainedModel,
-        )
-
+    from .configuration_data2vec_audio import *
+    from .configuration_data2vec_text import *
+    from .configuration_data2vec_vision import *
+    from .modeling_data2vec_audio import *
+    from .modeling_data2vec_text import *
+    from .modeling_data2vec_vision import *
+    from .modeling_tf_data2vec_vision import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/data2vec/configuration_data2vec_audio.py b/src/transformers/models/data2vec/configuration_data2vec_audio.py
index 54754a8c798bc0..8066829027fb6e 100644
--- a/src/transformers/models/data2vec/configuration_data2vec_audio.py
+++ b/src/transformers/models/data2vec/configuration_data2vec_audio.py
@@ -283,3 +283,6 @@ def __init__(
     @property
     def inputs_to_logits_ratio(self):
         return math.prod(self.conv_stride)
+
+
+__all__ = ["Data2VecAudioConfig"]
diff --git a/src/transformers/models/data2vec/configuration_data2vec_text.py b/src/transformers/models/data2vec/configuration_data2vec_text.py
index 6cd7b80c302e47..3aa9a6b7bf22a8 100644
--- a/src/transformers/models/data2vec/configuration_data2vec_text.py
+++ b/src/transformers/models/data2vec/configuration_data2vec_text.py
@@ -149,3 +149,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
                 ("attention_mask", dynamic_axis),
             ]
         )
+
+
+__all__ = ["Data2VecTextConfig", "Data2VecTextOnnxConfig"]
diff --git a/src/transformers/models/data2vec/configuration_data2vec_vision.py b/src/transformers/models/data2vec/configuration_data2vec_vision.py
index d63a564cecfe02..b822b03ef3eb20 100644
--- a/src/transformers/models/data2vec/configuration_data2vec_vision.py
+++ b/src/transformers/models/data2vec/configuration_data2vec_vision.py
@@ -189,3 +189,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
     @property
     def atol_for_validation(self) -> float:
         return 1e-4
+
+
+__all__ = ["Data2VecVisionConfig", "Data2VecVisionOnnxConfig"]
diff --git a/src/transformers/models/data2vec/modeling_data2vec_audio.py b/src/transformers/models/data2vec/modeling_data2vec_audio.py
index 590509eaf9057c..e7ddf6e5fa4abe 100755
--- a/src/transformers/models/data2vec/modeling_data2vec_audio.py
+++ b/src/transformers/models/data2vec/modeling_data2vec_audio.py
@@ -1763,3 +1763,13 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "Data2VecAudioForAudioFrameClassification",
+    "Data2VecAudioForCTC",
+    "Data2VecAudioForSequenceClassification",
+    "Data2VecAudioForXVector",
+    "Data2VecAudioModel",
+    "Data2VecAudioPreTrainedModel",
+]
diff --git a/src/transformers/models/data2vec/modeling_data2vec_text.py b/src/transformers/models/data2vec/modeling_data2vec_text.py
index e62c11943866a3..806a1b0edb5737 100644
--- a/src/transformers/models/data2vec/modeling_data2vec_text.py
+++ b/src/transformers/models/data2vec/modeling_data2vec_text.py
@@ -1539,3 +1539,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l
     mask = input_ids.ne(padding_idx).int()
     incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
     return incremental_indices.long() + padding_idx
+
+
+__all__ = [
+    "Data2VecTextForCausalLM",
+    "Data2VecTextForMaskedLM",
+    "Data2VecTextForMultipleChoice",
+    "Data2VecTextForQuestionAnswering",
+    "Data2VecTextForSequenceClassification",
+    "Data2VecTextForTokenClassification",
+    "Data2VecTextModel",
+    "Data2VecTextPreTrainedModel",
+]
diff --git a/src/transformers/models/data2vec/modeling_data2vec_vision.py b/src/transformers/models/data2vec/modeling_data2vec_vision.py
index 770162285bf33b..d0a3b4dd59e790 100644
--- a/src/transformers/models/data2vec/modeling_data2vec_vision.py
+++ b/src/transformers/models/data2vec/modeling_data2vec_vision.py
@@ -1444,3 +1444,11 @@ def forward(
             hidden_states=outputs.hidden_states if output_hidden_states else None,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "Data2VecVisionForImageClassification",
+    "Data2VecVisionForSemanticSegmentation",
+    "Data2VecVisionModel",
+    "Data2VecVisionPreTrainedModel",
+]
diff --git a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py
index f95360206bd1db..71595b4a43ce8e 100644
--- a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py
+++ b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py
@@ -1714,3 +1714,11 @@ def build(self, input_shape=None):
         if getattr(self, "fpn2", None) is not None:
             with tf.name_scope(self.fpn2[0].name):
                 self.fpn2[0].build([None, None, None, self.config.hidden_size])
+
+
+__all__ = [
+    "TFData2VecVisionForImageClassification",
+    "TFData2VecVisionForSemanticSegmentation",
+    "TFData2VecVisionModel",
+    "TFData2VecVisionPreTrainedModel",
+]
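The explicit __all__ lists added above are what let define_import_structure replace the hand-maintained _import_structure dicts: the helper can recover the module-to-symbols map from the files themselves. A rough sketch of the idea, not the actual implementation in utils/import_utils.py:

import ast
from pathlib import Path


def sketch_import_structure(init_file: str) -> dict:
    """Map each sibling module to the names its __all__ exports (illustrative)."""
    structure = {}
    for module in Path(init_file).parent.glob("*.py"):
        if module.name == "__init__.py":
            continue
        for node in ast.parse(module.read_text()).body:
            if isinstance(node, ast.Assign) and any(
                isinstance(t, ast.Name) and t.id == "__all__" for t in node.targets
            ):
                # __all__ is a literal list of strings, so literal_eval is enough
                structure[module.stem] = ast.literal_eval(node.value)
    return structure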
diff --git a/src/transformers/models/dbrx/__init__.py b/src/transformers/models/dbrx/__init__.py
index 693a544c4b3d3f..cce0f34c778d21 100644
--- a/src/transformers/models/dbrx/__init__.py
+++ b/src/transformers/models/dbrx/__init__.py
@@ -13,39 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
-
-
-_import_structure = {
-    "configuration_dbrx": ["DbrxConfig"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_dbrx"] = [
-        "DbrxForCausalLM",
-        "DbrxModel",
-        "DbrxPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_dbrx import DbrxConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_dbrx import DbrxForCausalLM, DbrxModel, DbrxPreTrainedModel
-
-
+    from .configuration_dbrx import *
+    from .modeling_dbrx import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/dbrx/configuration_dbrx.py b/src/transformers/models/dbrx/configuration_dbrx.py
index 302b5e6a55821d..7935b1d1beb7a7 100644
--- a/src/transformers/models/dbrx/configuration_dbrx.py
+++ b/src/transformers/models/dbrx/configuration_dbrx.py
@@ -227,3 +227,6 @@ def __init__(
             raise ValueError("tie_word_embeddings is not supported for DBRX models.")
 
         super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
+
+
+__all__ = ["DbrxConfig"]
diff --git a/src/transformers/models/dbrx/modeling_dbrx.py b/src/transformers/models/dbrx/modeling_dbrx.py
index 7d20b766658f23..3a68afb11a377d 100644
--- a/src/transformers/models/dbrx/modeling_dbrx.py
+++ b/src/transformers/models/dbrx/modeling_dbrx.py
@@ -1376,3 +1376,6 @@ def forward(
             attentions=outputs.attentions,
             router_logits=outputs.router_logits,
         )
+
+
+__all__ = ["DbrxForCausalLM", "DbrxModel", "DbrxPreTrainedModel"]
diff --git a/src/transformers/models/deberta/__init__.py b/src/transformers/models/deberta/__init__.py
index 76beee798ff075..f7097223796420 100644
--- a/src/transformers/models/deberta/__init__.py
+++ b/src/transformers/models/deberta/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,106 +11,20 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_tf_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_deberta": ["DebertaConfig", "DebertaOnnxConfig"],
-    "tokenization_deberta": ["DebertaTokenizer"],
-}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_deberta"] = [
-        "DebertaForMaskedLM",
-        "DebertaForQuestionAnswering",
-        "DebertaForSequenceClassification",
-        "DebertaForTokenClassification",
-        "DebertaModel",
-        "DebertaPreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_deberta"] = [
-        "TFDebertaForMaskedLM",
-        "TFDebertaForQuestionAnswering",
-        "TFDebertaForSequenceClassification",
-        "TFDebertaForTokenClassification",
-        "TFDebertaModel",
-        "TFDebertaPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_deberta import DebertaConfig, DebertaOnnxConfig
-    from .tokenization_deberta import DebertaTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_deberta_fast import DebertaTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_deberta import (
-            DebertaForMaskedLM,
-            DebertaForQuestionAnswering,
-            DebertaForSequenceClassification,
-            DebertaForTokenClassification,
-            DebertaModel,
-            DebertaPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_deberta import (
-            TFDebertaForMaskedLM,
-            TFDebertaForQuestionAnswering,
-            TFDebertaForSequenceClassification,
-            TFDebertaForTokenClassification,
-            TFDebertaModel,
-            TFDebertaPreTrainedModel,
-        )
-
-
+    from .configuration_deberta import *
+    from .modeling_deberta import *
+    from .modeling_tf_deberta import *
+    from .tokenization_deberta import *
+    from .tokenization_deberta_fast import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/deberta/configuration_deberta.py b/src/transformers/models/deberta/configuration_deberta.py
index cfee176047e0ce..835c16080a17ef 100644
--- a/src/transformers/models/deberta/configuration_deberta.py
+++ b/src/transformers/models/deberta/configuration_deberta.py
@@ -194,3 +194,6 @@ def generate_dummy_inputs(
         if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
             del dummy_inputs["token_type_ids"]
         return dummy_inputs
+
+
+__all__ = ["DebertaConfig", "DebertaOnnxConfig"]
diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py
index c9a85bcad1bd6f..415c03fa968589 100644
--- a/src/transformers/models/deberta/modeling_deberta.py
+++ b/src/transformers/models/deberta/modeling_deberta.py
@@ -1332,3 +1332,13 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "DebertaForMaskedLM",
+    "DebertaForQuestionAnswering",
+    "DebertaForSequenceClassification",
+    "DebertaForTokenClassification",
+    "DebertaModel",
+    "DebertaPreTrainedModel",
+]
diff --git a/src/transformers/models/deberta/modeling_tf_deberta.py b/src/transformers/models/deberta/modeling_tf_deberta.py
index 3fa7bd4504a344..6a8b233978ff7e 100644
--- a/src/transformers/models/deberta/modeling_tf_deberta.py
+++ b/src/transformers/models/deberta/modeling_tf_deberta.py
@@ -1640,3 +1640,13 @@ def build(self, input_shape=None):
         if getattr(self, "qa_outputs", None) is not None:
             with tf.name_scope(self.qa_outputs.name):
                 self.qa_outputs.build([None, None, self.config.hidden_size])
+
+
+__all__ = [
+    "TFDebertaForMaskedLM",
+    "TFDebertaForQuestionAnswering",
+    "TFDebertaForSequenceClassification",
+    "TFDebertaForTokenClassification",
+    "TFDebertaModel",
+    "TFDebertaPreTrainedModel",
+]
diff --git a/src/transformers/models/deberta/tokenization_deberta.py b/src/transformers/models/deberta/tokenization_deberta.py
index 371aa9866232d2..63933c1d2a32fb 100644
--- a/src/transformers/models/deberta/tokenization_deberta.py
+++ b/src/transformers/models/deberta/tokenization_deberta.py
@@ -391,3 +391,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
         if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
             text = " " + text
         return (text, kwargs)
+
+
+__all__ = ["DebertaTokenizer"]
diff --git a/src/transformers/models/deberta/tokenization_deberta_fast.py b/src/transformers/models/deberta/tokenization_deberta_fast.py
index b28732850b17e0..39c64d90e533a2 100644
--- a/src/transformers/models/deberta/tokenization_deberta_fast.py
+++ b/src/transformers/models/deberta/tokenization_deberta_fast.py
@@ -245,3 +245,6 @@ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
         files = self._tokenizer.model.save(save_directory, name=filename_prefix)
         return tuple(files)
+
+
+__all__ = ["DebertaTokenizerFast"]
diff --git a/src/transformers/models/deberta_v2/__init__.py b/src/transformers/models/deberta_v2/__init__.py
index 314901aee1aed3..7c42c9c5028624 100644
--- a/src/transformers/models/deberta_v2/__init__.py
+++ b/src/transformers/models/deberta_v2/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,112 +11,20 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_tf_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_deberta_v2": ["DebertaV2Config", "DebertaV2OnnxConfig"],
-    "tokenization_deberta_v2": ["DebertaV2Tokenizer"],
-}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_deberta_v2_fast"] = ["DebertaV2TokenizerFast"]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_deberta_v2"] = [
-        "TFDebertaV2ForMaskedLM",
-        "TFDebertaV2ForQuestionAnswering",
-        "TFDebertaV2ForMultipleChoice",
-        "TFDebertaV2ForSequenceClassification",
-        "TFDebertaV2ForTokenClassification",
-        "TFDebertaV2Model",
-        "TFDebertaV2PreTrainedModel",
-    ]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_deberta_v2"] = [
-        "DebertaV2ForMaskedLM",
-        "DebertaV2ForMultipleChoice",
-        "DebertaV2ForQuestionAnswering",
-        "DebertaV2ForSequenceClassification",
-        "DebertaV2ForTokenClassification",
-        "DebertaV2Model",
-        "DebertaV2PreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_deberta_v2 import (
-        DebertaV2Config,
-        DebertaV2OnnxConfig,
-    )
-    from .tokenization_deberta_v2 import DebertaV2Tokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_deberta_v2_fast import DebertaV2TokenizerFast
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_deberta_v2 import (
-            TFDebertaV2ForMaskedLM,
-            TFDebertaV2ForMultipleChoice,
-            TFDebertaV2ForQuestionAnswering,
-            TFDebertaV2ForSequenceClassification,
-            TFDebertaV2ForTokenClassification,
-            TFDebertaV2Model,
-            TFDebertaV2PreTrainedModel,
-        )
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_deberta_v2 import (
-            DebertaV2ForMaskedLM,
-            DebertaV2ForMultipleChoice,
-            DebertaV2ForQuestionAnswering,
-            DebertaV2ForSequenceClassification,
-            DebertaV2ForTokenClassification,
-            DebertaV2Model,
-            DebertaV2PreTrainedModel,
-        )
-
+    from .configuration_deberta_v2 import *
+    from .modeling_deberta_v2 import *
+    from .modeling_tf_deberta_v2 import *
+    from .tokenization_deberta_v2 import *
+    from .tokenization_deberta_v2_fast import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/deberta_v2/configuration_deberta_v2.py b/src/transformers/models/deberta_v2/configuration_deberta_v2.py
index cf3f61033c3285..3b1aaa3f046683 100644
--- a/src/transformers/models/deberta_v2/configuration_deberta_v2.py
+++ b/src/transformers/models/deberta_v2/configuration_deberta_v2.py
@@ -193,3 +193,6 @@ def generate_dummy_inputs(
         if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
         return dummy_inputs
+
+
+__all__ = ["DebertaV2Config", "DebertaV2OnnxConfig"]
diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py
index 7d2f25603a6f96..177ca3dca551fa 100644
--- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py
+++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py
@@ -1506,3 +1506,14 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "DebertaV2ForMaskedLM",
+    "DebertaV2ForMultipleChoice",
+    "DebertaV2ForQuestionAnswering",
+    "DebertaV2ForSequenceClassification",
+    "DebertaV2ForTokenClassification",
+    "DebertaV2Model",
+    "DebertaV2PreTrainedModel",
+]
diff --git a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py
index fd8032f747944b..b7b5a01d170fe3 100644
--- a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py
+++ b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py
@@ -1868,3 +1868,14 @@ def build(self, input_shape=None):
         if getattr(self, "classifier", None) is not None:
             with tf.name_scope(self.classifier.name):
                 self.classifier.build([None, None, self.output_dim])
+
+
+__all__ = [
+    "TFDebertaV2ForMaskedLM",
+    "TFDebertaV2ForQuestionAnswering",
+    "TFDebertaV2ForMultipleChoice",
+    "TFDebertaV2ForSequenceClassification",
+    "TFDebertaV2ForTokenClassification",
+    "TFDebertaV2Model",
+    "TFDebertaV2PreTrainedModel",
+]
diff --git a/src/transformers/models/deberta_v2/tokenization_deberta_v2.py b/src/transformers/models/deberta_v2/tokenization_deberta_v2.py
index 6ff689f80a5c1b..4440cc2e1c4840 100644
--- a/src/transformers/models/deberta_v2/tokenization_deberta_v2.py
+++ b/src/transformers/models/deberta_v2/tokenization_deberta_v2.py
@@ -519,3 +519,6 @@ def convert_to_unicode(text):
         return text.decode("utf-8", "ignore")
     else:
         raise TypeError(f"Unsupported string type: {type(text)}")
+
+
+__all__ = ["DebertaV2Tokenizer"]
diff --git a/src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py b/src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py
index cb92a61edf1afb..784e8299541902 100644
--- a/src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py
+++ b/src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py
@@ -218,3 +218,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
             copyfile(self.vocab_file, out_vocab_file)
 
         return (out_vocab_file,)
+
+
+__all__ = ["DebertaV2TokenizerFast"]
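For reference, the laziness itself lives in _LazyModule: attribute access on the package triggers the import of the owning submodule. A simplified sketch of the mechanism (the real class in transformers.utils handles more, e.g. error messages for missing backends):

import importlib
from types import ModuleType


class LazyModuleSketch(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # symbol name -> submodule that defines it
        self._owner = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, item: str):
        if item not in self._owner:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        submodule = importlib.import_module(f".{self._owner[item]}", self.__name__)
        value = getattr(submodule, item)
        setattr(self, item, value)  # cache so later lookups skip __getattr__
        return value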
diff --git a/src/transformers/models/decision_transformer/__init__.py b/src/transformers/models/decision_transformer/__init__.py
index ce97cf7352a782..455f7ffec5dee0 100644
--- a/src/transformers/models/decision_transformer/__init__.py
+++ b/src/transformers/models/decision_transformer/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,47 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
-
-
-_import_structure = {
-    "configuration_decision_transformer": ["DecisionTransformerConfig"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_decision_transformer"] = [
-        "DecisionTransformerGPT2Model",
-        "DecisionTransformerGPT2PreTrainedModel",
-        "DecisionTransformerModel",
-        "DecisionTransformerPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_decision_transformer import (
-        DecisionTransformerConfig,
-    )
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_decision_transformer import (
-            DecisionTransformerGPT2Model,
-            DecisionTransformerGPT2PreTrainedModel,
-            DecisionTransformerModel,
-            DecisionTransformerPreTrainedModel,
-        )
-
-
+    from .configuration_decision_transformer import *
+    from .modeling_decision_transformer import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/decision_transformer/configuration_decision_transformer.py b/src/transformers/models/decision_transformer/configuration_decision_transformer.py
index 19e89afecbfa9c..e677206aa089ce 100644
--- a/src/transformers/models/decision_transformer/configuration_decision_transformer.py
+++ b/src/transformers/models/decision_transformer/configuration_decision_transformer.py
@@ -152,3 +152,6 @@ def __init__(
         self.eos_token_id = eos_token_id
 
         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+
+__all__ = ["DecisionTransformerConfig"]
diff --git a/src/transformers/models/decision_transformer/modeling_decision_transformer.py b/src/transformers/models/decision_transformer/modeling_decision_transformer.py
index b8eb9f5a8b4222..93a6f9df2cbbc6 100755
--- a/src/transformers/models/decision_transformer/modeling_decision_transformer.py
+++ b/src/transformers/models/decision_transformer/modeling_decision_transformer.py
@@ -931,3 +931,11 @@ def forward(
             hidden_states=encoder_outputs.hidden_states,
             attentions=encoder_outputs.attentions,
         )
+
+
+__all__ = [
+    "DecisionTransformerGPT2Model",
+    "DecisionTransformerGPT2PreTrainedModel",
+    "DecisionTransformerModel",
+    "DecisionTransformerPreTrainedModel",
+]
diff --git a/src/transformers/models/deit/__init__.py b/src/transformers/models/deit/__init__.py
index 8248823be24c73..994580e8152027 100644
--- a/src/transformers/models/deit/__init__.py
+++ b/src/transformers/models/deit/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,97 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_tf_available,
-    is_torch_available,
-    is_vision_available,
-)
-
-
-_import_structure = {"configuration_deit": ["DeiTConfig", "DeiTOnnxConfig"]}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
-    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_deit"] = [
-        "DeiTForImageClassification",
-        "DeiTForImageClassificationWithTeacher",
-        "DeiTForMaskedImageModeling",
-        "DeiTModel",
-        "DeiTPreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_deit"] = [
-        "TFDeiTForImageClassification",
-        "TFDeiTForImageClassificationWithTeacher",
-        "TFDeiTForMaskedImageModeling",
-        "TFDeiTModel",
-        "TFDeiTPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_deit import DeiTConfig, DeiTOnnxConfig
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .feature_extraction_deit import DeiTFeatureExtractor
-        from .image_processing_deit import DeiTImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_deit import (
-            DeiTForImageClassification,
-            DeiTForImageClassificationWithTeacher,
-            DeiTForMaskedImageModeling,
-            DeiTModel,
-            DeiTPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_deit import (
-            TFDeiTForImageClassification,
-            TFDeiTForImageClassificationWithTeacher,
-            TFDeiTForMaskedImageModeling,
-            TFDeiTModel,
-            TFDeiTPreTrainedModel,
-        )
-
-
+    from .configuration_deit import *
+    from .feature_extraction_deit import *
+    from .image_processing_deit import *
+    from .modeling_deit import *
+    from .modeling_tf_deit import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/deit/configuration_deit.py b/src/transformers/models/deit/configuration_deit.py
index 3784ed76ab2a28..d135144a2a40d2 100644
--- a/src/transformers/models/deit/configuration_deit.py
+++ b/src/transformers/models/deit/configuration_deit.py
@@ -137,3 +137,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
     @property
     def atol_for_validation(self) -> float:
         return 1e-4
+
+
+__all__ = ["DeiTConfig", "DeiTOnnxConfig"]
diff --git a/src/transformers/models/deit/feature_extraction_deit.py b/src/transformers/models/deit/feature_extraction_deit.py
index b66922ea95753a..813c115fce553d 100644
--- a/src/transformers/models/deit/feature_extraction_deit.py
+++ b/src/transformers/models/deit/feature_extraction_deit.py
@@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None:
             FutureWarning,
         )
         super().__init__(*args, **kwargs)
+
+
+__all__ = ["DeiTFeatureExtractor"]
diff --git a/src/transformers/models/deit/image_processing_deit.py b/src/transformers/models/deit/image_processing_deit.py
index bafb5f6e71adc0..7b5f981b004984 100644
--- a/src/transformers/models/deit/image_processing_deit.py
+++ b/src/transformers/models/deit/image_processing_deit.py
@@ -294,3 +294,6 @@ def preprocess(
         data = {"pixel_values": images}
 
         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["DeiTImageProcessor"]
diff --git a/src/transformers/models/deit/modeling_deit.py b/src/transformers/models/deit/modeling_deit.py
index e0b053e43906b8..dfb7753d6f9dd0 100644
--- a/src/transformers/models/deit/modeling_deit.py
+++ b/src/transformers/models/deit/modeling_deit.py
@@ -1010,3 +1010,12 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "DeiTForImageClassification",
+    "DeiTForImageClassificationWithTeacher",
+    "DeiTForMaskedImageModeling",
+    "DeiTModel",
+    "DeiTPreTrainedModel",
+]
diff --git a/src/transformers/models/deit/modeling_tf_deit.py b/src/transformers/models/deit/modeling_tf_deit.py
index 03ad1385d34c9d..7723f8fc347872 100644
--- a/src/transformers/models/deit/modeling_tf_deit.py
+++ b/src/transformers/models/deit/modeling_tf_deit.py
@@ -1222,3 +1222,12 @@ def build(self, input_shape=None):
         if getattr(self, "distillation_classifier", None) is not None:
             with tf.name_scope(self.distillation_classifier.name):
                 self.distillation_classifier.build([None, None, self.config.hidden_size])
+
+
+__all__ = [
+    "TFDeiTForImageClassification",
+    "TFDeiTForImageClassificationWithTeacher",
+    "TFDeiTForMaskedImageModeling",
+    "TFDeiTModel",
+    "TFDeiTPreTrainedModel",
+]
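The wildcard imports in the TYPE_CHECKING branches are precise only because every submodule now declares __all__: a star import pulls in exactly the listed names. A toy demonstration with a hypothetical module:

# exports_demo.py (hypothetical)
__all__ = ["PublicModel"]


class PublicModel:
    pass


class UnlistedHelper:  # public-looking name, but hidden from `import *`
    pass


# consumer code: `from exports_demo import *` binds PublicModel only.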
diff --git a/src/transformers/models/depth_anything/__init__.py b/src/transformers/models/depth_anything/__init__.py
index 0640e211259f77..7425e37e0399c7 100644
--- a/src/transformers/models/depth_anything/__init__.py
+++ b/src/transformers/models/depth_anything/__init__.py
@@ -13,40 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...file_utils import _LazyModule, is_torch_available
-from ...utils import OptionalDependencyNotAvailable
-
-
-_import_structure = {"configuration_depth_anything": ["DepthAnythingConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_depth_anything"] = [
-        "DepthAnythingForDepthEstimation",
-        "DepthAnythingPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_depth_anything import DepthAnythingConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_depth_anything import (
-            DepthAnythingForDepthEstimation,
-            DepthAnythingPreTrainedModel,
-        )
-
-
+    from .configuration_depth_anything import *
+    from .modeling_depth_anything import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/depth_anything/configuration_depth_anything.py b/src/transformers/models/depth_anything/configuration_depth_anything.py
index e1b472bdce1948..3bbe621a44310c 100644
--- a/src/transformers/models/depth_anything/configuration_depth_anything.py
+++ b/src/transformers/models/depth_anything/configuration_depth_anything.py
@@ -163,3 +163,6 @@ def to_dict(self):
         output["model_type"] = self.__class__.model_type
 
         return output
+
+
+__all__ = ["DepthAnythingConfig"]
diff --git a/src/transformers/models/depth_anything/modeling_depth_anything.py b/src/transformers/models/depth_anything/modeling_depth_anything.py
index 4667c413457b19..98a6ccde8c17d4 100644
--- a/src/transformers/models/depth_anything/modeling_depth_anything.py
+++ b/src/transformers/models/depth_anything/modeling_depth_anything.py
@@ -463,3 +463,6 @@ def forward(
             hidden_states=outputs.hidden_states if output_hidden_states else None,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["DepthAnythingForDepthEstimation", "DepthAnythingPreTrainedModel"]
diff --git a/src/transformers/models/dinat/__init__.py b/src/transformers/models/dinat/__init__.py
index 207ebfdaa8693f..b64cdbb3c7eb04 100644
--- a/src/transformers/models/dinat/__init__.py
+++ b/src/transformers/models/dinat/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,42 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {"configuration_dinat": ["DinatConfig"]}
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_dinat"] = [
-        "DinatForImageClassification",
-        "DinatModel",
-        "DinatPreTrainedModel",
-        "DinatBackbone",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_dinat import DinatConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_dinat import (
-            DinatBackbone,
-            DinatForImageClassification,
-            DinatModel,
-            DinatPreTrainedModel,
-        )
-
+    from .configuration_dinat import *
+    from .modeling_dinat import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/dinat/configuration_dinat.py b/src/transformers/models/dinat/configuration_dinat.py
index 220561152b3571..7b432e37c85139 100644
--- a/src/transformers/models/dinat/configuration_dinat.py
+++ b/src/transformers/models/dinat/configuration_dinat.py
@@ -147,3 +147,6 @@ def __init__(
         self._out_features, self._out_indices = get_aligned_output_features_output_indices(
             out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
         )
+
+
+__all__ = ["DinatConfig"]
diff --git a/src/transformers/models/dinat/modeling_dinat.py b/src/transformers/models/dinat/modeling_dinat.py
index 18f8725da86133..69677e406410be 100644
--- a/src/transformers/models/dinat/modeling_dinat.py
+++ b/src/transformers/models/dinat/modeling_dinat.py
@@ -955,3 +955,6 @@ def forward(
             hidden_states=outputs.hidden_states if output_hidden_states else None,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["DinatForImageClassification", "DinatModel", "DinatPreTrainedModel", "DinatBackbone"]
diff --git a/src/transformers/models/dinov2/__init__.py b/src/transformers/models/dinov2/__init__.py
index 1bb4a4597b9adf..3cc316957eac50 100644
--- a/src/transformers/models/dinov2/__init__.py
+++ b/src/transformers/models/dinov2/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,70 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_flax_available,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {"configuration_dinov2": ["Dinov2Config", "Dinov2OnnxConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_dinov2"] = [
-        "Dinov2ForImageClassification",
-        "Dinov2Model",
-        "Dinov2PreTrainedModel",
-        "Dinov2Backbone",
-    ]
-
-try:
-    if not is_flax_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flax_dinov2"] = [
-        "FlaxDinov2ForImageClassification",
-        "FlaxDinov2Model",
-        "FlaxDinov2PreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_dinov2 import Dinov2Config, Dinov2OnnxConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_dinov2 import (
-            Dinov2Backbone,
-            Dinov2ForImageClassification,
-            Dinov2Model,
-            Dinov2PreTrainedModel,
-        )
-
-    try:
-        if not is_flax_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flax_dinov2 import (
-            FlaxDinov2ForImageClassification,
-            FlaxDinov2Model,
-            FlaxDinov2PreTrainedModel,
-        )
-
+    from .configuration_dinov2 import *
+    from .modeling_dinov2 import *
+    from .modeling_flax_dinov2 import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/dinov2/configuration_dinov2.py b/src/transformers/models/dinov2/configuration_dinov2.py
index 9590cddc347fe9..dfc339f49da7de 100644
--- a/src/transformers/models/dinov2/configuration_dinov2.py
+++ b/src/transformers/models/dinov2/configuration_dinov2.py
@@ -170,3 +170,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
     @property
     def atol_for_validation(self) -> float:
         return 1e-4
+
+
+__all__ = ["Dinov2Config", "Dinov2OnnxConfig"]
diff --git a/src/transformers/models/dinov2/modeling_dinov2.py b/src/transformers/models/dinov2/modeling_dinov2.py
index bae21dacb95b0f..71e0029d22d835 100644
--- a/src/transformers/models/dinov2/modeling_dinov2.py
+++ b/src/transformers/models/dinov2/modeling_dinov2.py
@@ -913,3 +913,6 @@ def forward(
             hidden_states=outputs.hidden_states if output_hidden_states else None,
             attentions=outputs.attentions if output_attentions else None,
         )
+
+
+__all__ = ["Dinov2ForImageClassification", "Dinov2Model", "Dinov2PreTrainedModel", "Dinov2Backbone"]
diff --git a/src/transformers/models/dinov2/modeling_flax_dinov2.py b/src/transformers/models/dinov2/modeling_flax_dinov2.py
index 689d0b75316dfb..82d1bf95fa400c 100644
--- a/src/transformers/models/dinov2/modeling_flax_dinov2.py
+++ b/src/transformers/models/dinov2/modeling_flax_dinov2.py
@@ -793,3 +793,6 @@ class FlaxDinov2ForImageClassification(FlaxDinov2PreTrainedModel):
 append_replace_return_docstrings(
     FlaxDinov2ForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=Dinov2Config
 )
+
+
+__all__ = ["FlaxDinov2ForImageClassification", "FlaxDinov2Model", "FlaxDinov2PreTrainedModel"]
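With the per-backend try/except blocks gone from the inits, backend availability is resolved when a symbol is first touched. User code that needs to branch on an optional backend can still do so explicitly:

from transformers.utils import is_flax_available, is_torch_available

if is_torch_available():
    from transformers import Dinov2Model  # torch-backed
if is_flax_available():
    from transformers import FlaxDinov2Model  # flax-backed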
diff --git a/src/transformers/models/distilbert/__init__.py b/src/transformers/models/distilbert/__init__.py
index 7d6586bfa50809..4d6fae2e0236e7 100644
--- a/src/transformers/models/distilbert/__init__.py
+++ b/src/transformers/models/distilbert/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,150 +11,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_flax_available,
-    is_tf_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_distilbert": [
-        "DistilBertConfig",
-        "DistilBertOnnxConfig",
-    ],
-    "tokenization_distilbert": ["DistilBertTokenizer"],
-}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_distilbert"] = [
-        "DistilBertForMaskedLM",
-        "DistilBertForMultipleChoice",
-        "DistilBertForQuestionAnswering",
-        "DistilBertForSequenceClassification",
-        "DistilBertForTokenClassification",
-        "DistilBertModel",
-        "DistilBertPreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_distilbert"] = [
-        "TFDistilBertForMaskedLM",
-        "TFDistilBertForMultipleChoice",
-        "TFDistilBertForQuestionAnswering",
-        "TFDistilBertForSequenceClassification",
-        "TFDistilBertForTokenClassification",
-        "TFDistilBertMainLayer",
-        "TFDistilBertModel",
-        "TFDistilBertPreTrainedModel",
-    ]
-
-try:
-    if not is_flax_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flax_distilbert"] = [
-        "FlaxDistilBertForMaskedLM",
-        "FlaxDistilBertForMultipleChoice",
-        "FlaxDistilBertForQuestionAnswering",
-        "FlaxDistilBertForSequenceClassification",
-        "FlaxDistilBertForTokenClassification",
-        "FlaxDistilBertModel",
-        "FlaxDistilBertPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_distilbert import (
-        DistilBertConfig,
-        DistilBertOnnxConfig,
-    )
-    from .tokenization_distilbert import DistilBertTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_distilbert_fast import DistilBertTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_distilbert import (
-            DistilBertForMaskedLM,
-            DistilBertForMultipleChoice,
-            DistilBertForQuestionAnswering,
-            DistilBertForSequenceClassification,
-            DistilBertForTokenClassification,
-            DistilBertModel,
-            DistilBertPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_distilbert import (
-            TFDistilBertForMaskedLM,
-            TFDistilBertForMultipleChoice,
-            TFDistilBertForQuestionAnswering,
-            TFDistilBertForSequenceClassification,
-            TFDistilBertForTokenClassification,
-            TFDistilBertMainLayer,
-            TFDistilBertModel,
-            TFDistilBertPreTrainedModel,
-        )
-
-    try:
-        if not is_flax_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flax_distilbert import (
-            FlaxDistilBertForMaskedLM,
-            FlaxDistilBertForMultipleChoice,
-            FlaxDistilBertForQuestionAnswering,
-            FlaxDistilBertForSequenceClassification,
-            FlaxDistilBertForTokenClassification,
-            FlaxDistilBertModel,
-            FlaxDistilBertPreTrainedModel,
-        )
-
+    from .configuration_distilbert import *
+    from .modeling_distilbert import *
+    from .modeling_flax_distilbert import *
+    from .modeling_tf_distilbert import *
+    from .tokenization_distilbert import *
+    from .tokenization_distilbert_fast import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/distilbert/configuration_distilbert.py b/src/transformers/models/distilbert/configuration_distilbert.py
index a2ce1a2419dbe6..9a28c8e5d03d02 100644
--- a/src/transformers/models/distilbert/configuration_distilbert.py
+++ b/src/transformers/models/distilbert/configuration_distilbert.py
@@ -136,3 +136,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
                 ("attention_mask", dynamic_axis),
             ]
         )
+
+
+__all__ = ["DistilBertConfig", "DistilBertOnnxConfig"]
diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py
index 36e35594b3d3c6..5dccbd3bedbb8c 100755
--- a/src/transformers/models/distilbert/modeling_distilbert.py
+++ b/src/transformers/models/distilbert/modeling_distilbert.py
@@ -1366,3 +1366,14 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "DistilBertForMaskedLM",
+    "DistilBertForMultipleChoice",
+    "DistilBertForQuestionAnswering",
+    "DistilBertForSequenceClassification",
+    "DistilBertForTokenClassification",
+    "DistilBertModel",
+    "DistilBertPreTrainedModel",
+]
diff --git a/src/transformers/models/distilbert/modeling_flax_distilbert.py b/src/transformers/models/distilbert/modeling_flax_distilbert.py
index 0cb7cdb033c148..f1cf0faaed3f6a 100644
--- a/src/transformers/models/distilbert/modeling_flax_distilbert.py
+++ b/src/transformers/models/distilbert/modeling_flax_distilbert.py
@@ -893,3 +893,14 @@ class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel):
     FlaxQuestionAnsweringModelOutput,
     _CONFIG_FOR_DOC,
 )
+
+
+__all__ = [
+    "FlaxDistilBertForMaskedLM",
+    "FlaxDistilBertForMultipleChoice",
+    "FlaxDistilBertForQuestionAnswering",
+    "FlaxDistilBertForSequenceClassification",
+    "FlaxDistilBertForTokenClassification",
+    "FlaxDistilBertModel",
+    "FlaxDistilBertPreTrainedModel",
+]
diff --git a/src/transformers/models/distilbert/modeling_tf_distilbert.py b/src/transformers/models/distilbert/modeling_tf_distilbert.py
index 87dab93ca16f82..09b14b89c563b6 100644
--- a/src/transformers/models/distilbert/modeling_tf_distilbert.py
+++ b/src/transformers/models/distilbert/modeling_tf_distilbert.py
@@ -1133,3 +1133,15 @@ def build(self, input_shape=None):
         if getattr(self, "qa_outputs", None) is not None:
             with tf.name_scope(self.qa_outputs.name):
                 self.qa_outputs.build([None, None, self.config.dim])
+
+
+__all__ = [
+    "TFDistilBertForMaskedLM",
+    "TFDistilBertForMultipleChoice",
+    "TFDistilBertForQuestionAnswering",
+    "TFDistilBertForSequenceClassification",
+    "TFDistilBertForTokenClassification",
+    "TFDistilBertMainLayer",
+    "TFDistilBertModel",
+    "TFDistilBertPreTrainedModel",
+]
diff --git a/src/transformers/models/distilbert/tokenization_distilbert.py b/src/transformers/models/distilbert/tokenization_distilbert.py
index 610000ce813a58..c894211a2e0acf 100644
--- a/src/transformers/models/distilbert/tokenization_distilbert.py
+++ b/src/transformers/models/distilbert/tokenization_distilbert.py
@@ -517,3 +517,6 @@ def tokenize(self, text):
             else:
                 output_tokens.extend(sub_tokens)
         return output_tokens
+
+
+__all__ = ["DistilBertTokenizer"]
diff --git a/src/transformers/models/distilbert/tokenization_distilbert_fast.py b/src/transformers/models/distilbert/tokenization_distilbert_fast.py
index f1d69a27d67c08..d3829763d5e7ab 100644
--- a/src/transformers/models/distilbert/tokenization_distilbert_fast.py
+++ b/src/transformers/models/distilbert/tokenization_distilbert_fast.py
@@ -174,3 +174,6 @@ def create_token_type_ids_from_sequences(
     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
         files = self._tokenizer.model.save(save_directory, name=filename_prefix)
         return tuple(files)
+
+
+__all__ = ["DistilBertTokenizerFast"]
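Because the __all__ lists are now load-bearing, a quick consistency check is worth having; the helper below is illustrative only, not part of this patch:

import importlib


def assert_all_resolvable(module_name: str) -> None:
    """Fail if a name listed in __all__ is not defined in the module."""
    module = importlib.import_module(module_name)
    missing = [n for n in getattr(module, "__all__", []) if not hasattr(module, n)]
    if missing:
        raise AssertionError(f"{module_name}: unresolved __all__ entries {missing}")


assert_all_resolvable("transformers.models.distilbert.tokenization_distilbert_fast")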
diff --git a/src/transformers/models/donut/__init__.py b/src/transformers/models/donut/__init__.py
index f6f38609e6ff54..54de054051f850 100644
--- a/src/transformers/models/donut/__init__.py
+++ b/src/transformers/models/donut/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,60 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
-
-
-_import_structure = {
-    "configuration_donut_swin": ["DonutSwinConfig"],
-    "processing_donut": ["DonutProcessor"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_donut_swin"] = [
-        "DonutSwinModel",
-        "DonutSwinPreTrainedModel",
-    ]
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["feature_extraction_donut"] = ["DonutFeatureExtractor"]
-    _import_structure["image_processing_donut"] = ["DonutImageProcessor"]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_donut_swin import DonutSwinConfig
-    from .processing_donut import DonutProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_donut_swin import (
-            DonutSwinModel,
-            DonutSwinPreTrainedModel,
-        )
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .feature_extraction_donut import DonutFeatureExtractor
-        from .image_processing_donut import DonutImageProcessor
-
+    from .configuration_donut_swin import *
+    from .feature_extraction_donut import *
+    from .image_processing_donut import *
+    from .modeling_donut_swin import *
+    from .processing_donut import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/donut/configuration_donut_swin.py b/src/transformers/models/donut/configuration_donut_swin.py
index b9f9fae39cef7d..9aac07dace7688 100644
--- a/src/transformers/models/donut/configuration_donut_swin.py
+++ b/src/transformers/models/donut/configuration_donut_swin.py
@@ -130,3 +130,6 @@ def __init__(
         # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
         # this indicates the channel dimension after the last stage of the model
         self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
+
+
+__all__ = ["DonutSwinConfig"]
diff --git a/src/transformers/models/donut/feature_extraction_donut.py b/src/transformers/models/donut/feature_extraction_donut.py
index e6ca078c0e8ac4..012b208204c5c3 100644
--- a/src/transformers/models/donut/feature_extraction_donut.py
+++ b/src/transformers/models/donut/feature_extraction_donut.py
@@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None:
             FutureWarning,
         )
         super().__init__(*args, **kwargs)
+
+
+__all__ = ["DonutFeatureExtractor"]
diff --git a/src/transformers/models/donut/image_processing_donut.py b/src/transformers/models/donut/image_processing_donut.py
index edb0629d44bd04..0ddd0591ca371d 100644
--- a/src/transformers/models/donut/image_processing_donut.py
+++ b/src/transformers/models/donut/image_processing_donut.py
@@ -457,3 +457,6 @@ def preprocess(
         data = {"pixel_values": images}
 
         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["DonutImageProcessor"]
diff --git a/src/transformers/models/donut/modeling_donut_swin.py b/src/transformers/models/donut/modeling_donut_swin.py
index 2d5272e8642ee5..1434ae41504535 100644
--- a/src/transformers/models/donut/modeling_donut_swin.py
+++ b/src/transformers/models/donut/modeling_donut_swin.py
@@ -1006,3 +1006,6 @@ def forward(
             attentions=encoder_outputs.attentions,
             reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
         )
+
+
+__all__ = ["DonutSwinModel", "DonutSwinPreTrainedModel"]
diff --git a/src/transformers/models/donut/processing_donut.py b/src/transformers/models/donut/processing_donut.py
index b46ff4bcfab902..ed3112ff8dd952 100644
--- a/src/transformers/models/donut/processing_donut.py
+++ b/src/transformers/models/donut/processing_donut.py
@@ -226,3 +226,6 @@ def feature_extractor(self):
             FutureWarning,
         )
         return self.image_processor
+
+
+__all__ = ["DonutProcessor"]
diff --git a/src/transformers/models/dpr/__init__.py b/src/transformers/models/dpr/__init__.py
index ef4bccee54d296..9aeadbeaf41657 100644
--- a/src/transformers/models/dpr/__init__.py
+++ b/src/transformers/models/dpr/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,126 +11,20 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_tf_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_dpr": ["DPRConfig"],
-    "tokenization_dpr": [
-        "DPRContextEncoderTokenizer",
-        "DPRQuestionEncoderTokenizer",
-        "DPRReaderOutput",
-        "DPRReaderTokenizer",
-    ],
-}
-
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_dpr_fast"] = [
-        "DPRContextEncoderTokenizerFast",
-        "DPRQuestionEncoderTokenizerFast",
-        "DPRReaderTokenizerFast",
-    ]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_dpr"] = [
-        "DPRContextEncoder",
-        "DPRPretrainedContextEncoder",
-        "DPRPreTrainedModel",
-        "DPRPretrainedQuestionEncoder",
-        "DPRPretrainedReader",
-        "DPRQuestionEncoder",
-        "DPRReader",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_dpr"] = [
-        "TFDPRContextEncoder",
-        "TFDPRPretrainedContextEncoder",
-        "TFDPRPretrainedQuestionEncoder",
-        "TFDPRPretrainedReader",
-        "TFDPRQuestionEncoder",
-        "TFDPRReader",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_dpr import DPRConfig
-    from .tokenization_dpr import (
-        DPRContextEncoderTokenizer,
-        DPRQuestionEncoderTokenizer,
-        DPRReaderOutput,
-        DPRReaderTokenizer,
-    )
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_dpr_fast import (
-            DPRContextEncoderTokenizerFast,
-            DPRQuestionEncoderTokenizerFast,
-            DPRReaderTokenizerFast,
-        )
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_dpr import ( - DPRContextEncoder, - DPRPretrainedContextEncoder, - DPRPreTrainedModel, - DPRPretrainedQuestionEncoder, - DPRPretrainedReader, - DPRQuestionEncoder, - DPRReader, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_dpr import ( - TFDPRContextEncoder, - TFDPRPretrainedContextEncoder, - TFDPRPretrainedQuestionEncoder, - TFDPRPretrainedReader, - TFDPRQuestionEncoder, - TFDPRReader, - ) - + from .configuration_dpr import * + from .modeling_dpr import * + from .modeling_tf_dpr import * + from .tokenization_dpr import * + from .tokenization_dpr_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/dpr/configuration_dpr.py b/src/transformers/models/dpr/configuration_dpr.py index b22da23ca4cb78..7e4b97c97a4f7f 100644 --- a/src/transformers/models/dpr/configuration_dpr.py +++ b/src/transformers/models/dpr/configuration_dpr.py @@ -126,3 +126,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.projection_dim = projection_dim self.position_embedding_type = position_embedding_type + + +__all__ = ["DPRConfig"] diff --git a/src/transformers/models/dpr/modeling_dpr.py b/src/transformers/models/dpr/modeling_dpr.py index 7ba63f134ccc8c..79317202b8ca71 100644 --- a/src/transformers/models/dpr/modeling_dpr.py +++ b/src/transformers/models/dpr/modeling_dpr.py @@ -655,3 +655,14 @@ def forward( output_hidden_states=output_hidden_states, return_dict=return_dict, ) + + +__all__ = [ + "DPRContextEncoder", + "DPRPretrainedContextEncoder", + "DPRPreTrainedModel", + "DPRPretrainedQuestionEncoder", + "DPRPretrainedReader", + "DPRQuestionEncoder", + "DPRReader", +] diff --git a/src/transformers/models/dpr/modeling_tf_dpr.py b/src/transformers/models/dpr/modeling_tf_dpr.py index 92a0e54cbba5f0..49a750fa4ff4b8 100644 --- a/src/transformers/models/dpr/modeling_tf_dpr.py +++ b/src/transformers/models/dpr/modeling_tf_dpr.py @@ -788,3 +788,13 @@ def build(self, input_shape=None): if getattr(self, "span_predictor", None) is not None: with tf.name_scope(self.span_predictor.name): self.span_predictor.build(None) + + +__all__ = [ + "TFDPRContextEncoder", + "TFDPRPretrainedContextEncoder", + "TFDPRPretrainedQuestionEncoder", + "TFDPRPretrainedReader", + "TFDPRQuestionEncoder", + "TFDPRReader", +] diff --git a/src/transformers/models/dpr/tokenization_dpr.py b/src/transformers/models/dpr/tokenization_dpr.py index 45ce73425f23cc..00b8dedfa7e4b7 100644 --- a/src/transformers/models/dpr/tokenization_dpr.py +++ b/src/transformers/models/dpr/tokenization_dpr.py @@ -316,3 +316,6 @@ class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] + + +__all__ = ["DPRContextEncoderTokenizer", "DPRQuestionEncoderTokenizer", "DPRReaderOutput", "DPRReaderTokenizer"] diff --git a/src/transformers/models/dpr/tokenization_dpr_fast.py b/src/transformers/models/dpr/tokenization_dpr_fast.py index 69ac58a77dc191..026ba1a8907dcc 100644 --- a/src/transformers/models/dpr/tokenization_dpr_fast.py +++ b/src/transformers/models/dpr/tokenization_dpr_fast.py @@ -316,3 
+316,6 @@ class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = DPRReaderTokenizer + + +__all__ = ["DPRContextEncoderTokenizerFast", "DPRQuestionEncoderTokenizerFast", "DPRReaderTokenizerFast"] diff --git a/src/transformers/models/dpt/__init__.py b/src/transformers/models/dpt/__init__.py index ef8999d5efba78..086750423dbd93 100644 --- a/src/transformers/models/dpt/__init__.py +++ b/src/transformers/models/dpt/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,62 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available -from ...utils import OptionalDependencyNotAvailable - - -_import_structure = {"configuration_dpt": ["DPTConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"] - _import_structure["image_processing_dpt"] = ["DPTImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_dpt"] = [ - "DPTForDepthEstimation", - "DPTForSemanticSegmentation", - "DPTModel", - "DPTPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_dpt import DPTConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_dpt import DPTFeatureExtractor - from .image_processing_dpt import DPTImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_dpt import ( - DPTForDepthEstimation, - DPTForSemanticSegmentation, - DPTModel, - DPTPreTrainedModel, - ) - - + from .configuration_dpt import * + from .feature_extraction_dpt import * + from .image_processing_dpt import * + from .modeling_dpt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/dpt/configuration_dpt.py b/src/transformers/models/dpt/configuration_dpt.py index 869f384f56985e..516f8f43f0d2bf 100644 --- a/src/transformers/models/dpt/configuration_dpt.py +++ b/src/transformers/models/dpt/configuration_dpt.py @@ -281,3 +281,6 @@ def to_dict(self): output["model_type"] = self.__class__.model_type return output + + +__all__ = ["DPTConfig"] diff --git a/src/transformers/models/dpt/feature_extraction_dpt.py b/src/transformers/models/dpt/feature_extraction_dpt.py index d375d8229f5ee9..8a13989676edf8 100644 --- a/src/transformers/models/dpt/feature_extraction_dpt.py +++ b/src/transformers/models/dpt/feature_extraction_dpt.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) 
         super().__init__(*args, **kwargs)
+
+
+__all__ = ["DPTFeatureExtractor"]
diff --git a/src/transformers/models/dpt/image_processing_dpt.py b/src/transformers/models/dpt/image_processing_dpt.py
index 20024e5fefa198..c4d79696f0ab62 100644
--- a/src/transformers/models/dpt/image_processing_dpt.py
+++ b/src/transformers/models/dpt/image_processing_dpt.py
@@ -512,3 +512,6 @@ def post_process_depth_estimation(
             results.append({"predicted_depth": depth})
 
         return results
+
+
+__all__ = ["DPTImageProcessor"]
diff --git a/src/transformers/models/dpt/modeling_dpt.py b/src/transformers/models/dpt/modeling_dpt.py
index 5886d288b88271..a82227b45809e8 100755
--- a/src/transformers/models/dpt/modeling_dpt.py
+++ b/src/transformers/models/dpt/modeling_dpt.py
@@ -1371,3 +1371,6 @@ def forward(
             hidden_states=outputs.hidden_states if output_hidden_states else None,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", "DPTPreTrainedModel"]
diff --git a/src/transformers/models/efficientnet/__init__.py b/src/transformers/models/efficientnet/__init__.py
index 28cb70490d9675..68a2825c7057df 100644
--- a/src/transformers/models/efficientnet/__init__.py
+++ b/src/transformers/models/efficientnet/__init__.py
@@ -1,8 +1,4 @@
-# flake8: noqa
-# There's no way to ignore "F401 '...' imported but unused" warnings in this
-# module, but to preserve other warnings. So, don't check this module at all.
-
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,64 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-# rely on isort to merge the imports
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
-
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
-_import_structure = {
-    "configuration_efficientnet": [
-        "EfficientNetConfig",
-        "EfficientNetOnnxConfig",
-    ]
-}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_efficientnet"] = [
-        "EfficientNetForImageClassification",
-        "EfficientNetModel",
-        "EfficientNetPreTrainedModel",
-    ]
 
 if TYPE_CHECKING:
-    from .configuration_efficientnet import (
-        EfficientNetConfig,
-        EfficientNetOnnxConfig,
-    )
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_efficientnet import EfficientNetImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_efficientnet import (
-            EfficientNetForImageClassification,
-            EfficientNetModel,
-            EfficientNetPreTrainedModel,
-        )
-
+    from .configuration_efficientnet import *
+    from .image_processing_efficientnet import *
+    from .modeling_efficientnet import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/efficientnet/configuration_efficientnet.py b/src/transformers/models/efficientnet/configuration_efficientnet.py
index 4c7feb377fb9c0..ef25447d6aef3a 100644
--- a/src/transformers/models/efficientnet/configuration_efficientnet.py
+++ b/src/transformers/models/efficientnet/configuration_efficientnet.py
@@ -164,3 +164,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
     @property
     def atol_for_validation(self) -> float:
         return 1e-5
+
+
+__all__ = ["EfficientNetConfig", "EfficientNetOnnxConfig"]
diff --git a/src/transformers/models/efficientnet/image_processing_efficientnet.py b/src/transformers/models/efficientnet/image_processing_efficientnet.py
index 3383fff9b0e8dc..3e2403716ce0af 100644
--- a/src/transformers/models/efficientnet/image_processing_efficientnet.py
+++ b/src/transformers/models/efficientnet/image_processing_efficientnet.py
@@ -364,3 +364,6 @@ def preprocess(
         data = {"pixel_values": images}
 
         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["EfficientNetImageProcessor"]
diff --git a/src/transformers/models/efficientnet/modeling_efficientnet.py b/src/transformers/models/efficientnet/modeling_efficientnet.py
index 057cd42f2a37b9..0ab5fa2e6aacbd 100644
--- a/src/transformers/models/efficientnet/modeling_efficientnet.py
+++ b/src/transformers/models/efficientnet/modeling_efficientnet.py
@@ -642,3 +642,6 @@ def forward(
             logits=logits,
             hidden_states=outputs.hidden_states,
         )
+
+
+__all__ = ["EfficientNetForImageClassification", "EfficientNetModel", "EfficientNetPreTrainedModel"]
diff --git a/src/transformers/models/electra/__init__.py b/src/transformers/models/electra/__init__.py
index b79f2410bf354e..a78ed5c42aea51 100644
--- a/src/transformers/models/electra/__init__.py
+++ b/src/transformers/models/electra/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,154 +11,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_flax_available,
-    is_tf_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_electra": ["ElectraConfig", "ElectraOnnxConfig"],
-    "tokenization_electra": ["ElectraTokenizer"],
-}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_electra"] = [
-        "ElectraForCausalLM",
-        "ElectraForMaskedLM",
-        "ElectraForMultipleChoice",
-        "ElectraForPreTraining",
-        "ElectraForQuestionAnswering",
-        "ElectraForSequenceClassification",
-        "ElectraForTokenClassification",
-        "ElectraModel",
-        "ElectraPreTrainedModel",
-        "load_tf_weights_in_electra",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_electra"] = [
-        "TFElectraForMaskedLM",
-        "TFElectraForMultipleChoice",
-        "TFElectraForPreTraining",
-        "TFElectraForQuestionAnswering",
-        "TFElectraForSequenceClassification",
-        "TFElectraForTokenClassification",
-        "TFElectraModel",
-        "TFElectraPreTrainedModel",
-    ]
-
-try:
-    if not is_flax_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flax_electra"] = [
-        "FlaxElectraForCausalLM",
-        "FlaxElectraForMaskedLM",
-        "FlaxElectraForMultipleChoice",
-        "FlaxElectraForPreTraining",
-        "FlaxElectraForQuestionAnswering",
-        "FlaxElectraForSequenceClassification",
-        "FlaxElectraForTokenClassification",
-        "FlaxElectraModel",
-        "FlaxElectraPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_electra import ElectraConfig, ElectraOnnxConfig
-    from .tokenization_electra import ElectraTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_electra_fast import ElectraTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_electra import (
-            ElectraForCausalLM,
-            ElectraForMaskedLM,
-            ElectraForMultipleChoice,
-            ElectraForPreTraining,
-            ElectraForQuestionAnswering,
-            ElectraForSequenceClassification,
-            ElectraForTokenClassification,
-            ElectraModel,
-            ElectraPreTrainedModel,
-            load_tf_weights_in_electra,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_electra import (
-            TFElectraForMaskedLM,
-            TFElectraForMultipleChoice,
-            TFElectraForPreTraining,
-            TFElectraForQuestionAnswering,
-            TFElectraForSequenceClassification,
-            TFElectraForTokenClassification,
-            TFElectraModel,
-            TFElectraPreTrainedModel,
-        )
-
-    try:
-        if not is_flax_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flax_electra import (
-            FlaxElectraForCausalLM,
-            FlaxElectraForMaskedLM,
-            FlaxElectraForMultipleChoice,
-            FlaxElectraForPreTraining,
-            FlaxElectraForQuestionAnswering,
-            FlaxElectraForSequenceClassification,
-            FlaxElectraForTokenClassification,
-            FlaxElectraModel,
-            FlaxElectraPreTrainedModel,
-        )
-
+    from .configuration_electra import *
+    from .modeling_electra import *
+    from .modeling_flax_electra import *
+    from .modeling_tf_electra import *
+    from .tokenization_electra import *
+    from .tokenization_electra_fast import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/electra/configuration_electra.py b/src/transformers/models/electra/configuration_electra.py
index 17be728ed65b65..20b242c0f8d65f 100644
--- a/src/transformers/models/electra/configuration_electra.py
+++ b/src/transformers/models/electra/configuration_electra.py
@@ -182,3 +182,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
                 ("token_type_ids", dynamic_axis),
             ]
         )
+
+
+__all__ = ["ElectraConfig", "ElectraOnnxConfig"]
diff --git a/src/transformers/models/electra/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py
index 0ce2f8e6985a0a..14fd33b683eacd 100644
--- a/src/transformers/models/electra/modeling_electra.py
+++ b/src/transformers/models/electra/modeling_electra.py
@@ -1660,3 +1660,17 @@ def _reorder_cache(self, past_key_values, beam_idx):
                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
             )
         return reordered_past
+
+
+__all__ = [
+    "ElectraForCausalLM",
+    "ElectraForMaskedLM",
+    "ElectraForMultipleChoice",
+    "ElectraForPreTraining",
+    "ElectraForQuestionAnswering",
+    "ElectraForSequenceClassification",
+    "ElectraForTokenClassification",
+    "ElectraModel",
+    "ElectraPreTrainedModel",
+    "load_tf_weights_in_electra",
+]
diff --git a/src/transformers/models/electra/modeling_flax_electra.py b/src/transformers/models/electra/modeling_flax_electra.py
index 64d49eb17a460a..4ca7d1d6dcf484 100644
--- a/src/transformers/models/electra/modeling_flax_electra.py
+++ b/src/transformers/models/electra/modeling_flax_electra.py
@@ -1599,3 +1599,16 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs):
     FlaxCausalLMOutputWithCrossAttentions,
     _CONFIG_FOR_DOC,
 )
+
+
+__all__ = [
+    "FlaxElectraForCausalLM",
+    "FlaxElectraForMaskedLM",
+    "FlaxElectraForMultipleChoice",
+    "FlaxElectraForPreTraining",
+    "FlaxElectraForQuestionAnswering",
+    "FlaxElectraForSequenceClassification",
+    "FlaxElectraForTokenClassification",
+    "FlaxElectraModel",
+    "FlaxElectraPreTrainedModel",
+]
diff --git a/src/transformers/models/electra/modeling_tf_electra.py b/src/transformers/models/electra/modeling_tf_electra.py
index a289bb9728fd30..827241d0a874e0 100644
--- a/src/transformers/models/electra/modeling_tf_electra.py
+++ b/src/transformers/models/electra/modeling_tf_electra.py
@@ -1762,3 +1762,15 @@ def build(self, input_shape=None):
         if getattr(self, "qa_outputs", None) is not None:
             with tf.name_scope(self.qa_outputs.name):
                 self.qa_outputs.build([None, None, self.config.hidden_size])
+
+
+__all__ = [
+    "TFElectraForMaskedLM",
+    "TFElectraForMultipleChoice",
+    "TFElectraForPreTraining",
+    "TFElectraForQuestionAnswering",
+    "TFElectraForSequenceClassification",
+    "TFElectraForTokenClassification",
+    "TFElectraModel",
+    "TFElectraPreTrainedModel",
+]
diff --git a/src/transformers/models/electra/tokenization_electra.py b/src/transformers/models/electra/tokenization_electra.py
index 2acd86ca083997..3b21527e6cdae2 100644
--- a/src/transformers/models/electra/tokenization_electra.py
+++ b/src/transformers/models/electra/tokenization_electra.py
@@ -506,3 +506,6 @@ def tokenize(self, text):
             else:
                 output_tokens.extend(sub_tokens)
         return output_tokens
+
+
+__all__ = ["ElectraTokenizer"]
diff --git a/src/transformers/models/electra/tokenization_electra_fast.py b/src/transformers/models/electra/tokenization_electra_fast.py
index 7b9d6a36cb9210..34ea4339b9382b 100644
--- a/src/transformers/models/electra/tokenization_electra_fast.py
+++ b/src/transformers/models/electra/tokenization_electra_fast.py
@@ -167,3 +167,6 @@ def create_token_type_ids_from_sequences(
     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
         files = self._tokenizer.model.save(save_directory, name=filename_prefix)
         return tuple(files)
+
+
+__all__ = ["ElectraTokenizerFast"]
diff --git a/src/transformers/models/encodec/__init__.py b/src/transformers/models/encodec/__init__.py
index d67075e5560c75..3adeea056604d1 100644
--- a/src/transformers/models/encodec/__init__.py
+++ b/src/transformers/models/encodec/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,47 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {
-    "configuration_encodec": ["EncodecConfig"],
-    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_encodec"] = [
-        "EncodecModel",
-        "EncodecPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_encodec import (
-        EncodecConfig,
-    )
-    from .feature_extraction_encodec import EncodecFeatureExtractor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_encodec import (
-            EncodecModel,
-            EncodecPreTrainedModel,
-        )
-
+    from .configuration_encodec import *
+    from .feature_extraction_encodec import *
+    from .modeling_encodec import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/encodec/configuration_encodec.py b/src/transformers/models/encodec/configuration_encodec.py
index bc10e8ffc3d57b..77fd67727dc390 100644
--- a/src/transformers/models/encodec/configuration_encodec.py
+++ b/src/transformers/models/encodec/configuration_encodec.py
@@ -187,3 +187,6 @@ def frame_rate(self) -> int:
     @property
     def num_quantizers(self) -> int:
         return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
+
+
+__all__ = ["EncodecConfig"]
diff --git a/src/transformers/models/encodec/feature_extraction_encodec.py b/src/transformers/models/encodec/feature_extraction_encodec.py
index 6f7536a52e9f99..9bed59de45d8c8 100644
--- a/src/transformers/models/encodec/feature_extraction_encodec.py
+++ b/src/transformers/models/encodec/feature_extraction_encodec.py
@@ -204,3 +204,6 @@ def __call__(
             padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
 
         return padded_inputs
+
+
+__all__ = ["EncodecFeatureExtractor"]
diff --git a/src/transformers/models/encodec/modeling_encodec.py b/src/transformers/models/encodec/modeling_encodec.py
index 28ccb9513d63d8..9339c2645374b4 100644
--- a/src/transformers/models/encodec/modeling_encodec.py
+++ b/src/transformers/models/encodec/modeling_encodec.py
@@ -814,3 +814,6 @@ def forward(
             return (audio_codes, audio_values)
 
         return EncodecOutput(audio_codes=audio_codes, audio_values=audio_values)
+
+
+__all__ = ["EncodecModel", "EncodecPreTrainedModel"]
diff --git a/src/transformers/models/encoder_decoder/__init__.py b/src/transformers/models/encoder_decoder/__init__.py
index ba71f1f7c7a9e1..c786feb9213fdd 100644
--- a/src/transformers/models/encoder_decoder/__init__.py
+++ b/src/transformers/models/encoder_decoder/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,72 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_flax_available,
-    is_tf_available,
-    is_torch_available,
-)
-
-
-_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
-try:
-    if not is_flax_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
 
 if TYPE_CHECKING:
-    from .configuration_encoder_decoder import EncoderDecoderConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_encoder_decoder import EncoderDecoderModel
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
-
-    try:
-        if not is_flax_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
-
+    from .configuration_encoder_decoder import *
+    from .modeling_encoder_decoder import *
+    from .modeling_flax_encoder_decoder import *
+    from .modeling_tf_encoder_decoder import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py b/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py
index 5190ed51ffd350..8b5c62363f6ad9 100644
--- a/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py
+++ b/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py
@@ -106,3 +106,6 @@ def from_encoder_decoder_configs(
         decoder_config.add_cross_attention = True
 
         return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
+
+
+__all__ = ["EncoderDecoderConfig"]
diff --git a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py
index 9ebedce07fb833..9ab4b7f2ced167 100644
--- a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py
+++ b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py
@@ -682,3 +682,6 @@ def resize_token_embeddings(self, *args, **kwargs):
     def _reorder_cache(self, past_key_values, beam_idx):
         # apply decoder cache reordering here
         return self.decoder._reorder_cache(past_key_values, beam_idx)
+
+
+__all__ = ["EncoderDecoderModel"]
diff --git a/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py
index 24b053969c7eb3..bdc589484cda10 100644
--- a/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py
+++ b/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py
@@ -896,3 +896,6 @@ def from_encoder_decoder_pretrained(
         model.params["decoder"] = decoder.params
 
         return model
+
+
+__all__ = ["FlaxEncoderDecoderModel"]
diff --git a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py
index 85802b77f383f4..66009fc3ef0605 100644
--- a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py
+++ b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py
@@ -660,3 +660,6 @@ def build(self, input_shape=None):
         if getattr(self, "decoder", None) is not None:
             with tf.name_scope(self.decoder.name):
                 self.decoder.build(None)
+
+
+__all__ = ["TFEncoderDecoderModel"]
diff --git a/src/transformers/models/ernie/__init__.py b/src/transformers/models/ernie/__init__.py
index ddd3b30365d80a..9bb8983063ddb0 100644
--- a/src/transformers/models/ernie/__init__.py
+++ b/src/transformers/models/ernie/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,58 +11,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
-
-
-_import_structure = {
-    "configuration_ernie": ["ErnieConfig", "ErnieOnnxConfig"],
-}
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_ernie"] = [
-        "ErnieForCausalLM",
-        "ErnieForMaskedLM",
-        "ErnieForMultipleChoice",
-        "ErnieForNextSentencePrediction",
-        "ErnieForPreTraining",
-        "ErnieForQuestionAnswering",
-        "ErnieForSequenceClassification",
-        "ErnieForTokenClassification",
-        "ErnieModel",
-        "ErniePreTrainedModel",
-    ]
 
 if TYPE_CHECKING:
-    from .configuration_ernie import ErnieConfig, ErnieOnnxConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_ernie import (
-            ErnieForCausalLM,
-            ErnieForMaskedLM,
-            ErnieForMultipleChoice,
-            ErnieForNextSentencePrediction,
-            ErnieForPreTraining,
-            ErnieForQuestionAnswering,
-            ErnieForSequenceClassification,
-            ErnieForTokenClassification,
-            ErnieModel,
-            ErniePreTrainedModel,
-        )
-
+    from .configuration_ernie import *
+    from .modeling_ernie import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/ernie/configuration_ernie.py b/src/transformers/models/ernie/configuration_ernie.py
index 808a0c27220cf4..655e40e163b59d 100644
--- a/src/transformers/models/ernie/configuration_ernie.py
+++ b/src/transformers/models/ernie/configuration_ernie.py
@@ -158,3 +158,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
                 ("task_type_ids", dynamic_axis),
             ]
         )
+
+
+__all__ = ["ErnieConfig", "ErnieOnnxConfig"]
diff --git a/src/transformers/models/ernie/modeling_ernie.py b/src/transformers/models/ernie/modeling_ernie.py
index 1ab6d44faa1ce8..ec090b712e4420 100644
--- a/src/transformers/models/ernie/modeling_ernie.py
+++ b/src/transformers/models/ernie/modeling_ernie.py
@@ -1799,3 +1799,17 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "ErnieForCausalLM",
+    "ErnieForMaskedLM",
+    "ErnieForMultipleChoice",
+    "ErnieForNextSentencePrediction",
+    "ErnieForPreTraining",
+    "ErnieForQuestionAnswering",
+    "ErnieForSequenceClassification",
+    "ErnieForTokenClassification",
+    "ErnieModel",
+    "ErniePreTrainedModel",
+]
diff --git a/src/transformers/models/esm/__init__.py b/src/transformers/models/esm/__init__.py
index a764bedc3fadfd..8eac54d6ddcbda 100644
--- a/src/transformers/models/esm/__init__.py
+++ b/src/transformers/models/esm/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 Facebook and The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,78 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {
-    "configuration_esm": ["EsmConfig"],
-    "tokenization_esm": ["EsmTokenizer"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_esm"] = [
-        "EsmForMaskedLM",
-        "EsmForSequenceClassification",
-        "EsmForTokenClassification",
-        "EsmModel",
-        "EsmPreTrainedModel",
-    ]
-    _import_structure["modeling_esmfold"] = ["EsmForProteinFolding", "EsmFoldPreTrainedModel"]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_esm"] = [
-        "TFEsmForMaskedLM",
-        "TFEsmForSequenceClassification",
-        "TFEsmForTokenClassification",
-        "TFEsmModel",
-        "TFEsmPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_esm import EsmConfig
-    from .tokenization_esm import EsmTokenizer
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_esm import (
-            EsmForMaskedLM,
-            EsmForSequenceClassification,
-            EsmForTokenClassification,
-            EsmModel,
-            EsmPreTrainedModel,
-        )
-        from .modeling_esmfold import EsmFoldPreTrainedModel, EsmForProteinFolding
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_esm import (
-            TFEsmForMaskedLM,
-            TFEsmForSequenceClassification,
-            TFEsmForTokenClassification,
-            TFEsmModel,
-            TFEsmPreTrainedModel,
-        )
-
-
+    from .configuration_esm import *
+    from .modeling_esm import *
+    from .modeling_esmfold import *
+    from .modeling_tf_esm import *
+    from .tokenization_esm import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/esm/configuration_esm.py b/src/transformers/models/esm/configuration_esm.py
index 083664747ddd85..c0a31e6958472d 100644
--- a/src/transformers/models/esm/configuration_esm.py
+++ b/src/transformers/models/esm/configuration_esm.py
@@ -360,3 +360,6 @@ def get_default_vocab_list():
         "",
         "",
     )
+
+
+__all__ = ["EsmConfig"]
diff --git a/src/transformers/models/esm/modeling_esm.py b/src/transformers/models/esm/modeling_esm.py
index 5df5435bb1229a..a7d07904e06a94 100755
--- a/src/transformers/models/esm/modeling_esm.py
+++ b/src/transformers/models/esm/modeling_esm.py
@@ -1260,3 +1260,12 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l
     mask = input_ids.ne(padding_idx).int()
     incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
     return incremental_indices.long() + padding_idx
+
+
+__all__ = [
+    "EsmForMaskedLM",
+    "EsmForSequenceClassification",
+    "EsmForTokenClassification",
+    "EsmModel",
+    "EsmPreTrainedModel",
+]
diff --git a/src/transformers/models/esm/modeling_esmfold.py b/src/transformers/models/esm/modeling_esmfold.py
index fe35451d2a99e3..67cee99294a89b 100644
--- a/src/transformers/models/esm/modeling_esmfold.py
+++ b/src/transformers/models/esm/modeling_esmfold.py
@@ -2320,3 +2320,6 @@ def infer_pdbs(self, seqs: List[str], *args, **kwargs) -> List[str]:
         """Returns the pdb (file) string from the model given an input sequence."""
         output = self.infer(seqs, *args, **kwargs)
         return self.output_to_pdb(output)
+
+
+__all__ = ["EsmForProteinFolding", "EsmFoldPreTrainedModel"]
diff --git a/src/transformers/models/esm/modeling_tf_esm.py b/src/transformers/models/esm/modeling_tf_esm.py
index 0e5cf3d8f61f8a..71698486dab0ad 100644
--- a/src/transformers/models/esm/modeling_tf_esm.py
+++ b/src/transformers/models/esm/modeling_tf_esm.py
@@ -1564,3 +1564,12 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l
     mask = tf.cast(input_ids != padding_idx, tf.int64)
     incremental_indices = (tf.cumsum(mask, axis=1) + past_key_values_length) * mask
     return incremental_indices + padding_idx
+
+
+__all__ = [
+    "TFEsmForMaskedLM",
+    "TFEsmForSequenceClassification",
+    "TFEsmForTokenClassification",
+    "TFEsmModel",
+    "TFEsmPreTrainedModel",
+]
diff --git a/src/transformers/models/esm/tokenization_esm.py b/src/transformers/models/esm/tokenization_esm.py
index fbb759c1d171ba..4bc433e350e13c 100644
--- a/src/transformers/models/esm/tokenization_esm.py
+++ b/src/transformers/models/esm/tokenization_esm.py
@@ -142,3 +142,6 @@ def save_vocabulary(self, save_directory, filename_prefix):
     @property
     def vocab_size(self) -> int:
         return len(self.all_tokens)
+
+
+__all__ = ["EsmTokenizer"]
diff --git a/src/transformers/models/falcon/__init__.py b/src/transformers/models/falcon/__init__.py
index 62c1c9262b70fc..f9789767f11402 100644
--- a/src/transformers/models/falcon/__init__.py
+++ b/src/transformers/models/falcon/__init__.py
@@ -1,5 +1,4 @@
-# coding=utf-8
-# Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,53 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_falcon": ["FalconConfig"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_falcon"] = [
-        "FalconForCausalLM",
-        "FalconModel",
-        "FalconPreTrainedModel",
-        "FalconForSequenceClassification",
-        "FalconForTokenClassification",
-        "FalconForQuestionAnswering",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_falcon import FalconConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_falcon import (
-            FalconForCausalLM,
-            FalconForQuestionAnswering,
-            FalconForSequenceClassification,
-            FalconForTokenClassification,
-            FalconModel,
-            FalconPreTrainedModel,
-        )
-
-
+    from .configuration_falcon import *
+    from .modeling_falcon import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/falcon/configuration_falcon.py b/src/transformers/models/falcon/configuration_falcon.py
index 9f5f8f793ce891..2d072b14f79001 100644
--- a/src/transformers/models/falcon/configuration_falcon.py
+++ b/src/transformers/models/falcon/configuration_falcon.py
@@ -206,3 +206,6 @@ def head_dim(self):
     @property
     def rotary(self):
         return not self.alibi
+
+
+__all__ = ["FalconConfig"]
diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py
index 51d9ff39d48f88..bf2b69f050395d 100644
--- a/src/transformers/models/falcon/modeling_falcon.py
+++ b/src/transformers/models/falcon/modeling_falcon.py
@@ -1616,3 +1616,13 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "FalconForCausalLM",
+    "FalconModel",
+    "FalconPreTrainedModel",
+    "FalconForSequenceClassification",
+    "FalconForTokenClassification",
+    "FalconForQuestionAnswering",
+]
diff --git a/src/transformers/models/falcon_mamba/__init__.py b/src/transformers/models/falcon_mamba/__init__.py
index 4740d03f332135..202147c938465d 100644
--- a/src/transformers/models/falcon_mamba/__init__.py
+++ b/src/transformers/models/falcon_mamba/__init__.py
@@ -11,48 +11,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_falcon_mamba": ["FalconMambaConfig"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_falcon_mamba"] = [
-        "FalconMambaForCausalLM",
-        "FalconMambaModel",
-        "FalconMambaPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_falcon_mamba import FalconMambaConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_falcon_mamba import (
-            FalconMambaForCausalLM,
-            FalconMambaModel,
-            FalconMambaPreTrainedModel,
-        )
+    from .configuration_falcon_mamba import *
+    from .modeling_falcon_mamba import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/falcon_mamba/configuration_falcon_mamba.py b/src/transformers/models/falcon_mamba/configuration_falcon_mamba.py
index cabba738a479e1..4127551445644f 100644
--- a/src/transformers/models/falcon_mamba/configuration_falcon_mamba.py
+++ b/src/transformers/models/falcon_mamba/configuration_falcon_mamba.py
@@ -157,3 +157,6 @@ def __init__(
         self.mixer_rms_eps = mixer_rms_eps
 
         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
+
+
+__all__ = ["FalconMambaConfig"]
diff --git a/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py b/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
index a1954f9d9be474..22ad7af59616fb 100644
--- a/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
+++ b/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
@@ -867,3 +867,6 @@ def forward(
             cache_params=falcon_mamba_outputs.cache_params,
             hidden_states=falcon_mamba_outputs.hidden_states,
         )
+
+
+__all__ = ["FalconMambaForCausalLM", "FalconMambaModel", "FalconMambaPreTrainedModel"]
diff --git a/src/transformers/models/fastspeech2_conformer/__init__.py b/src/transformers/models/fastspeech2_conformer/__init__.py
index 2014f74be1f772..44d1ec72363107 100644
--- a/src/transformers/models/fastspeech2_conformer/__init__.py
+++ b/src/transformers/models/fastspeech2_conformer/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,57 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {
-    "configuration_fastspeech2_conformer": [
-        "FastSpeech2ConformerConfig",
-        "FastSpeech2ConformerHifiGanConfig",
-        "FastSpeech2ConformerWithHifiGanConfig",
-    ],
-    "tokenization_fastspeech2_conformer": ["FastSpeech2ConformerTokenizer"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_fastspeech2_conformer"] = [
-        "FastSpeech2ConformerWithHifiGan",
-        "FastSpeech2ConformerHifiGan",
-        "FastSpeech2ConformerModel",
-        "FastSpeech2ConformerPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_fastspeech2_conformer import (
-        FastSpeech2ConformerConfig,
-        FastSpeech2ConformerHifiGanConfig,
-        FastSpeech2ConformerWithHifiGanConfig,
-    )
-    from .tokenization_fastspeech2_conformer import FastSpeech2ConformerTokenizer
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_fastspeech2_conformer import (
-            FastSpeech2ConformerHifiGan,
-            FastSpeech2ConformerModel,
-            FastSpeech2ConformerPreTrainedModel,
-            FastSpeech2ConformerWithHifiGan,
-        )
-
+    from .configuration_fastspeech2_conformer import *
+    from .modeling_fastspeech2_conformer import *
+    from .tokenization_fastspeech2_conformer import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py b/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py
index 59a1b029751646..e9a061ffd3733c 100644
--- a/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py
+++ b/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py
@@ -475,3 +475,6 @@ def __init__(
         self.vocoder_config = FastSpeech2ConformerHifiGanConfig(**vocoder_config)
 
         super().__init__(**kwargs)
+
+
+__all__ = ["FastSpeech2ConformerConfig", "FastSpeech2ConformerHifiGanConfig", "FastSpeech2ConformerWithHifiGanConfig"]
diff --git a/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py b/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
index 1e1900d38afdc3..81c1eef8959e4a 100644
--- a/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
+++ b/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
@@ -1687,3 +1687,11 @@ def forward(
             return model_outputs + (waveform,)
 
         return FastSpeech2ConformerWithHifiGanOutput(waveform=waveform, **model_outputs)
+
+
+__all__ = [
+    "FastSpeech2ConformerWithHifiGan",
+    "FastSpeech2ConformerHifiGan",
+    "FastSpeech2ConformerModel",
+    "FastSpeech2ConformerPreTrainedModel",
+]
diff --git a/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py b/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py
index 65c081c4c14371..faa1420d71c4b2 100644
--- a/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py
+++ b/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py
@@ -183,3 +183,6 @@ def __setstate__(self, d):
                 "You need to install g2p-en to use FastSpeech2ConformerTokenizer. "
                 "See https://pypi.org/project/g2p-en/ for installation."
             )
+
+
+__all__ = ["FastSpeech2ConformerTokenizer"]
diff --git a/src/transformers/models/flaubert/__init__.py b/src/transformers/models/flaubert/__init__.py
index 94cf7b66139643..e981d9cbcb1e45 100644
--- a/src/transformers/models/flaubert/__init__.py
+++ b/src/transformers/models/flaubert/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,89 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
-
-
-_import_structure = {
-    "configuration_flaubert": ["FlaubertConfig", "FlaubertOnnxConfig"],
-    "tokenization_flaubert": ["FlaubertTokenizer"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flaubert"] = [
-        "FlaubertForMultipleChoice",
-        "FlaubertForQuestionAnswering",
-        "FlaubertForQuestionAnsweringSimple",
-        "FlaubertForSequenceClassification",
-        "FlaubertForTokenClassification",
-        "FlaubertModel",
-        "FlaubertWithLMHeadModel",
-        "FlaubertPreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_flaubert"] = [
-        "TFFlaubertForMultipleChoice",
-        "TFFlaubertForQuestionAnsweringSimple",
-        "TFFlaubertForSequenceClassification",
-        "TFFlaubertForTokenClassification",
-        "TFFlaubertModel",
-        "TFFlaubertPreTrainedModel",
-        "TFFlaubertWithLMHeadModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_flaubert import FlaubertConfig, FlaubertOnnxConfig
-    from .tokenization_flaubert import FlaubertTokenizer
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flaubert import (
-            FlaubertForMultipleChoice,
-            FlaubertForQuestionAnswering,
-            FlaubertForQuestionAnsweringSimple,
-            FlaubertForSequenceClassification,
-            FlaubertForTokenClassification,
-            FlaubertModel,
-            FlaubertPreTrainedModel,
-            FlaubertWithLMHeadModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_flaubert import (
-            TFFlaubertForMultipleChoice,
-            TFFlaubertForQuestionAnsweringSimple,
-            TFFlaubertForSequenceClassification,
-            TFFlaubertForTokenClassification,
-            TFFlaubertModel,
-            TFFlaubertPreTrainedModel,
-            TFFlaubertWithLMHeadModel,
-        )
-
+    from .configuration_flaubert import *
+    from .modeling_flaubert import *
+    from .modeling_tf_flaubert import *
+    from .tokenization_flaubert import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/flaubert/configuration_flaubert.py b/src/transformers/models/flaubert/configuration_flaubert.py
index ae5e07245e9c62..93e4645da56514 100644
--- a/src/transformers/models/flaubert/configuration_flaubert.py
+++ b/src/transformers/models/flaubert/configuration_flaubert.py
@@ -230,3 +230,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
                 ("attention_mask", dynamic_axis),
             ]
         )
+
+
+__all__ = ["FlaubertConfig", "FlaubertOnnxConfig"]
diff --git a/src/transformers/models/flaubert/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py
index 07f3bfad332b23..bc1d66f835572b 100644
--- a/src/transformers/models/flaubert/modeling_flaubert.py
+++ b/src/transformers/models/flaubert/modeling_flaubert.py
@@ -1300,3 +1300,15 @@ def forward(
             hidden_states=transformer_outputs.hidden_states,
             attentions=transformer_outputs.attentions,
         )
+
+
+__all__ = [
+    "FlaubertForMultipleChoice",
+    "FlaubertForQuestionAnswering",
+    "FlaubertForQuestionAnsweringSimple",
+    "FlaubertForSequenceClassification",
+    "FlaubertForTokenClassification",
+    "FlaubertModel",
+    "FlaubertWithLMHeadModel",
+    "FlaubertPreTrainedModel",
+]
diff --git a/src/transformers/models/flaubert/modeling_tf_flaubert.py b/src/transformers/models/flaubert/modeling_tf_flaubert.py
index 71e371da241ad6..a08006815511a8 100644
--- a/src/transformers/models/flaubert/modeling_tf_flaubert.py
+++ b/src/transformers/models/flaubert/modeling_tf_flaubert.py
@@ -1331,3 +1331,14 @@ def build(self, input_shape=None):
         if getattr(self, "logits_proj", None) is not None:
             with tf.name_scope(self.logits_proj.name):
                 self.logits_proj.build([None, None, self.config.num_labels])
+
+
+__all__ = [
+    "TFFlaubertForMultipleChoice",
+    "TFFlaubertForQuestionAnsweringSimple",
+    "TFFlaubertForSequenceClassification",
+    "TFFlaubertForTokenClassification",
+    "TFFlaubertModel",
+    "TFFlaubertPreTrainedModel",
+    "TFFlaubertWithLMHeadModel",
+]
diff --git a/src/transformers/models/flaubert/tokenization_flaubert.py b/src/transformers/models/flaubert/tokenization_flaubert.py
index be9a4e79605fdc..ac9e5aa4336c66 100644
--- a/src/transformers/models/flaubert/tokenization_flaubert.py
+++ b/src/transformers/models/flaubert/tokenization_flaubert.py
@@ -563,3 +563,6 @@ def __setstate__(self, d):
             )
 
         self.sm = sacremoses
+
+
+__all__ = ["FlaubertTokenizer"]
diff --git a/src/transformers/models/flava/__init__.py b/src/transformers/models/flava/__init__.py
index 9fbe54524a6dea..c258a8afc8e92a 100644
--- a/src/transformers/models/flava/__init__.py
+++ b/src/transformers/models/flava/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,81 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {
-    "configuration_flava": [
-        "FlavaConfig",
-        "FlavaImageCodebookConfig",
-        "FlavaImageConfig",
-        "FlavaMultimodalConfig",
-        "FlavaTextConfig",
-    ],
-}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["feature_extraction_flava"] = ["FlavaFeatureExtractor"]
-    _import_structure["image_processing_flava"] = ["FlavaImageProcessor"]
-    _import_structure["processing_flava"] = ["FlavaProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flava"] = [
-        "FlavaForPreTraining",
-        "FlavaImageCodebook",
-        "FlavaImageModel",
-        "FlavaModel",
-        "FlavaMultimodalModel",
-        "FlavaPreTrainedModel",
-        "FlavaTextModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_flava import (
-        FlavaConfig,
-        FlavaImageCodebookConfig,
-        FlavaImageConfig,
-        FlavaMultimodalConfig,
-        FlavaTextConfig,
-    )
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .feature_extraction_flava import FlavaFeatureExtractor
-        from .image_processing_flava import FlavaImageProcessor
-        from .processing_flava import FlavaProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flava import (
-            FlavaForPreTraining,
-            FlavaImageCodebook,
-            FlavaImageModel,
-            FlavaModel,
-            FlavaMultimodalModel,
-            FlavaPreTrainedModel,
-            FlavaTextModel,
-        )
-
+    from .configuration_flava import *
+    from .feature_extraction_flava import *
+    from .image_processing_flava import *
+    from .modeling_flava import *
+    from .processing_flava import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/flava/configuration_flava.py b/src/transformers/models/flava/configuration_flava.py
index 47cdb488a2eb5d..7a18b33ac86ea8 100644
--- a/src/transformers/models/flava/configuration_flava.py
+++ b/src/transformers/models/flava/configuration_flava.py
@@ -696,3 +696,6 @@ def from_configs(
             image_codebook_config=image_codebook_config.to_dict(),
             **kwargs,
         )
+
+
+__all__ = ["FlavaConfig", "FlavaImageCodebookConfig", "FlavaImageConfig", "FlavaMultimodalConfig", "FlavaTextConfig"]
diff --git a/src/transformers/models/flava/feature_extraction_flava.py b/src/transformers/models/flava/feature_extraction_flava.py
index c707b575cef2ef..111795d418fc1b 100644
--- a/src/transformers/models/flava/feature_extraction_flava.py
+++ b/src/transformers/models/flava/feature_extraction_flava.py
@@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None:
             FutureWarning,
         )
         super().__init__(*args, **kwargs)
+
+
+__all__ = ["FlavaFeatureExtractor"]
diff --git a/src/transformers/models/flava/image_processing_flava.py b/src/transformers/models/flava/image_processing_flava.py
index 72ef141df83d8e..254af58c92d78d 100644
--- a/src/transformers/models/flava/image_processing_flava.py
+++ b/src/transformers/models/flava/image_processing_flava.py
@@ -698,3 +698,6 @@ def preprocess(
             data["bool_masked_pos"] = masks
 
         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["FlavaImageProcessor"]
diff --git a/src/transformers/models/flava/modeling_flava.py b/src/transformers/models/flava/modeling_flava.py
index 589385dffecfb0..c893938e42841e 100644
--- a/src/transformers/models/flava/modeling_flava.py
+++ b/src/transformers/models/flava/modeling_flava.py
@@ -2102,3 +2102,14 @@ def forward(
             mmm_image_logits=mmm_image_logits,
             mmm_text_logits=mmm_text_logits,
         )
+
+
+__all__ = [
+    "FlavaForPreTraining",
+    "FlavaImageCodebook",
+    "FlavaImageModel",
+    "FlavaModel",
+    "FlavaMultimodalModel",
+    "FlavaPreTrainedModel",
+    "FlavaTextModel",
+]
diff --git a/src/transformers/models/flava/processing_flava.py b/src/transformers/models/flava/processing_flava.py
index 7f439b040a8fd0..0e2a98cc1038f6 100644
--- a/src/transformers/models/flava/processing_flava.py
+++ b/src/transformers/models/flava/processing_flava.py
@@ -163,3 +163,6 @@ def feature_extractor(self):
             FutureWarning,
         )
         return self.image_processor
+
+
+__all__ = ["FlavaProcessor"]
diff --git a/src/transformers/models/fnet/__init__.py b/src/transformers/models/fnet/__init__.py
index 08b6ddf864e15f..756d690e72c11e 100644
--- a/src/transformers/models/fnet/__init__.py
+++ b/src/transformers/models/fnet/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,93 +13,17 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_sentencepiece_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
-
-
-_import_structure = {"configuration_fnet": ["FNetConfig"]}
-
-try:
-    if not is_sentencepiece_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_fnet"] = [
-        "FNetForMaskedLM",
-        "FNetForMultipleChoice",
-        "FNetForNextSentencePrediction",
-        "FNetForPreTraining",
-        "FNetForQuestionAnswering",
-        "FNetForSequenceClassification",
-        "FNetForTokenClassification",
-        "FNetLayer",
-        "FNetModel",
-        "FNetPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_fnet import FNetConfig
-
-    try:
-        if not is_sentencepiece_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_fnet import FNetTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_fnet_fast import FNetTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_fnet import (
-            FNetForMaskedLM,
-            FNetForMultipleChoice,
-            FNetForNextSentencePrediction,
-            FNetForPreTraining,
-            FNetForQuestionAnswering,
-            FNetForSequenceClassification,
-            FNetForTokenClassification,
-            FNetLayer,
-            FNetModel,
-            FNetPreTrainedModel,
-        )
-
-
+    from .configuration_fnet import *
+    from .modeling_fnet import *
+    from .tokenization_fnet import *
+    from .tokenization_fnet_fast import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/fnet/configuration_fnet.py b/src/transformers/models/fnet/configuration_fnet.py
index 90b77fc5d77aa7..24a57832856593 100644
--- a/src/transformers/models/fnet/configuration_fnet.py
+++ b/src/transformers/models/fnet/configuration_fnet.py
@@ -114,3 +114,6 @@ def __init__(
         self.layer_norm_eps = layer_norm_eps
         self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
         self.tpu_short_seq_length = tpu_short_seq_length
+
+
+__all__ = ["FNetConfig"]
diff --git a/src/transformers/models/fnet/modeling_fnet.py b/src/transformers/models/fnet/modeling_fnet.py
index b1842dbc89d8fe..9d02d35210f34f 100755
--- a/src/transformers/models/fnet/modeling_fnet.py
+++ b/src/transformers/models/fnet/modeling_fnet.py
@@ -1183,3 +1183,17 @@ def forward(
         return QuestionAnsweringModelOutput(
             loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states
         )
+
+
+__all__ = [
+    "FNetForMaskedLM",
+    "FNetForMultipleChoice",
+    "FNetForNextSentencePrediction",
+    "FNetForPreTraining",
+    "FNetForQuestionAnswering",
+    "FNetForSequenceClassification",
+    "FNetForTokenClassification",
+    "FNetLayer",
+    "FNetModel",
+    "FNetPreTrainedModel",
+]
diff --git a/src/transformers/models/fnet/tokenization_fnet.py b/src/transformers/models/fnet/tokenization_fnet.py
index 29095c80ff02fb..877a50cc2d1d27 100644
--- a/src/transformers/models/fnet/tokenization_fnet.py
+++ b/src/transformers/models/fnet/tokenization_fnet.py
@@ -336,3 +336,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
                 fi.write(content_spiece_model)
 
         return (out_vocab_file,)
+
+
+__all__ = ["FNetTokenizer"]
diff --git a/src/transformers/models/fnet/tokenization_fnet_fast.py b/src/transformers/models/fnet/tokenization_fnet_fast.py
index 3136b9f27c22cb..ac33bc13c60cd1 100644
--- a/src/transformers/models/fnet/tokenization_fnet_fast.py
+++ b/src/transformers/models/fnet/tokenization_fnet_fast.py
@@ -184,3 +184,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
             copyfile(self.vocab_file, out_vocab_file)
 
         return (out_vocab_file,)
+
+
+__all__ = ["FNetTokenizerFast"]
diff --git a/src/transformers/models/focalnet/__init__.py b/src/transformers/models/focalnet/__init__.py
index ceacb8a52a170b..5dec8135f3b303 100644
--- a/src/transformers/models/focalnet/__init__.py
+++ b/src/transformers/models/focalnet/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,45 +13,15 @@
 # limitations under the License.
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_focalnet": ["FocalNetConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_focalnet"] = [ - "FocalNetForImageClassification", - "FocalNetForMaskedImageModeling", - "FocalNetBackbone", - "FocalNetModel", - "FocalNetPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_focalnet import FocalNetConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_focalnet import ( - FocalNetBackbone, - FocalNetForImageClassification, - FocalNetForMaskedImageModeling, - FocalNetModel, - FocalNetPreTrainedModel, - ) - + from .configuration_focalnet import * + from .modeling_focalnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/focalnet/configuration_focalnet.py b/src/transformers/models/focalnet/configuration_focalnet.py index 577530e2ecca2f..8fe5831a112215 100644 --- a/src/transformers/models/focalnet/configuration_focalnet.py +++ b/src/transformers/models/focalnet/configuration_focalnet.py @@ -159,3 +159,6 @@ def __init__( self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["FocalNetConfig"] diff --git a/src/transformers/models/focalnet/modeling_focalnet.py b/src/transformers/models/focalnet/modeling_focalnet.py index 99f2dc658fcbfc..43de96087cd7f7 100644 --- a/src/transformers/models/focalnet/modeling_focalnet.py +++ b/src/transformers/models/focalnet/modeling_focalnet.py @@ -1027,3 +1027,12 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = [ + "FocalNetForImageClassification", + "FocalNetForMaskedImageModeling", + "FocalNetBackbone", + "FocalNetModel", + "FocalNetPreTrainedModel", +] diff --git a/src/transformers/models/fsmt/__init__.py b/src/transformers/models/fsmt/__init__.py index db960e4a5ce9c3..f8f31762d681db 100644 --- a/src/transformers/models/fsmt/__init__.py +++ b/src/transformers/models/fsmt/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,39 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_fsmt": ["FSMTConfig"], - "tokenization_fsmt": ["FSMTTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_fsmt"] = ["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_fsmt import FSMTConfig - from .tokenization_fsmt import FSMTTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel - + from .configuration_fsmt import * + from .modeling_fsmt import * + from .tokenization_fsmt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/fsmt/configuration_fsmt.py b/src/transformers/models/fsmt/configuration_fsmt.py index 72af4ddab239fd..96b617e0da512e 100644 --- a/src/transformers/models/fsmt/configuration_fsmt.py +++ b/src/transformers/models/fsmt/configuration_fsmt.py @@ -213,3 +213,6 @@ def __init__( early_stopping=early_stopping, **common_kwargs, ) + + +__all__ = ["FSMTConfig"] diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index 3f865c037c01b6..9961ea4c88f00d 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -1364,3 +1364,6 @@ def forward( self.make_weight(max_pos, self.embedding_dim, self.padding_idx) positions = self.make_positions(input, self.padding_idx) return super().forward(positions) + + +__all__ = ["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"] diff --git a/src/transformers/models/fsmt/tokenization_fsmt.py b/src/transformers/models/fsmt/tokenization_fsmt.py index d1f1ee4cac2b59..ce28766100e349 100644 --- a/src/transformers/models/fsmt/tokenization_fsmt.py +++ b/src/transformers/models/fsmt/tokenization_fsmt.py @@ -516,3 +516,6 @@ def __setstate__(self, d): ) self.sm = sacremoses + + +__all__ = ["FSMTTokenizer"] diff --git a/src/transformers/models/funnel/__init__.py b/src/transformers/models/funnel/__init__.py index aa620540dc3fd6..e4e0587ce32f5e 100644 --- a/src/transformers/models/funnel/__init__.py +++ b/src/transformers/models/funnel/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,120 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_funnel": ["FunnelConfig"], - "convert_funnel_original_tf_checkpoint_to_pytorch": [], - "tokenization_funnel": ["FunnelTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_funnel"] = [ - "FunnelBaseModel", - "FunnelForMaskedLM", - "FunnelForMultipleChoice", - "FunnelForPreTraining", - "FunnelForQuestionAnswering", - "FunnelForSequenceClassification", - "FunnelForTokenClassification", - "FunnelModel", - "FunnelPreTrainedModel", - "load_tf_weights_in_funnel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_funnel"] = [ - "TFFunnelBaseModel", - "TFFunnelForMaskedLM", - "TFFunnelForMultipleChoice", - "TFFunnelForPreTraining", - "TFFunnelForQuestionAnswering", - "TFFunnelForSequenceClassification", - "TFFunnelForTokenClassification", - "TFFunnelModel", - "TFFunnelPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_funnel import FunnelConfig - from .tokenization_funnel import FunnelTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_funnel_fast import FunnelTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_funnel import ( - FunnelBaseModel, - FunnelForMaskedLM, - FunnelForMultipleChoice, - FunnelForPreTraining, - FunnelForQuestionAnswering, - FunnelForSequenceClassification, - FunnelForTokenClassification, - FunnelModel, - FunnelPreTrainedModel, - load_tf_weights_in_funnel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_funnel import ( - TFFunnelBaseModel, - TFFunnelForMaskedLM, - TFFunnelForMultipleChoice, - TFFunnelForPreTraining, - TFFunnelForQuestionAnswering, - TFFunnelForSequenceClassification, - TFFunnelForTokenClassification, - TFFunnelModel, - TFFunnelPreTrainedModel, - ) - + from .configuration_funnel import * + from .convert_funnel_original_tf_checkpoint_to_pytorch import * + from .modeling_funnel import * + from .modeling_tf_funnel import * + from .tokenization_funnel import * + from .tokenization_funnel_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/funnel/configuration_funnel.py b/src/transformers/models/funnel/configuration_funnel.py index 53d072d4c82edd..b164f286042a74 100644 --- a/src/transformers/models/funnel/configuration_funnel.py +++ b/src/transformers/models/funnel/configuration_funnel.py @@ 
-161,3 +161,6 @@ def num_blocks(self): @num_blocks.setter def num_blocks(self, value): raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.") + + +__all__ = ["FunnelConfig"] diff --git a/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py index 4eab188f2ab7ba..37f71c0d233e5e 100755 --- a/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py @@ -62,3 +62,6 @@ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_du convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model ) + + +__all__ = [] diff --git a/src/transformers/models/funnel/modeling_funnel.py b/src/transformers/models/funnel/modeling_funnel.py index 42a06029284800..0d2f689da89307 100644 --- a/src/transformers/models/funnel/modeling_funnel.py +++ b/src/transformers/models/funnel/modeling_funnel.py @@ -1592,3 +1592,17 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "FunnelBaseModel", + "FunnelForMaskedLM", + "FunnelForMultipleChoice", + "FunnelForPreTraining", + "FunnelForQuestionAnswering", + "FunnelForSequenceClassification", + "FunnelForTokenClassification", + "FunnelModel", + "FunnelPreTrainedModel", + "load_tf_weights_in_funnel", +] diff --git a/src/transformers/models/funnel/modeling_tf_funnel.py b/src/transformers/models/funnel/modeling_tf_funnel.py index ab5f14a4c66d81..d4efd7ba0a3aaa 100644 --- a/src/transformers/models/funnel/modeling_tf_funnel.py +++ b/src/transformers/models/funnel/modeling_tf_funnel.py @@ -1865,3 +1865,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFFunnelBaseModel", + "TFFunnelForMaskedLM", + "TFFunnelForMultipleChoice", + "TFFunnelForPreTraining", + "TFFunnelForQuestionAnswering", + "TFFunnelForSequenceClassification", + "TFFunnelForTokenClassification", + "TFFunnelModel", + "TFFunnelPreTrainedModel", +] diff --git a/src/transformers/models/funnel/tokenization_funnel.py b/src/transformers/models/funnel/tokenization_funnel.py index 78499cbee4ec26..8cb6f1af0e8153 100644 --- a/src/transformers/models/funnel/tokenization_funnel.py +++ b/src/transformers/models/funnel/tokenization_funnel.py @@ -537,3 +537,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["FunnelTokenizer"] diff --git a/src/transformers/models/funnel/tokenization_funnel_fast.py b/src/transformers/models/funnel/tokenization_funnel_fast.py index 6a48f2f54a8702..c3e45ed62ac29c 100644 --- a/src/transformers/models/funnel/tokenization_funnel_fast.py +++ b/src/transformers/models/funnel/tokenization_funnel_fast.py @@ -198,3 +198,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["FunnelTokenizerFast"] diff --git a/src/transformers/models/fuyu/__init__.py b/src/transformers/models/fuyu/__init__.py index 403acb1964c1ed..c2a7d252010e00 100644 --- a/src/transformers/models/fuyu/__init__.py +++ 
b/src/transformers/models/fuyu/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 AdeptAI and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,61 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_fuyu": ["FuyuConfig"], -} - - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_fuyu"] = ["FuyuImageProcessor"] - _import_structure["processing_fuyu"] = ["FuyuProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_fuyu"] = [ - "FuyuForCausalLM", - "FuyuPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_fuyu import FuyuConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_fuyu import FuyuImageProcessor - from .processing_fuyu import FuyuProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_fuyu import ( - FuyuForCausalLM, - FuyuPreTrainedModel, - ) - - + from .configuration_fuyu import * + from .image_processing_fuyu import * + from .modeling_fuyu import * + from .processing_fuyu import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/fuyu/configuration_fuyu.py b/src/transformers/models/fuyu/configuration_fuyu.py index 92af404cdbef11..23c3d88a8ec1f4 100644 --- a/src/transformers/models/fuyu/configuration_fuyu.py +++ b/src/transformers/models/fuyu/configuration_fuyu.py @@ -205,3 +205,6 @@ def _rope_scaling_validation(self): ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") + + +__all__ = ["FuyuConfig"] diff --git a/src/transformers/models/fuyu/image_processing_fuyu.py b/src/transformers/models/fuyu/image_processing_fuyu.py index 4bb9ea7964d416..da9319d3a98aa9 100644 --- a/src/transformers/models/fuyu/image_processing_fuyu.py +++ b/src/transformers/models/fuyu/image_processing_fuyu.py @@ -719,3 +719,6 @@ def preprocess_with_tokenizer_info( "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence, } ) + + +__all__ = ["FuyuImageProcessor"] diff --git a/src/transformers/models/fuyu/modeling_fuyu.py b/src/transformers/models/fuyu/modeling_fuyu.py index 2df5dbc8b29177..a7afb411c44805 100644 --- a/src/transformers/models/fuyu/modeling_fuyu.py +++ b/src/transformers/models/fuyu/modeling_fuyu.py @@ -386,3 +386,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in 
layer_past), ) return reordered_past + + +__all__ = ["FuyuForCausalLM", "FuyuPreTrainedModel"] diff --git a/src/transformers/models/fuyu/processing_fuyu.py b/src/transformers/models/fuyu/processing_fuyu.py index e24f2fd4d1abd0..768542a85cbe46 100644 --- a/src/transformers/models/fuyu/processing_fuyu.py +++ b/src/transformers/models/fuyu/processing_fuyu.py @@ -721,3 +721,6 @@ def decode(self, *args, **kwargs): the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) + + +__all__ = ["FuyuProcessor"] diff --git a/src/transformers/models/git/__init__.py b/src/transformers/models/git/__init__.py index 02f5f6d88a1194..06e3e86927ab79 100644 --- a/src/transformers/models/git/__init__.py +++ b/src/transformers/models/git/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,48 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_git": ["GitConfig", "GitVisionConfig"], - "processing_git": ["GitProcessor"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_git"] = [ - "GitForCausalLM", - "GitModel", - "GitPreTrainedModel", - "GitVisionModel", - ] if TYPE_CHECKING: - from .configuration_git import GitConfig, GitVisionConfig - from .processing_git import GitProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_git import ( - GitForCausalLM, - GitModel, - GitPreTrainedModel, - GitVisionModel, - ) - + from .configuration_git import * + from .modeling_git import * + from .processing_git import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/git/configuration_git.py b/src/transformers/models/git/configuration_git.py index 1be3e7067bdfcf..6266f0f45de757 100644 --- a/src/transformers/models/git/configuration_git.py +++ b/src/transformers/models/git/configuration_git.py @@ -217,3 +217,6 @@ def __init__( self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id + + +__all__ = ["GitConfig", "GitVisionConfig"] diff --git a/src/transformers/models/git/modeling_git.py b/src/transformers/models/git/modeling_git.py index 0b86a41378fe0f..662ff0d1ccef40 100644 --- a/src/transformers/models/git/modeling_git.py +++ b/src/transformers/models/git/modeling_git.py @@ -1644,3 +1644,6 @@ def _reorder_cache(self, past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel"] diff --git 
a/src/transformers/models/git/processing_git.py b/src/transformers/models/git/processing_git.py index e9e96fa765d841..29f91badc85a9c 100644 --- a/src/transformers/models/git/processing_git.py +++ b/src/transformers/models/git/processing_git.py @@ -146,3 +146,6 @@ def decode(self, *args, **kwargs): @property def model_input_names(self): return ["input_ids", "attention_mask", "pixel_values"] + + +__all__ = ["GitProcessor"] diff --git a/src/transformers/models/glpn/__init__.py b/src/transformers/models/glpn/__init__.py index 9896e801c93ae7..2a5b38675c3478 100644 --- a/src/transformers/models/glpn/__init__.py +++ b/src/transformers/models/glpn/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,61 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_glpn": ["GLPNConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"] - _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_glpn"] = [ - "GLPNForDepthEstimation", - "GLPNLayer", - "GLPNModel", - "GLPNPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_glpn import GLPNConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_glpn import GLPNFeatureExtractor - from .image_processing_glpn import GLPNImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_glpn import ( - GLPNForDepthEstimation, - GLPNLayer, - GLPNModel, - GLPNPreTrainedModel, - ) - - + from .configuration_glpn import * + from .feature_extraction_glpn import * + from .image_processing_glpn import * + from .modeling_glpn import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/glpn/configuration_glpn.py b/src/transformers/models/glpn/configuration_glpn.py index 88e1d6e1f029f6..19eb04b19b8629 100644 --- a/src/transformers/models/glpn/configuration_glpn.py +++ b/src/transformers/models/glpn/configuration_glpn.py @@ -130,3 +130,6 @@ def __init__( self.decoder_hidden_size = decoder_hidden_size self.max_depth = max_depth self.head_in_index = head_in_index + + +__all__ = ["GLPNConfig"] diff --git a/src/transformers/models/glpn/feature_extraction_glpn.py b/src/transformers/models/glpn/feature_extraction_glpn.py index 314268225d2af4..a7f1f5cc85b7c9 100644 --- a/src/transformers/models/glpn/feature_extraction_glpn.py +++ b/src/transformers/models/glpn/feature_extraction_glpn.py @@ 
-31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["GLPNFeatureExtractor"] diff --git a/src/transformers/models/glpn/image_processing_glpn.py b/src/transformers/models/glpn/image_processing_glpn.py index 115cefc86beec3..0ef93c21d9491e 100644 --- a/src/transformers/models/glpn/image_processing_glpn.py +++ b/src/transformers/models/glpn/image_processing_glpn.py @@ -268,3 +268,6 @@ def post_process_depth_estimation( results.append({"predicted_depth": depth}) return results + + +__all__ = ["GLPNImageProcessor"] diff --git a/src/transformers/models/glpn/modeling_glpn.py b/src/transformers/models/glpn/modeling_glpn.py index 70f175df8c9973..b753db2654938a 100755 --- a/src/transformers/models/glpn/modeling_glpn.py +++ b/src/transformers/models/glpn/modeling_glpn.py @@ -771,3 +771,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = ["GLPNForDepthEstimation", "GLPNLayer", "GLPNModel", "GLPNPreTrainedModel"] diff --git a/src/transformers/models/gpt2/__init__.py b/src/transformers/models/gpt2/__init__.py index 8c77c68445a830..a2a0e147b33328 100644 --- a/src/transformers/models/gpt2/__init__.py +++ b/src/transformers/models/gpt2/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,143 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_keras_nlp_available, - is_tensorflow_text_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_gpt2": ["GPT2Config", "GPT2OnnxConfig"], - "tokenization_gpt2": ["GPT2Tokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gpt2_fast"] = ["GPT2TokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt2"] = [ - "GPT2DoubleHeadsModel", - "GPT2ForQuestionAnswering", - "GPT2ForSequenceClassification", - "GPT2ForTokenClassification", - "GPT2LMHeadModel", - "GPT2Model", - "GPT2PreTrainedModel", - "load_tf_weights_in_gpt2", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_gpt2"] = [ - "TFGPT2DoubleHeadsModel", - "TFGPT2ForSequenceClassification", - "TFGPT2LMHeadModel", - "TFGPT2MainLayer", - "TFGPT2Model", - "TFGPT2PreTrainedModel", - ] - -try: - if not is_keras_nlp_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gpt2_tf"] = ["TFGPT2Tokenizer"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_gpt2"] = ["FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPT2PreTrainedModel"] if TYPE_CHECKING: - from .configuration_gpt2 import GPT2Config, GPT2OnnxConfig - from .tokenization_gpt2 import GPT2Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gpt2_fast import GPT2TokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt2 import ( - GPT2DoubleHeadsModel, - GPT2ForQuestionAnswering, - GPT2ForSequenceClassification, - GPT2ForTokenClassification, - GPT2LMHeadModel, - GPT2Model, - GPT2PreTrainedModel, - load_tf_weights_in_gpt2, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_gpt2 import ( - TFGPT2DoubleHeadsModel, - TFGPT2ForSequenceClassification, - TFGPT2LMHeadModel, - TFGPT2MainLayer, - TFGPT2Model, - TFGPT2PreTrainedModel, - ) - - try: - if not is_keras_nlp_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gpt2_tf import TFGPT2Tokenizer - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model, FlaxGPT2PreTrainedModel - + from .configuration_gpt2 import * + from .modeling_flax_gpt2 import * + from .modeling_gpt2 import * + from .modeling_tf_gpt2 import * + from .tokenization_gpt2 import * + from .tokenization_gpt2_fast import * else: import sys - sys.modules[__name__] = 
_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt2/configuration_gpt2.py b/src/transformers/models/gpt2/configuration_gpt2.py index 82a24912958f42..f3ebea02496c50 100644 --- a/src/transformers/models/gpt2/configuration_gpt2.py +++ b/src/transformers/models/gpt2/configuration_gpt2.py @@ -268,3 +268,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["GPT2Config", "GPT2OnnxConfig"] diff --git a/src/transformers/models/gpt2/modeling_flax_gpt2.py b/src/transformers/models/gpt2/modeling_flax_gpt2.py index c3ef377642a3c5..62704d203b0edd 100644 --- a/src/transformers/models/gpt2/modeling_flax_gpt2.py +++ b/src/transformers/models/gpt2/modeling_flax_gpt2.py @@ -777,3 +777,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + + +__all__ = ["FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPT2PreTrainedModel"] diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index 58143192c20482..a995ff76eac408 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -1871,3 +1871,15 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "GPT2DoubleHeadsModel", + "GPT2ForQuestionAnswering", + "GPT2ForSequenceClassification", + "GPT2ForTokenClassification", + "GPT2LMHeadModel", + "GPT2Model", + "GPT2PreTrainedModel", + "load_tf_weights_in_gpt2", +] diff --git a/src/transformers/models/gpt2/modeling_tf_gpt2.py b/src/transformers/models/gpt2/modeling_tf_gpt2.py index acdd65006f3e3c..f222cdf486fd5b 100644 --- a/src/transformers/models/gpt2/modeling_tf_gpt2.py +++ b/src/transformers/models/gpt2/modeling_tf_gpt2.py @@ -1233,3 +1233,13 @@ def build(self, input_shape=None): if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) + + +__all__ = [ + "TFGPT2DoubleHeadsModel", + "TFGPT2ForSequenceClassification", + "TFGPT2LMHeadModel", + "TFGPT2MainLayer", + "TFGPT2Model", + "TFGPT2PreTrainedModel", +] diff --git a/src/transformers/models/gpt2/tokenization_gpt2.py b/src/transformers/models/gpt2/tokenization_gpt2.py index badacf6dbe71ff..709bcec5b61123 100644 --- a/src/transformers/models/gpt2/tokenization_gpt2.py +++ b/src/transformers/models/gpt2/tokenization_gpt2.py @@ -329,3 +329,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if is_split_into_words or add_prefix_space: text = " " + text return (text, kwargs) + + +__all__ = ["GPT2Tokenizer"] diff --git a/src/transformers/models/gpt2/tokenization_gpt2_fast.py b/src/transformers/models/gpt2/tokenization_gpt2_fast.py index 795b5ce067298f..07b48faad4e35c 100644 --- a/src/transformers/models/gpt2/tokenization_gpt2_fast.py +++ b/src/transformers/models/gpt2/tokenization_gpt2_fast.py @@ -139,3 +139,6 @@ def _encode_plus(self, *args, **kwargs) -> BatchEncoding: def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["GPT2TokenizerFast"] diff --git a/src/transformers/models/gpt_bigcode/__init__.py 
b/src/transformers/models/gpt_bigcode/__init__.py index 60eec86ca541d7..92e985d9273455 100644 --- a/src/transformers/models/gpt_bigcode/__init__.py +++ b/src/transformers/models/gpt_bigcode/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,53 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_gpt_bigcode": ["GPTBigCodeConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt_bigcode"] = [ - "GPTBigCodeForSequenceClassification", - "GPTBigCodeForTokenClassification", - "GPTBigCodeForCausalLM", - "GPTBigCodeModel", - "GPTBigCodePreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_gpt_bigcode import GPTBigCodeConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt_bigcode import ( - GPTBigCodeForCausalLM, - GPTBigCodeForSequenceClassification, - GPTBigCodeForTokenClassification, - GPTBigCodeModel, - GPTBigCodePreTrainedModel, - ) - - + from .configuration_gpt_bigcode import * + from .modeling_gpt_bigcode import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py index 5bd72d23f986c6..46a3dfea44101f 100644 --- a/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py @@ -139,3 +139,6 @@ def __init__( self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + +__all__ = ["GPTBigCodeConfig"] diff --git a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py index 5326c7b907d4b1..a77308b5bc7e72 100644 --- a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py @@ -1420,3 +1420,12 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = [ + "GPTBigCodeForSequenceClassification", + "GPTBigCodeForTokenClassification", + "GPTBigCodeForCausalLM", + "GPTBigCodeModel", + "GPTBigCodePreTrainedModel", +] diff --git a/src/transformers/models/gpt_neo/__init__.py b/src/transformers/models/gpt_neo/__init__.py index 6c314c89f713a4..578577f22882cd 100644 --- a/src/transformers/models/gpt_neo/__init__.py +++ b/src/transformers/models/gpt_neo/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. 
+# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,71 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available - - -_import_structure = { - "configuration_gpt_neo": ["GPTNeoConfig", "GPTNeoOnnxConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt_neo"] = [ - "GPTNeoForCausalLM", - "GPTNeoForQuestionAnswering", - "GPTNeoForSequenceClassification", - "GPTNeoForTokenClassification", - "GPTNeoModel", - "GPTNeoPreTrainedModel", - "load_tf_weights_in_gpt_neo", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_gpt_neo"] = [ - "FlaxGPTNeoForCausalLM", - "FlaxGPTNeoModel", - "FlaxGPTNeoPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_gpt_neo import GPTNeoConfig, GPTNeoOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt_neo import ( - GPTNeoForCausalLM, - GPTNeoForQuestionAnswering, - GPTNeoForSequenceClassification, - GPTNeoForTokenClassification, - GPTNeoModel, - GPTNeoPreTrainedModel, - load_tf_weights_in_gpt_neo, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel - - + from .configuration_gpt_neo import * + from .modeling_flax_gpt_neo import * + from .modeling_gpt_neo import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt_neo/configuration_gpt_neo.py b/src/transformers/models/gpt_neo/configuration_gpt_neo.py index a3c261e855b956..a8f358e7218b80 100644 --- a/src/transformers/models/gpt_neo/configuration_gpt_neo.py +++ b/src/transformers/models/gpt_neo/configuration_gpt_neo.py @@ -267,3 +267,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["GPTNeoConfig", "GPTNeoOnnxConfig"] diff --git a/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py index 5639ca50f166a2..851c20dfcfefac 100644 --- a/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py @@ -682,3 +682,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_call_sample_docstring(FlaxGPTNeoForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC) + + +__all__ = ["FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel"] diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index 28bfbabc1fd8e0..22c00508cf2ea0 100755 --- 
a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -1339,3 +1339,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "GPTNeoForCausalLM", + "GPTNeoForQuestionAnswering", + "GPTNeoForSequenceClassification", + "GPTNeoForTokenClassification", + "GPTNeoModel", + "GPTNeoPreTrainedModel", + "load_tf_weights_in_gpt_neo", +] diff --git a/src/transformers/models/gpt_neox/__init__.py b/src/transformers/models/gpt_neox/__init__.py index 05a6982acb0b08..fdf26390199635 100644 --- a/src/transformers/models/gpt_neox/__init__.py +++ b/src/transformers/models/gpt_neox/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,66 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available -from ...utils import OptionalDependencyNotAvailable - - -_import_structure = {"configuration_gpt_neox": ["GPTNeoXConfig"]} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt_neox"] = [ - "GPTNeoXForCausalLM", - "GPTNeoXForQuestionAnswering", - "GPTNeoXForSequenceClassification", - "GPTNeoXForTokenClassification", - "GPTNeoXLayer", - "GPTNeoXModel", - "GPTNeoXPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_gpt_neox import GPTNeoXConfig - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt_neox import ( - GPTNeoXForCausalLM, - GPTNeoXForQuestionAnswering, - GPTNeoXForSequenceClassification, - GPTNeoXForTokenClassification, - GPTNeoXLayer, - GPTNeoXModel, - GPTNeoXPreTrainedModel, - ) - - + from .configuration_gpt_neox import * + from .modeling_gpt_neox import * + from .tokenization_gpt_neox_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt_neox/configuration_gpt_neox.py b/src/transformers/models/gpt_neox/configuration_gpt_neox.py index 07514a37c6f2fa..6d73403ed1b0ce 100644 --- a/src/transformers/models/gpt_neox/configuration_gpt_neox.py +++ b/src/transformers/models/gpt_neox/configuration_gpt_neox.py @@ -189,3 +189,6 @@ def __init__( raise ValueError( "The hidden size is not divisble by the number of attention heads! Make sure to update them!" 
) + + +__all__ = ["GPTNeoXConfig"] diff --git a/src/transformers/models/gpt_neox/modeling_gpt_neox.py b/src/transformers/models/gpt_neox/modeling_gpt_neox.py index 70ff07ed7f6dcc..d4e60ab741a875 100755 --- a/src/transformers/models/gpt_neox/modeling_gpt_neox.py +++ b/src/transformers/models/gpt_neox/modeling_gpt_neox.py @@ -1484,3 +1484,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "GPTNeoXForCausalLM", + "GPTNeoXForQuestionAnswering", + "GPTNeoXForSequenceClassification", + "GPTNeoXForTokenClassification", + "GPTNeoXLayer", + "GPTNeoXModel", + "GPTNeoXPreTrainedModel", +] diff --git a/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py b/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py index 7fafa440d05113..1df53f3776d5b9 100644 --- a/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py +++ b/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py @@ -228,3 +228,6 @@ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["GPTNeoXTokenizerFast"] diff --git a/src/transformers/models/gpt_neox_japanese/__init__.py b/src/transformers/models/gpt_neox_japanese/__init__.py index c43391c04958d4..94ba39d69ad638 100644 --- a/src/transformers/models/gpt_neox_japanese/__init__.py +++ b/src/transformers/models/gpt_neox_japanese/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,48 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...file_utils import _LazyModule, is_torch_available -from ...utils import OptionalDependencyNotAvailable - - -_import_structure = { - "configuration_gpt_neox_japanese": ["GPTNeoXJapaneseConfig"], - "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt_neox_japanese"] = [ - "GPTNeoXJapaneseForCausalLM", - "GPTNeoXJapaneseLayer", - "GPTNeoXJapaneseModel", - "GPTNeoXJapanesePreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_gpt_neox_japanese import GPTNeoXJapaneseConfig - from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt_neox_japanese import ( - GPTNeoXJapaneseForCausalLM, - GPTNeoXJapaneseLayer, - GPTNeoXJapaneseModel, - GPTNeoXJapanesePreTrainedModel, - ) - - + from .configuration_gpt_neox_japanese import * + from .modeling_gpt_neox_japanese import * + from .tokenization_gpt_neox_japanese import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py index e305bd28f2fbf4..650c6124949fff 100644 --- a/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py @@ -162,3 +162,6 @@ def __init__( if self.rope_scaling is not None and "type" in self.rope_scaling: self.rope_scaling["rope_type"] = self.rope_scaling["type"] rope_config_validation(self) + + +__all__ = ["GPTNeoXJapaneseConfig"] diff --git a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py index c9e1b2d7213587..03f38439692626 100755 --- a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py @@ -918,3 +918,11 @@ def _reorder_cache(self, past_key_values, beam_idx): + layer_past[2:], ) return reordered_past + + +__all__ = [ + "GPTNeoXJapaneseForCausalLM", + "GPTNeoXJapaneseLayer", + "GPTNeoXJapaneseModel", + "GPTNeoXJapanesePreTrainedModel", +] diff --git a/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py index 285dcb7d18e2b8..dbb084e930bd7d 100644 --- a/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py @@ -349,3 +349,6 @@ def convert_id_to_token(self, index, breakline="\n"): words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) text = "".join(words) return text + + +__all__ = ["GPTNeoXJapaneseTokenizer"] diff --git a/src/transformers/models/gpt_sw3/__init__.py b/src/transformers/models/gpt_sw3/__init__.py index e7c08f0e27e747..e477eb1d2cc2ff 100644 --- 
a/src/transformers/models/gpt_sw3/__init__.py +++ b/src/transformers/models/gpt_sw3/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,33 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gpt_sw3 import GPTSw3Tokenizer - + from .tokenization_gpt_sw3 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt_sw3/tokenization_gpt_sw3.py b/src/transformers/models/gpt_sw3/tokenization_gpt_sw3.py index 262aeaba5eea10..7991988c74849e 100644 --- a/src/transformers/models/gpt_sw3/tokenization_gpt_sw3.py +++ b/src/transformers/models/gpt_sw3/tokenization_gpt_sw3.py @@ -294,3 +294,6 @@ def decode_fast(self, token_ids: Union[int, List[int]]) -> str: """ return self.sp_model.decode(token_ids) + + +__all__ = ["GPTSw3Tokenizer"] diff --git a/src/transformers/models/gptj/__init__.py b/src/transformers/models/gptj/__init__.py index 51520484529f85..84d99fda2e6996 100644 --- a/src/transformers/models/gptj/__init__.py +++ b/src/transformers/models/gptj/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,98 +13,17 @@ # limitations under the License. 
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_flax_available,
-    is_tf_available,
-    is_torch_available,
-)
-
-
-_import_structure = {"configuration_gptj": ["GPTJConfig", "GPTJOnnxConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_gptj"] = [
-        "GPTJForCausalLM",
-        "GPTJForQuestionAnswering",
-        "GPTJForSequenceClassification",
-        "GPTJModel",
-        "GPTJPreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_gptj"] = [
-        "TFGPTJForCausalLM",
-        "TFGPTJForQuestionAnswering",
-        "TFGPTJForSequenceClassification",
-        "TFGPTJModel",
-        "TFGPTJPreTrainedModel",
-    ]
-
-try:
-    if not is_flax_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flax_gptj"] = [
-        "FlaxGPTJForCausalLM",
-        "FlaxGPTJModel",
-        "FlaxGPTJPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_gptj import GPTJConfig, GPTJOnnxConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_gptj import (
-            GPTJForCausalLM,
-            GPTJForQuestionAnswering,
-            GPTJForSequenceClassification,
-            GPTJModel,
-            GPTJPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_gptj import (
-            TFGPTJForCausalLM,
-            TFGPTJForQuestionAnswering,
-            TFGPTJForSequenceClassification,
-            TFGPTJModel,
-            TFGPTJPreTrainedModel,
-        )
-
-    try:
-        if not is_flax_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel
-
+    from .configuration_gptj import *
+    from .modeling_flax_gptj import *
+    from .modeling_gptj import *
+    from .modeling_tf_gptj import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/gptj/configuration_gptj.py b/src/transformers/models/gptj/configuration_gptj.py
index 1b93f259b05b12..5e76b3f4ba6ee3 100644
--- a/src/transformers/models/gptj/configuration_gptj.py
+++ b/src/transformers/models/gptj/configuration_gptj.py
@@ -214,3 +214,6 @@ def generate_dummy_inputs(
     @property
     def default_onnx_opset(self) -> int:
         return 13
+
+
+__all__ = ["GPTJConfig", "GPTJOnnxConfig"]
diff --git a/src/transformers/models/gptj/modeling_flax_gptj.py b/src/transformers/models/gptj/modeling_flax_gptj.py
index 9f0d4d6e860003..83abf840ac276d 100644
--- a/src/transformers/models/gptj/modeling_flax_gptj.py
+++ b/src/transformers/models/gptj/modeling_flax_gptj.py
@@ -716,3 +716,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs):
     FlaxCausalLMOutput,
     _CONFIG_FOR_DOC,
 )
+
+
+__all__ = ["FlaxGPTJForCausalLM", "FlaxGPTJModel", "FlaxGPTJPreTrainedModel"]
diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py
index 1cc9cf369d1887..496e52da91ccec 100644
--- a/src/transformers/models/gptj/modeling_gptj.py
+++ b/src/transformers/models/gptj/modeling_gptj.py
@@ -1397,3 +1397,12 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "GPTJForCausalLM",
+    "GPTJForQuestionAnswering",
+    "GPTJForSequenceClassification",
+    "GPTJModel",
+    "GPTJPreTrainedModel",
+]
diff --git a/src/transformers/models/gptj/modeling_tf_gptj.py b/src/transformers/models/gptj/modeling_tf_gptj.py
index a931287adfcd01..a72e1f795d2e1b 100644
--- a/src/transformers/models/gptj/modeling_tf_gptj.py
+++ b/src/transformers/models/gptj/modeling_tf_gptj.py
@@ -1096,3 +1096,12 @@ def build(self, input_shape=None):
         if getattr(self, "qa_outputs", None) is not None:
             with tf.name_scope(self.qa_outputs.name):
                 self.qa_outputs.build([None, None, self.config.hidden_size])
+
+
+__all__ = [
+    "TFGPTJForCausalLM",
+    "TFGPTJForQuestionAnswering",
+    "TFGPTJForSequenceClassification",
+    "TFGPTJModel",
+    "TFGPTJPreTrainedModel",
+]
diff --git a/src/transformers/models/granite/__init__.py b/src/transformers/models/granite/__init__.py
index 5a98daa072d583..fbd3f77b1f0542 100644
--- a/src/transformers/models/granite/__init__.py
+++ b/src/transformers/models/granite/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2024 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,45 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {
-    "configuration_granite": ["GraniteConfig"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_granite"] = [
-        "GraniteForCausalLM",
-        "GraniteModel",
-        "GranitePreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_granite import GraniteConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_granite import (
-            GraniteForCausalLM,
-            GraniteModel,
-            GranitePreTrainedModel,
-        )
-
+    from .configuration_granite import *
+    from .modeling_granite import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/granite/configuration_granite.py b/src/transformers/models/granite/configuration_granite.py
index ed6191adf65b58..395e6434ff6c20 100644
--- a/src/transformers/models/granite/configuration_granite.py
+++ b/src/transformers/models/granite/configuration_granite.py
@@ -177,3 +177,6 @@ def __init__(
         )

         rope_config_validation(self)
+
+
+__all__ = ["GraniteConfig"]
diff --git a/src/transformers/models/granite/modeling_granite.py b/src/transformers/models/granite/modeling_granite.py
index 8cd24265d9edcf..9a0ed5f2ea814d 100644
--- a/src/transformers/models/granite/modeling_granite.py
+++ b/src/transformers/models/granite/modeling_granite.py
@@ -1137,3 +1137,6 @@ def _reorder_cache(past_key_values, beam_idx):
                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
             )
         return reordered_past
+
+
+__all__ = ["GraniteForCausalLM", "GraniteModel", "GranitePreTrainedModel"]
diff --git a/src/transformers/models/granitemoe/__init__.py b/src/transformers/models/granitemoe/__init__.py
index f16f84abd9aa4d..c85333f70b5ee3 100644
--- a/src/transformers/models/granitemoe/__init__.py
+++ b/src/transformers/models/granitemoe/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2024 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,45 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {
-    "configuration_granitemoe": ["GraniteMoeConfig"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_granitemoe"] = [
-        "GraniteMoeForCausalLM",
-        "GraniteMoeModel",
-        "GraniteMoePreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_granitemoe import GraniteMoeConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_granitemoe import (
-            GraniteMoeForCausalLM,
-            GraniteMoeModel,
-            GraniteMoePreTrainedModel,
-        )
-
+    from .configuration_granitemoe import *
+    from .modeling_granitemoe import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/granitemoe/configuration_granitemoe.py b/src/transformers/models/granitemoe/configuration_granitemoe.py
index e0807b7795257b..9ef029f95c3e0c 100644
--- a/src/transformers/models/granitemoe/configuration_granitemoe.py
+++ b/src/transformers/models/granitemoe/configuration_granitemoe.py
@@ -189,3 +189,6 @@ def __init__(
         )

         rope_config_validation(self)
+
+
+__all__ = ["GraniteMoeConfig"]
diff --git a/src/transformers/models/granitemoe/modeling_granitemoe.py b/src/transformers/models/granitemoe/modeling_granitemoe.py
index 9f5fdeea07d4b1..0da133e7e96745 100644
--- a/src/transformers/models/granitemoe/modeling_granitemoe.py
+++ b/src/transformers/models/granitemoe/modeling_granitemoe.py
@@ -1396,3 +1396,6 @@ def _reorder_cache(past_key_values, beam_idx):
                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
             )
         return reordered_past
+
+
+__all__ = ["GraniteMoeForCausalLM", "GraniteMoeModel", "GraniteMoePreTrainedModel"]
diff --git a/src/transformers/models/grounding_dino/__init__.py b/src/transformers/models/grounding_dino/__init__.py
index 7cd3e115e15d57..15169ed7f8dee4 100644
--- a/src/transformers/models/grounding_dino/__init__.py
+++ b/src/transformers/models/grounding_dino/__init__.py
@@ -11,65 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
-
-
-_import_structure = {
-    "configuration_grounding_dino": ["GroundingDinoConfig"],
-    "processing_grounding_dino": ["GroundingDinoProcessor"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_grounding_dino"] = [
-        "GroundingDinoForObjectDetection",
-        "GroundingDinoModel",
-        "GroundingDinoPreTrainedModel",
-    ]
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_grounding_dino"] = ["GroundingDinoImageProcessor"]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_grounding_dino import (
-        GroundingDinoConfig,
-    )
-    from .processing_grounding_dino import GroundingDinoProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_grounding_dino import (
-            GroundingDinoForObjectDetection,
-            GroundingDinoModel,
-            GroundingDinoPreTrainedModel,
-        )
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_grounding_dino import GroundingDinoImageProcessor
-
+    from .configuration_grounding_dino import *
+    from .image_processing_grounding_dino import *
+    from .modeling_grounding_dino import *
+    from .processing_grounding_dino import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/grounding_dino/configuration_grounding_dino.py b/src/transformers/models/grounding_dino/configuration_grounding_dino.py
index 362e50a1c1cc68..ca8960ee9a96ec 100644
--- a/src/transformers/models/grounding_dino/configuration_grounding_dino.py
+++ b/src/transformers/models/grounding_dino/configuration_grounding_dino.py
@@ -293,3 +293,6 @@ def num_attention_heads(self) -> int:
     @property
     def hidden_size(self) -> int:
         return self.d_model
+
+
+__all__ = ["GroundingDinoConfig"]
diff --git a/src/transformers/models/grounding_dino/image_processing_grounding_dino.py b/src/transformers/models/grounding_dino/image_processing_grounding_dino.py
index 569e22ba470007..23f4b719698cb9 100644
--- a/src/transformers/models/grounding_dino/image_processing_grounding_dino.py
+++ b/src/transformers/models/grounding_dino/image_processing_grounding_dino.py
@@ -1586,3 +1586,6 @@ def post_process_object_detection(
             results.append({"scores": score, "labels": label, "boxes": box})

         return results
+
+
+__all__ = ["GroundingDinoImageProcessor"]
diff --git a/src/transformers/models/grounding_dino/modeling_grounding_dino.py b/src/transformers/models/grounding_dino/modeling_grounding_dino.py
index 9c01ce19f32399..4a101b1d93b4f7 100644
--- a/src/transformers/models/grounding_dino/modeling_grounding_dino.py
+++ b/src/transformers/models/grounding_dino/modeling_grounding_dino.py
@@ -2671,3 +2671,6 @@ def forward(
         )

         return dict_outputs
+
+
+__all__ = ["GroundingDinoForObjectDetection", "GroundingDinoModel", "GroundingDinoPreTrainedModel"]
diff --git a/src/transformers/models/grounding_dino/processing_grounding_dino.py b/src/transformers/models/grounding_dino/processing_grounding_dino.py
index 2b576992851884..9dbcea643280b8 100644
--- a/src/transformers/models/grounding_dino/processing_grounding_dino.py
+++ b/src/transformers/models/grounding_dino/processing_grounding_dino.py
@@ -235,3 +235,6 @@ def post_process_grounded_object_detection(
             results.append({"scores": score, "labels": label, "boxes": box})

         return results
+
+
+__all__ = ["GroundingDinoProcessor"]
diff --git a/src/transformers/models/groupvit/__init__.py b/src/transformers/models/groupvit/__init__.py
index 98fc6f4eccef08..ab7fa27d09d165 100644
--- a/src/transformers/models/groupvit/__init__.py
+++ b/src/transformers/models/groupvit/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,79 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {
-    "configuration_groupvit": [
-        "GroupViTConfig",
-        "GroupViTOnnxConfig",
-        "GroupViTTextConfig",
-        "GroupViTVisionConfig",
-    ],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_groupvit"] = [
-        "GroupViTModel",
-        "GroupViTPreTrainedModel",
-        "GroupViTTextModel",
-        "GroupViTVisionModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_groupvit"] = [
-        "TFGroupViTModel",
-        "TFGroupViTPreTrainedModel",
-        "TFGroupViTTextModel",
-        "TFGroupViTVisionModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_groupvit import (
-        GroupViTConfig,
-        GroupViTOnnxConfig,
-        GroupViTTextConfig,
-        GroupViTVisionConfig,
-    )
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_groupvit import (
-            GroupViTModel,
-            GroupViTPreTrainedModel,
-            GroupViTTextModel,
-            GroupViTVisionModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_groupvit import (
-            TFGroupViTModel,
-            TFGroupViTPreTrainedModel,
-            TFGroupViTTextModel,
-            TFGroupViTVisionModel,
-        )
-
+    from .configuration_groupvit import *
+    from .modeling_groupvit import *
+    from .modeling_tf_groupvit import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/groupvit/configuration_groupvit.py b/src/transformers/models/groupvit/configuration_groupvit.py
index e85e4fc9184371..e55346f503546b 100644
--- a/src/transformers/models/groupvit/configuration_groupvit.py
+++ b/src/transformers/models/groupvit/configuration_groupvit.py
@@ -413,3 +413,6 @@ def generate_dummy_inputs(
     @property
     def default_onnx_opset(self) -> int:
         return 14
+
+
+__all__ = ["GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig"]
diff --git a/src/transformers/models/groupvit/modeling_groupvit.py b/src/transformers/models/groupvit/modeling_groupvit.py
index 3a2ccab8429efa..c2f38ef3dcbe59 100644
--- a/src/transformers/models/groupvit/modeling_groupvit.py
+++ b/src/transformers/models/groupvit/modeling_groupvit.py
@@ -1584,3 +1584,6 @@ def forward(
             text_model_output=text_outputs,
             vision_model_output=vision_outputs,
         )
+
+
+__all__ = ["GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel"]
diff --git a/src/transformers/models/groupvit/modeling_tf_groupvit.py b/src/transformers/models/groupvit/modeling_tf_groupvit.py
index b5838a5264f69d..7c6b3d05f32574 100644
--- a/src/transformers/models/groupvit/modeling_tf_groupvit.py
+++ b/src/transformers/models/groupvit/modeling_tf_groupvit.py
@@ -2136,3 +2136,6 @@ def build(self, input_shape=None):
         if getattr(self, "groupvit", None) is not None:
             with tf.name_scope(self.groupvit.name):
                 self.groupvit.build(None)
+
+
+__all__ = ["TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel"]
diff --git a/src/transformers/models/herbert/__init__.py b/src/transformers/models/herbert/__init__.py
index 54037995229f82..e0d0794a06e8cf 100644
--- a/src/transformers/models/herbert/__init__.py
+++ b/src/transformers/models/herbert/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,35 +11,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
-
-
-_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .tokenization_herbert import HerbertTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_herbert_fast import HerbertTokenizerFast
-
+    from .tokenization_herbert import *
+    from .tokenization_herbert_fast import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/herbert/tokenization_herbert.py b/src/transformers/models/herbert/tokenization_herbert.py
index bb078d4dde6db6..806bf95af8d299 100644
--- a/src/transformers/models/herbert/tokenization_herbert.py
+++ b/src/transformers/models/herbert/tokenization_herbert.py
@@ -642,3 +642,6 @@ def __setstate__(self, d):
             )

         self.sm = sacremoses
+
+
+__all__ = ["HerbertTokenizer"]
diff --git a/src/transformers/models/herbert/tokenization_herbert_fast.py b/src/transformers/models/herbert/tokenization_herbert_fast.py
index 4cd5db58f1b93a..6b2569307fe71d 100644
--- a/src/transformers/models/herbert/tokenization_herbert_fast.py
+++ b/src/transformers/models/herbert/tokenization_herbert_fast.py
@@ -156,3 +156,6 @@ def create_token_type_ids_from_sequences(
     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
         files = self._tokenizer.model.save(save_directory, name=filename_prefix)
         return tuple(files)
+
+
+__all__ = ["HerbertTokenizerFast"]
diff --git a/src/transformers/models/hiera/__init__.py b/src/transformers/models/hiera/__init__.py
index aeda2baf565339..841f13be4c0d2f 100644
--- a/src/transformers/models/hiera/__init__.py
+++ b/src/transformers/models/hiera/__init__.py
@@ -13,47 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {"configuration_hiera": ["HieraConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_hiera"] = [
-        "HieraForImageClassification",
-        "HieraForPreTraining",
-        "HieraBackbone",
-        "HieraModel",
-        "HieraPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_hiera import HieraConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_hiera import (
-            HieraBackbone,
-            HieraForImageClassification,
-            HieraForPreTraining,
-            HieraModel,
-            HieraPreTrainedModel,
-        )
-
+    from .configuration_hiera import *
+    from .modeling_hiera import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/hiera/configuration_hiera.py b/src/transformers/models/hiera/configuration_hiera.py
index 0412e02be7a33e..ebd6fe594c0bd7 100644
--- a/src/transformers/models/hiera/configuration_hiera.py
+++ b/src/transformers/models/hiera/configuration_hiera.py
@@ -189,3 +189,6 @@ def __init__(
         self._out_features, self._out_indices = get_aligned_output_features_output_indices(
             out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
         )
+
+
+__all__ = ["HieraConfig"]
diff --git a/src/transformers/models/hiera/modeling_hiera.py b/src/transformers/models/hiera/modeling_hiera.py
index de327eb91d2d7d..dd602e9f048a51 100644
--- a/src/transformers/models/hiera/modeling_hiera.py
+++ b/src/transformers/models/hiera/modeling_hiera.py
@@ -1568,3 +1568,6 @@ def forward(
             hidden_states=outputs[1] if output_hidden_states else None,
             attentions=outputs[2] if output_attentions else None,
         )
+
+
+__all__ = ["HieraForImageClassification", "HieraForPreTraining", "HieraBackbone", "HieraModel", "HieraPreTrainedModel"]
diff --git a/src/transformers/models/hubert/__init__.py b/src/transformers/models/hubert/__init__.py
index 30331ed0d146a4..d975dabc689a73 100644
--- a/src/transformers/models/hubert/__init__.py
+++ b/src/transformers/models/hubert/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,67 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {"configuration_hubert": ["HubertConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_hubert"] = [
-        "HubertForCTC",
-        "HubertForSequenceClassification",
-        "HubertModel",
-        "HubertPreTrainedModel",
-    ]
-
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_hubert"] = [
-        "TFHubertForCTC",
-        "TFHubertModel",
-        "TFHubertPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_hubert import HubertConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_hubert import (
-            HubertForCTC,
-            HubertForSequenceClassification,
-            HubertModel,
-            HubertPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_hubert import (
-            TFHubertForCTC,
-            TFHubertModel,
-            TFHubertPreTrainedModel,
-        )
-
-
+    from .configuration_hubert import *
+    from .modeling_hubert import *
+    from .modeling_tf_hubert import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/hubert/configuration_hubert.py b/src/transformers/models/hubert/configuration_hubert.py
index 9f488b19888957..bb5dae7f2cb2e0 100644
--- a/src/transformers/models/hubert/configuration_hubert.py
+++ b/src/transformers/models/hubert/configuration_hubert.py
@@ -260,3 +260,6 @@ def __init__(
     @property
     def inputs_to_logits_ratio(self):
         return functools.reduce(operator.mul, self.conv_stride, 1)
+
+
+__all__ = ["HubertConfig"]
diff --git a/src/transformers/models/hubert/modeling_hubert.py b/src/transformers/models/hubert/modeling_hubert.py
index 03904a6abfa08b..1891d5d505745b 100755
--- a/src/transformers/models/hubert/modeling_hubert.py
+++ b/src/transformers/models/hubert/modeling_hubert.py
@@ -1650,3 +1650,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["HubertForCTC", "HubertForSequenceClassification", "HubertModel", "HubertPreTrainedModel"]
diff --git a/src/transformers/models/hubert/modeling_tf_hubert.py b/src/transformers/models/hubert/modeling_tf_hubert.py
index 2adfeea5b8b883..8971656365b702 100644
--- a/src/transformers/models/hubert/modeling_tf_hubert.py
+++ b/src/transformers/models/hubert/modeling_tf_hubert.py
@@ -1670,3 +1670,6 @@ def build(self, input_shape=None):
         if getattr(self, "lm_head", None) is not None:
             with tf.name_scope(self.lm_head.name):
                 self.lm_head.build([None, None, self.output_hidden_size])
+
+
+__all__ = ["TFHubertForCTC", "TFHubertModel", "TFHubertPreTrainedModel"]
diff --git a/src/transformers/models/ibert/__init__.py b/src/transformers/models/ibert/__init__.py
index 3b147e414c2edf..cf34ec43ac1014 100644
--- a/src/transformers/models/ibert/__init__.py
+++ b/src/transformers/models/ibert/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,50 +11,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
-
-
-_import_structure = {"configuration_ibert": ["IBertConfig", "IBertOnnxConfig"]}
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_ibert"] = [
-        "IBertForMaskedLM",
-        "IBertForMultipleChoice",
-        "IBertForQuestionAnswering",
-        "IBertForSequenceClassification",
-        "IBertForTokenClassification",
-        "IBertModel",
-        "IBertPreTrainedModel",
-    ]

 if TYPE_CHECKING:
-    from .configuration_ibert import IBertConfig, IBertOnnxConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_ibert import (
-            IBertForMaskedLM,
-            IBertForMultipleChoice,
-            IBertForQuestionAnswering,
-            IBertForSequenceClassification,
-            IBertForTokenClassification,
-            IBertModel,
-            IBertPreTrainedModel,
-        )
-
+    from .configuration_ibert import *
+    from .modeling_ibert import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/ibert/configuration_ibert.py b/src/transformers/models/ibert/configuration_ibert.py
index 9af660669d0547..6ddc344b9e10ca 100644
--- a/src/transformers/models/ibert/configuration_ibert.py
+++ b/src/transformers/models/ibert/configuration_ibert.py
@@ -137,3 +137,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
                 ("attention_mask", dynamic_axis),
             ]
         )
+
+
+__all__ = ["IBertConfig", "IBertOnnxConfig"]
diff --git a/src/transformers/models/ibert/modeling_ibert.py b/src/transformers/models/ibert/modeling_ibert.py
index 311bb4a39fb744..a1470d6a3b7632 100644
--- a/src/transformers/models/ibert/modeling_ibert.py
+++ b/src/transformers/models/ibert/modeling_ibert.py
@@ -1353,3 +1353,14 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l
     mask = input_ids.ne(padding_idx).int()
     incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
     return incremental_indices.long() + padding_idx
+
+
+__all__ = [
+    "IBertForMaskedLM",
+    "IBertForMultipleChoice",
+    "IBertForQuestionAnswering",
+    "IBertForSequenceClassification",
+    "IBertForTokenClassification",
+    "IBertModel",
+    "IBertPreTrainedModel",
+]
diff --git a/src/transformers/models/idefics/__init__.py b/src/transformers/models/idefics/__init__.py
index 3b32064789cabe..4adb66825445f2 100644
--- a/src/transformers/models/idefics/__init__.py
+++ b/src/transformers/models/idefics/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,87 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_tf_available,
-    is_torch_available,
-    is_vision_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {"configuration_idefics": ["IdeficsConfig"]}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_idefics"] = ["IdeficsImageProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_idefics"] = [
-        "IdeficsForVisionText2Text",
-        "IdeficsModel",
-        "IdeficsPreTrainedModel",
-    ]
-    _import_structure["processing_idefics"] = ["IdeficsProcessor"]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_idefics"] = [
-        "TFIdeficsForVisionText2Text",
-        "TFIdeficsModel",
-        "TFIdeficsPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_idefics import IdeficsConfig
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_idefics import IdeficsImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_idefics import (
-            IdeficsForVisionText2Text,
-            IdeficsModel,
-            IdeficsPreTrainedModel,
-        )
-        from .processing_idefics import IdeficsProcessor
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_idefics import (
-            TFIdeficsForVisionText2Text,
-            TFIdeficsModel,
-            TFIdeficsPreTrainedModel,
-        )
-
+    from .configuration_idefics import *
+    from .image_processing_idefics import *
+    from .modeling_idefics import *
+    from .modeling_tf_idefics import *
+    from .processing_idefics import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/idefics/configuration_idefics.py b/src/transformers/models/idefics/configuration_idefics.py
index e34a5764400196..e8320b98725d0b 100644
--- a/src/transformers/models/idefics/configuration_idefics.py
+++ b/src/transformers/models/idefics/configuration_idefics.py
@@ -320,3 +320,6 @@ def __init__(
         # updates the config object with `kwargs` from from_pretrained, so during the instantiation
         # of this object many attributes have default values and haven't yet been overridden.
         # Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run.
+
+
+__all__ = ["IdeficsConfig"]
diff --git a/src/transformers/models/idefics/image_processing_idefics.py b/src/transformers/models/idefics/image_processing_idefics.py
index f4998020daf642..2b317da05d498b 100644
--- a/src/transformers/models/idefics/image_processing_idefics.py
+++ b/src/transformers/models/idefics/image_processing_idefics.py
@@ -166,3 +166,6 @@ def preprocess(
         images = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)["pixel_values"]

         return images
+
+
+__all__ = ["IdeficsImageProcessor"]
diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py
index 8bd24728b03885..5977e1ea659a30 100644
--- a/src/transformers/models/idefics/modeling_idefics.py
+++ b/src/transformers/models/idefics/modeling_idefics.py
@@ -1748,3 +1748,6 @@ def _reorder_cache(past, beam_idx):
         for layer_past in past:
             reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
         return reordered_past
+
+
+__all__ = ["IdeficsForVisionText2Text", "IdeficsModel", "IdeficsPreTrainedModel"]
diff --git a/src/transformers/models/idefics/modeling_tf_idefics.py b/src/transformers/models/idefics/modeling_tf_idefics.py
index c5ce2935d33128..f412d28aa80f69 100644
--- a/src/transformers/models/idefics/modeling_tf_idefics.py
+++ b/src/transformers/models/idefics/modeling_tf_idefics.py
@@ -1810,3 +1810,6 @@ def build(self, input_shape=None):
         if getattr(self, "lm_head", None) is not None:
             with tf.name_scope(self.lm_head.name):
                 self.lm_head.build(None)
+
+
+__all__ = ["TFIdeficsForVisionText2Text", "TFIdeficsModel", "TFIdeficsPreTrainedModel"]
diff --git a/src/transformers/models/idefics/processing_idefics.py b/src/transformers/models/idefics/processing_idefics.py
index ca6e4702d3173e..444a970e5a3606 100644
--- a/src/transformers/models/idefics/processing_idefics.py
+++ b/src/transformers/models/idefics/processing_idefics.py
@@ -539,3 +539,6 @@ def model_input_names(self):
         tokenizer_input_names = self.tokenizer.model_input_names
         image_processor_input_names = self.image_processor.model_input_names
         return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+
+__all__ = ["IdeficsProcessor"]
diff --git a/src/transformers/models/idefics2/__init__.py b/src/transformers/models/idefics2/__init__.py
index 1d8d3e4b571df2..b76a53a081fe9a 100644
--- a/src/transformers/models/idefics2/__init__.py
+++ b/src/transformers/models/idefics2/__init__.py
@@ -13,60 +13,17 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {"configuration_idefics2": ["Idefics2Config"]}
-
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_idefics2"] = ["Idefics2ImageProcessor"]
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_idefics2"] = [
-        "Idefics2ForConditionalGeneration",
-        "Idefics2PreTrainedModel",
-        "Idefics2Model",
-    ]
-    _import_structure["processing_idefics2"] = ["Idefics2Processor"]
-
 if TYPE_CHECKING:
-    from .configuration_idefics2 import Idefics2Config
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_idefics2 import Idefics2ImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_idefics2 import (
-            Idefics2ForConditionalGeneration,
-            Idefics2Model,
-            Idefics2PreTrainedModel,
-        )
-        from .processing_idefics2 import Idefics2Processor
-
-
+    from .configuration_idefics2 import *
+    from .image_processing_idefics2 import *
+    from .modeling_idefics2 import *
+    from .processing_idefics2 import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/idefics2/configuration_idefics2.py b/src/transformers/models/idefics2/configuration_idefics2.py
index 408d374c77f7eb..2f0376a8952db1 100644
--- a/src/transformers/models/idefics2/configuration_idefics2.py
+++ b/src/transformers/models/idefics2/configuration_idefics2.py
@@ -259,3 +259,6 @@ def __init__(
         )

         super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings)
+
+
+__all__ = ["Idefics2Config"]
diff --git a/src/transformers/models/idefics2/image_processing_idefics2.py b/src/transformers/models/idefics2/image_processing_idefics2.py
index ce0032f80c5ece..22ef9c2eac6e48 100644
--- a/src/transformers/models/idefics2/image_processing_idefics2.py
+++ b/src/transformers/models/idefics2/image_processing_idefics2.py
@@ -595,3 +595,6 @@ def preprocess(
             data["pixel_attention_mask"] = np.array(pixel_attention_mask) if do_pad else pixel_attention_mask

         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["Idefics2ImageProcessor"]
diff --git a/src/transformers/models/idefics2/modeling_idefics2.py b/src/transformers/models/idefics2/modeling_idefics2.py
index 3d46c3bd82e788..a4062f03786942 100644
--- a/src/transformers/models/idefics2/modeling_idefics2.py
+++ b/src/transformers/models/idefics2/modeling_idefics2.py
@@ -1753,3 +1753,6 @@ def _reorder_cache(past_key_values, beam_idx):
                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
             )
         return reordered_past
+
+
+__all__ = ["Idefics2ForConditionalGeneration", "Idefics2PreTrainedModel", "Idefics2Model"]
diff --git a/src/transformers/models/idefics2/processing_idefics2.py b/src/transformers/models/idefics2/processing_idefics2.py
index f99c1bda474568..8c1647cdff41a7 100644
--- a/src/transformers/models/idefics2/processing_idefics2.py
+++ b/src/transformers/models/idefics2/processing_idefics2.py
@@ -278,3 +278,6 @@ def model_input_names(self):
         tokenizer_input_names = self.tokenizer.model_input_names
         image_processor_input_names = self.image_processor.model_input_names
         return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+
+__all__ = ["Idefics2Processor"]
diff --git a/src/transformers/models/idefics3/__init__.py b/src/transformers/models/idefics3/__init__.py
index cec07ca6f5e2d3..34bf9c962b6542 100644
--- a/src/transformers/models/idefics3/__init__.py
+++ b/src/transformers/models/idefics3/__init__.py
@@ -13,62 +13,17 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {"configuration_idefics3": ["Idefics3Config", "Idefics3VisionConfig"]}
-
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_idefics3"] = ["Idefics3ImageProcessor"]
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_idefics3"] = [
-        "Idefics3ForConditionalGeneration",
-        "Idefics3PreTrainedModel",
-        "Idefics3Model",
-        "Idefics3VisionTransformer",
-    ]
-    _import_structure["processing_idefics3"] = ["Idefics3Processor"]
-
 if TYPE_CHECKING:
-    from .configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_idefics3 import Idefics3ImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_idefics3 import (
-            Idefics3ForConditionalGeneration,
-            Idefics3Model,
-            Idefics3PreTrainedModel,
-            Idefics3VisionTransformer,
-        )
-        from .processing_idefics3 import Idefics3Processor
-
-
+    from .configuration_idefics3 import *
+    from .image_processing_idefics3 import *
+    from .modeling_idefics3 import *
+    from .processing_idefics3 import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/idefics3/configuration_idefics3.py b/src/transformers/models/idefics3/configuration_idefics3.py
index 0d385b0ee48dec..01c96afcaa47bb 100644
--- a/src/transformers/models/idefics3/configuration_idefics3.py
+++ b/src/transformers/models/idefics3/configuration_idefics3.py
@@ -185,3 +185,6 @@ def __init__(
         self.scale_factor = scale_factor

         super().__init__(**kwargs, pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings)
+
+
+__all__ = ["Idefics3Config", "Idefics3VisionConfig"]
diff --git a/src/transformers/models/idefics3/image_processing_idefics3.py b/src/transformers/models/idefics3/image_processing_idefics3.py
index f9161416656cd7..9005c38af6c072 100644
--- a/src/transformers/models/idefics3/image_processing_idefics3.py
+++ b/src/transformers/models/idefics3/image_processing_idefics3.py
@@ -883,3 +883,6 @@ def preprocess(
             encoding["cols"] = images_list_cols

         return encoding
+
+
+__all__ = ["Idefics3ImageProcessor"]
diff --git a/src/transformers/models/idefics3/modeling_idefics3.py b/src/transformers/models/idefics3/modeling_idefics3.py
index 31d43948fbd565..f80ad51909728f 100644
--- a/src/transformers/models/idefics3/modeling_idefics3.py
+++ b/src/transformers/models/idefics3/modeling_idefics3.py
@@ -1316,3 +1316,6 @@ def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_
         # Get the precomputed image_hidden_states
         model_kwargs["image_hidden_states"] = outputs.image_hidden_states
         return model_kwargs
+
+
+__all__ = ["Idefics3ForConditionalGeneration", "Idefics3PreTrainedModel", "Idefics3Model", "Idefics3VisionTransformer"]
diff --git a/src/transformers/models/idefics3/processing_idefics3.py b/src/transformers/models/idefics3/processing_idefics3.py
index 872f5206f20175..bbc3da39a9f48f 100644
--- a/src/transformers/models/idefics3/processing_idefics3.py
+++ b/src/transformers/models/idefics3/processing_idefics3.py
@@ -347,3 +347,6 @@ def model_input_names(self):
         tokenizer_input_names = self.tokenizer.model_input_names
         image_processor_input_names = self.image_processor.model_input_names
         return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+
+__all__ = ["Idefics3Processor"]
diff --git a/src/transformers/models/ijepa/__init__.py b/src/transformers/models/ijepa/__init__.py
index efc8c90b17628d..8923af1de11621 100644
--- a/src/transformers/models/ijepa/__init__.py
+++ b/src/transformers/models/ijepa/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,43 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {"configuration_ijepa": ["IJepaConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_ijepa"] = [
-        "IJepaForImageClassification",
-        "IJepaModel",
-        "IJepaPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_ijepa import IJepaConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_ijepa import (
-            IJepaForImageClassification,
-            IJepaModel,
-            IJepaPreTrainedModel,
-        )
-
+    from .configuration_ijepa import *
+    from .modeling_ijepa import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/ijepa/configuration_ijepa.py b/src/transformers/models/ijepa/configuration_ijepa.py
index 26378e6e81d9ce..338532e68d9e0f 100644
--- a/src/transformers/models/ijepa/configuration_ijepa.py
+++ b/src/transformers/models/ijepa/configuration_ijepa.py
@@ -106,3 +106,6 @@ def __init__(
         self.patch_size = patch_size
         self.num_channels = num_channels
         self.qkv_bias = qkv_bias
+
+
+__all__ = ["IJepaConfig"]
diff --git a/src/transformers/models/imagegpt/__init__.py b/src/transformers/models/imagegpt/__init__.py
index a64dd9affdbe35..cb79cea50d6e4b 100644
--- a/src/transformers/models/imagegpt/__init__.py
+++ b/src/transformers/models/imagegpt/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,65 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
-
-
-_import_structure = {"configuration_imagegpt": ["ImageGPTConfig", "ImageGPTOnnxConfig"]}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["feature_extraction_imagegpt"] = ["ImageGPTFeatureExtractor"]
-    _import_structure["image_processing_imagegpt"] = ["ImageGPTImageProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_imagegpt"] = [
-        "ImageGPTForCausalImageModeling",
-        "ImageGPTForImageClassification",
-        "ImageGPTModel",
-        "ImageGPTPreTrainedModel",
-        "load_tf_weights_in_imagegpt",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_imagegpt import ImageGPTConfig, ImageGPTOnnxConfig
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .feature_extraction_imagegpt import ImageGPTFeatureExtractor
-        from .image_processing_imagegpt import ImageGPTImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_imagegpt import (
-            ImageGPTForCausalImageModeling,
-            ImageGPTForImageClassification,
-            ImageGPTModel,
-            ImageGPTPreTrainedModel,
-            load_tf_weights_in_imagegpt,
-        )
-
+    from .configuration_imagegpt import *
+    from .feature_extraction_imagegpt import *
+    from .image_processing_imagegpt import *
+    from .modeling_imagegpt import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/imagegpt/configuration_imagegpt.py b/src/transformers/models/imagegpt/configuration_imagegpt.py
index c54c11491cb5f9..1a4e238b0e54cf 100644
--- a/src/transformers/models/imagegpt/configuration_imagegpt.py
+++ b/src/transformers/models/imagegpt/configuration_imagegpt.py
@@ -194,3 +194,6 @@ def generate_dummy_inputs(
         inputs = dict(preprocessor(images=input_image, return_tensors=framework))

         return inputs
+
+
+__all__ = ["ImageGPTConfig", "ImageGPTOnnxConfig"]
diff --git a/src/transformers/models/imagegpt/feature_extraction_imagegpt.py b/src/transformers/models/imagegpt/feature_extraction_imagegpt.py
index 1780926bbf24c0..15ecddd307c5fb 100644
--- a/src/transformers/models/imagegpt/feature_extraction_imagegpt.py
+++ b/src/transformers/models/imagegpt/feature_extraction_imagegpt.py
@@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None:
             FutureWarning,
         )
         super().__init__(*args, **kwargs)
+
+
+__all__ = ["ImageGPTFeatureExtractor"]
diff --git a/src/transformers/models/imagegpt/image_processing_imagegpt.py b/src/transformers/models/imagegpt/image_processing_imagegpt.py
index 47fb0f6056edaa..cca6c8a14b21e0 100644
--- a/src/transformers/models/imagegpt/image_processing_imagegpt.py
+++ b/src/transformers/models/imagegpt/image_processing_imagegpt.py
@@ -297,3 +297,6 @@ def preprocess(
         data = {"input_ids": images}

         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["ImageGPTImageProcessor"]
diff --git a/src/transformers/models/imagegpt/modeling_imagegpt.py b/src/transformers/models/imagegpt/modeling_imagegpt.py
index 8031950bc9b10a..16b41ef60fce36 100755
--- a/src/transformers/models/imagegpt/modeling_imagegpt.py
+++ b/src/transformers/models/imagegpt/modeling_imagegpt.py
@@ -1163,3 +1163,12 @@ def forward(
             hidden_states=transformer_outputs.hidden_states,
             attentions=transformer_outputs.attentions,
         )
+
+
+__all__ = [
+    "ImageGPTForCausalImageModeling",
+    "ImageGPTForImageClassification",
+    "ImageGPTModel",
+    "ImageGPTPreTrainedModel",
+    "load_tf_weights_in_imagegpt",
+]
diff --git a/src/transformers/models/informer/__init__.py b/src/transformers/models/informer/__init__.py
index fba309ee2b52b1..cd7a901eab235e 100644
--- a/src/transformers/models/informer/__init__.py
+++ b/src/transformers/models/informer/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,43 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-# rely on isort to merge the imports
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
-
-
-_import_structure = {
-    "configuration_informer": ["InformerConfig"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_informer"] = [
-        "InformerForPrediction",
-        "InformerModel",
-        "InformerPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_informer import InformerConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_informer import (
-            InformerForPrediction,
-            InformerModel,
-            InformerPreTrainedModel,
-        )
-
+    from .configuration_informer import *
+    from .modeling_informer import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/informer/configuration_informer.py b/src/transformers/models/informer/configuration_informer.py
index d933ac6fd530fe..028f5b32295e37 100644
--- a/src/transformers/models/informer/configuration_informer.py
+++ b/src/transformers/models/informer/configuration_informer.py
@@ -244,3 +244,6 @@ def _number_of_features(self) -> int:
             + self.num_static_real_features
             + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
         )
+
+
+__all__ = ["InformerConfig"]
diff --git a/src/transformers/models/informer/modeling_informer.py b/src/transformers/models/informer/modeling_informer.py
index 6b5507a0155913..f5c856dad6c208 100644
--- a/src/transformers/models/informer/modeling_informer.py
+++ b/src/transformers/models/informer/modeling_informer.py
@@ -2041,3 +2041,6 @@ def generate(
                 (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape,
             )
         )
+
+
+__all__ = ["InformerForPrediction", "InformerModel", "InformerPreTrainedModel"]
diff --git a/src/transformers/models/instructblip/__init__.py b/src/transformers/models/instructblip/__init__.py
index 093b9f00f6fc4d..ed2bd053d2c8b9 100644
--- a/src/transformers/models/instructblip/__init__.py +++ b/src/transformers/models/instructblip/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,53 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_instructblip": [ - "InstructBlipConfig", - "InstructBlipQFormerConfig", - "InstructBlipVisionConfig", - ], - "processing_instructblip": ["InstructBlipProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_instructblip"] = [ - "InstructBlipQFormerModel", - "InstructBlipPreTrainedModel", - "InstructBlipForConditionalGeneration", - "InstructBlipVisionModel", - ] - if TYPE_CHECKING: - from .configuration_instructblip import ( - InstructBlipConfig, - InstructBlipQFormerConfig, - InstructBlipVisionConfig, - ) - from .processing_instructblip import InstructBlipProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_instructblip import ( - InstructBlipForConditionalGeneration, - InstructBlipPreTrainedModel, - InstructBlipQFormerModel, - InstructBlipVisionModel, - ) - + from .configuration_instructblip import * + from .modeling_instructblip import * + from .processing_instructblip import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/instructblip/configuration_instructblip.py b/src/transformers/models/instructblip/configuration_instructblip.py index 6124dba3a08efe..d4f15ab0a58e5f 100644 --- a/src/transformers/models/instructblip/configuration_instructblip.py +++ b/src/transformers/models/instructblip/configuration_instructblip.py @@ -334,3 +334,6 @@ def from_vision_qformer_text_configs( text_config=text_config.to_dict(), **kwargs, ) + + +__all__ = ["InstructBlipConfig", "InstructBlipQFormerConfig", "InstructBlipVisionConfig"] diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py index acce24cc42f5d8..163f978ba25da4 100644 --- a/src/transformers/models/instructblip/modeling_instructblip.py +++ b/src/transformers/models/instructblip/modeling_instructblip.py @@ -1633,3 +1633,11 @@ def generate( outputs = self.language_model.generate(**inputs, **generate_kwargs) return outputs + + +__all__ = [ + "InstructBlipQFormerModel", + "InstructBlipPreTrainedModel", + "InstructBlipForConditionalGeneration", + "InstructBlipVisionModel", +] diff --git a/src/transformers/models/instructblip/processing_instructblip.py b/src/transformers/models/instructblip/processing_instructblip.py index a96d97fb07e1d9..9a46b9787552ff 100644 --- a/src/transformers/models/instructblip/processing_instructblip.py +++ b/src/transformers/models/instructblip/processing_instructblip.py @@ -218,3 
+218,6 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer") processor.qformer_tokenizer = qformer_tokenizer return processor + + +__all__ = ["InstructBlipProcessor"] diff --git a/src/transformers/models/jamba/__init__.py b/src/transformers/models/jamba/__init__.py index f6b7c2137b209c..8789007ad0bc8f 100644 --- a/src/transformers/models/jamba/__init__.py +++ b/src/transformers/models/jamba/__init__.py @@ -13,46 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_jamba": ["JambaConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_jamba"] = [ - "JambaForCausalLM", - "JambaForSequenceClassification", - "JambaModel", - "JambaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_jamba import JambaConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_jamba import ( - JambaForCausalLM, - JambaForSequenceClassification, - JambaModel, - JambaPreTrainedModel, - ) - - + from .configuration_jamba import * + from .modeling_jamba import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/jamba/configuration_jamba.py b/src/transformers/models/jamba/configuration_jamba.py index 3aabe979d8ef2f..185380383e207a 100644 --- a/src/transformers/models/jamba/configuration_jamba.py +++ b/src/transformers/models/jamba/configuration_jamba.py @@ -231,3 +231,6 @@ def _check_supported_offset(self, property_: str, period: int, offset: int): raise ValueError( f"{property_} layer offset ({offset}) must be smaller than {property_} layer period ({period})" ) + + +__all__ = ["JambaConfig"] diff --git a/src/transformers/models/jamba/modeling_jamba.py b/src/transformers/models/jamba/modeling_jamba.py index a185d5ebc6e86c..f5292b3dff242b 100755 --- a/src/transformers/models/jamba/modeling_jamba.py +++ b/src/transformers/models/jamba/modeling_jamba.py @@ -1704,3 +1704,6 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = ["JambaForCausalLM", "JambaForSequenceClassification", "JambaModel", "JambaPreTrainedModel"] diff --git a/src/transformers/models/jetmoe/__init__.py b/src/transformers/models/jetmoe/__init__.py index 48ac583a6aea38..7058590acc8859 100644 --- a/src/transformers/models/jetmoe/__init__.py +++ b/src/transformers/models/jetmoe/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 JetMoe AI and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,44 +13,15 @@ # limitations under the License. 
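Every `__init__.py` converted in this patch (jetmoe, below, included) collapses to the same template, shown here as a minimal sketch with `somemodel` standing in for the real submodule names:

# The template each converted __init__.py in this patch follows.
# `somemodel` is a placeholder; substitute the package's real modules.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Type checkers and IDEs resolve these star imports eagerly, so they
    # see every name listed in each submodule's __all__.
    from .configuration_somemodel import *
    from .modeling_somemodel import *
else:
    import sys

    # At runtime the package is replaced by a lazy proxy: a submodule is
    # only imported when one of its exported names is first accessed.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
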
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_jetmoe": ["JetMoeConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_jetmoe"] = [ - "JetMoeForCausalLM", - "JetMoeModel", - "JetMoePreTrainedModel", - "JetMoeForSequenceClassification", - ] - if TYPE_CHECKING: - from .configuration_jetmoe import JetMoeConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_jetmoe import ( - JetMoeForCausalLM, - JetMoeForSequenceClassification, - JetMoeModel, - JetMoePreTrainedModel, - ) - + from .configuration_jetmoe import * + from .modeling_jetmoe import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/jetmoe/configuration_jetmoe.py b/src/transformers/models/jetmoe/configuration_jetmoe.py index c6913faee1d116..4053c900bf2dc8 100644 --- a/src/transformers/models/jetmoe/configuration_jetmoe.py +++ b/src/transformers/models/jetmoe/configuration_jetmoe.py @@ -147,3 +147,6 @@ def __init__( super().__init__( bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs ) + + +__all__ = ["JetMoeConfig"] diff --git a/src/transformers/models/jetmoe/modeling_jetmoe.py b/src/transformers/models/jetmoe/modeling_jetmoe.py index a4bb1d78fdc5ce..3b572f0ef74008 100644 --- a/src/transformers/models/jetmoe/modeling_jetmoe.py +++ b/src/transformers/models/jetmoe/modeling_jetmoe.py @@ -1449,3 +1449,6 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = ["JetMoeForCausalLM", "JetMoeModel", "JetMoePreTrainedModel", "JetMoeForSequenceClassification"] diff --git a/src/transformers/models/kosmos2/__init__.py b/src/transformers/models/kosmos2/__init__.py index 171a5cc7071e53..51dbdaf40a1050 100644 --- a/src/transformers/models/kosmos2/__init__.py +++ b/src/transformers/models/kosmos2/__init__.py @@ -1,5 +1,4 @@ -# coding=utf-8 -# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,49 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_kosmos2": ["Kosmos2Config"], - "processing_kosmos2": ["Kosmos2Processor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_kosmos2"] = [ - "Kosmos2ForConditionalGeneration", - "Kosmos2Model", - "Kosmos2PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_kosmos2 import Kosmos2Config - from .processing_kosmos2 import Kosmos2Processor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_kosmos2 import ( - Kosmos2ForConditionalGeneration, - Kosmos2Model, - Kosmos2PreTrainedModel, - ) - + from .configuration_kosmos2 import * + from .modeling_kosmos2 import * + from .processing_kosmos2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/kosmos2/configuration_kosmos2.py b/src/transformers/models/kosmos2/configuration_kosmos2.py index 921ec336c0be80..85695e250cf798 100644 --- a/src/transformers/models/kosmos2/configuration_kosmos2.py +++ b/src/transformers/models/kosmos2/configuration_kosmos2.py @@ -259,3 +259,6 @@ def __init__( self.vision_config = Kosmos2VisionConfig(**vision_config) self.latent_query_num = latent_query_num + + +__all__ = ["Kosmos2Config"] diff --git a/src/transformers/models/kosmos2/modeling_kosmos2.py b/src/transformers/models/kosmos2/modeling_kosmos2.py index ffd8277f0268a3..e6662656e7a25b 100644 --- a/src/transformers/models/kosmos2/modeling_kosmos2.py +++ b/src/transformers/models/kosmos2/modeling_kosmos2.py @@ -2109,3 +2109,6 @@ def generate( ) return output + + +__all__ = ["Kosmos2ForConditionalGeneration", "Kosmos2Model", "Kosmos2PreTrainedModel"] diff --git a/src/transformers/models/kosmos2/processing_kosmos2.py b/src/transformers/models/kosmos2/processing_kosmos2.py index d7befd899f3ad3..ab85f593dd9473 100644 --- a/src/transformers/models/kosmos2/processing_kosmos2.py +++ b/src/transformers/models/kosmos2/processing_kosmos2.py @@ -705,3 +705,6 @@ def clean_text_and_extract_entities_with_bboxes(text, num_patches_per_side=32): entities.append(adjusted_entity + (bboxes_in_coords,)) return _cleanup_spaces(processed_text, entities) + + +__all__ = ["Kosmos2Processor"] diff --git a/src/transformers/models/layoutlm/__init__.py b/src/transformers/models/layoutlm/__init__.py index 070b42368ef958..0f079c33c71578 100644 --- a/src/transformers/models/layoutlm/__init__.py +++ b/src/transformers/models/layoutlm/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,106 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_layoutlm": ["LayoutLMConfig", "LayoutLMOnnxConfig"], - "tokenization_layoutlm": ["LayoutLMTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutlm_fast"] = ["LayoutLMTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_layoutlm"] = [ - "LayoutLMForMaskedLM", - "LayoutLMForSequenceClassification", - "LayoutLMForTokenClassification", - "LayoutLMForQuestionAnswering", - "LayoutLMModel", - "LayoutLMPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_layoutlm"] = [ - "TFLayoutLMForMaskedLM", - "TFLayoutLMForSequenceClassification", - "TFLayoutLMForTokenClassification", - "TFLayoutLMForQuestionAnswering", - "TFLayoutLMMainLayer", - "TFLayoutLMModel", - "TFLayoutLMPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_layoutlm import LayoutLMConfig, LayoutLMOnnxConfig - from .tokenization_layoutlm import LayoutLMTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutlm_fast import LayoutLMTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_layoutlm import ( - LayoutLMForMaskedLM, - LayoutLMForQuestionAnswering, - LayoutLMForSequenceClassification, - LayoutLMForTokenClassification, - LayoutLMModel, - LayoutLMPreTrainedModel, - ) - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_layoutlm import ( - TFLayoutLMForMaskedLM, - TFLayoutLMForQuestionAnswering, - TFLayoutLMForSequenceClassification, - TFLayoutLMForTokenClassification, - TFLayoutLMMainLayer, - TFLayoutLMModel, - TFLayoutLMPreTrainedModel, - ) - + from .configuration_layoutlm import * + from .modeling_layoutlm import * + from .modeling_tf_layoutlm import * + from .tokenization_layoutlm import * + from .tokenization_layoutlm_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/layoutlm/configuration_layoutlm.py b/src/transformers/models/layoutlm/configuration_layoutlm.py index 4198bb26e9798f..aebd25d5369088 100644 --- a/src/transformers/models/layoutlm/configuration_layoutlm.py +++ b/src/transformers/models/layoutlm/configuration_layoutlm.py @@ -194,3 +194,6 @@ def generate_dummy_inputs( batch_size, seq_length = input_dict["input_ids"].shape input_dict["bbox"] = torch.tensor([*[box] * seq_length]).tile(batch_size, 1, 1) return input_dict + + +__all__ = ["LayoutLMConfig", "LayoutLMOnnxConfig"] diff --git 
a/src/transformers/models/layoutlm/modeling_layoutlm.py b/src/transformers/models/layoutlm/modeling_layoutlm.py index 55e17bfc586d37..56a3776bde2d6f 100644 --- a/src/transformers/models/layoutlm/modeling_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_layoutlm.py @@ -1373,3 +1373,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LayoutLMForMaskedLM", + "LayoutLMForSequenceClassification", + "LayoutLMForTokenClassification", + "LayoutLMForQuestionAnswering", + "LayoutLMModel", + "LayoutLMPreTrainedModel", +] diff --git a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py index 59aebe15b5d562..43215f6157fa3f 100644 --- a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py @@ -1679,3 +1679,14 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFLayoutLMForMaskedLM", + "TFLayoutLMForSequenceClassification", + "TFLayoutLMForTokenClassification", + "TFLayoutLMForQuestionAnswering", + "TFLayoutLMMainLayer", + "TFLayoutLMModel", + "TFLayoutLMPreTrainedModel", +] diff --git a/src/transformers/models/layoutlm/tokenization_layoutlm.py b/src/transformers/models/layoutlm/tokenization_layoutlm.py index 62fb4c524f22dc..a0b61c93ac1aa8 100644 --- a/src/transformers/models/layoutlm/tokenization_layoutlm.py +++ b/src/transformers/models/layoutlm/tokenization_layoutlm.py @@ -507,3 +507,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["LayoutLMTokenizer"] diff --git a/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py b/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py index db1409dfcab1d0..1df94cb625a368 100644 --- a/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py +++ b/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py @@ -171,3 +171,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["LayoutLMTokenizerFast"] diff --git a/src/transformers/models/layoutlmv2/__init__.py b/src/transformers/models/layoutlmv2/__init__.py index 1c45a9f76abb3a..5b529b9b5af690 100644 --- a/src/transformers/models/layoutlmv2/__init__.py +++ b/src/transformers/models/layoutlmv2/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,92 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
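The hand-maintained `_import_structure` dicts being deleted (layoutlmv2's, next, guards tokenizers, vision and torch separately) are rebuilt at runtime by `define_import_structure`. A rough standalone sketch of the core idea, assuming only that each submodule now ends with a literal `__all__` list; the real implementation in `transformers.utils.import_utils` additionally tracks per-backend requirements, which this sketch ignores:

# Rough sketch of the idea behind define_import_structure, NOT the
# actual implementation: walk the package directory, read each
# submodule's __all__, and rebuild the mapping that used to be written
# out by hand as _import_structure.
import ast
from pathlib import Path


def sketch_import_structure(init_file: str) -> dict[str, list[str]]:
    structure: dict[str, list[str]] = {}
    for path in Path(init_file).parent.glob("*.py"):
        if path.name == "__init__.py":
            continue
        tree = ast.parse(path.read_text(encoding="utf-8"))
        for node in tree.body:
            # Look for a top-level `__all__ = [...]` assignment.
            if isinstance(node, ast.Assign) and any(
                isinstance(target, ast.Name) and target.id == "__all__"
                for target in node.targets
            ):
                structure[path.stem] = list(ast.literal_eval(node.value))
    return structure

For layoutlm, for instance, such a scan would map "modeling_layoutlm" to the six names in the `__all__` added above.
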
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_layoutlmv2": ["LayoutLMv2Config"], - "processing_layoutlmv2": ["LayoutLMv2Processor"], - "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"] - _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_layoutlmv2"] = [ - "LayoutLMv2ForQuestionAnswering", - "LayoutLMv2ForSequenceClassification", - "LayoutLMv2ForTokenClassification", - "LayoutLMv2Layer", - "LayoutLMv2Model", - "LayoutLMv2PreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_layoutlmv2 import LayoutLMv2Config - from .processing_layoutlmv2 import LayoutLMv2Processor - from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_layoutlmv2 import ( - LayoutLMv2ForQuestionAnswering, - LayoutLMv2ForSequenceClassification, - LayoutLMv2ForTokenClassification, - LayoutLMv2Layer, - LayoutLMv2Model, - LayoutLMv2PreTrainedModel, - ) + from .configuration_layoutlmv2 import * + from .feature_extraction_layoutlmv2 import * + from .image_processing_layoutlmv2 import * + from .modeling_layoutlmv2 import * + from .processing_layoutlmv2 import * + from .tokenization_layoutlmv2 import * + from .tokenization_layoutlmv2_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py b/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py index d2a9d37bd12a87..6ffdec66385c59 100644 --- a/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py @@ -217,3 +217,6 @@ def get_detectron2_config(self): setattr(to_set, attributes[-1], v) return detectron2_config + + +__all__ = ["LayoutLMv2Config"] diff --git a/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py b/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py index 
eb1042b7c2849d..74abee4575eafd 100644 --- a/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py @@ -33,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["LayoutLMv2FeatureExtractor"] diff --git a/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py b/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py index c47d58c30c01e1..4f326e36e2de1e 100644 --- a/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py @@ -296,3 +296,6 @@ def preprocess( data["words"] = words_batch data["boxes"] = boxes_batch return data + + +__all__ = ["LayoutLMv2ImageProcessor"] diff --git a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py index 50ef27be3f5201..015e43fd4eccde 100755 --- a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py @@ -1415,3 +1415,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LayoutLMv2ForQuestionAnswering", + "LayoutLMv2ForSequenceClassification", + "LayoutLMv2ForTokenClassification", + "LayoutLMv2Layer", + "LayoutLMv2Model", + "LayoutLMv2PreTrainedModel", +] diff --git a/src/transformers/models/layoutlmv2/processing_layoutlmv2.py b/src/transformers/models/layoutlmv2/processing_layoutlmv2.py index 1edf87465bbf0b..39d34b3a9975c6 100644 --- a/src/transformers/models/layoutlmv2/processing_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/processing_layoutlmv2.py @@ -199,3 +199,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["LayoutLMv2Processor"] diff --git a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py index c5ec79666deede..edd01c47f06ba9 100644 --- a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py @@ -1563,3 +1563,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["LayoutLMv2Tokenizer"] diff --git a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py index a666e3d4ea1a43..90d6904a504225 100644 --- a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py +++ b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py @@ -807,3 +807,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["LayoutLMv2TokenizerFast"] diff --git a/src/transformers/models/layoutlmv3/__init__.py b/src/transformers/models/layoutlmv3/__init__.py index a8ef90906e7a5b..3ed9bd31a63db5 100644 --- a/src/transformers/models/layoutlmv3/__init__.py +++ b/src/transformers/models/layoutlmv3/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -11,128 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_layoutlmv3": [ - "LayoutLMv3Config", - "LayoutLMv3OnnxConfig", - ], - "processing_layoutlmv3": ["LayoutLMv3Processor"], - "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_layoutlmv3"] = [ - "LayoutLMv3ForQuestionAnswering", - "LayoutLMv3ForSequenceClassification", - "LayoutLMv3ForTokenClassification", - "LayoutLMv3Model", - "LayoutLMv3PreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_layoutlmv3"] = [ - "TFLayoutLMv3ForQuestionAnswering", - "TFLayoutLMv3ForSequenceClassification", - "TFLayoutLMv3ForTokenClassification", - "TFLayoutLMv3Model", - "TFLayoutLMv3PreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"] - _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_layoutlmv3 import ( - LayoutLMv3Config, - LayoutLMv3OnnxConfig, - ) - from .processing_layoutlmv3 import LayoutLMv3Processor - from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_layoutlmv3 import ( - LayoutLMv3ForQuestionAnswering, - LayoutLMv3ForSequenceClassification, - LayoutLMv3ForTokenClassification, - LayoutLMv3Model, - LayoutLMv3PreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_layoutlmv3 import ( - TFLayoutLMv3ForQuestionAnswering, - TFLayoutLMv3ForSequenceClassification, - TFLayoutLMv3ForTokenClassification, - TFLayoutLMv3Model, - TFLayoutLMv3PreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor - from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor - + from .configuration_layoutlmv3 import * + from .feature_extraction_layoutlmv3 import * + from .image_processing_layoutlmv3 import * + from .modeling_layoutlmv3 import * + from .modeling_tf_layoutlmv3 
import * + from .processing_layoutlmv3 import * + from .tokenization_layoutlmv3 import * + from .tokenization_layoutlmv3_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py index aa50a3228e8638..a13d7fa7cdeda1 100644 --- a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py @@ -288,3 +288,6 @@ def generate_dummy_inputs( ) return inputs + + +__all__ = ["LayoutLMv3Config", "LayoutLMv3OnnxConfig"] diff --git a/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py b/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py index e120a0ebd07acb..08df81c483dad1 100644 --- a/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py @@ -33,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["LayoutLMv3FeatureExtractor"] diff --git a/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py b/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py index 6f16435c14dde3..11db4ad23db431 100644 --- a/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py @@ -372,3 +372,6 @@ def preprocess( data["words"] = words_batch data["boxes"] = boxes_batch return data + + +__all__ = ["LayoutLMv3ImageProcessor"] diff --git a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py index 629490350c7dc3..9183bb90240cd3 100644 --- a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py @@ -1382,3 +1382,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LayoutLMv3ForQuestionAnswering", + "LayoutLMv3ForSequenceClassification", + "LayoutLMv3ForTokenClassification", + "LayoutLMv3Model", + "LayoutLMv3PreTrainedModel", +] diff --git a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py index 574e14cc91086e..c0762afb49a960 100644 --- a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py @@ -1772,3 +1772,12 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build(None) + + +__all__ = [ + "TFLayoutLMv3ForQuestionAnswering", + "TFLayoutLMv3ForSequenceClassification", + "TFLayoutLMv3ForTokenClassification", + "TFLayoutLMv3Model", + "TFLayoutLMv3PreTrainedModel", +] diff --git a/src/transformers/models/layoutlmv3/processing_layoutlmv3.py b/src/transformers/models/layoutlmv3/processing_layoutlmv3.py index 369bd51bec28a3..4bd9955775d13e 100644 --- a/src/transformers/models/layoutlmv3/processing_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/processing_layoutlmv3.py @@ -197,3 +197,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = 
["LayoutLMv3Processor"] diff --git a/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py b/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py index 248a299c141fd5..088a58bfc6bc7f 100644 --- a/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py @@ -1482,3 +1482,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs + + +__all__ = ["LayoutLMv3Tokenizer"] diff --git a/src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py b/src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py index 63cd1022e52170..934f04937b08f0 100644 --- a/src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py +++ b/src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py @@ -851,3 +851,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["LayoutLMv3TokenizerFast"] diff --git a/src/transformers/models/layoutxlm/__init__.py b/src/transformers/models/layoutxlm/__init__.py index e3885d381f9c26..6f5a662e6afd2c 100644 --- a/src/transformers/models/layoutxlm/__init__.py +++ b/src/transformers/models/layoutxlm/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,57 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"] if TYPE_CHECKING: - from .processing_layoutxlm import LayoutXLMProcessor - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutxlm import LayoutXLMTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast - + from .processing_layoutxlm import * + from .tokenization_layoutxlm import * + from .tokenization_layoutxlm_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/layoutxlm/processing_layoutxlm.py b/src/transformers/models/layoutxlm/processing_layoutxlm.py index 1cbd3f20c2fa7b..a8881c634c2442 100644 --- a/src/transformers/models/layoutxlm/processing_layoutxlm.py +++ b/src/transformers/models/layoutxlm/processing_layoutxlm.py @@ -199,3 +199,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["LayoutXLMProcessor"] diff --git a/src/transformers/models/layoutxlm/tokenization_layoutxlm.py b/src/transformers/models/layoutxlm/tokenization_layoutxlm.py index 060748ea907ad5..9241d59f3c0b3f 100644 --- a/src/transformers/models/layoutxlm/tokenization_layoutxlm.py +++ b/src/transformers/models/layoutxlm/tokenization_layoutxlm.py @@ -1184,3 +1184,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs + + +__all__ = ["LayoutXLMTokenizer"] diff --git a/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py b/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py index 7d12cec496ea30..89d784be04a4f6 100644 --- a/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py +++ b/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py @@ -814,3 +814,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["LayoutXLMTokenizerFast"] diff --git a/src/transformers/models/led/__init__.py b/src/transformers/models/led/__init__.py index 2dbd59dcc34705..786ebd36d7b8cc 100644 --- a/src/transformers/models/led/__init__.py +++ b/src/transformers/models/led/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,87 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_led": ["LEDConfig"], - "tokenization_led": ["LEDTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_led_fast"] = ["LEDTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_led"] = [ - "LEDForConditionalGeneration", - "LEDForQuestionAnswering", - "LEDForSequenceClassification", - "LEDModel", - "LEDPreTrainedModel", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_led"] = ["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_led import LEDConfig - from .tokenization_led import LEDTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_led_fast import LEDTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_led import ( - LEDForConditionalGeneration, - LEDForQuestionAnswering, - LEDForSequenceClassification, - LEDModel, - LEDPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_led import TFLEDForConditionalGeneration, TFLEDModel, TFLEDPreTrainedModel - + from .configuration_led import * + from .modeling_led import * + from .modeling_tf_led import * + from .tokenization_led import * + from .tokenization_led_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/led/configuration_led.py b/src/transformers/models/led/configuration_led.py index 9ed3b148c73923..d51c0dc4aa320a 100644 --- a/src/transformers/models/led/configuration_led.py +++ b/src/transformers/models/led/configuration_led.py @@ -160,3 +160,6 @@ def __init__( decoder_start_token_id=decoder_start_token_id, **kwargs, ) + + +__all__ = ["LEDConfig"] diff --git a/src/transformers/models/led/modeling_led.py b/src/transformers/models/led/modeling_led.py index ee1ad90bfceaa2..b5e8b43fbaa3de 100755 --- a/src/transformers/models/led/modeling_led.py +++ b/src/transformers/models/led/modeling_led.py @@ -2712,3 +2712,12 @@ def forward( encoder_attentions=outputs.encoder_attentions, encoder_global_attentions=outputs.encoder_global_attentions, ) + + +__all__ = [ + "LEDForConditionalGeneration", + "LEDForQuestionAnswering", + "LEDForSequenceClassification", + "LEDModel", + "LEDPreTrainedModel", +] diff --git 
a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index 8c414648d69e1a..ce94c504c4e3ea 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -2661,3 +2661,6 @@ def build(self, input_shape=None): if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) + + +__all__ = ["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"] diff --git a/src/transformers/models/led/tokenization_led.py b/src/transformers/models/led/tokenization_led.py index 6c1ec9526aefbf..0b4df92882a5ce 100644 --- a/src/transformers/models/led/tokenization_led.py +++ b/src/transformers/models/led/tokenization_led.py @@ -449,3 +449,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs + + +__all__ = ["LEDTokenizer"] diff --git a/src/transformers/models/led/tokenization_led_fast.py b/src/transformers/models/led/tokenization_led_fast.py index 6ee69fbe792752..5b36f513f5a2a0 100644 --- a/src/transformers/models/led/tokenization_led_fast.py +++ b/src/transformers/models/led/tokenization_led_fast.py @@ -325,3 +325,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs + + +__all__ = ["LEDTokenizerFast"] diff --git a/src/transformers/models/levit/__init__.py b/src/transformers/models/levit/__init__.py index 266889963c90f2..ab009d931204a8 100644 --- a/src/transformers/models/levit/__init__.py +++ b/src/transformers/models/levit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,59 +13,17 @@ # limitations under the License. 
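`_LazyModule`, which every converted `__init__.py` installs into `sys.modules`, defers the actual submodule imports until first attribute access. A simplified stand-in for illustration; the real class in `transformers.utils` also handles backend errors, `__dir__`, and keeps the module spec (note the diffs above add `module_spec=__spec__` where older files omitted it):

# Minimal stand-in showing what _LazyModule does for these packages
# (simplified): attribute access triggers the submodule import, so
# `import transformers` stays cheap despite thousands of reachable names.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._name_to_module)

    def __getattr__(self, name):
        if name not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(f".{self._name_to_module[name]}", self.__name__)
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value
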
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_levit": ["LevitConfig", "LevitOnnxConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_levit"] = ["LevitFeatureExtractor"] - _import_structure["image_processing_levit"] = ["LevitImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_levit"] = [ - "LevitForImageClassification", - "LevitForImageClassificationWithTeacher", - "LevitModel", - "LevitPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_levit import LevitConfig, LevitOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_levit import LevitFeatureExtractor - from .image_processing_levit import LevitImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_levit import ( - LevitForImageClassification, - LevitForImageClassificationWithTeacher, - LevitModel, - LevitPreTrainedModel, - ) + from .configuration_levit import * + from .feature_extraction_levit import * + from .image_processing_levit import * + from .modeling_levit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/levit/configuration_levit.py b/src/transformers/models/levit/configuration_levit.py index 5b049309594cd7..b15cc11226aaa4 100644 --- a/src/transformers/models/levit/configuration_levit.py +++ b/src/transformers/models/levit/configuration_levit.py @@ -139,3 +139,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["LevitConfig", "LevitOnnxConfig"] diff --git a/src/transformers/models/levit/feature_extraction_levit.py b/src/transformers/models/levit/feature_extraction_levit.py index 91308cf0ba18d2..41301a5171bac9 100644 --- a/src/transformers/models/levit/feature_extraction_levit.py +++ b/src/transformers/models/levit/feature_extraction_levit.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["LevitFeatureExtractor"] diff --git a/src/transformers/models/levit/image_processing_levit.py b/src/transformers/models/levit/image_processing_levit.py index fad47ee0273600..f2cb89620d7614 100644 --- a/src/transformers/models/levit/image_processing_levit.py +++ b/src/transformers/models/levit/image_processing_levit.py @@ -304,3 +304,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["LevitImageProcessor"] diff --git a/src/transformers/models/levit/modeling_levit.py b/src/transformers/models/levit/modeling_levit.py index af202787a16617..e1825f7a369327 100644 --- a/src/transformers/models/levit/modeling_levit.py +++ 
b/src/transformers/models/levit/modeling_levit.py @@ -733,3 +733,11 @@ def forward( distillation_logits=distill_logits, hidden_states=outputs.hidden_states, ) + + +__all__ = [ + "LevitForImageClassification", + "LevitForImageClassificationWithTeacher", + "LevitModel", + "LevitPreTrainedModel", +] diff --git a/src/transformers/models/lilt/__init__.py b/src/transformers/models/lilt/__init__.py index 5b73f3aebd9c2f..8666fa5cdcf6f9 100644 --- a/src/transformers/models/lilt/__init__.py +++ b/src/transformers/models/lilt/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,48 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_lilt": ["LiltConfig"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_lilt"] = [ - "LiltForQuestionAnswering", - "LiltForSequenceClassification", - "LiltForTokenClassification", - "LiltModel", - "LiltPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_lilt import LiltConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_lilt import ( - LiltForQuestionAnswering, - LiltForSequenceClassification, - LiltForTokenClassification, - LiltModel, - LiltPreTrainedModel, - ) - + from .configuration_lilt import * + from .modeling_lilt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/lilt/configuration_lilt.py b/src/transformers/models/lilt/configuration_lilt.py index 57ab8884ed4d76..30f32cca4a5646 100644 --- a/src/transformers/models/lilt/configuration_lilt.py +++ b/src/transformers/models/lilt/configuration_lilt.py @@ -126,3 +126,6 @@ def __init__( self.classifier_dropout = classifier_dropout self.channel_shrink_ratio = channel_shrink_ratio self.max_2d_position_embeddings = max_2d_position_embeddings + + +__all__ = ["LiltConfig"] diff --git a/src/transformers/models/lilt/modeling_lilt.py b/src/transformers/models/lilt/modeling_lilt.py index 85cbcfdc4c45ab..09865489572e4c 100644 --- a/src/transformers/models/lilt/modeling_lilt.py +++ b/src/transformers/models/lilt/modeling_lilt.py @@ -1181,3 +1181,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LiltForQuestionAnswering", + "LiltForSequenceClassification", + "LiltForTokenClassification", + "LiltModel", + "LiltPreTrainedModel", +] diff --git a/src/transformers/models/llama/__init__.py b/src/transformers/models/llama/__init__.py index 3f6461c4c093f2..0677bb91435c51 100644 --- a/src/transformers/models/llama/__init__.py +++ b/src/transformers/models/llama/__init__.py 
@@ -1,4 +1,4 @@ -# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,104 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_llama": ["LlamaConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_llama"] = ["LlamaTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_llama"] = [ - "LlamaForCausalLM", - "LlamaModel", - "LlamaPreTrainedModel", - "LlamaForSequenceClassification", - "LlamaForQuestionAnswering", - "LlamaForTokenClassification", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_llama"] = ["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_llama import LlamaConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_llama import LlamaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_llama_fast import LlamaTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_llama import ( - LlamaForCausalLM, - LlamaForQuestionAnswering, - LlamaForSequenceClassification, - LlamaForTokenClassification, - LlamaModel, - LlamaPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_llama import FlaxLlamaForCausalLM, FlaxLlamaModel, FlaxLlamaPreTrainedModel - - + from .configuration_llama import * + from .modeling_flax_llama import * + from .modeling_llama import * + from .tokenization_llama import * + from .tokenization_llama_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/llama/configuration_llama.py b/src/transformers/models/llama/configuration_llama.py index 98d5ecdd2a4fdb..b8bb43492eef17 100644 --- a/src/transformers/models/llama/configuration_llama.py +++ b/src/transformers/models/llama/configuration_llama.py @@ -214,3 +214,6 @@ def __init__( 
tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["LlamaConfig"] diff --git a/src/transformers/models/llama/modeling_flax_llama.py b/src/transformers/models/llama/modeling_flax_llama.py index 26a2c2bb09a3d2..1ffe5ec71435ac 100644 --- a/src/transformers/models/llama/modeling_flax_llama.py +++ b/src/transformers/models/llama/modeling_flax_llama.py @@ -742,3 +742,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): _CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, ) + + +__all__ = ["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"] diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 8e06098b04c63a..adf7e76409b0f4 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -1417,3 +1417,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LlamaForCausalLM", + "LlamaModel", + "LlamaPreTrainedModel", + "LlamaForSequenceClassification", + "LlamaForQuestionAnswering", + "LlamaForTokenClassification", +] diff --git a/src/transformers/models/llama/tokenization_llama.py b/src/transformers/models/llama/tokenization_llama.py index 35f29bfa957806..2d1744b66c197a 100644 --- a/src/transformers/models/llama/tokenization_llama.py +++ b/src/transformers/models/llama/tokenization_llama.py @@ -408,3 +408,6 @@ def create_token_type_ids_from_sequences( output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) return output + + +__all__ = ["LlamaTokenizer"] diff --git a/src/transformers/models/llama/tokenization_llama_fast.py b/src/transformers/models/llama/tokenization_llama_fast.py index 67e339b4290a2b..cb8b742ed41bc6 100644 --- a/src/transformers/models/llama/tokenization_llama_fast.py +++ b/src/transformers/models/llama/tokenization_llama_fast.py @@ -253,3 +253,6 @@ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = output + bos_token_id + token_ids_1 + eos_token_id return output + + +__all__ = ["LlamaTokenizerFast"] diff --git a/src/transformers/models/llava/__init__.py b/src/transformers/models/llava/__init__.py index 3dabdc1f678f03..ac59402ba75125 100644 --- a/src/transformers/models/llava/__init__.py +++ b/src/transformers/models/llava/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,43 +13,16 @@ # limitations under the License. 
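Nothing about the public import surface is meant to change. A quick interactive check using llama, whose conversion appears just above (hypothetical verification snippet, not part of the patch):

# Hypothetical check that the lazy proxy still presents the same API.
import transformers.models.llama as llama_pkg

# dir() on the lazy proxy should list the exported names without yet
# importing the torch-heavy modeling file.
assert "LlamaConfig" in dir(llama_pkg)

from transformers.models.llama import LlamaConfig  # triggers the real import

config = LlamaConfig()
print(type(config).__module__)  # transformers.models.llama.configuration_llama
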
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_llava": ["LlavaConfig"], - "processing_llava": ["LlavaProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_llava"] = [ - "LlavaForConditionalGeneration", - "LlavaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_llava import LlavaConfig - from .processing_llava import LlavaProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_llava import ( - LlavaForConditionalGeneration, - LlavaPreTrainedModel, - ) - + from .configuration_llava import * + from .modeling_llava import * + from .processing_llava import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/llava/configuration_llava.py b/src/transformers/models/llava/configuration_llava.py index 05034f5cfcf6f8..68ec84b4d3276d 100644 --- a/src/transformers/models/llava/configuration_llava.py +++ b/src/transformers/models/llava/configuration_llava.py @@ -129,3 +129,6 @@ def __init__( self.text_config = text_config super().__init__(**kwargs) + + +__all__ = ["LlavaConfig"] diff --git a/src/transformers/models/llava/modeling_llava.py b/src/transformers/models/llava/modeling_llava.py index e8536ee50f94bb..77cfc57b42bc86 100644 --- a/src/transformers/models/llava/modeling_llava.py +++ b/src/transformers/models/llava/modeling_llava.py @@ -618,3 +618,6 @@ def prepare_inputs_for_generation( model_inputs["pixel_values"] = pixel_values return model_inputs + + +__all__ = ["LlavaForConditionalGeneration", "LlavaPreTrainedModel"] diff --git a/src/transformers/models/llava/processing_llava.py b/src/transformers/models/llava/processing_llava.py index 08caa3d1d8a75a..8dca2e16dff6af 100644 --- a/src/transformers/models/llava/processing_llava.py +++ b/src/transformers/models/llava/processing_llava.py @@ -201,3 +201,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["LlavaProcessor"] diff --git a/src/transformers/models/llava_next/__init__.py b/src/transformers/models/llava_next/__init__.py index 0fb2ff2b6f28fa..028cc73be4070f 100644 --- a/src/transformers/models/llava_next/__init__.py +++ b/src/transformers/models/llava_next/__init__.py @@ -13,60 +13,17 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_llava_next": ["LlavaNextConfig"], - "processing_llava_next": ["LlavaNextProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_llava_next"] = [ - "LlavaNextForConditionalGeneration", - "LlavaNextPreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_llava_next"] = ["LlavaNextImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_llava_next import LlavaNextConfig - from .processing_llava_next import LlavaNextProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_llava_next import ( - LlavaNextForConditionalGeneration, - LlavaNextPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_llava_next import LlavaNextImageProcessor - - + from .configuration_llava_next import * + from .image_processing_llava_next import * + from .modeling_llava_next import * + from .processing_llava_next import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/llava_next/configuration_llava_next.py b/src/transformers/models/llava_next/configuration_llava_next.py index 54616edbf96dce..2251a330aa4e6b 100644 --- a/src/transformers/models/llava_next/configuration_llava_next.py +++ b/src/transformers/models/llava_next/configuration_llava_next.py @@ -142,3 +142,6 @@ def __init__( self.text_config = text_config super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) + + +__all__ = ["LlavaNextConfig"] diff --git a/src/transformers/models/llava_next/image_processing_llava_next.py b/src/transformers/models/llava_next/image_processing_llava_next.py index 41118599ec93b7..e43d95b80a0ada 100644 --- a/src/transformers/models/llava_next/image_processing_llava_next.py +++ b/src/transformers/models/llava_next/image_processing_llava_next.py @@ -749,3 +749,6 @@ def preprocess( return BatchFeature( data={"pixel_values": processed_images, "image_sizes": image_sizes}, tensor_type=return_tensors ) + + +__all__ = ["LlavaNextImageProcessor"] diff --git a/src/transformers/models/llava_next/modeling_llava_next.py b/src/transformers/models/llava_next/modeling_llava_next.py index 269663c7d6141a..de5a030b61c172 100644 --- a/src/transformers/models/llava_next/modeling_llava_next.py +++ b/src/transformers/models/llava_next/modeling_llava_next.py @@ -1007,3 +1007,6 @@ def prepare_inputs_for_generation( model_inputs["image_sizes"] = image_sizes return model_inputs + + +__all__ = ["LlavaNextForConditionalGeneration", "LlavaNextPreTrainedModel"] diff --git a/src/transformers/models/llava_next/processing_llava_next.py b/src/transformers/models/llava_next/processing_llava_next.py index 38173cbd861fc1..462006bc99ea4d 100644 --- 
a/src/transformers/models/llava_next/processing_llava_next.py +++ b/src/transformers/models/llava_next/processing_llava_next.py @@ -242,3 +242,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["LlavaNextProcessor"] diff --git a/src/transformers/models/llava_onevision/__init__.py b/src/transformers/models/llava_onevision/__init__.py index f16948a8f74017..0369165da24d75 100644 --- a/src/transformers/models/llava_onevision/__init__.py +++ b/src/transformers/models/llava_onevision/__init__.py @@ -13,60 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_llava_onevision": ["LlavaOnevisionConfig"], - "processing_llava_onevision": ["LlavaOnevisionProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_llava_onevision"] = ["LlavaOnevisionImageProcessor"] - - _import_structure["video_processing_llava_onevision"] = ["LlavaOnevisionVideoProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_llava_onevision"] = [ - "LlavaOnevisionForConditionalGeneration", - "LlavaOnevisionPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_llava_onevision import LlavaOnevisionConfig - from .processing_llava_onevision import LlavaOnevisionProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_llava_onevision import LlavaOnevisionImageProcessor - from .video_processing_llava_onevision import LlavaOnevisionVideoProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_llava_onevision import ( - LlavaOnevisionForConditionalGeneration, - LlavaOnevisionPreTrainedModel, - ) - + from .configuration_llava_onevision import * + from .image_processing_llava_onevision import * + from .modeling_llava_onevision import * + from .processing_llava_onevision import * + from .video_processing_llava_onevision import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/llava_onevision/configuration_llava_onevision.py b/src/transformers/models/llava_onevision/configuration_llava_onevision.py index 46b65b35b1a5cb..74be035a8f2679 100644 --- a/src/transformers/models/llava_onevision/configuration_llava_onevision.py +++ b/src/transformers/models/llava_onevision/configuration_llava_onevision.py @@ -181,3 +181,6 @@ def __init__( self.text_config = text_config super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) + + +__all__ = ["LlavaOnevisionConfig"] diff --git a/src/transformers/models/llava_onevision/image_processing_llava_onevision.py 
b/src/transformers/models/llava_onevision/image_processing_llava_onevision.py index 2047557208372a..cde9a643e3efc4 100644 --- a/src/transformers/models/llava_onevision/image_processing_llava_onevision.py +++ b/src/transformers/models/llava_onevision/image_processing_llava_onevision.py @@ -710,3 +710,6 @@ def preprocess( return BatchFeature( data={"pixel_values": processed_images, "image_sizes": image_sizes}, tensor_type=return_tensors ) + + +__all__ = ["LlavaOnevisionImageProcessor"] diff --git a/src/transformers/models/llava_onevision/modeling_llava_onevision.py b/src/transformers/models/llava_onevision/modeling_llava_onevision.py index 626db4d96aae2e..4bcdf1aba82ae4 100644 --- a/src/transformers/models/llava_onevision/modeling_llava_onevision.py +++ b/src/transformers/models/llava_onevision/modeling_llava_onevision.py @@ -804,3 +804,6 @@ def prepare_inputs_for_generation( model_inputs["image_sizes_videos"] = image_sizes_videos return model_inputs + + +__all__ = ["LlavaOnevisionForConditionalGeneration", "LlavaOnevisionPreTrainedModel"] diff --git a/src/transformers/models/llava_onevision/processing_llava_onevision.py b/src/transformers/models/llava_onevision/processing_llava_onevision.py index ff808802bcaa63..d42f287da25b22 100644 --- a/src/transformers/models/llava_onevision/processing_llava_onevision.py +++ b/src/transformers/models/llava_onevision/processing_llava_onevision.py @@ -314,3 +314,6 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): ) return processor + + +__all__ = ["LlavaOnevisionProcessor"] diff --git a/src/transformers/models/llava_onevision/video_processing_llava_onevision.py b/src/transformers/models/llava_onevision/video_processing_llava_onevision.py index bd63c45618af94..4391dd33088337 100644 --- a/src/transformers/models/llava_onevision/video_processing_llava_onevision.py +++ b/src/transformers/models/llava_onevision/video_processing_llava_onevision.py @@ -333,3 +333,6 @@ def preprocess( data={"pixel_values_videos": pixel_values}, tensor_type=return_tensors, ) + + +__all__ = ["LlavaOnevisionVideoProcessor"] diff --git a/src/transformers/models/longformer/__init__.py b/src/transformers/models/longformer/__init__.py index ddbd8a68ecc6dc..87f53105424b76 100644 --- a/src/transformers/models/longformer/__init__.py +++ b/src/transformers/models/longformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,119 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
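The `TYPE_CHECKING` branches in the rewritten `__init__` files rely on wildcard imports, and Python only re-exports the names a module lists in `__all__` when one is defined; the `__all__` lists being added file by file are therefore what keeps static type checkers seeing exactly the public surface the old explicit imports spelled out. The rule in miniature (self-contained demo; the module name is made up):

```python
import sys
import types

# A throwaway module with one exported and one unexported name.
demo = types.ModuleType("demo")
exec(
    "__all__ = ['PublicModel']\n"
    "class PublicModel: pass\n"
    "class InternalHelper: pass\n",
    demo.__dict__,
)
sys.modules["demo"] = demo

namespace = {}
exec("from demo import *", namespace)
assert "PublicModel" in namespace         # listed in __all__
assert "InternalHelper" not in namespace  # defined, but not exported
```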
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_longformer": [ - "LongformerConfig", - "LongformerOnnxConfig", - ], - "tokenization_longformer": ["LongformerTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_longformer"] = [ - "LongformerForMaskedLM", - "LongformerForMultipleChoice", - "LongformerForQuestionAnswering", - "LongformerForSequenceClassification", - "LongformerForTokenClassification", - "LongformerModel", - "LongformerPreTrainedModel", - "LongformerSelfAttention", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_longformer"] = [ - "TFLongformerForMaskedLM", - "TFLongformerForMultipleChoice", - "TFLongformerForQuestionAnswering", - "TFLongformerForSequenceClassification", - "TFLongformerForTokenClassification", - "TFLongformerModel", - "TFLongformerPreTrainedModel", - "TFLongformerSelfAttention", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_longformer import ( - LongformerConfig, - LongformerOnnxConfig, - ) - from .tokenization_longformer import LongformerTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_longformer_fast import LongformerTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_longformer import ( - LongformerForMaskedLM, - LongformerForMultipleChoice, - LongformerForQuestionAnswering, - LongformerForSequenceClassification, - LongformerForTokenClassification, - LongformerModel, - LongformerPreTrainedModel, - LongformerSelfAttention, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_longformer import ( - TFLongformerForMaskedLM, - TFLongformerForMultipleChoice, - TFLongformerForQuestionAnswering, - TFLongformerForSequenceClassification, - TFLongformerForTokenClassification, - TFLongformerModel, - TFLongformerPreTrainedModel, - TFLongformerSelfAttention, - ) - + from .configuration_longformer import * + from .modeling_longformer import * + from .modeling_tf_longformer import * + from .tokenization_longformer import * + from .tokenization_longformer_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/longformer/configuration_longformer.py b/src/transformers/models/longformer/configuration_longformer.py index fc6093763709e0..7a4d698471750b 100644 --- a/src/transformers/models/longformer/configuration_longformer.py +++ 
b/src/transformers/models/longformer/configuration_longformer.py @@ -199,3 +199,6 @@ def generate_dummy_inputs( inputs["global_attention_mask"][:, ::2] = 1 return inputs + + +__all__ = ["LongformerConfig", "LongformerOnnxConfig"] diff --git a/src/transformers/models/longformer/modeling_longformer.py b/src/transformers/models/longformer/modeling_longformer.py index 67b5e2b67f0b7a..ebdba5c4ed2d31 100755 --- a/src/transformers/models/longformer/modeling_longformer.py +++ b/src/transformers/models/longformer/modeling_longformer.py @@ -2322,3 +2322,15 @@ def forward( attentions=outputs.attentions, global_attentions=outputs.global_attentions, ) + + +__all__ = [ + "LongformerForMaskedLM", + "LongformerForMultipleChoice", + "LongformerForQuestionAnswering", + "LongformerForSequenceClassification", + "LongformerForTokenClassification", + "LongformerModel", + "LongformerPreTrainedModel", + "LongformerSelfAttention", +] diff --git a/src/transformers/models/longformer/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py index b32cde202cea20..0f52ca658a7b6f 100644 --- a/src/transformers/models/longformer/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -2772,3 +2772,15 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFLongformerForMaskedLM", + "TFLongformerForMultipleChoice", + "TFLongformerForQuestionAnswering", + "TFLongformerForSequenceClassification", + "TFLongformerForTokenClassification", + "TFLongformerModel", + "TFLongformerPreTrainedModel", + "TFLongformerSelfAttention", +] diff --git a/src/transformers/models/longformer/tokenization_longformer.py b/src/transformers/models/longformer/tokenization_longformer.py index 51728d77808158..afecf750135b0d 100644 --- a/src/transformers/models/longformer/tokenization_longformer.py +++ b/src/transformers/models/longformer/tokenization_longformer.py @@ -397,3 +397,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) + + +__all__ = ["LongformerTokenizer"] diff --git a/src/transformers/models/longformer/tokenization_longformer_fast.py b/src/transformers/models/longformer/tokenization_longformer_fast.py index d4b4228b035fef..3d3ca97a6f6457 100644 --- a/src/transformers/models/longformer/tokenization_longformer_fast.py +++ b/src/transformers/models/longformer/tokenization_longformer_fast.py @@ -268,3 +268,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["LongformerTokenizerFast"] diff --git a/src/transformers/models/longt5/__init__.py b/src/transformers/models/longt5/__init__.py index 97d2bbe8ccd330..2716e62cd7b28a 100644 --- a/src/transformers/models/longt5/__init__.py +++ b/src/transformers/models/longt5/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,72 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available - - -_import_structure = { - "configuration_longt5": ["LongT5Config", "LongT5OnnxConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_longt5"] = [ - "LongT5EncoderModel", - "LongT5ForConditionalGeneration", - "LongT5Model", - "LongT5PreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_longt5"] = [ - "FlaxLongT5ForConditionalGeneration", - "FlaxLongT5Model", - "FlaxLongT5PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_longt5 import LongT5Config, LongT5OnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_longt5 import ( - LongT5EncoderModel, - LongT5ForConditionalGeneration, - LongT5Model, - LongT5PreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_longt5 import ( - FlaxLongT5ForConditionalGeneration, - FlaxLongT5Model, - FlaxLongT5PreTrainedModel, - ) - - + from .configuration_longt5 import * + from .modeling_flax_longt5 import * + from .modeling_longt5 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/longt5/configuration_longt5.py b/src/transformers/models/longt5/configuration_longt5.py index b6e7d21b3d677b..9acac4e447d81d 100644 --- a/src/transformers/models/longt5/configuration_longt5.py +++ b/src/transformers/models/longt5/configuration_longt5.py @@ -175,3 +175,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["LongT5Config", "LongT5OnnxConfig"] diff --git a/src/transformers/models/longt5/modeling_flax_longt5.py b/src/transformers/models/longt5/modeling_flax_longt5.py index 4ab18a3ca7c829..55081978dbf65d 100644 --- a/src/transformers/models/longt5/modeling_flax_longt5.py +++ b/src/transformers/models/longt5/modeling_flax_longt5.py @@ -2444,3 +2444,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_replace_return_docstrings( FlaxLongT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) + + +__all__ = ["FlaxLongT5ForConditionalGeneration", "FlaxLongT5Model", "FlaxLongT5PreTrainedModel"] diff --git a/src/transformers/models/longt5/modeling_longt5.py b/src/transformers/models/longt5/modeling_longt5.py index 29536d9ad6f284..ebd2844650272a 100644 --- a/src/transformers/models/longt5/modeling_longt5.py +++ b/src/transformers/models/longt5/modeling_longt5.py @@ -2339,3 +2339,6 @@ def forward( ) return encoder_outputs + + +__all__ = ["LongT5EncoderModel", "LongT5ForConditionalGeneration", "LongT5Model", "LongT5PreTrainedModel"] diff --git 
a/src/transformers/models/luke/__init__.py b/src/transformers/models/luke/__init__.py index 5ae6f488116ff4..db588fb2da1976 100644 --- a/src/transformers/models/luke/__init__.py +++ b/src/transformers/models/luke/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,61 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_luke": ["LukeConfig"], - "tokenization_luke": ["LukeTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_luke"] = [ - "LukeForEntityClassification", - "LukeForEntityPairClassification", - "LukeForEntitySpanClassification", - "LukeForMultipleChoice", - "LukeForQuestionAnswering", - "LukeForSequenceClassification", - "LukeForTokenClassification", - "LukeForMaskedLM", - "LukeModel", - "LukePreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_luke import LukeConfig - from .tokenization_luke import LukeTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_luke import ( - LukeForEntityClassification, - LukeForEntityPairClassification, - LukeForEntitySpanClassification, - LukeForMaskedLM, - LukeForMultipleChoice, - LukeForQuestionAnswering, - LukeForSequenceClassification, - LukeForTokenClassification, - LukeModel, - LukePreTrainedModel, - ) - + from .configuration_luke import * + from .modeling_luke import * + from .tokenization_luke import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/luke/configuration_luke.py b/src/transformers/models/luke/configuration_luke.py index 44e1002cfbdc81..3d4640a9fd2572 100644 --- a/src/transformers/models/luke/configuration_luke.py +++ b/src/transformers/models/luke/configuration_luke.py @@ -137,3 +137,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.use_entity_aware_attention = use_entity_aware_attention self.classifier_dropout = classifier_dropout + + +__all__ = ["LukeConfig"] diff --git a/src/transformers/models/luke/modeling_luke.py b/src/transformers/models/luke/modeling_luke.py index 803f4396a2b6a1..7a4f03fdf51766 100644 --- a/src/transformers/models/luke/modeling_luke.py +++ b/src/transformers/models/luke/modeling_luke.py @@ -2226,3 +2226,17 @@ def forward( entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LukeForEntityClassification", + "LukeForEntityPairClassification", + "LukeForEntitySpanClassification", + "LukeForMultipleChoice", + "LukeForQuestionAnswering", + "LukeForSequenceClassification", + "LukeForTokenClassification", + "LukeForMaskedLM", + 
"LukeModel", + "LukePreTrainedModel", +] diff --git a/src/transformers/models/luke/tokenization_luke.py b/src/transformers/models/luke/tokenization_luke.py index e06b9c753fe596..97f2d721097b4a 100644 --- a/src/transformers/models/luke/tokenization_luke.py +++ b/src/transformers/models/luke/tokenization_luke.py @@ -1726,3 +1726,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = f.write(json.dumps(self.entity_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return vocab_file, merge_file, entity_vocab_file + + +__all__ = ["LukeTokenizer"] diff --git a/src/transformers/models/lxmert/__init__.py b/src/transformers/models/lxmert/__init__.py index 007beb4ecd2dcf..3ad507465039ee 100644 --- a/src/transformers/models/lxmert/__init__.py +++ b/src/transformers/models/lxmert/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,105 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_lxmert": ["LxmertConfig"], - "tokenization_lxmert": ["LxmertTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_lxmert"] = [ - "LxmertEncoder", - "LxmertForPreTraining", - "LxmertForQuestionAnswering", - "LxmertModel", - "LxmertPreTrainedModel", - "LxmertVisualFeatureEncoder", - "LxmertXLayer", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_lxmert"] = [ - "TFLxmertForPreTraining", - "TFLxmertMainLayer", - "TFLxmertModel", - "TFLxmertPreTrainedModel", - "TFLxmertVisualFeatureEncoder", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_lxmert import LxmertConfig - from .tokenization_lxmert import LxmertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_lxmert_fast import LxmertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_lxmert import ( - LxmertEncoder, - LxmertForPreTraining, - LxmertForQuestionAnswering, - LxmertModel, - LxmertPreTrainedModel, - LxmertVisualFeatureEncoder, - LxmertXLayer, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_lxmert import ( - TFLxmertForPreTraining, - TFLxmertMainLayer, - TFLxmertModel, - TFLxmertPreTrainedModel, - TFLxmertVisualFeatureEncoder, - ) - + from .configuration_lxmert 
import * + from .modeling_lxmert import * + from .modeling_tf_lxmert import * + from .tokenization_lxmert import * + from .tokenization_lxmert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/lxmert/configuration_lxmert.py b/src/transformers/models/lxmert/configuration_lxmert.py index d753e752272b10..c092d01148a607 100644 --- a/src/transformers/models/lxmert/configuration_lxmert.py +++ b/src/transformers/models/lxmert/configuration_lxmert.py @@ -164,3 +164,6 @@ def __init__( self.visual_feat_loss = visual_feat_loss self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} super().__init__(**kwargs) + + +__all__ = ["LxmertConfig"] diff --git a/src/transformers/models/lxmert/modeling_lxmert.py b/src/transformers/models/lxmert/modeling_lxmert.py index 9113fc4fd0eb9d..b97d78d1505b5c 100644 --- a/src/transformers/models/lxmert/modeling_lxmert.py +++ b/src/transformers/models/lxmert/modeling_lxmert.py @@ -1448,3 +1448,14 @@ def forward( vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions, ) + + +__all__ = [ + "LxmertEncoder", + "LxmertForPreTraining", + "LxmertForQuestionAnswering", + "LxmertModel", + "LxmertPreTrainedModel", + "LxmertVisualFeatureEncoder", + "LxmertXLayer", +] diff --git a/src/transformers/models/lxmert/modeling_tf_lxmert.py b/src/transformers/models/lxmert/modeling_tf_lxmert.py index 8a833fb35adc9d..bd07c49f4918b5 100644 --- a/src/transformers/models/lxmert/modeling_tf_lxmert.py +++ b/src/transformers/models/lxmert/modeling_tf_lxmert.py @@ -1650,3 +1650,12 @@ def build(self, input_shape=None): if getattr(self, "answer_head", None) is not None: with tf.name_scope(self.answer_head.name): self.answer_head.build(None) + + +__all__ = [ + "TFLxmertForPreTraining", + "TFLxmertMainLayer", + "TFLxmertModel", + "TFLxmertPreTrainedModel", + "TFLxmertVisualFeatureEncoder", +] diff --git a/src/transformers/models/lxmert/tokenization_lxmert.py b/src/transformers/models/lxmert/tokenization_lxmert.py index 8310993160a325..2dea92f7e0a16b 100644 --- a/src/transformers/models/lxmert/tokenization_lxmert.py +++ b/src/transformers/models/lxmert/tokenization_lxmert.py @@ -506,3 +506,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["LxmertTokenizer"] diff --git a/src/transformers/models/lxmert/tokenization_lxmert_fast.py b/src/transformers/models/lxmert/tokenization_lxmert_fast.py index e31fdbcf761d50..9a6a11bba2178e 100644 --- a/src/transformers/models/lxmert/tokenization_lxmert_fast.py +++ b/src/transformers/models/lxmert/tokenization_lxmert_fast.py @@ -167,3 +167,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["LxmertTokenizerFast"] diff --git a/src/transformers/models/m2m_100/__init__.py b/src/transformers/models/m2m_100/__init__.py index 45232f1390a53b..c456a6b88378a5 100644 --- a/src/transformers/models/m2m_100/__init__.py +++ b/src/transformers/models/m2m_100/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. 
All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,46 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_m2m_100": ["M2M100Config", "M2M100OnnxConfig"], - "tokenization_m2m_100": ["M2M100Tokenizer"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_m2m_100"] = [ - "M2M100ForConditionalGeneration", - "M2M100Model", - "M2M100PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_m2m_100 import M2M100Config, M2M100OnnxConfig - from .tokenization_m2m_100 import M2M100Tokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_m2m_100 import ( - M2M100ForConditionalGeneration, - M2M100Model, - M2M100PreTrainedModel, - ) - - + from .configuration_m2m_100 import * + from .modeling_m2m_100 import * + from .tokenization_m2m_100 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/m2m_100/configuration_m2m_100.py b/src/transformers/models/m2m_100/configuration_m2m_100.py index 7ae3c44127e08e..906a958e3d13aa 100644 --- a/src/transformers/models/m2m_100/configuration_m2m_100.py +++ b/src/transformers/models/m2m_100/configuration_m2m_100.py @@ -278,3 +278,6 @@ def _generate_dummy_inputs_for_default_and_seq2seq_lm( return common_inputs generate_dummy_inputs = _generate_dummy_inputs_for_default_and_seq2seq_lm + + +__all__ = ["M2M100Config", "M2M100OnnxConfig"] diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index cc35a3504255bf..67bdd1a1a1b407 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -1629,3 +1629,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel"] diff --git a/src/transformers/models/m2m_100/tokenization_m2m_100.py b/src/transformers/models/m2m_100/tokenization_m2m_100.py index 403d8cc50778c1..7ce4643dcdc54a 100644 --- a/src/transformers/models/m2m_100/tokenization_m2m_100.py +++ b/src/transformers/models/m2m_100/tokenization_m2m_100.py @@ -377,3 +377,6 @@ def load_json(path: str) -> Union[Dict, List]: def save_json(data, path: str) -> None: with open(path, "w") as f: json.dump(data, f, indent=2) + + +__all__ = ["M2M100Tokenizer"] diff --git a/src/transformers/models/mamba/__init__.py b/src/transformers/models/mamba/__init__.py index 80cb8e1c68a21d..a8cd57ed3f2bd9 100644 --- a/src/transformers/models/mamba/__init__.py +++ b/src/transformers/models/mamba/__init__.py @@ -11,48 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_mamba": ["MambaConfig", "MambaOnnxConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mamba"] = [ - "MambaForCausalLM", - "MambaModel", - "MambaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mamba import MambaConfig, MambaOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mamba import ( - MambaForCausalLM, - MambaModel, - MambaPreTrainedModel, - ) + from .configuration_mamba import * + from .modeling_mamba import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mamba/configuration_mamba.py b/src/transformers/models/mamba/configuration_mamba.py index 89f08dd3cd3276..6bf80e5fe92d93 100644 --- a/src/transformers/models/mamba/configuration_mamba.py +++ b/src/transformers/models/mamba/configuration_mamba.py @@ -155,3 +155,6 @@ def __init__( self.use_mambapy = use_mambapy super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs) + + +__all__ = ["MambaConfig"] diff --git a/src/transformers/models/mamba/modeling_mamba.py b/src/transformers/models/mamba/modeling_mamba.py index cf84ac795eba44..054307805f3708 100644 --- a/src/transformers/models/mamba/modeling_mamba.py +++ b/src/transformers/models/mamba/modeling_mamba.py @@ -807,3 +807,6 @@ def forward( cache_params=mamba_outputs.cache_params, hidden_states=mamba_outputs.hidden_states, ) + + +__all__ = ["MambaForCausalLM", "MambaModel", "MambaPreTrainedModel"] diff --git a/src/transformers/models/mamba2/__init__.py b/src/transformers/models/mamba2/__init__.py index 2233ff229c0e5d..1389215e839851 100644 --- a/src/transformers/models/mamba2/__init__.py +++ b/src/transformers/models/mamba2/__init__.py @@ -11,48 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
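Deriving everything from `__all__` also removes stale entries: the old `mamba` dict advertised a `MambaOnnxConfig` that `configuration_mamba.py` does not appear to define (its new `__all__` is just `["MambaConfig"]`), and the `mamba2` hunk below drops `Mamba2OnnxConfig` the same way. A check along these lines (a hypothetical helper, not part of the patch) makes such phantom exports easy to spot:

```python
import importlib


def phantom_exports(package: str, structure: dict) -> list:
    """Return names an import-structure dict promises but the
    corresponding submodule never actually defines."""
    missing = []
    for module_name, names in structure.items():
        module = importlib.import_module(f"{package}.{module_name}")
        missing += [name for name in names if not hasattr(module, name)]
    return missing


# phantom_exports("transformers.models.mamba",
#                 {"configuration_mamba": ["MambaConfig", "MambaOnnxConfig"]})
# would report ["MambaOnnxConfig"] if the class is indeed absent.
```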
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_mamba2": ["Mamba2Config", "Mamba2OnnxConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mamba2"] = [ - "Mamba2ForCausalLM", - "Mamba2Model", - "Mamba2PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mamba2 import Mamba2Config, Mamba2OnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mamba2 import ( - Mamba2ForCausalLM, - Mamba2Model, - Mamba2PreTrainedModel, - ) + from .configuration_mamba2 import * + from .modeling_mamba2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mamba2/configuration_mamba2.py b/src/transformers/models/mamba2/configuration_mamba2.py index 7a690dceb1c4a6..ae6ea5cfaced35 100644 --- a/src/transformers/models/mamba2/configuration_mamba2.py +++ b/src/transformers/models/mamba2/configuration_mamba2.py @@ -178,3 +178,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["Mamba2Config"] diff --git a/src/transformers/models/mamba2/modeling_mamba2.py b/src/transformers/models/mamba2/modeling_mamba2.py index c312b9b94351d2..b3e7289037ac5e 100644 --- a/src/transformers/models/mamba2/modeling_mamba2.py +++ b/src/transformers/models/mamba2/modeling_mamba2.py @@ -1091,3 +1091,6 @@ def forward( cache_params=mamba2_outputs.cache_params, hidden_states=mamba2_outputs.hidden_states, ) + + +__all__ = ["Mamba2ForCausalLM", "Mamba2Model", "Mamba2PreTrainedModel"] diff --git a/src/transformers/models/marian/__init__.py b/src/transformers/models/marian/__init__.py index e3a8c473aeeedf..6cfabc1590f25f 100644 --- a/src/transformers/models/marian/__init__.py +++ b/src/transformers/models/marian/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,99 +13,18 @@ # limitations under the License. 
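With the availability guards gone from the `__init__` files, a missing backend surfaces when an object is first touched rather than when the package is imported: `_LazyModule` resolves attributes on demand and only then imports the defining submodule, which is where `import torch` and friends live. A rough sketch of that mechanism, assuming a plain `{submodule: [names]}` map instead of the richer structure the real class consumes:

```python
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Rough sketch of the _LazyModule idea (not the transformers class)."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Reverse map: exported name -> submodule that defines it.
        self._name_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __dir__(self):
        return sorted(set(super().__dir__()) | set(self._name_to_module))

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {} has no attribute {attr}")
        # The submodule import happens here, so a missing backend such as
        # torch raises at first attribute access, not at package import.
        submodule = importlib.import_module(
            f".{self._name_to_module[attr]}",
        )
        return getattr(submodule, attr)
```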
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_marian": ["MarianConfig", "MarianOnnxConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_marian"] = ["MarianTokenizer"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_marian"] = [ - "MarianForCausalLM", - "MarianModel", - "MarianMTModel", - "MarianPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_marian"] = ["TFMarianModel", "TFMarianMTModel", "TFMarianPreTrainedModel"] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_marian"] = ["FlaxMarianModel", "FlaxMarianMTModel", "FlaxMarianPreTrainedModel"] - if TYPE_CHECKING: - from .configuration_marian import MarianConfig, MarianOnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_marian import MarianTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_marian import ( - MarianForCausalLM, - MarianModel, - MarianMTModel, - MarianPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_marian import TFMarianModel, TFMarianMTModel, TFMarianPreTrainedModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_marian import FlaxMarianModel, FlaxMarianMTModel, FlaxMarianPreTrainedModel - + from .configuration_marian import * + from .modeling_flax_marian import * + from .modeling_marian import * + from .modeling_tf_marian import * + from .tokenization_marian import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/marian/configuration_marian.py b/src/transformers/models/marian/configuration_marian.py index 5a3f083804d504..e74958ea6e2839 100644 --- a/src/transformers/models/marian/configuration_marian.py +++ b/src/transformers/models/marian/configuration_marian.py @@ -389,3 +389,6 @@ def _flatten_past_key_values_(self, flattened_output, name, idx, t): @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["MarianConfig", "MarianOnnxConfig"] diff --git a/src/transformers/models/marian/modeling_flax_marian.py b/src/transformers/models/marian/modeling_flax_marian.py index e33df2e06b21ed..2021ca34140596 100644 --- a/src/transformers/models/marian/modeling_flax_marian.py +++ 
b/src/transformers/models/marian/modeling_flax_marian.py @@ -1495,3 +1495,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): MARIAN_INPUTS_DOCSTRING + FLAX_MARIAN_MT_DOCSTRING, ) append_replace_return_docstrings(FlaxMarianMTModel, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + + +__all__ = ["FlaxMarianModel", "FlaxMarianMTModel", "FlaxMarianPreTrainedModel"] diff --git a/src/transformers/models/marian/modeling_marian.py b/src/transformers/models/marian/modeling_marian.py index 2d7c7d85daed64..c9865256636d79 100755 --- a/src/transformers/models/marian/modeling_marian.py +++ b/src/transformers/models/marian/modeling_marian.py @@ -1655,3 +1655,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["MarianForCausalLM", "MarianModel", "MarianMTModel", "MarianPreTrainedModel"] diff --git a/src/transformers/models/marian/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py index 30c6157d5008d7..fb3a8f7a651083 100644 --- a/src/transformers/models/marian/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -1554,3 +1554,6 @@ def build(self, input_shape=None): if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) + + +__all__ = ["TFMarianModel", "TFMarianMTModel", "TFMarianPreTrainedModel"] diff --git a/src/transformers/models/marian/tokenization_marian.py b/src/transformers/models/marian/tokenization_marian.py index 4f0d90b6f0dffe..c401d38f086fab 100644 --- a/src/transformers/models/marian/tokenization_marian.py +++ b/src/transformers/models/marian/tokenization_marian.py @@ -389,3 +389,6 @@ def save_json(data, path: str) -> None: def load_json(path: str) -> Union[Dict, List]: with open(path, "r") as f: return json.load(f) + + +__all__ = ["MarianTokenizer"] diff --git a/src/transformers/models/markuplm/__init__.py b/src/transformers/models/markuplm/__init__.py index 368834f13e98f8..8b03aa2e625637 100644 --- a/src/transformers/models/markuplm/__init__.py +++ b/src/transformers/models/markuplm/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,69 +13,19 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_markuplm": ["MarkupLMConfig"], - "feature_extraction_markuplm": ["MarkupLMFeatureExtractor"], - "processing_markuplm": ["MarkupLMProcessor"], - "tokenization_markuplm": ["MarkupLMTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_markuplm_fast"] = ["MarkupLMTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_markuplm"] = [ - "MarkupLMForQuestionAnswering", - "MarkupLMForSequenceClassification", - "MarkupLMForTokenClassification", - "MarkupLMModel", - "MarkupLMPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_markuplm import MarkupLMConfig - from .feature_extraction_markuplm import MarkupLMFeatureExtractor - from .processing_markuplm import MarkupLMProcessor - from .tokenization_markuplm import MarkupLMTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_markuplm_fast import MarkupLMTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_markuplm import ( - MarkupLMForQuestionAnswering, - MarkupLMForSequenceClassification, - MarkupLMForTokenClassification, - MarkupLMModel, - MarkupLMPreTrainedModel, - ) - - + from .configuration_markuplm import * + from .feature_extraction_markuplm import * + from .modeling_markuplm import * + from .processing_markuplm import * + from .tokenization_markuplm import * + from .tokenization_markuplm_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/markuplm/configuration_markuplm.py b/src/transformers/models/markuplm/configuration_markuplm.py index e348a5c5a1b41e..f8bee878e83bb1 100644 --- a/src/transformers/models/markuplm/configuration_markuplm.py +++ b/src/transformers/models/markuplm/configuration_markuplm.py @@ -151,3 +151,6 @@ def __init__( self.tag_pad_id = tag_pad_id self.subs_pad_id = subs_pad_id self.xpath_unit_hidden_size = xpath_unit_hidden_size + + +__all__ = ["MarkupLMConfig"] diff --git a/src/transformers/models/markuplm/feature_extraction_markuplm.py b/src/transformers/models/markuplm/feature_extraction_markuplm.py index e3effdc910a8c7..9b1bd29060bf9a 100644 --- a/src/transformers/models/markuplm/feature_extraction_markuplm.py +++ b/src/transformers/models/markuplm/feature_extraction_markuplm.py @@ -181,3 +181,6 @@ def __call__(self, html_strings) -> BatchFeature: encoded_inputs = BatchFeature(data=data, tensor_type=None) return encoded_inputs + + +__all__ = ["MarkupLMFeatureExtractor"] diff --git a/src/transformers/models/markuplm/modeling_markuplm.py b/src/transformers/models/markuplm/modeling_markuplm.py index 3c1935e7b04214..166e63b84b999d 100755 --- a/src/transformers/models/markuplm/modeling_markuplm.py +++ 
b/src/transformers/models/markuplm/modeling_markuplm.py @@ -1292,3 +1292,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MarkupLMForQuestionAnswering", + "MarkupLMForSequenceClassification", + "MarkupLMForTokenClassification", + "MarkupLMModel", + "MarkupLMPreTrainedModel", +] diff --git a/src/transformers/models/markuplm/processing_markuplm.py b/src/transformers/models/markuplm/processing_markuplm.py index 757c146c58985a..8e822af6b2daa6 100644 --- a/src/transformers/models/markuplm/processing_markuplm.py +++ b/src/transformers/models/markuplm/processing_markuplm.py @@ -145,3 +145,6 @@ def decode(self, *args, **kwargs): def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names return tokenizer_input_names + + +__all__ = ["MarkupLMProcessor"] diff --git a/src/transformers/models/markuplm/tokenization_markuplm.py b/src/transformers/models/markuplm/tokenization_markuplm.py index e5de1e4e765c93..7ad5054aef5aa5 100644 --- a/src/transformers/models/markuplm/tokenization_markuplm.py +++ b/src/transformers/models/markuplm/tokenization_markuplm.py @@ -1466,3 +1466,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs + + +__all__ = ["MarkupLMTokenizer"] diff --git a/src/transformers/models/markuplm/tokenization_markuplm_fast.py b/src/transformers/models/markuplm/tokenization_markuplm_fast.py index 796459876425b4..a7ef344f4e3b7d 100644 --- a/src/transformers/models/markuplm/tokenization_markuplm_fast.py +++ b/src/transformers/models/markuplm/tokenization_markuplm_fast.py @@ -932,3 +932,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["MarkupLMTokenizerFast"] diff --git a/src/transformers/models/mask2former/__init__.py b/src/transformers/models/mask2former/__init__.py index 7ede863452bc72..752787e79a1ec6 100644 --- a/src/transformers/models/mask2former/__init__.py +++ b/src/transformers/models/mask2former/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,58 +13,16 @@ # limitations under the License. 
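One small change rides along in packages like `llava`, `markuplm`, and (below) `mask2former` and `maskformer`: their old `_LazyModule(...)` calls passed no `module_spec`, while the rewritten lines pass `module_spec=__spec__` like every other model. Since the lazy module replaces the real one in `sys.modules`, carrying the original spec presumably keeps import-system metadata such as the loader and origin intact for tooling that inspects it:

```python
# Assumes transformers is installed; shows the metadata a spec carries.
import importlib.util

spec = importlib.util.find_spec("transformers.models.markuplm")
print(spec.name)    # transformers.models.markuplm
print(spec.origin)  # .../transformers/models/markuplm/__init__.py
```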
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mask2former": ["Mask2FormerConfig"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mask2former"] = [ - "Mask2FormerForUniversalSegmentation", - "Mask2FormerModel", - "Mask2FormerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_mask2former import Mask2FormerConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_mask2former import Mask2FormerImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mask2former import ( - Mask2FormerForUniversalSegmentation, - Mask2FormerModel, - Mask2FormerPreTrainedModel, - ) - - + from .configuration_mask2former import * + from .image_processing_mask2former import * + from .modeling_mask2former import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mask2former/configuration_mask2former.py b/src/transformers/models/mask2former/configuration_mask2former.py index 5126b3f73cdebd..af247a19a8de32 100644 --- a/src/transformers/models/mask2former/configuration_mask2former.py +++ b/src/transformers/models/mask2former/configuration_mask2former.py @@ -251,3 +251,6 @@ def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs): backbone_config=backbone_config, **kwargs, ) + + +__all__ = ["Mask2FormerConfig"] diff --git a/src/transformers/models/mask2former/image_processing_mask2former.py b/src/transformers/models/mask2former/image_processing_mask2former.py index 555ee6e956709f..1184207744c764 100644 --- a/src/transformers/models/mask2former/image_processing_mask2former.py +++ b/src/transformers/models/mask2former/image_processing_mask2former.py @@ -1237,3 +1237,6 @@ def post_process_panoptic_segmentation( results.append({"segmentation": segmentation, "segments_info": segments}) return results + + +__all__ = ["Mask2FormerImageProcessor"] diff --git a/src/transformers/models/mask2former/modeling_mask2former.py b/src/transformers/models/mask2former/modeling_mask2former.py index e91d0357545102..887a617b824129 100644 --- a/src/transformers/models/mask2former/modeling_mask2former.py +++ b/src/transformers/models/mask2former/modeling_mask2former.py @@ -2571,3 +2571,6 @@ def forward( if loss is not None: output = (loss) + output return output + + +__all__ = ["Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel"] diff --git a/src/transformers/models/maskformer/__init__.py b/src/transformers/models/maskformer/__init__.py index 78aa54a4656150..144ae7d0a7bab6 100644 --- a/src/transformers/models/maskformer/__init__.py +++ 
b/src/transformers/models/maskformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,72 +13,19 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_maskformer": ["MaskFormerConfig"], - "configuration_maskformer_swin": ["MaskFormerSwinConfig"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"] - _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_maskformer"] = [ - "MaskFormerForInstanceSegmentation", - "MaskFormerModel", - "MaskFormerPreTrainedModel", - ] - _import_structure["modeling_maskformer_swin"] = [ - "MaskFormerSwinBackbone", - "MaskFormerSwinModel", - "MaskFormerSwinPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_maskformer import MaskFormerConfig - from .configuration_maskformer_swin import MaskFormerSwinConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_maskformer import MaskFormerFeatureExtractor - from .image_processing_maskformer import MaskFormerImageProcessor - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_maskformer import ( - MaskFormerForInstanceSegmentation, - MaskFormerModel, - MaskFormerPreTrainedModel, - ) - from .modeling_maskformer_swin import ( - MaskFormerSwinBackbone, - MaskFormerSwinModel, - MaskFormerSwinPreTrainedModel, - ) - - + from .configuration_maskformer import * + from .configuration_maskformer_swin import * + from .feature_extraction_maskformer import * + from .image_processing_maskformer import * + from .modeling_maskformer import * + from .modeling_maskformer_swin import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/maskformer/configuration_maskformer.py b/src/transformers/models/maskformer/configuration_maskformer.py index d28ef6ca76d295..b5c42f9425d746 100644 --- a/src/transformers/models/maskformer/configuration_maskformer.py +++ b/src/transformers/models/maskformer/configuration_maskformer.py @@ -221,3 +221,6 @@ def from_backbone_and_decoder_configs( decoder_config=decoder_config, **kwargs, ) + + +__all__ = ["MaskFormerConfig"] diff --git a/src/transformers/models/maskformer/configuration_maskformer_swin.py b/src/transformers/models/maskformer/configuration_maskformer_swin.py index 1cc2feffbff314..f3010e71d017d5 100644 --- a/src/transformers/models/maskformer/configuration_maskformer_swin.py +++ 
b/src/transformers/models/maskformer/configuration_maskformer_swin.py @@ -148,3 +148,6 @@ def __init__( self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["MaskFormerSwinConfig"] diff --git a/src/transformers/models/maskformer/feature_extraction_maskformer.py b/src/transformers/models/maskformer/feature_extraction_maskformer.py index 848c8e128296a0..6ce45471abd093 100644 --- a/src/transformers/models/maskformer/feature_extraction_maskformer.py +++ b/src/transformers/models/maskformer/feature_extraction_maskformer.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["MaskFormerFeatureExtractor"] diff --git a/src/transformers/models/maskformer/image_processing_maskformer.py b/src/transformers/models/maskformer/image_processing_maskformer.py index f4eb1bb56f4eb9..104b68cbed4448 100644 --- a/src/transformers/models/maskformer/image_processing_maskformer.py +++ b/src/transformers/models/maskformer/image_processing_maskformer.py @@ -1273,3 +1273,6 @@ def post_process_panoptic_segmentation( results.append({"segmentation": segmentation, "segments_info": segments}) return results + + +__all__ = ["MaskFormerImageProcessor"] diff --git a/src/transformers/models/maskformer/modeling_maskformer.py b/src/transformers/models/maskformer/modeling_maskformer.py index a8398ec9725b30..9f067d3a158184 100644 --- a/src/transformers/models/maskformer/modeling_maskformer.py +++ b/src/transformers/models/maskformer/modeling_maskformer.py @@ -1876,3 +1876,6 @@ def forward( masks_queries_logits=masks_queries_logits, auxiliary_logits=auxiliary_logits, ) + + +__all__ = ["MaskFormerForInstanceSegmentation", "MaskFormerModel", "MaskFormerPreTrainedModel"] diff --git a/src/transformers/models/maskformer/modeling_maskformer_swin.py b/src/transformers/models/maskformer/modeling_maskformer_swin.py index 598e1d8186a24a..2597d2a03e6213 100644 --- a/src/transformers/models/maskformer/modeling_maskformer_swin.py +++ b/src/transformers/models/maskformer/modeling_maskformer_swin.py @@ -956,3 +956,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = ["MaskFormerSwinBackbone", "MaskFormerSwinModel", "MaskFormerSwinPreTrainedModel"] diff --git a/src/transformers/models/mbart/__init__.py b/src/transformers/models/mbart/__init__.py index 12575fcab74036..f3c5ef5767a450 100644 --- a/src/transformers/models/mbart/__init__.py +++ b/src/transformers/models/mbart/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,134 +13,19 @@ # limitations under the License. 
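Note: every package in this patch gets the same mechanical treatment as mask2former and maskformer above: the hand-maintained _import_structure dict and its is_*_available() guards are deleted, each submodule gains an explicit __all__, and the __init__ defers to define_import_structure. As a rough mental model (an illustrative sketch, not the actual implementation in src/transformers/utils/import_utils.py), the helper can be thought of as scanning the package for each submodule's __all__:

    import ast
    import os


    def sketch_define_import_structure(module_file):
        # Illustrative only: map each submodule to the names its __all__ exports.
        # The real helper also records backend requirements (torch/tf/flax/vision),
        # which is what makes the deleted availability guards redundant.
        structure = {}
        package_dir = os.path.dirname(module_file)
        for filename in sorted(os.listdir(package_dir)):
            if not filename.endswith(".py") or filename == "__init__.py":
                continue
            with open(os.path.join(package_dir, filename), encoding="utf-8") as f:
                tree = ast.parse(f.read())
            for node in tree.body:
                # Look for a module-level `__all__ = [...]` assignment.
                if isinstance(node, ast.Assign) and any(
                    isinstance(target, ast.Name) and target.id == "__all__"
                    for target in node.targets
                ):
                    structure[filename[:-3]] = {elt.value for elt in node.value.elts}
        return structure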
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_mbart": ["MBartConfig", "MBartOnnxConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mbart"] = ["MBartTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mbart"] = [ - "MBartForCausalLM", - "MBartForConditionalGeneration", - "MBartForQuestionAnswering", - "MBartForSequenceClassification", - "MBartModel", - "MBartPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mbart"] = [ - "TFMBartForConditionalGeneration", - "TFMBartModel", - "TFMBartPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_mbart"] = [ - "FlaxMBartForConditionalGeneration", - "FlaxMBartForQuestionAnswering", - "FlaxMBartForSequenceClassification", - "FlaxMBartModel", - "FlaxMBartPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mbart import MBartConfig, MBartOnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mbart import MBartTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mbart_fast import MBartTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mbart import ( - MBartForCausalLM, - MBartForConditionalGeneration, - MBartForQuestionAnswering, - MBartForSequenceClassification, - MBartModel, - MBartPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_mbart import ( - FlaxMBartForConditionalGeneration, - FlaxMBartForQuestionAnswering, - FlaxMBartForSequenceClassification, - FlaxMBartModel, - FlaxMBartPreTrainedModel, - ) - + from .configuration_mbart import * + from .modeling_flax_mbart import * + from .modeling_mbart import * + from .modeling_tf_mbart import * + from .tokenization_mbart import * + from .tokenization_mbart_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] 
= _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mbart/configuration_mbart.py b/src/transformers/models/mbart/configuration_mbart.py index 8a4fe14b6c831b..878f46b60728b6 100644 --- a/src/transformers/models/mbart/configuration_mbart.py +++ b/src/transformers/models/mbart/configuration_mbart.py @@ -385,3 +385,6 @@ def _flatten_past_key_values_(self, flattened_output, name, idx, t): flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t ) + + +__all__ = ["MBartConfig", "MBartOnnxConfig"] diff --git a/src/transformers/models/mbart/modeling_flax_mbart.py b/src/transformers/models/mbart/modeling_flax_mbart.py index 83e4dcaee279c3..9583c076748411 100644 --- a/src/transformers/models/mbart/modeling_flax_mbart.py +++ b/src/transformers/models/mbart/modeling_flax_mbart.py @@ -1769,3 +1769,12 @@ class FlaxMBartForQuestionAnswering(FlaxMBartPreTrainedModel): FlaxSeq2SeqQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) + + +__all__ = [ + "FlaxMBartForConditionalGeneration", + "FlaxMBartForQuestionAnswering", + "FlaxMBartForSequenceClassification", + "FlaxMBartModel", + "FlaxMBartPreTrainedModel", +] diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index 95cd7c65ef32c2..960099183537c5 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -2117,3 +2117,13 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "MBartForCausalLM", + "MBartForConditionalGeneration", + "MBartForQuestionAnswering", + "MBartForSequenceClassification", + "MBartModel", + "MBartPreTrainedModel", +] diff --git a/src/transformers/models/mbart/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py index 8c9bb981207186..dd9bf976a21a9f 100644 --- a/src/transformers/models/mbart/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -1570,3 +1570,6 @@ def build(self, input_shape=None): if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) + + +__all__ = ["TFMBartForConditionalGeneration", "TFMBartModel", "TFMBartPreTrainedModel"] diff --git a/src/transformers/models/mbart/tokenization_mbart.py b/src/transformers/models/mbart/tokenization_mbart.py index d9da6cb45cb388..513b9699fcb6f9 100644 --- a/src/transformers/models/mbart/tokenization_mbart.py +++ b/src/transformers/models/mbart/tokenization_mbart.py @@ -335,3 +335,6 @@ def set_tgt_lang_special_tokens(self, lang: str) -> None: self.cur_lang_code = self.lang_code_to_id[lang] self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] + + +__all__ = ["MBartTokenizer"] diff --git a/src/transformers/models/mbart/tokenization_mbart_fast.py b/src/transformers/models/mbart/tokenization_mbart_fast.py index 71107bf0cdaf47..86aa9181ece0d1 100644 --- a/src/transformers/models/mbart/tokenization_mbart_fast.py +++ b/src/transformers/models/mbart/tokenization_mbart_fast.py @@ -268,3 +268,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["MBartTokenizerFast"] diff --git a/src/transformers/models/mbart50/__init__.py b/src/transformers/models/mbart50/__init__.py 
index b889e374bb6d1e..f7cd8c28da631f 100644 --- a/src/transformers/models/mbart50/__init__.py +++ b/src/transformers/models/mbart50/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,46 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mbart50"] = ["MBart50Tokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mbart50_fast"] = ["MBart50TokenizerFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mbart50 import MBart50Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mbart50_fast import MBart50TokenizerFast - + from .tokenization_mbart50 import * + from .tokenization_mbart50_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mbart50/tokenization_mbart50.py b/src/transformers/models/mbart50/tokenization_mbart50.py index 7acc6ecbf36bbd..c85039991c3406 100644 --- a/src/transformers/models/mbart50/tokenization_mbart50.py +++ b/src/transformers/models/mbart50/tokenization_mbart50.py @@ -352,3 +352,6 @@ def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None: self.cur_lang_code_id = self.lang_code_to_id[tgt_lang] self.prefix_tokens = [self.cur_lang_code_id] self.suffix_tokens = [self.eos_token_id] + + +__all__ = ["MBart50Tokenizer"] diff --git a/src/transformers/models/mbart50/tokenization_mbart50_fast.py b/src/transformers/models/mbart50/tokenization_mbart50_fast.py index cc4678f5f53cce..45f9ff9e81609c 100644 --- a/src/transformers/models/mbart50/tokenization_mbart50_fast.py +++ b/src/transformers/models/mbart50/tokenization_mbart50_fast.py @@ -257,3 +257,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["MBart50TokenizerFast"] diff --git a/src/transformers/models/megatron_bert/__init__.py b/src/transformers/models/megatron_bert/__init__.py index 259e56c25b59a4..09d805dc1bf67d 100644 --- a/src/transformers/models/megatron_bert/__init__.py +++ b/src/transformers/models/megatron_bert/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,55 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_megatron_bert": ["MegatronBertConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_megatron_bert"] = [ - "MegatronBertForCausalLM", - "MegatronBertForMaskedLM", - "MegatronBertForMultipleChoice", - "MegatronBertForNextSentencePrediction", - "MegatronBertForPreTraining", - "MegatronBertForQuestionAnswering", - "MegatronBertForSequenceClassification", - "MegatronBertForTokenClassification", - "MegatronBertModel", - "MegatronBertPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_megatron_bert import MegatronBertConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_megatron_bert import ( - MegatronBertForCausalLM, - MegatronBertForMaskedLM, - MegatronBertForMultipleChoice, - MegatronBertForNextSentencePrediction, - MegatronBertForPreTraining, - MegatronBertForQuestionAnswering, - MegatronBertForSequenceClassification, - MegatronBertForTokenClassification, - MegatronBertModel, - MegatronBertPreTrainedModel, - ) - + from .configuration_megatron_bert import * + from .modeling_megatron_bert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/megatron_bert/configuration_megatron_bert.py b/src/transformers/models/megatron_bert/configuration_megatron_bert.py index a0e216a5352dff..db81a10a475ef0 100644 --- a/src/transformers/models/megatron_bert/configuration_megatron_bert.py +++ b/src/transformers/models/megatron_bert/configuration_megatron_bert.py @@ -124,3 +124,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache + + +__all__ = ["MegatronBertConfig"] diff --git a/src/transformers/models/megatron_bert/modeling_megatron_bert.py b/src/transformers/models/megatron_bert/modeling_megatron_bert.py index 9be1e24aa2ab6c..cf67f0f1d23f29 100755 --- a/src/transformers/models/megatron_bert/modeling_megatron_bert.py +++ b/src/transformers/models/megatron_bert/modeling_megatron_bert.py @@ -1816,3 +1816,17 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MegatronBertForCausalLM", + "MegatronBertForMaskedLM", + "MegatronBertForMultipleChoice", + "MegatronBertForNextSentencePrediction", + "MegatronBertForPreTraining", + "MegatronBertForQuestionAnswering", + "MegatronBertForSequenceClassification", + "MegatronBertForTokenClassification", + "MegatronBertModel", + "MegatronBertPreTrainedModel", +] diff --git a/src/transformers/models/mgp_str/__init__.py b/src/transformers/models/mgp_str/__init__.py index 901425ca45d61a..a20c4cd4efc3ee 100644 --- a/src/transformers/models/mgp_str/__init__.py +++ b/src/transformers/models/mgp_str/__init__.py @@ -1,8 
+1,4 @@ -# flake8: noqa -# There's no way to ignore "F401 '...' imported but unused" warnings in this -# module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,44 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mgp_str": ["MgpstrConfig"], - "processing_mgp_str": ["MgpstrProcessor"], - "tokenization_mgp_str": ["MgpstrTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mgp_str"] = [ - "MgpstrModel", - "MgpstrPreTrainedModel", - "MgpstrForSceneTextRecognition", - ] if TYPE_CHECKING: - from .configuration_mgp_str import MgpstrConfig - from .processing_mgp_str import MgpstrProcessor - from .tokenization_mgp_str import MgpstrTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mgp_str import ( - MgpstrForSceneTextRecognition, - MgpstrModel, - MgpstrPreTrainedModel, - ) + from .configuration_mgp_str import * + from .modeling_mgp_str import * + from .processing_mgp_str import * + from .tokenization_mgp_str import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mgp_str/configuration_mgp_str.py b/src/transformers/models/mgp_str/configuration_mgp_str.py index d7850342dc71d4..c98dd783751b78 100644 --- a/src/transformers/models/mgp_str/configuration_mgp_str.py +++ b/src/transformers/models/mgp_str/configuration_mgp_str.py @@ -132,3 +132,6 @@ def __init__( self.drop_path_rate = drop_path_rate self.output_a3_attentions = output_a3_attentions self.initializer_range = initializer_range + + +__all__ = ["MgpstrConfig"] diff --git a/src/transformers/models/mgp_str/modeling_mgp_str.py b/src/transformers/models/mgp_str/modeling_mgp_str.py index 6b18c45e01d998..07fd2fb4cb4642 100644 --- a/src/transformers/models/mgp_str/modeling_mgp_str.py +++ b/src/transformers/models/mgp_str/modeling_mgp_str.py @@ -508,3 +508,6 @@ def forward( attentions=mgp_outputs.attentions, a3_attentions=all_a3_attentions, ) + + +__all__ = ["MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition"] diff --git a/src/transformers/models/mgp_str/processing_mgp_str.py b/src/transformers/models/mgp_str/processing_mgp_str.py index 207d4230ba09b7..81d49cb10c2142 100644 --- a/src/transformers/models/mgp_str/processing_mgp_str.py +++ b/src/transformers/models/mgp_str/processing_mgp_str.py @@ -228,3 +228,6 @@ def wp_decode(self, sequences): """ decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)] return decode_strs + + +__all__ = ["MgpstrProcessor"] diff --git a/src/transformers/models/mgp_str/tokenization_mgp_str.py b/src/transformers/models/mgp_str/tokenization_mgp_str.py index 
a34ba744c1960c..1c224d142f7b98 100644 --- a/src/transformers/models/mgp_str/tokenization_mgp_str.py +++ b/src/transformers/models/mgp_str/tokenization_mgp_str.py @@ -99,3 +99,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) + + +__all__ = ["MgpstrTokenizer"] diff --git a/src/transformers/models/mimi/__init__.py b/src/transformers/models/mimi/__init__.py index 43b2bec6caa5b3..0ed44a4324ddbf 100644 --- a/src/transformers/models/mimi/__init__.py +++ b/src/transformers/models/mimi/__init__.py @@ -13,45 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mimi": ["MimiConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mimi"] = [ - "MimiModel", - "MimiPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_mimi import ( - MimiConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mimi import ( - MimiModel, - MimiPreTrainedModel, - ) - + from .configuration_mimi import * + from .modeling_mimi import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mimi/configuration_mimi.py b/src/transformers/models/mimi/configuration_mimi.py index 5564b1a54ba63b..52031411fe677b 100644 --- a/src/transformers/models/mimi/configuration_mimi.py +++ b/src/transformers/models/mimi/configuration_mimi.py @@ -232,3 +232,6 @@ def encodec_frame_rate(self) -> int: def num_codebooks(self) -> int: # alias to num_quantizers return self.num_quantizers + + +__all__ = ["MimiConfig"] diff --git a/src/transformers/models/mimi/modeling_mimi.py b/src/transformers/models/mimi/modeling_mimi.py index cbdd2c663c5844..cbe16873ae744a 100644 --- a/src/transformers/models/mimi/modeling_mimi.py +++ b/src/transformers/models/mimi/modeling_mimi.py @@ -1752,3 +1752,6 @@ def forward( encoder_past_key_values=encoder_past_key_values, decoder_past_key_values=decoder_past_key_values, ) + + +__all__ = ["MimiModel", "MimiPreTrainedModel"] diff --git a/src/transformers/models/mistral/__init__.py b/src/transformers/models/mistral/__init__.py index 31441efe6527d2..e7a3792dc99827 100644 --- a/src/transformers/models/mistral/__init__.py +++ b/src/transformers/models/mistral/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Mistral AI and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,106 +13,17 @@ # limitations under the License. 
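Note: the __all__ lists appended to each module are what make the `from .modeling_mistral import *` lines in the TYPE_CHECKING branch safe: a star import re-exports exactly the names listed in __all__ and nothing else. A minimal self-contained demonstration (toy module names, not part of transformers):

    import sys
    import types

    # Build a throwaway module with one public and one private class.
    toy = types.ModuleType("toy_modeling")
    exec(
        "class ToyModel:\n    pass\n"
        "class ToyHelper:\n    pass\n"
        "__all__ = ['ToyModel']\n",
        toy.__dict__,
    )
    sys.modules["toy_modeling"] = toy

    namespace = {}
    exec("from toy_modeling import *", namespace)
    assert "ToyModel" in namespace       # listed in __all__, re-exported
    assert "ToyHelper" not in namespace  # unlisted, stays private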
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = { - "configuration_mistral": ["MistralConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mistral"] = [ - "MistralForCausalLM", - "MistralForQuestionAnswering", - "MistralModel", - "MistralPreTrainedModel", - "MistralForSequenceClassification", - "MistralForTokenClassification", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_mistral"] = [ - "FlaxMistralForCausalLM", - "FlaxMistralModel", - "FlaxMistralPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mistral"] = [ - "TFMistralModel", - "TFMistralForCausalLM", - "TFMistralForSequenceClassification", - "TFMistralPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mistral import MistralConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mistral import ( - MistralForCausalLM, - MistralForQuestionAnswering, - MistralForSequenceClassification, - MistralForTokenClassification, - MistralModel, - MistralPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_mistral import ( - FlaxMistralForCausalLM, - FlaxMistralModel, - FlaxMistralPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mistral import ( - TFMistralForCausalLM, - TFMistralForSequenceClassification, - TFMistralModel, - TFMistralPreTrainedModel, - ) - - + from .configuration_mistral import * + from .modeling_flax_mistral import * + from .modeling_mistral import * + from .modeling_tf_mistral import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mistral/configuration_mistral.py b/src/transformers/models/mistral/configuration_mistral.py index c4b874f2701743..2e59909e10feae 100644 --- a/src/transformers/models/mistral/configuration_mistral.py +++ b/src/transformers/models/mistral/configuration_mistral.py @@ -159,3 +159,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["MistralConfig"] diff --git a/src/transformers/models/mistral/modeling_flax_mistral.py b/src/transformers/models/mistral/modeling_flax_mistral.py index 3bff2a6281220e..89446a945e7f65 100644 --- a/src/transformers/models/mistral/modeling_flax_mistral.py +++ b/src/transformers/models/mistral/modeling_flax_mistral.py @@ -740,3 +740,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): _CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, ) + + +__all__ = ["FlaxMistralForCausalLM", 
"FlaxMistralModel", "FlaxMistralPreTrainedModel"] diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 6ed8178ed9821e..8acf8d0ea3a6fd 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -1388,3 +1388,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MistralForCausalLM", + "MistralForQuestionAnswering", + "MistralModel", + "MistralPreTrainedModel", + "MistralForSequenceClassification", + "MistralForTokenClassification", +] diff --git a/src/transformers/models/mistral/modeling_tf_mistral.py b/src/transformers/models/mistral/modeling_tf_mistral.py index 5c21dd3c3f5334..681207dac448c3 100644 --- a/src/transformers/models/mistral/modeling_tf_mistral.py +++ b/src/transformers/models/mistral/modeling_tf_mistral.py @@ -1052,3 +1052,6 @@ def build(self, input_shape=None): if getattr(self, "score", None) is not None: with tf.name_scope(self.score.name): self.score.build((self.config.hidden_size,)) + + +__all__ = ["TFMistralModel", "TFMistralForCausalLM", "TFMistralForSequenceClassification", "TFMistralPreTrainedModel"] diff --git a/src/transformers/models/mixtral/__init__.py b/src/transformers/models/mixtral/__init__.py index 4ee4834dd24984..cd60e53458f648 100644 --- a/src/transformers/models/mixtral/__init__.py +++ b/src/transformers/models/mixtral/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Mixtral AI and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,54 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_mixtral": ["MixtralConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mixtral"] = [ - "MixtralForCausalLM", - "MixtralForQuestionAnswering", - "MixtralModel", - "MixtralPreTrainedModel", - "MixtralForSequenceClassification", - "MixtralForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mixtral import MixtralConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mixtral import ( - MixtralForCausalLM, - MixtralForQuestionAnswering, - MixtralForSequenceClassification, - MixtralForTokenClassification, - MixtralModel, - MixtralPreTrainedModel, - ) - - + from .configuration_mixtral import * + from .modeling_mixtral import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mixtral/configuration_mixtral.py b/src/transformers/models/mixtral/configuration_mixtral.py index 686c214ef25ce5..7fae89557ec97b 100644 --- a/src/transformers/models/mixtral/configuration_mixtral.py +++ b/src/transformers/models/mixtral/configuration_mixtral.py @@ -171,3 +171,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["MixtralConfig"] diff --git a/src/transformers/models/mixtral/modeling_mixtral.py b/src/transformers/models/mixtral/modeling_mixtral.py index 0f04ef255c431d..72f3d07d58477b 100644 --- a/src/transformers/models/mixtral/modeling_mixtral.py +++ b/src/transformers/models/mixtral/modeling_mixtral.py @@ -1609,3 +1609,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MixtralForCausalLM", + "MixtralForQuestionAnswering", + "MixtralModel", + "MixtralPreTrainedModel", + "MixtralForSequenceClassification", + "MixtralForTokenClassification", +] diff --git a/src/transformers/models/mllama/__init__.py b/src/transformers/models/mllama/__init__.py index b45b08d878aafc..08d3994bbbe13e 100644 --- a/src/transformers/models/mllama/__init__.py +++ b/src/transformers/models/mllama/__init__.py @@ -13,72 +13,17 @@ # limitations under the License. 
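Note: for reference, every guard block deleted in this patch had the same shape; it probed one backend and registered that backend's symbols only when the probe succeeded. In isolation (hypothetical module and class names):

    from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

    _import_structure = {"configuration_toy": ["ToyConfig"]}  # hypothetical names

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        _import_structure["modeling_toy"] = ["ToyModel"]

After the refactor, the same availability information is presumably recovered by define_import_structure from the modules themselves, so a missing backend surfaces on first attribute access instead of being silently skipped at package-import time.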
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_mllama": ["MllamaConfig"], - "processing_mllama": ["MllamaProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mllama"] = [ - "MllamaForConditionalGeneration", - "MllamaForCausalLM", - "MllamaTextModel", - "MllamaVisionModel", - "MllamaPreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_mllama"] = ["MllamaImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mllama import MllamaConfig - from .processing_mllama import MllamaProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mllama import ( - MllamaForCausalLM, - MllamaForConditionalGeneration, - MllamaPreTrainedModel, - MllamaTextModel, - MllamaVisionModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_mllama import ( - MllamaImageProcessor, - ) - + from .configuration_mllama import * + from .image_processing_mllama import * + from .modeling_mllama import * + from .processing_mllama import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mllama/configuration_mllama.py b/src/transformers/models/mllama/configuration_mllama.py index 635ca503205f5f..fc655c9944ebd4 100644 --- a/src/transformers/models/mllama/configuration_mllama.py +++ b/src/transformers/models/mllama/configuration_mllama.py @@ -365,3 +365,6 @@ def __init__( self.text_config = text_config super().__init__(**kwargs) + + +__all__ = ["MllamaConfig"] diff --git a/src/transformers/models/mllama/image_processing_mllama.py b/src/transformers/models/mllama/image_processing_mllama.py index 241db002c8a645..3c852589672625 100644 --- a/src/transformers/models/mllama/image_processing_mllama.py +++ b/src/transformers/models/mllama/image_processing_mllama.py @@ -928,3 +928,6 @@ def resize( ) return image, (num_tiles_height, num_tiles_width) + + +__all__ = ["MllamaImageProcessor"] diff --git a/src/transformers/models/mllama/modeling_mllama.py b/src/transformers/models/mllama/modeling_mllama.py index d53a80dd892901..eb1d481e1e6717 100644 --- a/src/transformers/models/mllama/modeling_mllama.py +++ b/src/transformers/models/mllama/modeling_mllama.py @@ -2227,3 +2227,12 @@ def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_ [cross_attention_mask_prev, cross_attention_mask_prev[:, -1:, ...]], dim=1 ) return model_kwargs + + +__all__ = [ + "MllamaForConditionalGeneration", + "MllamaForCausalLM", + "MllamaTextModel", + "MllamaVisionModel", + "MllamaPreTrainedModel", +] diff --git a/src/transformers/models/mllama/processing_mllama.py b/src/transformers/models/mllama/processing_mllama.py index 3d6c08c35cd258..5905f3313f780c 100644 --- 
a/src/transformers/models/mllama/processing_mllama.py +++ b/src/transformers/models/mllama/processing_mllama.py @@ -368,3 +368,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(tokenizer_input_names + image_processor_input_names + ["cross_attention_mask"]) + + +__all__ = ["MllamaProcessor"] diff --git a/src/transformers/models/mluke/__init__.py b/src/transformers/models/mluke/__init__.py index aae869bdff5104..521db20088e161 100644 --- a/src/transformers/models/mluke/__init__.py +++ b/src/transformers/models/mluke/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,34 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {} - - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mluke"] = ["MLukeTokenizer"] if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mluke import MLukeTokenizer - - + from .tokenization_mluke import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mluke/tokenization_mluke.py b/src/transformers/models/mluke/tokenization_mluke.py index f087c0d92fc63f..79bf2237b2334d 100644 --- a/src/transformers/models/mluke/tokenization_mluke.py +++ b/src/transformers/models/mluke/tokenization_mluke.py @@ -1634,3 +1634,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["MLukeTokenizer"] diff --git a/src/transformers/models/mobilebert/__init__.py b/src/transformers/models/mobilebert/__init__.py index c085c3d8636c1e..4ea599122ddcbd 100644 --- a/src/transformers/models/mobilebert/__init__.py +++ b/src/transformers/models/mobilebert/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,129 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
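Note: two of the converted packages above (mask2former and mllama) previously built their _LazyModule without module_spec; the refactor standardizes on passing __spec__ everywhere. That keeps the swapped-in module introspectable, since importlib refuses to answer for an already-imported module whose __spec__ is None. A quick check (assumes an environment with this patch installed):

    import importlib.util

    import transformers.models.mobilebert  # the sys.modules swap happens here

    # For an already-imported module, find_spec returns module.__spec__ and
    # raises ValueError if that spec is None.
    spec = importlib.util.find_spec("transformers.models.mobilebert")
    assert spec is not None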
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_mobilebert": [ - "MobileBertConfig", - "MobileBertOnnxConfig", - ], - "tokenization_mobilebert": ["MobileBertTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mobilebert"] = [ - "MobileBertForMaskedLM", - "MobileBertForMultipleChoice", - "MobileBertForNextSentencePrediction", - "MobileBertForPreTraining", - "MobileBertForQuestionAnswering", - "MobileBertForSequenceClassification", - "MobileBertForTokenClassification", - "MobileBertLayer", - "MobileBertModel", - "MobileBertPreTrainedModel", - "load_tf_weights_in_mobilebert", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mobilebert"] = [ - "TFMobileBertForMaskedLM", - "TFMobileBertForMultipleChoice", - "TFMobileBertForNextSentencePrediction", - "TFMobileBertForPreTraining", - "TFMobileBertForQuestionAnswering", - "TFMobileBertForSequenceClassification", - "TFMobileBertForTokenClassification", - "TFMobileBertMainLayer", - "TFMobileBertModel", - "TFMobileBertPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mobilebert import ( - MobileBertConfig, - MobileBertOnnxConfig, - ) - from .tokenization_mobilebert import MobileBertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mobilebert_fast import MobileBertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mobilebert import ( - MobileBertForMaskedLM, - MobileBertForMultipleChoice, - MobileBertForNextSentencePrediction, - MobileBertForPreTraining, - MobileBertForQuestionAnswering, - MobileBertForSequenceClassification, - MobileBertForTokenClassification, - MobileBertLayer, - MobileBertModel, - MobileBertPreTrainedModel, - load_tf_weights_in_mobilebert, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mobilebert import ( - TFMobileBertForMaskedLM, - TFMobileBertForMultipleChoice, - TFMobileBertForNextSentencePrediction, - TFMobileBertForPreTraining, - TFMobileBertForQuestionAnswering, - TFMobileBertForSequenceClassification, - TFMobileBertForTokenClassification, - TFMobileBertMainLayer, - TFMobileBertModel, - TFMobileBertPreTrainedModel, - ) - + from .configuration_mobilebert import * + from .modeling_mobilebert import * + from .modeling_tf_mobilebert import * + from .tokenization_mobilebert import * + from .tokenization_mobilebert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, 
define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mobilebert/configuration_mobilebert.py b/src/transformers/models/mobilebert/configuration_mobilebert.py index 2370fa9b576d4f..742864573ab25f 100644 --- a/src/transformers/models/mobilebert/configuration_mobilebert.py +++ b/src/transformers/models/mobilebert/configuration_mobilebert.py @@ -179,3 +179,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["MobileBertConfig", "MobileBertOnnxConfig"] diff --git a/src/transformers/models/mobilebert/modeling_mobilebert.py b/src/transformers/models/mobilebert/modeling_mobilebert.py index 44007667c6b6af..24e29c2a21f223 100644 --- a/src/transformers/models/mobilebert/modeling_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_mobilebert.py @@ -1617,3 +1617,18 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MobileBertForMaskedLM", + "MobileBertForMultipleChoice", + "MobileBertForNextSentencePrediction", + "MobileBertForPreTraining", + "MobileBertForQuestionAnswering", + "MobileBertForSequenceClassification", + "MobileBertForTokenClassification", + "MobileBertLayer", + "MobileBertModel", + "MobileBertPreTrainedModel", + "load_tf_weights_in_mobilebert", +] diff --git a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py index d73c276b4f7d61..60815e09367611 100644 --- a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py @@ -1964,3 +1964,17 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFMobileBertForMaskedLM", + "TFMobileBertForMultipleChoice", + "TFMobileBertForNextSentencePrediction", + "TFMobileBertForPreTraining", + "TFMobileBertForQuestionAnswering", + "TFMobileBertForSequenceClassification", + "TFMobileBertForTokenClassification", + "TFMobileBertMainLayer", + "TFMobileBertModel", + "TFMobileBertPreTrainedModel", +] diff --git a/src/transformers/models/mobilebert/tokenization_mobilebert.py b/src/transformers/models/mobilebert/tokenization_mobilebert.py index e4faaf12d3834c..8a50f539dade06 100644 --- a/src/transformers/models/mobilebert/tokenization_mobilebert.py +++ b/src/transformers/models/mobilebert/tokenization_mobilebert.py @@ -508,3 +508,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["MobileBertTokenizer"] diff --git a/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py b/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py index 21057924092e9c..ec39eb2b26bd60 100644 --- a/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py +++ b/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py @@ -172,3 +172,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["MobileBertTokenizerFast"] diff --git a/src/transformers/models/mobilenet_v1/__init__.py b/src/transformers/models/mobilenet_v1/__init__.py index 6ff5725a21a8aa..05c7e8615ddf0f 100644 --- a/src/transformers/models/mobilenet_v1/__init__.py +++ 
b/src/transformers/models/mobilenet_v1/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,69 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_mobilenet_v1": [ - "MobileNetV1Config", - "MobileNetV1OnnxConfig", - ], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_mobilenet_v1"] = ["MobileNetV1FeatureExtractor"] - _import_structure["image_processing_mobilenet_v1"] = ["MobileNetV1ImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mobilenet_v1"] = [ - "MobileNetV1ForImageClassification", - "MobileNetV1Model", - "MobileNetV1PreTrainedModel", - "load_tf_weights_in_mobilenet_v1", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mobilenet_v1 import ( - MobileNetV1Config, - MobileNetV1OnnxConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_mobilenet_v1 import MobileNetV1FeatureExtractor - from .image_processing_mobilenet_v1 import MobileNetV1ImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mobilenet_v1 import ( - MobileNetV1ForImageClassification, - MobileNetV1Model, - MobileNetV1PreTrainedModel, - load_tf_weights_in_mobilenet_v1, - ) - - + from .configuration_mobilenet_v1 import * + from .feature_extraction_mobilenet_v1 import * + from .image_processing_mobilenet_v1 import * + from .modeling_mobilenet_v1 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py index 2bf204a66d778e..59e977b9521275 100644 --- a/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py @@ -121,3 +121,6 @@ def outputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["MobileNetV1Config", "MobileNetV1OnnxConfig"] diff --git a/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py index 34cdb11cd9f32f..b0d2d11a207241 100644 --- a/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = 
["MobileNetV1FeatureExtractor"] diff --git a/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py index 7981947307fdc2..de15f4fa151e6e 100644 --- a/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py @@ -300,3 +300,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["MobileNetV1ImageProcessor"] diff --git a/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py index 00f8c501b21220..a315ccd326c2b4 100755 --- a/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py @@ -477,3 +477,11 @@ def forward( logits=logits, hidden_states=outputs.hidden_states, ) + + +__all__ = [ + "MobileNetV1ForImageClassification", + "MobileNetV1Model", + "MobileNetV1PreTrainedModel", + "load_tf_weights_in_mobilenet_v1", +] diff --git a/src/transformers/models/mobilenet_v2/__init__.py b/src/transformers/models/mobilenet_v2/__init__.py index 5fcab8fe7c4e58..c29b5fc245ef93 100644 --- a/src/transformers/models/mobilenet_v2/__init__.py +++ b/src/transformers/models/mobilenet_v2/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,72 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_mobilenet_v2": [ - "MobileNetV2Config", - "MobileNetV2OnnxConfig", - ], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"] - _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mobilenet_v2"] = [ - "MobileNetV2ForImageClassification", - "MobileNetV2ForSemanticSegmentation", - "MobileNetV2Model", - "MobileNetV2PreTrainedModel", - "load_tf_weights_in_mobilenet_v2", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mobilenet_v2 import ( - MobileNetV2Config, - MobileNetV2OnnxConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor - from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mobilenet_v2 import ( - MobileNetV2ForImageClassification, - MobileNetV2ForSemanticSegmentation, - MobileNetV2Model, - MobileNetV2PreTrainedModel, - load_tf_weights_in_mobilenet_v2, - ) - - + from .configuration_mobilenet_v2 import * + from .feature_extraction_mobilenet_v2 
import * + from .image_processing_mobilenet_v2 import * + from .modeling_mobilenet_v2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py index 25bcfa57854711..6cf64847e2cd64 100644 --- a/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py @@ -149,3 +149,6 @@ def outputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["MobileNetV2Config", "MobileNetV2OnnxConfig"] diff --git a/src/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py index 62581e2c09988b..09043aa4834e8c 100644 --- a/src/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["MobileNetV2FeatureExtractor"] diff --git a/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py index 25d227bd582fb7..68299bc085fa16 100644 --- a/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py @@ -347,3 +347,6 @@ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation + + +__all__ = ["MobileNetV2ImageProcessor"] diff --git a/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py index 47ec95a79eec31..bd36d111f8af17 100755 --- a/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py @@ -857,3 +857,12 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = [ + "MobileNetV2ForImageClassification", + "MobileNetV2ForSemanticSegmentation", + "MobileNetV2Model", + "MobileNetV2PreTrainedModel", + "load_tf_weights_in_mobilenet_v2", +] diff --git a/src/transformers/models/mobilevit/__init__.py b/src/transformers/models/mobilevit/__init__.py index 942a963227b955..63f4f9c4720ad9 100644 --- a/src/transformers/models/mobilevit/__init__.py +++ b/src/transformers/models/mobilevit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,94 +13,18 @@ # limitations under the License. 
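Note: nothing in the TYPE_CHECKING branch ever runs: typing.TYPE_CHECKING is False at runtime and treated as always-true by static analyzers, so the eager star imports exist purely to give type checkers and IDEs the full public surface. A toy illustration (json stands in for a submodule):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # A static analyzer resolves this; the interpreter never executes it.
        import json  # noqa: F401

    assert TYPE_CHECKING is False
    assert "json" not in globals()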
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mobilevit": ["MobileViTConfig", "MobileViTOnnxConfig"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"] - _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mobilevit"] = [ - "MobileViTForImageClassification", - "MobileViTForSemanticSegmentation", - "MobileViTModel", - "MobileViTPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mobilevit"] = [ - "TFMobileViTForImageClassification", - "TFMobileViTForSemanticSegmentation", - "TFMobileViTModel", - "TFMobileViTPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_mobilevit import MobileViTConfig, MobileViTOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_mobilevit import MobileViTFeatureExtractor - from .image_processing_mobilevit import MobileViTImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mobilevit import ( - MobileViTForImageClassification, - MobileViTForSemanticSegmentation, - MobileViTModel, - MobileViTPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mobilevit import ( - TFMobileViTForImageClassification, - TFMobileViTForSemanticSegmentation, - TFMobileViTModel, - TFMobileViTPreTrainedModel, - ) - - + from .configuration_mobilevit import * + from .feature_extraction_mobilevit import * + from .image_processing_mobilevit import * + from .modeling_mobilevit import * + from .modeling_tf_mobilevit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mobilevit/configuration_mobilevit.py b/src/transformers/models/mobilevit/configuration_mobilevit.py index 500f8b23db0a53..2ef891a96f6ccc 100644 --- a/src/transformers/models/mobilevit/configuration_mobilevit.py +++ b/src/transformers/models/mobilevit/configuration_mobilevit.py @@ -167,3 +167,6 @@ def outputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["MobileViTConfig", "MobileViTOnnxConfig"] diff --git a/src/transformers/models/mobilevit/feature_extraction_mobilevit.py b/src/transformers/models/mobilevit/feature_extraction_mobilevit.py index a73baed6405c50..eb98b2f6e14522 100644 --- a/src/transformers/models/mobilevit/feature_extraction_mobilevit.py +++ 
b/src/transformers/models/mobilevit/feature_extraction_mobilevit.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["MobileViTFeatureExtractor"] diff --git a/src/transformers/models/mobilevit/image_processing_mobilevit.py b/src/transformers/models/mobilevit/image_processing_mobilevit.py index e6a8692edfd4f5..2825aa647dc0b8 100644 --- a/src/transformers/models/mobilevit/image_processing_mobilevit.py +++ b/src/transformers/models/mobilevit/image_processing_mobilevit.py @@ -480,3 +480,6 @@ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation + + +__all__ = ["MobileViTImageProcessor"] diff --git a/src/transformers/models/mobilevit/modeling_mobilevit.py b/src/transformers/models/mobilevit/modeling_mobilevit.py index 59c191b3789641..7f2a23238e5091 100755 --- a/src/transformers/models/mobilevit/modeling_mobilevit.py +++ b/src/transformers/models/mobilevit/modeling_mobilevit.py @@ -1070,3 +1070,11 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = [ + "MobileViTForImageClassification", + "MobileViTForSemanticSegmentation", + "MobileViTModel", + "MobileViTPreTrainedModel", +] diff --git a/src/transformers/models/mobilevit/modeling_tf_mobilevit.py b/src/transformers/models/mobilevit/modeling_tf_mobilevit.py index 499a7942e938fe..9939ddcb716838 100644 --- a/src/transformers/models/mobilevit/modeling_tf_mobilevit.py +++ b/src/transformers/models/mobilevit/modeling_tf_mobilevit.py @@ -1368,3 +1368,11 @@ def build(self, input_shape=None): if getattr(self, "segmentation_head", None) is not None: with tf.name_scope(self.segmentation_head.name): self.segmentation_head.build(None) + + +__all__ = [ + "TFMobileViTForImageClassification", + "TFMobileViTForSemanticSegmentation", + "TFMobileViTModel", + "TFMobileViTPreTrainedModel", +] diff --git a/src/transformers/models/mobilevitv2/__init__.py b/src/transformers/models/mobilevitv2/__init__.py index 770736c03df7ed..15e10a1d981593 100644 --- a/src/transformers/models/mobilevitv2/__init__.py +++ b/src/transformers/models/mobilevitv2/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,55 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mobilevitv2": [ - "MobileViTV2Config", - "MobileViTV2OnnxConfig", - ], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mobilevitv2"] = [ - "MobileViTV2ForImageClassification", - "MobileViTV2ForSemanticSegmentation", - "MobileViTV2Model", - "MobileViTV2PreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_mobilevitv2 import ( - MobileViTV2Config, - MobileViTV2OnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mobilevitv2 import ( - MobileViTV2ForImageClassification, - MobileViTV2ForSemanticSegmentation, - MobileViTV2Model, - MobileViTV2PreTrainedModel, - ) - + from .configuration_mobilevitv2 import * + from .modeling_mobilevitv2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py b/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py index 65260d6501ebfb..ff9a5977922fde 100644 --- a/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py +++ b/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py @@ -163,3 +163,6 @@ def outputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["MobileViTV2Config", "MobileViTV2OnnxConfig"] diff --git a/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py b/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py index ae043cf567f1bc..5848aada8befe2 100644 --- a/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py +++ b/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py @@ -1025,3 +1025,11 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = [ + "MobileViTV2ForImageClassification", + "MobileViTV2ForSemanticSegmentation", + "MobileViTV2Model", + "MobileViTV2PreTrainedModel", +] diff --git a/src/transformers/models/mpnet/__init__.py b/src/transformers/models/mpnet/__init__.py index 54c20d9f1967dd..0b7abc8357cc18 100644 --- a/src/transformers/models/mpnet/__init__.py +++ b/src/transformers/models/mpnet/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,116 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_mpnet": ["MPNetConfig"], - "tokenization_mpnet": ["MPNetTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mpnet_fast"] = ["MPNetTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mpnet"] = [ - "MPNetForMaskedLM", - "MPNetForMultipleChoice", - "MPNetForQuestionAnswering", - "MPNetForSequenceClassification", - "MPNetForTokenClassification", - "MPNetLayer", - "MPNetModel", - "MPNetPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mpnet"] = [ - "TFMPNetEmbeddings", - "TFMPNetForMaskedLM", - "TFMPNetForMultipleChoice", - "TFMPNetForQuestionAnswering", - "TFMPNetForSequenceClassification", - "TFMPNetForTokenClassification", - "TFMPNetMainLayer", - "TFMPNetModel", - "TFMPNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mpnet import MPNetConfig - from .tokenization_mpnet import MPNetTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mpnet_fast import MPNetTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mpnet import ( - MPNetForMaskedLM, - MPNetForMultipleChoice, - MPNetForQuestionAnswering, - MPNetForSequenceClassification, - MPNetForTokenClassification, - MPNetLayer, - MPNetModel, - MPNetPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mpnet import ( - TFMPNetEmbeddings, - TFMPNetForMaskedLM, - TFMPNetForMultipleChoice, - TFMPNetForQuestionAnswering, - TFMPNetForSequenceClassification, - TFMPNetForTokenClassification, - TFMPNetMainLayer, - TFMPNetModel, - TFMPNetPreTrainedModel, - ) - + from .configuration_mpnet import * + from .modeling_mpnet import * + from .modeling_tf_mpnet import * + from .tokenization_mpnet import * + from .tokenization_mpnet_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mpnet/configuration_mpnet.py b/src/transformers/models/mpnet/configuration_mpnet.py index 0abb89c9423e20..e80d6a0c30301f 100644 --- a/src/transformers/models/mpnet/configuration_mpnet.py +++ b/src/transformers/models/mpnet/configuration_mpnet.py @@ -111,3 +111,6 @@ def __init__( self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.relative_attention_num_buckets = relative_attention_num_buckets + + +__all__ = ["MPNetConfig"] diff --git a/src/transformers/models/mpnet/modeling_mpnet.py 
b/src/transformers/models/mpnet/modeling_mpnet.py index 11a27f5577da1b..6fc8b01ff43030 100644 --- a/src/transformers/models/mpnet/modeling_mpnet.py +++ b/src/transformers/models/mpnet/modeling_mpnet.py @@ -1050,3 +1050,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx): mask = input_ids.ne(padding_idx).int() incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "MPNetForMaskedLM", + "MPNetForMultipleChoice", + "MPNetForQuestionAnswering", + "MPNetForSequenceClassification", + "MPNetForTokenClassification", + "MPNetLayer", + "MPNetModel", + "MPNetPreTrainedModel", +] diff --git a/src/transformers/models/mpnet/modeling_tf_mpnet.py b/src/transformers/models/mpnet/modeling_tf_mpnet.py index d1864bd1970e0c..6c9dd5bbd05d86 100644 --- a/src/transformers/models/mpnet/modeling_tf_mpnet.py +++ b/src/transformers/models/mpnet/modeling_tf_mpnet.py @@ -1339,3 +1339,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFMPNetEmbeddings", + "TFMPNetForMaskedLM", + "TFMPNetForMultipleChoice", + "TFMPNetForQuestionAnswering", + "TFMPNetForSequenceClassification", + "TFMPNetForTokenClassification", + "TFMPNetMainLayer", + "TFMPNetModel", + "TFMPNetPreTrainedModel", +] diff --git a/src/transformers/models/mpnet/tokenization_mpnet.py b/src/transformers/models/mpnet/tokenization_mpnet.py index 8d46381f05677f..2c8da3b41cc8b7 100644 --- a/src/transformers/models/mpnet/tokenization_mpnet.py +++ b/src/transformers/models/mpnet/tokenization_mpnet.py @@ -532,3 +532,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["MPNetTokenizer"] diff --git a/src/transformers/models/mpnet/tokenization_mpnet_fast.py b/src/transformers/models/mpnet/tokenization_mpnet_fast.py index 433c3028fc2093..4a0c59d1e4d14b 100644 --- a/src/transformers/models/mpnet/tokenization_mpnet_fast.py +++ b/src/transformers/models/mpnet/tokenization_mpnet_fast.py @@ -204,3 +204,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["MPNetTokenizerFast"] diff --git a/src/transformers/models/mpt/__init__.py b/src/transformers/models/mpt/__init__.py index 49b3a0d61fcdb3..20bf8c5ebaf987 100644 --- a/src/transformers/models/mpt/__init__.py +++ b/src/transformers/models/mpt/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 HuggingFace Inc. team and MosaicML NLP team. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,50 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_mpt": ["MptConfig", "MptOnnxConfig"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mpt"] = [ - "MptForCausalLM", - "MptModel", - "MptPreTrainedModel", - "MptForSequenceClassification", - "MptForTokenClassification", - "MptForQuestionAnswering", - ] if TYPE_CHECKING: - from .configuration_mpt import MptConfig, MptOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mpt import ( - MptForCausalLM, - MptForQuestionAnswering, - MptForSequenceClassification, - MptForTokenClassification, - MptModel, - MptPreTrainedModel, - ) - + from .configuration_mpt import * + from .modeling_mpt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mpt/configuration_mpt.py b/src/transformers/models/mpt/configuration_mpt.py index 8ee3f8c0c07428..f5078bd9bfec8f 100644 --- a/src/transformers/models/mpt/configuration_mpt.py +++ b/src/transformers/models/mpt/configuration_mpt.py @@ -228,3 +228,6 @@ def __init__( self.use_cache = use_cache self.initializer_range = initializer_range super().__init__(**kwargs) + + +__all__ = ["MptConfig"] diff --git a/src/transformers/models/mpt/modeling_mpt.py b/src/transformers/models/mpt/modeling_mpt.py index 2b7e7ae5895a1e..4e132e646d2ea9 100644 --- a/src/transformers/models/mpt/modeling_mpt.py +++ b/src/transformers/models/mpt/modeling_mpt.py @@ -905,3 +905,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MptForCausalLM", + "MptModel", + "MptPreTrainedModel", + "MptForSequenceClassification", + "MptForTokenClassification", + "MptForQuestionAnswering", +] diff --git a/src/transformers/models/mra/__init__.py b/src/transformers/models/mra/__init__.py index 21d82eb3dabac1..2963ad0c97ba8c 100644 --- a/src/transformers/models/mra/__init__.py +++ b/src/transformers/models/mra/__init__.py @@ -1,8 +1,4 @@ -# flake8: noqa -# There's no way to ignore "F401 '...' imported but unused" warnings in this -# module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,50 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = {"configuration_mra": ["MraConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mra"] = [ - "MraForMaskedLM", - "MraForMultipleChoice", - "MraForQuestionAnswering", - "MraForSequenceClassification", - "MraForTokenClassification", - "MraLayer", - "MraModel", - "MraPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mra import MraConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mra import ( - MraForMaskedLM, - MraForMultipleChoice, - MraForQuestionAnswering, - MraForSequenceClassification, - MraForTokenClassification, - MraLayer, - MraModel, - MraPreTrainedModel, - ) + from .configuration_mra import * + from .modeling_mra import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mra/configuration_mra.py b/src/transformers/models/mra/configuration_mra.py index 6837de4f802180..16b064c98f7e6a 100644 --- a/src/transformers/models/mra/configuration_mra.py +++ b/src/transformers/models/mra/configuration_mra.py @@ -132,3 +132,6 @@ def __init__( self.approx_mode = approx_mode self.initial_prior_first_n_blocks = initial_prior_first_n_blocks self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks + + +__all__ = ["MraConfig"] diff --git a/src/transformers/models/mra/modeling_mra.py b/src/transformers/models/mra/modeling_mra.py index 09b21365937f00..6071d6a9e9c04d 100644 --- a/src/transformers/models/mra/modeling_mra.py +++ b/src/transformers/models/mra/modeling_mra.py @@ -1478,3 +1478,15 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MraForMaskedLM", + "MraForMultipleChoice", + "MraForQuestionAnswering", + "MraForSequenceClassification", + "MraForTokenClassification", + "MraLayer", + "MraModel", + "MraPreTrainedModel", +] diff --git a/src/transformers/models/mt5/__init__.py b/src/transformers/models/mt5/__init__.py index e142aa43676e61..444a8f8cc8e020 100644 --- a/src/transformers/models/mt5/__init__.py +++ b/src/transformers/models/mt5/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,113 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -if is_sentencepiece_available(): - from ..t5.tokenization_t5 import T5Tokenizer -else: - from ...utils.dummy_sentencepiece_objects import T5Tokenizer - -MT5Tokenizer = T5Tokenizer - -if is_tokenizers_available(): - from ..t5.tokenization_t5_fast import T5TokenizerFast -else: - from ...utils.dummy_tokenizers_objects import T5TokenizerFast - -MT5TokenizerFast = T5TokenizerFast - -_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mt5"] = [ - "MT5EncoderModel", - "MT5ForConditionalGeneration", - "MT5ForQuestionAnswering", - "MT5ForSequenceClassification", - "MT5ForTokenClassification", - "MT5Model", - "MT5PreTrainedModel", - "MT5Stack", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mt5 import MT5Config, MT5OnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mt5 import ( - MT5EncoderModel, - MT5ForConditionalGeneration, - MT5ForQuestionAnswering, - MT5ForSequenceClassification, - MT5ForTokenClassification, - MT5Model, - MT5PreTrainedModel, - MT5Stack, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model - + from .configuration_mt5 import * + from .modeling_flax_mt5 import * + from .modeling_mt5 import * + from .modeling_tf_mt5 import * + from .tokenization_mt5 import * + from .tokenization_mt5_fast import * else: import sys - sys.modules[__name__] = _LazyModule( - __name__, - globals()["__file__"], - _import_structure, - extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast}, - module_spec=__spec__, - ) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mt5/configuration_mt5.py b/src/transformers/models/mt5/configuration_mt5.py index 267179f81247e8..8b903b908e42f0 100644 --- a/src/transformers/models/mt5/configuration_mt5.py +++ b/src/transformers/models/mt5/configuration_mt5.py @@ -177,3 +177,6 @@ def default_onnx_opset(self) -> int: @property def atol_for_validation(self) -> float: return 5e-4 + + +__all__ = ["MT5Config", "MT5OnnxConfig"] diff --git
a/src/transformers/models/mt5/modeling_flax_mt5.py b/src/transformers/models/mt5/modeling_flax_mt5.py index fbb5b107f55e23..13bd83b75034ba 100644 --- a/src/transformers/models/mt5/modeling_flax_mt5.py +++ b/src/transformers/models/mt5/modeling_flax_mt5.py @@ -118,3 +118,6 @@ class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration): model_type = "mt5" config_class = MT5Config + + +__all__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] diff --git a/src/transformers/models/mt5/modeling_mt5.py b/src/transformers/models/mt5/modeling_mt5.py index 659a84c5fe3784..26706de1eff025 100644 --- a/src/transformers/models/mt5/modeling_mt5.py +++ b/src/transformers/models/mt5/modeling_mt5.py @@ -2543,3 +2543,15 @@ def forward( encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) + + +__all__ = [ + "MT5EncoderModel", + "MT5ForConditionalGeneration", + "MT5ForQuestionAnswering", + "MT5ForSequenceClassification", + "MT5ForTokenClassification", + "MT5Model", + "MT5PreTrainedModel", + "MT5Stack", +] diff --git a/src/transformers/models/mt5/modeling_tf_mt5.py b/src/transformers/models/mt5/modeling_tf_mt5.py index 7270a54948c4fa..6152aea0a5acad 100644 --- a/src/transformers/models/mt5/modeling_tf_mt5.py +++ b/src/transformers/models/mt5/modeling_tf_mt5.py @@ -93,3 +93,6 @@ class TFMT5EncoderModel(TFT5EncoderModel): model_type = "mt5" config_class = MT5Config + + +__all__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] diff --git a/src/transformers/models/mt5/tokenization_mt5.py b/src/transformers/models/mt5/tokenization_mt5.py new file mode 100644 index 00000000000000..a3058816ff2032 --- /dev/null +++ b/src/transformers/models/mt5/tokenization_mt5.py @@ -0,0 +1,24 @@ +# coding=utf-8 +# Copyright 2020, The T5 Authors and HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""mT5 tokenization file""" + +from ..t5 import T5Tokenizer + + +class MT5Tokenizer(T5Tokenizer): + pass + + +__all__ = ["MT5Tokenizer"] diff --git a/src/transformers/models/mt5/tokenization_mt5_fast.py b/src/transformers/models/mt5/tokenization_mt5_fast.py new file mode 100644 index 00000000000000..8737088cc44206 --- /dev/null +++ b/src/transformers/models/mt5/tokenization_mt5_fast.py @@ -0,0 +1,24 @@ +# coding=utf-8 +# Copyright 2020, The T5 Authors and HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""mT5 tokenization file""" + +from ..t5 import T5TokenizerFast + + +class MT5TokenizerFast(T5TokenizerFast): + pass + + +__all__ = ["MT5TokenizerFast"] diff --git a/src/transformers/models/musicgen/__init__.py b/src/transformers/models/musicgen/__init__.py index 3b03adae12fc76..880274309cbab4 100644 --- a/src/transformers/models/musicgen/__init__.py +++ b/src/transformers/models/musicgen/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,51 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_musicgen": [ - "MusicgenConfig", - "MusicgenDecoderConfig", - ], - "processing_musicgen": ["MusicgenProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_musicgen"] = [ - "MusicgenForConditionalGeneration", - "MusicgenForCausalLM", - "MusicgenModel", - "MusicgenPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_musicgen import ( - MusicgenConfig, - MusicgenDecoderConfig, - ) - from .processing_musicgen import MusicgenProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_musicgen import ( - MusicgenForCausalLM, - MusicgenForConditionalGeneration, - MusicgenModel, - MusicgenPreTrainedModel, - ) - + from .configuration_musicgen import * + from .modeling_musicgen import * + from .processing_musicgen import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/musicgen/configuration_musicgen.py b/src/transformers/models/musicgen/configuration_musicgen.py index 00c03072198092..6c38caf20dc413 100644 --- a/src/transformers/models/musicgen/configuration_musicgen.py +++ b/src/transformers/models/musicgen/configuration_musicgen.py @@ -242,3 +242,6 @@ def from_sub_models_config( # This is a property because you might want to change the codec model on the fly def sampling_rate(self): return self.audio_encoder.sampling_rate + + +__all__ = ["MusicgenConfig", "MusicgenDecoderConfig"] diff --git a/src/transformers/models/musicgen/modeling_musicgen.py b/src/transformers/models/musicgen/modeling_musicgen.py index 109ddfb626d26b..9e63a5327224d7 100644 --- a/src/transformers/models/musicgen/modeling_musicgen.py +++ b/src/transformers/models/musicgen/modeling_musicgen.py @@ -2751,3 +2751,6 @@ def get_unconditional_inputs(self, num_samples=1): attention_mask=attention_mask, guidance_scale=1.0, ) + + +__all__ = ["MusicgenForConditionalGeneration", "MusicgenForCausalLM", "MusicgenModel", "MusicgenPreTrainedModel"] diff --git a/src/transformers/models/musicgen/processing_musicgen.py b/src/transformers/models/musicgen/processing_musicgen.py index c153c5dfe1b9ee..deebf9045b4ffb 100644 --- a/src/transformers/models/musicgen/processing_musicgen.py +++ 
b/src/transformers/models/musicgen/processing_musicgen.py @@ -139,3 +139,6 @@ def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np. audio_values[i] = sliced_audio.reshape(channels, -1) return audio_values + + +__all__ = ["MusicgenProcessor"] diff --git a/src/transformers/models/mvp/__init__.py b/src/transformers/models/mvp/__init__.py index e865b8827c5cd8..beab37f65c1a8d 100644 --- a/src/transformers/models/mvp/__init__.py +++ b/src/transformers/models/mvp/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,65 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mvp": ["MvpConfig", "MvpOnnxConfig"], - "tokenization_mvp": ["MvpTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mvp"] = [ - "MvpForCausalLM", - "MvpForConditionalGeneration", - "MvpForQuestionAnswering", - "MvpForSequenceClassification", - "MvpModel", - "MvpPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_mvp import MvpConfig, MvpOnnxConfig - from .tokenization_mvp import MvpTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mvp_fast import MvpTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mvp import ( - MvpForCausalLM, - MvpForConditionalGeneration, - MvpForQuestionAnswering, - MvpForSequenceClassification, - MvpModel, - MvpPreTrainedModel, - ) - + from .configuration_mvp import * + from .modeling_mvp import * + from .tokenization_mvp import * + from .tokenization_mvp_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mvp/configuration_mvp.py b/src/transformers/models/mvp/configuration_mvp.py index 8e2317982b5721..a270461db40d60 100644 --- a/src/transformers/models/mvp/configuration_mvp.py +++ b/src/transformers/models/mvp/configuration_mvp.py @@ -178,3 +178,6 @@ def __init__( f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " "The config can simply be saved and uploaded again to be fixed." 
) + + +__all__ = ["MvpConfig"] diff --git a/src/transformers/models/mvp/modeling_mvp.py b/src/transformers/models/mvp/modeling_mvp.py index f68a4bb76b3e71..7348493f2189e2 100644 --- a/src/transformers/models/mvp/modeling_mvp.py +++ b/src/transformers/models/mvp/modeling_mvp.py @@ -1943,3 +1943,13 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "MvpForCausalLM", + "MvpForConditionalGeneration", + "MvpForQuestionAnswering", + "MvpForSequenceClassification", + "MvpModel", + "MvpPreTrainedModel", +] diff --git a/src/transformers/models/mvp/tokenization_mvp.py b/src/transformers/models/mvp/tokenization_mvp.py index 5a159320b7a3e0..e3a32082cce8b3 100644 --- a/src/transformers/models/mvp/tokenization_mvp.py +++ b/src/transformers/models/mvp/tokenization_mvp.py @@ -389,3 +389,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) + + +__all__ = ["MvpTokenizer"] diff --git a/src/transformers/models/mvp/tokenization_mvp_fast.py b/src/transformers/models/mvp/tokenization_mvp_fast.py index 5901c2bece4097..a66b4e178e8ae4 100644 --- a/src/transformers/models/mvp/tokenization_mvp_fast.py +++ b/src/transformers/models/mvp/tokenization_mvp_fast.py @@ -277,3 +277,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["MvpTokenizerFast"] diff --git a/src/transformers/models/myt5/__init__.py b/src/transformers/models/myt5/__init__.py index 9579f723a00ef3..65c8190ee6d94d 100644 --- a/src/transformers/models/myt5/__init__.py +++ b/src/transformers/models/myt5/__init__.py @@ -11,19 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING from ...utils import _LazyModule - - -_import_structure = {"tokenization_myt5": ["MyT5Tokenizer"]} +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .tokenization_myt5 import MyT5Tokenizer - + from .tokenization_myt5 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/myt5/tokenization_myt5.py b/src/transformers/models/myt5/tokenization_myt5.py index 69cb14b0cc9d02..f86e5e44c2a0e7 100644 --- a/src/transformers/models/myt5/tokenization_myt5.py +++ b/src/transformers/models/myt5/tokenization_myt5.py @@ -375,3 +375,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = with open(vocab_file, "w", encoding="utf-8") as writer: writer.write(json.dumps(self.byte_maps, indent=2, ensure_ascii=False)) return (vocab_file,) + + +__all__ = ["MyT5Tokenizer"] diff --git a/src/transformers/models/nemotron/__init__.py b/src/transformers/models/nemotron/__init__.py index bd0d1b57011dcf..2bd66ddd622e06 100644 --- a/src/transformers/models/nemotron/__init__.py +++ b/src/transformers/models/nemotron/__init__.py @@ -1,5 +1,4 @@ -# Copyright 2024 The HuggingFace Inc. team. 
All rights reserved. -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,55 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_torch_available, -) - - -_import_structure = { - "configuration_nemotron": ["NemotronConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_nemotron"] = [ - "NemotronForQuestionAnswering", - "NemotronForCausalLM", - "NemotronModel", - "NemotronPreTrainedModel", - "NemotronForSequenceClassification", - "NemotronForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_nemotron import NemotronConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_nemotron import ( - NemotronForCausalLM, - NemotronForQuestionAnswering, - NemotronForSequenceClassification, - NemotronForTokenClassification, - NemotronModel, - NemotronPreTrainedModel, - ) - - + from .configuration_nemotron import * + from .modeling_nemotron import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/nemotron/configuration_nemotron.py b/src/transformers/models/nemotron/configuration_nemotron.py index 7690703127ac92..a6f1ad0c5d9c4c 100644 --- a/src/transformers/models/nemotron/configuration_nemotron.py +++ b/src/transformers/models/nemotron/configuration_nemotron.py @@ -151,3 +151,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["NemotronConfig"] diff --git a/src/transformers/models/nemotron/modeling_nemotron.py b/src/transformers/models/nemotron/modeling_nemotron.py index 78dace1a53ce55..ca2298688aa6ca 100644 --- a/src/transformers/models/nemotron/modeling_nemotron.py +++ b/src/transformers/models/nemotron/modeling_nemotron.py @@ -1390,3 +1390,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "NemotronForQuestionAnswering", + "NemotronForCausalLM", + "NemotronModel", + "NemotronPreTrainedModel", + "NemotronForSequenceClassification", + "NemotronForTokenClassification", +] diff --git a/src/transformers/models/nllb/__init__.py b/src/transformers/models/nllb/__init__.py index 49e0e5c675ace2..5cdb326098a300 100644 --- a/src/transformers/models/nllb/__init__.py +++ b/src/transformers/models/nllb/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,52 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_nllb"] = ["NllbTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_nllb import NllbTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_nllb_fast import NllbTokenizerFast - + from .tokenization_nllb import * + from .tokenization_nllb_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/nllb/tokenization_nllb.py b/src/transformers/models/nllb/tokenization_nllb.py index b5ae28b8127379..02e07f0ecadd3f 100644 --- a/src/transformers/models/nllb/tokenization_nllb.py +++ b/src/transformers/models/nllb/tokenization_nllb.py @@ -387,3 +387,6 @@ def set_tgt_lang_special_tokens(self, lang: str) -> None: else: self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] + + +__all__ = ["NllbTokenizer"] diff --git a/src/transformers/models/nllb/tokenization_nllb_fast.py b/src/transformers/models/nllb/tokenization_nllb_fast.py index 013dbc97b35d4b..80b00e342462d1 100644 --- a/src/transformers/models/nllb/tokenization_nllb_fast.py +++ b/src/transformers/models/nllb/tokenization_nllb_fast.py @@ -326,3 +326,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["NllbTokenizerFast"] diff --git a/src/transformers/models/nllb_moe/__init__.py b/src/transformers/models/nllb_moe/__init__.py index ccb961ba38e8c0..e21bde18010d3c 100644 --- a/src/transformers/models/nllb_moe/__init__.py +++ b/src/transformers/models/nllb_moe/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,50 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_nllb_moe": ["NllbMoeConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_nllb_moe"] = [ - "NllbMoeForConditionalGeneration", - "NllbMoeModel", - "NllbMoePreTrainedModel", - "NllbMoeTop2Router", - "NllbMoeSparseMLP", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_nllb_moe import ( - NllbMoeConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_nllb_moe import ( - NllbMoeForConditionalGeneration, - NllbMoeModel, - NllbMoePreTrainedModel, - NllbMoeSparseMLP, - NllbMoeTop2Router, - ) - - + from .configuration_nllb_moe import * + from .modeling_nllb_moe import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/nllb_moe/configuration_nllb_moe.py b/src/transformers/models/nllb_moe/configuration_nllb_moe.py index ef12c199ef4ada..c2d7f7f1169923 100644 --- a/src/transformers/models/nllb_moe/configuration_nllb_moe.py +++ b/src/transformers/models/nllb_moe/configuration_nllb_moe.py @@ -214,3 +214,6 @@ def __init__( decoder_start_token_id=decoder_start_token_id, **kwargs, ) + + +__all__ = ["NllbMoeConfig"] diff --git a/src/transformers/models/nllb_moe/modeling_nllb_moe.py b/src/transformers/models/nllb_moe/modeling_nllb_moe.py index 9c095be16506e8..f48c3f51775007 100644 --- a/src/transformers/models/nllb_moe/modeling_nllb_moe.py +++ b/src/transformers/models/nllb_moe/modeling_nllb_moe.py @@ -1770,3 +1770,12 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "NllbMoeForConditionalGeneration", + "NllbMoeModel", + "NllbMoePreTrainedModel", + "NllbMoeTop2Router", + "NllbMoeSparseMLP", +] diff --git a/src/transformers/models/nougat/__init__.py b/src/transformers/models/nougat/__init__.py index 3cc8bbddf9e9ca..4c87d75e58e8fb 100644 --- a/src/transformers/models/nougat/__init__.py +++ b/src/transformers/models/nougat/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,51 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_vision_available - - -_import_structure = { - "processing_nougat": ["NougatProcessor"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_nougat_fast"] = ["NougatTokenizerFast"] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_nougat"] = ["NougatImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .processing_nougat import NougatProcessor - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_nougat_fast import NougatTokenizerFast - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_nougat import NougatImageProcessor - - + from .image_processing_nougat import * + from .processing_nougat import * + from .tokenization_nougat_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/nougat/image_processing_nougat.py b/src/transformers/models/nougat/image_processing_nougat.py index 792f4a14325a0a..00a9d069d10ee4 100644 --- a/src/transformers/models/nougat/image_processing_nougat.py +++ b/src/transformers/models/nougat/image_processing_nougat.py @@ -509,3 +509,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["NougatImageProcessor"] diff --git a/src/transformers/models/nougat/processing_nougat.py b/src/transformers/models/nougat/processing_nougat.py index 8f94c6718ba660..58a13454e862f3 100644 --- a/src/transformers/models/nougat/processing_nougat.py +++ b/src/transformers/models/nougat/processing_nougat.py @@ -158,3 +158,6 @@ def post_process_generation(self, *args, **kwargs): Please refer to the docstring of this method for more information. """ return self.tokenizer.post_process_generation(*args, **kwargs) + + +__all__ = ["NougatProcessor"] diff --git a/src/transformers/models/nougat/tokenization_nougat_fast.py b/src/transformers/models/nougat/tokenization_nougat_fast.py index 5d0a8934c05ee1..55802bab804a99 100644 --- a/src/transformers/models/nougat/tokenization_nougat_fast.py +++ b/src/transformers/models/nougat/tokenization_nougat_fast.py @@ -624,3 +624,6 @@ def post_process_generation( return [self.post_process_single(s, fix_markdown=fix_markdown) for s in generation] else: return self.post_process_single(generation, fix_markdown=fix_markdown) + + +__all__ = ["NougatTokenizerFast"] diff --git a/src/transformers/models/nystromformer/__init__.py b/src/transformers/models/nystromformer/__init__.py index 74f8a620204f3f..c32df6d8db9092 100644 --- a/src/transformers/models/nystromformer/__init__.py +++ b/src/transformers/models/nystromformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,53 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_nystromformer": ["NystromformerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_nystromformer"] = [ - "NystromformerForMaskedLM", - "NystromformerForMultipleChoice", - "NystromformerForQuestionAnswering", - "NystromformerForSequenceClassification", - "NystromformerForTokenClassification", - "NystromformerLayer", - "NystromformerModel", - "NystromformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_nystromformer import NystromformerConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_nystromformer import ( - NystromformerForMaskedLM, - NystromformerForMultipleChoice, - NystromformerForQuestionAnswering, - NystromformerForSequenceClassification, - NystromformerForTokenClassification, - NystromformerLayer, - NystromformerModel, - NystromformerPreTrainedModel, - ) - - + from .configuration_nystromformer import * + from .modeling_nystromformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/nystromformer/configuration_nystromformer.py b/src/transformers/models/nystromformer/configuration_nystromformer.py index e52b02d9f88a08..96a48b99fda4cd 100644 --- a/src/transformers/models/nystromformer/configuration_nystromformer.py +++ b/src/transformers/models/nystromformer/configuration_nystromformer.py @@ -127,3 +127,6 @@ def __init__( self.inv_coeff_init_option = inv_coeff_init_option self.layer_norm_eps = layer_norm_eps super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + +__all__ = ["NystromformerConfig"] diff --git a/src/transformers/models/nystromformer/modeling_nystromformer.py b/src/transformers/models/nystromformer/modeling_nystromformer.py index 4bb4c33fff629e..afa17d94e5db6b 100755 --- a/src/transformers/models/nystromformer/modeling_nystromformer.py +++ b/src/transformers/models/nystromformer/modeling_nystromformer.py @@ -1110,3 +1110,15 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "NystromformerForMaskedLM", + "NystromformerForMultipleChoice", + "NystromformerForQuestionAnswering", + "NystromformerForSequenceClassification", + "NystromformerForTokenClassification", + "NystromformerLayer", + "NystromformerModel", + "NystromformerPreTrainedModel", +] diff --git a/src/transformers/models/olmo/__init__.py b/src/transformers/models/olmo/__init__.py index b94350cd331047..e804feda3eeed9 100644 --- a/src/transformers/models/olmo/__init__.py +++ b/src/transformers/models/olmo/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 EleutherAI and The HuggingFace Inc. team. All rights reserved. 
+# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,47 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_olmo": ["OlmoConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_olmo"] = [ - "OlmoForCausalLM", - "OlmoModel", - "OlmoPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_olmo import OlmoConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_olmo import ( - OlmoForCausalLM, - OlmoModel, - OlmoPreTrainedModel, - ) - + from .configuration_olmo import * + from .modeling_olmo import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/olmo/configuration_olmo.py b/src/transformers/models/olmo/configuration_olmo.py index 77a3b18e364ecf..d3edb90772f23a 100644 --- a/src/transformers/models/olmo/configuration_olmo.py +++ b/src/transformers/models/olmo/configuration_olmo.py @@ -179,3 +179,6 @@ def _rope_scaling_validation(self): ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") + + +__all__ = ["OlmoConfig"] diff --git a/src/transformers/models/olmo/modeling_olmo.py b/src/transformers/models/olmo/modeling_olmo.py index 8b40c41e34dcd3..9e769e9601509c 100644 --- a/src/transformers/models/olmo/modeling_olmo.py +++ b/src/transformers/models/olmo/modeling_olmo.py @@ -1136,3 +1136,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["OlmoForCausalLM", "OlmoModel", "OlmoPreTrainedModel"] diff --git a/src/transformers/models/olmoe/__init__.py b/src/transformers/models/olmoe/__init__.py index 633fc446802670..e0e18d77cbad11 100644 --- a/src/transformers/models/olmoe/__init__.py +++ b/src/transformers/models/olmoe/__init__.py @@ -1,3 +1,5 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,45 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_olmoe": ["OlmoeConfig"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_olmoe"] = [ - "OlmoeForCausalLM", - "OlmoeModel", - "OlmoePreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_olmoe import OlmoeConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_olmoe import ( - OlmoeForCausalLM, - OlmoeModel, - OlmoePreTrainedModel, - ) - + from .configuration_olmoe import * + from .modeling_olmoe import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/olmoe/configuration_olmoe.py b/src/transformers/models/olmoe/configuration_olmoe.py index dbee3bfa0bd4d7..0f24d5523bafd0 100644 --- a/src/transformers/models/olmoe/configuration_olmoe.py +++ b/src/transformers/models/olmoe/configuration_olmoe.py @@ -177,3 +177,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["OlmoeConfig"] diff --git a/src/transformers/models/olmoe/modeling_olmoe.py b/src/transformers/models/olmoe/modeling_olmoe.py index 4398e2f5c9a1fd..006803b4df999b 100644 --- a/src/transformers/models/olmoe/modeling_olmoe.py +++ b/src/transformers/models/olmoe/modeling_olmoe.py @@ -1319,3 +1319,6 @@ def forward( attentions=outputs.attentions, router_logits=outputs.router_logits, ) + + +__all__ = ["OlmoeForCausalLM", "OlmoeModel", "OlmoePreTrainedModel"] diff --git a/src/transformers/models/omdet_turbo/__init__.py b/src/transformers/models/omdet_turbo/__init__.py index 34eb6386298fb8..a8b49479b2599b 100644 --- a/src/transformers/models/omdet_turbo/__init__.py +++ b/src/transformers/models/omdet_turbo/__init__.py @@ -11,46 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_omdet_turbo": ["OmDetTurboConfig"], - "processing_omdet_turbo": ["OmDetTurboProcessor"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_omdet_turbo"] = [ - "OmDetTurboForObjectDetection", - "OmDetTurboPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_omdet_turbo import ( - OmDetTurboConfig, - ) - from .processing_omdet_turbo import OmDetTurboProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_omdet_turbo import ( - OmDetTurboForObjectDetection, - OmDetTurboPreTrainedModel, - ) - + from .configuration_omdet_turbo import * + from .modeling_omdet_turbo import * + from .processing_omdet_turbo import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py b/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py index 8b4a620a17acc4..f26f01dc243a54 100644 --- a/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py +++ b/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py @@ -288,3 +288,6 @@ def __init__( self.is_encoder_decoder = is_encoder_decoder super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) + + +__all__ = ["OmDetTurboConfig"] diff --git a/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py b/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py index 0f44e4bd40208c..2680bc714d9980 100644 --- a/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py +++ b/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py @@ -1788,3 +1788,6 @@ def forward( encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) + + +__all__ = ["OmDetTurboForObjectDetection", "OmDetTurboPreTrainedModel"] diff --git a/src/transformers/models/omdet_turbo/processing_omdet_turbo.py b/src/transformers/models/omdet_turbo/processing_omdet_turbo.py index e4e2e5197f2e2c..7e8d0af8a10d16 100644 --- a/src/transformers/models/omdet_turbo/processing_omdet_turbo.py +++ b/src/transformers/models/omdet_turbo/processing_omdet_turbo.py @@ -353,3 +353,6 @@ def post_process_grounded_object_detection( ) return results + + +__all__ = ["OmDetTurboProcessor"] diff --git a/src/transformers/models/oneformer/__init__.py b/src/transformers/models/oneformer/__init__.py index 11ddde65d05991..195b56e9fbd042 100644 --- a/src/transformers/models/oneformer/__init__.py +++ b/src/transformers/models/oneformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,59 +13,17 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_oneformer": ["OneFormerConfig"], - "processing_oneformer": ["OneFormerProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_oneformer"] = ["OneFormerImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_oneformer"] = [ - "OneFormerForUniversalSegmentation", - "OneFormerModel", - "OneFormerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_oneformer import OneFormerConfig - from .processing_oneformer import OneFormerProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_oneformer import OneFormerImageProcessor - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_oneformer import ( - OneFormerForUniversalSegmentation, - OneFormerModel, - OneFormerPreTrainedModel, - ) - - + from .configuration_oneformer import * + from .image_processing_oneformer import * + from .modeling_oneformer import * + from .processing_oneformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/oneformer/configuration_oneformer.py b/src/transformers/models/oneformer/configuration_oneformer.py index 86f56a1f571b94..b289032afa24f1 100644 --- a/src/transformers/models/oneformer/configuration_oneformer.py +++ b/src/transformers/models/oneformer/configuration_oneformer.py @@ -272,3 +272,6 @@ def __init__( self.num_hidden_layers = decoder_layers super().__init__(**kwargs) + + +__all__ = ["OneFormerConfig"] diff --git a/src/transformers/models/oneformer/image_processing_oneformer.py b/src/transformers/models/oneformer/image_processing_oneformer.py index 1fefddc07b8014..0988e1fecaa5be 100644 --- a/src/transformers/models/oneformer/image_processing_oneformer.py +++ b/src/transformers/models/oneformer/image_processing_oneformer.py @@ -1351,3 +1351,6 @@ def post_process_panoptic_segmentation( results.append({"segmentation": segmentation, "segments_info": segments}) return results + + +__all__ = ["OneFormerImageProcessor"] diff --git a/src/transformers/models/oneformer/modeling_oneformer.py b/src/transformers/models/oneformer/modeling_oneformer.py index e237467c242bbc..585103f21b1412 100644 --- a/src/transformers/models/oneformer/modeling_oneformer.py +++ b/src/transformers/models/oneformer/modeling_oneformer.py @@ -3258,3 +3258,6 @@ def forward( if loss is not None: output = (loss) + output return output + + +__all__ = ["OneFormerForUniversalSegmentation", "OneFormerModel", "OneFormerPreTrainedModel"] diff --git a/src/transformers/models/oneformer/processing_oneformer.py b/src/transformers/models/oneformer/processing_oneformer.py index 9e55be5d6731c5..78fef3283cd8a4 100644 --- a/src/transformers/models/oneformer/processing_oneformer.py +++ 
b/src/transformers/models/oneformer/processing_oneformer.py @@ -202,3 +202,6 @@ def post_process_panoptic_segmentation(self, *args, **kwargs): Please refer to the docstring of this method for more information. """ return self.image_processor.post_process_panoptic_segmentation(*args, **kwargs) + + +__all__ = ["OneFormerProcessor"] diff --git a/src/transformers/models/openai/__init__.py b/src/transformers/models/openai/__init__.py index af4ebbfee6630b..a07b0ab669f3a8 100644 --- a/src/transformers/models/openai/__init__.py +++ b/src/transformers/models/openai/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,105 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_openai": ["OpenAIGPTConfig"], - "tokenization_openai": ["OpenAIGPTTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_openai_fast"] = ["OpenAIGPTTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_openai"] = [ - "OpenAIGPTDoubleHeadsModel", - "OpenAIGPTForSequenceClassification", - "OpenAIGPTLMHeadModel", - "OpenAIGPTModel", - "OpenAIGPTPreTrainedModel", - "load_tf_weights_in_openai_gpt", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_openai"] = [ - "TFOpenAIGPTDoubleHeadsModel", - "TFOpenAIGPTForSequenceClassification", - "TFOpenAIGPTLMHeadModel", - "TFOpenAIGPTMainLayer", - "TFOpenAIGPTModel", - "TFOpenAIGPTPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_openai import OpenAIGPTConfig - from .tokenization_openai import OpenAIGPTTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_openai_fast import OpenAIGPTTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_openai import ( - OpenAIGPTDoubleHeadsModel, - OpenAIGPTForSequenceClassification, - OpenAIGPTLMHeadModel, - OpenAIGPTModel, - OpenAIGPTPreTrainedModel, - load_tf_weights_in_openai_gpt, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_openai import ( - TFOpenAIGPTDoubleHeadsModel, - TFOpenAIGPTForSequenceClassification, - TFOpenAIGPTLMHeadModel, - TFOpenAIGPTMainLayer, - TFOpenAIGPTModel, - TFOpenAIGPTPreTrainedModel, - ) - + from .configuration_openai import * + from .modeling_openai import * + from .modeling_tf_openai import * + from .tokenization_openai import * + 
from .tokenization_openai_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/openai/configuration_openai.py b/src/transformers/models/openai/configuration_openai.py index dde668b32f7dab..b4f2fae9d304bc 100644 --- a/src/transformers/models/openai/configuration_openai.py +++ b/src/transformers/models/openai/configuration_openai.py @@ -151,3 +151,6 @@ def __init__( self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels super().__init__(**kwargs) + + +__all__ = ["OpenAIGPTConfig"] diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py index 02df7f213e2e7d..156f778e1ce625 100644 --- a/src/transformers/models/openai/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -855,3 +855,13 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = [ + "OpenAIGPTDoubleHeadsModel", + "OpenAIGPTForSequenceClassification", + "OpenAIGPTLMHeadModel", + "OpenAIGPTModel", + "OpenAIGPTPreTrainedModel", + "load_tf_weights_in_openai_gpt", +] diff --git a/src/transformers/models/openai/modeling_tf_openai.py b/src/transformers/models/openai/modeling_tf_openai.py index 0f911c1245f757..a11adccbe00806 100644 --- a/src/transformers/models/openai/modeling_tf_openai.py +++ b/src/transformers/models/openai/modeling_tf_openai.py @@ -935,3 +935,13 @@ def build(self, input_shape=None): if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) + + +__all__ = [ + "TFOpenAIGPTDoubleHeadsModel", + "TFOpenAIGPTForSequenceClassification", + "TFOpenAIGPTLMHeadModel", + "TFOpenAIGPTMainLayer", + "TFOpenAIGPTModel", + "TFOpenAIGPTPreTrainedModel", +] diff --git a/src/transformers/models/openai/tokenization_openai.py b/src/transformers/models/openai/tokenization_openai.py index 091dc5697314ea..cbfb41fc888fd6 100644 --- a/src/transformers/models/openai/tokenization_openai.py +++ b/src/transformers/models/openai/tokenization_openai.py @@ -391,3 +391,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = index += 1 return vocab_file, merge_file + + +__all__ = ["OpenAIGPTTokenizer"] diff --git a/src/transformers/models/openai/tokenization_openai_fast.py b/src/transformers/models/openai/tokenization_openai_fast.py index 41f4c8db9061aa..c17d7d29b7dd01 100644 --- a/src/transformers/models/openai/tokenization_openai_fast.py +++ b/src/transformers/models/openai/tokenization_openai_fast.py @@ -61,3 +61,6 @@ def do_lower_case(self): def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["OpenAIGPTTokenizerFast"] diff --git a/src/transformers/models/opt/__init__.py b/src/transformers/models/opt/__init__.py index 5ae39344b2ffce..d230de5ecadc75 100644 --- a/src/transformers/models/opt/__init__.py +++ b/src/transformers/models/opt/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,87 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_opt": ["OPTConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_opt"] = [ - "OPTForCausalLM", - "OPTModel", - "OPTPreTrainedModel", - "OPTForSequenceClassification", - "OPTForQuestionAnswering", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_opt"] = [ - "FlaxOPTForCausalLM", - "FlaxOPTModel", - "FlaxOPTPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_opt import OPTConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_opt import ( - OPTForCausalLM, - OPTForQuestionAnswering, - OPTForSequenceClassification, - OPTModel, - OPTPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel - + from .configuration_opt import * + from .modeling_flax_opt import * + from .modeling_opt import * + from .modeling_tf_opt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/opt/configuration_opt.py b/src/transformers/models/opt/configuration_opt.py index 455a6362a725d6..8f8838ad9ef18d 100644 --- a/src/transformers/models/opt/configuration_opt.py +++ b/src/transformers/models/opt/configuration_opt.py @@ -141,3 +141,6 @@ def __init__( # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 self._remove_final_layer_norm = _remove_final_layer_norm + + +__all__ = ["OPTConfig"] diff --git a/src/transformers/models/opt/modeling_flax_opt.py b/src/transformers/models/opt/modeling_flax_opt.py index c6296e4eeae001..2cbffbaffe1898 100644 --- a/src/transformers/models/opt/modeling_flax_opt.py +++ b/src/transformers/models/opt/modeling_flax_opt.py @@ -797,3 +797,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxBaseModelOutput, _CONFIG_FOR_DOC, ) + + +__all__ = ["FlaxOPTForCausalLM", "FlaxOPTModel", "FlaxOPTPreTrainedModel"] diff --git 
a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index e4ef510f099d66..2326f6050abd71 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -1485,3 +1485,12 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value + + +__all__ = [ + "OPTForCausalLM", + "OPTModel", + "OPTPreTrainedModel", + "OPTForSequenceClassification", + "OPTForQuestionAnswering", +] diff --git a/src/transformers/models/opt/modeling_tf_opt.py b/src/transformers/models/opt/modeling_tf_opt.py index 9c5dfa4ade6107..ec5ef7616a3c91 100644 --- a/src/transformers/models/opt/modeling_tf_opt.py +++ b/src/transformers/models/opt/modeling_tf_opt.py @@ -1092,3 +1092,6 @@ def build(self, input_shape=None): if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) + + +__all__ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"] diff --git a/src/transformers/models/owlv2/__init__.py b/src/transformers/models/owlv2/__init__.py index 83d432766d6992..6ce329420882f7 100644 --- a/src/transformers/models/owlv2/__init__.py +++ b/src/transformers/models/owlv2/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,77 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_owlv2": [ - "Owlv2Config", - "Owlv2TextConfig", - "Owlv2VisionConfig", - ], - "processing_owlv2": ["Owlv2Processor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_owlv2"] = ["Owlv2ImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_owlv2"] = [ - "Owlv2Model", - "Owlv2PreTrainedModel", - "Owlv2TextModel", - "Owlv2VisionModel", - "Owlv2ForObjectDetection", - ] - if TYPE_CHECKING: - from .configuration_owlv2 import ( - Owlv2Config, - Owlv2TextConfig, - Owlv2VisionConfig, - ) - from .processing_owlv2 import Owlv2Processor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_owlv2 import Owlv2ImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_owlv2 import ( - Owlv2ForObjectDetection, - Owlv2Model, - Owlv2PreTrainedModel, - Owlv2TextModel, - Owlv2VisionModel, - ) - + from .configuration_owlv2 import * + from .image_processing_owlv2 import * + from .modeling_owlv2 import * + from .processing_owlv2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git 
a/src/transformers/models/owlv2/configuration_owlv2.py b/src/transformers/models/owlv2/configuration_owlv2.py index f9085eaf9c1546..bf70afc7ccf527 100644 --- a/src/transformers/models/owlv2/configuration_owlv2.py +++ b/src/transformers/models/owlv2/configuration_owlv2.py @@ -284,3 +284,6 @@ def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwar config_dict["vision_config"] = vision_config return cls.from_dict(config_dict, **kwargs) + + +__all__ = ["Owlv2Config", "Owlv2TextConfig", "Owlv2VisionConfig"] diff --git a/src/transformers/models/owlv2/image_processing_owlv2.py b/src/transformers/models/owlv2/image_processing_owlv2.py index 3dcf145ea41ffc..f21c73ce9c22c9 100644 --- a/src/transformers/models/owlv2/image_processing_owlv2.py +++ b/src/transformers/models/owlv2/image_processing_owlv2.py @@ -608,3 +608,6 @@ def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_thresh results.append({"scores": box_scores, "labels": None, "boxes": boxes}) return results + + +__all__ = ["Owlv2ImageProcessor"] diff --git a/src/transformers/models/owlv2/modeling_owlv2.py b/src/transformers/models/owlv2/modeling_owlv2.py index d773396010a3cb..a21a79dcbfd3ac 100644 --- a/src/transformers/models/owlv2/modeling_owlv2.py +++ b/src/transformers/models/owlv2/modeling_owlv2.py @@ -1733,3 +1733,6 @@ def forward( text_model_output=text_outputs, vision_model_output=vision_outputs, ) + + +__all__ = ["Owlv2Model", "Owlv2PreTrainedModel", "Owlv2TextModel", "Owlv2VisionModel", "Owlv2ForObjectDetection"] diff --git a/src/transformers/models/owlv2/processing_owlv2.py b/src/transformers/models/owlv2/processing_owlv2.py index 8b580ca5026618..4a0b5a712e9d8f 100644 --- a/src/transformers/models/owlv2/processing_owlv2.py +++ b/src/transformers/models/owlv2/processing_owlv2.py @@ -188,3 +188,6 @@ def decode(self, *args, **kwargs): the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) + + +__all__ = ["Owlv2Processor"] diff --git a/src/transformers/models/owlvit/__init__.py b/src/transformers/models/owlvit/__init__.py index a6da47da9a0fb7..3f36ea5e4bd801 100644 --- a/src/transformers/models/owlvit/__init__.py +++ b/src/transformers/models/owlvit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,84 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_owlvit": [ - "OwlViTConfig", - "OwlViTOnnxConfig", - "OwlViTTextConfig", - "OwlViTVisionConfig", - ], - "processing_owlvit": ["OwlViTProcessor"], -} - - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"] - _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_owlvit"] = [ - "OwlViTModel", - "OwlViTPreTrainedModel", - "OwlViTTextModel", - "OwlViTVisionModel", - "OwlViTForObjectDetection", - ] - if TYPE_CHECKING: - from .configuration_owlvit import ( - OwlViTConfig, - OwlViTOnnxConfig, - OwlViTTextConfig, - OwlViTVisionConfig, - ) - from .processing_owlvit import OwlViTProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_owlvit import OwlViTFeatureExtractor - from .image_processing_owlvit import OwlViTImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_owlvit import ( - OwlViTForObjectDetection, - OwlViTModel, - OwlViTPreTrainedModel, - OwlViTTextModel, - OwlViTVisionModel, - ) - + from .configuration_owlvit import * + from .feature_extraction_owlvit import * + from .image_processing_owlvit import * + from .modeling_owlvit import * + from .processing_owlvit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/owlvit/configuration_owlvit.py b/src/transformers/models/owlvit/configuration_owlvit.py index 8be707ce99a1c6..d80ac73d57d443 100644 --- a/src/transformers/models/owlvit/configuration_owlvit.py +++ b/src/transformers/models/owlvit/configuration_owlvit.py @@ -330,3 +330,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 14 + + +__all__ = ["OwlViTConfig", "OwlViTOnnxConfig", "OwlViTTextConfig", "OwlViTVisionConfig"] diff --git a/src/transformers/models/owlvit/feature_extraction_owlvit.py b/src/transformers/models/owlvit/feature_extraction_owlvit.py index f85fd7f31ea422..2cd3b5a3ec4ba0 100644 --- a/src/transformers/models/owlvit/feature_extraction_owlvit.py +++ b/src/transformers/models/owlvit/feature_extraction_owlvit.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["OwlViTFeatureExtractor"] diff --git a/src/transformers/models/owlvit/image_processing_owlvit.py b/src/transformers/models/owlvit/image_processing_owlvit.py index 63c2d608955955..1d6fc69189fb36 100644 --- a/src/transformers/models/owlvit/image_processing_owlvit.py +++ b/src/transformers/models/owlvit/image_processing_owlvit.py @@ -596,3 
+596,6 @@ def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_thresh results.append({"scores": box_scores, "labels": None, "boxes": boxes}) return results + + +__all__ = ["OwlViTImageProcessor"] diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index 7c3e124a207ff7..6ad9519088f776 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -1670,3 +1670,6 @@ def forward( text_model_output=text_outputs, vision_model_output=vision_outputs, ) + + +__all__ = ["OwlViTModel", "OwlViTPreTrainedModel", "OwlViTTextModel", "OwlViTVisionModel", "OwlViTForObjectDetection"] diff --git a/src/transformers/models/owlvit/processing_owlvit.py b/src/transformers/models/owlvit/processing_owlvit.py index 2c7d490104bdfc..49e913a384ebd2 100644 --- a/src/transformers/models/owlvit/processing_owlvit.py +++ b/src/transformers/models/owlvit/processing_owlvit.py @@ -222,3 +222,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["OwlViTProcessor"] diff --git a/src/transformers/models/paligemma/__init__.py b/src/transformers/models/paligemma/__init__.py index 11ba4f3edd09e8..9048afe6adbdc0 100644 --- a/src/transformers/models/paligemma/__init__.py +++ b/src/transformers/models/paligemma/__init__.py @@ -13,42 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_paligemma": ["PaliGemmaConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_paligemma"] = [ - "PaliGemmaForConditionalGeneration", - "PaliGemmaPreTrainedModel", - ] - _import_structure["processing_paligemma"] = ["PaliGemmaProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_paligemma import PaliGemmaConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_paligemma import ( - PaliGemmaForConditionalGeneration, - PaliGemmaPreTrainedModel, - ) - from .processing_paligemma import PaliGemmaProcessor - - + from .configuration_paligemma import * + from .modeling_paligemma import * + from .processing_paligemma import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/paligemma/configuration_paligemma.py b/src/transformers/models/paligemma/configuration_paligemma.py index de60c501292b30..fe05f38514a739 100644 --- a/src/transformers/models/paligemma/configuration_paligemma.py +++ b/src/transformers/models/paligemma/configuration_paligemma.py @@ -145,3 +145,6 @@ def to_dict(self): output = super().to_dict() output.pop("_ignore_index", None) return output + + +__all__ = ["PaliGemmaConfig"] diff --git a/src/transformers/models/paligemma/modeling_paligemma.py b/src/transformers/models/paligemma/modeling_paligemma.py index b4a231561ba791..028772348c3ac8 100644 --- a/src/transformers/models/paligemma/modeling_paligemma.py +++ b/src/transformers/models/paligemma/modeling_paligemma.py @@ -618,3 
+618,6 @@ def prepare_inputs_for_generation( ) model_inputs["attention_mask"] = causal_mask return model_inputs + + +__all__ = ["PaliGemmaForConditionalGeneration", "PaliGemmaPreTrainedModel"] diff --git a/src/transformers/models/paligemma/processing_paligemma.py b/src/transformers/models/paligemma/processing_paligemma.py index cb35aab66cba49..6856e045f71a40 100644 --- a/src/transformers/models/paligemma/processing_paligemma.py +++ b/src/transformers/models/paligemma/processing_paligemma.py @@ -356,3 +356,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["PaliGemmaProcessor"] diff --git a/src/transformers/models/patchtsmixer/__init__.py b/src/transformers/models/patchtsmixer/__init__.py index b227ca1655c440..285c1970308a47 100644 --- a/src/transformers/models/patchtsmixer/__init__.py +++ b/src/transformers/models/patchtsmixer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,51 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_patchtsmixer": ["PatchTSMixerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_patchtsmixer"] = [ - "PatchTSMixerPreTrainedModel", - "PatchTSMixerModel", - "PatchTSMixerForPretraining", - "PatchTSMixerForPrediction", - "PatchTSMixerForTimeSeriesClassification", - "PatchTSMixerForRegression", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_patchtsmixer import ( - PatchTSMixerConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_patchtsmixer import ( - PatchTSMixerForPrediction, - PatchTSMixerForPretraining, - PatchTSMixerForRegression, - PatchTSMixerForTimeSeriesClassification, - PatchTSMixerModel, - PatchTSMixerPreTrainedModel, - ) - + from .configuration_patchtsmixer import * + from .modeling_patchtsmixer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py b/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py index 10089a3fef6ed4..83f374651a7cfc 100644 --- a/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py +++ b/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py @@ -230,3 +230,6 @@ def __init__( self.unmasked_channel_indices = unmasked_channel_indices self.norm_eps = norm_eps super().__init__(**kwargs) + + +__all__ = ["PatchTSMixerConfig"] diff --git a/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py 
b/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py index 209975b65e8f5d..37b76416061633 100644 --- a/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py +++ b/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py @@ -2170,3 +2170,13 @@ def generate( # [batch_size x num_samples x num_targets] samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets) return SamplePatchTSMixerRegressionOutput(sequences=samples) + + +__all__ = [ + "PatchTSMixerPreTrainedModel", + "PatchTSMixerModel", + "PatchTSMixerForPretraining", + "PatchTSMixerForPrediction", + "PatchTSMixerForTimeSeriesClassification", + "PatchTSMixerForRegression", +] diff --git a/src/transformers/models/patchtst/__init__.py b/src/transformers/models/patchtst/__init__.py index 5ba6316505afdf..f1a045bfb18204 100644 --- a/src/transformers/models/patchtst/__init__.py +++ b/src/transformers/models/patchtst/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,49 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_patchtst": ["PatchTSTConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_patchtst"] = [ - "PatchTSTModel", - "PatchTSTPreTrainedModel", - "PatchTSTForPrediction", - "PatchTSTForPretraining", - "PatchTSTForRegression", - "PatchTSTForClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_patchtst import PatchTSTConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_patchtst import ( - PatchTSTForClassification, - PatchTSTForPrediction, - PatchTSTForPretraining, - PatchTSTForRegression, - PatchTSTModel, - PatchTSTPreTrainedModel, - ) - + from .configuration_patchtst import * + from .modeling_patchtst import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/patchtst/configuration_patchtst.py b/src/transformers/models/patchtst/configuration_patchtst.py index 29d14491752c99..be19f2383acd52 100644 --- a/src/transformers/models/patchtst/configuration_patchtst.py +++ b/src/transformers/models/patchtst/configuration_patchtst.py @@ -251,3 +251,6 @@ def __init__( self.output_range = output_range super().__init__(**kwargs) + + +__all__ = ["PatchTSTConfig"] diff --git a/src/transformers/models/patchtst/modeling_patchtst.py b/src/transformers/models/patchtst/modeling_patchtst.py index 3c761bcae77ab4..645bbfbbd1605d 100755 --- a/src/transformers/models/patchtst/modeling_patchtst.py +++ b/src/transformers/models/patchtst/modeling_patchtst.py @@ -2030,3 +2030,13 @@ def generate( # samples: [bs x num_samples x num_targets] samples = torch.stack(samples, dim=1).view(-1, 
num_parallel_samples, self.config.num_targets) return SamplePatchTSTOutput(sequences=samples) + + +__all__ = [ + "PatchTSTModel", + "PatchTSTPreTrainedModel", + "PatchTSTForPrediction", + "PatchTSTForPretraining", + "PatchTSTForRegression", + "PatchTSTForClassification", +] diff --git a/src/transformers/models/pegasus/__init__.py b/src/transformers/models/pegasus/__init__.py index 15ac3b56cff038..4903c400f98272 100644 --- a/src/transformers/models/pegasus/__init__.py +++ b/src/transformers/models/pegasus/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,126 +13,19 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_pegasus": ["PegasusConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_pegasus"] = ["PegasusTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_pegasus_fast"] = ["PegasusTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pegasus"] = [ - "PegasusForCausalLM", - "PegasusForConditionalGeneration", - "PegasusModel", - "PegasusPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_pegasus"] = [ - "TFPegasusForConditionalGeneration", - "TFPegasusModel", - "TFPegasusPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_pegasus"] = [ - "FlaxPegasusForConditionalGeneration", - "FlaxPegasusModel", - "FlaxPegasusPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pegasus import PegasusConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_pegasus import PegasusTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_pegasus_fast import PegasusTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pegasus import ( - PegasusForCausalLM, - PegasusForConditionalGeneration, - PegasusModel, - PegasusPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_pegasus import TFPegasusForConditionalGeneration, TFPegasusModel, TFPegasusPreTrainedModel - - try: - if not is_flax_available(): - raise 
OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_pegasus import ( - FlaxPegasusForConditionalGeneration, - FlaxPegasusModel, - FlaxPegasusPreTrainedModel, - ) - + from .configuration_pegasus import * + from .modeling_flax_pegasus import * + from .modeling_pegasus import * + from .modeling_tf_pegasus import * + from .tokenization_pegasus import * + from .tokenization_pegasus_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pegasus/configuration_pegasus.py b/src/transformers/models/pegasus/configuration_pegasus.py index 2cc49857f3c975..23bff5d7719f45 100644 --- a/src/transformers/models/pegasus/configuration_pegasus.py +++ b/src/transformers/models/pegasus/configuration_pegasus.py @@ -159,3 +159,6 @@ def num_attention_heads(self) -> int: @property def hidden_size(self) -> int: return self.d_model + + +__all__ = ["PegasusConfig"] diff --git a/src/transformers/models/pegasus/modeling_flax_pegasus.py b/src/transformers/models/pegasus/modeling_flax_pegasus.py index e50fc1710c6aa0..269a3268fed127 100644 --- a/src/transformers/models/pegasus/modeling_flax_pegasus.py +++ b/src/transformers/models/pegasus/modeling_flax_pegasus.py @@ -1527,3 +1527,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_replace_return_docstrings( FlaxPegasusForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) + + +__all__ = ["FlaxPegasusForConditionalGeneration", "FlaxPegasusModel", "FlaxPegasusPreTrainedModel"] diff --git a/src/transformers/models/pegasus/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py index a737ef14d647cf..3b1cd70404c7e6 100755 --- a/src/transformers/models/pegasus/modeling_pegasus.py +++ b/src/transformers/models/pegasus/modeling_pegasus.py @@ -1629,3 +1629,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["PegasusForCausalLM", "PegasusForConditionalGeneration", "PegasusModel", "PegasusPreTrainedModel"] diff --git a/src/transformers/models/pegasus/modeling_tf_pegasus.py b/src/transformers/models/pegasus/modeling_tf_pegasus.py index 45e9fdbbed75f8..a51835dcfa447f 100644 --- a/src/transformers/models/pegasus/modeling_tf_pegasus.py +++ b/src/transformers/models/pegasus/modeling_tf_pegasus.py @@ -1569,3 +1569,6 @@ def build(self, input_shape=None): if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) + + +__all__ = ["TFPegasusForConditionalGeneration", "TFPegasusModel", "TFPegasusPreTrainedModel"] diff --git a/src/transformers/models/pegasus/tokenization_pegasus.py b/src/transformers/models/pegasus/tokenization_pegasus.py index 2763b739a9644a..5bab5073322a29 100644 --- a/src/transformers/models/pegasus/tokenization_pegasus.py +++ b/src/transformers/models/pegasus/tokenization_pegasus.py @@ -283,3 +283,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["PegasusTokenizer"] diff --git a/src/transformers/models/pegasus/tokenization_pegasus_fast.py 
b/src/transformers/models/pegasus/tokenization_pegasus_fast.py index 11ccb1ff4a15fb..af62976cb75145 100644 --- a/src/transformers/models/pegasus/tokenization_pegasus_fast.py +++ b/src/transformers/models/pegasus/tokenization_pegasus_fast.py @@ -214,3 +214,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["PegasusTokenizerFast"] diff --git a/src/transformers/models/pegasus_x/__init__.py b/src/transformers/models/pegasus_x/__init__.py index ce26210d3bc6b9..3d362bacf491ea 100644 --- a/src/transformers/models/pegasus_x/__init__.py +++ b/src/transformers/models/pegasus_x/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,43 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_pegasus_x": ["PegasusXConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pegasus_x"] = [ - "PegasusXForConditionalGeneration", - "PegasusXModel", - "PegasusXPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pegasus_x import PegasusXConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pegasus_x import ( - PegasusXForConditionalGeneration, - PegasusXModel, - PegasusXPreTrainedModel, - ) - - + from .configuration_pegasus_x import * + from .modeling_pegasus_x import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pegasus_x/configuration_pegasus_x.py b/src/transformers/models/pegasus_x/configuration_pegasus_x.py index b84c79656ef06b..c92f5662b5992f 100644 --- a/src/transformers/models/pegasus_x/configuration_pegasus_x.py +++ b/src/transformers/models/pegasus_x/configuration_pegasus_x.py @@ -172,3 +172,6 @@ def num_attention_heads(self) -> int: @property def hidden_size(self) -> int: return self.d_model + + +__all__ = ["PegasusXConfig"] diff --git a/src/transformers/models/pegasus_x/modeling_pegasus_x.py b/src/transformers/models/pegasus_x/modeling_pegasus_x.py index f90a8d2deb2651..646ab195947b90 100755 --- a/src/transformers/models/pegasus_x/modeling_pegasus_x.py +++ b/src/transformers/models/pegasus_x/modeling_pegasus_x.py @@ -1616,3 +1616,6 @@ def __init__(self, config): def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) + + +__all__ = ["PegasusXForConditionalGeneration", "PegasusXModel", "PegasusXPreTrainedModel"] diff --git a/src/transformers/models/perceiver/__init__.py b/src/transformers/models/perceiver/__init__.py index 5cc52d61977203..0268bda4a183e2 100644 --- a/src/transformers/models/perceiver/__init__.py +++ b/src/transformers/models/perceiver/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The 
HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,82 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_perceiver": ["PerceiverConfig", "PerceiverOnnxConfig"], - "tokenization_perceiver": ["PerceiverTokenizer"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"] - _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_perceiver"] = [ - "PerceiverForImageClassificationConvProcessing", - "PerceiverForImageClassificationFourier", - "PerceiverForImageClassificationLearned", - "PerceiverForMaskedLM", - "PerceiverForMultimodalAutoencoding", - "PerceiverForOpticalFlow", - "PerceiverForSequenceClassification", - "PerceiverLayer", - "PerceiverModel", - "PerceiverPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_perceiver import PerceiverConfig, PerceiverOnnxConfig - from .tokenization_perceiver import PerceiverTokenizer - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_perceiver import PerceiverFeatureExtractor - from .image_processing_perceiver import PerceiverImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_perceiver import ( - PerceiverForImageClassificationConvProcessing, - PerceiverForImageClassificationFourier, - PerceiverForImageClassificationLearned, - PerceiverForMaskedLM, - PerceiverForMultimodalAutoencoding, - PerceiverForOpticalFlow, - PerceiverForSequenceClassification, - PerceiverLayer, - PerceiverModel, - PerceiverPreTrainedModel, - ) - + from .configuration_perceiver import * + from .feature_extraction_perceiver import * + from .image_processing_perceiver import * + from .modeling_perceiver import * + from .tokenization_perceiver import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/perceiver/configuration_perceiver.py b/src/transformers/models/perceiver/configuration_perceiver.py index e2c9cca4c30f08..fc9da1d650421c 100644 --- a/src/transformers/models/perceiver/configuration_perceiver.py +++ b/src/transformers/models/perceiver/configuration_perceiver.py @@ -239,3 +239,6 @@ def generate_dummy_inputs( raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." 
) + + +__all__ = ["PerceiverConfig", "PerceiverOnnxConfig"] diff --git a/src/transformers/models/perceiver/feature_extraction_perceiver.py b/src/transformers/models/perceiver/feature_extraction_perceiver.py index 35f2a6c5c9e72d..b4aa5ce4a1ac94 100644 --- a/src/transformers/models/perceiver/feature_extraction_perceiver.py +++ b/src/transformers/models/perceiver/feature_extraction_perceiver.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["PerceiverFeatureExtractor"] diff --git a/src/transformers/models/perceiver/image_processing_perceiver.py b/src/transformers/models/perceiver/image_processing_perceiver.py index faacc873b9b0ee..b96bea73881b5c 100644 --- a/src/transformers/models/perceiver/image_processing_perceiver.py +++ b/src/transformers/models/perceiver/image_processing_perceiver.py @@ -346,3 +346,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["PerceiverImageProcessor"] diff --git a/src/transformers/models/perceiver/modeling_perceiver.py b/src/transformers/models/perceiver/modeling_perceiver.py index b6c233c7611218..c7212ec3a74eb4 100755 --- a/src/transformers/models/perceiver/modeling_perceiver.py +++ b/src/transformers/models/perceiver/modeling_perceiver.py @@ -3499,3 +3499,17 @@ def forward( final_inputs = torch.cat(padded_ls, dim=1) return final_inputs, modality_sizes, inputs_without_pos + + +__all__ = [ + "PerceiverForImageClassificationConvProcessing", + "PerceiverForImageClassificationFourier", + "PerceiverForImageClassificationLearned", + "PerceiverForMaskedLM", + "PerceiverForMultimodalAutoencoding", + "PerceiverForOpticalFlow", + "PerceiverForSequenceClassification", + "PerceiverLayer", + "PerceiverModel", + "PerceiverPreTrainedModel", +] diff --git a/src/transformers/models/perceiver/tokenization_perceiver.py b/src/transformers/models/perceiver/tokenization_perceiver.py index 90686b78dce0bc..2a7fe6f43c6d7a 100644 --- a/src/transformers/models/perceiver/tokenization_perceiver.py +++ b/src/transformers/models/perceiver/tokenization_perceiver.py @@ -195,3 +195,6 @@ def convert_tokens_to_string(self, tokens): # PerceiverTokenizer has no vocab file def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: return () + + +__all__ = ["PerceiverTokenizer"] diff --git a/src/transformers/models/persimmon/__init__.py b/src/transformers/models/persimmon/__init__.py index e1f24ca1b7c23d..cb71eae2547c59 100644 --- a/src/transformers/models/persimmon/__init__.py +++ b/src/transformers/models/persimmon/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 AdeptAI and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,52 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_persimmon": ["PersimmonConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_persimmon"] = [ - "PersimmonForCausalLM", - "PersimmonModel", - "PersimmonPreTrainedModel", - "PersimmonForSequenceClassification", - "PersimmonForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_persimmon import PersimmonConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_persimmon import ( - PersimmonForCausalLM, - PersimmonForSequenceClassification, - PersimmonForTokenClassification, - PersimmonModel, - PersimmonPreTrainedModel, - ) - - + from .configuration_persimmon import * + from .modeling_persimmon import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/persimmon/configuration_persimmon.py b/src/transformers/models/persimmon/configuration_persimmon.py index 7619d70c08fb7c..80ca823f28e1fe 100644 --- a/src/transformers/models/persimmon/configuration_persimmon.py +++ b/src/transformers/models/persimmon/configuration_persimmon.py @@ -171,3 +171,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["PersimmonConfig"] diff --git a/src/transformers/models/persimmon/modeling_persimmon.py b/src/transformers/models/persimmon/modeling_persimmon.py index 884ee4d86aafcc..cbe37997cb747d 100644 --- a/src/transformers/models/persimmon/modeling_persimmon.py +++ b/src/transformers/models/persimmon/modeling_persimmon.py @@ -1146,3 +1146,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "PersimmonForCausalLM", + "PersimmonModel", + "PersimmonPreTrainedModel", + "PersimmonForSequenceClassification", + "PersimmonForTokenClassification", +] diff --git a/src/transformers/models/phi/__init__.py b/src/transformers/models/phi/__init__.py index 662c0a9bf3487d..77e2ddd99c0311 100644 --- a/src/transformers/models/phi/__init__.py +++ b/src/transformers/models/phi/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Microsoft and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,57 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_phi": ["PhiConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_phi"] = [ - "PhiPreTrainedModel", - "PhiModel", - "PhiForCausalLM", - "PhiForSequenceClassification", - "PhiForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_phi import PhiConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_phi import ( - PhiForCausalLM, - PhiForSequenceClassification, - PhiForTokenClassification, - PhiModel, - PhiPreTrainedModel, - ) - - + from .configuration_phi import * + from .modeling_phi import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/phi/configuration_phi.py b/src/transformers/models/phi/configuration_phi.py index 6c871b7ea54fc7..26c207b8e4e8f4 100644 --- a/src/transformers/models/phi/configuration_phi.py +++ b/src/transformers/models/phi/configuration_phi.py @@ -198,3 +198,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["PhiConfig"] diff --git a/src/transformers/models/phi/modeling_phi.py b/src/transformers/models/phi/modeling_phi.py index 8e60798e857f03..c0c8b294bc8eb4 100644 --- a/src/transformers/models/phi/modeling_phi.py +++ b/src/transformers/models/phi/modeling_phi.py @@ -1407,3 +1407,12 @@ def forward( hidden_states=model_outputs.hidden_states, attentions=model_outputs.attentions, ) + + +__all__ = [ + "PhiPreTrainedModel", + "PhiModel", + "PhiForCausalLM", + "PhiForSequenceClassification", + "PhiForTokenClassification", +] diff --git a/src/transformers/models/phi3/__init__.py b/src/transformers/models/phi3/__init__.py index bfe766dfac9fef..0cb1e7a9cd04fb 100644 --- a/src/transformers/models/phi3/__init__.py +++ b/src/transformers/models/phi3/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 Microsoft and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,57 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_phi3": ["Phi3Config"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_phi3"] = [ - "Phi3PreTrainedModel", - "Phi3Model", - "Phi3ForCausalLM", - "Phi3ForSequenceClassification", - "Phi3ForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_phi3 import Phi3Config - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_phi3 import ( - Phi3ForCausalLM, - Phi3ForSequenceClassification, - Phi3ForTokenClassification, - Phi3Model, - Phi3PreTrainedModel, - ) - - + from .configuration_phi3 import * + from .modeling_phi3 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/phi3/configuration_phi3.py b/src/transformers/models/phi3/configuration_phi3.py index 4940f43e5bffe3..361c43c99eca8d 100644 --- a/src/transformers/models/phi3/configuration_phi3.py +++ b/src/transformers/models/phi3/configuration_phi3.py @@ -219,3 +219,6 @@ def _rope_scaling_validation(self): raise ValueError( f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}" ) + + +__all__ = ["Phi3Config"] diff --git a/src/transformers/models/phi3/modeling_phi3.py b/src/transformers/models/phi3/modeling_phi3.py index bae3f6d4cdaeaa..ab77bcfc246cfb 100644 --- a/src/transformers/models/phi3/modeling_phi3.py +++ b/src/transformers/models/phi3/modeling_phi3.py @@ -1520,3 +1520,12 @@ def forward( hidden_states=model_outputs.hidden_states, attentions=model_outputs.attentions, ) + + +__all__ = [ + "Phi3PreTrainedModel", + "Phi3Model", + "Phi3ForCausalLM", + "Phi3ForSequenceClassification", + "Phi3ForTokenClassification", +] diff --git a/src/transformers/models/phobert/__init__.py b/src/transformers/models/phobert/__init__.py index c974d994eca032..5c14d34aff6136 100644 --- a/src/transformers/models/phobert/__init__.py +++ b/src/transformers/models/phobert/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,19 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
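`_LazyModule` itself is untouched by this patch; for context, its contract is roughly the following (a condensed illustration, not the real class from `transformers.utils`):

```python
# Condensed illustration of the _LazyModule contract (assumed, simplified).
import importlib
from types import ModuleType


class LazyModuleSketch(ModuleType):
    def __init__(self, name, file, import_structure, module_spec=None):
        super().__init__(name)
        # Invert {module: {names}} into {name: module} for O(1) lookups.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__file__ = file
        self.__spec__ = module_spec

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache, so __getattr__ runs once per name
        return value
```

Nothing is imported until the first attribute access, which is why the rewritten `__init__` files stay cheap to import.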
- from typing import TYPE_CHECKING from ...utils import _LazyModule - - -_import_structure = {"tokenization_phobert": ["PhobertTokenizer"]} +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .tokenization_phobert import PhobertTokenizer - + from .tokenization_phobert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/phobert/tokenization_phobert.py b/src/transformers/models/phobert/tokenization_phobert.py index 85450f4d8e261e..471b3a89f8536f 100644 --- a/src/transformers/models/phobert/tokenization_phobert.py +++ b/src/transformers/models/phobert/tokenization_phobert.py @@ -346,3 +346,6 @@ def add_from_file(self, f): raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'") word = line[:idx] self.encoder[word] = len(self.encoder) + + +__all__ = ["PhobertTokenizer"] diff --git a/src/transformers/models/pix2struct/__init__.py b/src/transformers/models/pix2struct/__init__.py index 581d5d7240c664..aa645dff0494dd 100644 --- a/src/transformers/models/pix2struct/__init__.py +++ b/src/transformers/models/pix2struct/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,70 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_pix2struct": [ - "Pix2StructConfig", - "Pix2StructTextConfig", - "Pix2StructVisionConfig", - ], - "processing_pix2struct": ["Pix2StructProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pix2struct"] = [ - "Pix2StructPreTrainedModel", - "Pix2StructForConditionalGeneration", - "Pix2StructVisionModel", - "Pix2StructTextModel", - ] - if TYPE_CHECKING: - from .configuration_pix2struct import ( - Pix2StructConfig, - Pix2StructTextConfig, - Pix2StructVisionConfig, - ) - from .processing_pix2struct import Pix2StructProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_pix2struct import Pix2StructImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pix2struct import ( - Pix2StructForConditionalGeneration, - Pix2StructPreTrainedModel, - Pix2StructTextModel, - Pix2StructVisionModel, - ) - + from .configuration_pix2struct import * + from .image_processing_pix2struct import * + from .modeling_pix2struct import * + from .processing_pix2struct import * else: import sys - sys.modules[__name__] = _LazyModule(__name__,
globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pix2struct/configuration_pix2struct.py b/src/transformers/models/pix2struct/configuration_pix2struct.py index 3b6ec9b2d844e0..f6924c4784b3ac 100644 --- a/src/transformers/models/pix2struct/configuration_pix2struct.py +++ b/src/transformers/models/pix2struct/configuration_pix2struct.py @@ -388,3 +388,6 @@ def from_text_vision_configs( """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +__all__ = ["Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig"] diff --git a/src/transformers/models/pix2struct/image_processing_pix2struct.py b/src/transformers/models/pix2struct/image_processing_pix2struct.py index 466997c8d8236e..386ac83b615575 100644 --- a/src/transformers/models/pix2struct/image_processing_pix2struct.py +++ b/src/transformers/models/pix2struct/image_processing_pix2struct.py @@ -459,3 +459,6 @@ def preprocess( ) return encoded_outputs + + +__all__ = ["Pix2StructImageProcessor"] diff --git a/src/transformers/models/pix2struct/modeling_pix2struct.py b/src/transformers/models/pix2struct/modeling_pix2struct.py index 176dadd5b883e1..0feb4f14889502 100644 --- a/src/transformers/models/pix2struct/modeling_pix2struct.py +++ b/src/transformers/models/pix2struct/modeling_pix2struct.py @@ -1892,3 +1892,11 @@ def forward( encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) + + +__all__ = [ + "Pix2StructPreTrainedModel", + "Pix2StructForConditionalGeneration", + "Pix2StructVisionModel", + "Pix2StructTextModel", +] diff --git a/src/transformers/models/pix2struct/processing_pix2struct.py b/src/transformers/models/pix2struct/processing_pix2struct.py index bf02531ffb864f..ac9802dac8f8d8 100644 --- a/src/transformers/models/pix2struct/processing_pix2struct.py +++ b/src/transformers/models/pix2struct/processing_pix2struct.py @@ -161,3 +161,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["Pix2StructProcessor"] diff --git a/src/transformers/models/pixtral/__init__.py b/src/transformers/models/pixtral/__init__.py index 400a52a8adf2a1..8afbbfb1f47f71 100644 --- a/src/transformers/models/pixtral/__init__.py +++ b/src/transformers/models/pixtral/__init__.py @@ -13,81 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_torchvision_available, - is_vision_available, -) - - -_import_structure = { - "configuration_pixtral": ["PixtralVisionConfig"], - "processing_pixtral": ["PixtralProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pixtral"] = [ - "PixtralVisionModel", - "PixtralPreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_pixtral"] = ["PixtralImageProcessor"] - -try: - if not is_torchvision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_pixtral_fast"] = ["PixtralImageProcessorFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pixtral import PixtralVisionConfig - from .processing_pixtral import PixtralProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pixtral import ( - PixtralPreTrainedModel, - PixtralVisionModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_pixtral import PixtralImageProcessor - - try: - if not is_torchvision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_pixtral_fast import PixtralImageProcessorFast - + from .configuration_pixtral import * + from .image_processing_pixtral import * + from .image_processing_pixtral_fast import * + from .modeling_pixtral import * + from .processing_pixtral import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pixtral/configuration_pixtral.py b/src/transformers/models/pixtral/configuration_pixtral.py index 14db51b947e664..d4710e00e42166 100644 --- a/src/transformers/models/pixtral/configuration_pixtral.py +++ b/src/transformers/models/pixtral/configuration_pixtral.py @@ -101,3 +101,6 @@ def __init__( self.rope_theta = rope_theta self.head_dim = hidden_size // num_attention_heads self.initializer_range = initializer_range + + +__all__ = ["PixtralVisionConfig"] diff --git a/src/transformers/models/pixtral/image_processing_pixtral.py b/src/transformers/models/pixtral/image_processing_pixtral.py index 3f3978e1934f5d..0fcd032d2c0c12 100644 --- a/src/transformers/models/pixtral/image_processing_pixtral.py +++ b/src/transformers/models/pixtral/image_processing_pixtral.py @@ -519,3 +519,6 @@ def preprocess( # Convert to tensor type outside of BatchFeature to avoid batching the images of different sizes images_list = [[convert_to_tensor(image, return_tensors) for image in images] for images in images_list] return BatchMixFeature(data={"pixel_values": images_list, "image_sizes": batch_image_sizes}, tensor_type=None) + + +__all__ = ["PixtralImageProcessor"] diff --git 
a/src/transformers/models/pixtral/image_processing_pixtral_fast.py b/src/transformers/models/pixtral/image_processing_pixtral_fast.py index 82fbf3b2c094a6..63d453bb3919d2 100644 --- a/src/transformers/models/pixtral/image_processing_pixtral_fast.py +++ b/src/transformers/models/pixtral/image_processing_pixtral_fast.py @@ -347,3 +347,6 @@ def preprocess( batch_image_sizes.append(image_sizes) return BatchMixFeature(data={"pixel_values": batch_images, "image_sizes": batch_image_sizes}, tensor_type=None) + + +__all__ = ["PixtralImageProcessorFast"] diff --git a/src/transformers/models/pixtral/modeling_pixtral.py b/src/transformers/models/pixtral/modeling_pixtral.py index b65fbd634ba789..450d5f2f9ab7a9 100644 --- a/src/transformers/models/pixtral/modeling_pixtral.py +++ b/src/transformers/models/pixtral/modeling_pixtral.py @@ -505,3 +505,6 @@ def forward( [p.shape[-2] * p.shape[-1] for p in patch_embeds_list], patch_embeds ) return self.transformer(patch_embeds, attention_mask, position_embedding) + + +__all__ = ["PixtralVisionModel", "PixtralPreTrainedModel"] diff --git a/src/transformers/models/pixtral/processing_pixtral.py b/src/transformers/models/pixtral/processing_pixtral.py index 5913e8688d00be..0b5354daad37fa 100644 --- a/src/transformers/models/pixtral/processing_pixtral.py +++ b/src/transformers/models/pixtral/processing_pixtral.py @@ -281,3 +281,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["PixtralProcessor"] diff --git a/src/transformers/models/plbart/__init__.py b/src/transformers/models/plbart/__init__.py index cd4c46fad3dd7d..33e5618ca7e172 100644 --- a/src/transformers/models/plbart/__init__.py +++ b/src/transformers/models/plbart/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,67 +13,16 @@ # limitations under the License. 
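The intended invariant is that the public surface does not move: the union of the per-module `__all__` lists must cover everything the deleted `_import_structure` used to export. A hypothetical smoke test for plbart, using the names listed in the hunk below:

```python
# Hypothetical back-compat smoke test (not part of this patch): the names the
# old plbart _import_structure exported must still resolve on the lazy module.
import transformers.models.plbart as plbart

expected = {
    "PLBartConfig",
    "PLBartTokenizer",
    "PLBartForCausalLM",
    "PLBartForConditionalGeneration",
    "PLBartForSequenceClassification",
    "PLBartModel",
    "PLBartPreTrainedModel",
}
assert expected <= set(dir(plbart))
```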
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_plbart": ["PLBartConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_plbart"] = ["PLBartTokenizer"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_plbart"] = [ - "PLBartForCausalLM", - "PLBartForConditionalGeneration", - "PLBartForSequenceClassification", - "PLBartModel", - "PLBartPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_plbart import PLBartConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_plbart import PLBartTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_plbart import ( - PLBartForCausalLM, - PLBartForConditionalGeneration, - PLBartForSequenceClassification, - PLBartModel, - PLBartPreTrainedModel, - ) - - + from .configuration_plbart import * + from .modeling_plbart import * + from .tokenization_plbart import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/plbart/configuration_plbart.py b/src/transformers/models/plbart/configuration_plbart.py index 86dbc0cec83cdf..30871c4b7259a8 100644 --- a/src/transformers/models/plbart/configuration_plbart.py +++ b/src/transformers/models/plbart/configuration_plbart.py @@ -188,3 +188,6 @@ def outputs(self) -> Mapping[str, Mapping[int, str]]: ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}), ] ) + + +__all__ = ["PLBartConfig"] diff --git a/src/transformers/models/plbart/modeling_plbart.py b/src/transformers/models/plbart/modeling_plbart.py index 490fefc686a524..9d387207a904af 100644 --- a/src/transformers/models/plbart/modeling_plbart.py +++ b/src/transformers/models/plbart/modeling_plbart.py @@ -1718,3 +1718,12 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "PLBartForCausalLM", + "PLBartForConditionalGeneration", + "PLBartForSequenceClassification", + "PLBartModel", + "PLBartPreTrainedModel", +] diff --git a/src/transformers/models/plbart/tokenization_plbart.py b/src/transformers/models/plbart/tokenization_plbart.py index f9648924c8e0fa..9b1c1a799b928a 100644 --- a/src/transformers/models/plbart/tokenization_plbart.py +++ b/src/transformers/models/plbart/tokenization_plbart.py @@ -425,3 +425,6 @@ def _convert_lang_code_special_format(self, lang: str) -> str: """Convert Language Codes to format tokenizer uses if required""" lang = FAIRSEQ_LANGUAGE_CODES_MAP[lang] if lang in FAIRSEQ_LANGUAGE_CODES_MAP.keys() else lang return lang + + +__all__ = ["PLBartTokenizer"] diff --git a/src/transformers/models/poolformer/__init__.py 
b/src/transformers/models/poolformer/__init__.py index 00c345463697d4..d0d79274910e34 100644 --- a/src/transformers/models/poolformer/__init__.py +++ b/src/transformers/models/poolformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,67 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_poolformer": [ - "PoolFormerConfig", - "PoolFormerOnnxConfig", - ] -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"] - _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_poolformer"] = [ - "PoolFormerForImageClassification", - "PoolFormerModel", - "PoolFormerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_poolformer import ( - PoolFormerConfig, - PoolFormerOnnxConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_poolformer import PoolFormerFeatureExtractor - from .image_processing_poolformer import PoolFormerImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_poolformer import ( - PoolFormerForImageClassification, - PoolFormerModel, - PoolFormerPreTrainedModel, - ) - - + from .configuration_poolformer import * + from .feature_extraction_poolformer import * + from .image_processing_poolformer import * + from .modeling_poolformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/poolformer/configuration_poolformer.py b/src/transformers/models/poolformer/configuration_poolformer.py index a7467b380ec3d7..cdaf1306313830 100644 --- a/src/transformers/models/poolformer/configuration_poolformer.py +++ b/src/transformers/models/poolformer/configuration_poolformer.py @@ -143,3 +143,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 2e-3 + + +__all__ = ["PoolFormerConfig", "PoolFormerOnnxConfig"] diff --git a/src/transformers/models/poolformer/feature_extraction_poolformer.py b/src/transformers/models/poolformer/feature_extraction_poolformer.py index 79ffa037eed36a..ab4337f91f4f0a 100644 --- a/src/transformers/models/poolformer/feature_extraction_poolformer.py +++ b/src/transformers/models/poolformer/feature_extraction_poolformer.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["PoolFormerFeatureExtractor"] diff --git 
a/src/transformers/models/poolformer/image_processing_poolformer.py b/src/transformers/models/poolformer/image_processing_poolformer.py index 669617f95973b6..0dfb66acf588c7 100644 --- a/src/transformers/models/poolformer/image_processing_poolformer.py +++ b/src/transformers/models/poolformer/image_processing_poolformer.py @@ -355,3 +355,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["PoolFormerImageProcessor"] diff --git a/src/transformers/models/poolformer/modeling_poolformer.py b/src/transformers/models/poolformer/modeling_poolformer.py index e70974507b775c..3f150f93b660d8 100755 --- a/src/transformers/models/poolformer/modeling_poolformer.py +++ b/src/transformers/models/poolformer/modeling_poolformer.py @@ -443,3 +443,6 @@ def forward( return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) + + +__all__ = ["PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel"] diff --git a/src/transformers/models/pop2piano/__init__.py b/src/transformers/models/pop2piano/__init__.py index cd664cb8a70ce5..cbbfcbf157b58a 100644 --- a/src/transformers/models/pop2piano/__init__.py +++ b/src/transformers/models/pop2piano/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,108 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_essentia_available, - is_librosa_available, - is_pretty_midi_available, - is_scipy_available, - is_torch_available, -) - - -_import_structure = { - "configuration_pop2piano": ["Pop2PianoConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pop2piano"] = [ - "Pop2PianoForConditionalGeneration", - "Pop2PianoPreTrainedModel", - ] - -try: - if not (is_librosa_available() and is_essentia_available() and is_scipy_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_pop2piano"] = ["Pop2PianoFeatureExtractor"] - -try: - if not (is_pretty_midi_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_pop2piano"] = ["Pop2PianoTokenizer"] - -try: - if not ( - is_pretty_midi_available() - and is_torch_available() - and is_librosa_available() - and is_essentia_available() - and is_scipy_available() - ): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["processing_pop2piano"] = ["Pop2PianoProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pop2piano import Pop2PianoConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pop2piano import ( - Pop2PianoForConditionalGeneration, - Pop2PianoPreTrainedModel, - ) - - try: - if not 
(is_librosa_available() and is_essentia_available() and is_scipy_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_pop2piano import Pop2PianoFeatureExtractor - - try: - if not (is_pretty_midi_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_pop2piano import Pop2PianoTokenizer - - try: - if not ( - is_pretty_midi_available() - and is_torch_available() - and is_librosa_available() - and is_essentia_available() - and is_scipy_available() - ): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .processing_pop2piano import Pop2PianoProcessor - + from .configuration_pop2piano import * + from .modeling_pop2piano import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pop2piano/configuration_pop2piano.py b/src/transformers/models/pop2piano/configuration_pop2piano.py index 51043dab0c43e2..484e1a4f933e96 100644 --- a/src/transformers/models/pop2piano/configuration_pop2piano.py +++ b/src/transformers/models/pop2piano/configuration_pop2piano.py @@ -122,3 +122,6 @@ def __init__( is_encoder_decoder=is_encoder_decoder, **kwargs, ) + + +__all__ = ["Pop2PianoConfig"] diff --git a/src/transformers/models/pop2piano/modeling_pop2piano.py b/src/transformers/models/pop2piano/modeling_pop2piano.py index 6a64a27e007b3e..14a36e83249c71 100644 --- a/src/transformers/models/pop2piano/modeling_pop2piano.py +++ b/src/transformers/models/pop2piano/modeling_pop2piano.py @@ -1476,3 +1476,6 @@ def _reorder_cache(self, past_key_values, beam_idx): reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past + + +__all__ = ["Pop2PianoForConditionalGeneration", "Pop2PianoPreTrainedModel"] diff --git a/src/transformers/models/prophetnet/__init__.py b/src/transformers/models/prophetnet/__init__.py index 2e1a1ac6101483..7485dfbb530c3e 100644 --- a/src/transformers/models/prophetnet/__init__.py +++ b/src/transformers/models/prophetnet/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,53 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
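pop2piano is the extreme case of the deleted boilerplate: four nested backend checks (torch, librosa, essentia, scipy, pretty_midi) collapse away, and the new `TYPE_CHECKING` branch lists only the configuration and modeling modules; the feature extractor, tokenizer and processor gating presumably moves into the backend-aware side of `define_import_structure` (not shown in this diff). Downstream imports are meant to be unaffected either way, e.g.:

```python
# Illustrative downstream usage, unchanged by the refactor.
from transformers import Pop2PianoConfig  # resolved lazily via _LazyModule

config = Pop2PianoConfig()
print(config.model_type)  # "pop2piano"
```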
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_prophetnet": ["ProphetNetConfig"], - "tokenization_prophetnet": ["ProphetNetTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_prophetnet"] = [ - "ProphetNetDecoder", - "ProphetNetEncoder", - "ProphetNetForCausalLM", - "ProphetNetForConditionalGeneration", - "ProphetNetModel", - "ProphetNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_prophetnet import ProphetNetConfig - from .tokenization_prophetnet import ProphetNetTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_prophetnet import ( - ProphetNetDecoder, - ProphetNetEncoder, - ProphetNetForCausalLM, - ProphetNetForConditionalGeneration, - ProphetNetModel, - ProphetNetPreTrainedModel, - ) - + from .configuration_prophetnet import * + from .modeling_prophetnet import * + from .tokenization_prophetnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/prophetnet/configuration_prophetnet.py b/src/transformers/models/prophetnet/configuration_prophetnet.py index 7a9da32b3cac7a..1219e1faacd42b 100644 --- a/src/transformers/models/prophetnet/configuration_prophetnet.py +++ b/src/transformers/models/prophetnet/configuration_prophetnet.py @@ -175,3 +175,6 @@ def num_hidden_layers(self, value): "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and" " `num_decoder_layers`." 
) + + +__all__ = ["ProphetNetConfig"] diff --git a/src/transformers/models/prophetnet/modeling_prophetnet.py b/src/transformers/models/prophetnet/modeling_prophetnet.py index 137bd5ad828df5..fc148edbc49c29 100644 --- a/src/transformers/models/prophetnet/modeling_prophetnet.py +++ b/src/transformers/models/prophetnet/modeling_prophetnet.py @@ -2309,3 +2309,13 @@ def _tie_weights(self): def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) + + +__all__ = [ + "ProphetNetDecoder", + "ProphetNetEncoder", + "ProphetNetForCausalLM", + "ProphetNetForConditionalGeneration", + "ProphetNetModel", + "ProphetNetPreTrainedModel", +] diff --git a/src/transformers/models/prophetnet/tokenization_prophetnet.py b/src/transformers/models/prophetnet/tokenization_prophetnet.py index dc8956da0935cc..276dbb8438f7fa 100644 --- a/src/transformers/models/prophetnet/tokenization_prophetnet.py +++ b/src/transformers/models/prophetnet/tokenization_prophetnet.py @@ -502,3 +502,6 @@ def build_inputs_with_special_tokens( return token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] return token_ids_0 + sep + token_ids_1 + sep + + +__all__ = ["ProphetNetTokenizer"] diff --git a/src/transformers/models/pvt/__init__.py b/src/transformers/models/pvt/__init__.py index 1ee7092f0c460a..3a7448336f164f 100644 --- a/src/transformers/models/pvt/__init__.py +++ b/src/transformers/models/pvt/__init__.py @@ -1,7 +1,4 @@ -# coding=utf-8 -# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, -# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. -# All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,63 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_pvt": ["PvtConfig", "PvtOnnxConfig"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_pvt"] = ["PvtImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pvt"] = [ - "PvtForImageClassification", - "PvtModel", - "PvtPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pvt import PvtConfig, PvtOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_pvt import PvtImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pvt import ( - PvtForImageClassification, - PvtModel, - PvtPreTrainedModel, - ) - + from .configuration_pvt import * + from .image_processing_pvt import * + from .modeling_pvt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pvt/configuration_pvt.py b/src/transformers/models/pvt/configuration_pvt.py index 25348818f090c1..c97c2703ef2358 100644 --- a/src/transformers/models/pvt/configuration_pvt.py +++ b/src/transformers/models/pvt/configuration_pvt.py @@ -157,3 +157,6 @@ def atol_for_validation(self) -> float: @property def default_onnx_opset(self) -> int: return 12 + + +__all__ = ["PvtConfig", "PvtOnnxConfig"] diff --git a/src/transformers/models/pvt/image_processing_pvt.py b/src/transformers/models/pvt/image_processing_pvt.py index c8edba4dc67bce..f37442a4bdb387 100644 --- a/src/transformers/models/pvt/image_processing_pvt.py +++ b/src/transformers/models/pvt/image_processing_pvt.py @@ -271,3 +271,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["PvtImageProcessor"] diff --git a/src/transformers/models/pvt/modeling_pvt.py b/src/transformers/models/pvt/modeling_pvt.py index 7befa4dad021f6..c061a8fdf0f966 100755 --- a/src/transformers/models/pvt/modeling_pvt.py +++ b/src/transformers/models/pvt/modeling_pvt.py @@ -666,3 +666,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["PvtForImageClassification", "PvtModel", "PvtPreTrainedModel"] diff --git a/src/transformers/models/pvt_v2/__init__.py b/src/transformers/models/pvt_v2/__init__.py index 4825eda165050a..e3cb83f130dd42 100644 --- a/src/transformers/models/pvt_v2/__init__.py +++ b/src/transformers/models/pvt_v2/__init__.py @@ -1,7 +1,4 @@ -# coding=utf-8 -# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, -# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. -# All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,49 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_pvt_v2": ["PvtV2Config"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pvt_v2"] = [ - "PvtV2ForImageClassification", - "PvtV2Model", - "PvtV2PreTrainedModel", - "PvtV2Backbone", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pvt_v2 import PvtV2Config - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pvt_v2 import ( - PvtV2Backbone, - PvtV2ForImageClassification, - PvtV2Model, - PvtV2PreTrainedModel, - ) - + from .configuration_pvt_v2 import * + from .modeling_pvt_v2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pvt_v2/configuration_pvt_v2.py b/src/transformers/models/pvt_v2/configuration_pvt_v2.py index f6d7de299ba37d..9aef0e760f3c5d 100644 --- a/src/transformers/models/pvt_v2/configuration_pvt_v2.py +++ b/src/transformers/models/pvt_v2/configuration_pvt_v2.py @@ -151,3 +151,6 @@ def __init__( self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["PvtV2Config"] diff --git a/src/transformers/models/pvt_v2/modeling_pvt_v2.py b/src/transformers/models/pvt_v2/modeling_pvt_v2.py index a2e1e7a674524f..19e783a4fa2954 100644 --- a/src/transformers/models/pvt_v2/modeling_pvt_v2.py +++ b/src/transformers/models/pvt_v2/modeling_pvt_v2.py @@ -698,3 +698,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = ["PvtV2ForImageClassification", "PvtV2Model", "PvtV2PreTrainedModel", "PvtV2Backbone"] diff --git a/src/transformers/models/qwen2/__init__.py b/src/transformers/models/qwen2/__init__.py index 301531655a1db5..e3a9ea4b293980 100644 --- a/src/transformers/models/qwen2/__init__.py +++ b/src/transformers/models/qwen2/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 The Qwen Team and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,72 +13,17 @@ # limitations under the License. 
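One small fix rides along with the refactor: several of the old `__init__` files (poolformer, pixtral and plbart above; qwen2_audio and qwen2_vl below) built their `_LazyModule` without `module_spec=__spec__`, whereas every rewritten file forwards it. A quick check of the resulting module metadata (illustrative):

```python
# Illustrative check that the lazy proxy keeps its import metadata.
import transformers.models.qwen2 as qwen2

assert qwen2.__spec__ is not None  # module_spec=__spec__ was forwarded
assert qwen2.__name__ == "transformers.models.qwen2"
```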
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_qwen2": ["Qwen2Config"], - "tokenization_qwen2": ["Qwen2Tokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_qwen2_fast"] = ["Qwen2TokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_qwen2"] = [ - "Qwen2ForCausalLM", - "Qwen2ForQuestionAnswering", - "Qwen2Model", - "Qwen2PreTrainedModel", - "Qwen2ForSequenceClassification", - "Qwen2ForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_qwen2 import Qwen2Config - from .tokenization_qwen2 import Qwen2Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_qwen2_fast import Qwen2TokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_qwen2 import ( - Qwen2ForCausalLM, - Qwen2ForQuestionAnswering, - Qwen2ForSequenceClassification, - Qwen2ForTokenClassification, - Qwen2Model, - Qwen2PreTrainedModel, - ) - - + from .configuration_qwen2 import * + from .modeling_qwen2 import * + from .tokenization_qwen2 import * + from .tokenization_qwen2_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/qwen2/configuration_qwen2.py b/src/transformers/models/qwen2/configuration_qwen2.py index 16ce924b9f16f6..9d881cfdaa1fbc 100644 --- a/src/transformers/models/qwen2/configuration_qwen2.py +++ b/src/transformers/models/qwen2/configuration_qwen2.py @@ -194,3 +194,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["Qwen2Config"] diff --git a/src/transformers/models/qwen2/modeling_qwen2.py b/src/transformers/models/qwen2/modeling_qwen2.py index f4875386253c43..0dc35590cae16d 100644 --- a/src/transformers/models/qwen2/modeling_qwen2.py +++ b/src/transformers/models/qwen2/modeling_qwen2.py @@ -1469,3 +1469,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "Qwen2ForCausalLM", + "Qwen2ForQuestionAnswering", + "Qwen2Model", + "Qwen2PreTrainedModel", + "Qwen2ForSequenceClassification", + "Qwen2ForTokenClassification", +] diff --git a/src/transformers/models/qwen2/tokenization_qwen2.py b/src/transformers/models/qwen2/tokenization_qwen2.py index be2685430f649e..c388789b728ff0 100644 --- a/src/transformers/models/qwen2/tokenization_qwen2.py +++ b/src/transformers/models/qwen2/tokenization_qwen2.py @@ -337,3 +337,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = def prepare_for_tokenization(self, text, **kwargs): text = unicodedata.normalize("NFC", text) return (text, kwargs) + + +__all__ = ["Qwen2Tokenizer"] diff --git a/src/transformers/models/qwen2/tokenization_qwen2_fast.py 
b/src/transformers/models/qwen2/tokenization_qwen2_fast.py index fcfc4ab764da45..b7312755ef580f 100644 --- a/src/transformers/models/qwen2/tokenization_qwen2_fast.py +++ b/src/transformers/models/qwen2/tokenization_qwen2_fast.py @@ -132,3 +132,6 @@ def __init__( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["Qwen2TokenizerFast"] diff --git a/src/transformers/models/qwen2_audio/__init__.py b/src/transformers/models/qwen2_audio/__init__.py index 456378e2a53c2b..b126d712bd469e 100644 --- a/src/transformers/models/qwen2_audio/__init__.py +++ b/src/transformers/models/qwen2_audio/__init__.py @@ -13,45 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_qwen2_audio": ["Qwen2AudioConfig", "Qwen2AudioEncoderConfig"], - "processing_qwen2_audio": ["Qwen2AudioProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_qwen2_audio"] = [ - "Qwen2AudioForConditionalGeneration", - "Qwen2AudioPreTrainedModel", - "Qwen2AudioEncoder", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_qwen2_audio import Qwen2AudioConfig, Qwen2AudioEncoderConfig - from .processing_qwen2_audio import Qwen2AudioProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_qwen2_audio import ( - Qwen2AudioEncoder, - Qwen2AudioForConditionalGeneration, - Qwen2AudioPreTrainedModel, - ) - + from .configuration_qwen2_audio import * + from .modeling_qwen2_audio import * + from .processing_qwen2_audio import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/qwen2_audio/configuration_qwen2_audio.py b/src/transformers/models/qwen2_audio/configuration_qwen2_audio.py index 925aa60a8dc6de..bcfa2ca48e60c7 100644 --- a/src/transformers/models/qwen2_audio/configuration_qwen2_audio.py +++ b/src/transformers/models/qwen2_audio/configuration_qwen2_audio.py @@ -197,3 +197,6 @@ def __init__( self.text_config = text_config super().__init__(**kwargs) + + +__all__ = ["Qwen2AudioConfig", "Qwen2AudioEncoderConfig"] diff --git a/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py b/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py index ce0e427048cf23..b1c258bed739ba 100644 --- a/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py +++ b/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py @@ -1364,3 +1364,6 @@ def _update_model_kwargs_for_generation( def _reorder_cache(self, *args, **kwargs): return self.language_model._reorder_cache(*args, **kwargs) + + +__all__ = ["Qwen2AudioForConditionalGeneration", "Qwen2AudioPreTrainedModel", "Qwen2AudioEncoder"] diff --git a/src/transformers/models/qwen2_audio/processing_qwen2_audio.py b/src/transformers/models/qwen2_audio/processing_qwen2_audio.py index eabf5b7069f200..3fa2252f729d64 100644 --- 
a/src/transformers/models/qwen2_audio/processing_qwen2_audio.py +++ b/src/transformers/models/qwen2_audio/processing_qwen2_audio.py @@ -175,3 +175,6 @@ def default_chat_template(self): "{% endif %}" ) # fmt: on + + +__all__ = ["Qwen2AudioProcessor"] diff --git a/src/transformers/models/qwen2_moe/__init__.py b/src/transformers/models/qwen2_moe/__init__.py index 9520141ea831fc..ac210a3c10e6a2 100644 --- a/src/transformers/models/qwen2_moe/__init__.py +++ b/src/transformers/models/qwen2_moe/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 The Qwen Team and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,54 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_qwen2_moe": ["Qwen2MoeConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_qwen2_moe"] = [ - "Qwen2MoeForCausalLM", - "Qwen2MoeForQuestionAnswering", - "Qwen2MoeModel", - "Qwen2MoePreTrainedModel", - "Qwen2MoeForSequenceClassification", - "Qwen2MoeForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_qwen2_moe import Qwen2MoeConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_qwen2_moe import ( - Qwen2MoeForCausalLM, - Qwen2MoeForQuestionAnswering, - Qwen2MoeForSequenceClassification, - Qwen2MoeForTokenClassification, - Qwen2MoeModel, - Qwen2MoePreTrainedModel, - ) - - + from .configuration_qwen2_moe import * + from .modeling_qwen2_moe import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py b/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py index ed72ef6e465e52..ac6e8ae17acb34 100644 --- a/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py +++ b/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py @@ -231,3 +231,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["Qwen2MoeConfig"] diff --git a/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py b/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py index a1e36b8ad7bc20..1f7bab8ee8e7e5 100644 --- a/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py +++ b/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py @@ -1653,3 +1653,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "Qwen2MoeForCausalLM", + "Qwen2MoeForQuestionAnswering", + "Qwen2MoeModel", + "Qwen2MoePreTrainedModel", + "Qwen2MoeForSequenceClassification", + "Qwen2MoeForTokenClassification", +] diff --git a/src/transformers/models/qwen2_vl/__init__.py b/src/transformers/models/qwen2_vl/__init__.py index 08a0e8f15542e5..6d859059f35b53 100644 --- a/src/transformers/models/qwen2_vl/__init__.py +++ 
b/src/transformers/models/qwen2_vl/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 The Qwen Team and The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,62 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_qwen2_vl": ["Qwen2VLConfig"], - "processing_qwen2_vl": ["Qwen2VLProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_qwen2_vl"] = [ - "Qwen2VLForConditionalGeneration", - "Qwen2VLModel", - "Qwen2VLPreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_qwen2_vl"] = ["Qwen2VLImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_qwen2_vl import Qwen2VLConfig - from .processing_qwen2_vl import Qwen2VLProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_qwen2_vl import ( - Qwen2VLForConditionalGeneration, - Qwen2VLModel, - Qwen2VLPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_qwen2_vl import Qwen2VLImageProcessor - - + from .configuration_qwen2_vl import * + from .image_processing_qwen2_vl import * + from .modeling_qwen2_vl import * + from .processing_qwen2_vl import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/qwen2_vl/configuration_qwen2_vl.py b/src/transformers/models/qwen2_vl/configuration_qwen2_vl.py index 55042327de4ec3..3d8b10d0d4dd27 100644 --- a/src/transformers/models/qwen2_vl/configuration_qwen2_vl.py +++ b/src/transformers/models/qwen2_vl/configuration_qwen2_vl.py @@ -227,3 +227,6 @@ def __init__( rope_config_validation(self, ignore_keys={"mrope_section"}) super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) + + +__all__ = ["Qwen2VLConfig"] diff --git a/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py b/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py index 2b3024187bf7f5..cc0fd33540306f 100644 --- a/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py +++ b/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py @@ -456,3 +456,6 @@ def preprocess( data = {"pixel_values_videos": pixel_values, "video_grid_thw": vision_grid_thws} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["Qwen2VLImageProcessor"] diff --git a/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py b/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py index dce0702b081942..7997f4eafe6a86 100644 --- a/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py +++ b/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py @@ -1806,3 
+1806,6 @@ def prepare_inputs_for_generation( } ) return model_inputs + + +__all__ = ["Qwen2VLForConditionalGeneration", "Qwen2VLModel", "Qwen2VLPreTrainedModel"] diff --git a/src/transformers/models/qwen2_vl/processing_qwen2_vl.py b/src/transformers/models/qwen2_vl/processing_qwen2_vl.py index f7a59aa15f250f..efeb8d80d08d90 100644 --- a/src/transformers/models/qwen2_vl/processing_qwen2_vl.py +++ b/src/transformers/models/qwen2_vl/processing_qwen2_vl.py @@ -191,3 +191,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["Qwen2VLProcessor"] diff --git a/src/transformers/models/rag/__init__.py b/src/transformers/models/rag/__init__.py index b238c6290832e8..8a8f135ba454f0 100644 --- a/src/transformers/models/rag/__init__.py +++ b/src/transformers/models/rag/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,72 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available - - -_import_structure = { - "configuration_rag": ["RagConfig"], - "retrieval_rag": ["RagRetriever"], - "tokenization_rag": ["RagTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_rag"] = [ - "RagModel", - "RagPreTrainedModel", - "RagSequenceForGeneration", - "RagTokenForGeneration", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_rag"] = [ - "TFRagModel", - "TFRagPreTrainedModel", - "TFRagSequenceForGeneration", - "TFRagTokenForGeneration", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_rag import RagConfig - from .retrieval_rag import RagRetriever - from .tokenization_rag import RagTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_rag import ( - TFRagModel, - TFRagPreTrainedModel, - TFRagSequenceForGeneration, - TFRagTokenForGeneration, - ) - + from .configuration_rag import * + from .modeling_rag import * + from .modeling_tf_rag import * + from .retrieval_rag import * + from .tokenization_rag import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/rag/configuration_rag.py 
b/src/transformers/models/rag/configuration_rag.py index 98de7382a45694..c76926f21879aa 100644 --- a/src/transformers/models/rag/configuration_rag.py +++ b/src/transformers/models/rag/configuration_rag.py @@ -181,3 +181,6 @@ def from_question_encoder_generator_configs( [`EncoderDecoderConfig`]: An instance of a configuration object """ return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs) + + +__all__ = ["RagConfig"] diff --git a/src/transformers/models/rag/modeling_rag.py b/src/transformers/models/rag/modeling_rag.py index dfc2664b78a3dc..d3ca787691c4af 100644 --- a/src/transformers/models/rag/modeling_rag.py +++ b/src/transformers/models/rag/modeling_rag.py @@ -1639,3 +1639,6 @@ def _mask_pads(ll, smooth_obj): eps_i = epsilon / rag_logprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss + + +__all__ = ["RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration"] diff --git a/src/transformers/models/rag/modeling_tf_rag.py b/src/transformers/models/rag/modeling_tf_rag.py index 1f243665ea0d13..6714ac61a3bd32 100644 --- a/src/transformers/models/rag/modeling_tf_rag.py +++ b/src/transformers/models/rag/modeling_tf_rag.py @@ -1768,3 +1768,6 @@ def build(self, input_shape=None): if getattr(self, "rag", None) is not None: with tf.name_scope(self.rag.name): self.rag.build(None) + + +__all__ = ["TFRagModel", "TFRagPreTrainedModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration"] diff --git a/src/transformers/models/rag/retrieval_rag.py b/src/transformers/models/rag/retrieval_rag.py index b9ae49b5e9c1aa..f4000aa6e7f671 100644 --- a/src/transformers/models/rag/retrieval_rag.py +++ b/src/transformers/models/rag/retrieval_rag.py @@ -672,3 +672,6 @@ def __call__( }, tensor_type=return_tensors, ) + + +__all__ = ["RagRetriever"] diff --git a/src/transformers/models/rag/tokenization_rag.py b/src/transformers/models/rag/tokenization_rag.py index 5bc87a895d787d..4d0a994e766fcd 100644 --- a/src/transformers/models/rag/tokenization_rag.py +++ b/src/transformers/models/rag/tokenization_rag.py @@ -119,3 +119,6 @@ def prepare_seq2seq_batch( ) model_inputs["labels"] = labels["input_ids"] return model_inputs + + +__all__ = ["RagTokenizer"] diff --git a/src/transformers/models/recurrent_gemma/__init__.py b/src/transformers/models/recurrent_gemma/__init__.py index 3ac7ff1c99b064..ab9335fc4beff8 100644 --- a/src/transformers/models/recurrent_gemma/__init__.py +++ b/src/transformers/models/recurrent_gemma/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,47 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_recurrent_gemma": ["RecurrentGemmaConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_recurrent_gemma"] = [ - "RecurrentGemmaForCausalLM", - "RecurrentGemmaModel", - "RecurrentGemmaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_recurrent_gemma import RecurrentGemmaConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_recurrent_gemma import ( - RecurrentGemmaForCausalLM, - RecurrentGemmaModel, - RecurrentGemmaPreTrainedModel, - ) - + from .configuration_recurrent_gemma import * + from .modeling_recurrent_gemma import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py b/src/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py index 7f45a41710cf29..60a034f57d3dcf 100644 --- a/src/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py +++ b/src/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py @@ -156,3 +156,6 @@ def __init__( @property def layers_block_type(self): return (self.block_types * 100)[: self.num_hidden_layers] + + +__all__ = ["RecurrentGemmaConfig"] diff --git a/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py b/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py index 2b3cf7eb0cb82e..9cbebb010a6965 100644 --- a/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +++ b/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py @@ -904,3 +904,6 @@ def _reorder_cache(self, past_key_values, beam_idx): k_state = k_state.index_select(0, beam_idx.to(k_state.device)) v_state = v_state.index_select(0, beam_idx.to(v_state.device)) return None + + +__all__ = ["RecurrentGemmaForCausalLM", "RecurrentGemmaModel", "RecurrentGemmaPreTrainedModel"] diff --git a/src/transformers/models/reformer/__init__.py b/src/transformers/models/reformer/__init__.py index ef13dd7c312dd0..cd6721b6ac2367 100644 --- a/src/transformers/models/reformer/__init__.py +++ b/src/transformers/models/reformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,91 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_reformer": ["ReformerConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_reformer"] = ["ReformerTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_reformer"] = [ - "ReformerAttention", - "ReformerForMaskedLM", - "ReformerForQuestionAnswering", - "ReformerForSequenceClassification", - "ReformerLayer", - "ReformerModel", - "ReformerModelWithLMHead", - "ReformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_reformer import ReformerConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_reformer import ReformerTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_reformer_fast import ReformerTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_reformer import ( - ReformerAttention, - ReformerForMaskedLM, - ReformerForQuestionAnswering, - ReformerForSequenceClassification, - ReformerLayer, - ReformerModel, - ReformerModelWithLMHead, - ReformerPreTrainedModel, - ) - + from .configuration_reformer import * + from .modeling_reformer import * + from .tokenization_reformer import * + from .tokenization_reformer_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/reformer/configuration_reformer.py b/src/transformers/models/reformer/configuration_reformer.py index 018831010b010c..ab9b1c5f64cc5c 100755 --- a/src/transformers/models/reformer/configuration_reformer.py +++ b/src/transformers/models/reformer/configuration_reformer.py @@ -230,3 +230,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["ReformerConfig"] diff --git a/src/transformers/models/reformer/modeling_reformer.py b/src/transformers/models/reformer/modeling_reformer.py index 2c635a7118b451..c593eb0922bd45 100755 --- a/src/transformers/models/reformer/modeling_reformer.py +++ b/src/transformers/models/reformer/modeling_reformer.py @@ -2683,3 +2683,15 @@ def forward( hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, ) + + +__all__ = [ + "ReformerAttention", + "ReformerForMaskedLM", + "ReformerForQuestionAnswering", + "ReformerForSequenceClassification", + "ReformerLayer", + "ReformerModel", + "ReformerModelWithLMHead", + "ReformerPreTrainedModel", +] diff --git 
a/src/transformers/models/reformer/tokenization_reformer.py b/src/transformers/models/reformer/tokenization_reformer.py index eb45749336734e..65b97d5a6bc68a 100644 --- a/src/transformers/models/reformer/tokenization_reformer.py +++ b/src/transformers/models/reformer/tokenization_reformer.py @@ -169,3 +169,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["ReformerTokenizer"] diff --git a/src/transformers/models/reformer/tokenization_reformer_fast.py b/src/transformers/models/reformer/tokenization_reformer_fast.py index 26f007a7f71b36..a48441c55e5a78 100644 --- a/src/transformers/models/reformer/tokenization_reformer_fast.py +++ b/src/transformers/models/reformer/tokenization_reformer_fast.py @@ -113,3 +113,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["ReformerTokenizerFast"] diff --git a/src/transformers/models/regnet/__init__.py b/src/transformers/models/regnet/__init__.py index 25507927affde7..cac770fdd0bcf6 100644 --- a/src/transformers/models/regnet/__init__.py +++ b/src/transformers/models/regnet/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,95 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = {"configuration_regnet": ["RegNetConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_regnet"] = [ - "RegNetForImageClassification", - "RegNetModel", - "RegNetPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_regnet"] = [ - "TFRegNetForImageClassification", - "TFRegNetModel", - "TFRegNetPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_regnet"] = [ - "FlaxRegNetForImageClassification", - "FlaxRegNetModel", - "FlaxRegNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_regnet import RegNetConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_regnet import ( - RegNetForImageClassification, - RegNetModel, - RegNetPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_regnet import ( - TFRegNetForImageClassification, - TFRegNetModel, - TFRegNetPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_regnet import ( - FlaxRegNetForImageClassification, - FlaxRegNetModel, - FlaxRegNetPreTrainedModel, - ) - - + from 
.configuration_regnet import * + from .modeling_flax_regnet import * + from .modeling_regnet import * + from .modeling_tf_regnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/regnet/configuration_regnet.py b/src/transformers/models/regnet/configuration_regnet.py index 34f90ce1841f0e..c089f67297aa77 100644 --- a/src/transformers/models/regnet/configuration_regnet.py +++ b/src/transformers/models/regnet/configuration_regnet.py @@ -89,3 +89,6 @@ def __init__( self.hidden_act = hidden_act # always downsample in the first stage self.downsample_in_first_stage = True + + +__all__ = ["RegNetConfig"] diff --git a/src/transformers/models/regnet/modeling_flax_regnet.py b/src/transformers/models/regnet/modeling_flax_regnet.py index fc4258257bdb19..f575e33db9d74d 100644 --- a/src/transformers/models/regnet/modeling_flax_regnet.py +++ b/src/transformers/models/regnet/modeling_flax_regnet.py @@ -817,3 +817,6 @@ class FlaxRegNetForImageClassification(FlaxRegNetPreTrainedModel): output_type=FlaxImageClassifierOutputWithNoAttention, config_class=RegNetConfig, ) + + +__all__ = ["FlaxRegNetForImageClassification", "FlaxRegNetModel", "FlaxRegNetPreTrainedModel"] diff --git a/src/transformers/models/regnet/modeling_regnet.py b/src/transformers/models/regnet/modeling_regnet.py index 9420fb5edad522..a1e97c302d064c 100644 --- a/src/transformers/models/regnet/modeling_regnet.py +++ b/src/transformers/models/regnet/modeling_regnet.py @@ -449,3 +449,6 @@ def forward( return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) + + +__all__ = ["RegNetForImageClassification", "RegNetModel", "RegNetPreTrainedModel"] diff --git a/src/transformers/models/regnet/modeling_tf_regnet.py b/src/transformers/models/regnet/modeling_tf_regnet.py index 3d6b38b9e4c031..049c71ae6c0a0f 100644 --- a/src/transformers/models/regnet/modeling_tf_regnet.py +++ b/src/transformers/models/regnet/modeling_tf_regnet.py @@ -606,3 +606,6 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier[1].name): self.classifier[1].build([None, None, None, self.config.hidden_sizes[-1]]) + + +__all__ = ["TFRegNetForImageClassification", "TFRegNetModel", "TFRegNetPreTrainedModel"] diff --git a/src/transformers/models/rembert/__init__.py b/src/transformers/models/rembert/__init__.py index 5ffaf3c8c04cf3..38566f502ad0cc 100644 --- a/src/transformers/models/rembert/__init__.py +++ b/src/transformers/models/rembert/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,134 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_rembert": ["RemBertConfig", "RemBertOnnxConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_rembert"] = ["RemBertTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_rembert"] = [ - "RemBertForCausalLM", - "RemBertForMaskedLM", - "RemBertForMultipleChoice", - "RemBertForQuestionAnswering", - "RemBertForSequenceClassification", - "RemBertForTokenClassification", - "RemBertLayer", - "RemBertModel", - "RemBertPreTrainedModel", - "load_tf_weights_in_rembert", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_rembert"] = [ - "TFRemBertForCausalLM", - "TFRemBertForMaskedLM", - "TFRemBertForMultipleChoice", - "TFRemBertForQuestionAnswering", - "TFRemBertForSequenceClassification", - "TFRemBertForTokenClassification", - "TFRemBertLayer", - "TFRemBertModel", - "TFRemBertPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_rembert import RemBertConfig, RemBertOnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_rembert import RemBertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_rembert_fast import RemBertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_rembert import ( - RemBertForCausalLM, - RemBertForMaskedLM, - RemBertForMultipleChoice, - RemBertForQuestionAnswering, - RemBertForSequenceClassification, - RemBertForTokenClassification, - RemBertLayer, - RemBertModel, - RemBertPreTrainedModel, - load_tf_weights_in_rembert, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_rembert import ( - TFRemBertForCausalLM, - TFRemBertForMaskedLM, - TFRemBertForMultipleChoice, - TFRemBertForQuestionAnswering, - TFRemBertForSequenceClassification, - TFRemBertForTokenClassification, - TFRemBertLayer, - TFRemBertModel, - TFRemBertPreTrainedModel, - ) - - + from .configuration_rembert import * + from .modeling_rembert import * + from .modeling_tf_rembert import * + from .tokenization_rembert import * + from .tokenization_rembert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git 
a/src/transformers/models/rembert/configuration_rembert.py b/src/transformers/models/rembert/configuration_rembert.py index f9d28303fdca86..b4b1fbc325c244 100644 --- a/src/transformers/models/rembert/configuration_rembert.py +++ b/src/transformers/models/rembert/configuration_rembert.py @@ -157,3 +157,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["RemBertConfig", "RemBertOnnxConfig"] diff --git a/src/transformers/models/rembert/modeling_rembert.py b/src/transformers/models/rembert/modeling_rembert.py index b73b2efea5accc..7ed22131e38cfc 100755 --- a/src/transformers/models/rembert/modeling_rembert.py +++ b/src/transformers/models/rembert/modeling_rembert.py @@ -1499,3 +1499,17 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "RemBertForCausalLM", + "RemBertForMaskedLM", + "RemBertForMultipleChoice", + "RemBertForQuestionAnswering", + "RemBertForSequenceClassification", + "RemBertForTokenClassification", + "RemBertLayer", + "RemBertModel", + "RemBertPreTrainedModel", + "load_tf_weights_in_rembert", +] diff --git a/src/transformers/models/rembert/modeling_tf_rembert.py b/src/transformers/models/rembert/modeling_tf_rembert.py index 5ee9ba1364d92d..733defb4470bb1 100644 --- a/src/transformers/models/rembert/modeling_tf_rembert.py +++ b/src/transformers/models/rembert/modeling_tf_rembert.py @@ -1706,3 +1706,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFRemBertForCausalLM", + "TFRemBertForMaskedLM", + "TFRemBertForMultipleChoice", + "TFRemBertForQuestionAnswering", + "TFRemBertForSequenceClassification", + "TFRemBertForTokenClassification", + "TFRemBertLayer", + "TFRemBertModel", + "TFRemBertPreTrainedModel", +] diff --git a/src/transformers/models/rembert/tokenization_rembert.py b/src/transformers/models/rembert/tokenization_rembert.py index 0c046b9bca1dc3..951ffd2bb088ff 100644 --- a/src/transformers/models/rembert/tokenization_rembert.py +++ b/src/transformers/models/rembert/tokenization_rembert.py @@ -260,3 +260,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["RemBertTokenizer"] diff --git a/src/transformers/models/rembert/tokenization_rembert_fast.py b/src/transformers/models/rembert/tokenization_rembert_fast.py index 350e02e33bf475..7eed4f80a78788 100644 --- a/src/transformers/models/rembert/tokenization_rembert_fast.py +++ b/src/transformers/models/rembert/tokenization_rembert_fast.py @@ -227,3 +227,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["RemBertTokenizerFast"] diff --git a/src/transformers/models/resnet/__init__.py b/src/transformers/models/resnet/__init__.py index 50b71a4dd4cf4d..625e93a2554370 100644 --- a/src/transformers/models/resnet/__init__.py +++ b/src/transformers/models/resnet/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,92 +13,17 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_resnet": ["ResNetConfig", "ResNetOnnxConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_resnet"] = [ - "ResNetForImageClassification", - "ResNetModel", - "ResNetPreTrainedModel", - "ResNetBackbone", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_resnet"] = [ - "TFResNetForImageClassification", - "TFResNetModel", - "TFResNetPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_resnet"] = [ - "FlaxResNetForImageClassification", - "FlaxResNetModel", - "FlaxResNetPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_resnet import ResNetConfig, ResNetOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_resnet import ( - ResNetBackbone, - ResNetForImageClassification, - ResNetModel, - ResNetPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_resnet import ( - TFResNetForImageClassification, - TFResNetModel, - TFResNetPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel - - + from .configuration_resnet import * + from .modeling_flax_resnet import * + from .modeling_resnet import * + from .modeling_tf_resnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/resnet/configuration_resnet.py b/src/transformers/models/resnet/configuration_resnet.py index 92fe656287492b..42bc19a8bca5b6 100644 --- a/src/transformers/models/resnet/configuration_resnet.py +++ b/src/transformers/models/resnet/configuration_resnet.py @@ -131,3 +131,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-3 + + +__all__ = ["ResNetConfig", "ResNetOnnxConfig"] diff --git a/src/transformers/models/resnet/modeling_flax_resnet.py b/src/transformers/models/resnet/modeling_flax_resnet.py index 07c07e95115b9c..d7fc2cc1be5546 100644 --- a/src/transformers/models/resnet/modeling_flax_resnet.py +++ b/src/transformers/models/resnet/modeling_flax_resnet.py @@ -699,3 +699,6 @@ class FlaxResNetForImageClassification(FlaxResNetPreTrainedModel): append_replace_return_docstrings( FlaxResNetForImageClassification, output_type=FlaxImageClassifierOutputWithNoAttention, config_class=ResNetConfig ) + + +__all__ = ["FlaxResNetForImageClassification", "FlaxResNetModel", "FlaxResNetPreTrainedModel"] diff --git 
a/src/transformers/models/resnet/modeling_resnet.py b/src/transformers/models/resnet/modeling_resnet.py index ccd4fac1758275..d3b757414892d0 100644 --- a/src/transformers/models/resnet/modeling_resnet.py +++ b/src/transformers/models/resnet/modeling_resnet.py @@ -515,3 +515,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = ["ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", "ResNetBackbone"] diff --git a/src/transformers/models/resnet/modeling_tf_resnet.py b/src/transformers/models/resnet/modeling_tf_resnet.py index 1e2ec143cda05c..4590cccf4ccbe1 100644 --- a/src/transformers/models/resnet/modeling_tf_resnet.py +++ b/src/transformers/models/resnet/modeling_tf_resnet.py @@ -591,3 +591,6 @@ def build(self, input_shape=None): if getattr(self, "classifier_layer", None) is not None: with tf.name_scope(self.classifier_layer.name): self.classifier_layer.build([None, None, self.config.hidden_sizes[-1]]) + + +__all__ = ["TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel"] diff --git a/src/transformers/models/roberta/__init__.py b/src/transformers/models/roberta/__init__.py index 4a97962f4f5704..9f9418d33d354a 100644 --- a/src/transformers/models/roberta/__init__.py +++ b/src/transformers/models/roberta/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,150 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_roberta": ["RobertaConfig", "RobertaOnnxConfig"], - "tokenization_roberta": ["RobertaTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_roberta"] = [ - "RobertaForCausalLM", - "RobertaForMaskedLM", - "RobertaForMultipleChoice", - "RobertaForQuestionAnswering", - "RobertaForSequenceClassification", - "RobertaForTokenClassification", - "RobertaModel", - "RobertaPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_roberta"] = [ - "TFRobertaForCausalLM", - "TFRobertaForMaskedLM", - "TFRobertaForMultipleChoice", - "TFRobertaForQuestionAnswering", - "TFRobertaForSequenceClassification", - "TFRobertaForTokenClassification", - "TFRobertaMainLayer", - "TFRobertaModel", - "TFRobertaPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_roberta"] = [ - "FlaxRobertaForCausalLM", - "FlaxRobertaForMaskedLM", - "FlaxRobertaForMultipleChoice", - "FlaxRobertaForQuestionAnswering", - "FlaxRobertaForSequenceClassification", - "FlaxRobertaForTokenClassification", - "FlaxRobertaModel", - "FlaxRobertaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_roberta import RobertaConfig, RobertaOnnxConfig - from .tokenization_roberta import RobertaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_roberta_fast import RobertaTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_roberta import ( - RobertaForCausalLM, - RobertaForMaskedLM, - RobertaForMultipleChoice, - RobertaForQuestionAnswering, - RobertaForSequenceClassification, - RobertaForTokenClassification, - RobertaModel, - RobertaPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_roberta import ( - TFRobertaForCausalLM, - TFRobertaForMaskedLM, - TFRobertaForMultipleChoice, - TFRobertaForQuestionAnswering, - TFRobertaForSequenceClassification, - TFRobertaForTokenClassification, - TFRobertaMainLayer, - TFRobertaModel, - TFRobertaPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_roberta import ( - FlaxRobertaForCausalLM, - FlaxRobertaForMaskedLM, - FlaxRobertaForMultipleChoice, - FlaxRobertaForQuestionAnswering, - FlaxRobertaForSequenceClassification, - FlaxRobertaForTokenClassification, - FlaxRobertaModel, - FlaxRobertaPreTrainedModel, - ) - + 
from .configuration_roberta import * + from .modeling_flax_roberta import * + from .modeling_roberta import * + from .modeling_tf_roberta import * + from .tokenization_roberta import * + from .tokenization_roberta_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/roberta/configuration_roberta.py b/src/transformers/models/roberta/configuration_roberta.py index d08f3df47718fc..35ff80115f1d02 100644 --- a/src/transformers/models/roberta/configuration_roberta.py +++ b/src/transformers/models/roberta/configuration_roberta.py @@ -150,3 +150,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["RobertaConfig", "RobertaOnnxConfig"] diff --git a/src/transformers/models/roberta/modeling_flax_roberta.py b/src/transformers/models/roberta/modeling_flax_roberta.py index ecdd31386b21eb..4d9bf7cb6e8505 100644 --- a/src/transformers/models/roberta/modeling_flax_roberta.py +++ b/src/transformers/models/roberta/modeling_flax_roberta.py @@ -1486,3 +1486,15 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + + +__all__ = [ + "FlaxRobertaForCausalLM", + "FlaxRobertaForMaskedLM", + "FlaxRobertaForMultipleChoice", + "FlaxRobertaForQuestionAnswering", + "FlaxRobertaForSequenceClassification", + "FlaxRobertaForTokenClassification", + "FlaxRobertaModel", + "FlaxRobertaPreTrainedModel", +] diff --git a/src/transformers/models/roberta/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py index 1cbce28bf999e6..273acaf07140a2 100644 --- a/src/transformers/models/roberta/modeling_roberta.py +++ b/src/transformers/models/roberta/modeling_roberta.py @@ -1680,3 +1680,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "RobertaForCausalLM", + "RobertaForMaskedLM", + "RobertaForMultipleChoice", + "RobertaForQuestionAnswering", + "RobertaForSequenceClassification", + "RobertaForTokenClassification", + "RobertaModel", + "RobertaPreTrainedModel", +] diff --git a/src/transformers/models/roberta/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py index 439d12a870261f..55361cdcc7132b 100644 --- a/src/transformers/models/roberta/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -1768,3 +1768,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFRobertaForCausalLM", + "TFRobertaForMaskedLM", + "TFRobertaForMultipleChoice", + "TFRobertaForQuestionAnswering", + "TFRobertaForSequenceClassification", + "TFRobertaForTokenClassification", + "TFRobertaMainLayer", + "TFRobertaModel", + "TFRobertaPreTrainedModel", +] diff --git a/src/transformers/models/roberta/tokenization_roberta.py b/src/transformers/models/roberta/tokenization_roberta.py index 072c44ac4dd359..6ec6cbbf9866ea 100644 --- a/src/transformers/models/roberta/tokenization_roberta.py +++ 
b/src/transformers/models/roberta/tokenization_roberta.py @@ -397,3 +397,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) + + +__all__ = ["RobertaTokenizer"] diff --git a/src/transformers/models/roberta/tokenization_roberta_fast.py b/src/transformers/models/roberta/tokenization_roberta_fast.py index 8384397033cee1..336148f413803d 100644 --- a/src/transformers/models/roberta/tokenization_roberta_fast.py +++ b/src/transformers/models/roberta/tokenization_roberta_fast.py @@ -267,3 +267,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["RobertaTokenizerFast"] diff --git a/src/transformers/models/roberta_prelayernorm/__init__.py b/src/transformers/models/roberta_prelayernorm/__init__.py index 9f55eed11c4224..208878343d24b3 100644 --- a/src/transformers/models/roberta_prelayernorm/__init__.py +++ b/src/transformers/models/roberta_prelayernorm/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,137 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = { - "configuration_roberta_prelayernorm": [ - "RobertaPreLayerNormConfig", - "RobertaPreLayerNormOnnxConfig", - ], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_roberta_prelayernorm"] = [ - "RobertaPreLayerNormForCausalLM", - "RobertaPreLayerNormForMaskedLM", - "RobertaPreLayerNormForMultipleChoice", - "RobertaPreLayerNormForQuestionAnswering", - "RobertaPreLayerNormForSequenceClassification", - "RobertaPreLayerNormForTokenClassification", - "RobertaPreLayerNormModel", - "RobertaPreLayerNormPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_roberta_prelayernorm"] = [ - "TFRobertaPreLayerNormForCausalLM", - "TFRobertaPreLayerNormForMaskedLM", - "TFRobertaPreLayerNormForMultipleChoice", - "TFRobertaPreLayerNormForQuestionAnswering", - "TFRobertaPreLayerNormForSequenceClassification", - "TFRobertaPreLayerNormForTokenClassification", - "TFRobertaPreLayerNormMainLayer", - "TFRobertaPreLayerNormModel", - "TFRobertaPreLayerNormPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_roberta_prelayernorm"] = [ - "FlaxRobertaPreLayerNormForCausalLM", - "FlaxRobertaPreLayerNormForMaskedLM", - "FlaxRobertaPreLayerNormForMultipleChoice", - "FlaxRobertaPreLayerNormForQuestionAnswering", - "FlaxRobertaPreLayerNormForSequenceClassification", - "FlaxRobertaPreLayerNormForTokenClassification", - 
"FlaxRobertaPreLayerNormModel", - "FlaxRobertaPreLayerNormPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_roberta_prelayernorm import ( - RobertaPreLayerNormConfig, - RobertaPreLayerNormOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_roberta_prelayernorm import ( - RobertaPreLayerNormForCausalLM, - RobertaPreLayerNormForMaskedLM, - RobertaPreLayerNormForMultipleChoice, - RobertaPreLayerNormForQuestionAnswering, - RobertaPreLayerNormForSequenceClassification, - RobertaPreLayerNormForTokenClassification, - RobertaPreLayerNormModel, - RobertaPreLayerNormPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_roberta_prelayernorm import ( - TFRobertaPreLayerNormForCausalLM, - TFRobertaPreLayerNormForMaskedLM, - TFRobertaPreLayerNormForMultipleChoice, - TFRobertaPreLayerNormForQuestionAnswering, - TFRobertaPreLayerNormForSequenceClassification, - TFRobertaPreLayerNormForTokenClassification, - TFRobertaPreLayerNormMainLayer, - TFRobertaPreLayerNormModel, - TFRobertaPreLayerNormPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_roberta_prelayernorm import ( - FlaxRobertaPreLayerNormForCausalLM, - FlaxRobertaPreLayerNormForMaskedLM, - FlaxRobertaPreLayerNormForMultipleChoice, - FlaxRobertaPreLayerNormForQuestionAnswering, - FlaxRobertaPreLayerNormForSequenceClassification, - FlaxRobertaPreLayerNormForTokenClassification, - FlaxRobertaPreLayerNormModel, - FlaxRobertaPreLayerNormPreTrainedModel, - ) - + from .configuration_roberta_prelayernorm import * + from .modeling_flax_roberta_prelayernorm import * + from .modeling_roberta_prelayernorm import * + from .modeling_tf_roberta_prelayernorm import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py index e0c939f6575c32..71ecbd4474d4f0 100644 --- a/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py @@ -152,3 +152,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["RobertaPreLayerNormConfig", "RobertaPreLayerNormOnnxConfig"] diff --git a/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py index c50227eaa29614..6584c2e15e359b 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py @@ -1513,3 +1513,15 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + + +__all__ = [ + 
"FlaxRobertaPreLayerNormForCausalLM", + "FlaxRobertaPreLayerNormForMaskedLM", + "FlaxRobertaPreLayerNormForMultipleChoice", + "FlaxRobertaPreLayerNormForQuestionAnswering", + "FlaxRobertaPreLayerNormForSequenceClassification", + "FlaxRobertaPreLayerNormForTokenClassification", + "FlaxRobertaPreLayerNormModel", + "FlaxRobertaPreLayerNormPreTrainedModel", +] diff --git a/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py index 5f7a7f849413a6..f70c55ec7aff85 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py @@ -1540,3 +1540,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "RobertaPreLayerNormForCausalLM", + "RobertaPreLayerNormForMaskedLM", + "RobertaPreLayerNormForMultipleChoice", + "RobertaPreLayerNormForQuestionAnswering", + "RobertaPreLayerNormForSequenceClassification", + "RobertaPreLayerNormForTokenClassification", + "RobertaPreLayerNormModel", + "RobertaPreLayerNormPreTrainedModel", +] diff --git a/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py index 1ecd376901fe47..a11f6151c4dba1 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py @@ -1793,3 +1793,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFRobertaPreLayerNormForCausalLM", + "TFRobertaPreLayerNormForMaskedLM", + "TFRobertaPreLayerNormForMultipleChoice", + "TFRobertaPreLayerNormForQuestionAnswering", + "TFRobertaPreLayerNormForSequenceClassification", + "TFRobertaPreLayerNormForTokenClassification", + "TFRobertaPreLayerNormMainLayer", + "TFRobertaPreLayerNormModel", + "TFRobertaPreLayerNormPreTrainedModel", +] diff --git a/src/transformers/models/roc_bert/__init__.py b/src/transformers/models/roc_bert/__init__.py index 9971c53975d49a..eb847b03b75587 100644 --- a/src/transformers/models/roc_bert/__init__.py +++ b/src/transformers/models/roc_bert/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,76 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_roc_bert": ["RoCBertConfig"], - "tokenization_roc_bert": ["RoCBertTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - pass - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_roc_bert"] = [ - "RoCBertForCausalLM", - "RoCBertForMaskedLM", - "RoCBertForMultipleChoice", - "RoCBertForPreTraining", - "RoCBertForQuestionAnswering", - "RoCBertForSequenceClassification", - "RoCBertForTokenClassification", - "RoCBertLayer", - "RoCBertModel", - "RoCBertPreTrainedModel", - "load_tf_weights_in_roc_bert", - ] - if TYPE_CHECKING: - from .configuration_roc_bert import RoCBertConfig - from .tokenization_roc_bert import RoCBertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - raise OptionalDependencyNotAvailable() - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_roc_bert import ( - RoCBertForCausalLM, - RoCBertForMaskedLM, - RoCBertForMultipleChoice, - RoCBertForPreTraining, - RoCBertForQuestionAnswering, - RoCBertForSequenceClassification, - RoCBertForTokenClassification, - RoCBertLayer, - RoCBertModel, - RoCBertPreTrainedModel, - load_tf_weights_in_roc_bert, - ) - - + from .configuration_roc_bert import * + from .modeling_roc_bert import * + from .tokenization_roc_bert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/roc_bert/configuration_roc_bert.py b/src/transformers/models/roc_bert/configuration_roc_bert.py index d402349e67b559..4bf53bf3384268 100644 --- a/src/transformers/models/roc_bert/configuration_roc_bert.py +++ b/src/transformers/models/roc_bert/configuration_roc_bert.py @@ -158,3 +158,6 @@ def __init__( self.position_embedding_type = position_embedding_type self.classifier_dropout = classifier_dropout super().__init__(pad_token_id=pad_token_id, **kwargs) + + +__all__ = ["RoCBertConfig"] diff --git a/src/transformers/models/roc_bert/modeling_roc_bert.py b/src/transformers/models/roc_bert/modeling_roc_bert.py index 2d9c8bbb13b72f..c47d8e5b7d7af0 100644 --- a/src/transformers/models/roc_bert/modeling_roc_bert.py +++ b/src/transformers/models/roc_bert/modeling_roc_bert.py @@ -1996,3 +1996,18 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "RoCBertForCausalLM", + "RoCBertForMaskedLM", + "RoCBertForMultipleChoice", + "RoCBertForPreTraining", + "RoCBertForQuestionAnswering", + "RoCBertForSequenceClassification", + "RoCBertForTokenClassification", + "RoCBertLayer", + "RoCBertModel", + "RoCBertPreTrainedModel", + "load_tf_weights_in_roc_bert", +] diff --git a/src/transformers/models/roc_bert/tokenization_roc_bert.py b/src/transformers/models/roc_bert/tokenization_roc_bert.py index 
3a980c0ae66f68..60c5e5414517e5 100644 --- a/src/transformers/models/roc_bert/tokenization_roc_bert.py +++ b/src/transformers/models/roc_bert/tokenization_roc_bert.py @@ -1117,3 +1117,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["RoCBertTokenizer"] diff --git a/src/transformers/models/roformer/__init__.py b/src/transformers/models/roformer/__init__.py index d9642eba59fe26..63c1c00e572391 100644 --- a/src/transformers/models/roformer/__init__.py +++ b/src/transformers/models/roformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,152 +13,19 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_roformer": ["RoFormerConfig", "RoFormerOnnxConfig"], - "tokenization_roformer": ["RoFormerTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_roformer"] = [ - "RoFormerForCausalLM", - "RoFormerForMaskedLM", - "RoFormerForMultipleChoice", - "RoFormerForQuestionAnswering", - "RoFormerForSequenceClassification", - "RoFormerForTokenClassification", - "RoFormerLayer", - "RoFormerModel", - "RoFormerPreTrainedModel", - "load_tf_weights_in_roformer", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_roformer"] = [ - "TFRoFormerForCausalLM", - "TFRoFormerForMaskedLM", - "TFRoFormerForMultipleChoice", - "TFRoFormerForQuestionAnswering", - "TFRoFormerForSequenceClassification", - "TFRoFormerForTokenClassification", - "TFRoFormerLayer", - "TFRoFormerModel", - "TFRoFormerPreTrainedModel", - ] - - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_roformer"] = [ - "FlaxRoFormerForMaskedLM", - "FlaxRoFormerForMultipleChoice", - "FlaxRoFormerForQuestionAnswering", - "FlaxRoFormerForSequenceClassification", - "FlaxRoFormerForTokenClassification", - "FlaxRoFormerModel", - "FlaxRoFormerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_roformer import RoFormerConfig, RoFormerOnnxConfig - from .tokenization_roformer import RoFormerTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_roformer_fast import RoFormerTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_roformer import ( - RoFormerForCausalLM, - RoFormerForMaskedLM, - RoFormerForMultipleChoice, - RoFormerForQuestionAnswering, - 
RoFormerForSequenceClassification, - RoFormerForTokenClassification, - RoFormerLayer, - RoFormerModel, - RoFormerPreTrainedModel, - load_tf_weights_in_roformer, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_roformer import ( - TFRoFormerForCausalLM, - TFRoFormerForMaskedLM, - TFRoFormerForMultipleChoice, - TFRoFormerForQuestionAnswering, - TFRoFormerForSequenceClassification, - TFRoFormerForTokenClassification, - TFRoFormerLayer, - TFRoFormerModel, - TFRoFormerPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_roformer import ( - FlaxRoFormerForMaskedLM, - FlaxRoFormerForMultipleChoice, - FlaxRoFormerForQuestionAnswering, - FlaxRoFormerForSequenceClassification, - FlaxRoFormerForTokenClassification, - FlaxRoFormerModel, - FlaxRoFormerPreTrainedModel, - ) - - + from .configuration_roformer import * + from .modeling_flax_roformer import * + from .modeling_roformer import * + from .modeling_tf_roformer import * + from .tokenization_roformer import * + from .tokenization_roformer_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/roformer/configuration_roformer.py b/src/transformers/models/roformer/configuration_roformer.py index ae4ed0fd7b00c7..1852509199e6d9 100644 --- a/src/transformers/models/roformer/configuration_roformer.py +++ b/src/transformers/models/roformer/configuration_roformer.py @@ -145,3 +145,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["RoFormerConfig", "RoFormerOnnxConfig"] diff --git a/src/transformers/models/roformer/modeling_flax_roformer.py b/src/transformers/models/roformer/modeling_flax_roformer.py index f53a056c13af68..0c7cdab581a7f4 100644 --- a/src/transformers/models/roformer/modeling_flax_roformer.py +++ b/src/transformers/models/roformer/modeling_flax_roformer.py @@ -1078,3 +1078,14 @@ class FlaxRoFormerForQuestionAnswering(FlaxRoFormerPreTrainedModel): FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) + + +__all__ = [ + "FlaxRoFormerForMaskedLM", + "FlaxRoFormerForMultipleChoice", + "FlaxRoFormerForQuestionAnswering", + "FlaxRoFormerForSequenceClassification", + "FlaxRoFormerForTokenClassification", + "FlaxRoFormerModel", + "FlaxRoFormerPreTrainedModel", +] diff --git a/src/transformers/models/roformer/modeling_roformer.py b/src/transformers/models/roformer/modeling_roformer.py index b493b1e6bcdacf..012acb9a9388bc 100644 --- a/src/transformers/models/roformer/modeling_roformer.py +++ b/src/transformers/models/roformer/modeling_roformer.py @@ -1543,3 +1543,17 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "RoFormerForCausalLM", + "RoFormerForMaskedLM", + "RoFormerForMultipleChoice", + "RoFormerForQuestionAnswering", + "RoFormerForSequenceClassification", + "RoFormerForTokenClassification", + "RoFormerLayer", + "RoFormerModel", + "RoFormerPreTrainedModel", + "load_tf_weights_in_roformer", +] diff --git a/src/transformers/models/roformer/modeling_tf_roformer.py b/src/transformers/models/roformer/modeling_tf_roformer.py index 
20af18369194ab..4e7c0be16f934c 100644 --- a/src/transformers/models/roformer/modeling_tf_roformer.py +++ b/src/transformers/models/roformer/modeling_tf_roformer.py @@ -1532,3 +1532,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFRoFormerForCausalLM", + "TFRoFormerForMaskedLM", + "TFRoFormerForMultipleChoice", + "TFRoFormerForQuestionAnswering", + "TFRoFormerForSequenceClassification", + "TFRoFormerForTokenClassification", + "TFRoFormerLayer", + "TFRoFormerModel", + "TFRoFormerPreTrainedModel", +] diff --git a/src/transformers/models/roformer/tokenization_roformer.py b/src/transformers/models/roformer/tokenization_roformer.py index 33fe68f8225c82..312cecca193b79 100644 --- a/src/transformers/models/roformer/tokenization_roformer.py +++ b/src/transformers/models/roformer/tokenization_roformer.py @@ -535,3 +535,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = writer.write(token + "\n") index += 1 return (vocab_file,) + + +__all__ = ["RoFormerTokenizer"] diff --git a/src/transformers/models/roformer/tokenization_roformer_fast.py b/src/transformers/models/roformer/tokenization_roformer_fast.py index cc161c1a26798f..7f75e5a2ff12b8 100644 --- a/src/transformers/models/roformer/tokenization_roformer_fast.py +++ b/src/transformers/models/roformer/tokenization_roformer_fast.py @@ -175,3 +175,6 @@ def save_pretrained( ): self.backend_tokenizer.pre_tokenizer = BertPreTokenizer() return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs) + + +__all__ = ["RoFormerTokenizerFast"] diff --git a/src/transformers/models/rwkv/__init__.py b/src/transformers/models/rwkv/__init__.py index 2cbfd94bac7bb1..cbfdb86827a0f5 100644 --- a/src/transformers/models/rwkv/__init__.py +++ b/src/transformers/models/rwkv/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,48 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_rwkv": ["RwkvConfig", "RwkvOnnxConfig"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_rwkv"] = [
-        "RwkvForCausalLM",
-        "RwkvModel",
-        "RwkvPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_rwkv import RwkvConfig, RwkvOnnxConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_rwkv import (
-            RwkvForCausalLM,
-            RwkvModel,
-            RwkvPreTrainedModel,
-        )
+    from .configuration_rwkv import *
+    from .modeling_rwkv import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/rwkv/configuration_rwkv.py b/src/transformers/models/rwkv/configuration_rwkv.py
index 9539b857eac1db..90c5cf7e1c8b3b 100644
--- a/src/transformers/models/rwkv/configuration_rwkv.py
+++ b/src/transformers/models/rwkv/configuration_rwkv.py
@@ -115,3 +115,6 @@ def __init__(
         super().__init__(
             tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
         )
+
+
+__all__ = ["RwkvConfig"]
diff --git a/src/transformers/models/rwkv/modeling_rwkv.py b/src/transformers/models/rwkv/modeling_rwkv.py
index a42843b5106510..250b9c908aa861 100644
--- a/src/transformers/models/rwkv/modeling_rwkv.py
+++ b/src/transformers/models/rwkv/modeling_rwkv.py
@@ -847,3 +847,6 @@ def forward(
             hidden_states=rwkv_outputs.hidden_states,
             attentions=rwkv_outputs.attentions,
         )
+
+
+__all__ = ["RwkvForCausalLM", "RwkvModel", "RwkvPreTrainedModel"]
diff --git a/src/transformers/models/sam/__init__.py b/src/transformers/models/sam/__init__.py
index 672281440c1ae9..68da4037a351b3 100644
--- a/src/transformers/models/sam/__init__.py
+++ b/src/transformers/models/sam/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,89 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_tf_available,
-    is_torch_available,
-    is_vision_available,
-)
-
-
-_import_structure = {
-    "configuration_sam": [
-        "SamConfig",
-        "SamMaskDecoderConfig",
-        "SamPromptEncoderConfig",
-        "SamVisionConfig",
-    ],
-    "processing_sam": ["SamProcessor"],
-}
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_sam"] = [
-        "SamModel",
-        "SamPreTrainedModel",
-    ]
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_sam"] = [
-        "TFSamModel",
-        "TFSamPreTrainedModel",
-    ]
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_sam"] = ["SamImageProcessor"]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_sam import (
-        SamConfig,
-        SamMaskDecoderConfig,
-        SamPromptEncoderConfig,
-        SamVisionConfig,
-    )
-    from .processing_sam import SamProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_sam import SamModel, SamPreTrainedModel
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_sam import TFSamModel, TFSamPreTrainedModel
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_sam import SamImageProcessor
-
+    from .configuration_sam import *
+    from .image_processing_sam import *
+    from .modeling_sam import *
+    from .modeling_tf_sam import *
+    from .processing_sam import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/sam/configuration_sam.py b/src/transformers/models/sam/configuration_sam.py
index 22a237615d1280..e0a759dbf111b4 100644
--- a/src/transformers/models/sam/configuration_sam.py
+++ b/src/transformers/models/sam/configuration_sam.py
@@ -314,3 +314,6 @@ def __init__(
         self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config)
         self.mask_decoder_config = SamMaskDecoderConfig(**mask_decoder_config)
         self.initializer_range = initializer_range
+
+
+__all__ = ["SamConfig", "SamMaskDecoderConfig", "SamPromptEncoderConfig", "SamVisionConfig"]
diff --git a/src/transformers/models/sam/image_processing_sam.py b/src/transformers/models/sam/image_processing_sam.py
index beea3f4b01c311..a0d54b4afad3f4 100644
--- a/src/transformers/models/sam/image_processing_sam.py
+++ b/src/transformers/models/sam/image_processing_sam.py
@@ -1473,3 +1473,6 @@ def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thre
     masks = [_rle_to_mask(rle) for rle in rle_masks]

     return masks, iou_scores, rle_masks, mask_boxes
+
+
+__all__ = ["SamImageProcessor"]
diff --git a/src/transformers/models/sam/modeling_sam.py b/src/transformers/models/sam/modeling_sam.py
index b935bc9e421e01..9fe4c27bcac863 100644
--- a/src/transformers/models/sam/modeling_sam.py
+++ b/src/transformers/models/sam/modeling_sam.py
@@ -1563,3 +1563,6 @@ def forward(
             vision_attentions=vision_attentions,
             mask_decoder_attentions=mask_decoder_attentions,
         )
+
+
+__all__ = ["SamModel", "SamPreTrainedModel"]
diff --git a/src/transformers/models/sam/modeling_tf_sam.py b/src/transformers/models/sam/modeling_tf_sam.py
index 1e5099f191e9b4..ee75b1bf4f2133 100644
--- a/src/transformers/models/sam/modeling_tf_sam.py
+++ b/src/transformers/models/sam/modeling_tf_sam.py
@@ -1650,3 +1650,6 @@ def build(self, input_shape=None):
         if getattr(self, "mask_decoder", None) is not None:
             with tf.name_scope(self.mask_decoder.name):
                 self.mask_decoder.build(None)
+
+
+__all__ = ["TFSamModel", "TFSamPreTrainedModel"]
diff --git a/src/transformers/models/sam/processing_sam.py b/src/transformers/models/sam/processing_sam.py
index 9e67be1e1e55c2..5c6cb0132ce14c 100644
--- a/src/transformers/models/sam/processing_sam.py
+++ b/src/transformers/models/sam/processing_sam.py
@@ -265,3 +265,6 @@ def model_input_names(self):

     def post_process_masks(self, *args, **kwargs):
         return self.image_processor.post_process_masks(*args, **kwargs)
+
+
+__all__ = ["SamProcessor"]
diff --git a/src/transformers/models/seamless_m4t/__init__.py b/src/transformers/models/seamless_m4t/__init__.py
index 56b04e76b62ca6..0d289de0474acf 100644
--- a/src/transformers/models/seamless_m4t/__init__.py
+++ b/src/transformers/models/seamless_m4t/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,97 +13,19 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_sentencepiece_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-_import_structure = {
-    "configuration_seamless_m4t": ["SeamlessM4TConfig"],
-    "feature_extraction_seamless_m4t": ["SeamlessM4TFeatureExtractor"],
-    "processing_seamless_m4t": ["SeamlessM4TProcessor"],
-}
-
-try:
-    if not is_sentencepiece_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_seamless_m4t"] = ["SeamlessM4TTokenizer"]
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_seamless_m4t_fast"] = ["SeamlessM4TTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_seamless_m4t"] = [
-        "SeamlessM4TForTextToSpeech",
-        "SeamlessM4TForSpeechToSpeech",
-        "SeamlessM4TForTextToText",
-        "SeamlessM4TForSpeechToText",
-        "SeamlessM4TModel",
-        "SeamlessM4TPreTrainedModel",
-        "SeamlessM4TCodeHifiGan",
-        "SeamlessM4THifiGan",
-        "SeamlessM4TTextToUnitForConditionalGeneration",
-        "SeamlessM4TTextToUnitModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_seamless_m4t import SeamlessM4TConfig
-    from .feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor
-    from .processing_seamless_m4t import SeamlessM4TProcessor
-
-    try:
-        if not is_sentencepiece_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_seamless_m4t import SeamlessM4TTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_seamless_m4t_fast import SeamlessM4TTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_seamless_m4t import (
-            SeamlessM4TCodeHifiGan,
-            SeamlessM4TForSpeechToSpeech,
-            SeamlessM4TForSpeechToText,
-            SeamlessM4TForTextToSpeech,
-            SeamlessM4TForTextToText,
-            SeamlessM4THifiGan,
-            SeamlessM4TModel,
-            SeamlessM4TPreTrainedModel,
-            SeamlessM4TTextToUnitForConditionalGeneration,
-            SeamlessM4TTextToUnitModel,
-        )
-
+    from .configuration_seamless_m4t import *
+    from .feature_extraction_seamless_m4t import *
+    from .modeling_seamless_m4t import *
+    from .processing_seamless_m4t import *
+    from .tokenization_seamless_m4t import *
+    from .tokenization_seamless_m4t_fast import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py b/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py
index c24eb0ecb64cc9..f406264b030873 100644
--- a/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py
+++ b/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py
@@ -411,3 +411,6 @@ def __init__(
             max_position_embeddings=max_position_embeddings,
             **kwargs,
         )
+
+
+__all__ = ["SeamlessM4TConfig"]
diff --git a/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py b/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py
index 2a83e56fc0bd12..08ea4ea0828649 100644
--- a/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py
+++ b/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py
@@ -304,3 +304,6 @@ def __call__(
             padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

         return padded_inputs
+
+
+__all__ = ["SeamlessM4TFeatureExtractor"]
diff --git a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
index 6aa967416d5477..d8b52cf63700b9 100755
--- a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
+++ b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
@@ -4286,3 +4286,17 @@ def _reorder_cache(past_key_values, beam_idx):
                 tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
             )
         return reordered_past
+
+
+__all__ = [
+    "SeamlessM4TForTextToSpeech",
+    "SeamlessM4TForSpeechToSpeech",
+    "SeamlessM4TForTextToText",
+    "SeamlessM4TForSpeechToText",
+    "SeamlessM4TModel",
+    "SeamlessM4TPreTrainedModel",
+    "SeamlessM4TCodeHifiGan",
+    "SeamlessM4THifiGan",
+    "SeamlessM4TTextToUnitForConditionalGeneration",
+    "SeamlessM4TTextToUnitModel",
+]
diff --git a/src/transformers/models/seamless_m4t/processing_seamless_m4t.py b/src/transformers/models/seamless_m4t/processing_seamless_m4t.py
index 7e838913ca147c..dd80b503eead7a 100644
--- a/src/transformers/models/seamless_m4t/processing_seamless_m4t.py
+++ b/src/transformers/models/seamless_m4t/processing_seamless_m4t.py
@@ -115,3 +115,6 @@ def model_input_names(self):
         tokenizer_input_names = self.tokenizer.model_input_names
         feature_extractor_input_names = self.feature_extractor.model_input_names
         return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
+
+
+__all__ = ["SeamlessM4TProcessor"]
diff --git a/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py b/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py
index d6017a6e057960..6a4509ee78b656 100644
--- a/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py
+++ b/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py
@@ -560,3 +560,6 @@ def set_tgt_lang_special_tokens(self, lang: str) -> None:

         self.prefix_tokens = [self.eos_token_id, self.cur_lang_code]
         self.suffix_tokens = [self.eos_token_id]
+
+
+__all__ = ["SeamlessM4TTokenizer"]
diff --git a/src/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py b/src/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py
index 70892c9948b8ed..c1142c10719fb1 100644
--- a/src/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py
+++ b/src/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py
@@ -445,3 +445,6 @@ def __call__(
         )

         return output
+
+
+__all__ = ["SeamlessM4TTokenizerFast"]
diff --git a/src/transformers/models/seamless_m4t_v2/__init__.py b/src/transformers/models/seamless_m4t_v2/__init__.py
index 5fde6a5d332a39..10f256f8575595 100644
--- a/src/transformers/models/seamless_m4t_v2/__init__.py
+++ b/src/transformers/models/seamless_m4t_v2/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,51 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-_import_structure = {
-    "configuration_seamless_m4t_v2": ["SeamlessM4Tv2Config"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_seamless_m4t_v2"] = [
-        "SeamlessM4Tv2ForTextToSpeech",
-        "SeamlessM4Tv2ForSpeechToSpeech",
-        "SeamlessM4Tv2ForTextToText",
-        "SeamlessM4Tv2ForSpeechToText",
-        "SeamlessM4Tv2Model",
-        "SeamlessM4Tv2PreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_seamless_m4t_v2 import (
-            SeamlessM4Tv2ForSpeechToSpeech,
-            SeamlessM4Tv2ForSpeechToText,
-            SeamlessM4Tv2ForTextToSpeech,
-            SeamlessM4Tv2ForTextToText,
-            SeamlessM4Tv2Model,
-            SeamlessM4Tv2PreTrainedModel,
-        )
-
+    from .configuration_seamless_m4t_v2 import *
+    from .modeling_seamless_m4t_v2 import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py b/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py
index 30082cd5fd8725..d29eaf45401003 100644
--- a/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py
+++ b/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py
@@ -420,3 +420,6 @@ def __init__(
             max_position_embeddings=max_position_embeddings,
             **kwargs,
         )
+
+
+__all__ = ["SeamlessM4Tv2Config"]
diff --git a/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py b/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
index 978000086e2c3b..da9a4ecdadb417 100644
--- a/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
+++ b/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
@@ -4707,3 +4707,13 @@ def _reorder_cache(past_key_values, beam_idx):
                 tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
             )
         return reordered_past
+
+
+__all__ = [
+    "SeamlessM4Tv2ForTextToSpeech",
+    "SeamlessM4Tv2ForSpeechToSpeech",
+    "SeamlessM4Tv2ForTextToText",
+    "SeamlessM4Tv2ForSpeechToText",
+    "SeamlessM4Tv2Model",
+    "SeamlessM4Tv2PreTrainedModel",
+]
diff --git a/src/transformers/models/segformer/__init__.py b/src/transformers/models/segformer/__init__.py
index 8d8cccdf39ff42..9fb469789773f1 100644
--- a/src/transformers/models/segformer/__init__.py
+++ b/src/transformers/models/segformer/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,97 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_tf_available,
-    is_torch_available,
-    is_vision_available,
-)
-
-
-_import_structure = {"configuration_segformer": ["SegformerConfig", "SegformerOnnxConfig"]}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["feature_extraction_segformer"] = ["SegformerFeatureExtractor"]
-    _import_structure["image_processing_segformer"] = ["SegformerImageProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_segformer"] = [
-        "SegformerDecodeHead",
-        "SegformerForImageClassification",
-        "SegformerForSemanticSegmentation",
-        "SegformerLayer",
-        "SegformerModel",
-        "SegformerPreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_segformer"] = [
-        "TFSegformerDecodeHead",
-        "TFSegformerForImageClassification",
-        "TFSegformerForSemanticSegmentation",
-        "TFSegformerModel",
-        "TFSegformerPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_segformer import SegformerConfig, SegformerOnnxConfig
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .feature_extraction_segformer import SegformerFeatureExtractor
-        from .image_processing_segformer import SegformerImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_segformer import (
-            SegformerDecodeHead,
-            SegformerForImageClassification,
-            SegformerForSemanticSegmentation,
-            SegformerLayer,
-            SegformerModel,
-            SegformerPreTrainedModel,
-        )
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_segformer import (
-            TFSegformerDecodeHead,
-            TFSegformerForImageClassification,
-            TFSegformerForSemanticSegmentation,
-            TFSegformerModel,
-            TFSegformerPreTrainedModel,
-        )
-
+    from .configuration_segformer import *
+    from .feature_extraction_segformer import *
+    from .image_processing_segformer import *
+    from .modeling_segformer import *
+    from .modeling_tf_segformer import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/segformer/configuration_segformer.py b/src/transformers/models/segformer/configuration_segformer.py
index 28fc1a7334e9c2..58683a86c78a5a 100644
--- a/src/transformers/models/segformer/configuration_segformer.py
+++ b/src/transformers/models/segformer/configuration_segformer.py
@@ -166,3 +166,6 @@ def atol_for_validation(self) -> float:
     @property
     def default_onnx_opset(self) -> int:
         return 12
+
+
+__all__ = ["SegformerConfig", "SegformerOnnxConfig"]
diff --git a/src/transformers/models/segformer/feature_extraction_segformer.py b/src/transformers/models/segformer/feature_extraction_segformer.py
index 3c081e73890680..85442612e23bf7 100644
--- a/src/transformers/models/segformer/feature_extraction_segformer.py
+++ b/src/transformers/models/segformer/feature_extraction_segformer.py
@@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None:
             FutureWarning,
         )
         super().__init__(*args, **kwargs)
+
+
+__all__ = ["SegformerFeatureExtractor"]
diff --git a/src/transformers/models/segformer/image_processing_segformer.py b/src/transformers/models/segformer/image_processing_segformer.py
index da1c9be40a5e67..d6662f1f620fea 100644
--- a/src/transformers/models/segformer/image_processing_segformer.py
+++ b/src/transformers/models/segformer/image_processing_segformer.py
@@ -477,3 +477,6 @@ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple]
             semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

         return semantic_segmentation
+
+
+__all__ = ["SegformerImageProcessor"]
diff --git a/src/transformers/models/segformer/modeling_segformer.py b/src/transformers/models/segformer/modeling_segformer.py
index 44582a74ccc9f1..36efb00e67b0e5 100755
--- a/src/transformers/models/segformer/modeling_segformer.py
+++ b/src/transformers/models/segformer/modeling_segformer.py
@@ -826,3 +826,13 @@ def forward(
             hidden_states=outputs.hidden_states if output_hidden_states else None,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "SegformerDecodeHead",
+    "SegformerForImageClassification",
+    "SegformerForSemanticSegmentation",
+    "SegformerLayer",
+    "SegformerModel",
+    "SegformerPreTrainedModel",
+]
diff --git a/src/transformers/models/segformer/modeling_tf_segformer.py b/src/transformers/models/segformer/modeling_tf_segformer.py
index 4cd52e135edcbe..9c65a3b5322976 100644
--- a/src/transformers/models/segformer/modeling_tf_segformer.py
+++ b/src/transformers/models/segformer/modeling_tf_segformer.py
@@ -1034,3 +1034,12 @@ def build(self, input_shape=None):
         if getattr(self, "decode_head", None) is not None:
             with tf.name_scope(self.decode_head.name):
                 self.decode_head.build(None)
+
+
+__all__ = [
+    "TFSegformerDecodeHead",
+    "TFSegformerForImageClassification",
+    "TFSegformerForSemanticSegmentation",
+    "TFSegformerModel",
+    "TFSegformerPreTrainedModel",
+]
diff --git a/src/transformers/models/seggpt/__init__.py b/src/transformers/models/seggpt/__init__.py
index b6095b53277ae0..ec646f2f81a21d 100644
--- a/src/transformers/models/seggpt/__init__.py
+++ b/src/transformers/models/seggpt/__init__.py
@@ -13,55 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {"configuration_seggpt": ["SegGptConfig", "SegGptOnnxConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_seggpt"] = [
-        "SegGptModel",
-        "SegGptPreTrainedModel",
-        "SegGptForImageSegmentation",
-    ]
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_seggpt"] = ["SegGptImageProcessor"]
-
 if TYPE_CHECKING:
-    from .configuration_seggpt import SegGptConfig, SegGptOnnxConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_seggpt import (
-            SegGptForImageSegmentation,
-            SegGptModel,
-            SegGptPreTrainedModel,
-        )
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_seggpt import SegGptImageProcessor
-
+    from .configuration_seggpt import *
+    from .image_processing_seggpt import *
+    from .modeling_seggpt import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/seggpt/configuration_seggpt.py b/src/transformers/models/seggpt/configuration_seggpt.py
index f79e7f12b2ef4c..a735281facccc5 100644
--- a/src/transformers/models/seggpt/configuration_seggpt.py
+++ b/src/transformers/models/seggpt/configuration_seggpt.py
@@ -138,3 +138,6 @@ def __init__(
         self.intermediate_hidden_state_indices = intermediate_hidden_state_indices
         self.beta = beta
         self.mlp_dim = int(hidden_size * 4) if mlp_dim is None else mlp_dim
+
+
+__all__ = ["SegGptConfig"]
diff --git a/src/transformers/models/seggpt/image_processing_seggpt.py b/src/transformers/models/seggpt/image_processing_seggpt.py
index 1e4a5e23d093e8..4a91ee76e3ebfd 100644
--- a/src/transformers/models/seggpt/image_processing_seggpt.py
+++ b/src/transformers/models/seggpt/image_processing_seggpt.py
@@ -613,3 +613,6 @@ def post_process_semantic_segmentation(
             semantic_segmentation.append(pred)

         return semantic_segmentation
+
+
+__all__ = ["SegGptImageProcessor"]
diff --git a/src/transformers/models/seggpt/modeling_seggpt.py b/src/transformers/models/seggpt/modeling_seggpt.py
index c0f1f24a31781f..e2d3c8e78aed7e 100644
--- a/src/transformers/models/seggpt/modeling_seggpt.py
+++ b/src/transformers/models/seggpt/modeling_seggpt.py
@@ -1020,3 +1020,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["SegGptModel", "SegGptPreTrainedModel", "SegGptForImageSegmentation"]
diff --git a/src/transformers/models/sew/__init__.py b/src/transformers/models/sew/__init__.py
index aba88cc45133c2..0bd614f74a6806 100644
--- a/src/transformers/models/sew/__init__.py
+++ b/src/transformers/models/sew/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,42 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {"configuration_sew": ["SEWConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_sew"] = [
-        "SEWForCTC",
-        "SEWForSequenceClassification",
-        "SEWModel",
-        "SEWPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_sew import SEWConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_sew import (
-            SEWForCTC,
-            SEWForSequenceClassification,
-            SEWModel,
-            SEWPreTrainedModel,
-        )
-
-
+    from .configuration_sew import *
+    from .modeling_sew import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/sew/configuration_sew.py b/src/transformers/models/sew/configuration_sew.py
index 6c877277aec26d..4365e9c33ded70 100644
--- a/src/transformers/models/sew/configuration_sew.py
+++ b/src/transformers/models/sew/configuration_sew.py
@@ -251,3 +251,6 @@ def __init__(
     @property
     def inputs_to_logits_ratio(self):
         return functools.reduce(operator.mul, self.conv_stride, 1)
+
+
+__all__ = ["SEWConfig"]
diff --git a/src/transformers/models/sew/modeling_sew.py b/src/transformers/models/sew/modeling_sew.py
index 8638d93385843d..8ebc35e6c6614a 100644
--- a/src/transformers/models/sew/modeling_sew.py
+++ b/src/transformers/models/sew/modeling_sew.py
@@ -1494,3 +1494,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel"]
diff --git a/src/transformers/models/sew_d/__init__.py b/src/transformers/models/sew_d/__init__.py
index c99be845d544b5..7902aa464a9cfd 100644
--- a/src/transformers/models/sew_d/__init__.py
+++ b/src/transformers/models/sew_d/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,42 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {"configuration_sew_d": ["SEWDConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_sew_d"] = [
-        "SEWDForCTC",
-        "SEWDForSequenceClassification",
-        "SEWDModel",
-        "SEWDPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_sew_d import SEWDConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_sew_d import (
-            SEWDForCTC,
-            SEWDForSequenceClassification,
-            SEWDModel,
-            SEWDPreTrainedModel,
-        )
-
-
+    from .configuration_sew_d import *
+    from .modeling_sew_d import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/sew_d/configuration_sew_d.py b/src/transformers/models/sew_d/configuration_sew_d.py
index ea791935ba6098..f03a8f43c1a276 100644
--- a/src/transformers/models/sew_d/configuration_sew_d.py
+++ b/src/transformers/models/sew_d/configuration_sew_d.py
@@ -286,3 +286,6 @@ def to_dict(self):
         output = super().to_dict()
         output["hidden_dropout"] = output.pop("_hidden_dropout")
         return output
+
+
+__all__ = ["SEWDConfig"]
diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py
index 5cccc0218e6ccf..34c6e665147b9a 100644
--- a/src/transformers/models/sew_d/modeling_sew_d.py
+++ b/src/transformers/models/sew_d/modeling_sew_d.py
@@ -1741,3 +1741,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["SEWDForCTC", "SEWDForSequenceClassification", "SEWDModel", "SEWDPreTrainedModel"]
diff --git a/src/transformers/models/siglip/__init__.py b/src/transformers/models/siglip/__init__.py
index 96ce20e7f230bf..c0618262afbb2b 100644
--- a/src/transformers/models/siglip/__init__.py
+++ b/src/transformers/models/siglip/__init__.py
@@ -13,96 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_sentencepiece_available,
-    is_torch_available,
-    is_vision_available,
-)
-
-
-_import_structure = {
-    "configuration_siglip": [
-        "SiglipConfig",
-        "SiglipTextConfig",
-        "SiglipVisionConfig",
-    ],
-    "processing_siglip": ["SiglipProcessor"],
-}
-
-try:
-    if not is_sentencepiece_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_siglip"] = ["SiglipTokenizer"]
-
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_siglip"] = ["SiglipImageProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_siglip"] = [
-        "SiglipModel",
-        "SiglipPreTrainedModel",
-        "SiglipTextModel",
-        "SiglipVisionModel",
-        "SiglipForImageClassification",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_siglip import (
-        SiglipConfig,
-        SiglipTextConfig,
-        SiglipVisionConfig,
-    )
-    from .processing_siglip import SiglipProcessor
-
-    try:
-        if not is_sentencepiece_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_siglip import SiglipTokenizer
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_siglip import SiglipImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_siglip import (
-            SiglipForImageClassification,
-            SiglipModel,
-            SiglipPreTrainedModel,
-            SiglipTextModel,
-            SiglipVisionModel,
-        )
-
-
+    from .configuration_siglip import *
+    from .image_processing_siglip import *
+    from .modeling_siglip import *
+    from .processing_siglip import *
+    from .tokenization_siglip import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/siglip/configuration_siglip.py b/src/transformers/models/siglip/configuration_siglip.py
index cc8fae93cdb25b..ad676046f3483a 100644
--- a/src/transformers/models/siglip/configuration_siglip.py
+++ b/src/transformers/models/siglip/configuration_siglip.py
@@ -260,3 +260,6 @@ def from_text_vision_configs(cls, text_config: SiglipTextConfig, vision_config:
         """

         return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
+
+
+__all__ = ["SiglipConfig", "SiglipTextConfig", "SiglipVisionConfig"]
diff --git a/src/transformers/models/siglip/image_processing_siglip.py b/src/transformers/models/siglip/image_processing_siglip.py
index 293716b48fa236..5b22ac51689538 100644
--- a/src/transformers/models/siglip/image_processing_siglip.py
+++ b/src/transformers/models/siglip/image_processing_siglip.py
@@ -239,3 +239,6 @@ def preprocess(
         data = {"pixel_values": images}

         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["SiglipImageProcessor"]
diff --git a/src/transformers/models/siglip/modeling_siglip.py b/src/transformers/models/siglip/modeling_siglip.py
index a42bcd0e17461e..63ba9627701a72 100644
--- a/src/transformers/models/siglip/modeling_siglip.py
+++ b/src/transformers/models/siglip/modeling_siglip.py
@@ -1567,3 +1567,12 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "SiglipModel",
+    "SiglipPreTrainedModel",
+    "SiglipTextModel",
+    "SiglipVisionModel",
+    "SiglipForImageClassification",
+]
diff --git a/src/transformers/models/siglip/processing_siglip.py b/src/transformers/models/siglip/processing_siglip.py
index 655fb4d4f78ab0..fd89287fc3f46d 100644
--- a/src/transformers/models/siglip/processing_siglip.py
+++ b/src/transformers/models/siglip/processing_siglip.py
@@ -140,3 +140,6 @@ def model_input_names(self):
         tokenizer_input_names = self.tokenizer.model_input_names
         image_processor_input_names = self.image_processor.model_input_names
         return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+
+__all__ = ["SiglipProcessor"]
diff --git a/src/transformers/models/siglip/tokenization_siglip.py b/src/transformers/models/siglip/tokenization_siglip.py
index 6203c6887054ca..a9c24df2587409 100644
--- a/src/transformers/models/siglip/tokenization_siglip.py
+++ b/src/transformers/models/siglip/tokenization_siglip.py
@@ -373,3 +373,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
                 fi.write(content_spiece_model)

         return (out_vocab_file,)
+
+
+__all__ = ["SiglipTokenizer"]
diff --git a/src/transformers/models/speech_encoder_decoder/__init__.py b/src/transformers/models/speech_encoder_decoder/__init__.py
index 392f21296e7242..4e07844d45c2d0 100644
--- a/src/transformers/models/speech_encoder_decoder/__init__.py
+++ b/src/transformers/models/speech_encoder_decoder/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,50 +11,18 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
-
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
-
-try:
-    if not is_flax_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

 if TYPE_CHECKING:
-    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
-
-    try:
-        if not is_flax_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
-
+    from .configuration_speech_encoder_decoder import *
+    from .modeling_flax_speech_encoder_decoder import *
+    from .modeling_speech_encoder_decoder import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py b/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py
index d7e0211610b657..47312df27ea67d 100644
--- a/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py
+++ b/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py
@@ -107,3 +107,6 @@ def from_encoder_decoder_configs(
         decoder_config.add_cross_attention = True

         return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
+
+
+__all__ = ["SpeechEncoderDecoderConfig"]
diff --git a/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py b/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py
index 2a15714cff9e87..fd837146d5ca7d 100644
--- a/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py
+++ b/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py
@@ -924,3 +924,6 @@ def from_encoder_decoder_pretrained(
         model.params["decoder"] = decoder.params

         return model
+
+
+__all__ = ["FlaxSpeechEncoderDecoderModel"]
diff --git a/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py b/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
index 3bff8f6acd290d..dfadca184483e4 100644
--- a/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
+++ b/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
@@ -590,3 +590,6 @@ def resize_token_embeddings(self, *args, **kwargs):
     def _reorder_cache(self, past_key_values, beam_idx):
         # apply decoder cache reordering here
         return self.decoder._reorder_cache(past_key_values, beam_idx)
+
+
+__all__ = ["SpeechEncoderDecoderModel"]
diff --git a/src/transformers/models/speech_to_text/__init__.py b/src/transformers/models/speech_to_text/__init__.py
index 4ad05da69710ad..ec094769d4aea3 100644
--- a/src/transformers/models/speech_to_text/__init__.py
+++ b/src/transformers/models/speech_to_text/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,92 +13,19 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_sentencepiece_available,
-    is_tf_available,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_speech_to_text": ["Speech2TextConfig"],
-    "feature_extraction_speech_to_text": ["Speech2TextFeatureExtractor"],
-    "processing_speech_to_text": ["Speech2TextProcessor"],
-}
-
-try:
-    if not is_sentencepiece_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_speech_to_text"] = [
-        "TFSpeech2TextForConditionalGeneration",
-        "TFSpeech2TextModel",
-        "TFSpeech2TextPreTrainedModel",
-    ]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_speech_to_text"] = [
-        "Speech2TextForConditionalGeneration",
-        "Speech2TextModel",
-        "Speech2TextPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_speech_to_text import Speech2TextConfig
-    from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
-    from .processing_speech_to_text import Speech2TextProcessor
-
-    try:
-        if not is_sentencepiece_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_speech_to_text import Speech2TextTokenizer
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_speech_to_text import (
-            TFSpeech2TextForConditionalGeneration,
-            TFSpeech2TextModel,
-            TFSpeech2TextPreTrainedModel,
-        )
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_speech_to_text import (
-            Speech2TextForConditionalGeneration,
-            Speech2TextModel,
-            Speech2TextPreTrainedModel,
-        )
-
+    from .configuration_speech_to_text import *
+    from .feature_extraction_speech_to_text import *
+    from .modeling_speech_to_text import *
+    from .modeling_tf_speech_to_text import *
+    from .processing_speech_to_text import *
+    from .tokenization_speech_to_text import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/speech_to_text/configuration_speech_to_text.py b/src/transformers/models/speech_to_text/configuration_speech_to_text.py
index 80602e9a7d8e3a..fef4069e4e4170 100644
--- a/src/transformers/models/speech_to_text/configuration_speech_to_text.py
+++ b/src/transformers/models/speech_to_text/configuration_speech_to_text.py
@@ -194,3 +194,6 @@ def __init__(
             decoder_start_token_id=decoder_start_token_id,
             **kwargs,
         )
+
+
+__all__ = ["Speech2TextConfig"]
diff --git a/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py b/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
index b8a2b6bfb29738..56a0b859db4594 100644
--- a/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
+++ b/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
@@ -295,3 +295,6 @@ def __call__(
             padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

         return padded_inputs
+
+
+__all__ = ["Speech2TextFeatureExtractor"]
diff --git a/src/transformers/models/speech_to_text/modeling_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_speech_to_text.py
index aadc1da500ea64..2e4f64c5120c72 100755
--- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py
+++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py
@@ -1339,3 +1339,6 @@ def _reorder_cache(past_key_values, beam_idx):
                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
             )
         return reordered_past
+
+
+__all__ = ["Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel"]
diff --git a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
index bac1256ca4b672..7a20e462279a90 100755
--- a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
+++ b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
@@ -1601,3 +1601,6 @@ def tf_to_pt_weight_rename(self, tf_weight):
             return tf_weight, "model.decoder.embed_tokens.weight"
         else:
             return (tf_weight,)
+
+
+__all__ = ["TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSpeech2TextPreTrainedModel"]
diff --git a/src/transformers/models/speech_to_text/processing_speech_to_text.py b/src/transformers/models/speech_to_text/processing_speech_to_text.py
index 646b3899945422..57eb1f9cb9509f 100644
--- a/src/transformers/models/speech_to_text/processing_speech_to_text.py
+++ b/src/transformers/models/speech_to_text/processing_speech_to_text.py
@@ -115,3 +115,6 @@ def as_target_processor(self):
         yield
         self.current_processor = self.feature_extractor
         self._in_target_context_manager = False
+
+
+__all__ = ["Speech2TextProcessor"]
diff --git a/src/transformers/models/speech_to_text/tokenization_speech_to_text.py b/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
index 1b9841f0cfb729..c86a5417c3b86a 100644
--- a/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
+++ b/src/transformers/models/speech_to_text/tokenization_speech_to_text.py
@@ -288,3 +288,6 @@ def load_json(path: str) -> Union[Dict, List]:
 def save_json(data, path: str) -> None:
     with open(path, "w") as f:
         json.dump(data, f, indent=2)
+
+
+__all__ = ["Speech2TextTokenizer"]
diff --git a/src/transformers/models/speecht5/__init__.py b/src/transformers/models/speecht5/__init__.py
index f9afe52aa4b7ab..52fee59ae9b96d 100644
--- a/src/transformers/models/speecht5/__init__.py
+++ b/src/transformers/models/speecht5/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,78 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_sentencepiece_available,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure

-_import_structure = {
-    "configuration_speecht5": [
-        "SpeechT5Config",
-        "SpeechT5HifiGanConfig",
-    ],
-    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
-    "processing_speecht5": ["SpeechT5Processor"],
-}
-
-try:
-    if not is_sentencepiece_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_speecht5"] = [
-        "SpeechT5ForSpeechToText",
-        "SpeechT5ForSpeechToSpeech",
-        "SpeechT5ForTextToSpeech",
-        "SpeechT5Model",
-        "SpeechT5PreTrainedModel",
-        "SpeechT5HifiGan",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_speecht5 import (
-        SpeechT5Config,
-        SpeechT5HifiGanConfig,
-    )
-    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
-    from .processing_speecht5 import SpeechT5Processor
-
-    try:
-        if not is_sentencepiece_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_speecht5 import SpeechT5Tokenizer
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_speecht5 import (
-            SpeechT5ForSpeechToSpeech,
-            SpeechT5ForSpeechToText,
-            SpeechT5ForTextToSpeech,
-            SpeechT5HifiGan,
-            SpeechT5Model,
-            SpeechT5PreTrainedModel,
-        )
-
+    from .configuration_speecht5 import *
+    from .feature_extraction_speecht5 import *
+    from .modeling_speecht5 import *
+    from .processing_speecht5 import *
+    from .tokenization_speecht5 import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/speecht5/configuration_speecht5.py b/src/transformers/models/speecht5/configuration_speecht5.py
index d8f4497de8f77e..6f79bdbc61d3cd 100644
--- a/src/transformers/models/speecht5/configuration_speecht5.py
+++ b/src/transformers/models/speecht5/configuration_speecht5.py
@@ -417,3 +417,6 @@ def __init__(
         self.leaky_relu_slope = leaky_relu_slope
         self.normalize_before = normalize_before
         super().__init__(**kwargs)
+
+
+__all__ = ["SpeechT5Config", "SpeechT5HifiGanConfig"]
diff --git a/src/transformers/models/speecht5/feature_extraction_speecht5.py b/src/transformers/models/speecht5/feature_extraction_speecht5.py
index 84d51e97df95e0..aea75ca50b9912 100644
--- a/src/transformers/models/speecht5/feature_extraction_speecht5.py
+++ b/src/transformers/models/speecht5/feature_extraction_speecht5.py
@@ -391,3 +391,6 @@ def to_dict(self) -> Dict[str, Any]:
             del output[name]

         return output
+
+
+__all__ = ["SpeechT5FeatureExtractor"]
diff --git a/src/transformers/models/speecht5/modeling_speecht5.py b/src/transformers/models/speecht5/modeling_speecht5.py
index 72cbe6b14a93be..81cca3bc84804d 100644
--- a/src/transformers/models/speecht5/modeling_speecht5.py
+++ b/src/transformers/models/speecht5/modeling_speecht5.py
@@ -3382,3 +3382,13 @@ def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
             waveform = hidden_states.squeeze(1)

         return waveform
+
+
+__all__ = [
+    "SpeechT5ForSpeechToText",
+    "SpeechT5ForSpeechToSpeech",
+    "SpeechT5ForTextToSpeech",
+    "SpeechT5Model",
+    "SpeechT5PreTrainedModel",
+    "SpeechT5HifiGan",
+]
diff --git a/src/transformers/models/speecht5/processing_speecht5.py b/src/transformers/models/speecht5/processing_speecht5.py
index 468a0c1d89ab21..0c038d97ae8c05 100644
--- a/src/transformers/models/speecht5/processing_speecht5.py
+++ b/src/transformers/models/speecht5/processing_speecht5.py
@@ -181,3 +181,6 @@ def decode(self, *args, **kwargs):
         the docstring of this method for more information.
         """
         return self.tokenizer.decode(*args, **kwargs)
+
+
+__all__ = ["SpeechT5Processor"]
diff --git a/src/transformers/models/speecht5/tokenization_speecht5.py b/src/transformers/models/speecht5/tokenization_speecht5.py
index 97b2feaab3ccac..e3cf3867affc97 100644
--- a/src/transformers/models/speecht5/tokenization_speecht5.py
+++ b/src/transformers/models/speecht5/tokenization_speecht5.py
@@ -216,3 +216,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
                 fi.write(content_spiece_model)

         return (out_vocab_file,)
+
+
+__all__ = ["SpeechT5Tokenizer"]
diff --git a/src/transformers/models/splinter/__init__.py b/src/transformers/models/splinter/__init__.py
index 81896fb15a5b66..258d79a9c34799 100644
--- a/src/transformers/models/splinter/__init__.py
+++ b/src/transformers/models/splinter/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,65 +13,17 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
-
-
-_import_structure = {
-    "configuration_splinter": ["SplinterConfig"],
-    "tokenization_splinter": ["SplinterTokenizer"],
-}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_splinter_fast"] = ["SplinterTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_splinter"] = [
-        "SplinterForQuestionAnswering",
-        "SplinterForPreTraining",
-        "SplinterLayer",
-        "SplinterModel",
-        "SplinterPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_splinter import SplinterConfig
-    from .tokenization_splinter import SplinterTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_splinter_fast import SplinterTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_splinter import (
-            SplinterForPreTraining,
-            SplinterForQuestionAnswering,
-            SplinterLayer,
-            SplinterModel,
-            SplinterPreTrainedModel,
-        )
-
-
+    from .configuration_splinter import *
+    from .modeling_splinter import *
+    from .tokenization_splinter import *
+    from .tokenization_splinter_fast import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/splinter/configuration_splinter.py b/src/transformers/models/splinter/configuration_splinter.py
index 9a946fd4bedbe2..533b067ed34f31 100644
--- a/src/transformers/models/splinter/configuration_splinter.py
+++ b/src/transformers/models/splinter/configuration_splinter.py
@@ -118,3 +118,6 @@ def __init__(
         self.layer_norm_eps = layer_norm_eps
         self.use_cache = use_cache
         self.question_token_id = question_token_id
+
+
+__all__ = ["SplinterConfig"]
diff --git a/src/transformers/models/splinter/modeling_splinter.py b/src/transformers/models/splinter/modeling_splinter.py
index 6494a57fa4fc1a..64406745a42675 100755
--- a/src/transformers/models/splinter/modeling_splinter.py
+++ b/src/transformers/models/splinter/modeling_splinter.py
@@ -1105,3 +1105,12 @@ def _prepare_question_positions(self, input_ids: torch.Tensor) -> torch.Tensor:
         cols = torch.cat([torch.arange(n) for n in num_questions])
         positions[rows, cols] = flat_positions
         return positions
+
+
+__all__ = [
+    "SplinterForQuestionAnswering",
+    "SplinterForPreTraining",
+    "SplinterLayer",
+    "SplinterModel",
+    "SplinterPreTrainedModel",
+]
diff --git a/src/transformers/models/splinter/tokenization_splinter.py b/src/transformers/models/splinter/tokenization_splinter.py
index ffa135556aa47d..76b7543eb2eec4 100644
--- a/src/transformers/models/splinter/tokenization_splinter.py
+++ b/src/transformers/models/splinter/tokenization_splinter.py
@@ -502,3 +502,6 @@ def tokenize(self, text):
             else:
                 output_tokens.extend(sub_tokens)
         return output_tokens
+
+
+__all__ = ["SplinterTokenizer"]
diff --git a/src/transformers/models/splinter/tokenization_splinter_fast.py b/src/transformers/models/splinter/tokenization_splinter_fast.py
index 0371fdf2828eb2..85dd01a2be036f 100644
--- a/src/transformers/models/splinter/tokenization_splinter_fast.py
+++ b/src/transformers/models/splinter/tokenization_splinter_fast.py
@@ -188,3 +188,6 @@ def create_token_type_ids_from_sequences(
     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
         files = self._tokenizer.model.save(save_directory, name=filename_prefix)
         return tuple(files)
+
+
+__all__ = ["SplinterTokenizerFast"]
diff --git a/src/transformers/models/squeezebert/__init__.py b/src/transformers/models/squeezebert/__init__.py
index 45aff2f64c1610..e0a760d2b5cae3 100644
--- a/src/transformers/models/squeezebert/__init__.py
+++ b/src/transformers/models/squeezebert/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,79 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
-
-
-_import_structure = {
-    "configuration_squeezebert": [
-        "SqueezeBertConfig",
-        "SqueezeBertOnnxConfig",
-    ],
-    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
-}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_squeezebert"] = [
-        "SqueezeBertForMaskedLM",
-        "SqueezeBertForMultipleChoice",
-        "SqueezeBertForQuestionAnswering",
-        "SqueezeBertForSequenceClassification",
-        "SqueezeBertForTokenClassification",
-        "SqueezeBertModel",
-        "SqueezeBertModule",
-        "SqueezeBertPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


 if TYPE_CHECKING:
-    from .configuration_squeezebert import (
-        SqueezeBertConfig,
-        SqueezeBertOnnxConfig,
-    )
-    from .tokenization_squeezebert import SqueezeBertTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_squeezebert import (
-            SqueezeBertForMaskedLM,
-            SqueezeBertForMultipleChoice,
-            SqueezeBertForQuestionAnswering,
-            SqueezeBertForSequenceClassification,
-            SqueezeBertForTokenClassification,
-            SqueezeBertModel,
-            SqueezeBertModule,
-            SqueezeBertPreTrainedModel,
-        )
-
+    from .configuration_squeezebert import *
+    from .modeling_squeezebert import *
+    from .tokenization_squeezebert import *
+    from .tokenization_squeezebert_fast import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/squeezebert/configuration_squeezebert.py b/src/transformers/models/squeezebert/configuration_squeezebert.py
index 1f3753ac5c083d..a659517545530e 100644
--- a/src/transformers/models/squeezebert/configuration_squeezebert.py
+++ b/src/transformers/models/squeezebert/configuration_squeezebert.py
@@ -162,3 +162,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
                 ("token_type_ids", dynamic_axis),
             ]
         )
+
+
+__all__ = ["SqueezeBertConfig", "SqueezeBertOnnxConfig"]
diff --git a/src/transformers/models/squeezebert/modeling_squeezebert.py b/src/transformers/models/squeezebert/modeling_squeezebert.py
index 483bac01bd9ece..1dbba371dca29b 100644
--- a/src/transformers/models/squeezebert/modeling_squeezebert.py
+++ b/src/transformers/models/squeezebert/modeling_squeezebert.py
@@ -1085,3 +1085,15 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "SqueezeBertForMaskedLM",
+    "SqueezeBertForMultipleChoice",
+    "SqueezeBertForQuestionAnswering",
+    "SqueezeBertForSequenceClassification",
+    "SqueezeBertForTokenClassification",
+    "SqueezeBertModel",
+    "SqueezeBertModule",
+    "SqueezeBertPreTrainedModel",
+]
diff --git a/src/transformers/models/squeezebert/tokenization_squeezebert.py b/src/transformers/models/squeezebert/tokenization_squeezebert.py
index 9ac72fcc2608fe..71194f3d8df7cb 100644
--- a/src/transformers/models/squeezebert/tokenization_squeezebert.py
+++ b/src/transformers/models/squeezebert/tokenization_squeezebert.py
@@ -506,3 +506,6 @@ def tokenize(self, text):
             else:
                 output_tokens.extend(sub_tokens)
         return output_tokens
+
+
+__all__ = ["SqueezeBertTokenizer"]
diff --git a/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py b/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py
index 985fe657f0c3b6..a908dcbf146bdd 100644
--- a/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py
+++ b/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py
@@ -171,3 +171,6 @@ def create_token_type_ids_from_sequences(
     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
         files = self._tokenizer.model.save(save_directory, name=filename_prefix)
         return tuple(files)
+
+
+__all__ = ["SqueezeBertTokenizerFast"]
diff --git a/src/transformers/models/stablelm/__init__.py b/src/transformers/models/stablelm/__init__.py
index c00c045f7f81a4..004a82f49cd668 100644
--- a/src/transformers/models/stablelm/__init__.py
+++ b/src/transformers/models/stablelm/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2024 Stability AI and The HuggingFace Inc. team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,52 +13,15 @@
 # limitations under the License.
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_stablelm": ["StableLmConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_stablelm"] = [ - "StableLmForCausalLM", - "StableLmModel", - "StableLmPreTrainedModel", - "StableLmForSequenceClassification", - "StableLmForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_stablelm import StableLmConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_stablelm import ( - StableLmForCausalLM, - StableLmForSequenceClassification, - StableLmForTokenClassification, - StableLmModel, - StableLmPreTrainedModel, - ) - - + from .configuration_stablelm import * + from .modeling_stablelm import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/stablelm/configuration_stablelm.py b/src/transformers/models/stablelm/configuration_stablelm.py index a64c7e701d0373..06501792c42a31 100644 --- a/src/transformers/models/stablelm/configuration_stablelm.py +++ b/src/transformers/models/stablelm/configuration_stablelm.py @@ -197,3 +197,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["StableLmConfig"] diff --git a/src/transformers/models/stablelm/modeling_stablelm.py b/src/transformers/models/stablelm/modeling_stablelm.py index 0ce550697e79ab..46557963a29914 100755 --- a/src/transformers/models/stablelm/modeling_stablelm.py +++ b/src/transformers/models/stablelm/modeling_stablelm.py @@ -1401,3 +1401,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "StableLmForCausalLM", + "StableLmModel", + "StableLmPreTrainedModel", + "StableLmForSequenceClassification", + "StableLmForTokenClassification", +] diff --git a/src/transformers/models/superpoint/__init__.py b/src/transformers/models/superpoint/__init__.py index 90cde651ea0ae0..aab40abaa86d5a 100644 --- a/src/transformers/models/superpoint/__init__.py +++ b/src/transformers/models/superpoint/__init__.py @@ -13,57 +13,16 @@ # limitations under the License. 
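# One detail worth noticing in the superpoint hunk below: the old file called
# _LazyModule(__name__, globals()["__file__"], _import_structure) without a
# module_spec, while the rewritten tail passes module_spec=__spec__, quietly
# bringing superpoint in line with every other __init__.py touched here.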
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_superpoint": ["SuperPointConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_superpoint"] = ["SuperPointImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_superpoint"] = [ - "SuperPointForKeypointDetection", - "SuperPointPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_superpoint import ( - SuperPointConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_superpoint import SuperPointImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_superpoint import ( - SuperPointForKeypointDetection, - SuperPointPreTrainedModel, - ) - + from .configuration_superpoint import * + from .image_processing_superpoint import * + from .modeling_superpoint import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/superpoint/configuration_superpoint.py b/src/transformers/models/superpoint/configuration_superpoint.py index ac97b0aa8f4231..82104e682bcbd2 100644 --- a/src/transformers/models/superpoint/configuration_superpoint.py +++ b/src/transformers/models/superpoint/configuration_superpoint.py @@ -85,3 +85,6 @@ def __init__( self.initializer_range = initializer_range super().__init__(**kwargs) + + +__all__ = ["SuperPointConfig"] diff --git a/src/transformers/models/superpoint/image_processing_superpoint.py b/src/transformers/models/superpoint/image_processing_superpoint.py index 65309b1c1826f2..9ae1034484386a 100644 --- a/src/transformers/models/superpoint/image_processing_superpoint.py +++ b/src/transformers/models/superpoint/image_processing_superpoint.py @@ -325,3 +325,6 @@ def post_process_keypoint_detection( results.append({"keypoints": keypoints, "scores": scores, "descriptors": descriptors}) return results + + +__all__ = ["SuperPointImageProcessor"] diff --git a/src/transformers/models/superpoint/modeling_superpoint.py b/src/transformers/models/superpoint/modeling_superpoint.py index 1075de299a9f40..f0348cbe4ced43 100644 --- a/src/transformers/models/superpoint/modeling_superpoint.py +++ b/src/transformers/models/superpoint/modeling_superpoint.py @@ -503,3 +503,6 @@ def forward( mask=mask, hidden_states=hidden_states, ) + + +__all__ = ["SuperPointForKeypointDetection", "SuperPointPreTrainedModel"] diff --git a/src/transformers/models/swiftformer/__init__.py b/src/transformers/models/swiftformer/__init__.py index 2f5dcc811dde98..370f6c71fadb5d 100644 --- a/src/transformers/models/swiftformer/__init__.py +++ b/src/transformers/models/swiftformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. 
+# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,75 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_swiftformer": [ - "SwiftFormerConfig", - "SwiftFormerOnnxConfig", - ] -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_swiftformer"] = [ - "SwiftFormerForImageClassification", - "SwiftFormerModel", - "SwiftFormerPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_swiftformer"] = [ - "TFSwiftFormerForImageClassification", - "TFSwiftFormerModel", - "TFSwiftFormerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_swiftformer import ( - SwiftFormerConfig, - SwiftFormerOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_swiftformer import ( - SwiftFormerForImageClassification, - SwiftFormerModel, - SwiftFormerPreTrainedModel, - ) - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_swiftformer import ( - TFSwiftFormerForImageClassification, - TFSwiftFormerModel, - TFSwiftFormerPreTrainedModel, - ) - + from .configuration_swiftformer import * + from .modeling_swiftformer import * + from .modeling_tf_swiftformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/swiftformer/configuration_swiftformer.py b/src/transformers/models/swiftformer/configuration_swiftformer.py index abfdf5165271be..00a0aaddfa240b 100644 --- a/src/transformers/models/swiftformer/configuration_swiftformer.py +++ b/src/transformers/models/swiftformer/configuration_swiftformer.py @@ -143,3 +143,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["SwiftFormerConfig", "SwiftFormerOnnxConfig"] diff --git a/src/transformers/models/swiftformer/modeling_swiftformer.py b/src/transformers/models/swiftformer/modeling_swiftformer.py index bd86c3d7173ed6..5870e91338d095 100644 --- a/src/transformers/models/swiftformer/modeling_swiftformer.py +++ b/src/transformers/models/swiftformer/modeling_swiftformer.py @@ -602,3 +602,6 @@ def forward( logits=logits, hidden_states=outputs.hidden_states, ) + + +__all__ = ["SwiftFormerForImageClassification", "SwiftFormerModel", "SwiftFormerPreTrainedModel"] diff --git a/src/transformers/models/swiftformer/modeling_tf_swiftformer.py b/src/transformers/models/swiftformer/modeling_tf_swiftformer.py index 3f1d19e9e33f29..b7dc0e4f94d031 100644 --- a/src/transformers/models/swiftformer/modeling_tf_swiftformer.py +++ 
b/src/transformers/models/swiftformer/modeling_tf_swiftformer.py @@ -861,3 +861,6 @@ def build(self, input_shape=None): with tf.name_scope(self.dist_head.name): self.dist_head.build(self.config.embed_dims[-1]) self.built = True + + +__all__ = ["TFSwiftFormerForImageClassification", "TFSwiftFormerModel", "TFSwiftFormerPreTrainedModel"] diff --git a/src/transformers/models/swin/__init__.py b/src/transformers/models/swin/__init__.py index a3458fe1efb848..3dc5871b0375e8 100644 --- a/src/transformers/models/swin/__init__.py +++ b/src/transformers/models/swin/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,70 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_swin": ["SwinConfig", "SwinOnnxConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_swin"] = [ - "SwinForImageClassification", - "SwinForMaskedImageModeling", - "SwinModel", - "SwinPreTrainedModel", - "SwinBackbone", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_swin"] = [ - "TFSwinForImageClassification", - "TFSwinForMaskedImageModeling", - "TFSwinModel", - "TFSwinPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_swin import SwinConfig, SwinOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_swin import ( - SwinBackbone, - SwinForImageClassification, - SwinForMaskedImageModeling, - SwinModel, - SwinPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_swin import ( - TFSwinForImageClassification, - TFSwinForMaskedImageModeling, - TFSwinModel, - TFSwinPreTrainedModel, - ) - + from .configuration_swin import * + from .modeling_swin import * + from .modeling_tf_swin import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/swin/configuration_swin.py b/src/transformers/models/swin/configuration_swin.py index 321648f149306a..da6ba9871407a6 100644 --- a/src/transformers/models/swin/configuration_swin.py +++ b/src/transformers/models/swin/configuration_swin.py @@ -174,3 +174,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["SwinConfig", "SwinOnnxConfig"] diff --git a/src/transformers/models/swin/modeling_swin.py b/src/transformers/models/swin/modeling_swin.py index 23f0ba6da620cd..a4262491366a0e 100644 --- a/src/transformers/models/swin/modeling_swin.py +++ b/src/transformers/models/swin/modeling_swin.py @@ -1414,3 +1414,12 @@ def 
forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = [ + "SwinForImageClassification", + "SwinForMaskedImageModeling", + "SwinModel", + "SwinPreTrainedModel", + "SwinBackbone", +] diff --git a/src/transformers/models/swin/modeling_tf_swin.py b/src/transformers/models/swin/modeling_tf_swin.py index f1aa0bfef743ad..865444f081a64f 100644 --- a/src/transformers/models/swin/modeling_tf_swin.py +++ b/src/transformers/models/swin/modeling_tf_swin.py @@ -1633,3 +1633,6 @@ def build(self, input_shape=None): if hasattr(self.classifier, "name"): with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.swin.num_features]) + + +__all__ = ["TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", "TFSwinPreTrainedModel"] diff --git a/src/transformers/models/swin2sr/__init__.py b/src/transformers/models/swin2sr/__init__.py index 16495f1dc9712d..cc7ea677b5d5d6 100644 --- a/src/transformers/models/swin2sr/__init__.py +++ b/src/transformers/models/swin2sr/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,61 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_swin2sr": ["Swin2SRConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_swin2sr"] = [ - "Swin2SRForImageSuperResolution", - "Swin2SRModel", - "Swin2SRPreTrainedModel", - ] - - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_swin2sr"] = ["Swin2SRImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_swin2sr import Swin2SRConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_swin2sr import ( - Swin2SRForImageSuperResolution, - Swin2SRModel, - Swin2SRPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_swin2sr import Swin2SRImageProcessor - - + from .configuration_swin2sr import * + from .image_processing_swin2sr import * + from .modeling_swin2sr import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/swin2sr/configuration_swin2sr.py b/src/transformers/models/swin2sr/configuration_swin2sr.py index 0d910e89e4eb11..a507d9d625136c 100644 --- a/src/transformers/models/swin2sr/configuration_swin2sr.py +++ b/src/transformers/models/swin2sr/configuration_swin2sr.py @@ -149,3 +149,6 @@ def __init__( self.img_range = img_range self.resi_connection = resi_connection self.upsampler = upsampler + + +__all__ = 
["Swin2SRConfig"] diff --git a/src/transformers/models/swin2sr/image_processing_swin2sr.py b/src/transformers/models/swin2sr/image_processing_swin2sr.py index f6584237432091..3ab551ebc6e7ca 100644 --- a/src/transformers/models/swin2sr/image_processing_swin2sr.py +++ b/src/transformers/models/swin2sr/image_processing_swin2sr.py @@ -201,3 +201,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["Swin2SRImageProcessor"] diff --git a/src/transformers/models/swin2sr/modeling_swin2sr.py b/src/transformers/models/swin2sr/modeling_swin2sr.py index d6bd8da9bed638..9c41e7879350b2 100644 --- a/src/transformers/models/swin2sr/modeling_swin2sr.py +++ b/src/transformers/models/swin2sr/modeling_swin2sr.py @@ -1177,3 +1177,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["Swin2SRForImageSuperResolution", "Swin2SRModel", "Swin2SRPreTrainedModel"] diff --git a/src/transformers/models/swinv2/__init__.py b/src/transformers/models/swinv2/__init__.py index e3a13b79651fcd..172f1573f1eb1b 100644 --- a/src/transformers/models/swinv2/__init__.py +++ b/src/transformers/models/swinv2/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,48 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_swinv2": ["Swinv2Config"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_swinv2"] = [ - "Swinv2ForImageClassification", - "Swinv2ForMaskedImageModeling", - "Swinv2Model", - "Swinv2PreTrainedModel", - "Swinv2Backbone", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_swinv2 import Swinv2Config - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_swinv2 import ( - Swinv2Backbone, - Swinv2ForImageClassification, - Swinv2ForMaskedImageModeling, - Swinv2Model, - Swinv2PreTrainedModel, - ) - - + from .configuration_swinv2 import * + from .modeling_swinv2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/swinv2/configuration_swinv2.py b/src/transformers/models/swinv2/configuration_swinv2.py index c6032c45df8951..addb30e6a102dc 100644 --- a/src/transformers/models/swinv2/configuration_swinv2.py +++ b/src/transformers/models/swinv2/configuration_swinv2.py @@ -154,3 +154,6 @@ def __init__( # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) + + +__all__ = ["Swinv2Config"] diff --git a/src/transformers/models/swinv2/modeling_swinv2.py b/src/transformers/models/swinv2/modeling_swinv2.py 
index 191923958cfbde..626f883ac80399 100644 --- a/src/transformers/models/swinv2/modeling_swinv2.py +++ b/src/transformers/models/swinv2/modeling_swinv2.py @@ -1461,3 +1461,12 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = [ + "Swinv2ForImageClassification", + "Swinv2ForMaskedImageModeling", + "Swinv2Model", + "Swinv2PreTrainedModel", + "Swinv2Backbone", +] diff --git a/src/transformers/models/switch_transformers/__init__.py b/src/transformers/models/switch_transformers/__init__.py index e6f9914fcbcc1e..70afa7006d73dd 100644 --- a/src/transformers/models/switch_transformers/__init__.py +++ b/src/transformers/models/switch_transformers/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,66 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_switch_transformers": [ - "SwitchTransformersConfig", - "SwitchTransformersOnnxConfig", - ] -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_switch_transformers"] = [ - "SwitchTransformersEncoderModel", - "SwitchTransformersForConditionalGeneration", - "SwitchTransformersModel", - "SwitchTransformersPreTrainedModel", - "SwitchTransformersTop1Router", - "SwitchTransformersSparseMLP", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_switch_transformers import ( - SwitchTransformersConfig, - SwitchTransformersOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_switch_transformers import ( - SwitchTransformersEncoderModel, - SwitchTransformersForConditionalGeneration, - SwitchTransformersModel, - SwitchTransformersPreTrainedModel, - SwitchTransformersSparseMLP, - SwitchTransformersTop1Router, - ) - - + from .configuration_switch_transformers import * + from .modeling_switch_transformers import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/switch_transformers/configuration_switch_transformers.py b/src/transformers/models/switch_transformers/configuration_switch_transformers.py index 5ed95f2b61386e..093148f601046b 100644 --- a/src/transformers/models/switch_transformers/configuration_switch_transformers.py +++ b/src/transformers/models/switch_transformers/configuration_switch_transformers.py @@ -180,3 +180,6 @@ def __init__( is_encoder_decoder=is_encoder_decoder, **kwargs, ) + + +__all__ = ["SwitchTransformersConfig"] diff --git 
a/src/transformers/models/switch_transformers/modeling_switch_transformers.py b/src/transformers/models/switch_transformers/modeling_switch_transformers.py index b150b04eea57b8..588199fb095a8f 100644 --- a/src/transformers/models/switch_transformers/modeling_switch_transformers.py +++ b/src/transformers/models/switch_transformers/modeling_switch_transformers.py @@ -1974,3 +1974,13 @@ def forward( ) return encoder_outputs + + +__all__ = [ + "SwitchTransformersEncoderModel", + "SwitchTransformersForConditionalGeneration", + "SwitchTransformersModel", + "SwitchTransformersPreTrainedModel", + "SwitchTransformersTop1Router", + "SwitchTransformersSparseMLP", +] diff --git a/src/transformers/models/t5/__init__.py b/src/transformers/models/t5/__init__.py index d6549e270abcb6..366eab10826b19 100644 --- a/src/transformers/models/t5/__init__.py +++ b/src/transformers/models/t5/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,146 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_t5": ["T5Config", "T5OnnxConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_t5"] = ["T5Tokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_t5_fast"] = ["T5TokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_t5"] = [ - "T5EncoderModel", - "T5ForConditionalGeneration", - "T5Model", - "T5PreTrainedModel", - "load_tf_weights_in_t5", - "T5ForQuestionAnswering", - "T5ForSequenceClassification", - "T5ForTokenClassification", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_t5"] = [ - "TFT5EncoderModel", - "TFT5ForConditionalGeneration", - "TFT5Model", - "TFT5PreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_t5"] = [ - "FlaxT5EncoderModel", - "FlaxT5ForConditionalGeneration", - "FlaxT5Model", - "FlaxT5PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_t5 import T5Config, T5OnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_t5 import T5Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_t5_fast import 
T5TokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_t5 import ( - T5EncoderModel, - T5ForConditionalGeneration, - T5ForQuestionAnswering, - T5ForSequenceClassification, - T5ForTokenClassification, - T5Model, - T5PreTrainedModel, - load_tf_weights_in_t5, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_t5 import ( - TFT5EncoderModel, - TFT5ForConditionalGeneration, - TFT5Model, - TFT5PreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_t5 import ( - FlaxT5EncoderModel, - FlaxT5ForConditionalGeneration, - FlaxT5Model, - FlaxT5PreTrainedModel, - ) - - + from .configuration_t5 import * + from .modeling_flax_t5 import * + from .modeling_t5 import * + from .modeling_tf_t5 import * + from .tokenization_t5 import * + from .tokenization_t5_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/t5/configuration_t5.py b/src/transformers/models/t5/configuration_t5.py index be6fbe9528d10a..a22286449a27dd 100644 --- a/src/transformers/models/t5/configuration_t5.py +++ b/src/transformers/models/t5/configuration_t5.py @@ -166,3 +166,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["T5Config", "T5OnnxConfig"] diff --git a/src/transformers/models/t5/modeling_flax_t5.py b/src/transformers/models/t5/modeling_flax_t5.py index be5ffd44897d19..90bbf31f0678f0 100644 --- a/src/transformers/models/t5/modeling_flax_t5.py +++ b/src/transformers/models/t5/modeling_flax_t5.py @@ -1796,3 +1796,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_replace_return_docstrings( FlaxT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) + + +__all__ = ["FlaxT5EncoderModel", "FlaxT5ForConditionalGeneration", "FlaxT5Model", "FlaxT5PreTrainedModel"] diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py index 9012c8db9feb0a..c759f5b564066b 100644 --- a/src/transformers/models/t5/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -2491,3 +2491,15 @@ def forward( encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) + + +__all__ = [ + "T5EncoderModel", + "T5ForConditionalGeneration", + "T5Model", + "T5PreTrainedModel", + "load_tf_weights_in_t5", + "T5ForQuestionAnswering", + "T5ForSequenceClassification", + "T5ForTokenClassification", +] diff --git a/src/transformers/models/t5/modeling_tf_t5.py b/src/transformers/models/t5/modeling_tf_t5.py index 6cd44766bf8833..f7d2b23f8b169a 100644 --- a/src/transformers/models/t5/modeling_tf_t5.py +++ b/src/transformers/models/t5/modeling_tf_t5.py @@ -1678,3 +1678,6 @@ def build(self, input_shape=None): if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) + + +__all__ = ["TFT5EncoderModel", "TFT5ForConditionalGeneration", "TFT5Model", "TFT5PreTrainedModel"] diff --git 
a/src/transformers/models/t5/tokenization_t5.py b/src/transformers/models/t5/tokenization_t5.py index 1e166a78f10d24..cf31995c4b138b 100644 --- a/src/transformers/models/t5/tokenization_t5.py +++ b/src/transformers/models/t5/tokenization_t5.py @@ -445,3 +445,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["T5Tokenizer"] diff --git a/src/transformers/models/t5/tokenization_t5_fast.py b/src/transformers/models/t5/tokenization_t5_fast.py index 4c3fa950559637..68d750aca5668b 100644 --- a/src/transformers/models/t5/tokenization_t5_fast.py +++ b/src/transformers/models/t5/tokenization_t5_fast.py @@ -231,3 +231,6 @@ def get_sentinel_tokens(self): def get_sentinel_token_ids(self): return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()] + + +__all__ = ["T5TokenizerFast"] diff --git a/src/transformers/models/table_transformer/__init__.py b/src/transformers/models/table_transformer/__init__.py index de993193b0c522..8ebd1da6fdaeea 100644 --- a/src/transformers/models/table_transformer/__init__.py +++ b/src/transformers/models/table_transformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,51 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_table_transformer": [ - "TableTransformerConfig", - "TableTransformerOnnxConfig", - ] -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_table_transformer"] = [ - "TableTransformerForObjectDetection", - "TableTransformerModel", - "TableTransformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_table_transformer import ( - TableTransformerConfig, - TableTransformerOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_table_transformer import ( - TableTransformerForObjectDetection, - TableTransformerModel, - TableTransformerPreTrainedModel, - ) - + from .configuration_table_transformer import * + from .modeling_table_transformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/table_transformer/configuration_table_transformer.py b/src/transformers/models/table_transformer/configuration_table_transformer.py index e0afa14154fce3..458be0eea3106b 100644 --- a/src/transformers/models/table_transformer/configuration_table_transformer.py +++ b/src/transformers/models/table_transformer/configuration_table_transformer.py @@ -274,3 +274,6 @@ def atol_for_validation(self) -> float: @property def 
default_onnx_opset(self) -> int: return 12 + + +__all__ = ["TableTransformerConfig", "TableTransformerOnnxConfig"] diff --git a/src/transformers/models/table_transformer/modeling_table_transformer.py b/src/transformers/models/table_transformer/modeling_table_transformer.py index be57ab46016ccf..cf6c6d4daf01e1 100644 --- a/src/transformers/models/table_transformer/modeling_table_transformer.py +++ b/src/transformers/models/table_transformer/modeling_table_transformer.py @@ -1430,3 +1430,6 @@ def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x + + +__all__ = ["TableTransformerForObjectDetection", "TableTransformerModel", "TableTransformerPreTrainedModel"] diff --git a/src/transformers/models/tapas/__init__.py b/src/transformers/models/tapas/__init__.py index 750bf7e00f5a8f..7df7e765f60e84 100644 --- a/src/transformers/models/tapas/__init__.py +++ b/src/transformers/models/tapas/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,81 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available - - -_import_structure = { - "configuration_tapas": ["TapasConfig"], - "tokenization_tapas": ["TapasTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tapas"] = [ - "TapasForMaskedLM", - "TapasForQuestionAnswering", - "TapasForSequenceClassification", - "TapasModel", - "TapasPreTrainedModel", - "load_tf_weights_in_tapas", - ] -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_tapas"] = [ - "TFTapasForMaskedLM", - "TFTapasForQuestionAnswering", - "TFTapasForSequenceClassification", - "TFTapasModel", - "TFTapasPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_tapas import TapasConfig - from .tokenization_tapas import TapasTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tapas import ( - TapasForMaskedLM, - TapasForQuestionAnswering, - TapasForSequenceClassification, - TapasModel, - TapasPreTrainedModel, - load_tf_weights_in_tapas, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_tapas import ( - TFTapasForMaskedLM, - TFTapasForQuestionAnswering, - TFTapasForSequenceClassification, - TFTapasModel, - TFTapasPreTrainedModel, - ) - - + from .configuration_tapas import * + from .modeling_tapas import * + from .modeling_tf_tapas import * + from .tokenization_tapas import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = 
_LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/tapas/configuration_tapas.py b/src/transformers/models/tapas/configuration_tapas.py index 63d289e38fed89..58769e99a72218 100644 --- a/src/transformers/models/tapas/configuration_tapas.py +++ b/src/transformers/models/tapas/configuration_tapas.py @@ -224,3 +224,6 @@ def __init__( if isinstance(self.aggregation_labels, dict): self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()} + + +__all__ = ["TapasConfig"] diff --git a/src/transformers/models/tapas/modeling_tapas.py b/src/transformers/models/tapas/modeling_tapas.py index b74a27ae5ce589..887b0135f866b5 100644 --- a/src/transformers/models/tapas/modeling_tapas.py +++ b/src/transformers/models/tapas/modeling_tapas.py @@ -2387,3 +2387,13 @@ def _calculate_regression_loss( per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask + + +__all__ = [ + "TapasForMaskedLM", + "TapasForQuestionAnswering", + "TapasForSequenceClassification", + "TapasModel", + "TapasPreTrainedModel", + "load_tf_weights_in_tapas", +] diff --git a/src/transformers/models/tapas/modeling_tf_tapas.py b/src/transformers/models/tapas/modeling_tf_tapas.py index afb1c3cbda8bc3..b73c3e93b9b655 100644 --- a/src/transformers/models/tapas/modeling_tf_tapas.py +++ b/src/transformers/models/tapas/modeling_tf_tapas.py @@ -2451,3 +2451,12 @@ def _calculate_regression_loss( ) per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask + + +__all__ = [ + "TFTapasForMaskedLM", + "TFTapasForQuestionAnswering", + "TFTapasForSequenceClassification", + "TFTapasModel", + "TFTapasPreTrainedModel", +] diff --git a/src/transformers/models/tapas/tokenization_tapas.py b/src/transformers/models/tapas/tokenization_tapas.py index 69950396079aa1..392ab81ac0d066 100644 --- a/src/transformers/models/tapas/tokenization_tapas.py +++ b/src/transformers/models/tapas/tokenization_tapas.py @@ -2787,3 +2787,6 @@ def add_numeric_table_values(table, min_consolidation_fraction=0.7, debug_info=N table.iloc[row_index, col_index].numeric_value = numeric_value return table + + +__all__ = ["TapasTokenizer"] diff --git a/src/transformers/models/time_series_transformer/__init__.py b/src/transformers/models/time_series_transformer/__init__.py index 39879ed1bc00b7..e36c36bcfccb32 100644 --- a/src/transformers/models/time_series_transformer/__init__.py +++ b/src/transformers/models/time_series_transformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,44 +13,15 @@ # limitations under the License. 
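# The TYPE_CHECKING branches in these rewritten inits lean on a plain Python
# rule: `from .mod import *` re-exports exactly the names in mod's __all__.
# That is why every config/modeling/tokenization file in this patch gains an
# explicit __all__; a public-looking class omitted from the list would
# silently drop out of the package namespace. Minimal illustration (module
# names hypothetical):
#
#     # mod.py
#     __all__ = ["Public"]
#     class Public: ...
#     class Helper: ...          # public-looking, but not in __all__
#
#     # pkg/__init__.py
#     from .mod import *         # binds Public only; Helper is not re-exported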
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_time_series_transformer": ["TimeSeriesTransformerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_time_series_transformer"] = [ - "TimeSeriesTransformerForPrediction", - "TimeSeriesTransformerModel", - "TimeSeriesTransformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_time_series_transformer import ( - TimeSeriesTransformerConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_time_series_transformer import ( - TimeSeriesTransformerForPrediction, - TimeSeriesTransformerModel, - TimeSeriesTransformerPreTrainedModel, - ) - + from .configuration_time_series_transformer import * + from .modeling_time_series_transformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py b/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py index 56b06c03841243..bc063774389cba 100644 --- a/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py @@ -224,3 +224,6 @@ def _number_of_features(self) -> int: + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features ) + + +__all__ = ["TimeSeriesTransformerConfig"] diff --git a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py index b45e6d7e850de7..f48567245eae4d 100644 --- a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py @@ -1779,3 +1779,6 @@ def generate( (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape, ) ) + + +__all__ = ["TimeSeriesTransformerForPrediction", "TimeSeriesTransformerModel", "TimeSeriesTransformerPreTrainedModel"] diff --git a/src/transformers/models/timesformer/__init__.py b/src/transformers/models/timesformer/__init__.py index 48a2aa9fa47464..c0cc9d3d6a6856 100644 --- a/src/transformers/models/timesformer/__init__.py +++ b/src/transformers/models/timesformer/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,41 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_timesformer": ["TimesformerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_timesformer"] = [ - "TimesformerModel", - "TimesformerForVideoClassification", - "TimesformerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_timesformer import TimesformerConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_timesformer import ( - TimesformerForVideoClassification, - TimesformerModel, - TimesformerPreTrainedModel, - ) - + from .configuration_timesformer import * + from .modeling_timesformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/timesformer/configuration_timesformer.py b/src/transformers/models/timesformer/configuration_timesformer.py index 2ee7125de255bf..edb69af230f901 100644 --- a/src/transformers/models/timesformer/configuration_timesformer.py +++ b/src/transformers/models/timesformer/configuration_timesformer.py @@ -124,3 +124,6 @@ def __init__( self.attention_type = attention_type self.drop_path_rate = drop_path_rate + + +__all__ = ["TimesformerConfig"] diff --git a/src/transformers/models/timesformer/modeling_timesformer.py b/src/transformers/models/timesformer/modeling_timesformer.py index 2262898d54740b..b348791869af2c 100644 --- a/src/transformers/models/timesformer/modeling_timesformer.py +++ b/src/transformers/models/timesformer/modeling_timesformer.py @@ -811,3 +811,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["TimesformerModel", "TimesformerForVideoClassification", "TimesformerPreTrainedModel"] diff --git a/src/transformers/models/timm_backbone/__init__.py b/src/transformers/models/timm_backbone/__init__.py index 4c692f76432f4a..4a58f096b1e482 100644 --- a/src/transformers/models/timm_backbone/__init__.py +++ b/src/transformers/models/timm_backbone/__init__.py @@ -1,8 +1,4 @@ -# flake8: noqa -# There's no way to ignore "F401 '...' imported but unused" warnings in this -# module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,33 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_timm_backbone"] = ["TimmBackbone"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_timm_backbone import TimmBackboneConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_timm_backbone import TimmBackbone - + from .configuration_timm_backbone import * + from .modeling_timm_backbone import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/timm_backbone/configuration_timm_backbone.py b/src/transformers/models/timm_backbone/configuration_timm_backbone.py index dd8893820c3b78..6000698c92488d 100644 --- a/src/transformers/models/timm_backbone/configuration_timm_backbone.py +++ b/src/transformers/models/timm_backbone/configuration_timm_backbone.py @@ -81,3 +81,6 @@ def __init__( self.use_timm_backbone = True self.out_indices = out_indices if out_indices is not None else [-1] self.freeze_batch_norm_2d = freeze_batch_norm_2d + + +__all__ = ["TimmBackboneConfig"] diff --git a/src/transformers/models/timm_backbone/modeling_timm_backbone.py b/src/transformers/models/timm_backbone/modeling_timm_backbone.py index ae25852d082851..60a8b40463569d 100644 --- a/src/transformers/models/timm_backbone/modeling_timm_backbone.py +++ b/src/transformers/models/timm_backbone/modeling_timm_backbone.py @@ -156,3 +156,6 @@ def forward( return output return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None) + + +__all__ = ["TimmBackbone"] diff --git a/src/transformers/models/trocr/__init__.py b/src/transformers/models/trocr/__init__.py index 14854857586d97..96ae1927c1b560 100644 --- a/src/transformers/models/trocr/__init__.py +++ b/src/transformers/models/trocr/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,46 +13,16 @@ # limitations under the License. 
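# Net effect for callers is unchanged: attribute access on these packages stays
# lazy. For instance (assuming torch is installed), a line such as
#
#     from transformers.models.trocr import TrOCRForCausalLM
#
# still triggers _LazyModule's __getattr__, which imports modeling_trocr on
# first use; only the bookkeeping behind that lookup moved from hand-written
# dicts to define_import_structure.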
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_speech_available, - is_torch_available, -) - - -_import_structure = { - "configuration_trocr": ["TrOCRConfig"], - "processing_trocr": ["TrOCRProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_trocr"] = [ - "TrOCRForCausalLM", - "TrOCRPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_trocr import TrOCRConfig - from .processing_trocr import TrOCRProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_trocr import TrOCRForCausalLM, TrOCRPreTrainedModel - + from .configuration_trocr import * + from .modeling_trocr import * + from .processing_trocr import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/trocr/configuration_trocr.py b/src/transformers/models/trocr/configuration_trocr.py index f47412e93a50b5..6c3aabbe195870 100644 --- a/src/transformers/models/trocr/configuration_trocr.py +++ b/src/transformers/models/trocr/configuration_trocr.py @@ -141,3 +141,6 @@ def __init__( decoder_start_token_id=decoder_start_token_id, **kwargs, ) + + +__all__ = ["TrOCRConfig"] diff --git a/src/transformers/models/trocr/modeling_trocr.py b/src/transformers/models/trocr/modeling_trocr.py index 754515dde0fb4d..2a745516c4f043 100644 --- a/src/transformers/models/trocr/modeling_trocr.py +++ b/src/transformers/models/trocr/modeling_trocr.py @@ -953,3 +953,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["TrOCRForCausalLM", "TrOCRPreTrainedModel"] diff --git a/src/transformers/models/trocr/processing_trocr.py b/src/transformers/models/trocr/processing_trocr.py index 16b75b9812b482..1ecb96b00f5ddd 100644 --- a/src/transformers/models/trocr/processing_trocr.py +++ b/src/transformers/models/trocr/processing_trocr.py @@ -154,3 +154,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["TrOCRProcessor"] diff --git a/src/transformers/models/tvp/__init__.py b/src/transformers/models/tvp/__init__.py index b8479dbdd331b8..7388075201e03c 100644 --- a/src/transformers/models/tvp/__init__.py +++ b/src/transformers/models/tvp/__init__.py @@ -1,74 +1,29 @@ -# coding=utf-8 -# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # -# Licensed under the Apache License=, Version 2.0 (the "License"); +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing=, software -# distributed under the License is distributed on an "AS IS" BASIS=, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_tvp": ["TvpConfig"], - "processing_tvp": ["TvpProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_tvp"] = ["TvpImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tvp"] = [ - "TvpModel", - "TvpPreTrainedModel", - "TvpForVideoGrounding", - ] - if TYPE_CHECKING: - from .configuration_tvp import ( - TvpConfig, - ) - from .processing_tvp import TvpProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_tvp import TvpImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tvp import ( - TvpForVideoGrounding, - TvpModel, - TvpPreTrainedModel, - ) - + from .configuration_tvp import * + from .image_processing_tvp import * + from .modeling_tvp import * + from .processing_tvp import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/tvp/configuration_tvp.py b/src/transformers/models/tvp/configuration_tvp.py index 2941c4fcbe1391..803acb220d081f 100644 --- a/src/transformers/models/tvp/configuration_tvp.py +++ b/src/transformers/models/tvp/configuration_tvp.py @@ -196,3 +196,6 @@ def to_dict(self): output["backbone_config"] = self.backbone_config.to_dict() output["model_type"] = self.__class__.model_type return output + + +__all__ = ["TvpConfig"] diff --git a/src/transformers/models/tvp/image_processing_tvp.py b/src/transformers/models/tvp/image_processing_tvp.py index 100ec133e8b026..115433c6c48b75 100644 --- a/src/transformers/models/tvp/image_processing_tvp.py +++ b/src/transformers/models/tvp/image_processing_tvp.py @@ -476,3 +476,6 @@ def preprocess( data = {"pixel_values": videos} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["TvpImageProcessor"] diff --git a/src/transformers/models/tvp/modeling_tvp.py b/src/transformers/models/tvp/modeling_tvp.py index ec00eee928617f..1d6d9a0106d9d5 100644 --- a/src/transformers/models/tvp/modeling_tvp.py +++ b/src/transformers/models/tvp/modeling_tvp.py @@ -980,3 +980,6 @@ def forward( 
hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["TvpModel", "TvpPreTrainedModel", "TvpForVideoGrounding"] diff --git a/src/transformers/models/tvp/processing_tvp.py b/src/transformers/models/tvp/processing_tvp.py index eb8aabfdade3ed..a4ed81e54aaddb 100644 --- a/src/transformers/models/tvp/processing_tvp.py +++ b/src/transformers/models/tvp/processing_tvp.py @@ -151,3 +151,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["TvpProcessor"] diff --git a/src/transformers/models/udop/__init__.py b/src/transformers/models/udop/__init__.py index 732d97aa7a99c7..cf4c36f6363f5f 100644 --- a/src/transformers/models/udop/__init__.py +++ b/src/transformers/models/udop/__init__.py @@ -11,86 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_udop": ["UdopConfig"], - "processing_udop": ["UdopProcessor"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_udop"] = ["UdopTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_udop_fast"] = ["UdopTokenizerFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_udop"] = [ - "UdopForConditionalGeneration", - "UdopPreTrainedModel", - "UdopModel", - "UdopEncoderModel", - ] if TYPE_CHECKING: - from .configuration_udop import UdopConfig - from .processing_udop import UdopProcessor - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_udop import UdopTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_udop_fast import UdopTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_udop import ( - UdopEncoderModel, - UdopForConditionalGeneration, - UdopModel, - UdopPreTrainedModel, - ) - + from .configuration_udop import * + from .modeling_udop import * + from .processing_udop import * + from .tokenization_udop import * + from .tokenization_udop_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/udop/configuration_udop.py b/src/transformers/models/udop/configuration_udop.py index 
5ae8bcebfd79a2..6ed28c78c8fb4c 100644 --- a/src/transformers/models/udop/configuration_udop.py +++ b/src/transformers/models/udop/configuration_udop.py @@ -155,3 +155,6 @@ def __init__( is_encoder_decoder=is_encoder_decoder, **kwargs, ) + + +__all__ = ["UdopConfig"] diff --git a/src/transformers/models/udop/modeling_udop.py b/src/transformers/models/udop/modeling_udop.py index 1928ac8a5c20c9..9476c7e9a84fe0 100644 --- a/src/transformers/models/udop/modeling_udop.py +++ b/src/transformers/models/udop/modeling_udop.py @@ -2154,3 +2154,6 @@ def forward( ) return encoder_outputs + + +__all__ = ["UdopForConditionalGeneration", "UdopPreTrainedModel", "UdopModel", "UdopEncoderModel"] diff --git a/src/transformers/models/udop/processing_udop.py b/src/transformers/models/udop/processing_udop.py index 33349af0366d77..ce8847a5c3ac6b 100644 --- a/src/transformers/models/udop/processing_udop.py +++ b/src/transformers/models/udop/processing_udop.py @@ -211,3 +211,6 @@ def decode(self, *args, **kwargs): @property def model_input_names(self): return ["pixel_values", "input_ids", "bbox", "attention_mask"] + + +__all__ = ["UdopProcessor"] diff --git a/src/transformers/models/udop/tokenization_udop.py b/src/transformers/models/udop/tokenization_udop.py index 88708e8b29a3be..608121f9b236a9 100644 --- a/src/transformers/models/udop/tokenization_udop.py +++ b/src/transformers/models/udop/tokenization_udop.py @@ -1483,3 +1483,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs + + +__all__ = ["UdopTokenizer"] diff --git a/src/transformers/models/udop/tokenization_udop_fast.py b/src/transformers/models/udop/tokenization_udop_fast.py index 8ee0577fa10e58..9b1464d1d8749b 100644 --- a/src/transformers/models/udop/tokenization_udop_fast.py +++ b/src/transformers/models/udop/tokenization_udop_fast.py @@ -1026,3 +1026,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["UdopTokenizerFast"] diff --git a/src/transformers/models/umt5/__init__.py b/src/transformers/models/umt5/__init__.py index e68ae4cb3737cf..5dc2e041359770 100644 --- a/src/transformers/models/umt5/__init__.py +++ b/src/transformers/models/umt5/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,50 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_umt5": ["UMT5Config", "UMT5OnnxConfig"]} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_umt5"] = [ - "UMT5EncoderModel", - "UMT5ForConditionalGeneration", - "UMT5ForQuestionAnswering", - "UMT5ForSequenceClassification", - "UMT5ForTokenClassification", - "UMT5Model", - "UMT5PreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_umt5 import UMT5Config, UMT5OnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_umt5 import ( - UMT5EncoderModel, - UMT5ForConditionalGeneration, - UMT5ForQuestionAnswering, - UMT5ForSequenceClassification, - UMT5ForTokenClassification, - UMT5Model, - UMT5PreTrainedModel, - ) + from .configuration_umt5 import * + from .modeling_umt5 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/umt5/configuration_umt5.py b/src/transformers/models/umt5/configuration_umt5.py index ba8ea0460ba071..82e7dfeba5bb55 100644 --- a/src/transformers/models/umt5/configuration_umt5.py +++ b/src/transformers/models/umt5/configuration_umt5.py @@ -176,3 +176,6 @@ def default_onnx_opset(self) -> int: @property def atol_for_validation(self) -> float: return 5e-4 + + +__all__ = ["UMT5Config", "UMT5OnnxConfig"] diff --git a/src/transformers/models/umt5/modeling_umt5.py b/src/transformers/models/umt5/modeling_umt5.py index 985dc5e4426dff..4802a8ff80530b 100644 --- a/src/transformers/models/umt5/modeling_umt5.py +++ b/src/transformers/models/umt5/modeling_umt5.py @@ -2014,3 +2014,14 @@ def forward( encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) + + +__all__ = [ + "UMT5EncoderModel", + "UMT5ForConditionalGeneration", + "UMT5ForQuestionAnswering", + "UMT5ForSequenceClassification", + "UMT5ForTokenClassification", + "UMT5Model", + "UMT5PreTrainedModel", +] diff --git a/src/transformers/models/unispeech/__init__.py b/src/transformers/models/unispeech/__init__.py index 91db9ada5ef297..a03daae25f4535 100644 --- a/src/transformers/models/unispeech/__init__.py +++ b/src/transformers/models/unispeech/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,49 +13,15 @@ # limitations under the License. 
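Because the export list now lives next to the code, the `__all__` added to each file must keep matching what the package re-exports. A quick invariant check for the umt5 hunks above, assuming `transformers` is installed along with `torch` (which `modeling_umt5` needs at import time):

import importlib

# Expected exports, copied from the __all__ additions in the umt5 hunks above.
expected = {
    "transformers.models.umt5.configuration_umt5": ["UMT5Config", "UMT5OnnxConfig"],
    "transformers.models.umt5.modeling_umt5": [
        "UMT5EncoderModel",
        "UMT5ForConditionalGeneration",
        "UMT5ForQuestionAnswering",
        "UMT5ForSequenceClassification",
        "UMT5ForTokenClassification",
        "UMT5Model",
        "UMT5PreTrainedModel",
    ],
}

for module_name, symbols in expected.items():
    module = importlib.import_module(module_name)
    missing = set(symbols) - set(getattr(module, "__all__", []))
    assert not missing, f"{module_name} lacks {sorted(missing)} in __all__"
print("umt5 exports match the declared __all__ lists")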
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_unispeech": ["UniSpeechConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_unispeech"] = [ - "UniSpeechForCTC", - "UniSpeechForPreTraining", - "UniSpeechForSequenceClassification", - "UniSpeechModel", - "UniSpeechPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_unispeech import UniSpeechConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_unispeech import ( - UniSpeechForCTC, - UniSpeechForPreTraining, - UniSpeechForSequenceClassification, - UniSpeechModel, - UniSpeechPreTrainedModel, - ) - + from .configuration_unispeech import * + from .modeling_unispeech import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/unispeech/configuration_unispeech.py b/src/transformers/models/unispeech/configuration_unispeech.py index 69bc162220d98f..ccfe52f79c1c5f 100644 --- a/src/transformers/models/unispeech/configuration_unispeech.py +++ b/src/transformers/models/unispeech/configuration_unispeech.py @@ -304,3 +304,6 @@ def __init__( @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) + + +__all__ = ["UniSpeechConfig"] diff --git a/src/transformers/models/unispeech/modeling_unispeech.py b/src/transformers/models/unispeech/modeling_unispeech.py index 6ce5e77706d358..346e2dfac19c89 100755 --- a/src/transformers/models/unispeech/modeling_unispeech.py +++ b/src/transformers/models/unispeech/modeling_unispeech.py @@ -1897,3 +1897,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "UniSpeechForCTC", + "UniSpeechForPreTraining", + "UniSpeechForSequenceClassification", + "UniSpeechModel", + "UniSpeechPreTrainedModel", +] diff --git a/src/transformers/models/unispeech_sat/__init__.py b/src/transformers/models/unispeech_sat/__init__.py index 275f98ac222024..cc3144ed17d14c 100644 --- a/src/transformers/models/unispeech_sat/__init__.py +++ b/src/transformers/models/unispeech_sat/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,55 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_unispeech_sat": ["UniSpeechSatConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_unispeech_sat"] = [ - "UniSpeechSatForAudioFrameClassification", - "UniSpeechSatForCTC", - "UniSpeechSatForPreTraining", - "UniSpeechSatForSequenceClassification", - "UniSpeechSatForXVector", - "UniSpeechSatModel", - "UniSpeechSatPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_unispeech_sat import UniSpeechSatConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_unispeech_sat import ( - UniSpeechSatForAudioFrameClassification, - UniSpeechSatForCTC, - UniSpeechSatForPreTraining, - UniSpeechSatForSequenceClassification, - UniSpeechSatForXVector, - UniSpeechSatModel, - UniSpeechSatPreTrainedModel, - ) - + from .configuration_unispeech_sat import * + from .modeling_unispeech_sat import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py b/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py index 85661b02b6864b..a33403306e8aa7 100644 --- a/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py @@ -322,3 +322,6 @@ def __init__( @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) + + +__all__ = ["UniSpeechSatConfig"] diff --git a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py index 52d82ea739426b..ad51e3fb8c4952 100755 --- a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py @@ -2229,3 +2229,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "UniSpeechSatForAudioFrameClassification", + "UniSpeechSatForCTC", + "UniSpeechSatForPreTraining", + "UniSpeechSatForSequenceClassification", + "UniSpeechSatForXVector", + "UniSpeechSatModel", + "UniSpeechSatPreTrainedModel", +] diff --git a/src/transformers/models/univnet/__init__.py b/src/transformers/models/univnet/__init__.py index ea9babc3314f40..5d02d0dbe574eb 100644 --- a/src/transformers/models/univnet/__init__.py +++ b/src/transformers/models/univnet/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,49 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_univnet": ["UnivNetConfig"], - "feature_extraction_univnet": ["UnivNetFeatureExtractor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_univnet"] = [ - "UnivNetModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_univnet import ( - UnivNetConfig, - ) - from .feature_extraction_univnet import UnivNetFeatureExtractor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_univnet import ( - UnivNetModel, - ) - + from .configuration_univnet import * + from .feature_extraction_univnet import * + from .modeling_univnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/univnet/configuration_univnet.py b/src/transformers/models/univnet/configuration_univnet.py index 0f4dceb4794899..0a3811ee3a2618 100644 --- a/src/transformers/models/univnet/configuration_univnet.py +++ b/src/transformers/models/univnet/configuration_univnet.py @@ -120,3 +120,6 @@ def __init__( self.initializer_range = initializer_range self.leaky_relu_slope = leaky_relu_slope super().__init__(**kwargs) + + +__all__ = ["UnivNetConfig"] diff --git a/src/transformers/models/univnet/feature_extraction_univnet.py b/src/transformers/models/univnet/feature_extraction_univnet.py index 067aacc3d8c8ca..ab9d3ed5dd7f71 100644 --- a/src/transformers/models/univnet/feature_extraction_univnet.py +++ b/src/transformers/models/univnet/feature_extraction_univnet.py @@ -454,3 +454,6 @@ def to_dict(self) -> Dict[str, Any]: del output[name] return output + + +__all__ = ["UnivNetFeatureExtractor"] diff --git a/src/transformers/models/univnet/modeling_univnet.py b/src/transformers/models/univnet/modeling_univnet.py index a780e54538f213..9cfd45e188ab06 100644 --- a/src/transformers/models/univnet/modeling_univnet.py +++ b/src/transformers/models/univnet/modeling_univnet.py @@ -649,3 +649,6 @@ def remove_weight_norm(self): for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.conv_post) + + +__all__ = ["UnivNetModel"] diff --git a/src/transformers/models/upernet/__init__.py b/src/transformers/models/upernet/__init__.py index 3954fe4594dad0..dee32c4758f0e4 100644 --- a/src/transformers/models/upernet/__init__.py +++ b/src/transformers/models/upernet/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,38 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_upernet": ["UperNetConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_upernet"] = [ - "UperNetForSemanticSegmentation", - "UperNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_upernet import UperNetConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel - - + from .configuration_upernet import * + from .modeling_upernet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/upernet/configuration_upernet.py b/src/transformers/models/upernet/configuration_upernet.py index 3e17fd4289d853..c235c17d9cb62a 100644 --- a/src/transformers/models/upernet/configuration_upernet.py +++ b/src/transformers/models/upernet/configuration_upernet.py @@ -135,3 +135,6 @@ def __init__( self.auxiliary_num_convs = auxiliary_num_convs self.auxiliary_concat_input = auxiliary_concat_input self.loss_ignore_index = loss_ignore_index + + +__all__ = ["UperNetConfig"] diff --git a/src/transformers/models/upernet/modeling_upernet.py b/src/transformers/models/upernet/modeling_upernet.py index 9721cdcb4b0e3c..20b7c2a20e5e97 100644 --- a/src/transformers/models/upernet/modeling_upernet.py +++ b/src/transformers/models/upernet/modeling_upernet.py @@ -438,3 +438,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["UperNetForSemanticSegmentation", "UperNetPreTrainedModel"] diff --git a/src/transformers/models/video_llava/__init__.py b/src/transformers/models/video_llava/__init__.py index d1f4beabc979f8..bb5f5971a16799 100644 --- a/src/transformers/models/video_llava/__init__.py +++ b/src/transformers/models/video_llava/__init__.py @@ -13,59 +13,17 @@ # limitations under the License. 
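One observable effect of registering the package through `_LazyModule` is that backend-heavy submodules load only on first attribute access. Here is a small probe using the upernet module from the hunk above; the exact timing is an implementation detail, so treat the expected values as illustrative rather than guaranteed:

import sys

import transformers.models.upernet as upernet

# Importing the package registers the lazy module without pulling in the
# torch-backed code; modeling_upernet should load on first attribute access.
print("transformers.models.upernet.modeling_upernet" in sys.modules)  # expect False
_ = upernet.UperNetForSemanticSegmentation
print("transformers.models.upernet.modeling_upernet" in sys.modules)  # expect True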
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_video_llava": ["VideoLlavaConfig"], - "processing_video_llava": ["VideoLlavaProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_video_llava"] = ["VideoLlavaImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_video_llava"] = [ - "VideoLlavaPreTrainedModel", - "VideoLlavaForConditionalGeneration", - ] - if TYPE_CHECKING: - from .configuration_video_llava import ( - VideoLlavaConfig, - ) - from .image_processing_video_llava import VideoLlavaProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_video_llava import VideoLlavaImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_video_llava import ( - VideoLlavaForConditionalGeneration, - VideoLlavaPreTrainedModel, - ) - + from .configuration_video_llava import * + from .image_processing_video_llava import * + from .modeling_video_llava import * + from .processing_video_llava import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/video_llava/configuration_video_llava.py b/src/transformers/models/video_llava/configuration_video_llava.py index 87d96ca24ffdb4..6a77b283395c49 100644 --- a/src/transformers/models/video_llava/configuration_video_llava.py +++ b/src/transformers/models/video_llava/configuration_video_llava.py @@ -132,3 +132,6 @@ def __init__( self.text_config = text_config super().__init__(**kwargs) + + +__all__ = ["VideoLlavaConfig"] diff --git a/src/transformers/models/video_llava/image_processing_video_llava.py b/src/transformers/models/video_llava/image_processing_video_llava.py index 3e77110c7d45a8..4028f552c1bb24 100644 --- a/src/transformers/models/video_llava/image_processing_video_llava.py +++ b/src/transformers/models/video_llava/image_processing_video_llava.py @@ -402,3 +402,6 @@ def _preprocess_image( image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image + + +__all__ = ["VideoLlavaImageProcessor"] diff --git a/src/transformers/models/video_llava/modeling_video_llava.py b/src/transformers/models/video_llava/modeling_video_llava.py index 30adcb6ab5c089..802304e6b19165 100644 --- a/src/transformers/models/video_llava/modeling_video_llava.py +++ b/src/transformers/models/video_llava/modeling_video_llava.py @@ -738,3 +738,6 @@ def prepare_inputs_for_generation( model_inputs["pixel_values_videos"] = pixel_values_videos return model_inputs + + +__all__ = ["VideoLlavaPreTrainedModel", "VideoLlavaForConditionalGeneration"] diff --git a/src/transformers/models/video_llava/processing_video_llava.py b/src/transformers/models/video_llava/processing_video_llava.py index 
3e1884271efe2b..01a1a950346cb0 100644 --- a/src/transformers/models/video_llava/processing_video_llava.py +++ b/src/transformers/models/video_llava/processing_video_llava.py @@ -228,3 +228,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["VideoLlavaProcessor"] diff --git a/src/transformers/models/videomae/__init__.py b/src/transformers/models/videomae/__init__.py index 0e52081adbca5b..13b7f6e1bf1cb0 100644 --- a/src/transformers/models/videomae/__init__.py +++ b/src/transformers/models/videomae/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,61 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_videomae": ["VideoMAEConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_videomae"] = [ - "VideoMAEForPreTraining", - "VideoMAEModel", - "VideoMAEPreTrainedModel", - "VideoMAEForVideoClassification", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_videomae"] = ["VideoMAEFeatureExtractor"] - _import_structure["image_processing_videomae"] = ["VideoMAEImageProcessor"] - if TYPE_CHECKING: - from .configuration_videomae import VideoMAEConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_videomae import ( - VideoMAEForPreTraining, - VideoMAEForVideoClassification, - VideoMAEModel, - VideoMAEPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_videomae import VideoMAEFeatureExtractor - from .image_processing_videomae import VideoMAEImageProcessor - + from .configuration_videomae import * + from .feature_extraction_videomae import * + from .image_processing_videomae import * + from .modeling_videomae import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/videomae/configuration_videomae.py b/src/transformers/models/videomae/configuration_videomae.py index a6150f34335063..3940b6f010036c 100644 --- a/src/transformers/models/videomae/configuration_videomae.py +++ b/src/transformers/models/videomae/configuration_videomae.py @@ -143,3 +143,6 @@ def __init__( self.decoder_num_hidden_layers = decoder_num_hidden_layers self.decoder_intermediate_size = decoder_intermediate_size self.norm_pix_loss = norm_pix_loss + + +__all__ = ["VideoMAEConfig"] diff --git 
a/src/transformers/models/videomae/feature_extraction_videomae.py b/src/transformers/models/videomae/feature_extraction_videomae.py index 4a90d10c9c55e8..469cbcf523bd46 100644 --- a/src/transformers/models/videomae/feature_extraction_videomae.py +++ b/src/transformers/models/videomae/feature_extraction_videomae.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["VideoMAEFeatureExtractor"] diff --git a/src/transformers/models/videomae/image_processing_videomae.py b/src/transformers/models/videomae/image_processing_videomae.py index 413589523aa675..1322a69a642456 100644 --- a/src/transformers/models/videomae/image_processing_videomae.py +++ b/src/transformers/models/videomae/image_processing_videomae.py @@ -343,3 +343,6 @@ def preprocess( data = {"pixel_values": videos} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["VideoMAEImageProcessor"] diff --git a/src/transformers/models/videomae/modeling_videomae.py b/src/transformers/models/videomae/modeling_videomae.py index 73a680ba3a7240..6e65ebf06d9c50 100755 --- a/src/transformers/models/videomae/modeling_videomae.py +++ b/src/transformers/models/videomae/modeling_videomae.py @@ -1133,3 +1133,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["VideoMAEForPreTraining", "VideoMAEModel", "VideoMAEPreTrainedModel", "VideoMAEForVideoClassification"] diff --git a/src/transformers/models/vilt/__init__.py b/src/transformers/models/vilt/__init__.py index 6fcfd64c8beb68..b70afa645435c4 100644 --- a/src/transformers/models/vilt/__init__.py +++ b/src/transformers/models/vilt/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,71 +13,18 @@ # limitations under the License. 
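A few of the older registrations below (vilt, vipllava, vision_text_dual_encoder) called `_LazyModule` without `module_spec`; the refactor passes `module_spec=__spec__` everywhere. Keeping a real spec on the replacement module is what lets `importlib`-level tooling keep treating it as a normally imported module. A stdlib-only sketch of the difference, with `Stub` as a hypothetical stand-in:

import importlib.util
import sys
import types


class Stub(types.ModuleType):
    # Minimal module subclass standing in for a lazy module wrapper.
    def __init__(self, name, module_spec=None):
        super().__init__(name)
        self.__spec__ = module_spec  # None leaves the module spec-less


spec = importlib.util.find_spec("json")
with_spec = Stub("demo_with_spec", module_spec=spec)
without_spec = Stub("demo_without_spec")
print(with_spec.__spec__ is spec)     # True: spec-aware tooling keeps working
print(without_spec.__spec__ is None)  # True: introspection must fall back to guesses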
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_vilt": ["ViltConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_vilt"] = ["ViltFeatureExtractor"] - _import_structure["image_processing_vilt"] = ["ViltImageProcessor"] - _import_structure["processing_vilt"] = ["ViltProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vilt"] = [ - "ViltForImageAndTextRetrieval", - "ViltForImagesAndTextClassification", - "ViltForTokenClassification", - "ViltForMaskedLM", - "ViltForQuestionAnswering", - "ViltLayer", - "ViltModel", - "ViltPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_vilt import ViltConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_vilt import ViltFeatureExtractor - from .image_processing_vilt import ViltImageProcessor - from .processing_vilt import ViltProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vilt import ( - ViltForImageAndTextRetrieval, - ViltForImagesAndTextClassification, - ViltForMaskedLM, - ViltForQuestionAnswering, - ViltForTokenClassification, - ViltLayer, - ViltModel, - ViltPreTrainedModel, - ) - - + from .configuration_vilt import * + from .feature_extraction_vilt import * + from .image_processing_vilt import * + from .modeling_vilt import * + from .processing_vilt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vilt/configuration_vilt.py b/src/transformers/models/vilt/configuration_vilt.py index a57b40068207c8..cc6d727f5985cd 100644 --- a/src/transformers/models/vilt/configuration_vilt.py +++ b/src/transformers/models/vilt/configuration_vilt.py @@ -142,3 +142,6 @@ def __init__( self.qkv_bias = qkv_bias self.max_image_length = max_image_length self.num_images = num_images + + +__all__ = ["ViltConfig"] diff --git a/src/transformers/models/vilt/feature_extraction_vilt.py b/src/transformers/models/vilt/feature_extraction_vilt.py index 5091946bf94334..234fd1a2e17fb3 100644 --- a/src/transformers/models/vilt/feature_extraction_vilt.py +++ b/src/transformers/models/vilt/feature_extraction_vilt.py @@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["ViltFeatureExtractor"] diff --git a/src/transformers/models/vilt/image_processing_vilt.py b/src/transformers/models/vilt/image_processing_vilt.py index 66ffeb816fec5e..86351eca1f6615 100644 --- a/src/transformers/models/vilt/image_processing_vilt.py +++ b/src/transformers/models/vilt/image_processing_vilt.py @@ -484,3 +484,6 @@ def preprocess( encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_outputs + + +__all__ = ["ViltImageProcessor"] diff 
--git a/src/transformers/models/vilt/modeling_vilt.py b/src/transformers/models/vilt/modeling_vilt.py index f79606b78966a8..5ffb4b65ffbab5 100755 --- a/src/transformers/models/vilt/modeling_vilt.py +++ b/src/transformers/models/vilt/modeling_vilt.py @@ -1485,3 +1485,15 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "ViltForImageAndTextRetrieval", + "ViltForImagesAndTextClassification", + "ViltForTokenClassification", + "ViltForMaskedLM", + "ViltForQuestionAnswering", + "ViltLayer", + "ViltModel", + "ViltPreTrainedModel", +] diff --git a/src/transformers/models/vilt/processing_vilt.py b/src/transformers/models/vilt/processing_vilt.py index 0ccb884ea00c9d..2c46fd82d12255 100644 --- a/src/transformers/models/vilt/processing_vilt.py +++ b/src/transformers/models/vilt/processing_vilt.py @@ -146,3 +146,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["ViltProcessor"] diff --git a/src/transformers/models/vipllava/__init__.py b/src/transformers/models/vipllava/__init__.py index edc2a5106ba7cf..2ff5c6838dfaff 100644 --- a/src/transformers/models/vipllava/__init__.py +++ b/src/transformers/models/vipllava/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,40 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_vipllava": ["VipLlavaConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vipllava"] = [ - "VipLlavaForConditionalGeneration", - "VipLlavaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_vipllava import VipLlavaConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vipllava import ( - VipLlavaForConditionalGeneration, - VipLlavaPreTrainedModel, - ) - - + from .configuration_vipllava import * + from .modeling_vipllava import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vipllava/configuration_vipllava.py b/src/transformers/models/vipllava/configuration_vipllava.py index f26c2b2f50fb6a..d301721e694f70 100644 --- a/src/transformers/models/vipllava/configuration_vipllava.py +++ b/src/transformers/models/vipllava/configuration_vipllava.py @@ -120,3 +120,6 @@ def __init__( self.text_config = text_config super().__init__(**kwargs) + + +__all__ = ["VipLlavaConfig"] diff --git a/src/transformers/models/vipllava/modeling_vipllava.py b/src/transformers/models/vipllava/modeling_vipllava.py index b45325d2194e24..b4a6c6ae9bb9af 100644 --- a/src/transformers/models/vipllava/modeling_vipllava.py +++ b/src/transformers/models/vipllava/modeling_vipllava.py @@ -605,3 +605,6 @@ def prepare_inputs_for_generation( model_inputs["pixel_values"] = pixel_values return 
model_inputs + + +__all__ = ["VipLlavaForConditionalGeneration", "VipLlavaPreTrainedModel"] diff --git a/src/transformers/models/vision_encoder_decoder/__init__.py b/src/transformers/models/vision_encoder_decoder/__init__.py index b0fe3bdc82a9a5..613aae114b3396 100644 --- a/src/transformers/models/vision_encoder_decoder/__init__.py +++ b/src/transformers/models/vision_encoder_decoder/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,74 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = { - "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"] -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"] if TYPE_CHECKING: - from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel - + from .configuration_vision_encoder_decoder import * + from .modeling_flax_vision_encoder_decoder import * + from .modeling_tf_vision_encoder_decoder import * + from .modeling_vision_encoder_decoder import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py index 59678f2573ff0e..235069ea5a8f5b 100644 --- a/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py +++ 
b/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py @@ -208,3 +208,6 @@ def get_decoder_config( """ decoder_config.encoder_hidden_size = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature) + + +__all__ = ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"] diff --git a/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py index 1af006a3525a5d..5bb927f2e1b0e6 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py @@ -859,3 +859,6 @@ def from_encoder_decoder_pretrained( model.params["decoder"] = decoder.params return model + + +__all__ = ["FlaxVisionEncoderDecoderModel"] diff --git a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py index 383dd0e3e45152..9a027f04784a68 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py @@ -695,3 +695,6 @@ def build(self, input_shape=None): if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) + + +__all__ = ["TFVisionEncoderDecoderModel"] diff --git a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py index 152a9601403301..abca01987c7293 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py @@ -668,3 +668,6 @@ def resize_token_embeddings(self, *args, **kwargs): def _reorder_cache(self, past_key_values, beam_idx): # apply decoder cache reordering here return self.decoder._reorder_cache(past_key_values, beam_idx) + + +__all__ = ["VisionEncoderDecoderModel"] diff --git a/src/transformers/models/vision_text_dual_encoder/__init__.py b/src/transformers/models/vision_text_dual_encoder/__init__.py index 27c117274b645c..4b68df9c336f8e 100644 --- a/src/transformers/models/vision_text_dual_encoder/__init__.py +++ b/src/transformers/models/vision_text_dual_encoder/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,77 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = { - "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], - "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"] - - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig - from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel - - + from .configuration_vision_text_dual_encoder import * + from .modeling_flax_vision_text_dual_encoder import * + from .modeling_tf_vision_text_dual_encoder import * + from .modeling_vision_text_dual_encoder import * + from .processing_vision_text_dual_encoder import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py index 0d79720e1aa8d2..908e1bf1843ed3 100644 --- a/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py @@ -117,3 +117,6 @@ def from_vision_text_configs(cls, vision_config: PretrainedConfig, text_config: """ return cls(vision_config=vision_config.to_dict(), text_config=text_config.to_dict(), **kwargs) + + +__all__ = ["VisionTextDualEncoderConfig"] diff --git a/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py index 23244af1e31cce..e259041cd92ccc 100644 --- a/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py +++ 
b/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py @@ -596,3 +596,6 @@ def from_vision_text_pretrained( append_replace_return_docstrings( FlaxVisionTextDualEncoderModel, output_type=FlaxCLIPOutput, config_class=_CONFIG_FOR_DOC ) + + +__all__ = ["FlaxVisionTextDualEncoderModel"] diff --git a/src/transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py index 077b452f70f22b..bb1808aece91aa 100644 --- a/src/transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py @@ -619,3 +619,6 @@ def dummy_inputs(self): pixel_values = tf.constant(VISION_DUMMY_INPUTS) dummy = {"pixel_values": pixel_values, "input_ids": input_ids} return dummy + + +__all__ = ["TFVisionTextDualEncoderModel"] diff --git a/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py index 4b39de3df1c882..d7cceb5d2feba6 100755 --- a/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py @@ -534,3 +534,6 @@ def from_vision_text_pretrained( ) return model + + +__all__ = ["VisionTextDualEncoderModel"] diff --git a/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py index 0d723ed10bf067..7ba82a131d3ac4 100644 --- a/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py @@ -148,3 +148,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["VisionTextDualEncoderProcessor"] diff --git a/src/transformers/models/visual_bert/__init__.py b/src/transformers/models/visual_bert/__init__.py index db74a924a85cc7..723dc4904f5537 100644 --- a/src/transformers/models/visual_bert/__init__.py +++ b/src/transformers/models/visual_bert/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,51 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_visual_bert": ["VisualBertConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_visual_bert"] = [ - "VisualBertForMultipleChoice", - "VisualBertForPreTraining", - "VisualBertForQuestionAnswering", - "VisualBertForRegionToPhraseAlignment", - "VisualBertForVisualReasoning", - "VisualBertLayer", - "VisualBertModel", - "VisualBertPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_visual_bert import VisualBertConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_visual_bert import ( - VisualBertForMultipleChoice, - VisualBertForPreTraining, - VisualBertForQuestionAnswering, - VisualBertForRegionToPhraseAlignment, - VisualBertForVisualReasoning, - VisualBertLayer, - VisualBertModel, - VisualBertPreTrainedModel, - ) - - + from .configuration_visual_bert import * + from .modeling_visual_bert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/visual_bert/configuration_visual_bert.py b/src/transformers/models/visual_bert/configuration_visual_bert.py index ae98229a7d98ff..a866227d3470b9 100644 --- a/src/transformers/models/visual_bert/configuration_visual_bert.py +++ b/src/transformers/models/visual_bert/configuration_visual_bert.py @@ -130,3 +130,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.bypass_transformer = bypass_transformer self.special_visual_initialize = special_visual_initialize + + +__all__ = ["VisualBertConfig"] diff --git a/src/transformers/models/visual_bert/modeling_visual_bert.py b/src/transformers/models/visual_bert/modeling_visual_bert.py index f7280c0c492c6d..d9ce821101e5d4 100755 --- a/src/transformers/models/visual_bert/modeling_visual_bert.py +++ b/src/transformers/models/visual_bert/modeling_visual_bert.py @@ -1582,3 +1582,15 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "VisualBertForMultipleChoice", + "VisualBertForPreTraining", + "VisualBertForQuestionAnswering", + "VisualBertForRegionToPhraseAlignment", + "VisualBertForVisualReasoning", + "VisualBertLayer", + "VisualBertModel", + "VisualBertPreTrainedModel", +] diff --git a/src/transformers/models/vit/__init__.py b/src/transformers/models/vit/__init__.py index 3066331278e44f..4d6a7a23fa63f4 100644 --- a/src/transformers/models/vit/__init__.py +++ b/src/transformers/models/vit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,125 +13,20 @@ # limitations under the License. 
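The `if TYPE_CHECKING:` branch in each rewritten `__init__` exists purely for static analysis: type checkers and IDEs follow the eager star imports, while the runtime path always falls through to the lazy registration. A minimal illustration of that split, independent of transformers:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only type checkers evaluate this block; at runtime it is skipped, so the
    # import costs nothing and cannot fail on a missing backend.
    from decimal import Decimal


def to_decimal_repr(value: "Decimal") -> str:
    # The string annotation keeps runtime independent of the guarded import.
    return str(value)


print(to_decimal_repr.__annotations__["value"])  # prints 'Decimal'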
diff --git a/src/transformers/models/vit/__init__.py b/src/transformers/models/vit/__init__.py
index 3066331278e44f..4d6a7a23fa63f4 100644
--- a/src/transformers/models/vit/__init__.py
+++ b/src/transformers/models/vit/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,125 +13,20 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_flax_available,
-    is_tf_available,
-    is_torch_available,
-    is_torchvision_available,
-    is_vision_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {"configuration_vit": ["ViTConfig", "ViTOnnxConfig"]}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
-    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
-
-
-try:
-    if not is_torchvision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_vit_fast"] = ["ViTImageProcessorFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_vit"] = [
-        "ViTForImageClassification",
-        "ViTForMaskedImageModeling",
-        "ViTModel",
-        "ViTPreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_vit"] = [
-        "TFViTForImageClassification",
-        "TFViTModel",
-        "TFViTPreTrainedModel",
-    ]
-
-try:
-    if not is_flax_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flax_vit"] = [
-        "FlaxViTForImageClassification",
-        "FlaxViTModel",
-        "FlaxViTPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_vit import ViTConfig, ViTOnnxConfig
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .feature_extraction_vit import ViTFeatureExtractor
-        from .image_processing_vit import ViTImageProcessor
-
-    try:
-        if not is_torchvision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_vit_fast import ViTImageProcessorFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_vit import (
-            ViTForImageClassification,
-            ViTForMaskedImageModeling,
-            ViTModel,
-            ViTPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
-
-    try:
-        if not is_flax_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
-
-
+    from .configuration_vit import *
+    from .feature_extraction_vit import *
+    from .image_processing_vit import *
+    from .image_processing_vit_fast import *
+    from .modeling_flax_vit import *
+    from .modeling_tf_vit import *
+    from .modeling_vit import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/vit/configuration_vit.py b/src/transformers/models/vit/configuration_vit.py
index bacec851a931e5..bb8b908903fbb2 100644
--- a/src/transformers/models/vit/configuration_vit.py
+++ b/src/transformers/models/vit/configuration_vit.py
@@ -136,3 +136,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
     @property
     def atol_for_validation(self) -> float:
         return 1e-4
+
+
+__all__ = ["ViTConfig", "ViTOnnxConfig"]
diff --git a/src/transformers/models/vit/feature_extraction_vit.py b/src/transformers/models/vit/feature_extraction_vit.py
index 54d47c0f3ad59b..e2479b2dc0d9cb 100644
--- a/src/transformers/models/vit/feature_extraction_vit.py
+++ b/src/transformers/models/vit/feature_extraction_vit.py
@@ -31,3 +31,6 @@ def __init__(self, *args, **kwargs) -> None:
             FutureWarning,
         )
         super().__init__(*args, **kwargs)
+
+
+__all__ = ["ViTFeatureExtractor"]
diff --git a/src/transformers/models/vit/image_processing_vit.py b/src/transformers/models/vit/image_processing_vit.py
index 05bb8bae049748..8e6232ac025a0c 100644
--- a/src/transformers/models/vit/image_processing_vit.py
+++ b/src/transformers/models/vit/image_processing_vit.py
@@ -281,3 +281,6 @@ def preprocess(
 
         data = {"pixel_values": images}
         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["ViTImageProcessor"]
diff --git a/src/transformers/models/vit/image_processing_vit_fast.py b/src/transformers/models/vit/image_processing_vit_fast.py
index e8abdcfe5cc82d..5abf6cf10aa48e 100644
--- a/src/transformers/models/vit/image_processing_vit_fast.py
+++ b/src/transformers/models/vit/image_processing_vit_fast.py
@@ -298,3 +298,6 @@ def preprocess(
 
         data = {"pixel_values": torch.stack(transformed_images, dim=0)}
         return BatchFeature(data, tensor_type=return_tensors)
+
+
+__all__ = ["ViTImageProcessorFast"]
diff --git a/src/transformers/models/vit/modeling_flax_vit.py b/src/transformers/models/vit/modeling_flax_vit.py
index 586c8b62f6dad0..9df89b9674a1f1 100644
--- a/src/transformers/models/vit/modeling_flax_vit.py
+++ b/src/transformers/models/vit/modeling_flax_vit.py
@@ -671,3 +671,6 @@ class FlaxViTForImageClassification(FlaxViTPreTrainedModel):
 append_replace_return_docstrings(
     FlaxViTForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=ViTConfig
 )
+
+
+__all__ = ["FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel"]
diff --git a/src/transformers/models/vit/modeling_tf_vit.py b/src/transformers/models/vit/modeling_tf_vit.py
index 2cf120df740881..780a1dc5c7bddd 100644
--- a/src/transformers/models/vit/modeling_tf_vit.py
+++ b/src/transformers/models/vit/modeling_tf_vit.py
@@ -902,3 +902,6 @@ def build(self, input_shape=None):
         if getattr(self, "classifier", None) is not None:
             with tf.name_scope(self.classifier.name):
                 self.classifier.build([None, None, self.config.hidden_size])
+
+
+__all__ = ["TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel"]
diff --git a/src/transformers/models/vit/modeling_vit.py b/src/transformers/models/vit/modeling_vit.py
index bb08acfc0bba67..b026a31d0a4c7b 100644
--- a/src/transformers/models/vit/modeling_vit.py
+++ b/src/transformers/models/vit/modeling_vit.py
@@ -901,3 +901,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel"]
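
Illustration (not part of the patch): the refactor is intended to leave the public surface of each package unchanged, with attribute access still triggering the lazy import. A quick smoke test, assuming torch and the vision extras are installed:

# transformers.models.vit is a _LazyModule right after import; touching
# ViTModel is what actually loads modeling_vit.
from transformers.models.vit import ViTConfig, ViTModel

model = ViTModel(ViTConfig())  # randomly initialized, nothing downloaded
print(type(model).__name__)   # -> ViTModel
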
diff --git a/src/transformers/models/vit_mae/__init__.py b/src/transformers/models/vit_mae/__init__.py
index f5360061762e6f..253017c39d6a2b 100644
--- a/src/transformers/models/vit_mae/__init__.py
+++ b/src/transformers/models/vit_mae/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,68 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_flax_available,
-    is_tf_available,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {"configuration_vit_mae": ["ViTMAEConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_vit_mae"] = [
-        "ViTMAEForPreTraining",
-        "ViTMAELayer",
-        "ViTMAEModel",
-        "ViTMAEPreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_vit_mae"] = [
-        "TFViTMAEForPreTraining",
-        "TFViTMAEModel",
-        "TFViTMAEPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_vit_mae import ViTMAEConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_vit_mae import (
-            ViTMAEForPreTraining,
-            ViTMAELayer,
-            ViTMAEModel,
-            ViTMAEPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
-
-
+    from .configuration_vit_mae import *
+    from .modeling_tf_vit_mae import *
+    from .modeling_vit_mae import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/vit_mae/configuration_vit_mae.py b/src/transformers/models/vit_mae/configuration_vit_mae.py
index d20b5af130158c..2c5ec3600599e6 100644
--- a/src/transformers/models/vit_mae/configuration_vit_mae.py
+++ b/src/transformers/models/vit_mae/configuration_vit_mae.py
@@ -135,3 +135,6 @@ def __init__(
         self.decoder_intermediate_size = decoder_intermediate_size
         self.mask_ratio = mask_ratio
         self.norm_pix_loss = norm_pix_loss
+
+
+__all__ = ["ViTMAEConfig"]
diff --git a/src/transformers/models/vit_mae/modeling_tf_vit_mae.py b/src/transformers/models/vit_mae/modeling_tf_vit_mae.py
index 5760dbf1efb638..bc5a14f42090e3 100644
--- a/src/transformers/models/vit_mae/modeling_tf_vit_mae.py
+++ b/src/transformers/models/vit_mae/modeling_tf_vit_mae.py
@@ -1370,3 +1370,6 @@ def build(self, input_shape=None):
         if getattr(self, "decoder", None) is not None:
             with tf.name_scope(self.decoder.name):
                 self.decoder.build(None)
+
+
+__all__ = ["TFViTMAEForPreTraining", "TFViTMAEModel", "TFViTMAEPreTrainedModel"]
diff --git a/src/transformers/models/vit_mae/modeling_vit_mae.py b/src/transformers/models/vit_mae/modeling_vit_mae.py
index e319f2f655aabf..1595eb80ca319e 100755
--- a/src/transformers/models/vit_mae/modeling_vit_mae.py
+++ b/src/transformers/models/vit_mae/modeling_vit_mae.py
@@ -1182,3 +1182,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["ViTMAEForPreTraining", "ViTMAELayer", "ViTMAEModel", "ViTMAEPreTrainedModel"]
diff --git a/src/transformers/models/vit_msn/__init__.py b/src/transformers/models/vit_msn/__init__.py
index 88f7ff73d29b69..547c5f4c04b912 100644
--- a/src/transformers/models/vit_msn/__init__.py
+++ b/src/transformers/models/vit_msn/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,39 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {"configuration_vit_msn": ["ViTMSNConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_vit_msn"] = [
-        "ViTMSNModel",
-        "ViTMSNForImageClassification",
-        "ViTMSNPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_vit_msn import ViTMSNConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_vit_msn import (
-            ViTMSNForImageClassification,
-            ViTMSNModel,
-            ViTMSNPreTrainedModel,
-        )
-
+    from .configuration_vit_msn import *
+    from .modeling_vit_msn import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/vit_msn/configuration_vit_msn.py b/src/transformers/models/vit_msn/configuration_vit_msn.py
index 7cf4414f8d7351..cd47df3e9932e0 100644
--- a/src/transformers/models/vit_msn/configuration_vit_msn.py
+++ b/src/transformers/models/vit_msn/configuration_vit_msn.py
@@ -110,3 +110,6 @@ def __init__(
         self.patch_size = patch_size
         self.num_channels = num_channels
         self.qkv_bias = qkv_bias
+
+
+__all__ = ["ViTMSNConfig"]
diff --git a/src/transformers/models/vit_msn/modeling_vit_msn.py b/src/transformers/models/vit_msn/modeling_vit_msn.py
index 39274dd28fef5b..d25611a41a66a4 100644
--- a/src/transformers/models/vit_msn/modeling_vit_msn.py
+++ b/src/transformers/models/vit_msn/modeling_vit_msn.py
@@ -756,3 +756,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["ViTMSNModel", "ViTMSNForImageClassification", "ViTMSNPreTrainedModel"]
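
Illustration (not part of the patch): the explicit __all__ lists added above are what make the new star imports in the TYPE_CHECKING branches safe, since a star import only binds the names listed in __all__ and leaves private helpers out. The rule on a small hypothetical module:

# hypothetical_module.py
__all__ = ["PublicModel"]


class PublicModel:
    pass


class _InternalLayer:  # not in __all__, so a star import skips it
    pass


# elsewhere:
#   from hypothetical_module import *   # binds PublicModel only;
#   _InternalLayer must be imported explicitly if really needed
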
diff --git a/src/transformers/models/vitdet/__init__.py b/src/transformers/models/vitdet/__init__.py
index a7ee9c755ff19b..f96b2fdf7d6247 100644
--- a/src/transformers/models/vitdet/__init__.py
+++ b/src/transformers/models/vitdet/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,43 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {"configuration_vitdet": ["VitDetConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_vitdet"] = [
-        "VitDetModel",
-        "VitDetPreTrainedModel",
-        "VitDetBackbone",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_vitdet import VitDetConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_vitdet import (
-            VitDetBackbone,
-            VitDetModel,
-            VitDetPreTrainedModel,
-        )
-
+    from .configuration_vitdet import *
+    from .modeling_vitdet import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/vitdet/configuration_vitdet.py b/src/transformers/models/vitdet/configuration_vitdet.py
index 856f228a5b4b86..cd91dce9b2961e 100644
--- a/src/transformers/models/vitdet/configuration_vitdet.py
+++ b/src/transformers/models/vitdet/configuration_vitdet.py
@@ -151,3 +151,6 @@ def __init__(
         self._out_features, self._out_indices = get_aligned_output_features_output_indices(
             out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
         )
+
+
+__all__ = ["VitDetConfig"]
diff --git a/src/transformers/models/vitdet/modeling_vitdet.py b/src/transformers/models/vitdet/modeling_vitdet.py
index 40edb6a05c68fd..9bd7ca2ff1c93a 100644
--- a/src/transformers/models/vitdet/modeling_vitdet.py
+++ b/src/transformers/models/vitdet/modeling_vitdet.py
@@ -872,3 +872,6 @@ def forward(
             hidden_states=outputs.hidden_states if output_hidden_states else None,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["VitDetModel", "VitDetPreTrainedModel", "VitDetBackbone"]
diff --git a/src/transformers/models/vitmatte/__init__.py b/src/transformers/models/vitmatte/__init__.py
index 7745a96cc6d545..5b87cea448ab52 100644
--- a/src/transformers/models/vitmatte/__init__.py
+++ b/src/transformers/models/vitmatte/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,58 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-    is_vision_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {"configuration_vitmatte": ["VitMatteConfig"]}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_vitmatte"] = ["VitMatteImageProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_vitmatte"] = [
-        "VitMattePreTrainedModel",
-        "VitMatteForImageMatting",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_vitmatte import VitMatteConfig
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_vitmatte import VitMatteImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_vitmatte import (
-            VitMatteForImageMatting,
-            VitMattePreTrainedModel,
-        )
-
+    from .configuration_vitmatte import *
+    from .image_processing_vitmatte import *
+    from .modeling_vitmatte import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/vitmatte/configuration_vitmatte.py b/src/transformers/models/vitmatte/configuration_vitmatte.py
index 36e46764a0069d..b9f78043306b72 100644
--- a/src/transformers/models/vitmatte/configuration_vitmatte.py
+++ b/src/transformers/models/vitmatte/configuration_vitmatte.py
@@ -131,3 +131,6 @@ def to_dict(self):
         output["backbone_config"] = self.backbone_config.to_dict()
         output["model_type"] = self.__class__.model_type
         return output
+
+
+__all__ = ["VitMatteConfig"]
diff --git a/src/transformers/models/vitmatte/image_processing_vitmatte.py b/src/transformers/models/vitmatte/image_processing_vitmatte.py
index 59944226782238..4c51e2a49b3b21 100644
--- a/src/transformers/models/vitmatte/image_processing_vitmatte.py
+++ b/src/transformers/models/vitmatte/image_processing_vitmatte.py
@@ -267,3 +267,6 @@ def preprocess(
 
         data = {"pixel_values": images}
         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["VitMatteImageProcessor"]
diff --git a/src/transformers/models/vitmatte/modeling_vitmatte.py b/src/transformers/models/vitmatte/modeling_vitmatte.py
index fb18ed6e789c2e..b27bc28870800a 100644
--- a/src/transformers/models/vitmatte/modeling_vitmatte.py
+++ b/src/transformers/models/vitmatte/modeling_vitmatte.py
@@ -336,3 +336,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["VitMattePreTrainedModel", "VitMatteForImageMatting"]
diff --git a/src/transformers/models/vits/__init__.py b/src/transformers/models/vits/__init__.py
index 14428463d28a50..0cf54b78848487 100644
--- a/src/transformers/models/vits/__init__.py
+++ b/src/transformers/models/vits/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,49 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_sentencepiece_available,
-    is_speech_available,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {
-    "configuration_vits": ["VitsConfig"],
-    "tokenization_vits": ["VitsTokenizer"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_vits"] = [
-        "VitsModel",
-        "VitsPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_vits import (
-        VitsConfig,
-    )
-    from .tokenization_vits import VitsTokenizer
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_vits import (
-            VitsModel,
-            VitsPreTrainedModel,
-        )
-
+    from .configuration_vits import *
+    from .modeling_vits import *
+    from .tokenization_vits import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/vits/configuration_vits.py b/src/transformers/models/vits/configuration_vits.py
index 0f2aeb8ac44571..6de2591b0f3add 100644
--- a/src/transformers/models/vits/configuration_vits.py
+++ b/src/transformers/models/vits/configuration_vits.py
@@ -248,3 +248,6 @@ def __init__(
         )
 
         super().__init__(**kwargs)
+
+
+__all__ = ["VitsConfig"]
diff --git a/src/transformers/models/vits/modeling_vits.py b/src/transformers/models/vits/modeling_vits.py
index 66834167d15e06..4f42c0405d9cb0 100644
--- a/src/transformers/models/vits/modeling_vits.py
+++ b/src/transformers/models/vits/modeling_vits.py
@@ -1487,3 +1487,6 @@ def forward(
             hidden_states=text_encoder_output.hidden_states,
             attentions=text_encoder_output.attentions,
         )
+
+
+__all__ = ["VitsModel", "VitsPreTrainedModel"]
diff --git a/src/transformers/models/vits/tokenization_vits.py b/src/transformers/models/vits/tokenization_vits.py
index b4d8af740375b3..ca40c80c124cae 100644
--- a/src/transformers/models/vits/tokenization_vits.py
+++ b/src/transformers/models/vits/tokenization_vits.py
@@ -241,3 +241,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
             f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
 
         return (vocab_file,)
+
+
+__all__ = ["VitsTokenizer"]
diff --git a/src/transformers/models/vivit/__init__.py b/src/transformers/models/vivit/__init__.py
index 261238edccbe75..13c52cbcff0b3f 100644
--- a/src/transformers/models/vivit/__init__.py
+++ b/src/transformers/models/vivit/__init__.py
@@ -1,8 +1,4 @@
-# flake8: noqa
-# There's no way to ignore "F401 '...' imported but unused" warnings in this
-# module, but to preserve other warnings. So, don't check this module at all.
-
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,60 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-# rely on isort to merge the imports
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
-
-
-_import_structure = {
-    "configuration_vivit": ["VivitConfig"],
-}
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_vivit"] = [
-        "VivitModel",
-        "VivitPreTrainedModel",
-        "VivitForVideoClassification",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_vivit import VivitConfig
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .image_processing_vivit import VivitImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_vivit import (
-            VivitForVideoClassification,
-            VivitModel,
-            VivitPreTrainedModel,
-        )
-
-
+    from .configuration_vivit import *
+    from .image_processing_vivit import *
+    from .modeling_vivit import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/vivit/configuration_vivit.py b/src/transformers/models/vivit/configuration_vivit.py
index 63895e4fb79faa..42863454e81c27 100644
--- a/src/transformers/models/vivit/configuration_vivit.py
+++ b/src/transformers/models/vivit/configuration_vivit.py
@@ -114,3 +114,6 @@ def __init__(
         self.qkv_bias = qkv_bias
 
         super().__init__(**kwargs)
+
+
+__all__ = ["VivitConfig"]
diff --git a/src/transformers/models/vivit/image_processing_vivit.py b/src/transformers/models/vivit/image_processing_vivit.py
index 5f251bbd1b95b9..19a97f9c645acb 100644
--- a/src/transformers/models/vivit/image_processing_vivit.py
+++ b/src/transformers/models/vivit/image_processing_vivit.py
@@ -402,3 +402,6 @@ def preprocess(
 
         data = {"pixel_values": videos}
         return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["VivitImageProcessor"]
diff --git a/src/transformers/models/vivit/modeling_vivit.py b/src/transformers/models/vivit/modeling_vivit.py
index 9b6516a25af45b..22877c842f962b 100755
--- a/src/transformers/models/vivit/modeling_vivit.py
+++ b/src/transformers/models/vivit/modeling_vivit.py
@@ -864,3 +864,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["VivitModel", "VivitPreTrainedModel", "VivitForVideoClassification"]
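
Illustration (not part of the patch): every __init__ touched in this series reduces to the same skeleton. Static type checkers and IDEs follow the TYPE_CHECKING branch and see eager star imports; at runtime only the else branch executes and swaps the module for a lazy proxy. Sketch for a hypothetical package with a single submodule mod:

# Skeleton of the refactored __init__.py files (hypothetical package with
# one submodule `mod`); this mirrors the pattern applied throughout the patch.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # read by mypy/pyright/IDEs, never executed at runtime
    from .mod import *
else:
    import sys

    from transformers.utils import _LazyModule
    from transformers.utils.import_utils import define_import_structure

    # replace this module object with a lazy proxy; a submodule is only
    # imported when one of its exported names is first accessed
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
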
diff --git a/src/transformers/models/wav2vec2/__init__.py b/src/transformers/models/wav2vec2/__init__.py
index 06e1c6628db9a8..3516b478194df7 100644
--- a/src/transformers/models/wav2vec2/__init__.py
+++ b/src/transformers/models/wav2vec2/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,118 +13,20 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_flax_available,
-    is_tf_available,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_wav2vec2": ["Wav2Vec2Config"],
-    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
-    "processing_wav2vec2": ["Wav2Vec2Processor"],
-    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
-}
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_wav2vec2"] = [
-        "Wav2Vec2ForAudioFrameClassification",
-        "Wav2Vec2ForCTC",
-        "Wav2Vec2ForMaskedLM",
-        "Wav2Vec2ForPreTraining",
-        "Wav2Vec2ForSequenceClassification",
-        "Wav2Vec2ForXVector",
-        "Wav2Vec2Model",
-        "Wav2Vec2PreTrainedModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_wav2vec2"] = [
-        "TFWav2Vec2ForCTC",
-        "TFWav2Vec2Model",
-        "TFWav2Vec2PreTrainedModel",
-        "TFWav2Vec2ForSequenceClassification",
-    ]
-
-try:
-    if not is_flax_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flax_wav2vec2"] = [
-        "FlaxWav2Vec2ForCTC",
-        "FlaxWav2Vec2ForPreTraining",
-        "FlaxWav2Vec2Model",
-        "FlaxWav2Vec2PreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_wav2vec2 import Wav2Vec2Config
-    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
-    from .processing_wav2vec2 import Wav2Vec2Processor
-    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_wav2vec2 import (
-            Wav2Vec2ForAudioFrameClassification,
-            Wav2Vec2ForCTC,
-            Wav2Vec2ForMaskedLM,
-            Wav2Vec2ForPreTraining,
-            Wav2Vec2ForSequenceClassification,
-            Wav2Vec2ForXVector,
-            Wav2Vec2Model,
-            Wav2Vec2PreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_wav2vec2 import (
-            TFWav2Vec2ForCTC,
-            TFWav2Vec2ForSequenceClassification,
-            TFWav2Vec2Model,
-            TFWav2Vec2PreTrainedModel,
-        )
-
-    try:
-        if not is_flax_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_wav2vec2 import (
-            FlaxWav2Vec2ForCTC,
-            FlaxWav2Vec2ForPreTraining,
-            FlaxWav2Vec2Model,
-            FlaxWav2Vec2PreTrainedModel,
-        )
-
-
+    from .configuration_wav2vec2 import *
+    from .feature_extraction_wav2vec2 import *
+    from .modeling_flax_wav2vec2 import *
+    from .modeling_tf_wav2vec2 import *
+    from .modeling_wav2vec2 import *
+    from .processing_wav2vec2 import *
+    from .tokenization_wav2vec2 import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/wav2vec2/configuration_wav2vec2.py b/src/transformers/models/wav2vec2/configuration_wav2vec2.py
index b4a676ddba8f3b..c28aa6305f855c 100644
--- a/src/transformers/models/wav2vec2/configuration_wav2vec2.py
+++ b/src/transformers/models/wav2vec2/configuration_wav2vec2.py
@@ -342,3 +342,6 @@ def __init__(
     @property
     def inputs_to_logits_ratio(self):
         return functools.reduce(operator.mul, self.conv_stride, 1)
+
+
+__all__ = ["Wav2Vec2Config"]
diff --git a/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py b/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py
index e5266c67ded62c..f76d9830940688 100644
--- a/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py
+++ b/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py
@@ -238,3 +238,6 @@ def __call__(
             padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
 
         return padded_inputs
+
+
+__all__ = ["Wav2Vec2FeatureExtractor"]
diff --git a/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py
index 9a24b9d39fdaac..55d34b84ef593e 100644
--- a/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py
+++ b/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py
@@ -1423,3 +1423,6 @@ def __call__(
 append_replace_return_docstrings(
     FlaxWav2Vec2ForPreTraining, output_type=FlaxWav2Vec2ForPreTrainingOutput, config_class=Wav2Vec2Config
 )
+
+
+__all__ = ["FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel"]
diff --git a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py
index a8338e363d94a2..3b118162d7cc23 100644
--- a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py
+++ b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py
@@ -1853,3 +1853,6 @@ def build(self, input_shape=None):
         if getattr(self, "classifier", None) is not None:
             with tf.name_scope(self.classifier.name):
                 self.classifier.build([None, None, self.config.classifier_proj_size])
+
+
+__all__ = ["TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification"]
diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py
index bf1bb7746ce802..3ebae6f25e204b 100755
--- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py
+++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py
@@ -2715,3 +2715,15 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "Wav2Vec2ForAudioFrameClassification",
+    "Wav2Vec2ForCTC",
+    "Wav2Vec2ForMaskedLM",
+    "Wav2Vec2ForPreTraining",
+    "Wav2Vec2ForSequenceClassification",
+    "Wav2Vec2ForXVector",
+    "Wav2Vec2Model",
+    "Wav2Vec2PreTrainedModel",
+]
diff --git a/src/transformers/models/wav2vec2/processing_wav2vec2.py b/src/transformers/models/wav2vec2/processing_wav2vec2.py
index 4bd4255315fdc5..077f5617198b9a 100644
--- a/src/transformers/models/wav2vec2/processing_wav2vec2.py
+++ b/src/transformers/models/wav2vec2/processing_wav2vec2.py
@@ -181,3 +181,6 @@ def as_target_processor(self):
         yield
         self.current_processor = self.feature_extractor
         self._in_target_context_manager = False
+
+
+__all__ = ["Wav2Vec2Processor"]
diff --git a/src/transformers/models/wav2vec2/tokenization_wav2vec2.py b/src/transformers/models/wav2vec2/tokenization_wav2vec2.py
index c1a333fe48c6b4..ecde491a70efa1 100644
--- a/src/transformers/models/wav2vec2/tokenization_wav2vec2.py
+++ b/src/transformers/models/wav2vec2/tokenization_wav2vec2.py
@@ -919,3 +919,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
             f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
 
         return (vocab_file,)
+
+
+__all__ = ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"]
diff --git a/src/transformers/models/wav2vec2_bert/__init__.py b/src/transformers/models/wav2vec2_bert/__init__.py
index be37038211a811..7520263c51bcd3 100644
--- a/src/transformers/models/wav2vec2_bert/__init__.py
+++ b/src/transformers/models/wav2vec2_bert/__init__.py
@@ -13,52 +13,16 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {
-    "configuration_wav2vec2_bert": ["Wav2Vec2BertConfig"],
-    "processing_wav2vec2_bert": ["Wav2Vec2BertProcessor"],
-}
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_wav2vec2_bert"] = [
-        "Wav2Vec2BertForAudioFrameClassification",
-        "Wav2Vec2BertForCTC",
-        "Wav2Vec2BertForSequenceClassification",
-        "Wav2Vec2BertForXVector",
-        "Wav2Vec2BertModel",
-        "Wav2Vec2BertPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_wav2vec2_bert import (
-        Wav2Vec2BertConfig,
-    )
-    from .processing_wav2vec2_bert import Wav2Vec2BertProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_wav2vec2_bert import (
-            Wav2Vec2BertForAudioFrameClassification,
-            Wav2Vec2BertForCTC,
-            Wav2Vec2BertForSequenceClassification,
-            Wav2Vec2BertForXVector,
-            Wav2Vec2BertModel,
-            Wav2Vec2BertPreTrainedModel,
-        )
-
+    from .configuration_wav2vec2_bert import *
+    from .modeling_wav2vec2_bert import *
+    from .processing_wav2vec2_bert import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py b/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py
index 20b4e4fa1306de..db52cc5baed367 100644
--- a/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py
+++ b/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py
@@ -308,3 +308,6 @@ def inputs_to_logits_ratio(self):
         if self.add_adapter:
             ratio = ratio * (self.adapter_stride**self.num_adapter_layers)
         return ratio
+
+
+__all__ = ["Wav2Vec2BertConfig"]
diff --git a/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py b/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
index 6f1d5576df7316..59dc06e73361aa 100644
--- a/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
+++ b/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
@@ -1666,3 +1666,13 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "Wav2Vec2BertForAudioFrameClassification",
+    "Wav2Vec2BertForCTC",
+    "Wav2Vec2BertForSequenceClassification",
+    "Wav2Vec2BertForXVector",
+    "Wav2Vec2BertModel",
+    "Wav2Vec2BertPreTrainedModel",
+]
diff --git a/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py b/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py
index 8b09e92419ae97..d8c94d9a6978ae 100644
--- a/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py
+++ b/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py
@@ -158,3 +158,6 @@ def decode(self, *args, **kwargs):
         to the docstring of this method for more information.
         """
         return self.tokenizer.decode(*args, **kwargs)
+
+
+__all__ = ["Wav2Vec2BertProcessor"]
diff --git a/src/transformers/models/wav2vec2_conformer/__init__.py b/src/transformers/models/wav2vec2_conformer/__init__.py
index a780a50b6cce11..54d0d9e2c3997c 100644
--- a/src/transformers/models/wav2vec2_conformer/__init__.py
+++ b/src/transformers/models/wav2vec2_conformer/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,52 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {
-    "configuration_wav2vec2_conformer": ["Wav2Vec2ConformerConfig"],
-}
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_wav2vec2_conformer"] = [
-        "Wav2Vec2ConformerForAudioFrameClassification",
-        "Wav2Vec2ConformerForCTC",
-        "Wav2Vec2ConformerForPreTraining",
-        "Wav2Vec2ConformerForSequenceClassification",
-        "Wav2Vec2ConformerForXVector",
-        "Wav2Vec2ConformerModel",
-        "Wav2Vec2ConformerPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_wav2vec2_conformer import (
-        Wav2Vec2ConformerConfig,
-    )
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_wav2vec2_conformer import (
-            Wav2Vec2ConformerForAudioFrameClassification,
-            Wav2Vec2ConformerForCTC,
-            Wav2Vec2ConformerForPreTraining,
-            Wav2Vec2ConformerForSequenceClassification,
-            Wav2Vec2ConformerForXVector,
-            Wav2Vec2ConformerModel,
-            Wav2Vec2ConformerPreTrainedModel,
-        )
-
+    from .configuration_wav2vec2_conformer import *
+    from .modeling_wav2vec2_conformer import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py
index 8f78aa93753513..73a840b9f824b6 100644
--- a/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py
+++ b/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py
@@ -355,3 +355,6 @@ def __init__(
     @property
     def inputs_to_logits_ratio(self):
         return functools.reduce(operator.mul, self.conv_stride, 1)
+
+
+__all__ = ["Wav2Vec2ConformerConfig"]
diff --git a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
index 933bf8f6dc0bcd..c2eb53c8a6267e 100644
--- a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
+++ b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
@@ -2113,3 +2113,14 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "Wav2Vec2ConformerForAudioFrameClassification",
+    "Wav2Vec2ConformerForCTC",
+    "Wav2Vec2ConformerForPreTraining",
+    "Wav2Vec2ConformerForSequenceClassification",
+    "Wav2Vec2ConformerForXVector",
+    "Wav2Vec2ConformerModel",
+    "Wav2Vec2ConformerPreTrainedModel",
+]
diff --git a/src/transformers/models/wav2vec2_phoneme/__init__.py b/src/transformers/models/wav2vec2_phoneme/__init__.py
index 7859f381dd5190..cbddfb4fe92d2c 100644
--- a/src/transformers/models/wav2vec2_phoneme/__init__.py
+++ b/src/transformers/models/wav2vec2_phoneme/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,14 +14,13 @@
 from typing import TYPE_CHECKING
 
 from ...utils import _LazyModule
-
-
-_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
+    from .tokenization_wav2vec2_phoneme import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py b/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py
index ff4704c778c0a9..b617b17d02b937 100644
--- a/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py
+++ b/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py
@@ -575,3 +575,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
             f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
 
         return (vocab_file,)
+
+
+__all__ = ["Wav2Vec2PhonemeCTCTokenizer"]
diff --git a/src/transformers/models/wav2vec2_with_lm/__init__.py b/src/transformers/models/wav2vec2_with_lm/__init__.py
index 611688f6a683e7..95fd10dab7092f 100644
--- a/src/transformers/models/wav2vec2_with_lm/__init__.py
+++ b/src/transformers/models/wav2vec2_with_lm/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,14 +14,13 @@
 from typing import TYPE_CHECKING
 
 from ...utils import _LazyModule
-
-
-_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
+    from .processing_wav2vec2_with_lm import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py b/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py
index 0081008009e3a0..f569b4f625e70a 100644
--- a/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py
+++ b/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py
@@ -653,3 +653,6 @@ def as_target_processor(self):
         yield
         self.current_processor = self.feature_extractor
         self._in_target_context_manager = False
+
+
+__all__ = ["Wav2Vec2ProcessorWithLM"]
diff --git a/src/transformers/models/wavlm/__init__.py b/src/transformers/models/wavlm/__init__.py
index d615a3a5ae4062..d230bf3f0924b4 100644
--- a/src/transformers/models/wavlm/__init__.py
+++ b/src/transformers/models/wavlm/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,45 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {"configuration_wavlm": ["WavLMConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_wavlm"] = [
-        "WavLMForAudioFrameClassification",
-        "WavLMForCTC",
-        "WavLMForSequenceClassification",
-        "WavLMForXVector",
-        "WavLMModel",
-        "WavLMPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_wavlm import WavLMConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_wavlm import (
-            WavLMForAudioFrameClassification,
-            WavLMForCTC,
-            WavLMForSequenceClassification,
-            WavLMForXVector,
-            WavLMModel,
-            WavLMPreTrainedModel,
-        )
-
+    from .configuration_wavlm import *
+    from .modeling_wavlm import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/wavlm/configuration_wavlm.py b/src/transformers/models/wavlm/configuration_wavlm.py
index 3faeb7ab53b25a..63cd44e12b0f04 100644
--- a/src/transformers/models/wavlm/configuration_wavlm.py
+++ b/src/transformers/models/wavlm/configuration_wavlm.py
@@ -332,3 +332,6 @@ def __init__(
     @property
     def inputs_to_logits_ratio(self):
         return functools.reduce(operator.mul, self.conv_stride, 1)
+
+
+__all__ = ["WavLMConfig"]
diff --git a/src/transformers/models/wavlm/modeling_wavlm.py b/src/transformers/models/wavlm/modeling_wavlm.py
index 4df192fda5efa3..a3dd52757a3d7e 100755
--- a/src/transformers/models/wavlm/modeling_wavlm.py
+++ b/src/transformers/models/wavlm/modeling_wavlm.py
@@ -1850,3 +1850,13 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "WavLMForAudioFrameClassification",
+    "WavLMForCTC",
+    "WavLMForSequenceClassification",
+    "WavLMForXVector",
+    "WavLMModel",
+    "WavLMPreTrainedModel",
+]
diff --git a/src/transformers/models/whisper/__init__.py b/src/transformers/models/whisper/__init__.py
index 5d37e72c02b5df..a4956c5fbb257a 100644
--- a/src/transformers/models/whisper/__init__.py
+++ b/src/transformers/models/whisper/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,125 +13,21 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_flax_available,
-    is_tf_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_whisper": ["WhisperConfig", "WhisperOnnxConfig"],
-    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
-    "processing_whisper": ["WhisperProcessor"],
-    "tokenization_whisper": ["WhisperTokenizer"],
-}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_whisper"] = [
-        "WhisperForCausalLM",
-        "WhisperForConditionalGeneration",
-        "WhisperModel",
-        "WhisperPreTrainedModel",
-        "WhisperForAudioClassification",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_whisper"] = [
-        "TFWhisperForConditionalGeneration",
-        "TFWhisperModel",
-        "TFWhisperPreTrainedModel",
-    ]
-
-try:
-    if not is_flax_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flax_whisper"] = [
-        "FlaxWhisperForConditionalGeneration",
-        "FlaxWhisperModel",
-        "FlaxWhisperPreTrainedModel",
-        "FlaxWhisperForAudioClassification",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_whisper import WhisperConfig, WhisperOnnxConfig
-    from .feature_extraction_whisper import WhisperFeatureExtractor
-    from .processing_whisper import WhisperProcessor
-    from .tokenization_whisper import WhisperTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_whisper_fast import WhisperTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_whisper import (
-            WhisperForAudioClassification,
-            WhisperForCausalLM,
-            WhisperForConditionalGeneration,
-            WhisperModel,
-            WhisperPreTrainedModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_whisper import (
-            TFWhisperForConditionalGeneration,
-            TFWhisperModel,
-            TFWhisperPreTrainedModel,
-        )
-
-    try:
-        if not is_flax_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flax_whisper import (
-            FlaxWhisperForAudioClassification,
-            FlaxWhisperForConditionalGeneration,
-            FlaxWhisperModel,
-            FlaxWhisperPreTrainedModel,
-        )
-
+    from .configuration_whisper import *
+    from .feature_extraction_whisper import *
+    from .modeling_flax_whisper import *
+    from .modeling_tf_whisper import *
+    from .modeling_whisper import *
+    from .processing_whisper import *
+    from .tokenization_whisper import *
+    from .tokenization_whisper_fast import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/whisper/configuration_whisper.py b/src/transformers/models/whisper/configuration_whisper.py
index d65811cbc8efe6..3f7991215a8516 100644
--- a/src/transformers/models/whisper/configuration_whisper.py
+++ b/src/transformers/models/whisper/configuration_whisper.py
@@ -342,3 +342,6 @@ def generate_dummy_inputs(
     @property
     def atol_for_validation(self) -> float:
         return 1e-3
+
+
+__all__ = ["WhisperConfig", "WhisperOnnxConfig"]
diff --git a/src/transformers/models/whisper/feature_extraction_whisper.py b/src/transformers/models/whisper/feature_extraction_whisper.py
index a79eeedd0a295c..3c4d413d88e66f 100644
--- a/src/transformers/models/whisper/feature_extraction_whisper.py
+++ b/src/transformers/models/whisper/feature_extraction_whisper.py
@@ -322,3 +322,6 @@ def __call__(
             padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
 
         return padded_inputs
+
+
+__all__ = ["WhisperFeatureExtractor"]
diff --git a/src/transformers/models/whisper/modeling_flax_whisper.py b/src/transformers/models/whisper/modeling_flax_whisper.py
index cc4483963c6309..e3c7e9d1c65fd3 100644
--- a/src/transformers/models/whisper/modeling_flax_whisper.py
+++ b/src/transformers/models/whisper/modeling_flax_whisper.py
@@ -1694,3 +1694,11 @@ def __call__(
 append_replace_return_docstrings(
     FlaxWhisperForAudioClassification, output_type=FlaxSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC
 )
+
+
+__all__ = [
+    "FlaxWhisperForConditionalGeneration",
+    "FlaxWhisperModel",
+    "FlaxWhisperPreTrainedModel",
+    "FlaxWhisperForAudioClassification",
+]
diff --git a/src/transformers/models/whisper/modeling_tf_whisper.py b/src/transformers/models/whisper/modeling_tf_whisper.py
index a2873037163197..ffe49551ef9448 100644
--- a/src/transformers/models/whisper/modeling_tf_whisper.py
+++ b/src/transformers/models/whisper/modeling_tf_whisper.py
@@ -1756,3 +1756,6 @@ def build(self, input_shape=None):
         if getattr(self, "model", None) is not None:
             with tf.name_scope(self.model.name):
                 self.model.build(None)
+
+
+__all__ = ["TFWhisperForConditionalGeneration", "TFWhisperModel", "TFWhisperPreTrainedModel"]
diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py
index ce3df3e16707e5..66b3def913aad9 100644
--- a/src/transformers/models/whisper/modeling_whisper.py
+++ b/src/transformers/models/whisper/modeling_whisper.py
@@ -2239,3 +2239,12 @@ def forward(
             hidden_states=encoder_outputs.hidden_states,
             attentions=encoder_outputs.attentions,
         )
+
+
+__all__ = [
+    "WhisperForCausalLM",
+    "WhisperForConditionalGeneration",
+    "WhisperModel",
+    "WhisperPreTrainedModel",
+    "WhisperForAudioClassification",
+]
diff --git a/src/transformers/models/whisper/processing_whisper.py b/src/transformers/models/whisper/processing_whisper.py
index f22aae143e6bc4..ad5fa22e370fbe 100644
--- a/src/transformers/models/whisper/processing_whisper.py
+++ b/src/transformers/models/whisper/processing_whisper.py
@@ -95,3 +95,6 @@ def decode(self, *args, **kwargs):
 
     def get_prompt_ids(self, text: str, return_tensors="np"):
         return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
+
+
+__all__ = ["WhisperProcessor"]
diff --git a/src/transformers/models/whisper/tokenization_whisper.py b/src/transformers/models/whisper/tokenization_whisper.py
index e537ef95da6751..7983799ad8a7ec 100644
--- a/src/transformers/models/whisper/tokenization_whisper.py
+++ b/src/transformers/models/whisper/tokenization_whisper.py
@@ -1390,3 +1390,6 @@ def _merge_punctuations(words, tokens, indices, prepended, appended):
     words[:] = [word for word in words if word]
     tokens[:] = [token for token in tokens if token]
    indices[:] = [idx for idx in indices if idx]
+
+
+__all__ = ["WhisperTokenizer"]
diff --git a/src/transformers/models/whisper/tokenization_whisper_fast.py b/src/transformers/models/whisper/tokenization_whisper_fast.py
index f0383cb0def76f..9cc0b7c530f4c1 100644
--- a/src/transformers/models/whisper/tokenization_whisper_fast.py
+++ b/src/transformers/models/whisper/tokenization_whisper_fast.py
@@ -639,3 +639,6 @@ def _convert_to_list(token_ids):
     if isinstance(token_ids, np.ndarray):
         token_ids = token_ids.tolist()
     return token_ids
+
+
+__all__ = ["WhisperTokenizerFast"]
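
Illustration (not part of the patch): a consistency check one might run after this kind of refactor, verifying that every name a submodule declares in __all__ resolves on the package through the lazy module. Sketch; importing modeling_whisper assumes torch is installed.

import importlib

pkg = importlib.import_module("transformers.models.whisper")
mod = importlib.import_module("transformers.models.whisper.modeling_whisper")

# hasattr() on the lazy package proxy triggers the underlying import
missing = [name for name in mod.__all__ if not hasattr(pkg, name)]
assert not missing, f"not re-exported: {missing}"
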
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_x_clip": [ - "XCLIPConfig", - "XCLIPTextConfig", - "XCLIPVisionConfig", - ], - "processing_x_clip": ["XCLIPProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_x_clip"] = [ - "XCLIPModel", - "XCLIPPreTrainedModel", - "XCLIPTextModel", - "XCLIPVisionModel", - ] - if TYPE_CHECKING: - from .configuration_x_clip import ( - XCLIPConfig, - XCLIPTextConfig, - XCLIPVisionConfig, - ) - from .processing_x_clip import XCLIPProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_x_clip import ( - XCLIPModel, - XCLIPPreTrainedModel, - XCLIPTextModel, - XCLIPVisionModel, - ) - + from .configuration_x_clip import * + from .modeling_x_clip import * + from .processing_x_clip import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/x_clip/configuration_x_clip.py b/src/transformers/models/x_clip/configuration_x_clip.py index 3d3b92d2c8c02e..a500e5dccd90f2 100644 --- a/src/transformers/models/x_clip/configuration_x_clip.py +++ b/src/transformers/models/x_clip/configuration_x_clip.py @@ -376,3 +376,6 @@ def from_text_vision_configs(cls, text_config: XCLIPTextConfig, vision_config: X """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +__all__ = ["XCLIPConfig", "XCLIPTextConfig", "XCLIPVisionConfig"] diff --git a/src/transformers/models/x_clip/modeling_x_clip.py b/src/transformers/models/x_clip/modeling_x_clip.py index 25208c43a85a6c..bce1ea02e7b9e0 100644 --- a/src/transformers/models/x_clip/modeling_x_clip.py +++ b/src/transformers/models/x_clip/modeling_x_clip.py @@ -1675,3 +1675,6 @@ def forward( vision_model_output=vision_outputs, mit_output=mit_outputs, ) + + +__all__ = ["XCLIPModel", "XCLIPPreTrainedModel", "XCLIPTextModel", "XCLIPVisionModel"] diff --git a/src/transformers/models/x_clip/processing_x_clip.py b/src/transformers/models/x_clip/processing_x_clip.py index a11aeb18dc4f59..4a17d3a15a2084 100644 --- a/src/transformers/models/x_clip/processing_x_clip.py +++ b/src/transformers/models/x_clip/processing_x_clip.py @@ -146,3 +146,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["XCLIPProcessor"] diff --git a/src/transformers/models/xglm/__init__.py b/src/transformers/models/xglm/__init__.py index 59bba032f4ea2a..1eefd79d4cf7c3 100644 --- a/src/transformers/models/xglm/__init__.py +++ b/src/transformers/models/xglm/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,123 +13,19 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_xglm": ["XGLMConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xglm"] = ["XGLMTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xglm"] = [ - "XGLMForCausalLM", - "XGLMModel", - "XGLMPreTrainedModel", - ] - - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_xglm"] = [ - "FlaxXGLMForCausalLM", - "FlaxXGLMModel", - "FlaxXGLMPreTrainedModel", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_xglm"] = [ - "TFXGLMForCausalLM", - "TFXGLMModel", - "TFXGLMPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_xglm import XGLMConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xglm import XGLMTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xglm_fast import XGLMTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xglm import XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_xglm import ( - TFXGLMForCausalLM, - TFXGLMModel, - TFXGLMPreTrainedModel, - ) - - + from .configuration_xglm import * + from .modeling_flax_xglm import * + from .modeling_tf_xglm import * + from .modeling_xglm import * + from .tokenization_xglm import * + from .tokenization_xglm_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/xglm/configuration_xglm.py b/src/transformers/models/xglm/configuration_xglm.py index c5a275405d773a..da5ded49162306 100644 --- a/src/transformers/models/xglm/configuration_xglm.py +++ b/src/transformers/models/xglm/configuration_xglm.py @@ -134,3 +134,6 @@ def __init__( decoder_start_token_id=decoder_start_token_id, **kwargs, ) + + +__all__ = ["XGLMConfig"] diff --git 
a/src/transformers/models/xglm/modeling_flax_xglm.py b/src/transformers/models/xglm/modeling_flax_xglm.py index 473448c66ccc2a..2c560dc8e63707 100644 --- a/src/transformers/models/xglm/modeling_flax_xglm.py +++ b/src/transformers/models/xglm/modeling_flax_xglm.py @@ -798,3 +798,6 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + + +__all__ = ["FlaxXGLMForCausalLM", "FlaxXGLMModel", "FlaxXGLMPreTrainedModel"] diff --git a/src/transformers/models/xglm/modeling_tf_xglm.py b/src/transformers/models/xglm/modeling_tf_xglm.py index a62396b79c0ea7..3c1d1afb9aa2c4 100644 --- a/src/transformers/models/xglm/modeling_tf_xglm.py +++ b/src/transformers/models/xglm/modeling_tf_xglm.py @@ -1003,3 +1003,6 @@ def tf_to_pt_weight_rename(self, tf_weight): return tf_weight, "model.embed_tokens.weight" else: return (tf_weight,) + + +__all__ = ["TFXGLMForCausalLM", "TFXGLMModel", "TFXGLMPreTrainedModel"] diff --git a/src/transformers/models/xglm/modeling_xglm.py b/src/transformers/models/xglm/modeling_xglm.py index 70aac350c166c8..3192d6f524ac58 100755 --- a/src/transformers/models/xglm/modeling_xglm.py +++ b/src/transformers/models/xglm/modeling_xglm.py @@ -807,3 +807,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel"] diff --git a/src/transformers/models/xglm/tokenization_xglm.py b/src/transformers/models/xglm/tokenization_xglm.py index 8713d5f129d1e1..79f6acc3640f0b 100644 --- a/src/transformers/models/xglm/tokenization_xglm.py +++ b/src/transformers/models/xglm/tokenization_xglm.py @@ -295,3 +295,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["XGLMTokenizer"] diff --git a/src/transformers/models/xglm/tokenization_xglm_fast.py b/src/transformers/models/xglm/tokenization_xglm_fast.py index 2f8b0480c82dff..92d99d2f863dc0 100644 --- a/src/transformers/models/xglm/tokenization_xglm_fast.py +++ b/src/transformers/models/xglm/tokenization_xglm_fast.py @@ -191,3 +191,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["XGLMTokenizerFast"] diff --git a/src/transformers/models/xlm/__init__.py b/src/transformers/models/xlm/__init__.py index 97d0933b8b9a7d..1167fc93a10101 100644 --- a/src/transformers/models/xlm/__init__.py +++ b/src/transformers/models/xlm/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,91 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available - - -_import_structure = { - "configuration_xlm": ["XLMConfig", "XLMOnnxConfig"], - "tokenization_xlm": ["XLMTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xlm"] = [ - "XLMForMultipleChoice", - "XLMForQuestionAnswering", - "XLMForQuestionAnsweringSimple", - "XLMForSequenceClassification", - "XLMForTokenClassification", - "XLMModel", - "XLMPreTrainedModel", - "XLMWithLMHeadModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_xlm"] = [ - "TFXLMForMultipleChoice", - "TFXLMForQuestionAnsweringSimple", - "TFXLMForSequenceClassification", - "TFXLMForTokenClassification", - "TFXLMMainLayer", - "TFXLMModel", - "TFXLMPreTrainedModel", - "TFXLMWithLMHeadModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_xlm import XLMConfig, XLMOnnxConfig - from .tokenization_xlm import XLMTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xlm import ( - XLMForMultipleChoice, - XLMForQuestionAnswering, - XLMForQuestionAnsweringSimple, - XLMForSequenceClassification, - XLMForTokenClassification, - XLMModel, - XLMPreTrainedModel, - XLMWithLMHeadModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_xlm import ( - TFXLMForMultipleChoice, - TFXLMForQuestionAnsweringSimple, - TFXLMForSequenceClassification, - TFXLMForTokenClassification, - TFXLMMainLayer, - TFXLMModel, - TFXLMPreTrainedModel, - TFXLMWithLMHeadModel, - ) - + from .configuration_xlm import * + from .modeling_tf_xlm import * + from .modeling_xlm import * + from .tokenization_xlm import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/xlm/configuration_xlm.py b/src/transformers/models/xlm/configuration_xlm.py index 39db12c51b0362..ebd2c290f38fe9 100644 --- a/src/transformers/models/xlm/configuration_xlm.py +++ b/src/transformers/models/xlm/configuration_xlm.py @@ -236,3 +236,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["XLMConfig", "XLMOnnxConfig"] diff --git a/src/transformers/models/xlm/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py index f03f416a084850..87f4dbca17f6fa 100644 --- a/src/transformers/models/xlm/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -1343,3 +1343,15 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFXLMForMultipleChoice", + "TFXLMForQuestionAnsweringSimple", + "TFXLMForSequenceClassification", + "TFXLMForTokenClassification", + "TFXLMMainLayer", + "TFXLMModel", + "TFXLMPreTrainedModel", + 
"TFXLMWithLMHeadModel", +] diff --git a/src/transformers/models/xlm/modeling_xlm.py b/src/transformers/models/xlm/modeling_xlm.py index 781d7a138f261e..28f71b7d7df4c1 100755 --- a/src/transformers/models/xlm/modeling_xlm.py +++ b/src/transformers/models/xlm/modeling_xlm.py @@ -1262,3 +1262,15 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = [ + "XLMForMultipleChoice", + "XLMForQuestionAnswering", + "XLMForQuestionAnsweringSimple", + "XLMForSequenceClassification", + "XLMForTokenClassification", + "XLMModel", + "XLMPreTrainedModel", + "XLMWithLMHeadModel", +] diff --git a/src/transformers/models/xlm/tokenization_xlm.py b/src/transformers/models/xlm/tokenization_xlm.py index b20823e0171558..14387279708ef8 100644 --- a/src/transformers/models/xlm/tokenization_xlm.py +++ b/src/transformers/models/xlm/tokenization_xlm.py @@ -601,3 +601,6 @@ def __setstate__(self, d): ) self.sm = sacremoses + + +__all__ = ["XLMTokenizer"] diff --git a/src/transformers/models/xlm_roberta/__init__.py b/src/transformers/models/xlm_roberta/__init__.py index 00658bb9ed9b8d..0e684c6c9b2c2c 100644 --- a/src/transformers/models/xlm_roberta/__init__.py +++ b/src/transformers/models/xlm_roberta/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,168 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_xlm_roberta": [ - "XLMRobertaConfig", - "XLMRobertaOnnxConfig", - ], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xlm_roberta"] = [ - "XLMRobertaForCausalLM", - "XLMRobertaForMaskedLM", - "XLMRobertaForMultipleChoice", - "XLMRobertaForQuestionAnswering", - "XLMRobertaForSequenceClassification", - "XLMRobertaForTokenClassification", - "XLMRobertaModel", - "XLMRobertaPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_xlm_roberta"] = [ - "TFXLMRobertaForCausalLM", - "TFXLMRobertaForMaskedLM", - "TFXLMRobertaForMultipleChoice", - "TFXLMRobertaForQuestionAnswering", - "TFXLMRobertaForSequenceClassification", - "TFXLMRobertaForTokenClassification", - "TFXLMRobertaModel", - "TFXLMRobertaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_flax_available(): - 
raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_xlm_roberta"] = [ - "FlaxXLMRobertaForMaskedLM", - "FlaxXLMRobertaForCausalLM", - "FlaxXLMRobertaForMultipleChoice", - "FlaxXLMRobertaForQuestionAnswering", - "FlaxXLMRobertaForSequenceClassification", - "FlaxXLMRobertaForTokenClassification", - "FlaxXLMRobertaModel", - "FlaxXLMRobertaPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_xlm_roberta import ( - XLMRobertaConfig, - XLMRobertaOnnxConfig, - ) - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xlm_roberta import XLMRobertaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xlm_roberta import ( - XLMRobertaForCausalLM, - XLMRobertaForMaskedLM, - XLMRobertaForMultipleChoice, - XLMRobertaForQuestionAnswering, - XLMRobertaForSequenceClassification, - XLMRobertaForTokenClassification, - XLMRobertaModel, - XLMRobertaPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_xlm_roberta import ( - TFXLMRobertaForCausalLM, - TFXLMRobertaForMaskedLM, - TFXLMRobertaForMultipleChoice, - TFXLMRobertaForQuestionAnswering, - TFXLMRobertaForSequenceClassification, - TFXLMRobertaForTokenClassification, - TFXLMRobertaModel, - TFXLMRobertaPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_xlm_roberta import ( - FlaxXLMRobertaForCausalLM, - FlaxXLMRobertaForMaskedLM, - FlaxXLMRobertaForMultipleChoice, - FlaxXLMRobertaForQuestionAnswering, - FlaxXLMRobertaForSequenceClassification, - FlaxXLMRobertaForTokenClassification, - FlaxXLMRobertaModel, - FlaxXLMRobertaPreTrainedModel, - ) - + from .configuration_xlm_roberta import * + from .modeling_flax_xlm_roberta import * + from .modeling_tf_xlm_roberta import * + from .modeling_xlm_roberta import * + from .tokenization_xlm_roberta import * + from .tokenization_xlm_roberta_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py b/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py index 100321db481f4b..3b17f33ed60063 100644 --- a/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py @@ -152,3 +152,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["XLMRobertaConfig", "XLMRobertaOnnxConfig"] diff --git a/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py index e700fcd0244ad5..271d0aeb9769ea 100644 --- a/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py 
+++ b/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py @@ -1497,3 +1497,15 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + + +__all__ = [ + "FlaxXLMRobertaForMaskedLM", + "FlaxXLMRobertaForCausalLM", + "FlaxXLMRobertaForMultipleChoice", + "FlaxXLMRobertaForQuestionAnswering", + "FlaxXLMRobertaForSequenceClassification", + "FlaxXLMRobertaForTokenClassification", + "FlaxXLMRobertaModel", + "FlaxXLMRobertaPreTrainedModel", +] diff --git a/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py index efc5b696676c0a..5f4e9d006c3419 100644 --- a/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py @@ -1777,3 +1777,15 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFXLMRobertaForCausalLM", + "TFXLMRobertaForMaskedLM", + "TFXLMRobertaForMultipleChoice", + "TFXLMRobertaForQuestionAnswering", + "TFXLMRobertaForSequenceClassification", + "TFXLMRobertaForTokenClassification", + "TFXLMRobertaModel", + "TFXLMRobertaPreTrainedModel", +] diff --git a/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py index 7de91d6ce1ff98..055857294417bb 100644 --- a/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py @@ -1696,3 +1696,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "XLMRobertaForCausalLM", + "XLMRobertaForMaskedLM", + "XLMRobertaForMultipleChoice", + "XLMRobertaForQuestionAnswering", + "XLMRobertaForSequenceClassification", + "XLMRobertaForTokenClassification", + "XLMRobertaModel", + "XLMRobertaPreTrainedModel", +] diff --git a/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py index 35a06aeb91be51..226f1a89572416 100644 --- a/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py @@ -294,3 +294,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["XLMRobertaTokenizer"] diff --git a/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py index 4ad2596a6fbf98..b51a9340dbfea0 100644 --- a/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py +++ b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py @@ -193,3 +193,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["XLMRobertaTokenizerFast"] diff --git a/src/transformers/models/xlm_roberta_xl/__init__.py b/src/transformers/models/xlm_roberta_xl/__init__.py index 68ae26b06d6ca9..57aaceadacea83 100644 --- a/src/transformers/models/xlm_roberta_xl/__init__.py +++ 
b/src/transformers/models/xlm_roberta_xl/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,60 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_xlm_roberta_xl": [ - "XLMRobertaXLConfig", - "XLMRobertaXLOnnxConfig", - ], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xlm_roberta_xl"] = [ - "XLMRobertaXLForCausalLM", - "XLMRobertaXLForMaskedLM", - "XLMRobertaXLForMultipleChoice", - "XLMRobertaXLForQuestionAnswering", - "XLMRobertaXLForSequenceClassification", - "XLMRobertaXLForTokenClassification", - "XLMRobertaXLModel", - "XLMRobertaXLPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_xlm_roberta_xl import ( - XLMRobertaXLConfig, - XLMRobertaXLOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xlm_roberta_xl import ( - XLMRobertaXLForCausalLM, - XLMRobertaXLForMaskedLM, - XLMRobertaXLForMultipleChoice, - XLMRobertaXLForQuestionAnswering, - XLMRobertaXLForSequenceClassification, - XLMRobertaXLForTokenClassification, - XLMRobertaXLModel, - XLMRobertaXLPreTrainedModel, - ) - + from .configuration_xlm_roberta_xl import * + from .modeling_xlm_roberta_xl import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py index 6ee323ae76ca7b..dce18640a5b768 100644 --- a/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +++ b/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py @@ -148,3 +148,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["XLMRobertaXLConfig", "XLMRobertaXLOnnxConfig"] diff --git a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py index cb88cbeabde2b0..92cd9a218afb1e 100644 --- a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +++ b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py @@ -1675,3 +1675,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "XLMRobertaXLForCausalLM", + "XLMRobertaXLForMaskedLM", + "XLMRobertaXLForMultipleChoice", + "XLMRobertaXLForQuestionAnswering", + "XLMRobertaXLForSequenceClassification", + 
"XLMRobertaXLForTokenClassification", + "XLMRobertaXLModel", + "XLMRobertaXLPreTrainedModel", +] diff --git a/src/transformers/models/xlnet/__init__.py b/src/transformers/models/xlnet/__init__.py index f50d4cc178d3b9..3f453455925328 100644 --- a/src/transformers/models/xlnet/__init__.py +++ b/src/transformers/models/xlnet/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,128 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_xlnet": ["XLNetConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xlnet"] = [ - "XLNetForMultipleChoice", - "XLNetForQuestionAnswering", - "XLNetForQuestionAnsweringSimple", - "XLNetForSequenceClassification", - "XLNetForTokenClassification", - "XLNetLMHeadModel", - "XLNetModel", - "XLNetPreTrainedModel", - "load_tf_weights_in_xlnet", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_xlnet"] = [ - "TFXLNetForMultipleChoice", - "TFXLNetForQuestionAnsweringSimple", - "TFXLNetForSequenceClassification", - "TFXLNetForTokenClassification", - "TFXLNetLMHeadModel", - "TFXLNetMainLayer", - "TFXLNetModel", - "TFXLNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_xlnet import XLNetConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xlnet import XLNetTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xlnet_fast import XLNetTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xlnet import ( - XLNetForMultipleChoice, - XLNetForQuestionAnswering, - XLNetForQuestionAnsweringSimple, - XLNetForSequenceClassification, - XLNetForTokenClassification, - XLNetLMHeadModel, - XLNetModel, - XLNetPreTrainedModel, - load_tf_weights_in_xlnet, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_xlnet import ( - TFXLNetForMultipleChoice, - TFXLNetForQuestionAnsweringSimple, - 
TFXLNetForSequenceClassification, - TFXLNetForTokenClassification, - TFXLNetLMHeadModel, - TFXLNetMainLayer, - TFXLNetModel, - TFXLNetPreTrainedModel, - ) - + from .configuration_xlnet import * + from .modeling_tf_xlnet import * + from .modeling_xlnet import * + from .tokenization_xlnet import * + from .tokenization_xlnet_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/xlnet/configuration_xlnet.py b/src/transformers/models/xlnet/configuration_xlnet.py index 0a35b204f4b361..4a7238eb4c098b 100644 --- a/src/transformers/models/xlnet/configuration_xlnet.py +++ b/src/transformers/models/xlnet/configuration_xlnet.py @@ -235,3 +235,6 @@ def max_position_embeddings(self, value): raise NotImplementedError( f"The model {self.model_type} is one of the few models that has no sequence length limit." ) + + +__all__ = ["XLNetConfig"] diff --git a/src/transformers/models/xlnet/modeling_tf_xlnet.py b/src/transformers/models/xlnet/modeling_tf_xlnet.py index eeacebae044842..83e097ebc83543 100644 --- a/src/transformers/models/xlnet/modeling_tf_xlnet.py +++ b/src/transformers/models/xlnet/modeling_tf_xlnet.py @@ -1807,3 +1807,15 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFXLNetForMultipleChoice", + "TFXLNetForQuestionAnsweringSimple", + "TFXLNetForSequenceClassification", + "TFXLNetForTokenClassification", + "TFXLNetLMHeadModel", + "TFXLNetMainLayer", + "TFXLNetModel", + "TFXLNetPreTrainedModel", +] diff --git a/src/transformers/models/xlnet/modeling_xlnet.py b/src/transformers/models/xlnet/modeling_xlnet.py index 975f08c654bdc7..91f2d09f96f7d8 100755 --- a/src/transformers/models/xlnet/modeling_xlnet.py +++ b/src/transformers/models/xlnet/modeling_xlnet.py @@ -2082,3 +2082,16 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = [ + "XLNetForMultipleChoice", + "XLNetForQuestionAnswering", + "XLNetForQuestionAnsweringSimple", + "XLNetForSequenceClassification", + "XLNetForTokenClassification", + "XLNetLMHeadModel", + "XLNetModel", + "XLNetPreTrainedModel", + "load_tf_weights_in_xlnet", +] diff --git a/src/transformers/models/xlnet/tokenization_xlnet.py b/src/transformers/models/xlnet/tokenization_xlnet.py index 9d4b35775efb18..ab40980211b4ab 100644 --- a/src/transformers/models/xlnet/tokenization_xlnet.py +++ b/src/transformers/models/xlnet/tokenization_xlnet.py @@ -380,3 +380,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["XLNetTokenizer"] diff --git a/src/transformers/models/xlnet/tokenization_xlnet_fast.py b/src/transformers/models/xlnet/tokenization_xlnet_fast.py index a506e8c45a3c1d..0a6ae907ef573f 100644 --- a/src/transformers/models/xlnet/tokenization_xlnet_fast.py +++ b/src/transformers/models/xlnet/tokenization_xlnet_fast.py @@ -229,3 +229,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["XLNetTokenizerFast"] diff --git a/src/transformers/models/xmod/__init__.py 
b/src/transformers/models/xmod/__init__.py index 9b9cb36e3b93e5..13cf20cbe49cba 100644 --- a/src/transformers/models/xmod/__init__.py +++ b/src/transformers/models/xmod/__init__.py @@ -1,8 +1,4 @@ -# flake8: noqa -# There's no way to ignore "F401 '...' imported but unused" warnings in this -# module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,57 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_xmod": [ - "XmodConfig", - "XmodOnnxConfig", - ], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xmod"] = [ - "XmodForCausalLM", - "XmodForMaskedLM", - "XmodForMultipleChoice", - "XmodForQuestionAnswering", - "XmodForSequenceClassification", - "XmodForTokenClassification", - "XmodModel", - "XmodPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_xmod import XmodConfig, XmodOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xmod import ( - XmodForCausalLM, - XmodForMaskedLM, - XmodForMultipleChoice, - XmodForQuestionAnswering, - XmodForSequenceClassification, - XmodForTokenClassification, - XmodModel, - XmodPreTrainedModel, - ) - + from .configuration_xmod import * + from .modeling_xmod import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/xmod/configuration_xmod.py b/src/transformers/models/xmod/configuration_xmod.py index 34261a0d7cdf69..68ad14307f053d 100644 --- a/src/transformers/models/xmod/configuration_xmod.py +++ b/src/transformers/models/xmod/configuration_xmod.py @@ -180,3 +180,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["XmodConfig", "XmodOnnxConfig"] diff --git a/src/transformers/models/xmod/modeling_xmod.py b/src/transformers/models/xmod/modeling_xmod.py index 7208f80d26ca22..9b70e8b2992ed9 100644 --- a/src/transformers/models/xmod/modeling_xmod.py +++ b/src/transformers/models/xmod/modeling_xmod.py @@ -1619,3 +1619,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "XmodForCausalLM", + "XmodForMaskedLM", + "XmodForMultipleChoice", + "XmodForQuestionAnswering", + "XmodForSequenceClassification", + "XmodForTokenClassification", + "XmodModel", + "XmodPreTrainedModel", +] diff --git 
a/src/transformers/models/yolos/__init__.py b/src/transformers/models/yolos/__init__.py index fdf7c5db1cb220..5739fc56fd77b8 100644 --- a/src/transformers/models/yolos/__init__.py +++ b/src/transformers/models/yolos/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,59 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_yolos": ["YolosConfig", "YolosOnnxConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"] - _import_structure["image_processing_yolos"] = ["YolosImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_yolos"] = [ - "YolosForObjectDetection", - "YolosModel", - "YolosPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_yolos import YolosConfig, YolosOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_yolos import YolosFeatureExtractor - from .image_processing_yolos import YolosImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_yolos import ( - YolosForObjectDetection, - YolosModel, - YolosPreTrainedModel, - ) - - + from .configuration_yolos import * + from .feature_extraction_yolos import * + from .image_processing_yolos import * + from .modeling_yolos import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/yolos/configuration_yolos.py b/src/transformers/models/yolos/configuration_yolos.py index d6fe04f4266004..a488216cf8da55 100644 --- a/src/transformers/models/yolos/configuration_yolos.py +++ b/src/transformers/models/yolos/configuration_yolos.py @@ -173,3 +173,6 @@ def atol_for_validation(self) -> float: @property def default_onnx_opset(self) -> int: return 12 + + +__all__ = ["YolosConfig", "YolosOnnxConfig"] diff --git a/src/transformers/models/yolos/feature_extraction_yolos.py b/src/transformers/models/yolos/feature_extraction_yolos.py index 5696ee65bbdfa2..4c9bfdde800f05 100644 --- a/src/transformers/models/yolos/feature_extraction_yolos.py +++ b/src/transformers/models/yolos/feature_extraction_yolos.py @@ -41,3 +41,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["YolosFeatureExtractor"] diff --git a/src/transformers/models/yolos/image_processing_yolos.py b/src/transformers/models/yolos/image_processing_yolos.py index 19b21333f60912..38a99cbea52ae0 100644 --- a/src/transformers/models/yolos/image_processing_yolos.py +++ 
b/src/transformers/models/yolos/image_processing_yolos.py @@ -1530,3 +1530,6 @@ def post_process_object_detection( results.append({"scores": score, "labels": label, "boxes": box}) return results + + +__all__ = ["YolosImageProcessor"] diff --git a/src/transformers/models/yolos/modeling_yolos.py b/src/transformers/models/yolos/modeling_yolos.py index f7ef3e55f5f799..729fd1b354b9b6 100755 --- a/src/transformers/models/yolos/modeling_yolos.py +++ b/src/transformers/models/yolos/modeling_yolos.py @@ -887,3 +887,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel"] diff --git a/src/transformers/models/yoso/__init__.py b/src/transformers/models/yoso/__init__.py index c4c73385017eb7..6b865cb93ce113 100644 --- a/src/transformers/models/yoso/__init__.py +++ b/src/transformers/models/yoso/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,51 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = {"configuration_yoso": ["YosoConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_yoso"] = [ - "YosoForMaskedLM", - "YosoForMultipleChoice", - "YosoForQuestionAnswering", - "YosoForSequenceClassification", - "YosoForTokenClassification", - "YosoLayer", - "YosoModel", - "YosoPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_yoso import YosoConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_yoso import ( - YosoForMaskedLM, - YosoForMultipleChoice, - YosoForQuestionAnswering, - YosoForSequenceClassification, - YosoForTokenClassification, - YosoLayer, - YosoModel, - YosoPreTrainedModel, - ) - - + from .configuration_yoso import * + from .modeling_yoso import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/yoso/configuration_yoso.py b/src/transformers/models/yoso/configuration_yoso.py index e353744cc7a661..9a7fb1218e402e 100644 --- a/src/transformers/models/yoso/configuration_yoso.py +++ b/src/transformers/models/yoso/configuration_yoso.py @@ -139,3 +139,6 @@ def __init__( self.conv_window = conv_window self.use_fast_hash = use_fast_hash self.lsh_backward = lsh_backward + + +__all__ = ["YosoConfig"] diff --git a/src/transformers/models/yoso/modeling_yoso.py b/src/transformers/models/yoso/modeling_yoso.py index a94c6de542ee73..edccabee2ea42d 100644 --- a/src/transformers/models/yoso/modeling_yoso.py +++ b/src/transformers/models/yoso/modeling_yoso.py @@ -1306,3 +1306,15 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "YosoForMaskedLM", + "YosoForMultipleChoice", + "YosoForQuestionAnswering", 
+ "YosoForSequenceClassification", + "YosoForTokenClassification", + "YosoLayer", + "YosoModel", + "YosoPreTrainedModel", +] diff --git a/src/transformers/models/zamba/__init__.py b/src/transformers/models/zamba/__init__.py index e92890d1a71363..48a233755de268 100644 --- a/src/transformers/models/zamba/__init__.py +++ b/src/transformers/models/zamba/__init__.py @@ -13,45 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_zamba": ["ZambaConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_zamba"] = [ - "ZambaForCausalLM", - "ZambaForSequenceClassification", - "ZambaModel", - "ZambaPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_zamba import ZambaConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_zamba import ( - ZambaForCausalLM, - ZambaForSequenceClassification, - ZambaModel, - ZambaPreTrainedModel, - ) - - + from .configuration_zamba import * + from .modeling_zamba import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/zamba/configuration_zamba.py b/src/transformers/models/zamba/configuration_zamba.py index 77aa940141f295..df165154a00b7a 100644 --- a/src/transformers/models/zamba/configuration_zamba.py +++ b/src/transformers/models/zamba/configuration_zamba.py @@ -222,3 +222,6 @@ def _layers_block_type(self, num_hidden_layers, attn_layer_period, attn_layer_of "hybrid", ] + ["hybrid" if i % attn_layer_period == attn_layer_offset else "mamba" for i in range(num_hidden_layers - 3)] return layers + + +__all__ = ["ZambaConfig"] diff --git a/src/transformers/models/zamba/modeling_zamba.py b/src/transformers/models/zamba/modeling_zamba.py index dee7f898fcf93a..0194dfc0885250 100644 --- a/src/transformers/models/zamba/modeling_zamba.py +++ b/src/transformers/models/zamba/modeling_zamba.py @@ -1669,3 +1669,6 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = ["ZambaForCausalLM", "ZambaForSequenceClassification", "ZambaModel", "ZambaPreTrainedModel"] diff --git a/src/transformers/models/zoedepth/__init__.py b/src/transformers/models/zoedepth/__init__.py index 15ba0883d83241..99879e0f85c2e2 100644 --- a/src/transformers/models/zoedepth/__init__.py +++ b/src/transformers/models/zoedepth/__init__.py @@ -13,55 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...file_utils import _LazyModule, is_torch_available, is_vision_available -from ...utils import OptionalDependencyNotAvailable - - -_import_structure = {"configuration_zoedepth": ["ZOEDEPTH_PRETRAINED_CONFIG_ARCHIVE_MAP", "ZoeDepthConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_zoedepth"] = [ - "ZoeDepthForDepthEstimation", - "ZoeDepthPreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_zoedepth"] = ["ZoeDepthImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_zoedepth import ZOEDEPTH_PRETRAINED_CONFIG_ARCHIVE_MAP, ZoeDepthConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_zoedepth import ( - ZoeDepthForDepthEstimation, - ZoeDepthPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_zoedepth import ZoeDepthImageProcessor - + from .configuration_zoedepth import * + from .image_processing_zoedepth import * + from .modeling_zoedepth import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/zoedepth/configuration_zoedepth.py b/src/transformers/models/zoedepth/configuration_zoedepth.py index 1b7e2695eb98c9..bffedf321234d8 100644 --- a/src/transformers/models/zoedepth/configuration_zoedepth.py +++ b/src/transformers/models/zoedepth/configuration_zoedepth.py @@ -232,3 +232,6 @@ def __init__( self.patch_transformer_hidden_size = patch_transformer_hidden_size self.patch_transformer_intermediate_size = patch_transformer_intermediate_size self.patch_transformer_num_attention_heads = patch_transformer_num_attention_heads + + +__all__ = ["ZOEDEPTH_PRETRAINED_CONFIG_ARCHIVE_MAP", "ZoeDepthConfig"] diff --git a/src/transformers/models/zoedepth/image_processing_zoedepth.py b/src/transformers/models/zoedepth/image_processing_zoedepth.py index 2211ab07c09d4c..de5f6795f0c8b6 100644 --- a/src/transformers/models/zoedepth/image_processing_zoedepth.py +++ b/src/transformers/models/zoedepth/image_processing_zoedepth.py @@ -556,3 +556,6 @@ def post_process_depth_estimation( results.append({"predicted_depth": depth}) return results + + +__all__ = ["ZoeDepthImageProcessor"] diff --git a/src/transformers/models/zoedepth/modeling_zoedepth.py b/src/transformers/models/zoedepth/modeling_zoedepth.py index 5cbbdcdc04b756..d4ff15fa1f41d3 100644 --- a/src/transformers/models/zoedepth/modeling_zoedepth.py +++ b/src/transformers/models/zoedepth/modeling_zoedepth.py @@ -1400,3 +1400,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["ZoeDepthForDepthEstimation", "ZoeDepthPreTrainedModel"] diff --git a/utils/check_repo.py b/utils/check_repo.py index 3dbe59f192293a..4b6c961d356fe7 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -1106,7 +1106,7 @@ def 
check_public_method_exists(documented_methods_map): try: obj_class = getattr(submodule, class_name) except AttributeError: - failures.append(f"Could not parse {submodule_name}. Are the required dependencies installed?") + failures.append(f"Could not parse {class_name}. Are the required dependencies installed?") continue # Checks that all explicitly documented methods are defined in the class
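
The refactor applied throughout this patch has two halves: every module file now declares its public surface in an explicit `__all__`, and every package `__init__.py` shrinks to a single `_LazyModule` registration whose import structure is computed at runtime by `define_import_structure` from those `__all__` lists, replacing the hand-maintained `_import_structure` dicts and their per-backend `OptionalDependencyNotAvailable` try/except guards. A minimal sketch of the idea follows; the helper name `toy_define_import_structure` and the `ast`-based scan are illustrative assumptions only -- the real implementation lives in `src/transformers/utils/import_utils.py` and is more involved (it must, for instance, account for optional backends and subpackages).

# Illustrative sketch only -- NOT the real transformers helper. It assumes a
# flat package whose modules each define a literal ``__all__`` list, which is
# exactly the invariant this patch establishes for every model folder.
import ast
import os


def toy_define_import_structure(init_file):
    """Map each sibling module to the names its ``__all__`` exports."""
    structure = {}
    package_dir = os.path.dirname(os.path.abspath(init_file))
    for filename in sorted(os.listdir(package_dir)):
        if filename == "__init__.py" or not filename.endswith(".py"):
            continue
        path = os.path.join(package_dir, filename)
        with open(path, encoding="utf-8") as f:
            tree = ast.parse(f.read(), filename=path)
        for node in tree.body:
            # Look for a top-level ``__all__ = [...]`` assignment.
            is_all = isinstance(node, ast.Assign) and any(
                isinstance(target, ast.Name) and target.id == "__all__"
                for target in node.targets
            )
            if is_all and isinstance(node.value, (ast.List, ast.Tuple)):
                structure[filename[:-3]] = [
                    element.value
                    for element in node.value.elts
                    if isinstance(element, ast.Constant)
                ]
    return structure

Run against the whisper folder touched above, such a scan would produce a mapping of the form {"configuration_whisper": ["WhisperConfig", "WhisperOnnxConfig"], "processing_whisper": ["WhisperProcessor"], ...} -- the same shape `_LazyModule` previously received as a hand-written `_import_structure` dict. User-facing imports are unchanged: attribute access on the package still triggers the real module import on first use, so unavailable backends only fail when their classes are actually requested.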