[test_all] Applies the rest of the init refactor except to modular files
LysandreJik committed Dec 19, 2024
1 parent 69e31eb commit 9f13265
Showing 1,020 changed files with 5,353 additions and 13,575 deletions.
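
For context: this is the "init refactor" the commit title refers to. Each model's hand-maintained `_import_structure` table and per-backend try/except guards are replaced by a shared helper, `define_import_structure` (imported from `...utils.import_utils` in the diffs below), which derives the lazy-import table from the `__all__` list this commit adds to each public submodule. A minimal sketch of the new-style `__init__.py`, condensed from the clvp diff below:

from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Type checkers see the real imports; each submodule's __all__
    # decides what the wildcard re-exports.
    from .configuration_clvp import *
    from .modeling_clvp import *
else:
    import sys

    # At runtime the package is swapped for a _LazyModule that resolves
    # attributes on first access, using the table built from __all__.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)

Conversion scripts (`convert_*`) are dropped from the inits entirely, since they are not part of the public API.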
1 change: 0 additions & 1 deletion src/transformers/models/audio_spectrogram_transformer/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_audio_spectrogram_transformer import *
-    from .convert_audio_spectrogram_transformer_original_to_pytorch import *
     from .feature_extraction_audio_spectrogram_transformer import *
     from .modeling_audio_spectrogram_transformer import *
 else:
2 changes: 0 additions & 2 deletions src/transformers/models/bark/__init__.py
@@ -19,8 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_bark import *
-    from .convert_suno_to_hf import *
-    from .generation_configuration_bark import *
     from .modeling_bark import *
     from .processing_bark import *
 else:
1 change: 0 additions & 1 deletion src/transformers/models/bart/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_bart import *
-    from .convert_bart_original_pytorch_checkpoint_to_pytorch import *
     from .modeling_bart import *
     from .modeling_flax_bart import *
     from .modeling_tf_bart import *
1 change: 0 additions & 1 deletion src/transformers/models/beit/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_beit import *
-    from .convert_beit_unilm_to_pytorch import *
     from .feature_extraction_beit import *
     from .image_processing_beit import *
     from .modeling_beit import *
4 changes: 0 additions & 4 deletions src/transformers/models/bert/__init__.py
@@ -19,10 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_bert import *
-    from .convert_bert_original_tf2_checkpoint_to_pytorch import *
-    from .convert_bert_original_tf_checkpoint_to_pytorch import *
-    from .convert_bert_pytorch_checkpoint_to_original_tf import *
-    from .convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch import *
     from .modeling_bert import *
     from .modeling_flax_bert import *
     from .modeling_tf_bert import *
1 change: 0 additions & 1 deletion src/transformers/models/big_bird/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_big_bird import *
-    from .convert_bigbird_original_tf_checkpoint_to_pytorch import *
     from .modeling_big_bird import *
     from .modeling_flax_big_bird import *
     from .tokenization_big_bird import *
1 change: 0 additions & 1 deletion src/transformers/models/bigbird_pegasus/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_bigbird_pegasus import *
-    from .convert_bigbird_pegasus_tf_to_pytorch import *
     from .modeling_bigbird_pegasus import *
 else:
     import sys
1 change: 0 additions & 1 deletion src/transformers/models/biogpt/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_biogpt import *
-    from .convert_biogpt_original_pytorch_checkpoint_to_pytorch import *
     from .modeling_biogpt import *
     from .tokenization_biogpt import *
 else:
1 change: 0 additions & 1 deletion src/transformers/models/bit/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_bit import *
-    from .convert_bit_to_pytorch import *
     from .image_processing_bit import *
     from .modeling_bit import *
 else:
1 change: 0 additions & 1 deletion src/transformers/models/blenderbot/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_blenderbot import *
-    from .convert_blenderbot_original_pytorch_checkpoint_to_pytorch import *
     from .modeling_blenderbot import *
     from .modeling_flax_blenderbot import *
     from .modeling_tf_blenderbot import *
3 changes: 0 additions & 3 deletions src/transformers/models/blip/__init__.py
@@ -19,12 +19,9 @@
 
 if TYPE_CHECKING:
     from .configuration_blip import *
-    from .convert_blip_original_pytorch_to_hf import *
     from .image_processing_blip import *
     from .modeling_blip import *
-    from .modeling_blip_text import *
     from .modeling_tf_blip import *
-    from .modeling_tf_blip_text import *
     from .processing_blip import *
 else:
     import sys
1 change: 0 additions & 1 deletion src/transformers/models/blip_2/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_blip_2 import *
-    from .convert_blip_2_original_to_pytorch import *
     from .modeling_blip_2 import *
     from .processing_blip_2 import *
 else:
1 change: 0 additions & 1 deletion src/transformers/models/bloom/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_bloom import *
-    from .convert_bloom_original_checkpoint_to_pytorch import *
     from .modeling_bloom import *
     from .modeling_flax_bloom import *
     from .tokenization_bloom_fast import *
1 change: 0 additions & 1 deletion src/transformers/models/bros/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_bros import *
-    from .convert_bros_to_pytorch import *
     from .modeling_bros import *
     from .processing_bros import *
 else:
1 change: 0 additions & 1 deletion src/transformers/models/byt5/__init__.py
@@ -18,7 +18,6 @@
 
 
 if TYPE_CHECKING:
-    from .convert_byt5_original_tf_checkpoint_to_pytorch import *
     from .tokenization_byt5 import *
 else:
     import sys
1 change: 0 additions & 1 deletion src/transformers/models/canine/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_canine import *
-    from .convert_canine_original_tf_checkpoint_to_pytorch import *
     from .modeling_canine import *
     from .tokenization_canine import *
 else:
1 change: 0 additions & 1 deletion src/transformers/models/chameleon/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_chameleon import *
-    from .convert_chameleon_weights_to_hf import *
     from .image_processing_chameleon import *
     from .modeling_chameleon import *
     from .processing_chameleon import *
1 change: 0 additions & 1 deletion src/transformers/models/chinese_clip/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_chinese_clip import *
-    from .convert_chinese_clip_original_pytorch_to_hf import *
     from .feature_extraction_chinese_clip import *
     from .image_processing_chinese_clip import *
     from .modeling_chinese_clip import *
1 change: 0 additions & 1 deletion src/transformers/models/clap/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_clap import *
-    from .convert_clap_original_pytorch_to_hf import *
     from .feature_extraction_clap import *
     from .modeling_clap import *
     from .processing_clap import *
1 change: 0 additions & 1 deletion src/transformers/models/clip/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_clip import *
-    from .convert_clip_original_pytorch_to_hf import *
     from .feature_extraction_clip import *
     from .image_processing_clip import *
     from .modeling_clip import *
1 change: 0 additions & 1 deletion src/transformers/models/clipseg/__init__.py
@@ -19,7 +19,6 @@
 
 if TYPE_CHECKING:
     from .configuration_clipseg import *
-    from .convert_clipseg_original_pytorch_to_hf import *
     from .modeling_clipseg import *
     from .processing_clipseg import *
 else:
69 changes: 10 additions & 59 deletions src/transformers/models/clvp/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,67 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
-
-
-_import_structure = {
-    "configuration_clvp": [
-        "ClvpConfig",
-        "ClvpDecoderConfig",
-        "ClvpEncoderConfig",
-    ],
-    "feature_extraction_clvp": ["ClvpFeatureExtractor"],
-    "processing_clvp": ["ClvpProcessor"],
-    "tokenization_clvp": ["ClvpTokenizer"],
-}
-
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_clvp"] = [
-        "ClvpModelForConditionalGeneration",
-        "ClvpForCausalLM",
-        "ClvpModel",
-        "ClvpPreTrainedModel",
-        "ClvpEncoder",
-        "ClvpDecoder",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_clvp import (
-        ClvpConfig,
-        ClvpDecoderConfig,
-        ClvpEncoderConfig,
-    )
-    from .feature_extraction_clvp import ClvpFeatureExtractor
-    from .processing_clvp import ClvpProcessor
-    from .tokenization_clvp import ClvpTokenizer
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_clvp import (
-            ClvpDecoder,
-            ClvpEncoder,
-            ClvpForCausalLM,
-            ClvpModel,
-            ClvpModelForConditionalGeneration,
-            ClvpPreTrainedModel,
-        )
-
+    from .configuration_clvp import *
+    from .feature_extraction_clvp import *
+    from .modeling_clvp import *
+    from .processing_clvp import *
+    from .tokenization_clvp import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
3 changes: 3 additions & 0 deletions src/transformers/models/clvp/configuration_clvp.py
@@ -438,3 +438,6 @@ def from_sub_model_configs
             decoder_config=decoder_config.to_dict(),
             **kwargs,
         )
+
+
+__all__ = ["ClvpConfig", "ClvpDecoderConfig", "ClvpEncoderConfig"]
3 changes: 3 additions & 0 deletions src/transformers/models/clvp/feature_extraction_clvp.py
@@ -236,3 +236,6 @@ def __call__
         padded_inputs["input_features"] = input_features
 
         return padded_inputs.convert_to_tensors(return_tensors)
+
+
+__all__ = ["ClvpFeatureExtractor"]
10 changes: 10 additions & 0 deletions src/transformers/models/clvp/modeling_clvp.py
@@ -2021,3 +2021,13 @@ def generate
             text_encoder_hidden_states=text_outputs.hidden_states,
             speech_encoder_hidden_states=speech_outputs.hidden_states,
         )
+
+
+__all__ = [
+    "ClvpModelForConditionalGeneration",
+    "ClvpForCausalLM",
+    "ClvpModel",
+    "ClvpPreTrainedModel",
+    "ClvpEncoder",
+    "ClvpDecoder",
+]
3 changes: 3 additions & 0 deletions src/transformers/models/clvp/processing_clvp.py
@@ -88,3 +88,6 @@ def decode(self, *args, **kwargs):
         the docstring of this method for more information.
         """
         return self.tokenizer.decode(*args, **kwargs)
+
+
+__all__ = ["ClvpProcessor"]
3 changes: 3 additions & 0 deletions src/transformers/models/clvp/tokenization_clvp.py
@@ -362,3 +362,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
                 index += 1
 
         return vocab_file, merge_file
+
+
+__all__ = ["ClvpTokenizer"]
44 changes: 7 additions & 37 deletions src/transformers/models/code_llama/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 MetaAI and The HuggingFace Inc. team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,45 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {}
-
-try:
-    if not is_sentencepiece_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_code_llama"] = ["CodeLlamaTokenizer"]
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_code_llama_fast"] = ["CodeLlamaTokenizerFast"]
-
 if TYPE_CHECKING:
-    try:
-        if not is_sentencepiece_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_code_llama import CodeLlamaTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_code_llama_fast import CodeLlamaTokenizerFast
-
+    from .tokenization_code_llama import *
+    from .tokenization_code_llama_fast import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
3 changes: 3 additions & 0 deletions src/transformers/models/code_llama/tokenization_code_llama.py
@@ -447,3 +447,6 @@ def __setstate__(self, d):
         self.__dict__ = d
         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
         self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+
+__all__ = ["CodeLlamaTokenizer"]
3 changes: 3 additions & 0 deletions src/transformers/models/code_llama/tokenization_code_llama_fast.py
@@ -376,3 +376,6 @@ def build_inputs_with_special_tokens(
         if token_ids_1 is None:
             return self.bos_token_id + token_ids_0 + self.eos_token_id
         return self.bos_token_id + token_ids_0 + token_ids_1 + self.eos_token_id
+
+
+__all__ = ["CodeLlamaTokenizerFast"]
[Diff truncated — 1,020 files changed in this commit; the remaining files are not shown here.]