Cleanup: continue the init refactor (#35167)
Round 2
LysandreJik authored Dec 9, 2024
1 parent 7238387 commit 8e806a3
Showing 42 changed files with 263 additions and 611 deletions.
48 changes: 9 additions & 39 deletions src/transformers/models/audio_spectrogram_transformer/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,47 +13,17 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {
-    "configuration_audio_spectrogram_transformer": ["ASTConfig"],
-    "feature_extraction_audio_spectrogram_transformer": ["ASTFeatureExtractor"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_audio_spectrogram_transformer"] = [
-        "ASTForAudioClassification",
-        "ASTModel",
-        "ASTPreTrainedModel",
-    ]
-
-
 if TYPE_CHECKING:
-    from .configuration_audio_spectrogram_transformer import (
-        ASTConfig,
-    )
-    from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_audio_spectrogram_transformer import (
-            ASTForAudioClassification,
-            ASTModel,
-            ASTPreTrainedModel,
-        )
-
-
+    from .configuration_audio_spectrogram_transformer import *
+    from .convert_audio_spectrogram_transformer_original_to_pytorch import *
+    from .feature_extraction_audio_spectrogram_transformer import *
+    from .modeling_audio_spectrogram_transformer import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
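Note: the pattern above replaces the hand-maintained _import_structure dict and its per-backend try/except OptionalDependencyNotAvailable guards with define_import_structure, which derives the lazy-import map from the __all__ lists this commit adds to each submodule. The sketch below illustrates the idea only; it is an assumed simplification, not the actual helper in transformers.utils.import_utils (the real one also accounts for optional backends such as torch, tf, and flax):

# Hypothetical sketch: build a {submodule: [public names]} map by reading the
# top-level __all__ list of every sibling module. Assumed behavior for
# illustration, not the real implementation.
import ast
from pathlib import Path


def define_import_structure(init_file: str) -> dict:
    structure = {}
    for path in Path(init_file).parent.glob("*.py"):
        if path.name == "__init__.py":
            continue
        tree = ast.parse(path.read_text(encoding="utf-8"))
        for node in tree.body:
            # Only a module-level `__all__ = [...]` assignment is considered.
            if (
                isinstance(node, ast.Assign)
                and any(isinstance(t, ast.Name) and t.id == "__all__" for t in node.targets)
                and isinstance(node.value, ast.List)
            ):
                structure[path.stem] = [
                    elt.value for elt in node.value.elts if isinstance(elt, ast.Constant)
                ]
    return structure

Because the map is derived rather than duplicated, exposing a new class through the package __init__ only requires adding it to its module's __all__.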
3 changes: 3 additions & 0 deletions src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py
@@ -126,3 +126,6 @@ def __init__(
     # generative parameters deprecation cycle, overwriting this function prevents this from happening.
     def _get_non_default_generation_parameters(self) -> Dict[str, Any]:
         return {}
+
+
+__all__ = ["ASTConfig"]
3 changes: 3 additions & 0 deletions src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py
@@ -234,3 +234,6 @@ def __call__(
         padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

         return padded_inputs
+
+
+__all__ = ["ASTFeatureExtractor"]
3 changes: 3 additions & 0 deletions src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py
@@ -670,3 +670,6 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = ["ASTForAudioClassification", "ASTModel", "ASTPreTrainedModel"]
65 changes: 10 additions & 55 deletions src/transformers/models/bark/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,63 +13,18 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {
-    "configuration_bark": [
-        "BarkCoarseConfig",
-        "BarkConfig",
-        "BarkFineConfig",
-        "BarkSemanticConfig",
-    ],
-    "processing_bark": ["BarkProcessor"],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_bark"] = [
-        "BarkFineModel",
-        "BarkSemanticModel",
-        "BarkCoarseModel",
-        "BarkModel",
-        "BarkPreTrainedModel",
-        "BarkCausalModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_bark import (
-        BarkCoarseConfig,
-        BarkConfig,
-        BarkFineConfig,
-        BarkSemanticConfig,
-    )
-    from .processing_bark import BarkProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_bark import (
-            BarkCausalModel,
-            BarkCoarseModel,
-            BarkFineModel,
-            BarkModel,
-            BarkPreTrainedModel,
-            BarkSemanticModel,
-        )
-
+    from .configuration_bark import *
+    from .convert_suno_to_hf import *
+    from .generation_configuration_bark import *
+    from .modeling_bark import *
+    from .processing_bark import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
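As in the other inits, the else branch swaps the package module for a _LazyModule that imports submodules on demand. A minimal sketch of that mechanism follows, assuming simplified behavior (the real class in transformers.utils additionally handles __dir__, pickling, and backend availability checks):

# Simplified sketch of the _LazyModule idea; an assumption for illustration,
# not the actual class shipped in transformers.
import importlib
from types import ModuleType


class LazyModuleSketch(ModuleType):
    def __init__(self, name, module_file, import_structure, module_spec=None):
        super().__init__(name)
        self.__file__ = module_file
        self.__spec__ = module_spec
        # Invert {submodule: [names]} into {name: submodule} for O(1) lookup.
        self._name_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, name):
        # Called only when normal attribute lookup fails, i.e. first access.
        if name not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._name_to_module[name], self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache: later accesses bypass __getattr__
        return value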
3 changes: 3 additions & 0 deletions src/transformers/models/bark/configuration_bark.py
@@ -298,3 +298,6 @@ def from_sub_model_configs(
             codec_config=codec_config.to_dict(),
             **kwargs,
         )
+
+
+__all__ = ["BarkCoarseConfig", "BarkConfig", "BarkFineConfig", "BarkSemanticConfig"]
10 changes: 10 additions & 0 deletions src/transformers/models/bark/modeling_bark.py
@@ -1819,3 +1819,13 @@ def _check_and_enable_flash_attn_2(
         config.coarse_acoustics_config._attn_implementation = config._attn_implementation
         config.fine_acoustics_config._attn_implementation = config._attn_implementation
         return config
+
+
+__all__ = [
+    "BarkFineModel",
+    "BarkSemanticModel",
+    "BarkCoarseModel",
+    "BarkModel",
+    "BarkPreTrainedModel",
+    "BarkCausalModel",
+]
3 changes: 3 additions & 0 deletions src/transformers/models/bark/processing_bark.py
@@ -285,3 +285,6 @@ def __call__(
         encoded_text["history_prompt"] = voice_preset

         return encoded_text
+
+
+__all__ = ["BarkProcessor"]
138 changes: 12 additions & 126 deletions src/transformers/models/bart/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,134 +13,20 @@
 # limitations under the License.
 from typing import TYPE_CHECKING

-from ...utils import (
-    OptionalDependencyNotAvailable,
-    _LazyModule,
-    is_flax_available,
-    is_tf_available,
-    is_tokenizers_available,
-    is_torch_available,
-)
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure


-_import_structure = {
-    "configuration_bart": ["BartConfig", "BartOnnxConfig"],
-    "tokenization_bart": ["BartTokenizer"],
-}
-
-try:
-    if not is_tokenizers_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["tokenization_bart_fast"] = ["BartTokenizerFast"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_bart"] = [
-        "BartForCausalLM",
-        "BartForConditionalGeneration",
-        "BartForQuestionAnswering",
-        "BartForSequenceClassification",
-        "BartModel",
-        "BartPreTrainedModel",
-        "BartPretrainedModel",
-        "PretrainedBartModel",
-    ]
-
-try:
-    if not is_tf_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_tf_bart"] = [
-        "TFBartForConditionalGeneration",
-        "TFBartForSequenceClassification",
-        "TFBartModel",
-        "TFBartPretrainedModel",
-    ]
-
-try:
-    if not is_flax_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_flax_bart"] = [
-        "FlaxBartDecoderPreTrainedModel",
-        "FlaxBartForCausalLM",
-        "FlaxBartForConditionalGeneration",
-        "FlaxBartForQuestionAnswering",
-        "FlaxBartForSequenceClassification",
-        "FlaxBartModel",
-        "FlaxBartPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_bart import BartConfig, BartOnnxConfig
-    from .tokenization_bart import BartTokenizer
-
-    try:
-        if not is_tokenizers_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .tokenization_bart_fast import BartTokenizerFast
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_bart import (
-            BartForCausalLM,
-            BartForConditionalGeneration,
-            BartForQuestionAnswering,
-            BartForSequenceClassification,
-            BartModel,
-            BartPreTrainedModel,
-            BartPretrainedModel,
-            PretrainedBartModel,
-        )
-
-    try:
-        if not is_tf_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_tf_bart import (
-            TFBartForConditionalGeneration,
-            TFBartForSequenceClassification,
-            TFBartModel,
-            TFBartPretrainedModel,
-        )
-
-    try:
-        if not is_flax_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_flax_bart import (
-            FlaxBartDecoderPreTrainedModel,
-            FlaxBartForCausalLM,
-            FlaxBartForConditionalGeneration,
-            FlaxBartForQuestionAnswering,
-            FlaxBartForSequenceClassification,
-            FlaxBartModel,
-            FlaxBartPreTrainedModel,
-        )
-
+    from .configuration_bart import *
+    from .convert_bart_original_pytorch_checkpoint_to_pytorch import *
+    from .modeling_bart import *
+    from .modeling_flax_bart import *
+    from .modeling_tf_bart import *
+    from .tokenization_bart import *
+    from .tokenization_bart_fast import *
 else:
     import sys

-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
3 changes: 3 additions & 0 deletions src/transformers/models/bart/configuration_bart.py
@@ -400,3 +400,6 @@ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
         flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
             flattened_output, name, idx, t
         )
+
+
+__all__ = ["BartConfig", "BartOnnxConfig"]
12 changes: 12 additions & 0 deletions src/transformers/models/bart/modeling_bart.py
@@ -2158,3 +2158,15 @@ def _reorder_cache(past_key_values, beam_idx):
                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
             )
         return reordered_past
+
+
+__all__ = [
+    "BartForCausalLM",
+    "BartForConditionalGeneration",
+    "BartForQuestionAnswering",
+    "BartForSequenceClassification",
+    "BartModel",
+    "BartPreTrainedModel",
+    "BartPretrainedModel",
+    "PretrainedBartModel",
+]
11 changes: 11 additions & 0 deletions src/transformers/models/bart/modeling_flax_bart.py
@@ -1993,3 +1993,14 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs):
     FlaxCausalLMOutputWithCrossAttentions,
     _CONFIG_FOR_DOC,
 )
+
+
+__all__ = [
+    "FlaxBartDecoderPreTrainedModel",
+    "FlaxBartForCausalLM",
+    "FlaxBartForConditionalGeneration",
+    "FlaxBartForQuestionAnswering",
+    "FlaxBartForSequenceClassification",
+    "FlaxBartModel",
+    "FlaxBartPreTrainedModel",
+]
3 changes: 3 additions & 0 deletions src/transformers/models/bart/modeling_tf_bart.py
@@ -1709,3 +1709,6 @@ def build(self, input_shape=None):
         if getattr(self, "classification_head", None) is not None:
             with tf.name_scope(self.classification_head.name):
                 self.classification_head.build(None)
+
+
+__all__ = ["TFBartForConditionalGeneration", "TFBartForSequenceClassification", "TFBartModel", "TFBartPretrainedModel"]
3 changes: 3 additions & 0 deletions src/transformers/models/bart/tokenization_bart.py
@@ -388,3 +388,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
         if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
             text = " " + text
         return (text, kwargs)
+
+
+__all__ = ["BartTokenizer"]