Skip to content

Commit

Permalink
use a single for loop (huggingface#33148)
Browse files Browse the repository at this point in the history
* use a single for loop

* oops

* fixup

* fix typo
  • Loading branch information
ArthurZucker authored Aug 29, 2024
1 parent 5129671 commit c409cd8
Show file tree
Hide file tree
Showing 10 changed files with 168 additions and 218 deletions.
42 changes: 19 additions & 23 deletions src/transformers/models/bit/image_processing_bit.py
Original file line number Diff line number Diff line change
Expand Up @@ -294,31 +294,27 @@ def preprocess(
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])

if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]

if do_center_crop:
images = [
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
]

if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]

if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)

all_images.append(image)

images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]

data = {"pixel_values": images}
Expand Down
40 changes: 17 additions & 23 deletions src/transformers/models/chameleon/image_processing_chameleon.py
Original file line number Diff line number Diff line change
Expand Up @@ -311,32 +311,26 @@ def preprocess(
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]

if do_center_crop:
images = [
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
]

if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]

if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)

all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]

data = {"pixel_values": images}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -280,31 +280,26 @@ def preprocess(
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])

if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

if do_center_crop:
images = [
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
]
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)

all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]

data = {"pixel_values": images}
Expand Down
41 changes: 18 additions & 23 deletions src/transformers/models/clip/image_processing_clip.py
Original file line number Diff line number Diff line change
Expand Up @@ -319,31 +319,26 @@ def preprocess(
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])

if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]

if do_center_crop:
images = [
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
]

if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]

if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)

all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]

data = {"pixel_values": images}
Expand Down
35 changes: 15 additions & 20 deletions src/transformers/models/deit/image_processing_deit.py
Original file line number Diff line number Diff line change
Expand Up @@ -270,31 +270,26 @@ def preprocess(
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])

if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

if do_center_crop:
images = [
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
]
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)

all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]

data = {"pixel_values": images}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -312,31 +312,26 @@ def preprocess(
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])

if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

if do_center_crop:
images = [
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
]
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)

all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]

data = {"pixel_values": images}
Expand Down
41 changes: 18 additions & 23 deletions src/transformers/models/llava_next/image_processing_llava_next.py
Original file line number Diff line number Diff line change
Expand Up @@ -409,31 +409,26 @@ def _preprocess(
"""
images = make_list_of_images(images)

if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]

if do_center_crop:
images = [
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
]

if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]

if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)

all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]

return images
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -272,31 +272,26 @@ def _preprocess(
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])

if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]

if do_center_crop:
images = [
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
]

if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]

if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)

all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]

return images
Expand Down
Loading

0 comments on commit c409cd8

Please sign in to comment.