image_Utils.py
from typing import *
from functools import partial
from pathlib import Path
import json

import cv2
import torch
import accimage
from PIL import Image
from tqdm import tqdm
from torchvision.transforms.functional import to_tensor

from Utils import *  # project helpers: Benchmark, ensure_file, ensure_path, pkl_dump, pkl_load, run_multi_process

def run(tasks, config: Dict):
    """Worker entry point: apply the function named in config['func_name'] to every task in this chunk."""
    proc_num, tasks = tasks
    func = eval(config['func_name'])  # resolve the processing function by name
    results = []
    with Benchmark(config['func_name'], print=proc_num == 0) as t:  # only worker 0 prints timing
        for i in range(len(tasks)):
            if i % (len(tasks) // 10 + 1) == 0:  # report progress roughly every 10% of the chunk
                t.print_elapsed(f'{i}/{len(tasks)}, {i / len(tasks) * 100:.2f}%')
            task = tasks[i]
            results.append(func(task, config))
    return results

def printable_dict(x):
    """Convert every key and value to str so the dict can be dumped with json.dumps."""
    return {str(k): str(v) for k, v in x.items()}

def run_mp(tasks: List, config: Dict):
    """Print the config, then split tasks across config['n_proc'] worker processes running run()."""
    n_proc = config['n_proc']
    print(f"Run {config['func_name']}, config = \n{json.dumps(printable_dict(config), indent=4, ensure_ascii=False)}")
    return run_multi_process(tasks, n_proc, partial(run, config=config), with_proc_num=True)

def image_resize(filepath: str, config: Dict):
    """Resize one image to config['output_size'] and save it under config['output_root']."""
    input_root = config['input_root']
    output_root = config['output_root']
    output_size = config['output_size']
    output_suffix = config['output_suffix']
    input_path = str(input_root / filepath)
    output_path = str((output_root / filepath).with_suffix(output_suffix))
    img = Image.open(input_path)
    img = img.resize(output_size, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    ensure_file(output_path)
    img.save(output_path)

def video_to_frames(filepath: str, config: Dict):
    """Decode a video, resize every frame, and write the frames into one directory per video."""
    input_root = config['input_root']
    output_root = config['output_root']
    output_size = config['output_size']
    output_suffix = config['output_suffix']
    input_path = str(input_root / filepath)
    output_path = (output_root / filepath).with_suffix('')  # strip the extension to get the frame directory
    ensure_path(output_path)
    vidcap = cv2.VideoCapture(input_path)
    success, image = vidcap.read()
    count = 0
    while success:
        image = cv2.resize(image, dsize=output_size, interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(str(output_path / f'frame_{count}{output_suffix}'), image)
        success, image = vidcap.read()
        count += 1
    vidcap.release()  # release the capture handle once all frames are read

def get_mean_std(filepath: str, config: Dict):
    """Return per-channel statistics of one image as a flat array: [mean_R, mean_G, mean_B, std_R, std_G, std_B]."""
    input_root = config['input_root']
    input_path = str(input_root / filepath)
    img = to_tensor(accimage.Image(input_path))  # decode with accimage and scale to [0, 1]
    img = img.view(3, -1)  # flatten the spatial dimensions per channel
    means = img.mean(dim=-1)
    stds = img.std(dim=-1)
    result = torch.cat([means, stds], dim=0).numpy()
    return result

def convert(config: Dict):
    """Collect every file matching config['input_suffix'] under config['input_root'] and process it in parallel."""
    input_root = config['input_root']
    input_suffix = config['input_suffix']
    if not config['cached']:
        tasks = []
        for file in tqdm(input_root.rglob(f'*{input_suffix}')):
            tasks.append(str(file.relative_to(input_root)))
        pkl_dump(tasks, 'tmp/tasks.pkl')  # cache the file list so later runs can skip the directory walk
    else:
        tasks = pkl_load('tmp/tasks.pkl')
    print(f'Collected all files: {len(tasks)}, example: {tasks[0]}')
    return run_mp(tasks, config)
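
# Illustrative sketch (not part of the original script): dataset-level statistics could be
# derived from the per-image arrays returned by get_mean_std. This assumes run_mp returns one
# flat list with a (6,) array per image; the actual shape depends on the project's
# run_multi_process helper, and averaging per-image stds only approximates the dataset std.
# Requires `import numpy as np`.
#
#   stats = np.stack(convert({**config, 'func_name': 'get_mean_std'}))
#   dataset_mean = stats[:, :3].mean(axis=0)
#   dataset_std = stats[:, 3:].mean(axis=0)
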
if __name__ == '__main__':
    # Resize the 600px images under img_root into 512x512 copies.
    img_root = Path('/home/octusr2/projects/data_fast/proceeded/cp_projection/')
    img_root_600 = img_root / '600'
    img_root_512 = img_root / '512'
    config = {
        'input_root': img_root_600,
        'output_root': img_root_512,
        'func_name': 'image_resize',
        'output_size': (512, 512),
        'input_suffix': '.jpg',
        'output_suffix': '.jpg',
        'n_proc': 80,
        'cached': False,
    }
    convert(config)
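
    # Illustrative alternative (an assumption, not from the original script): the same convert()
    # pipeline could drive video_to_frames by swapping the function name and suffixes. The paths
    # and values below are hypothetical placeholders.
    #
    #   video_config = {
    #       'input_root': Path('/path/to/videos'),   # hypothetical
    #       'output_root': Path('/path/to/frames'),  # hypothetical
    #       'func_name': 'video_to_frames',
    #       'output_size': (512, 512),
    #       'input_suffix': '.mp4',
    #       'output_suffix': '.jpg',
    #       'n_proc': 8,
    #       'cached': False,
    #   }
    #   convert(video_config)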