From ed9dffccf2dc265777b8551034ae1e8901924c31 Mon Sep 17 00:00:00 2001
From: "Anwar, Malik Aqeel"
Date: Wed, 5 Aug 2020 02:03:43 -0400
Subject: [PATCH] Restructured

---
 mask_the_face.py         | 32 +++++++++++++--------------
 utils/aux_functions.py   | 36 +++++++++++++++++++-----------
 utils/download_links.txt |  1 -
 utils/fetch_dataset.py   | 48 +++++++++++++++++++++++++---------------
 4 files changed, 69 insertions(+), 48 deletions(-)
 delete mode 100644 utils/download_links.txt

diff --git a/mask_the_face.py b/mask_the_face.py
index 0caf9ef..58d70a3 100644
--- a/mask_the_face.py
+++ b/mask_the_face.py
@@ -6,7 +6,7 @@
 import dlib
 from utils.aux_functions import *
 
-#TODO:
+# TODO:
 # 1. Done: surgical_green, surgical_blue --> one surgical
 # 2. left mask and right mask --> one angled mask
 # 3. Done: MFR2 Dataset script
@@ -28,7 +28,7 @@
     "--mask_type",
     type=str,
     default="surgical",
-    choices=['surgical', 'N95', 'KN95', 'cloth', 'gas', 'inpaint', 'random', 'all'],
+    choices=["surgical", "N95", "KN95", "cloth", "gas", "inpaint", "random", "all"],
     help="Type of the mask to be applied. Available options: all, surgical_blue, surgical_green, N95, cloth",
 )
 
@@ -62,7 +62,7 @@
 
 parser.add_argument(
     "--code",
-    type = str,
+    type=str,
     # default="cloth-masks/textures/check/check_4.jpg, cloth-#e54294, cloth-#ff0000, cloth, cloth-masks/textures/others/heart_1.png, cloth-masks/textures/fruits/pineapple.png, N95, surgical_blue, surgical_green",
     default="",
     help="Generate specific formats",
@@ -76,7 +76,7 @@
     "--write_original_image",
     dest="write_original_image",
     action="store_true",
-    help="If true, original image is also stored in the masked folder"
+    help="If true, original image is also stored in the masked folder",
 )
 parser.set_defaults(feature=False)
 
@@ -85,32 +85,32 @@
 
 
 # Set up dlib face detector and predictor
 args.detector = dlib.get_frontal_face_detector()
-path_to_dlib_model = 'dlib_models/shape_predictor_68_face_landmarks.dat'
+path_to_dlib_model = "dlib_models/shape_predictor_68_face_landmarks.dat"
 if not os.path.exists(path_to_dlib_model):
     download_dlib_model()
 
 args.predictor = dlib.shape_predictor(path_to_dlib_model)
 
 # Extract data from code
-mask_code = "".join(args.code.split()).split(',')
+mask_code = "".join(args.code.split()).split(",")
 args.code_count = np.zeros(len(mask_code))
 args.mask_dict_of_dict = {}
 
 for i, entry in enumerate(mask_code):
     mask_dict = {}
-    mask_color = ''
-    mask_texture = ''
-    mask_type = entry.split('-')[0]
-    if len(entry.split('-'))==2:
-        mask_variation = entry.split('-')[1]
-        if '#' in mask_variation:
+    mask_color = ""
+    mask_texture = ""
+    mask_type = entry.split("-")[0]
+    if len(entry.split("-")) == 2:
+        mask_variation = entry.split("-")[1]
+        if "#" in mask_variation:
             mask_color = mask_variation
         else:
             mask_texture = mask_variation
-    mask_dict['type'] = mask_type
-    mask_dict['color'] = mask_color
-    mask_dict['texture'] = mask_texture
+    mask_dict["type"] = mask_type
+    mask_dict["color"] = mask_color
+    mask_dict["texture"] = mask_texture
     args.mask_dict_of_dict[i] = mask_dict
 
 # Check if path is file or directory or none
@@ -121,7 +121,7 @@
     path, dirs, files = os.walk(args.path).__next__()
     file_count = len(files)
     dirs_count = len(dirs)
-    if len(files)>0:
+    if len(files) > 0:
         print_orderly("Masking image files", 60)
 
         # Process files in the directory if any
diff --git a/utils/aux_functions.py b/utils/aux_functions.py
index 4b5ed35..af0cb66 100644
--- a/utils/aux_functions.py
+++ b/utils/aux_functions.py
@@ -16,27 +16,33 @@
 from tqdm import tqdm
 import bz2, shutil
 
+
 def download_dlib_model():
-    print_orderly('Get dlib model', 60)
+    print_orderly("Get dlib model", 60)
     dlib_model_link = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
-    print('Downloading dlib model...')
+    print("Downloading dlib model...")
     r = requests.get(dlib_model_link, stream=True)
-    print("Zip file size: ", np.round(len(r.content)/1024/1024, 2), "MB")
-    destination = 'dlib_models'+os.path.sep+'shape_predictor_68_face_landmarks.dat.bz2'
+    print("Zip file size: ", np.round(len(r.content) / 1024 / 1024, 2), "MB")
+    destination = (
+        "dlib_models" + os.path.sep + "shape_predictor_68_face_landmarks.dat.bz2"
+    )
 
     if not os.path.exists(destination.rsplit(os.path.sep, 1)[0]):
         os.mkdir(destination.rsplit(os.path.sep, 1)[0])
 
-    print('Saving dlib model...')
-    with open(destination, 'wb') as fd:
+    print("Saving dlib model...")
+    with open(destination, "wb") as fd:
         for chunk in r.iter_content(chunk_size=32678):
             fd.write(chunk)
-    print('Extracting dlib model...')
-    with bz2.BZ2File(destination) as fr, open("dlib_models/shape_predictor_68_face_landmarks.dat", "wb") as fw:
+    print("Extracting dlib model...")
+    with bz2.BZ2File(destination) as fr, open(
+        "dlib_models/shape_predictor_68_face_landmarks.dat", "wb"
+    ) as fw:
         shutil.copyfileobj(fr, fw)
-    print('Saved: ', destination)
-    print_orderly('done', 60)
+    print("Saved: ", destination)
+    print_orderly("done", 60)
     os.remove(destination)
 
+
 def get_line(face_landmark, image, type="eye", debug=False):
     pil_image = Image.fromarray(image)
     d = ImageDraw.Draw(pil_image)
@@ -456,6 +462,7 @@ def check_path(path):
 
     return is_directory, is_file, is_other
 
+
 def shape_to_landmarks(shape):
     face_landmarks = {}
     face_landmarks["left_eyebrow"] = [
@@ -552,6 +559,7 @@ def shape_to_landmarks(shape):
     ]
     return face_landmarks
 
+
 def rect_to_bb(rect):
     x1 = rect.left()
     x2 = rect.right()
@@ -559,6 +567,7 @@ def rect_to_bb(rect):
     y2 = rect.bottom()
     return (x1, x2, y2, x1)
 
+
 def mask_image(image_path, args):
     # Read the image
     image = cv2.imread(image_path)
@@ -571,9 +580,9 @@ def mask_image(image_path, args):
         if args.code:
             ind = random.randint(0, len(args.code_count) - 1)
             mask_dict = args.mask_dict_of_dict[ind]
-            mask_type = mask_dict['type']
-            args.color = mask_dict['color']
-            args.pattern = mask_dict['texture']
+            mask_type = mask_dict["type"]
+            args.color = mask_dict["color"]
+            args.pattern = mask_dict["texture"]
             args.code_count[ind] += 1
 
         elif mask_type == "random":
@@ -627,6 +636,7 @@ def mask_image(image_path, args):
 
     return masked_images, mask, mask_binary_array, original_image
 
+
 def is_image(path):
     split = path.rsplit("/")
     if split[1][0] == ".":
diff --git a/utils/download_links.txt b/utils/download_links.txt
deleted file mode 100644
index 5450694..0000000
--- a/utils/download_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-MFR2,
\ No newline at end of file
diff --git a/utils/fetch_dataset.py b/utils/fetch_dataset.py
index 91c65b5..1c0357c 100644
--- a/utils/fetch_dataset.py
+++ b/utils/fetch_dataset.py
@@ -3,11 +3,14 @@
 # Email: aqeel.anwar@gatech.edu
 # Code resued from https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
 
-import requests,os
+# Make sure you run this from the parent folder, not the utils folder, i.e.
+# python utils/fetch_dataset.py
+
+import requests, os
 from zipfile import ZipFile
 import argparse
 import urllib
-from utils.aux_functions import print_orderly
+
 parser = argparse.ArgumentParser(
     description="Download dataset - Python code to download associated datasets"
 )
@@ -18,35 +21,40 @@
     help="Name of the dataset - Details on available datasets can be found at GitHub Page",
 )
 args = parser.parse_args()
+
+
 def download_file_from_google_drive(id, destination):
     URL = "https://docs.google.com/uc?export=download"
 
     session = requests.Session()
-    response = session.get(URL, params = { 'id' : id }, stream = True)
+    response = session.get(URL, params={"id": id}, stream=True)
     token = get_confirm_token(response)
 
     if token:
-        params = { 'id' : id, 'confirm' : token }
-        response = session.get(URL, params = params, stream = True)
+        params = {"id": id, "confirm": token}
+        response = session.get(URL, params=params, stream=True)
 
     save_response_content(response, destination)
 
+
 def get_confirm_token(response):
     for key, value in response.cookies.items():
-        if key.startswith('download_warning'):
+        if key.startswith("download_warning"):
             return value
 
     return None
 
+
 def save_response_content(response, destination):
     CHUNK_SIZE = 32768
-
+    print(destination)
     with open(destination, "wb") as f:
         for chunk in response.iter_content(CHUNK_SIZE):
-            if chunk: # filter out keep-alive new chunks
+            if chunk:  # filter out keep-alive new chunks
                 f.write(chunk)
 
+
 def download(t_url):
     response = urllib.request.urlopen(t_url)
     data = response.read()
@@ -54,6 +62,7 @@ def download(t_url):
     lines = txt_str.split("\\n")
     return lines
 
+
 def Convert(lst):
     it = iter(lst)
     res_dct = dict(zip(it, it))
@@ -62,19 +71,22 @@
 
 
 if __name__ == "__main__":
     # Fetch the latest download_links.txt file from GitHub
-    print_orderly('Download dataset', 60)
-    link = 'https://raw.githubusercontent.com/aqeelanwar/MaskTheFace/master/datasets/download_links.txt'
-    links_dict = Convert(download(link)[0].replace(':', '\n').replace("b'", "").replace("\'", "").replace(" ", "").split('\n'))
+    link = "https://raw.githubusercontent.com/aqeelanwar/MaskTheFace/master/datasets/download_links.txt"
+    links_dict = Convert(
+        download(link)[0]
+        .replace(":", "\n")
+        .replace("b'", "")
+        .replace("'", "")
+        .replace(" ", "")
+        .split("\n")
+    )
     file_id = links_dict[args.dataset]
-    destination = os.getcwd().rsplit(os.path.sep,1)
-    destination = destination[0] + os.path.sep + 'datasets' + os.path.sep + '_.zip'
-    print('Downloading: ', args.dataset)
+    destination = "datasets/_.zip"
+    print("Downloading: ", args.dataset)
    download_file_from_google_drive(file_id, destination)
-    print('Extracting: ', args.dataset)
-    with ZipFile(destination, 'r') as zipObj:
+    print("Extracting: ", args.dataset)
+    with ZipFile(destination, "r") as zipObj:
         # Extract all the contents of zip file in current directory
         zipObj.extractall(destination.rsplit(os.path.sep, 1)[0])
     os.remove(destination)
-    print_orderly('Done', 60)
-
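
Note on the restructured --code flag: each comma-separated entry is parsed into a {type, color, texture} dict, where a variation containing "#" is treated as a cloth color and any other variation as a texture image path. Below is a minimal standalone sketch of the parsing loop added to mask_the_face.py above, using an illustrative code string taken from the commented-out default in the diff:

    # Sketch of the --code parsing loop (same split logic as in the patch);
    # the input string here is illustrative, not a required default.
    code = "cloth-#e54294, cloth-masks/textures/fruits/pineapple.png, N95"
    for i, entry in enumerate("".join(code.split()).split(",")):
        mask_color = ""
        mask_texture = ""
        mask_type = entry.split("-")[0]           # text before the first "-"
        if len(entry.split("-")) == 2:            # a variation is present
            mask_variation = entry.split("-")[1]
            if "#" in mask_variation:             # "#..." -> cloth color
                mask_color = mask_variation
            else:                                 # otherwise -> texture path
                mask_texture = mask_variation
        print(i, {"type": mask_type, "color": mask_color, "texture": mask_texture})
    # 0 {'type': 'cloth', 'color': '#e54294', 'texture': ''}
    # 1 {'type': 'cloth', 'color': '', 'texture': 'masks/textures/fruits/pineapple.png'}
    # 2 {'type': 'N95', 'color': '', 'texture': ''}

Since fetch_dataset.py now writes to the relative path datasets/_.zip instead of deriving a path from os.getcwd(), it has to be invoked from the repository root, e.g. python utils/fetch_dataset.py --dataset MFR2 (MFR2 being the one entry in the deleted utils/download_links.txt; the link list is now fetched from datasets/download_links.txt on master).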