
Commit ed9dffc: Restructured

Anwar, Malik Aqeel committed Aug 5, 2020
1 parent 5a73277 commit ed9dffc
Showing 4 changed files with 69 additions and 48 deletions.
32 changes: 16 additions & 16 deletions mask_the_face.py
@@ -6,7 +6,7 @@
import dlib
from utils.aux_functions import *

-#TODO:
+# TODO:
# 1. Done: surgical_green, surgical_blue --> one surgical
# 2. left mask and right mask --> one angled mask
# 3. Done: MFR2 Dataset script
@@ -28,7 +28,7 @@
"--mask_type",
type=str,
default="surgical",
-choices=['surgical', 'N95', 'KN95', 'cloth', 'gas', 'inpaint', 'random', 'all'],
+choices=["surgical", "N95", "KN95", "cloth", "gas", "inpaint", "random", "all"],
help="Type of the mask to be applied. Available options: all, surgical_blue, surgical_green, N95, cloth",
)

@@ -62,7 +62,7 @@

parser.add_argument(
"--code",
-type = str,
+type=str,
# default="cloth-masks/textures/check/check_4.jpg, cloth-#e54294, cloth-#ff0000, cloth, cloth-masks/textures/others/heart_1.png, cloth-masks/textures/fruits/pineapple.png, N95, surgical_blue, surgical_green",
default="",
help="Generate specific formats",
@@ -76,7 +76,7 @@
"--write_original_image",
dest="write_original_image",
action="store_true",
help="If true, original image is also stored in the masked folder"
help="If true, original image is also stored in the masked folder",
)
parser.set_defaults(feature=False)

@@ -85,32 +85,32 @@

# Set up dlib face detector and predictor
args.detector = dlib.get_frontal_face_detector()
-path_to_dlib_model = 'dlib_models/shape_predictor_68_face_landmarks.dat'
+path_to_dlib_model = "dlib_models/shape_predictor_68_face_landmarks.dat"
if not os.path.exists(path_to_dlib_model):
download_dlib_model()

args.predictor = dlib.shape_predictor(path_to_dlib_model)

# Extract data from code
mask_code = "".join(args.code.split()).split(',')
mask_code = "".join(args.code.split()).split(",")
args.code_count = np.zeros(len(mask_code))
args.mask_dict_of_dict = {}


for i, entry in enumerate(mask_code):
mask_dict = {}
-mask_color = ''
-mask_texture = ''
-mask_type = entry.split('-')[0]
-if len(entry.split('-'))==2:
-mask_variation = entry.split('-')[1]
-if '#' in mask_variation:
+mask_color = ""
+mask_texture = ""
+mask_type = entry.split("-")[0]
+if len(entry.split("-")) == 2:
+mask_variation = entry.split("-")[1]
+if "#" in mask_variation:
mask_color = mask_variation
else:
mask_texture = mask_variation
-mask_dict['type'] = mask_type
-mask_dict['color'] = mask_color
-mask_dict['texture'] = mask_texture
+mask_dict["type"] = mask_type
+mask_dict["color"] = mask_color
+mask_dict["texture"] = mask_texture
args.mask_dict_of_dict[i] = mask_dict

# Check if path is file or directory or none
@@ -121,7 +121,7 @@
path, dirs, files = os.walk(args.path).__next__()
file_count = len(files)
dirs_count = len(dirs)
-if len(files)>0:
+if len(files) > 0:
print_orderly("Masking image files", 60)

# Process files in the directory if any
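For reference, the reformatted loop above turns each comma-separated `--code` entry into a small dict of mask type, color, and texture. A minimal standalone sketch of that parsing, using a hypothetical code string in the format of the commented-out default:

```python
# Sketch of the --code parsing in mask_the_face.py (sample string is hypothetical).
code = "cloth-#e54294, cloth-masks/textures/fruits/pineapple.png, N95"

mask_code = "".join(code.split()).split(",")  # strip whitespace, split entries
for entry in mask_code:
    mask_color, mask_texture = "", ""
    mask_type = entry.split("-")[0]
    if len(entry.split("-")) == 2:
        mask_variation = entry.split("-")[1]
        if "#" in mask_variation:
            mask_color = mask_variation    # "#rrggbb" hex color
        else:
            mask_texture = mask_variation  # path to a texture image
    print({"type": mask_type, "color": mask_color, "texture": mask_texture})
# {'type': 'cloth', 'color': '#e54294', 'texture': ''}
# {'type': 'cloth', 'color': '', 'texture': 'masks/textures/fruits/pineapple.png'}
# {'type': 'N95', 'color': '', 'texture': ''}
```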
36 changes: 23 additions & 13 deletions utils/aux_functions.py
@@ -16,27 +16,33 @@
from tqdm import tqdm
import bz2, shutil


def download_dlib_model():
-print_orderly('Get dlib model', 60)
+print_orderly("Get dlib model", 60)
dlib_model_link = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
-print('Downloading dlib model...')
+print("Downloading dlib model...")
r = requests.get(dlib_model_link, stream=True)
print("Zip file size: ", np.round(len(r.content)/1024/1024, 2), "MB")
destination = 'dlib_models'+os.path.sep+'shape_predictor_68_face_landmarks.dat.bz2'
print("Zip file size: ", np.round(len(r.content) / 1024 / 1024, 2), "MB")
destination = (
"dlib_models" + os.path.sep + "shape_predictor_68_face_landmarks.dat.bz2"
)
if not os.path.exists(destination.rsplit(os.path.sep, 1)[0]):
os.mkdir(destination.rsplit(os.path.sep, 1)[0])
-print('Saving dlib model...')
-with open(destination, 'wb') as fd:
+print("Saving dlib model...")
+with open(destination, "wb") as fd:
for chunk in r.iter_content(chunk_size=32678):
fd.write(chunk)
-print('Extracting dlib model...')
-with bz2.BZ2File(destination) as fr, open("dlib_models/shape_predictor_68_face_landmarks.dat", "wb") as fw:
+print("Extracting dlib model...")
+with bz2.BZ2File(destination) as fr, open(
+"dlib_models/shape_predictor_68_face_landmarks.dat", "wb"
+) as fw:
shutil.copyfileobj(fr, fw)
-print('Saved: ', destination)
-print_orderly('done', 60)
+print("Saved: ", destination)
+print_orderly("done", 60)

os.remove(destination)


def get_line(face_landmark, image, type="eye", debug=False):
pil_image = Image.fromarray(image)
d = ImageDraw.Draw(pil_image)
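For context, `download_dlib_model()` is invoked lazily from `mask_the_face.py` when the landmark model is missing. A minimal sketch of that call pattern, assuming the script is run from the repository root (it mirrors the lines changed above):

```python
# Sketch of the lazy model-download pattern used by mask_the_face.py.
import os
import dlib
from utils.aux_functions import download_dlib_model

path_to_dlib_model = "dlib_models/shape_predictor_68_face_landmarks.dat"
if not os.path.exists(path_to_dlib_model):
    download_dlib_model()  # streams the .bz2 archive and extracts the .dat file

predictor = dlib.shape_predictor(path_to_dlib_model)
```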
@@ -456,6 +462,7 @@ def check_path(path):

return is_directory, is_file, is_other


def shape_to_landmarks(shape):
face_landmarks = {}
face_landmarks["left_eyebrow"] = [
@@ -552,13 +559,15 @@ def shape_to_landmarks(shape):
]
return face_landmarks


def rect_to_bb(rect):
x1 = rect.left()
x2 = rect.right()
y1 = rect.top()
y2 = rect.bottom()
return (x1, x2, y2, x1)


def mask_image(image_path, args):
# Read the image
image = cv2.imread(image_path)
@@ -571,9 +580,9 @@ def mask_image(image_path, args):
if args.code:
ind = random.randint(0, len(args.code_count) - 1)
mask_dict = args.mask_dict_of_dict[ind]
-mask_type = mask_dict['type']
-args.color = mask_dict['color']
-args.pattern = mask_dict['texture']
+mask_type = mask_dict["type"]
+args.color = mask_dict["color"]
+args.pattern = mask_dict["texture"]
args.code_count[ind] += 1

elif mask_type == "random":
@@ -627,6 +636,7 @@

return masked_images, mask, mask_binary_array, original_image


def is_image(path):
split = path.rsplit("/")
if split[1][0] == ".":
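For reference, the `args.code` branch in `mask_image` above draws one of the parsed code entries at random and tallies it in `args.code_count`. A standalone sketch with hypothetical entries:

```python
# Sketch of the code-driven mask selection in mask_image() (entries are hypothetical).
import random
import numpy as np

mask_dict_of_dict = {
    0: {"type": "surgical", "color": "", "texture": ""},
    1: {"type": "cloth", "color": "#e54294", "texture": ""},
}
code_count = np.zeros(len(mask_dict_of_dict))

ind = random.randint(0, len(code_count) - 1)  # randint is inclusive on both ends
mask_dict = mask_dict_of_dict[ind]
mask_type = mask_dict["type"]
color, pattern = mask_dict["color"], mask_dict["texture"]
code_count[ind] += 1  # per-format usage tally
```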
1 change: 0 additions & 1 deletion utils/download_links.txt

This file was deleted.

48 changes: 30 additions & 18 deletions utils/fetch_dataset.py
@@ -3,11 +3,14 @@
# Email: [email protected]

# Code resued from https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
-import requests,os
+# Make sure you run this from parent folder and not from utils folder i.e.
+# python utils/fetch_dataset.py
+
+import requests, os
from zipfile import ZipFile
import argparse
import urllib
-from utils.aux_functions import print_orderly

parser = argparse.ArgumentParser(
description="Download dataset - Python code to download associated datasets"
)
@@ -18,42 +18,48 @@
help="Name of the dataset - Details on available datasets can be found at GitHub Page",
)
args = parser.parse_args()


def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"

session = requests.Session()

-response = session.get(URL, params = { 'id' : id }, stream = True)
+response = session.get(URL, params={"id": id}, stream=True)
token = get_confirm_token(response)

if token:
-params = { 'id' : id, 'confirm' : token }
-response = session.get(URL, params = params, stream = True)
+params = {"id": id, "confirm": token}
+response = session.get(URL, params=params, stream=True)

save_response_content(response, destination)


def get_confirm_token(response):
for key, value in response.cookies.items():
-if key.startswith('download_warning'):
+if key.startswith("download_warning"):
return value

return None


def save_response_content(response, destination):
CHUNK_SIZE = 32768

print(destination)
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
-if chunk: # filter out keep-alive new chunks
+if chunk:  # filter out keep-alive new chunks
f.write(chunk)


def download(t_url):
response = urllib.request.urlopen(t_url)
data = response.read()
txt_str = str(data)
lines = txt_str.split("\\n")
return lines


def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
@@ -62,19 +71,22 @@ def Convert(lst):

if __name__ == "__main__":
# Fetch the latest download_links.txt file from GitHub
-print_orderly('Download dataset', 60)
-link = 'https://raw.githubusercontent.com/aqeelanwar/MaskTheFace/master/datasets/download_links.txt'
-links_dict = Convert(download(link)[0].replace(':', '\n').replace("b'", "").replace("\'", "").replace(" ", "").split('\n'))
+link = "https://raw.githubusercontent.com/aqeelanwar/MaskTheFace/master/datasets/download_links.txt"
+links_dict = Convert(
+download(link)[0]
+.replace(":", "\n")
+.replace("b'", "")
+.replace("'", "")
+.replace(" ", "")
+.split("\n")
+)
file_id = links_dict[args.dataset]
-destination = os.getcwd().rsplit(os.path.sep,1)
-destination = destination[0] + os.path.sep + 'datasets' + os.path.sep + '_.zip'
-print('Downloading: ', args.dataset)
+destination = "datasets/_.zip"
+print("Downloading: ", args.dataset)
download_file_from_google_drive(file_id, destination)
-print('Extracting: ', args.dataset)
-with ZipFile(destination, 'r') as zipObj:
+print("Extracting: ", args.dataset)
+with ZipFile(destination, "r") as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall(destination.rsplit(os.path.sep, 1)[0])

os.remove(destination)
-print_orderly('Done', 60)
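For context, `Convert` pairs up an alternating key/value token list, and the chained `.replace` calls above normalize the `str(bytes)` form of the fetched links file before splitting. A sketch tracing one hypothetical line of `download_links.txt` (the Google Drive file id is made up):

```python
# Sketch of the link-file normalization in utils/fetch_dataset.py.
# download() splits str(bytes) on the two-character sequence "\\n"; for a file
# whose first line is "mfr2: 1abcDEFgh" the first chunk is "b'mfr2: 1abcDEFgh".
chunk = "b'mfr2: 1abcDEFgh"

tokens = (
    chunk.replace(":", "\n")  # "b'mfr2\n 1abcDEFgh"
    .replace("b'", "")        # "mfr2\n 1abcDEFgh"
    .replace("'", "")         # strips a trailing quote when one is present
    .replace(" ", "")         # "mfr2\n1abcDEFgh"
    .split("\n")              # ["mfr2", "1abcDEFgh"]
)

def Convert(lst):
    it = iter(lst)
    return dict(zip(it, it))  # pair alternating tokens as key/value

print(Convert(tokens))  # {'mfr2': '1abcDEFgh'}
```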
