Skip to content

Commit

Permalink
Restructuring
Browse files Browse the repository at this point in the history
  • Loading branch information
Anwar, Malik Aqeel committed Aug 4, 2020
1 parent a868387 commit a61d29e
Show file tree
Hide file tree
Showing 4 changed files with 12 additions and 11 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -21,3 +21,4 @@ vgg_face_dataset
*.mp4
ML_examples
*.pptx
datasets
Binary file added dlib_models/shape_predictor_68_face_landmarks.dat
Binary file not shown.
4 changes: 2 additions & 2 deletions mask_the_face.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@
#TODO:
# 1. surgical_green, surgical_blue --> one surgical
# 2. left mask and right mask --> one angled mask
# 3. MFR2 Dataset script
# 4. Organize MFR2 dataset
# 3. Done: MFR2 Dataset script
# 4. Done: Organize and Upload MFR2 dataset
# 5. Done: Dlib based detector

# Command-line input setup
Expand Down
18 changes: 9 additions & 9 deletions utils/get_MFR2.py → utils/fetch_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,15 +6,15 @@
import requests,os
from zipfile import ZipFile
import argparse
from urllib2 import urlopen
import urllib

# Command-line interface: pick which associated dataset to fetch.
# (Available dataset names are listed on the project's GitHub page;
# "mfr2" is the default this script was written around.)
parser = argparse.ArgumentParser(
    description="Download dataset - Python code to download associated datasets"
)
parser.add_argument(
    "--dataset",
    type=str,
    default="mfr2",
    help="Name of the dataset - Details on available datasets can be found at GitHub Page",
)
# Parsed once at import time; the __main__ block below reads args.dataset.
args = parser.parse_args()
Expand Down Expand Up @@ -48,7 +48,7 @@ def save_response_content(response, destination):
f.write(chunk)

def download(t_url):
response = urlopen(t_url)
response = urllib.request.urlopen(t_url)
data = response.read()
txt_str = str(data)
lines = txt_str.split("\\n")
Expand All @@ -62,16 +62,16 @@ def Convert(lst):

if __name__ == "__main__":
    # Fetch the latest download_links.txt file from GitHub; it maps
    # dataset names to Google Drive file ids, one "name: id" pair per line.
    link = 'https://raw.githubusercontent.com/aqeelanwar/MaskTheFace/master/datasets/download_links.txt'
    # download() returns the raw bytes-repr text; strip the b'' wrapper,
    # quotes and spaces, split on ':'/newlines, and fold into a dict.
    # NOTE(review): this string surgery assumes ids never contain ':' — TODO confirm.
    links_dict = Convert(
        download(link)[0]
        .replace(':', '\n')
        .replace("b'", "")
        .replace("\'", "")
        .replace(" ", "")
        .split('\n')
    )
    file_id = links_dict[args.dataset]
    # Place the temporary archive in <repo-parent>/datasets/_.zip.
    # Compute the directory once and reuse it for extraction below
    # (the original re-derived it with a second rsplit on the path).
    dataset_dir = os.getcwd().rsplit(os.path.sep, 1)[0] + os.path.sep + 'datasets'
    destination = dataset_dir + os.path.sep + '_.zip'
    download_file_from_google_drive(file_id, destination)
    # Extract all contents of the zip into the datasets folder,
    # then remove the temporary archive.
    with ZipFile(destination, 'r') as zipObj:
        zipObj.extractall(dataset_dir)
    os.remove(destination)

0 comments on commit a61d29e

Please sign in to comment.