.PHONY: clean data lint requirements download clustering test_environment sync_data_to_s3 sync_data_from_s3

#################################################################################
# GLOBALS #
#################################################################################
PROJECT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
BUCKET = default
PROFILE = default
PROJECT_NAME = cookiecutter-data-science-example
PYTHON_INTERPRETER = C:\Users\<username>\<venvs>\<venvName>\Scripts\python
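# NOTE: the interpreter path above is a Windows virtualenv placeholder; replace
# <username>/<venvs>/<venvName> with your own paths (on Linux/macOS this is
# typically <venv>/bin/python), or override PYTHON_INTERPRETER at invocation
# time as shown below.
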
ifeq (,$(shell which conda))
HAS_CONDA=False
else
HAS_CONDA=True
endif
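
# Any variable above can be overridden per invocation without editing this file,
# for example (the bucket/profile values below are placeholders, not project defaults):
#
#     make requirements PYTHON_INTERPRETER=python3
#     make data BUCKET=my-bucket PROFILE=my-aws-profile
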
#################################################################################
# COMMANDS #
#################################################################################
## Test python environment is set up correctly
test_environment:
	$(PYTHON_INTERPRETER) test_environment.py

## Install Python Dependencies
requirements: test_environment
	$(PYTHON_INTERPRETER) -m pip install -U pip setuptools wheel
	$(PYTHON_INTERPRETER) -m pip install -r requirements.txt

# Download Data from S3 (target disabled below)
##sync_data_from_s3:
##ifeq (default,$(PROFILE))
## aws s3 sync s3://$(BUCKET)/sagemaker/demo-us-accidents/raw-data/ data/raw/
##else
## aws s3 sync s3://$(BUCKET)/sagemaker/demo-us-accidents/raw-data/ data/raw/ --profile $(PROFILE)
##endif
## Download Data from Kaggle
download: requirements
	kaggle datasets download -d sobhanmoosavi/us-accidents --force -p data/raw
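
# Note: this relies on the Kaggle CLI being installed and authenticated, e.g. via
# an API token at ~/.kaggle/kaggle.json or the KAGGLE_USERNAME / KAGGLE_KEY
# environment variables.
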
## Make Dataset, Process and Analyze Data
data: download
	$(PYTHON_INTERPRETER) src/data/make_dataset.py data/raw/us-accidents.zip data/processed/us-accidents.csv

## Run PCA and Clustering
clustering: data
	$(PYTHON_INTERPRETER) src/main.py
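
# Running `make clustering` therefore executes the whole pipeline through its
# prerequisite chain: test_environment -> requirements -> download -> data ->
# clustering (PCA and clustering in src/main.py).
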
## Delete all compiled Python files
clean:
	find . -type f -name "*.py[co]" -delete
	find . -type d -name "__pycache__" -delete

## Lint using flake8
lint:
	flake8 src

# Upload Data to S3 (target disabled below)
##sync_data_to_s3:
##ifeq (default,$(PROFILE))
## aws s3 sync data/raw/ s3://$(BUCKET)/sagemaker/demo-us-accidents/raw-data/
##else
## aws s3 sync data/raw/ s3://$(BUCKET)/sagemaker/demo-us-accidents/raw-data/ --profile $(PROFILE)
##endif
# Set up python interpreter environment (target disabled below)
##create_environment:
##ifeq (True,$(HAS_CONDA))
## @echo ">>> Detected conda, creating conda environment."
##ifeq (3,$(findstring 3,$(PYTHON_INTERPRETER)))
## conda create --name $(PROJECT_NAME) python=3
##else
## conda create --name $(PROJECT_NAME) python=2.7
##endif
## @echo ">>> New conda env created. Activate with:\nsource activate $(PROJECT_NAME)"
##else
## $(PYTHON_INTERPRETER) -m pip install -q virtualenv virtualenvwrapper
## @echo ">>> Installing virtualenvwrapper if not already installed.\nMake sure the following lines are in shell startup file\n\
## export WORKON_HOME=$$HOME/.virtualenvs\nexport PROJECT_HOME=$$HOME/Devel\nsource /usr/local/bin/virtualenvwrapper.sh\n"
## @bash -c "source `which virtualenvwrapper.sh`;mkvirtualenv $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER)"
## @echo ">>> New virtualenv created. Activate with:\nworkon $(PROJECT_NAME)"
##endif
#################################################################################
# PROJECT RULES #
#################################################################################
#################################################################################
# Self Documenting Commands #
#################################################################################
.DEFAULT_GOAL := help
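# Because help is the default goal, running a bare `make` prints the list of
# documented rules instead of building anything.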
# Inspired by <http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html>
# sed script explained:
# /^##/:
# * save line in hold space
# * purge line
# * Loop:
# * append newline + line to hold space
# * go to next line
# * if line starts with doc comment, strip comment character off and loop
# * remove target prerequisites
# * append hold space (+ newline) to line
# * replace newline plus comments by `---`
# * print line
# Separate expressions are necessary because labels cannot be delimited by
# semicolon; see <http://stackoverflow.com/a/11799865/1968>
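#
# For example, the line "## Lint using flake8" above the lint target becomes the
# record "lint---Lint using flake8", which the awk stage prints as a left-aligned,
# 19-column, colored rule name followed by its word-wrapped description.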
.PHONY: help
help:
@echo "$$(tput bold)Available rules:$$(tput sgr0)"
@echo
@sed -n -e "/^## / { \
h; \
s/.*//; \
:doc" \
-e "H; \
n; \
s/^## //; \
t doc" \
-e "s/:.*//; \
G; \
s/\\n## /---/; \
s/\\n/ /g; \
p; \
}" ${MAKEFILE_LIST} \
| LC_ALL='C' sort --ignore-case \
| awk -F '---' \
-v ncol=$$(tput cols) \
-v indent=19 \
-v col_on="$$(tput setaf 6)" \
-v col_off="$$(tput sgr0)" \
'{ \
printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
n = split($$2, words, " "); \
line_length = ncol - indent; \
for (i = 1; i <= n; i++) { \
line_length -= length(words[i]) + 1; \
if (line_length <= 0) { \
line_length = ncol - indent - length(words[i]) - 1; \
printf "\n%*s ", -indent, " "; \
} \
printf "%s ", words[i]; \
} \
printf "\n"; \
}' \
| more $(shell test $(shell uname) = Darwin && echo '--no-init --raw-control-chars')