[FEAT] Model Registry & Model load & Use mlflow #9

Merged
merged 10 commits on Mar 14, 2024
8 changes: 8 additions & 0 deletions .gitignore
@@ -152,12 +152,17 @@ dmypy.json
# Cython debug symbols
cython_debug/

# Model weights
*.pt
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
wandb/
*.pkl
events.out*
grafana.db
000000*
lock
@@ -166,5 +171,8 @@ tombstones
prometheus/prometheus-volume/data/*
index
0000*
meta.json
*.tar
*.pth
*.onnx
*.pth
14 changes: 14 additions & 0 deletions docker/mlflow/DockerFile.mlflow
@@ -0,0 +1,14 @@
FROM amd64/python:3.9-slim

RUN apt-get update && apt-get install -y \
git \
wget \
&& rm -rf /var/lib/apt/lists/*

RUN pip install -U pip &&\
pip install boto3==1.26.8 mlflow==1.30.0 psycopg2-binary

RUN cd /tmp && \
wget https://dl.min.io/client/mc/release/linux-amd64/mc && \
chmod +x mc && \
mv mc /usr/bin/mc
58 changes: 58 additions & 0 deletions docker/mlflow/docker-compose_mlflow.yaml
@@ -0,0 +1,58 @@
version: "3"

services:
  mlflow-backend-store:
    image: postgres:14.0
    container_name: mlflow-backend-store
    environment:
      POSTGRES_USER: mlflowuser
      POSTGRES_PASSWORD: mlflowpassword
      POSTGRES_DB: mlflowdatabase
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-U", "mlflowuser", "-d", "mlflowdatabase"]
      interval: 10s
      timeout: 5s
      retries: 5

  mlflow-artifact-store:
    image: minio/minio:RELEASE.2024-01-18T22-51-28Z
    container_name: mlflow-artifact-store
    ports:
      - 9000:9000
      - 9001:9001
    environment:
      MINIO_ROOT_USER: minio
      MINIO_ROOT_PASSWORD: miniostorage
    command: server /data/minio --console-address :9001
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 5s
      timeout: 5s
      retries: 5

  mlflow-server:
    build:
      context: .
      dockerfile: DockerFile.mlflow
    container_name: mlflow-server
    depends_on:
      mlflow-backend-store:
        condition: service_healthy
      mlflow-artifact-store:
        condition: service_healthy
    ports:
      - 5001:5000
    environment:
      AWS_ACCESS_KEY_ID: minio
      AWS_SECRET_ACCESS_KEY: miniostorage
      MLFLOW_S3_ENDPOINT_URL: http://mlflow-artifact-store:9000
    command:
      - /bin/sh
      - -c
      - |
        mc config host add mlflowminio http://mlflow-artifact-store:9000 minio miniostorage &&
        mc mb --ignore-existing mlflowminio/mlflow
        mlflow server \
          --backend-store-uri postgresql://mlflowuser:mlflowpassword@mlflow-backend-store/mlflowdatabase \
          --default-artifact-root s3://mlflow/ \
          --host 0.0.0.0
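Once the stack is up (e.g. `docker compose -f docker-compose_mlflow.yaml up -d` from `docker/mlflow/`), a training script can log and register a model against this server. The sketch below is a minimal example and not part of this diff: the `swimswap` experiment/model names are placeholders, the `torch.nn.Linear` module is a stand-in for the trained SimSwap generator, and the `localhost` endpoints assume the port mappings shown above.

```python
import os

import mlflow
import mlflow.pytorch
import torch

# Point the MLflow client at the tracking server and the MinIO artifact store
# started by docker-compose_mlflow.yaml (host-side ports 5001 and 9000).
os.environ["MLFLOW_S3_ENDPOINT_URL"] = "http://localhost:9000"
os.environ["AWS_ACCESS_KEY_ID"] = "minio"
os.environ["AWS_SECRET_ACCESS_KEY"] = "miniostorage"
mlflow.set_tracking_uri("http://localhost:5001")

mlflow.set_experiment("swimswap")          # hypothetical experiment name

# Stand-in for a trained torch.nn.Module (e.g. the SimSwap generator).
model = torch.nn.Linear(4, 2)

with mlflow.start_run():
    mlflow.log_param("image_size", 224)
    # Log the weights to MinIO and register them in the Model Registry.
    mlflow.pytorch.log_model(model, artifact_path="model",
                             registered_model_name="swimswap")

# Later, load a specific registered version back for inference.
loaded = mlflow.pytorch.load_model("models:/swimswap/1")
```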
2 changes: 2 additions & 0 deletions mlflow/registry/Swimswap/checkpoints/people/iter.txt
@@ -0,0 +1,2 @@
519
4062
11,208 changes: 11,208 additions & 0 deletions mlflow/registry/Swimswap/checkpoints/people/loss_log.txt

Large diffs are not rendered by default.

72 changes: 72 additions & 0 deletions mlflow/registry/Swimswap/checkpoints/people/opt.txt
@@ -0,0 +1,72 @@
------------ Options -------------
batchSize: 8
beta1: 0.5
checkpoints_dir: ./checkpoints
continue_train: False
data_type: 32
dataroot: ./datasets/cityscapes/
debug: False
display_freq: 99
display_winsize: 512
feat_num: 3
fineSize: 512
fp16: False
gan_mode: hinge
gpu_ids: [0]
image_size: 224
input_nc: 3
instance_feat: False
isTrain: True
label_feat: False
label_nc: 0
lambda_GP: 10.0
lambda_feat: 10.0
lambda_id: 20.0
lambda_rec: 10.0
latent_size: 512
loadSize: 1024
load_features: False
load_pretrain:
local_rank: 0
lr: 0.0002
max_dataset_size: inf
model: pix2pixHD
nThreads: 2
n_blocks_global: 6
n_blocks_local: 3
n_clusters: 10
n_downsample_E: 4
n_downsample_global: 3
n_layers_D: 4
n_local_enhancers: 1
name: people
ndf: 64
nef: 16
netG: global
ngf: 64
niter: 10000
niter_decay: 10000
niter_fix_global: 0
no_flip: False
no_ganFeat_loss: False
no_html: False
no_instance: False
no_vgg_loss: False
norm: batch
norm_G: spectralspadesyncbatch3x3
num_D: 2
output_nc: 3
phase: train
pool_size: 0
print_freq: 100
resize_or_crop: scale_width
save_epoch_freq: 10000
save_latest_freq: 10000
semantic_nc: 3
serial_batches: False
tf_log: False
times_G: 1
use_dropout: False
verbose: False
which_epoch: latest
-------------- End ----------------
Empty file.
100 changes: 100 additions & 0 deletions mlflow/registry/Swimswap/insightface_func/face_detect_crop_multi.py
@@ -0,0 +1,100 @@
'''
Author: Naiyuan liu
Github: https://github.com/NNNNAI
Date: 2021-11-23 17:03:58
LastEditors: Naiyuan liu
LastEditTime: 2021-11-24 16:45:41
Description:
'''
from __future__ import division
import collections
import numpy as np
import glob
import os
import os.path as osp
import cv2
from insightface.model_zoo import model_zoo
from insightface_func.utils import face_align_ffhqandnewarc as face_align

__all__ = ['Face_detect_crop', 'Face']

Face = collections.namedtuple('Face', [
'bbox', 'kps', 'det_score', 'embedding', 'gender', 'age',
'embedding_norm', 'normed_embedding',
'landmark'
])

Face.__new__.__defaults__ = (None, ) * len(Face._fields)


class Face_detect_crop:
    def __init__(self, name, root='~/.insightface_func/models'):
        self.models = {}
        root = os.path.expanduser(root)
        onnx_files = glob.glob(osp.join(root, name, '*.onnx'))
        onnx_files = sorted(onnx_files)
        for onnx_file in onnx_files:
            if onnx_file.find('_selfgen_') > 0:
                # print('ignore:', onnx_file)
                continue
            model = model_zoo.get_model(onnx_file)
            if model.taskname not in self.models:
                print('find model:', onnx_file, model.taskname)
                self.models[model.taskname] = model
            else:
                print('duplicated model task type, ignore:', onnx_file, model.taskname)
                del model
        assert 'detection' in self.models
        self.det_model = self.models['detection']

    def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640), mode='None'):
        self.det_thresh = det_thresh
        self.mode = mode
        assert det_size is not None
        print('set det-size:', det_size)
        self.det_size = det_size
        for taskname, model in self.models.items():
            if taskname == 'detection':
                model.prepare(ctx_id, input_size=det_size)
            else:
                model.prepare(ctx_id)

    def get(self, img, crop_size, max_num=0):
        bboxes, kpss = self.det_model.detect(img,
                                             threshold=self.det_thresh,
                                             max_num=max_num,
                                             metric='default')
        if bboxes.shape[0] == 0:
            return None
        # Align and crop every detected face using its keypoints.
        align_img_list = []
        M_list = []
        for i in range(bboxes.shape[0]):
            kps = None
            if kpss is not None:
                kps = kpss[i]
            M, _ = face_align.estimate_norm(kps, crop_size, mode=self.mode)
            align_img = cv2.warpAffine(img, M, (crop_size, crop_size), borderValue=0.0)
            align_img_list.append(align_img)
            M_list.append(M)

        return align_img_list, M_list
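As a usage sketch (not part of the diff), the multi-face detector can be driven as follows. The `antelope` model-pack name and the input image path are assumptions; `get` returns `None` when no face is detected.

```python
import cv2
from insightface_func.face_detect_crop_multi import Face_detect_crop

# "antelope" is the insightface detector pack used by SimSwap upstream (assumed here).
app = Face_detect_crop(name='antelope', root='~/.insightface_func/models')
app.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640), mode='None')

img = cv2.imread('group_photo.jpg')        # hypothetical input image
result = app.get(img, crop_size=224)
if result is not None:
    aligned_faces, matrices = result       # one 224x224 crop and affine matrix per detected face
```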
@@ -0,0 +1,97 @@
'''
Author: Naiyuan liu
Github: https://github.com/NNNNAI
Date: 2021-11-23 17:03:58
LastEditors: Naiyuan liu
LastEditTime: 2021-11-24 16:46:04
Description:
'''
from __future__ import division
import collections
import numpy as np
import glob
import os
import os.path as osp
import cv2
from insightface.model_zoo import model_zoo
from insightface_func.utils import face_align_ffhqandnewarc as face_align

__all__ = ['Face_detect_crop', 'Face']

Face = collections.namedtuple('Face', [
'bbox', 'kps', 'det_score', 'embedding', 'gender', 'age',
'embedding_norm', 'normed_embedding',
'landmark'
])

Face.__new__.__defaults__ = (None, ) * len(Face._fields)


class Face_detect_crop:
    def __init__(self, name, root='~/.insightface_func/models'):
        self.models = {}
        root = os.path.expanduser(root)
        onnx_files = glob.glob(osp.join(root, name, '*.onnx'))
        onnx_files = sorted(onnx_files)
        for onnx_file in onnx_files:
            if onnx_file.find('_selfgen_') > 0:
                # print('ignore:', onnx_file)
                continue
            model = model_zoo.get_model(onnx_file)
            if model.taskname not in self.models:
                print('find model:', onnx_file, model.taskname)
                self.models[model.taskname] = model
            else:
                print('duplicated model task type, ignore:', onnx_file, model.taskname)
                del model
        assert 'detection' in self.models
        self.det_model = self.models['detection']

    def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640), mode='None'):
        self.det_thresh = det_thresh
        self.mode = mode
        assert det_size is not None
        print('set det-size:', det_size)
        self.det_size = det_size
        for taskname, model in self.models.items():
            if taskname == 'detection':
                model.prepare(ctx_id, input_size=det_size)
            else:
                model.prepare(ctx_id)

    def get(self, img, crop_size, max_num=0):
        bboxes, kpss = self.det_model.detect(img,
                                             threshold=self.det_thresh,
                                             max_num=max_num,
                                             metric='default')
        if bboxes.shape[0] == 0:
            return None

        det_score = bboxes[..., 4]

        # Select the face with the highest detection score.
        best_index = np.argmax(det_score)

        kps = None
        if kpss is not None:
            kps = kpss[best_index]
        M, _ = face_align.estimate_norm(kps, crop_size, mode=self.mode)
        align_img = cv2.warpAffine(img, M, (crop_size, crop_size), borderValue=0.0)

        return [align_img], [M]
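This second file is the single-face counterpart of the detector above: rather than cropping every detection, it keeps only the highest-scoring face. Its path is not shown in this rendering; the sketch below assumes it mirrors the multi-face module as `insightface_func/face_detect_crop_single.py`, and the model-pack name and image path are again placeholders.

```python
import cv2
from insightface_func.face_detect_crop_single import Face_detect_crop  # assumed module path

app = Face_detect_crop(name='antelope', root='~/.insightface_func/models')
app.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640), mode='None')

result = app.get(cv2.imread('group_photo.jpg'), crop_size=224)
if result is not None:
    faces, mats = result
    face, M = faces[0], mats[0]            # exactly one entry: the best-scoring detection
```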