#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2018 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : video_demo.py
# Author : YunYang1994
# Created date: 2018-11-30 15:56:37
# Description :
#
#================================================================
import cv2
import time
import numpy as np
import tensorflow as tf
from PIL import Image
from core import utils
IMAGE_H, IMAGE_W = 416, 416
video_path = "./data/demo_data/road.mp4"
video_path = 0 # use camera
classes = utils.read_coco_names('./data/coco.names')
num_classes = len(classes)
# Load the frozen graph and grab the input placeholder plus the box/score output tensors.
input_tensor, output_tensors = utils.read_pb_return_tensors(tf.get_default_graph(),
                                                            "./checkpoint/yolov3_cpu_nms.pb",
                                                            ["Placeholder:0", "concat_9:0", "mul_6:0"])
with tf.Session() as sess:
    vid = cv2.VideoCapture(video_path)
    while True:
        return_value, frame = vid.read()
        if return_value:
            # OpenCV delivers BGR frames; convert to RGB before wrapping in a PIL image.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(frame)
        else:
            # Raised when no frame can be read (e.g., end of the video or camera failure).
            raise ValueError("No image!")
        # Resize to the network input size and scale pixel values to [0, 1].
        img_resized = np.array(image.resize(size=(IMAGE_H, IMAGE_W)), dtype=np.float32)
        img_resized = img_resized / 255.
        prev_time = time.time()

        # Run the network, then filter detections with CPU non-maximum suppression.
        boxes, scores = sess.run(output_tensors, feed_dict={input_tensor: np.expand_dims(img_resized, axis=0)})
        boxes, scores, labels = utils.cpu_nms(boxes, scores, num_classes, score_thresh=0.4, iou_thresh=0.5)
        image = utils.draw_boxes(image, boxes, scores, labels, classes, (IMAGE_H, IMAGE_W), show=False)

        # Overlay the per-frame inference time and display the annotated frame.
        curr_time = time.time()
        exec_time = curr_time - prev_time
        result = np.asarray(image)
        info = "time: %.2f ms" % (1000 * exec_time)
        cv2.putText(result, text=info, org=(50, 70), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
        result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
        cv2.imshow("result", result)
        if cv2.waitKey(1) & 0xFF == ord('q'): break