-
Notifications
You must be signed in to change notification settings - Fork 0
/
motion.py
146 lines (122 loc) · 3.83 KB
/
motion.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
# USAGE
# python motion.py --conf conf.json
from pyimagesearch.tempimage import TempImage
import argparse
import warnings
import datetime
import imutils
import json
import time
import cv2

# parse the command-line arguments (path to the JSON configuration file)
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
    help="path to the JSON config")
args = vars(ap.parse_args())

# silence warning chatter from third-party libraries
warnings.filterwarnings("ignore")

# load the configuration; a context manager guarantees the file handle
# is closed (the original `json.load(open(...))` leaked it)
with open(args["conf"]) as config_file:
    conf = json.load(config_file)

# placeholder for an upload client (unused in this script)
client = None

# status strings drawn onto each annotated frame
DETECTED = "Pet on the move"
EMPTY = "Pet Sleeping"
# check cam setup: pick the capture device requested by the config
if conf["pi_cam"]:
    # For use with Raspberry pi camera
    # NOTE(review): this branch builds a PiCamera + PiRGBArray pipeline,
    # but the capture loop below calls camera.isOpened()/camera.read()
    # (cv2.VideoCapture API, which PiCamera does not provide) and writes
    # to `out`, which is only created in the webcam branch — confirm the
    # pi_cam path was ever exercised.
    from picamera.array import PiRGBArray
    from picamera import PiCamera
    camera = PiCamera()
    # resolution/framerate come straight from the JSON config
    camera.resolution = tuple(conf["resolution"])
    camera.framerate = conf["fps"]
    rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"]))
else:
    # For use with web camera (device index 0)
    camera = cv2.VideoCapture(0)
    # recorder for the raw feed: output.avi, XVID codec, 20 fps, 640x480
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))

# allow the cam to warmup
print("warming up camera...")
time.sleep(conf["camera_warmup_time"])

# avg: running-average background model; None until the first frame seeds it
avg = None
# lastUploaded: timestamp of the last accepted motion event;
# motion_counter: consecutive motion-frame count used for debouncing
lastUploaded = datetime.datetime.now()
motion_counter = 0
# capture frames from the cam until the stream ends or `q` is pressed
while camera.isOpened():
    ret, frame = camera.read()
    if ret:
        # record the raw frame to output.avi and show the live feed
        out.write(frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        # camera returned no frame: end of stream
        break
    timestamp = datetime.datetime.now()
    text = EMPTY
    # resize the frame, convert it to grayscale, then blur it so small
    # sensor noise does not register as motion
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    # if the average frame is None, initialize the background model and
    # skip detection for this frame
    if avg is None:
        print("starting bg model...")
        avg = gray.copy().astype("float")
        continue
    # accumulate a weighted average between the current frame and previous
    # frames, then compute the difference between frame and average
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    # threshold the delta, dilate to fill in holes, then find contours
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
        cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    for c in cnts:
        # ignore contours smaller than the configured minimum area
        if cv2.contourArea(c) < conf["min_area"]:
            continue
        # draw the bounding box for the contour and flag motion
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = DETECTED
    # draw the status text and timestamp on the frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)
    # check to see if the pet has moved
    if text == DETECTED:
        # only count motion once enough time has passed since the last event
        if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
            motion_counter += 1
            # enough consecutive motion frames: record the event and reset
            if motion_counter >= conf["min_motion_frames"]:
                lastUploaded = timestamp
                motion_counter = 0
    # otherwise, the pet has not moved: reset the consecutive-motion count
    else:
        # BUG FIX: was `motionCounter = 0` — a typo that assigned a brand-new
        # variable, so the real counter never reset on quiet frames and the
        # min_motion_frames debounce was broken
        motion_counter = 0
    # check to see if the frames should be displayed to screen
    if conf["show_video"]:
        # display the petCam feed
        cv2.imshow("PetCam Feed", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key is pressed, break from the loop
        if key == ord("q"):
            break
    # clear the stream buffer for the next frame (pi camera only)
    if conf["pi_cam"]:
        rawCapture.truncate(0)
# Release everything once the job is finished
camera.release()
# `out` is only created on the web-camera path; guard the release so the
# pi_cam configuration does not raise NameError here
if not conf["pi_cam"]:
    out.release()
cv2.destroyAllWindows()