Update project with minor fixes and improvements
@@ -13,72 +13,79 @@ check_settings()
 VIDEO = VideoStreaming()


-@application.route('/')
+@application.route("/")
 def home():
-    TITLE = 'Object detection'
-    return render_template('index.html', TITLE=TITLE)
+    TITLE = "Object detection"
+    return render_template("index.html", TITLE=TITLE)


-@application.route('/video_feed')
+@application.route("/video_feed")
 def video_feed():
-    '''
+    """
     Video streaming route.
-    '''
+    """
     return Response(
         VIDEO.show(),
-        mimetype='multipart/x-mixed-replace; boundary=frame'
+        mimetype="multipart/x-mixed-replace; boundary=frame"
     )


-# Button requests called from ajax
-@application.route('/request_preview_switch')
+# * Button requests
+@application.route("/request_preview_switch")
 def request_preview_switch():
     VIDEO.preview = not VIDEO.preview
-    print('*'*10, VIDEO.preview)
+    print("*"*10, VIDEO.preview)
     return "nothing"

-@application.route('/request_flipH_switch')
+
+@application.route("/request_flipH_switch")
 def request_flipH_switch():
     VIDEO.flipH = not VIDEO.flipH
-    print('*'*10, VIDEO.flipH)
+    print("*"*10, VIDEO.flipH)
     return "nothing"

-@application.route('/request_model_switch')
+
+@application.route("/request_model_switch")
 def request_model_switch():
     VIDEO.detect = not VIDEO.detect
-    print('*'*10, VIDEO.detect)
+    print("*"*10, VIDEO.detect)
     return "nothing"

-@application.route('/request_exposure_down')
+
+@application.route("/request_exposure_down")
 def request_exposure_down():
     VIDEO.exposure -= 1
-    print('*'*10, VIDEO.exposure)
+    print("*"*10, VIDEO.exposure)
     return "nothing"

-@application.route('/request_exposure_up')
+
+@application.route("/request_exposure_up")
 def request_exposure_up():
     VIDEO.exposure += 1
-    print('*'*10, VIDEO.exposure)
+    print("*"*10, VIDEO.exposure)
     return "nothing"

-@application.route('/request_contrast_down')
+
+@application.route("/request_contrast_down")
 def request_contrast_down():
     VIDEO.contrast -= 4
-    print('*'*10, VIDEO.contrast)
+    print("*"*10, VIDEO.contrast)
     return "nothing"

-@application.route('/request_contrast_up')
+
+@application.route("/request_contrast_up")
 def request_contrast_up():
     VIDEO.contrast += 4
-    print('*'*10, VIDEO.contrast)
+    print("*"*10, VIDEO.contrast)
     return "nothing"

-@application.route('/reset_camera')
+
+@application.route("/reset_camera")
 def reset_camera():
     STATUS = reset_settings()
-    print('*'*10, STATUS)
+    print("*"*10, STATUS)
     return "nothing"


-if __name__ == '__main__':
+if __name__ == "__main__":
     application.run(debug=True)
@@ -1,48 +1,51 @@
 import os
 import cv2


 attrib_list = {
-    'exposure': cv2.CAP_PROP_EXPOSURE,
-    'contrast': cv2.CAP_PROP_CONTRAST
+    "exposure": cv2.CAP_PROP_EXPOSURE,
+    "contrast": cv2.CAP_PROP_CONTRAST
 }


 def check_settings():
     VIDEO_CHECK = cv2.VideoCapture(0)

-    if not os.path.exists('camera_settings.log'):
-        f = open('camera_settings.log', 'w')
+    if not os.path.exists("camera_settings.log"):
+        f = open("camera_settings.log", "w")
         for attrib, index in attrib_list.items():
-            f.writelines(f'{attrib} = {VIDEO_CHECK.get(index)}\n')
+            f.writelines(f"{attrib} = {VIDEO_CHECK.get(index)}\n")
         f.close()

     else:
-        f = open('camera_settings.log', 'r')
-        lines = f.read().split('\n')
+        f = open("camera_settings.log", "r")
+        lines = f.read().split("\n")
         for line in lines:
-            attrib = line.split(' = ')
+            attrib = line.split(" = ")
             if attrib[0] in attrib_list.keys():
                 VIDEO_CHECK.set(attrib_list[attrib[0]], eval(attrib[1]))
         f.close()

-    print('*'*28)
-    print('* Checking camera settings *')
-    print('*'*28)
+    print("*"*28)
+    print("* Checking camera settings *")
+    print("*"*28)
     for attrib, index in attrib_list.items():
-        print(f'{attrib} = {VIDEO_CHECK.get(index)}')
+        print(f"{attrib} = {VIDEO_CHECK.get(index)}")

     VIDEO_CHECK.release()


 def reset_settings():
-    if not os.path.exists('camera_settings.log'):
-        print('"camera_settings.log" does not exist!')
-        print('Verify your camera settings!')
+    if not os.path.exists("camera_settings.log"):
+        print("'camera_settings.log' does not exist!")
+        print("Verify your camera settings!")
         return False
     else:
         VIDEO_CHECK = cv2.VideoCapture(0)
-        f = open('camera_settings.log', 'r')
-        lines = f.read().split('\n')
+        f = open("camera_settings.log", "r")
+        lines = f.read().split("\n")
         for line in lines:
-            attrib = line.split(' = ')
+            attrib = line.split(" = ")
             if attrib[0] in attrib_list.keys():
                 VIDEO_CHECK.set(attrib_list[attrib[0]], eval(attrib[1]))
         f.close()
@@ -1,3 +1,4 @@
+import os
 import time
 import cv2
 import numpy as np
@@ -5,27 +6,34 @@ import numpy as np

 class ObjectDetection:
     def __init__(self):
+        PROJECT_PATH = os.path.abspath(os.getcwd())
+        MODELS_PATH = os.path.join(PROJECT_PATH, "models")

         self.MODEL = cv2.dnn.readNet(
-            'models/yolov3.weights',
-            'models/yolov3.cfg'
+            os.path.join(MODELS_PATH, "yolov3.weights"),
+            os.path.join(MODELS_PATH, "yolov3.cfg")
         )

         self.CLASSES = []
-        with open("models/coco.names", "r") as f:
+        with open(os.path.join(MODELS_PATH, "coco.names"), "r") as f:
             self.CLASSES = [line.strip() for line in f.readlines()]

-        self.OUTPUT_LAYERS = [self.MODEL.getLayerNames()[i[0] - 1] for i in self.MODEL.getUnconnectedOutLayers()]
+        self.OUTPUT_LAYERS = [
+            self.MODEL.getLayerNames()[i - 1] for i in self.MODEL.getUnconnectedOutLayers()
+        ]
         self.COLORS = np.random.uniform(0, 255, size=(len(self.CLASSES), 3))
         self.COLORS /= (np.sum(self.COLORS**2, axis=1)**0.5/255)[np.newaxis].T

     def detectObj(self, snap):
         height, width, channels = snap.shape
-        blob = cv2.dnn.blobFromImage(snap, 1/255, (416, 416), swapRB=True, crop=False)
+        blob = cv2.dnn.blobFromImage(
+            snap, 1/255, (416, 416), swapRB=True, crop=False
+        )

         self.MODEL.setInput(blob)
         outs = self.MODEL.forward(self.OUTPUT_LAYERS)

-        # Showing informations on the screen
+        # ! Showing informations on the screen
         class_ids = []
         confidences = []
         boxes = []
@@ -35,13 +43,13 @@ class ObjectDetection:
                 class_id = np.argmax(scores)
                 confidence = scores[class_id]
                 if confidence > 0.5:
-                    # Object detected
+                    # * Object detected
                     center_x = int(detection[0]*width)
                     center_y = int(detection[1]*height)
                     w = int(detection[2]*width)
                     h = int(detection[3]*height)

-                    # Rectangle coordinates
+                    # * Rectangle coordinates
                     x = int(center_x - w/2)
                     y = int(center_y - h/2)

@@ -97,7 +105,7 @@ class VideoStreaming(object):
     @detect.setter
     def detect(self, value):
         self._detect = bool(value)

     @property
     def exposure(self):
         return self._exposure
@@ -106,7 +114,7 @@ class VideoStreaming(object):
     def exposure(self, value):
         self._exposure = value
         self.VIDEO.set(cv2.CAP_PROP_EXPOSURE, self._exposure)

     @property
     def contrast(self):
         return self._contrast
@@ -121,7 +129,7 @@ class VideoStreaming(object):
             ret, snap = self.VIDEO.read()
             if self.flipH:
                 snap = cv2.flip(snap, 1)

             if ret == True:
                 if self._preview:
                     # snap = cv2.resize(snap, (0, 0), fx=0.5, fy=0.5)
@@ -133,16 +141,17 @@ class VideoStreaming(object):
                         int(self.VIDEO.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                         int(self.VIDEO.get(cv2.CAP_PROP_FRAME_WIDTH))
                     ), np.uint8)
-                    label = 'camera disabled'
+                    label = "camera disabled"
                     H, W = snap.shape
                     font = cv2.FONT_HERSHEY_PLAIN
-                    color = (255,255,255)
-                    cv2.putText(snap, label, (W//2 - 100, H//2), font, 2, color, 2)
-                frame = cv2.imencode('.jpg', snap)[1].tobytes()
+                    color = (255, 255, 255)
+                    cv2.putText(snap, label, (W//2 - 100, H//2),
+                                font, 2, color, 2)
+
+                frame = cv2.imencode(".jpg", snap)[1].tobytes()
                 yield (b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
                 time.sleep(0.01)

             else:
                 break
-        print('off')
+        print("off")
@@ -1,6 +1,6 @@
-// Functions to deal with button events
+// ! Functions that deal with button events
 $(function () {
-    // Preview switch
+    // * Preview switch
     $("a#cam-preview").bind("click", function () {
         $.getJSON("/request_preview_switch", function (data) {
             // do nothing
@@ -10,7 +10,7 @@ $(function () {
 });

 $(function () {
-    // Flip horizontal switch
+    // * Flip horizontal switch
     $("a#flip-horizontal").bind("click", function () {
         $.getJSON("/request_flipH_switch", function (data) {
             // do nothing
@@ -20,7 +20,7 @@ $(function () {
 });

 $(function () {
-    // Model switch
+    // * Model switch
     $("a#use-model").bind("click", function () {
         $.getJSON("/request_model_switch", function (data) {
             // do nothing
@@ -30,7 +30,7 @@ $(function () {
 });

 $(function () {
-    // exposure down
+    // * exposure down
     $("a#exposure-down").bind("click", function () {
         $.getJSON("/request_exposure_down", function (data) {
             // do nothing
@@ -40,7 +40,7 @@ $(function () {
 });

 $(function () {
-    // exposure up
+    // * exposure up
     $("a#exposure-up").bind("click", function () {
         $.getJSON("/request_exposure_up", function (data) {
             // do nothing
@@ -50,7 +50,7 @@ $(function () {
 });

 $(function () {
-    // contrast down
+    // * contrast down
     $("a#contrast-down").bind("click", function () {
         $.getJSON("/request_contrast_down", function (data) {
             // do nothing
@@ -60,7 +60,7 @@ $(function () {
 });

 $(function () {
-    // contrast up
+    // * contrast up
     $("a#contrast-up").bind("click", function () {
         $.getJSON("/request_contrast_up", function (data) {
             // do nothing
@@ -70,7 +70,7 @@ $(function () {
 });

 $(function () {
-    // reset camera
+    // * reset camera
     $("a#reset-cam").bind("click", function () {
         $.getJSON("/reset_camera", function (data) {
             // do nothing
@@ -1,40 +1,40 @@
 /* * Reset all elements */
 * {
     margin: 0;
     padding: 0;
 }


 /* * HTML elements */
 body {
     font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
     font-size: 18px;
     font-weight: normal;
     line-height: 1.5em;
     width: 800px;
     margin: auto;
 }


 /* * Local selectors */
 #container {
     margin: 0px auto;
     margin-top: 40px;
     width: 100%;
     height: 450px;
     border: 10px #333 solid;
     background-color: black;
 }

 #container #videoElement {
     width: auto;
     height: 100%;
     margin-left: auto;
     margin-right: auto;
     display: block;
     background-color: black;
 }

 #control {
     margin-top: 40px;
 }