Commit 39af5ece authored by YONG-LIN SU's avatar YONG-LIN SU

Initial commit

parents
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "connected-studio",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"from mask_detector import FaceMaskDetector"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "strategic-degree",
"metadata": {},
"outputs": [],
"source": [
"detector = FaceMaskDetector( model_path='/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn',input_size=(640, 640))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "concrete-configuration",
"metadata": {},
"outputs": [],
"source": [
"img = cv2.imread('/home/allen/Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/imgs/1.jpg')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "specified-center",
"metadata": {},
"outputs": [],
"source": [
"detector.predict(img)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "disabled-villa",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
This diff is collapsed.
This diff is collapsed.
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import Ui_MainWindow as ui
from uvc_thermal import Thermal
from camera import Camera
from detector import FaceDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
    """Fever-screening main window (paintEvent-driven variant).

    Grabs RGB and thermal frames, detects faces, aligns each face box onto
    the thermal image, and overlays blackbody-corrected face temperatures
    on both previews. paintEvent ends with self.update(), which schedules
    the next repaint and keeps the capture loop running.
    """
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.resize(640,480)
        # Stretch both preview labels to fill their allotted space.
        self.label.setScaledContents(True)
        self.label_2.setScaledContents(True)
        # Background workers: 160x120 thermal sensor, 640x480 RGB camera.
        self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
        self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
        self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
        # self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
        # Homography-based mapping from RGB-image boxes to thermal coordinates.
        self.aligner = BoxAligner(img_h = 480,img_w = 640)
        # Blackbody heater used as an in-frame temperature reference.
        self.heater = Heater(pwm_pin=12 , target_temp=35.0)
        # Regression model that turns raw readings into corrected temperature.
        self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
        self.thermal.start()
        time.sleep(2)  # give each worker time to come up before starting the next
        self.camera.start()
        time.sleep(2)
        self.heater.start()
        time.sleep(2)
        self.num_frame = 0
        # NOTE(review): QPainter constructed outside paintEvent is unused here
        # and Qt normally warns about painters not bound to an active paint
        # device — confirm intent.
        self.painter = QPainter(self)
    def paintEvent(self, QPaintEvent):
        # Paint event: capture, detect, draw overlays, then schedule a repaint.
        ret, frame = self.camera.capture()
        thermal_ret,thermal_frame, thermal_row = self.thermal.capture()
        if(thermal_ret and ret):
            self.num_frame+=1
            # Hottest raw value in the first 34 thermal columns — presumably
            # the blackbody reference region; TODO confirm orientation.
            blackbody_max = thermal_row[:,:34].max()
            heater_temp = self.heater.last_temp
            # Detect faces
            # if(self.num_frame % 2 == 0):
            if(True):
                stime = time.time()
                boxes, labels, probs = self.detector.predict(frame)
                processing_time = time.time()-stime
                self.label_3.setText(str(processing_time))
                # Walk through every detection result
                for i in range(boxes.shape[0]):
                    box = boxes[i, :]
                    score = probs[i]
                    if(score > 0.9):
                        #scaled_box = anti.scale_box(img_h=480,img_w=640,box=box)
                        #expanded_face = frame[scaled_box[1]:scaled_box[3],scaled_box[0]:scaled_box[2]].copy()
                        #live_score = anti.predict(expanded_face)
                        live_score = 0.9  # anti-spoofing disabled: hard-coded as "live"
                        if(live_score > 0.9):
                            color = (0, 255, 0)
                        else:
                            color = ( 0, 0, 255)
                        thermal_box = self.aligner.box_aligment([box])[0]
                        # Thermal preview is drawn at 1/4 of RGB coordinates.
                        cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), color, 2)
                        cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
                        face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)
                        corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
                        # 0.0092 scales the raw sensor value for display —
                        # TODO confirm units of this calibration constant.
                        cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)
                cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)
            # Show frames in the UI (QImage needs RGB byte order)
            frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)
            Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
            pixMap=QPixmap.fromImage(Qframe)
            thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
            self.label.setPixmap(pixMap)
            self.label_2.setPixmap(thermal_pixMap)
        self.update()
    def closeEvent(self,event):
        # Runs when the window closes: stop all worker threads.
        self.thermal.stop()
        self.camera.stop()
        self.heater.stop()
    def max_temperature(self,box,thermal_row,black_h=0,thermal_height=120,thermal_width=160,image_height=480,image_width=640):
        """Return the hottest raw reading inside `box` mapped onto the thermal grid.

        black_h excludes the top rows (blackbody strip) from the search.
        Returns 0 when the clamped box is empty.
        """
        scale_ratio_h = thermal_height/image_height
        scale_ratio_w = thermal_width/image_width
        x1 = max(int(box[0]*scale_ratio_w),0)
        y1 = max(int(box[1]*scale_ratio_h),black_h)
        x2 = min(int(box[2]*scale_ratio_w),thermal_width)
        y2 = min(int(box[3]*scale_ratio_h),thermal_height)
        box_temperature = thermal_row[y1:y2,x1:x2]
        if(box_temperature.size != 0):
            return box_temperature.max()
        else:
            return 0
if __name__ == '__main__':
    import sys
    import os

    # Build the Qt application and show the main window.
    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = Main()
    main_window.show()
    exit_code = qt_app.exec_()
    # Give worker threads a moment to wind down, then hard-exit so lingering
    # capture threads cannot keep the process alive.
    time.sleep(1)
    os._exit(exit_code)
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
# import Ui_MainWindow as ui
import ui_test as ui
from uvc_thermal import Thermal
from camera import Camera
from detector import FaceDetector
# from mask_detector import FaceMaskDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
from osd import OSD
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
    """Fever-screening main window (QTimer-driven variant, cv2 drawing).

    Captures RGB + thermal frames on a 10 ms timer, detects faces, and keeps
    a rotating display of the last three face crops with their corrected
    temperatures and timestamps.
    """
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.resize(640,480)
        self.rgb_frame.setScaledContents(True)
        self.thermal_frame.setScaledContents(True)
        # Background workers: thermal sensor, RGB camera, blackbody heater, clock OSD.
        self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
        self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
        self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
        # self.detector = FaceMaskDetector(model_path="/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn" , input_size=(640,640))
        # self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
        self.aligner = BoxAligner(img_h = 480,img_w = 640)
        self.heater = Heater(pwm_pin=12 , target_temp=35.0)
        self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
        # Provides a formatted local timestamp string (self.osd.str_time).
        self.osd = OSD(local='Asia/Taipei', str_format='%Y-%m-%d %H:%M:%S')
        self.thermal.start()
        time.sleep(2)  # give each worker time to come up before starting the next
        self.camera.start()
        time.sleep(2)
        self.heater.start()
        time.sleep(2)
        self.osd.start()
        time.sleep(2)
        # Rotating UI slots for the three most recent faces.
        self.faces = [self.face1,self.face2,self.face3]
        self.temps = [self.temp1,self.temp2,self.temp3]
        self.times = [self.time1,self.time2,self.time3]
        # Extent (in thermal pixels) of the blackbody reference region.
        self.black_line = 34
        # Drive capture from the Qt event loop instead of paintEvent.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.capture)
        self.timer.start(10)
        # self.painter = QPainter(self)
    def capture(self):
        # Timer slot: grab frames, run detection, draw overlays, update the UI.
        ret, frame = self.camera.capture()
        thermal_ret,thermal_frame, thermal_row = self.thermal.capture()
        if(thermal_ret and ret):
            blackbody_max = thermal_row[:,:self.black_line].max()
            heater_temp = self.heater.last_temp
            # Detect faces
            str_time = self.osd.str_time
            boxes, labels, probs = self.detector.predict(frame)
            # Walk through every detection result
            frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            for i in range(boxes.shape[0]):
                box = boxes[i, :]
                score = probs[i]
                if(score > 0.9):
                    # color = (255, 0, 0)
                    thermal_box = self.aligner.box_aligment([box])[0]
                    face_frame = frame[box[1]:box[3],box[0]:box[2]].copy()
                    Qface_frame=QImage(face_frame.data,face_frame.shape[1],face_frame.shape[0],face_frame.shape[1]*3,QImage.Format_RGB888)
                    face_pixMap=QPixmap.fromImage(Qface_frame)
                    face_temp = self.max_temperature(thermal_box,thermal_row,black_h=self.black_line)
                    corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
                    # Colour-code by temperature: red = fever, yellow = low, green = normal.
                    if(corrected_face_temp > 37.0):
                        color = (255, 0, 0)
                    elif(corrected_face_temp < 35.0):
                        color = (255, 255, 0)
                    else:
                        color = (0, 255, 0)
                    # Thermal preview is drawn at 1/4 of RGB coordinates.
                    cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), color, 2)
                    cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
                    cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, color, 1, cv2.LINE_AA)
                    cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)
                    # Rotate the crop into one of the three display slots.
                    j = i%len(self.faces)
                    self.faces[j].setPixmap(face_pixMap)
                    self.temps[j].setStyleSheet("color: rgb{};".format(color))
                    self.temps[j].setText("{:.2f}".format(corrected_face_temp))
                    self.times[j].setText(str_time)
            # Show frames in the UI
            # frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            # thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            # Draw the blackbody boundary line
            Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)
            Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
            pixMap=QPixmap.fromImage(Qframe)
            thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
            qp = QPainter(thermal_pixMap)
            pen = QPen(Qt.red, 3)
            qp.setPen(pen)
            qp.drawLine(0, self.black_line, 160, self.black_line)
            qp.end()
            self.rgb_frame.setPixmap(pixMap)
            self.thermal_frame.setPixmap(thermal_pixMap)
            self.update()
    def closeEvent(self,event):
        # Runs when the window closes: stop all worker threads.
        self.thermal.stop()
        self.camera.stop()
        self.heater.stop()
        self.osd.stop()
    def max_temperature(self,box,thermal_row,black_h=0,thermal_height=120,thermal_width=160,image_height=480,image_width=640):
        """Return the hottest raw reading inside `box` mapped onto the thermal grid.

        black_h excludes the top rows (blackbody strip) from the search.
        Returns 0 when the clamped box is empty.
        """
        scale_ratio_h = thermal_height/image_height
        scale_ratio_w = thermal_width/image_width
        x1 = max(int(box[0]*scale_ratio_w),0)
        y1 = max(int(box[1]*scale_ratio_h),black_h)
        x2 = min(int(box[2]*scale_ratio_w),thermal_width)
        y2 = min(int(box[3]*scale_ratio_h),thermal_height)
        box_temperature = thermal_row[y1:y2,x1:x2]
        if(box_temperature.size != 0):
            return box_temperature.max()
        else:
            return 0
if __name__ == '__main__':
    import sys
    import os

    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = Main()
    main_window.show()
    exit_code = qt_app.exec_()
    # Hard-exit so lingering capture threads cannot keep the process alive.
    os._exit(exit_code)
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
# import Ui_MainWindow as ui
import ui_test as ui
from uvc_thermal import Thermal
from camera import Camera
from detector import FaceDetector
# from mask_detector import FaceMaskDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
from osd import OSD
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
    """Fever-screening main window (QTimer-driven variant, QPainter drawing).

    Like the cv2-drawing variant, but all overlays (boxes, temperatures,
    blackbody line, OSD timestamp) are painted directly onto the QPixmaps
    with QPainter instead of being rasterised into the numpy frames.
    """
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.resize(640,480)
        self.rgb_frame.setScaledContents(True)
        self.thermal_frame.setScaledContents(True)
        # Background workers: thermal sensor, RGB camera, blackbody heater, clock OSD.
        self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
        self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
        self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
        # self.detector = FaceMaskDetector(model_path="/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn" , input_size=(640,640))
        # self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
        self.aligner = BoxAligner(img_h = 480,img_w = 640)
        self.heater = Heater(pwm_pin=12 , target_temp=35.0)
        self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
        # Provides a formatted local timestamp string (self.osd.str_time).
        self.osd = OSD(local='Asia/Taipei', str_format='%Y-%m-%d %H:%M:%S')
        self.thermal.start()
        time.sleep(2)  # give each worker time to come up before starting the next
        self.camera.start()
        time.sleep(2)
        self.heater.start()
        time.sleep(2)
        self.osd.start()
        time.sleep(2)
        # Rotating UI slots for the three most recent faces.
        self.faces = [self.face1,self.face2,self.face3]
        self.temps = [self.temp1,self.temp2,self.temp3]
        self.times = [self.time1,self.time2,self.time3]
        # Extent (in thermal pixels) of the blackbody reference region.
        self.black_line = 34
        # Drive capture from the Qt event loop.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.capture)
        self.timer.start(10)
        # self.painter = QPainter(self)
    def capture(self):
        # Timer slot: capture, detect, then draw every overlay with QPainter.
        ret, frame = self.camera.capture()
        thermal_ret,thermal_frame, thermal_row = self.thermal.capture()
        if(thermal_ret and ret):
            blackbody_max = thermal_row[:,:self.black_line].max()
            heater_temp = self.heater.last_temp
            # Detect faces
            str_time = self.osd.str_time
            boxes, labels, probs = self.detector.predict(frame)
            # Show frames in the UI (QImage needs RGB byte order)
            frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)
            Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
            pixMap=QPixmap.fromImage(Qframe)
            thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
            # Create painter objects that draw directly onto the pixmaps
            qp = QPainter(pixMap)
            thermal_qp = QPainter(thermal_pixMap)
            # Walk through every detection result and draw it
            for i in range(boxes.shape[0]):
                box = boxes[i, :]
                score = probs[i]
                if(score > 0.9):
                    thermal_box = self.aligner.box_aligment([box])[0]
                    # NOTE(review): face_frame is unused in this variant — the
                    # thumbnail below is cropped from the pixmap instead.
                    face_frame = frame[box[1]:box[3],box[0]:box[2]].copy()
                    face_temp = self.max_temperature(thermal_box,thermal_row,black_h=self.black_line)
                    corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
                    face_rect = QRect(box[0], box[1], box[2]-box[0], box[3]-box[1])
                    # Thermal preview is drawn at 1/4 of RGB coordinates.
                    thermal_rect = QRect(thermal_box[0]//4, thermal_box[1]//4, (thermal_box[2]//4 - thermal_box[0]//4), (thermal_box[3]//4 - thermal_box[1]//4))
                    # Classify the temperature range: red = fever, yellow = low, green = normal.
                    if(corrected_face_temp > 37.0):
                        pen = QPen(Qt.red, 3)
                        color = (255, 0, 0)
                    elif(corrected_face_temp < 35.0):
                        pen = QPen(Qt.yellow, 3)
                        color = (255, 255, 0)
                    else:
                        pen = QPen(Qt.green, 3)
                        color = (0, 255, 0)
                    # Show the face thumbnail in one of the rotating slots
                    j = i%len(self.faces)
                    face_crop = pixMap.copy(face_rect)
                    self.faces[j].setPixmap(face_crop)
                    self.temps[j].setStyleSheet("color: rgb{};".format(color))
                    self.temps[j].setText("{:.2f}".format(corrected_face_temp))
                    self.times[j].setText(str_time)
                    # Set the drawing colour
                    qp.setPen(pen)
                    thermal_qp.setPen(pen)
                    # Draw onto the QPixmaps
                    qp.drawRect(face_rect)
                    qp.drawText(face_rect, Qt.AlignCenter , "{:.2f}".format(corrected_face_temp))
                    thermal_qp.drawRect(thermal_rect)
            # Draw the blackbody boundary line
            thermal_qp.setPen(QPen(Qt.red, 3))
            thermal_qp.drawLine(0, self.black_line, 160, self.black_line)
            thermal_qp.setPen(QPen(Qt.black, 3))
            thermal_qp.drawText(QRect(0,0,160,self.black_line), Qt.AlignCenter , "{:.2f}".format(heater_temp))
            # Draw the OSD timestamp
            qp.setPen(QPen(Qt.black, 3))
            qp.drawText(QRect(0,0,640,20),Qt.AlignLeft,str_time)
            # Release the painters before handing the pixmaps to the labels
            qp.end()
            thermal_qp.end()
            # Push the pixmaps to the preview labels
            self.rgb_frame.setPixmap(pixMap)
            self.thermal_frame.setPixmap(thermal_pixMap)
            self.update()
    def closeEvent(self,event):
        # Runs when the window closes: stop all worker threads.
        self.thermal.stop()
        self.camera.stop()
        self.heater.stop()
        self.osd.stop()
    def max_temperature(self,box,thermal_row,black_h=0,thermal_height=120,thermal_width=160,image_height=480,image_width=640):
        """Return the hottest raw reading inside `box` mapped onto the thermal grid.

        black_h excludes the top rows (blackbody strip) from the search.
        Returns 0 when the clamped box is empty.
        """
        scale_ratio_h = thermal_height/image_height
        scale_ratio_w = thermal_width/image_width
        x1 = max(int(box[0]*scale_ratio_w),0)
        y1 = max(int(box[1]*scale_ratio_h),black_h)
        x2 = min(int(box[2]*scale_ratio_w),thermal_width)
        y2 = min(int(box[3]*scale_ratio_h),thermal_height)
        box_temperature = thermal_row[y1:y2,x1:x2]
        if(box_temperature.size != 0):
            return box_temperature.max()
        else:
            return 0
if __name__ == '__main__':
    import sys
    import os

    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = Main()
    main_window.show()
    exit_code = qt_app.exec_()
    # Hard-exit so lingering capture threads cannot keep the process alive.
    os._exit(exit_code)
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
# import Ui_MainWindow as ui
import ui_test as ui
from uvc_thermal import Thermal
from camera import Camera
# from detector import FaceDetector
from mask_detector import FaceMaskDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
from osd import OSD
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
    """Fever-screening main window (face-mask detection variant).

    Uses RetinaFaceMask to detect faces and whether a mask is worn: the box
    colour encodes mask status, the text colour encodes temperature range.
    Only a single face slot (face1/temp1/time1) is updated per detection.
    """
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.resize(640,480)
        self.rgb_frame.setScaledContents(True)
        self.thermal_frame.setScaledContents(True)
        # Background workers: thermal sensor, RGB camera, blackbody heater, clock OSD.
        self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
        self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
        # self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
        self.detector = FaceMaskDetector(model_path="/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn" , input_size=(640,640))
        # self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
        self.aligner = BoxAligner(img_h = 480,img_w = 640)
        self.heater = Heater(pwm_pin=12 , target_temp=35.0)
        self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
        # Provides a formatted local timestamp string (self.osd.str_time).
        self.osd = OSD(local='Asia/Taipei', str_format='%Y-%m-%d %H:%M:%S')
        self.thermal.start()
        time.sleep(2)  # give each worker time to come up before starting the next
        self.camera.start()
        time.sleep(2)
        self.heater.start()
        time.sleep(2)
        self.osd.start()
        time.sleep(2)
        # NOTE(review): kept empty and unused in this variant — only the
        # face1/temp1/time1 widgets are updated below.
        self.faces = []
        # Drive capture from the Qt event loop.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.capture)
        self.timer.start(10)
        # self.painter = QPainter(self)
    def capture(self):
        # Timer slot: capture, detect faces + masks, draw overlays, update UI.
        ret, frame = self.camera.capture()
        thermal_ret,thermal_frame, thermal_row = self.thermal.capture()
        if(thermal_ret and ret):
            # Hottest raw value in the first 34 thermal columns — presumably
            # the blackbody reference region; TODO confirm orientation.
            blackbody_max = thermal_row[:,:34].max()
            heater_temp = self.heater.last_temp
            # Detect faces
            str_time = self.osd.str_time
            boxes, labels, probs, mask_probs = self.detector.predict(frame)
            # Walk through every detection result
            frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            for i in range(boxes.shape[0]):
                box = boxes[i, :]
                score = probs[i]
                mask_score = mask_probs[i]
                if(score > 0.9):
                    # color = (255, 0, 0)
                    thermal_box = self.aligner.box_aligment([box])[0]
                    face_frame = frame[box[1]:box[3],box[0]:box[2]].copy()
                    Qface_frame=QImage(face_frame.data,face_frame.shape[1],face_frame.shape[0],face_frame.shape[1]*3,QImage.Format_RGB888)
                    face_pixMap=QPixmap.fromImage(Qface_frame)
                    face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)
                    corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
                    # Box colour: green = mask worn, red = no mask.
                    if(mask_score > 0.7):
                        bbox_color = (0, 255, 0)
                    else:
                        bbox_color = (255, 0, 0)
                    # Text colour: red = fever, yellow = low, green = normal.
                    if(corrected_face_temp > 37.0):
                        color = (255, 0, 0)
                    elif(corrected_face_temp < 35.0):
                        color = (255, 255, 0)
                    else:
                        color = (0, 255, 0)
                    # Thermal preview is drawn at 1/4 of RGB coordinates.
                    cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), bbox_color, 2)
                    cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), bbox_color, 2)
                    cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, color, 1, cv2.LINE_AA)
                    cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)
                    self.face1.setPixmap(face_pixMap)
                    self.temp1.setStyleSheet("color: rgb{};".format(color))
                    self.temp1.setText("{:.2f}".format(corrected_face_temp))
                    self.time1.setText(str_time)
            # Show frames in the UI
            # frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            # thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)
            Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
            pixMap=QPixmap.fromImage(Qframe)
            thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
            self.rgb_frame.setPixmap(pixMap)
            self.thermal_frame.setPixmap(thermal_pixMap)
            self.update()
    def closeEvent(self,event):
        # Runs when the window closes: stop all worker threads.
        self.thermal.stop()
        self.camera.stop()
        self.heater.stop()
        self.osd.stop()
    def max_temperature(self,box,thermal_row,black_h=0,thermal_height=120,thermal_width=160,image_height=480,image_width=640):
        """Return the hottest raw reading inside `box` mapped onto the thermal grid.

        black_h excludes the top rows (blackbody strip) from the search.
        Returns 0 when the clamped box is empty.
        """
        scale_ratio_h = thermal_height/image_height
        scale_ratio_w = thermal_width/image_width
        x1 = max(int(box[0]*scale_ratio_w),0)
        y1 = max(int(box[1]*scale_ratio_h),black_h)
        x2 = min(int(box[2]*scale_ratio_w),thermal_width)
        y2 = min(int(box[3]*scale_ratio_h),thermal_height)
        box_temperature = thermal_row[y1:y2,x1:x2]
        if(box_temperature.size != 0):
            return box_temperature.max()
        else:
            return 0
if __name__ == '__main__':
    import sys
    import os

    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = Main()
    main_window.show()
    exit_code = qt_app.exec_()
    # Hard-exit so lingering capture threads cannot keep the process alive.
    os._exit(exit_code)
# Set uvc thermal device permissions (temporary workaround)
# NOTE(review): chmod 777 makes these device nodes world-writable; prefer
# udev rules granting access to a dedicated group instead.
sudo chmod -R 777 /dev/bus/usb/*
sudo chmod 777 /dev/video*
sudo chmod 777 /dev/spidev0.*
sudo chmod 777 /dev/gpio*
\ No newline at end of file
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout: RGB preview (left) + info column (right).

    Auto-generated from 'untitled.ui' — regenerate with pyuic5 rather than
    editing by hand (see the header warning in this file).
    """
    def setupUi(self, MainWindow,window_size=(640,480)):
        """Build all widgets and lay them out on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(window_size[0],window_size[1])
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Main layout: large preview on the left, stacked labels on the right.
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
        self.label.setToolTipDuration(0)
        self.label.setStyleSheet("background-color: rgb(170, 0, 0);")
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setStyleSheet("background-color: rgb(170, 255, 0);")
        self.label_2.setObjectName("label_2")
        self.verticalLayout.addWidget(self.label_2)
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setObjectName("label_3")
        self.verticalLayout.addWidget(self.label_3)
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setStyleSheet("background-color: rgb(170, 255, 255);")
        self.label_4.setObjectName("label_4")
        self.verticalLayout.addWidget(self.label_4)
        self.label_5 = QtWidgets.QLabel(self.centralwidget)
        self.label_5.setObjectName("label_5")
        self.verticalLayout.addWidget(self.label_5)
        self.horizontalLayout.addLayout(self.verticalLayout)
        # 4:1 width ratio between the preview and the info column.
        self.horizontalLayout.setStretch(0, 4)
        self.horizontalLayout.setStretch(1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1094, 21))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        MainWindow.setMenuBar(self.menubar)
        self.menubar.addAction(self.menu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign all translatable widget texts."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "rgb theraml"))
        self.label_2.setText(_translate("MainWindow", "theraml frame"))
        self.label_3.setText(_translate("MainWindow", "TextLabel"))
        self.label_4.setText(_translate("MainWindow", "TextLabel"))
        self.label_5.setText(_translate("MainWindow", "TextLabel"))
        self.menu.setTitle(_translate("MainWindow", "選單"))
if __name__ == "__main__":
    import sys

    # Stand-alone preview of the generated layout.
    preview_app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    form = Ui_MainWindow()
    form.setupUi(main_window)
    main_window.show()
    sys.exit(preview_app.exec_())
import cv2
import numpy as np
class BoxAligner:
    """Projects face boxes from the RGB image into thermal-image coordinates.

    Uses a 3x3 homography matrix ``M`` calibrated offline (and refreshable
    via get_omography_matrix). Output coordinates are clamped to the image
    bounds given at construction.
    """

    def __init__(self, img_h, img_w):
        self.img_h = img_h
        self.img_w = img_w
        # Default homography calibrated offline. Note its third row is not
        # [0, 0, 1], so the perspective divide below is significant.
        self.M = np.array([[ 8.13802980e-01, -2.63523694e-02, 9.30324875e+01],
                           [ 2.10292692e-02, 7.84319221e-01, 7.70246127e+01],
                           [ 1.48500793e-04, -1.53618915e-04, 1.00000000e+00]])

    def box_aligment(self, boxes):
        """Map [x1, y1, x2, y2] boxes through the homography.

        Returns a list of integer boxes clamped to [0, img_w] x [0, img_h].

        Fix: a homography produces homogeneous coordinates, so the result of
        M @ [x, y, 1] must be divided by its third component w before use
        (as cv2.perspectiveTransform does). The previous version skipped the
        divide, introducing up to ~10% positional error at the frame edges.
        """
        aligned_boxes = []
        for b in boxes:
            x1, y1, w1 = np.dot(self.M, [b[0], b[1], 1])
            x2, y2, w2 = np.dot(self.M, [b[2], b[3], 1])
            # Perspective divide back to Cartesian coordinates.
            x1, y1 = x1 / w1, y1 / w1
            x2, y2 = x2 / w2, y2 / w2
            x1 = max(int(x1), 0)
            y1 = max(int(y1), 0)
            x2 = min(int(x2), self.img_w)
            y2 = min(int(y2), self.img_h)
            aligned_boxes.append([x1, y1, x2, y2])
        return aligned_boxes

    def get_omography_matrix(self, rgb_points, thermal_points):
        """Re-estimate the homography from point correspondences via RANSAC.

        (Method name keeps its historical typo — "homography" — for API
        compatibility with existing callers.)
        """
        h, mask = cv2.findHomography(rgb_points, thermal_points, cv2.RANSAC)
        self.M = h
\ No newline at end of file
import math
import MNN
import cv2
import numpy as np
class AntiSpoofing:
    """Face anti-spoofing classifier backed by a MiniFASNet MNN model."""

    def __init__(self, model_path="../model/4_0_0_80x80_MiniFASNetV1SE.mnn"):
        self.interpreter = MNN.Interpreter(model_path)
        self.session = self.interpreter.createSession({'numThread': 4})
        self.input_tensor = self.interpreter.getSessionInput(self.session)

    def predict(self, image):
        """Run the model on a face crop and return the 'live' probability."""
        # Resize to the 80x80 network input and reorder HWC -> CHW.
        resized = cv2.resize(image, (80, 80))
        chw = resized.transpose((2, 0, 1)).astype(np.float32)
        tmp_input = MNN.Tensor((1, 3, 80, 80), MNN.Halide_Type_Float, chw, MNN.Tensor_DimensionType_Caffe)
        self.input_tensor.copyFrom(tmp_input)
        self.interpreter.runSession(self.session)
        isLive = self.interpreter.getSessionOutput(self.session, "Reshape176").getData()
        # Index 1 of the softmaxed logits is the "live" class probability.
        return self.softmax_py(isLive)[1]

    def softmax_py(self, logits_data):
        """Softmax over raw logits; each probability rounded to 3 decimals."""
        exps = [math.exp(value) for value in logits_data]
        total = sum(exps)
        return [round(value / total, 3) for value in exps]

    def scale_box(self, img_h, img_w, box, scale=2):
        """Expand a [x1, y1, x2, y2] box about itself by `scale`, clamped to the image."""
        x1, y1, x2, y2 = box
        return [max(scale * x1 - x2, 0),
                max(scale * y1 - y2, 0),
                min(scale * x2 - x1, img_w),
                min(scale * y2 - y1, img_h)]
\ No newline at end of file
from threading import Thread
import time, numpy as np, cv2
class Camera(object):
    """Threaded V4L2 camera reader.

    A background thread continuously grabs frames; capture() returns the
    most recent one without blocking.
    """

    def __init__(self, width, height, framerate, log=None, channel=0, flip=None):
        # Fall back to the built-in print-based logger when none is supplied.
        self.__log = self.__log if log is None else log
        self.__isCaptured = False
        self.__frame = None
        self.isOpened = False
        self.width = width
        self.height = height
        self.framerate = framerate
        self.channel = channel
        # cv2.flip code (0 = vertical, 1 = horizontal, -1 = both) or None.
        self.flip = flip
        self.__thread = Thread(target=self.__job)

    def start(self):
        """Start the background capture thread."""
        self.__isCaptured = False
        self.__frame = None
        self.isOpened = True
        self.__thread.start()

    def stop(self):
        """Signal the capture loop to exit and clear the cached frame."""
        self.__isCaptured = False
        self.__frame = None
        self.isOpened = False

    def capture(self):
        """Return (captured_ok, latest_frame); (False, None) when not running."""
        return self.__isCaptured, self.__frame

    def __job(self):
        """Capture loop: read frames at ~framerate until stop() is called."""
        fps = self.framerate
        dev = cv2.VideoCapture(self.channel, cv2.CAP_V4L2)
        while self.isOpened:
            if dev.isOpened():
                ret, frame = dev.read()
                if not ret:
                    # Device delivered no frame: leave the loop and clean up.
                    break
                if(self.flip is not None):
                    self.__frame = cv2.flip(frame, self.flip)
                else:
                    # BUG FIX: previously stored the tuple ``frame, 1`` here,
                    # which crashed consumers expecting an ndarray (.shape,
                    # cv2.cvtColor) whenever flip was None.
                    self.__frame = frame
                self.__isCaptured = ret
            time.sleep(1 / fps)
        dev.release()
        self.__isCaptured = False
        self.__frame = None
        self.__log("camera stop")

    def __gstreamer(self, width, height, framerate, flip_method=2):
        """Build a nvarguscamerasrc GStreamer pipeline string (Jetson CSI cameras)."""
        return (
            f'nvarguscamerasrc ! video/x-raw(memory:NVMM), ' +
            f'width=(int){width}, height=(int){height}, ' +
            f'format=(string)NV12, framerate=(fraction){framerate}/1 ! ' +
            f'nvvidconv flip-method={flip_method} ! ' +
            f'video/x-raw, width=(int){width}, height=(int){height}, format=(string)BGRx ! ' +
            'videoconvert ! video/x-raw, format=(string)BGR ! appsink')

    def __log(self, message):
        """Default logger used when no external log callable is provided."""
        print(message)
import numpy as np
import os
from scipy.optimize import curve_fit
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import pickle
class Correcter(object):
    """Corrects raw thermal readings into a calibrated temperature.

    Wraps a pre-trained SVR model that maps (raw face reading, raw blackbody
    reading, blackbody temperature) to the corrected face temperature.
    """

    def __init__(self, model_path="./20210421_correcter.sav"):
        # NOTE(security): pickle.load executes arbitrary code from the file —
        # only load model files from a trusted source.
        # Fix: use a context manager so the file handle is always closed
        # (previously `pickle.load(open(...))` leaked it).
        with open(model_path, 'rb') as f:
            self.clf = pickle.load(f)

    def mean_absolute_percentage_error(self, y_true, y_pred):
        """Return the mean absolute percentage error, in percent."""
        return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

    def fitting(self):
        """Train an SVR corrector from the augmented log data and report metrics.

        Expects './logs/argumented_data.txt' with space-separated rows:
        detect_raw blackbody_raw blackbody_temp detect_temp
        The model learns the offset (blackbody_temp - detect_temp) from the
        two raw sensor readings. Returns the fitted model.

        NOTE(review): this trains on 2 features while predict() feeds the
        loaded model 3 features — the shipped .sav model evidently differs
        from what this method produces; confirm before retraining.
        """
        x = []
        y = []
        with open(os.path.join("./logs/argumented_data.txt")) as log:
            lines = log.readlines()
        for line in lines:
            rowdata = line.strip('\n').split(' ')
            detect_rowdata = float(rowdata[0])
            blackbody_rowdata = float(rowdata[1])
            blackbody_temp = float(rowdata[2])
            detect_temp = float(rowdata[3])
            x.append([detect_rowdata, blackbody_rowdata])
            y.append(blackbody_temp - detect_temp)
        X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)
        clf = SVR(kernel='poly', degree=1, gamma=0.001)
        clf.fit(X_train, y_train)
        pred2 = clf.predict(X_test)
        r2 = r2_score(y_test, pred2)
        mse = mean_squared_error(y_test, pred2)
        mape = self.mean_absolute_percentage_error(y_test, pred2)
        # Fix: report the kernel actually used instead of the hard-coded 'rbf'.
        print("kernel: {}, r_squre: {}, MSE: {}, MAPE: {}".format(clf.kernel, r2, mse, mape))
        return clf

    def predict(self, x_detect_k, x_black_k, x_black_c):
        """Predict the corrected temperature for one face reading.

        x_detect_k: raw face reading; x_black_k: raw blackbody reading;
        x_black_c: blackbody temperature (Celsius).
        """
        return self.clf.predict([[x_detect_k, x_black_k, x_black_c]])[0]
\ No newline at end of file
import math
import MNN
import cv2
import numpy as np
import torch
class FaceDetector:
def __init__(self,model_path="../model/version-slim/slim-320.mnn",input_size=(320,240)):
    """Load the MNN face-detection model and precompute SSD prior boxes.

    model_path: path to the .mnn model file.
    input_size: (width, height) expected by the network.
    """
    self.input_size = input_size
    # Input normalisation: (pixel - 127) / 128.
    self.image_mean = np.array([127, 127, 127])
    self.image_std = 128.0
    self.iou_threshold = 0.3
    self.threshold = 0.7
    # SSD box-decoding variances.
    self.center_variance = 0.1
    self.size_variance = 0.2
    # Anchor sizes per feature-map level and the matching strides.
    self.min_boxes = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
    self.strides = [8, 16, 32, 64]
    self.priors = self.define_img_size(self.input_size)
    self.interpreter = MNN.Interpreter(model_path)
    self.session = self.interpreter.createSession({'numThread':4})
    self.input_tensor = self.interpreter.getSessionInput(self.session)
def predict(self,image_ori):
    """Detect faces in a BGR image.

    Returns (boxes, labels, probs): integer pixel-space boxes
    [x1, y1, x2, y2], class labels, and confidence scores.
    """
    image = cv2.resize(image_ori, self.input_size)
    image = (image - self.image_mean) / self.image_std
    # HWC -> CHW for the Caffe-layout MNN input tensor.
    image = image.transpose((2, 0, 1))
    image = image.astype(np.float32)
    tmp_input = MNN.Tensor((1, 3, self.input_size[1], self.input_size[0]), MNN.Halide_Type_Float, image, MNN.Tensor_DimensionType_Caffe)
    self.input_tensor.copyFrom(tmp_input)
    self.interpreter.runSession(self.session)
    scores = self.interpreter.getSessionOutput(self.session, "scores").getData()
    boxes = self.interpreter.getSessionOutput(self.session, "boxes").getData()
    # Reshape flat outputs to batched (1, N, 4) / (1, N, 2) arrays.
    boxes = np.expand_dims(np.reshape(boxes, (-1, 4)), axis=0)
    scores = np.expand_dims(np.reshape(scores, (-1, 2)), axis=0)
    # Decode SSD offsets against the precomputed priors, then NMS + rescale.
    boxes = self.convert_locations_to_boxes(boxes, self.priors, self.center_variance, self.size_variance)
    boxes = self.center_form_to_corner_form(boxes)
    boxes, labels, probs = self.decode(image_ori.shape[1], image_ori.shape[0], scores, boxes, self.threshold)
    return boxes, labels, probs
def define_img_size(self,image_size):
shrinkage_list = []
feature_map_w_h_list = []
for size in image_size:
feature_map = [math.ceil(size / stride) for stride in self.strides]
feature_map_w_h_list.append(feature_map)
for i in range(0, len(image_size)):
shrinkage_list.append(self.strides)
priors = self.generate_priors(feature_map_w_h_list, shrinkage_list, image_size, self.min_boxes)
return priors
def generate_priors(self,feature_map_list, shrinkage_list, image_size, min_boxes, clamp=True):
priors = []
for index in range(0, len(feature_map_list[0])):
scale_w = image_size[0] / shrinkage_list[0][index]
scale_h = image_size[1] / shrinkage_list[1][index]
for j in range(0, feature_map_list[1][index]):
for i in range(0, feature_map_list[0][index]):
x_center = (i + 0.5) / scale_w
y_center = (j + 0.5) / scale_h
for min_box in min_boxes[index]:
w = min_box / image_size[0]
h = min_box / image_size[1]
priors.append([
x_center,
y_center,
w,
h
])
print("priors nums:{}".format(len(priors)))
priors = torch.tensor(priors)
if clamp:
torch.clamp(priors, 0.0, 1.0, out=priors)
return priors
def decode(self,width, height, confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
boxes = boxes[0]
confidences = confidences[0]
picked_box_probs = []
picked_labels = []
for class_index in range(1, confidences.shape[1]):
probs = confidences[:, class_index]
mask = probs > prob_threshold
probs = probs[mask]
if probs.shape[0] == 0:
continue
subset_boxes = boxes[mask, :]
box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
box_probs = self.hard_nms(box_probs,
iou_threshold=iou_threshold,
top_k=top_k,
)
picked_box_probs.append(box_probs)
picked_labels.extend([class_index] * box_probs.shape[0])
if not picked_box_probs:
return np.array([]), np.array([]), np.array([])
picked_box_probs = np.concatenate(picked_box_probs)
picked_box_probs[:, 0] *= width
picked_box_probs[:, 1] *= height
picked_box_probs[:, 2] *= width
picked_box_probs[:, 3] *= height
return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4]
def center_form_to_corner_form(self,locations):
return np.concatenate([locations[..., :2] - locations[..., 2:] / 2,
locations[..., :2] + locations[..., 2:] / 2], len(locations.shape) - 1)
def convert_locations_to_boxes(self,locations, priors, center_variance,
size_variance):
"""Convert regressional location results of SSD into boxes in the form of (center_x, center_y, h, w).
The conversion:
$$predicted\_center * center_variance = \frac {real\_center - prior\_center} {prior\_hw}$$
$$exp(predicted\_hw * size_variance) = \frac {real\_hw} {prior\_hw}$$
We do it in the inverse direction here.
Args:
locations (batch_size, num_priors, 4): the regression output of SSD. It will contain the outputs as well.
priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes.
center_variance: a float used to change the scale of center.
size_variance: a float used to change of scale of size.
Returns:
boxes: priors: [[center_x, center_y, h, w]]. All the values
are relative to the image size.
"""
# priors can have one dimension less.
if len(priors.shape) + 1 == len(locations.shape):
priors = np.expand_dims(priors, 0)
return np.concatenate([
locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],
np.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
], axis=len(locations.shape) - 1)
def area_of(self,left_top, right_bottom):
"""Compute the areas of rectangles given two corners.
Args:
left_top (N, 2): left top corner.
right_bottom (N, 2): right bottom corner.
Returns:
area (N): return the area.
"""
hw = np.clip(right_bottom - left_top, 0.0, None)
return hw[..., 0] * hw[..., 1]
def iou_of(self,boxes0, boxes1, eps=1e-5):
"""Return intersection-over-union (Jaccard index) of boxes.
Args:
boxes0 (N, 4): ground truth boxes.
boxes1 (N or 1, 4): predicted boxes.
eps: a small number to avoid 0 as denominator.
Returns:
iou (N): IoU values.
"""
overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = self.area_of(overlap_left_top, overlap_right_bottom)
area0 = self.area_of(boxes0[..., :2], boxes0[..., 2:])
area1 = self.area_of(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
def hard_nms(self,box_scores, iou_threshold, top_k=-1, candidate_size=200):
"""
Args:
box_scores (N, 5): boxes in corner-form and probabilities.
iou_threshold: intersection over union threshold.
top_k: keep top_k results. If k <= 0, keep all the results.
candidate_size: only consider the candidates with the highest scores.
Returns:
picked: a list of indexes of the kept boxes
"""
scores = box_scores[:, -1]
boxes = box_scores[:, :-1]
picked = []
# _, indexes = scores.sort(descending=True)
indexes = np.argsort(scores)
# indexes = indexes[:candidate_size]
indexes = indexes[-candidate_size:]
while len(indexes) > 0:
# current = indexes[0]
current = indexes[-1]
picked.append(current)
if 0 < top_k == len(picked) or len(indexes) == 1:
break
current_box = boxes[current, :]
# indexes = indexes[1:]
indexes = indexes[:-1]
rest_boxes = boxes[indexes, :]
iou = self.iou_of(
rest_boxes,
np.expand_dims(current_box, axis=0),
)
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :]
\ No newline at end of file
import time
import os
import math
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
import RPi.GPIO as GPIO
import board
import busio
import numpy as np
import math
import cv2
from threading import Thread
class Heater(object):
    """PWM-driven heater (blackbody reference) with a thermistor feedback loop.

    A background thread reads an NTC thermistor through an MCP3008 ADC over
    SPI and adjusts the PWM duty cycle proportionally so the heater settles
    at `target_temp` (Celsius). The most recent reading is exposed as
    `last_temp`.
    """
    def __init__(self,pwm_pin,target_temp):
        # pwm_pin: BCM pin number driving the heater; target_temp: setpoint in C.
        self.target_temp = target_temp
        self.pin = pwm_pin
        self.last_temp = 0.0     # most recent thermistor reading, updated by job()
        self.isOpened = False    # loop-run flag; cleared by stop()
        self.thread = Thread(target=self.job)
    # Convert data to voltage level
    def ReadVolts(self,data,deci,input_volts=3.3):
        """Convert a raw 10-bit ADC count (0..1023) to volts, rounded to `deci` places."""
        volts = (data * input_volts) / float(1023)
        volts = round(volts,deci)
        return volts
    def GetTemperature(self,data,input_volts=3.3):
        """Convert a raw ADC count to Celsius via the NTC B-parameter equation.

        Constants: Rp = 10 kOhm nominal resistance, B = 3950, referenced at
        25 C. `Rt=vol*1000/(input_volts-vol)` implies a voltage divider
        (series resistor presumably 1 kOhm -- TODO confirm against wiring).
        Returns -1 as a sentinel when the computed ratio is invalid.
        The trailing +0.5 looks like an empirical calibration offset --
        NOTE(review): confirm.
        """
        Rp=10000
        T2=273.15+25
        Bx=3950
        Ka=273.15
        vol = (data * input_volts) / float(1023)
        Rt=vol*1000/(input_volts-vol)
        if((Rt/Rp)<0):
            return -1
        else:
            return 1/(1/T2+math.log(Rt/Rp)/Bx)-Ka+0.5
    def job(self):
        """Control loop: read ADC channel 0 at ~10 Hz and steer PWM duty.

        Uses a proportional step (gain `alpha`) on the error between the
        measured temperature and the setpoint; duty cycle clamped to [0, 100].
        NOTE(review): the ADC here is read against a 5.0 V reference, not the
        3.3 V default used elsewhere -- confirm the board wiring.
        """
        SPI_PORT = 0
        SPI_DEVICE = 0
        SIGNAL_CHANNEL = 0
        mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.pin,GPIO.OUT)
        p = GPIO.PWM(self.pin,1000)  # 1 kHz PWM
        duty_cycle = 100.0           # start at full power
        alpha = 1.0                  # proportional gain (% duty per degree of error)
        p.start(duty_cycle)
        while self.isOpened:
            data = mcp.read_adc(SIGNAL_CHANNEL)
            # 0 and 1023 are treated as invalid (open/shorted sensor) and skipped.
            if(data!=0 and data!= 1023):
                volts = (data * 5.0) / float(1023)  # NOTE(review): computed but unused (debug leftover?)
                temp = self.GetTemperature(data,5.0)
                self.last_temp = temp
                # print(data,volts,temp,duty_cycle)
                duty_cycle-=alpha*(temp-self.target_temp)
                duty_cycle = 100.0 if duty_cycle > 100.0 else duty_cycle
                duty_cycle = 0.0 if duty_cycle < 0.0 else duty_cycle
                p.ChangeDutyCycle(duty_cycle)
            time.sleep(0.1)
        p.stop()
    def start(self):
        """Launch the control loop thread. Can only be called once per instance."""
        self.isOpened = True
        self.thread.start()
    def stop(self):
        """Ask the control loop to exit; the thread notices within ~0.1 s."""
        self.isOpened = False
        # self.thread.stop()
\ No newline at end of file
from uvc_thermal import Thermal
from camera import Camera
from detector import FaceDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
import cv2
import numpy as np
import time
import sys
def max_temperature(box,thermal_row,black_h=0,thermal_height=120,thermal_width=160,image_height=480,image_width=640):
    """Return the hottest raw value inside a face box projected onto the thermal frame.

    Args:
        box: (x1, y1, x2, y2) face box in RGB-image pixel coordinates.
        thermal_row: 2-D array of raw thermal values (thermal_height x thermal_width).
        black_h: rows above this index are excluded (blackbody reference area).
        thermal_height, thermal_width: thermal frame dimensions.
        image_height, image_width: RGB frame dimensions used for scaling.

    Returns:
        The maximum raw value in the projected region, or 0 when the
        projection is empty (e.g. the box falls entirely inside the
        excluded blackbody rows).
    """
    ratio_w = thermal_width / image_width
    ratio_h = thermal_height / image_height
    # Project the RGB-space box onto the (smaller) thermal grid, clamping
    # to the frame and skipping the blackbody rows at the top.
    left = max(int(box[0] * ratio_w), 0)
    top = max(int(box[1] * ratio_h), black_h)
    right = min(int(box[2] * ratio_w), thermal_width)
    bottom = min(int(box[3] * ratio_h), thermal_height)
    roi = thermal_row[top:bottom, left:right]
    return roi.max() if roi.size != 0 else 0
# --- Device and model wiring -------------------------------------------------
# Thermal camera (160x120 raw, upscaled to 640x480 for display), RGB camera,
# face detector, anti-spoofing model, blackbody heater reference, and the
# regression-based temperature correcter.
thermal = Thermal(width=160, height=120, framerate=9, frame_width=640, frame_height=480, log=None)
camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
aligner = BoxAligner(img_h = 480,img_w = 640)
heater = Heater(pwm_pin=12 , target_temp=36.0)
correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
# Start heater first so the blackbody can warm up; 2 s settle time between
# each device start.
heater.start()
time.sleep(2)
thermal.start()
time.sleep(2)
camera.start()
time.sleep(2)
# --- Capture / detect / correct / display loop -------------------------------
while(thermal.isOpened and camera.isOpened):
    thermal_ret,thermal_frame, thermal_row = thermal.capture()
    ret,frame = camera.capture()
    if(thermal_ret and ret):
        # Hottest raw value in the left 34 thermal columns -- presumably the
        # blackbody region in the thermal image; TODO confirm placement.
        blackbody_max = thermal_row[:,:34].max()
        heater_temp = heater.last_temp
        boxes, labels, probs = detector.predict(frame)
        for i in range(boxes.shape[0]):
            box = boxes[i, :]
            score = probs[i]
            if(score > 0.9):
                # Anti-spoofing is currently DISABLED: live_score is hard-coded
                # to 0.9, so every detection is treated as live (green box).
                #scaled_box = anti.scale_box(img_h=480,img_w=640,box=box)
                #expanded_face = frame[scaled_box[1]:scaled_box[3],scaled_box[0]:scaled_box[2]].copy()
                #live_score = anti.predict(expanded_face)
                live_score = 0.9
                if(live_score > 0.9):
                    color = (0, 255, 0)
                else:
                    color = ( 0, 0, 255)
                # Map the RGB-space box onto the thermal frame before sampling.
                thermal_box = aligner.box_aligment([box])[0]
                cv2.rectangle(thermal_frame, (thermal_box[0], thermal_box[1]), (thermal_box[2], thermal_box[3]), color, 2)
                cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
                # cv2.rectangle(thermal_frame, (box[0], box[1]), (box[2], box[3]), color, 2)
                # max_temp = thermal_row[thermal_box[1]//4:thermal_box[3]//4,thermal_box[0]//4:thermal_box[2]//4]
                face_temp = max_temperature(thermal_box,thermal_row,black_h=32)
                # if(max_temp.size != 0):
                # Correct the raw face reading using the blackbody reference.
                corrected_face_temp = correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
                # 0.0092 is the raw-to-Celsius display scale (see Thermal.scale).
                cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)
        cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320, 64), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)
        # RGB and thermal frames shown side by side; 'q' quits.
        cv2.imshow('frames',np.column_stack((frame,thermal_frame)))
        if(cv2.waitKey(1) == ord('q')):
            break
    else:
        # A failed capture on either camera ends the session.
        break
# --- Orderly shutdown --------------------------------------------------------
cv2.destroyAllWindows()
thermal.stop()
time.sleep(2)
camera.stop()
time.sleep(2)
heater.stop()
sys.exit()
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "connected-studio",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"from mask_detector import FaceMaskDetector"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "strategic-degree",
"metadata": {},
"outputs": [],
"source": [
"detector = FaceMaskDetector( model_path='/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn',input_size=(640, 640))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "concrete-configuration",
"metadata": {},
"outputs": [],
"source": [
"img = cv2.imread('/home/allen/Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/imgs/1.jpg')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "specified-center",
"metadata": {},
"outputs": [],
"source": [
"detector.predict(img)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "disabled-villa",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
import math
import MNN
import cv2
import numpy as np
import torch
class FaceMaskDetector:
    """Face + mask detector (RetinaFaceMask-style heads) executed with MNN.

    Structurally similar to FaceDetector but with RetinaFace anchors
    (strides 8/16/32), a BGR->RGB + mean-only preprocessing step, and an
    extra per-face mask-score output.
    """
    def __init__(self,model_path="../model/version-slim/slim-320.mnn",input_size=(320,240)):
        # input_size is (width, height) of the network input.
        self.input_size = input_size
        self.image_mean = np.array([104, 117, 123])  # subtracted per channel (no std division here)
        self.image_std = 128.0     # NOTE(review): unused -- predict() only subtracts the mean
        self.iou_threshold = 0.3   # NOTE(review): unused -- decode() takes iou_threshold as a parameter instead
        self.threshold = 0.8       # minimum face-class score kept by decode()
        self.center_variance = 0.1  # SSD decoding variances; must match training config
        self.size_variance = 0.2
        self.min_boxes = [[16, 32], [64, 128], [256, 512]]  # RetinaFace anchor sizes per stride
        self.strides = [8, 16, 32]
        self.priors = self.define_img_size(self.input_size)  # precomputed priors (torch tensor)
        # MNN interpreter/session reused for every predict() call.
        self.interpreter = MNN.Interpreter(model_path)
        self.session = self.interpreter.createSession({'numThread':4})
        self.input_tensor = self.interpreter.getSessionInput(self.session)
    def predict(self,image_ori):
        """Detect faces and their mask scores in a BGR image.

        Returns:
            (boxes, labels, probs, mask_probs): int32 corner boxes scaled to
            the ORIGINAL image size, class labels, face scores, and the
            matching mask scores. All four are empty arrays when nothing
            passes self.threshold.
        """
        image = cv2.cvtColor(image_ori, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, self.input_size)
        image = image.astype(float)
        image = (image - self.image_mean)
        image = image.transpose((2, 0, 1))  # HWC -> CHW for the Caffe-layout tensor
        image = image.astype(np.float32)
        tmp_input = MNN.Tensor((1, 3, self.input_size[1], self.input_size[0]), MNN.Halide_Type_Float, image, MNN.Tensor_DimensionType_Caffe)
        self.input_tensor.copyFrom(tmp_input)
        self.interpreter.runSession(self.session)
        # Three output heads: mask score, face score, box regressions.
        facemask_scores = self.interpreter.getSessionOutput(self.session, "facemask_score").getData()
        scores = self.interpreter.getSessionOutput(self.session, "face_score").getData()
        boxes = self.interpreter.getSessionOutput(self.session, "bbox").getData()
        boxes = np.expand_dims(np.reshape(boxes, (-1, 4)), axis=0)
        scores = np.expand_dims(np.reshape(scores, (-1, 2)), axis=0)
        facemask_scores = np.expand_dims(np.reshape(facemask_scores, (-1, 2)), axis=0)
        # Decode SSD regressions against the priors, then to corner form.
        boxes = self.convert_locations_to_boxes(boxes, self.priors, self.center_variance, self.size_variance)
        boxes = self.center_form_to_corner_form(boxes)
        boxes, labels, probs, mask_probs = self.decode(image_ori.shape[1], image_ori.shape[0], scores, facemask_scores, boxes, self.threshold)
        return boxes, labels, probs, mask_probs
    def define_img_size(self,image_size):
        """Compute feature-map sizes for each stride and build the priors."""
        shrinkage_list = []
        feature_map_w_h_list = []
        for size in image_size:
            feature_map = [math.ceil(size / stride) for stride in self.strides]
            feature_map_w_h_list.append(feature_map)
        # Same stride list is used for both the W and H dimensions.
        for i in range(0, len(image_size)):
            shrinkage_list.append(self.strides)
        priors = self.generate_priors(feature_map_w_h_list, shrinkage_list, image_size, self.min_boxes)
        return priors
    def generate_priors(self,feature_map_list, shrinkage_list, image_size, min_boxes, clamp=True):
        """Enumerate center-form priors (cx, cy, w, h), all relative to [0, 1].

        Returns a torch tensor of shape (num_priors, 4), clamped to [0, 1]
        when `clamp` is true.
        """
        priors = []
        for index in range(0, len(feature_map_list[0])):
            scale_w = image_size[0] / shrinkage_list[0][index]
            scale_h = image_size[1] / shrinkage_list[1][index]
            for j in range(0, feature_map_list[1][index]):
                for i in range(0, feature_map_list[0][index]):
                    x_center = (i + 0.5) / scale_w
                    y_center = (j + 0.5) / scale_h
                    for min_box in min_boxes[index]:
                        w = min_box / image_size[0]
                        h = min_box / image_size[1]
                        priors.append([
                            x_center,
                            y_center,
                            w,
                            h
                        ])
        print("priors nums:{}".format(len(priors)))
        priors = torch.tensor(priors)
        if clamp:
            torch.clamp(priors, 0.0, 1.0, out=priors)
        return priors
    def decode(self,width, height, confidences, mask_confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
        """Threshold on the FACE score (skipping background class 0), carry the
        mask score alongside each surviving box, run NMS, and scale boxes to
        (width, height) pixels.

        Returns (boxes int32 (N,4), labels (N,), face probs (N,), mask probs (N,));
        empty arrays when no detection passes prob_threshold.
        """
        boxes = boxes[0]
        confidences = confidences[0]
        mask_confidences = mask_confidences[0]
        picked_box_probs = []
        picked_labels = []
        for class_index in range(1, confidences.shape[1]):
            probs = confidences[:, class_index]
            mask_probs = mask_confidences[:, class_index]
            mask = probs > prob_threshold
            probs = probs[mask]
            mask_probs = mask_probs[mask]
            if probs.shape[0] == 0:
                continue
            subset_boxes = boxes[mask, :]
            # Rows are (x1, y1, x2, y2, face_score, mask_score).
            box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1),mask_probs.reshape(-1, 1)], axis=1)
            box_probs = self.hard_nms(box_probs,
                                      iou_threshold=iou_threshold,
                                      top_k=top_k,
                                      )
            picked_box_probs.append(box_probs)
            picked_labels.extend([class_index] * box_probs.shape[0])
        if not picked_box_probs:
            return np.array([]), np.array([]), np.array([]), np.array([])
        picked_box_probs = np.concatenate(picked_box_probs)
        picked_box_probs[:, 0] *= width
        picked_box_probs[:, 1] *= height
        picked_box_probs[:, 2] *= width
        picked_box_probs[:, 3] *= height
        return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4], picked_box_probs[:, 5]
    def center_form_to_corner_form(self,locations):
        """Convert (cx, cy, w, h) boxes to (x1, y1, x2, y2) corner form."""
        return np.concatenate([locations[..., :2] - locations[..., 2:] / 2,
                               locations[..., :2] + locations[..., 2:] / 2], len(locations.shape) - 1)
    def convert_locations_to_boxes(self,locations, priors, center_variance,
                                   size_variance):
        """Convert regressional location results of SSD into boxes in the form of (center_x, center_y, h, w).
        The conversion:
            $$predicted\_center * center_variance = \frac {real\_center - prior\_center} {prior\_hw}$$
            $$exp(predicted\_hw * size_variance) = \frac {real\_hw} {prior\_hw}$$
        We do it in the inverse direction here.
        Args:
            locations (batch_size, num_priors, 4): the regression output of SSD. It will contain the outputs as well.
            priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes.
            center_variance: a float used to change the scale of center.
            size_variance: a float used to change of scale of size.
        Returns:
            boxes: priors: [[center_x, center_y, h, w]]. All the values
                are relative to the image size.
        """
        # priors can have one dimension less.
        if len(priors.shape) + 1 == len(locations.shape):
            priors = np.expand_dims(priors, 0)
        return np.concatenate([
            locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],
            np.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
        ], axis=len(locations.shape) - 1)
    def area_of(self,left_top, right_bottom):
        """Compute the areas of rectangles given two corners.
        Args:
            left_top (N, 2): left top corner.
            right_bottom (N, 2): right bottom corner.
        Returns:
            area (N): return the area.
        """
        # Clamp to 0 so non-overlapping rectangles get zero area.
        hw = np.clip(right_bottom - left_top, 0.0, None)
        return hw[..., 0] * hw[..., 1]
    def iou_of(self,boxes0, boxes1, eps=1e-5):
        """Return intersection-over-union (Jaccard index) of boxes.
        Args:
            boxes0 (N, 4): ground truth boxes.
            boxes1 (N or 1, 4): predicted boxes.
            eps: a small number to avoid 0 as denominator.
        Returns:
            iou (N): IoU values.
        """
        overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
        overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
        overlap_area = self.area_of(overlap_left_top, overlap_right_bottom)
        area0 = self.area_of(boxes0[..., :2], boxes0[..., 2:])
        area1 = self.area_of(boxes1[..., :2], boxes1[..., 2:])
        return overlap_area / (area0 + area1 - overlap_area + eps)
    def hard_nms(self,box_scores, iou_threshold, top_k=-1, candidate_size=200):
        """Greedy (hard) non-maximum suppression.
        Args:
            box_scores (N, 6): corner-form boxes, face score, mask score.
                NMS ranks by column 4 (the FACE score); the mask score in
                column 5 just rides along.
            iou_threshold: intersection over union threshold.
            top_k: keep top_k results. If k <= 0, keep all the results.
            candidate_size: only consider the candidates with the highest scores.
        Returns:
            the kept rows of box_scores, shape (N', 6) (not indexes).
        """
        scores = box_scores[:, 4]
        boxes = box_scores[:, :4]
        picked = []
        # np.argsort is ascending, so the best candidate is at the END;
        # the commented lines show the original descending-sort variant.
        # _, indexes = scores.sort(descending=True)
        indexes = np.argsort(scores)
        # indexes = indexes[:candidate_size]
        indexes = indexes[-candidate_size:]
        while len(indexes) > 0:
            # current = indexes[0]
            current = indexes[-1]
            picked.append(current)
            if 0 < top_k == len(picked) or len(indexes) == 1:
                break
            current_box = boxes[current, :]
            # indexes = indexes[1:]
            indexes = indexes[:-1]
            rest_boxes = boxes[indexes, :]
            iou = self.iou_of(
                rest_boxes,
                np.expand_dims(current_box, axis=0),
            )
            # Drop every remaining candidate overlapping the current pick.
            indexes = indexes[iou <= iou_threshold]
        return box_scores[picked, :]
\ No newline at end of file
from datetime import datetime
import pytz
import time
from threading import Thread
class OSD:
    """On-screen-display clock: keeps a formatted local-time string fresh.

    A background thread refreshes `str_time` (local time formatted with
    `str_format`) roughly 10 times per second while running.
    """
    def __init__(self,local='Asia/Taipei',str_format="%Y-%m-%d %H:%M:%S"):
        self.local = local  # remember the tz name so get_lcaol() can return it
        self.tz = pytz.timezone(local)
        self.utc = datetime.utcnow()
        self.local_time = pytz.utc.localize(self.utc, is_dst=None).astimezone(self.tz)
        self.str_format = str_format
        self.str_time = self.local_time.strftime(self.str_format)
        self.isOpened = False
        self.thread = Thread(target=self.update_job)
    def update_job(self):
        """Refresh the cached local-time string until stop() is called."""
        while(self.isOpened):
            self.utc = datetime.utcnow()
            self.local_time = pytz.utc.localize(self.utc, is_dst=None).astimezone(self.tz)
            self.str_time = self.local_time.strftime(self.str_format)
            time.sleep(0.1)
    def set_local(self,local):
        """Switch to a new tz-database zone name, e.g. 'Asia/Taipei'."""
        self.local = local
        self.tz = pytz.timezone(local)
    def get_lcaol(self):
        """Return the current tz name string. (Misspelled name kept for
        backward compatibility.)

        BUG FIX: `self.local` was never stored, so this always raised
        AttributeError; __init__ and set_local now record it.
        """
        return self.local
    def set_str_format(self,str_format):
        """Set the strftime format used for `str_time`.

        BUG FIX: previously assigned `pytz.timezone(str_format)`, which is
        wrong and raised UnknownTimeZoneError for any normal format string.
        """
        self.str_format = str_format
    def get_str_format(self):
        """Return the current strftime format string."""
        return self.str_format
    def start(self):
        """Start the background update thread (once per instance)."""
        print("start OSD")
        self.isOpened = True
        self.thread.start()
    def stop(self):
        """Signal the update thread to exit (it wakes within ~0.1 s)."""
        print("stop OSD")
        self.isOpened = False
This diff is collapsed.
This diff is collapsed.
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout: RGB frame on the left, a thermal frame plus
    three face/time/temperature panels stacked on the right.

    Generated code -- regenerate from the .ui file rather than editing the
    widget construction by hand (see the warning at the top of this file).
    """
    def setupUi(self, MainWindow):
        """Build and lay out all widgets on `MainWindow`."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1095, 791)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Left pane: the live RGB camera frame.
        self.rgb_frame = QtWidgets.QLabel(self.centralwidget)
        self.rgb_frame.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
        self.rgb_frame.setToolTipDuration(0)
        self.rgb_frame.setStyleSheet("background-color: rgb(170, 0, 0);")
        self.rgb_frame.setAlignment(QtCore.Qt.AlignCenter)
        self.rgb_frame.setObjectName("rgb_frame")
        self.horizontalLayout.addWidget(self.rgb_frame)
        # Right pane: thermal frame above three face panels.
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.thermal_frame = QtWidgets.QLabel(self.centralwidget)
        self.thermal_frame.setStyleSheet("background-color: rgb(170, 255, 0);")
        self.thermal_frame.setObjectName("thermal_frame")
        self.verticalLayout.addWidget(self.thermal_frame)
        # Face panel 1: face image, timestamp, temperature labels.
        self.faceWidget1 = QtWidgets.QWidget(self.centralwidget)
        self.faceWidget1.setStyleSheet("border-color: rgb(85, 255, 255);")
        self.faceWidget1.setObjectName("faceWidget1")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.faceWidget1)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.face1 = QtWidgets.QLabel(self.faceWidget1)
        self.face1.setObjectName("face1")
        self.verticalLayout_2.addWidget(self.face1, 0, QtCore.Qt.AlignHCenter)
        self.time1 = QtWidgets.QLabel(self.faceWidget1)
        self.time1.setObjectName("time1")
        self.verticalLayout_2.addWidget(self.time1, 0, QtCore.Qt.AlignHCenter)
        self.temp1 = QtWidgets.QLabel(self.faceWidget1)
        self.temp1.setObjectName("temp1")
        self.verticalLayout_2.addWidget(self.temp1, 0, QtCore.Qt.AlignHCenter)
        self.verticalLayout.addWidget(self.faceWidget1)
        # Face panel 2.
        self.faceWidget2 = QtWidgets.QWidget(self.centralwidget)
        self.faceWidget2.setStyleSheet("border-color: rgb(255, 0, 0);")
        self.faceWidget2.setObjectName("faceWidget2")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.faceWidget2)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.face2 = QtWidgets.QLabel(self.faceWidget2)
        self.face2.setObjectName("face2")
        self.verticalLayout_4.addWidget(self.face2, 0, QtCore.Qt.AlignHCenter)
        self.time2 = QtWidgets.QLabel(self.faceWidget2)
        self.time2.setObjectName("time2")
        self.verticalLayout_4.addWidget(self.time2, 0, QtCore.Qt.AlignHCenter)
        self.temp2 = QtWidgets.QLabel(self.faceWidget2)
        self.temp2.setObjectName("temp2")
        self.verticalLayout_4.addWidget(self.temp2, 0, QtCore.Qt.AlignHCenter)
        self.verticalLayout.addWidget(self.faceWidget2)
        # Face panel 3.
        self.faceWidget3 = QtWidgets.QWidget(self.centralwidget)
        self.faceWidget3.setObjectName("faceWidget3")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.faceWidget3)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.face3 = QtWidgets.QLabel(self.faceWidget3)
        self.face3.setObjectName("face3")
        self.verticalLayout_5.addWidget(self.face3, 0, QtCore.Qt.AlignHCenter)
        self.time3 = QtWidgets.QLabel(self.faceWidget3)
        self.time3.setObjectName("time3")
        self.verticalLayout_5.addWidget(self.time3, 0, QtCore.Qt.AlignHCenter)
        self.temp3 = QtWidgets.QLabel(self.faceWidget3)
        self.temp3.setObjectName("temp3")
        self.verticalLayout_5.addWidget(self.temp3, 0, QtCore.Qt.AlignHCenter)
        self.verticalLayout.addWidget(self.faceWidget3)
        self.horizontalLayout.addLayout(self.verticalLayout)
        # 4:1 width split between the RGB pane and the right pane.
        self.horizontalLayout.setStretch(0, 4)
        self.horizontalLayout.setStretch(1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1095, 21))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        MainWindow.setMenuBar(self.menubar)
        self.menubar.addAction(self.menu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Apply (re-)translated text to all widgets. Label text is in
        Traditional Chinese: face / time / temperature / menu."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.rgb_frame.setText(_translate("MainWindow", "rgb"))
        self.thermal_frame.setText(_translate("MainWindow", "theraml frame"))
        self.face1.setText(_translate("MainWindow", "人臉"))
        self.time1.setText(_translate("MainWindow", "時間"))
        self.temp1.setText(_translate("MainWindow", "溫度"))
        self.face2.setText(_translate("MainWindow", "人臉"))
        self.time2.setText(_translate("MainWindow", "時間"))
        self.temp2.setText(_translate("MainWindow", "溫度"))
        self.face3.setText(_translate("MainWindow", "人臉"))
        self.time3.setText(_translate("MainWindow", "時間"))
        self.temp3.setText(_translate("MainWindow", "溫度"))
        self.menu.setTitle(_translate("MainWindow", "選單"))
from threading import Thread
from datetime import datetime
import time, numpy as np
import cv2
from uvctypes import *
from multiprocessing import Queue
class Thermal(object):
    """Radiometric thermal camera (libuvc, Y16 format) capture thread.

    A background thread streams raw 16-bit frames through a libuvc callback
    into a queue, keeps the latest frame as both a display image (resized,
    8-bit, RGB) and the raw radiometric array, and triggers a flat-field
    correction (FFC) periodically.
    """
    def __init__(self, width, height, framerate, frame_width, frame_height, log=None):
        # Fall back to the built-in print-based logger when none is supplied.
        self.__log = self.__log if log is None else log
        self.__isCaptured = False  # True once at least one frame has arrived
        self.__frame = None        # latest 8-bit RGB display frame (frame_width x frame_height)
        self.__frame2c = None      # latest raw 16-bit radiometric frame (width x height)
        self.isOpened = False
        self.isStopping = False
        self.isNotSleeping=True    # NOTE(review): set but never read in this class
        self.width = width
        self.height = height
        self.frame_width = frame_width    # display (resized) dimensions
        self.frame_height = frame_height
        self.framerate = framerate
        # uvc thermal init: frames are pushed into self.q from the C callback.
        BUF_SIZE = 8
        self.q = Queue(BUF_SIZE)
        # Keep a reference to the ctypes callback so it is not garbage-collected.
        self.PTR_PY_FRAME_CALLBACK = CFUNCTYPE(None, POINTER(uvc_frame), c_void_p)(self.py_frame_callback)
        self.devh = None
        self.scale = 0.0092  # raw-sensor-unit -> Celsius display scale (see __s2c / update_scale)
        self.__thread = Thread(target=self.__job)
    def start(self):
        """Reset state and launch the capture thread (first use only)."""
        self.__isCaptured = False
        self.__frame = None
        self.__frame2c = None
        self.isOpened = True
        self.isStopping = False
        self.__thread.start()
    def restart(self):
        """Reset state and launch a fresh capture thread (threads are single-use)."""
        self.__isCaptured = False
        self.__frame = None
        self.__frame2c = None
        self.isOpened = True
        self.isStopping = False
        del self.__thread
        self.__thread = Thread(target=self.__job)
        self.__thread.start()
    def stop(self):
        """Stop streaming and signal the capture thread to exit."""
        self.__isCaptured = False
        self.__frame = None
        self.__frame2c = None
        self.isStopping = True
        libuvc.uvc_stop_streaming(self.devh)
    def capture(self):
        """Return (got_frame, display_frame, raw_frame) for the latest capture."""
        return self.__isCaptured, self.__frame, self.__frame2c
    def doFFC(self):
        """Trigger a manual flat-field correction on the camera."""
        perform_manual_ffc(self.devh)
    def update_scale(self,raw_temp,c):
        """Re-calibrate the raw->Celsius scale from one known reference point."""
        self.scale = c/raw_temp
    def py_frame_callback(self,frame, userptr):
        """libuvc C callback: copy a Y16 frame into the queue (drop if full)."""
        array_pointer = cast(frame.contents.data, POINTER(c_uint16 * (frame.contents.width * frame.contents.height)))
        data = np.frombuffer(
            array_pointer.contents, dtype=np.dtype(np.uint16)).reshape(frame.contents.height, frame.contents.width)
        # Sanity check: a Y16 frame is exactly 2 bytes per pixel.
        if frame.contents.data_bytes != (2 * frame.contents.width * frame.contents.height):
            return
        if not self.q.full():
            self.q.put(data)
    def startStream(self):
        """Find/open the UVC device, configure radiometric mode, start streaming."""
        ctx = POINTER(uvc_context)()
        dev = POINTER(uvc_device)()
        self.devh = POINTER(uvc_device_handle)()
        ctrl = uvc_stream_ctrl()
        res = libuvc.uvc_init(byref(ctx), 0)
        if res < 0:
            print("uvc_init error")
            #exit(1)
        try:
            res = libuvc.uvc_find_device(ctx, byref(dev), PT_USB_VID, PT_USB_PID, 0)
            if res < 0:
                print("uvc_find_device error")
                exit(1)
            try:
                res = libuvc.uvc_open(dev, byref(self.devh))
                if res < 0:
                    print("uvc_open error")
                    exit(1)
                print("device opened!")
                print_device_info(self.devh)
                print_device_formats(self.devh)
                frame_formats = uvc_get_frame_formats_by_guid(self.devh, VS_FMT_GUID_Y16)
                if len(frame_formats) == 0:
                    print("device does not support Y16")
                    exit(1)
                libuvc.uvc_get_stream_ctrl_format_size(self.devh, byref(ctrl), UVC_FRAME_FORMAT_Y16,
                    frame_formats[0].wWidth, frame_formats[0].wHeight, int(1e7 / frame_formats[0].dwDefaultFrameInterval)
                )
                res = libuvc.uvc_start_streaming(self.devh, byref(ctrl), self.PTR_PY_FRAME_CALLBACK, None, 0)
                if res < 0:
                    print("uvc_start_streaming failed: {0}".format(res))
                    exit(1)
                print("done starting stream, displaying settings")
                print_shutter_info(self.devh)
                print("resetting settings to default")
                set_auto_ffc(self.devh)
                set_gain_high(self.devh)
                set_radiometry_control(self.devh)
                # print_flux_linear_parameters(self.devh)
                # set_rbfo(self.devh)
                # print_rbfo(self.devh)
                #set_tlinear_auto_resolution(self.devh)
                # set_tlinear_control(self.devh)
                # set_tshutter_control(self.devh)
                print("current settings")
                print_shutter_info(self.devh)
            except:
                # NOTE(review): bare except deliberately kept -- it also
                # swallows the SystemExit raised by the exit(1) calls above.
                #libuvc.uvc_unref_device(dev)
                print('Failed to Open Device')
        except:
            #libuvc.uvc_exit(ctx)
            print('Failed to Find Device')
            exit(1)
    def __job(self):
        """Capture loop: pull queued frames, refresh display + raw copies,
        and perform an FFC roughly every 300 seconds of frames."""
        duration = 300 * self.framerate  # FFC every ~300 s worth of frames
        self.startStream()
        self.__log("Opened: {0}, Stopping: {1}, Duration: {2}".format(
            self.isOpened,
            self.isStopping,
            duration))
        perform_manual_ffc(self.devh)
        while self.isOpened and not self.isStopping:
            if(duration < 0):
                perform_manual_ffc(self.devh)
                duration = 300 * self.framerate
            time.sleep(1/self.framerate)
            a = self.q.get(True, 500)
            b = a.copy()  # raw radiometric data
            c = b.copy()
            # c_min = c.min()
            # c_max = c.max()
            # c_std = c.std()
            # c_var = c.var()
            # c_mean = c.mean()
            # self.__log("frame {0}, Tmax={1:.1f}, Tmin={2:.1f}, Tmean={3:.1f}, Tstd={4:.1f}, Tvar={5:.1f}, Tmax-Tmin={6:.1f}".format(
            #     duration,
            #     float(c_max),
            #     float(c_min),
            #     float(c_mean),
            #     float(c_std),
            #     float(c_var),
            #     float(c_max-c_min)
            #     ))
            # NOTE: raw_to_8bit normalizes b in place, so the raw copy c
            # must be taken before this call (it is, above).
            self.__frame = cv2.resize(self.raw_to_8bit(b),(self.frame_width,self.frame_height), interpolation=cv2.INTER_AREA)
            self.__frame2c = c
            self.__isCaptured = True
            duration = duration - 1
            time.sleep(1 / self.framerate)
        self.__log("thermal stop")
        self.__frame = None
        self.__frame2c = None
        self.__isCaptured = False
    def __k2c(self, value):
        """Centikelvin raw value -> Celsius."""
        return (value - 27315) / 100.0
    def __k2f(self, value):
        """Centikelvin raw value -> Fahrenheit.

        BUG FIX: previously called the undefined global `k2c` (NameError);
        now uses self.__k2c.
        """
        return 1.8 * self.__k2c(value) + 32.0
    def __s2c(self, value):
        """Raw sensor value -> Celsius using the calibrated linear scale."""
        return self.scale*value
    def __log(self, message):
        """Default logger: print to stdout (replaced when `log` is supplied)."""
        print(message)
    def raw_to_8bit(self,data):
        """Normalize a raw 16-bit frame IN PLACE and return an 8-bit RGB image."""
        cv2.normalize(data, data, 0, 65535, cv2.NORM_MINMAX)
        np.right_shift(data, 8, data)
        return cv2.cvtColor(np.uint8(data), cv2.COLOR_GRAY2RGB)
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment