Commit 39af5ece authored by YONG-LIN SU's avatar YONG-LIN SU

Initial commit

parents
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "connected-studio",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"from mask_detector import FaceMaskDetector"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "strategic-degree",
"metadata": {},
"outputs": [],
"source": [
"detector = FaceMaskDetector( model_path='/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn',input_size=(640, 640))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "concrete-configuration",
"metadata": {},
"outputs": [],
"source": [
"img = cv2.imread('/home/allen/Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/imgs/1.jpg')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "specified-center",
"metadata": {},
"outputs": [],
"source": [
"detector.predict(img)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "disabled-villa",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "alternative-throat",
"metadata": {},
"outputs": [],
"source": [
"from uvc_thermal import Thermal\n",
"from camera import Camera\n",
"from detector import FaceDetector\n",
"from anti import AntiSpoofing\n",
"from heater import Heater\n",
"from aligner import BoxAligner\n",
"from correcter import Correcter\n",
"\n",
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "direct-vulnerability",
"metadata": {},
"outputs": [],
"source": [
"def max_temperature(box,thermal_row,thermal_height=120,thermal_width=160,image_height=480,image_width=640):\n",
" scale_ratio_h = thermal_height/image_height\n",
" scale_ratio_w = thermal_width/image_width\n",
" x1 = max(int(box[0]*scale_ratio_w),0)\n",
" y1 = max(int(box[1]*scale_ratio_h),0)\n",
" x2 = min(int(box[2]*scale_ratio_w),thermal_width)\n",
" y2 = min(int(box[3]*scale_ratio_h),thermal_height)\n",
" box_temperature = thermal_row[y1:y2,x1:x2]\n",
" return box_temperature.max()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "pending-pressure",
"metadata": {},
"outputs": [],
"source": [
"thermal = Thermal(width=160, height=120, framerate=9, frame_width=640, frame_height=480, log=None)\n",
"camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)\n",
"detector = FaceDetector(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn\" , input_size=(320,240))\n",
"# anti = AntiSpoofing(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn\")\n",
"aligner = BoxAligner(img_h = 480,img_w = 640)\n",
"heater = Heater(pwm_pin=12 , target_temp=35.0)\n",
"correcter = Correcter(model_path=\"../thermal-tk/20210421_correcter.sav\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "moved-capacity",
"metadata": {},
"outputs": [],
"source": [
"thermal.start()\n",
"camera.start()\n",
"heater.start()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "automotive-married",
"metadata": {},
"outputs": [],
"source": [
"while(thermal.isOpened and camera.isOpened):\n",
" thermal_ret,thermal_frame, thermal_row = thermal.capture()\n",
" ret,frame = camera.capture()\n",
" if(thermal_ret and ret):\n",
" blackbody_max = thermal_row[:,:34].max()\n",
" heater_temp = heater.last_temp\n",
" boxes, labels, probs = detector.predict(frame)\n",
" for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" if(score > 0.9):\n",
"# scaled_box = anti.scale_box(img_h=480,img_w=640,box=box)\n",
"# expanded_face = frame[scaled_box[1]:scaled_box[3],scaled_box[0]:scaled_box[2]].copy()\n",
"# live_score = anti.predict(expanded_face)\n",
" live_score = 0.99\n",
" if(live_score > 0.9):\n",
" color = (0, 255, 0)\n",
" else:\n",
" color = ( 0, 0, 255)\n",
" thermal_box = aligner.box_aligment([box])[0]\n",
" cv2.rectangle(thermal_frame, (thermal_box[0], thermal_box[1]), (thermal_box[2], thermal_box[3]), color, 2)\n",
" cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)\n",
" face_temp = max_temperature(box,thermal_row)\n",
" corrected_face_temp = correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)\n",
" cv2.putText(frame, \"original: {:.2f},corrected: {:.2f}\".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)\n",
" cv2.imshow('frames',np.column_stack((frame,thermal_frame)))\n",
" if(cv2.waitKey(1) == ord('q')):\n",
" break\n",
" else:\n",
" break\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "finnish-lightning",
"metadata": {},
"outputs": [],
"source": [
"thermal.stop()\n",
"camera.stop()\n",
"heater.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "annoying-strike",
"metadata": {},
"outputs": [],
"source": [
"camera.isOpened"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "qualified-labor",
"metadata": {},
"outputs": [],
"source": [
"cv2.imwrite(\"rgb_frame_3.jpg\",frame)\n",
"cv2.imwrite(\"thermal_frame_3.jpg\",thermal_frame)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "extra-injection",
"metadata": {},
"outputs": [],
"source": [
"def box_temperature(box,thermal_row,thermal_height=120,thermal_width=160,image_height=480,image_width=640):\n",
" scale_ratio_h = thermal_height/image_height\n",
" scale_ratio_w = thermal_width/image_width\n",
" print(scale_ratio_h,scale_ratio_w)\n",
" x1 = max(int(box[0]*scale_ratio_w),0)\n",
" y1 = max(int(box[1]*scale_ratio_h),0)\n",
" x2 = min(int(box[2]*scale_ratio_w),thermal_width)\n",
" y2 = min(int(box[3]*scale_ratio_h),thermal_height)\n",
" print(x1,y1,x2,y2)\n",
" return thermal_row[y1:y2,x1:x2]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "twenty-dakota",
"metadata": {},
"outputs": [],
"source": [
"x1 = max(int(thermal_box[0]//4),0)\n",
"y1 = max(int(thermal_box[1]//4),0)\n",
"x2 = min(int(thermal_box[2]//4),160)\n",
"y2 = min(int(thermal_box[3]//4),120)\n",
"print(x1,y1,x2,y2)\n",
"thermal_row_copy = thermal_row.copy()\n",
"thermal_row_copy[y1:y2,x1:x2]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "parental-continent",
"metadata": {},
"outputs": [],
"source": [
"print(thermal_box[0]//4,int(thermal_box[0]*0.25))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "simplified-universal",
"metadata": {},
"outputs": [],
"source": [
"cv2.imshow('thermal frames 1',thermal.raw_to_8bit(thermal_row_copy[y1:y2,x1:x2]))\n",
"cv2.imshow('thermal frames 2',thermal.raw_to_8bit(box_temperature(thermal_box,thermal_row_copy)))\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "revolutionary-repair",
"metadata": {},
"outputs": [],
"source": [
"(thermal_row_copy[y1:y2,x1:x2]*0.0092).mean()"
]
},
{
"cell_type": "markdown",
"id": "hearing-assault",
"metadata": {},
"source": [
"### 物件框校正"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "together-writer",
"metadata": {},
"outputs": [],
"source": [
"class BoxAligner:\n",
" def __init__(self,img_h,img_w):\n",
" self.img_h = img_h\n",
" self.img_w = img_w\n",
" self.M = np.array([[ 8.13802980e-01, -2.63523694e-02, 9.30324875e+01],\n",
" [ 2.10292692e-02, 7.84319221e-01, 7.70246127e+01],\n",
" [ 1.48500793e-04, -1.53618915e-04, 1.00000000e+00]])\n",
" def box_aligment(self,boxes):\n",
" aliged_boxes = []\n",
" for b in boxes:\n",
" x1,y1,w1 = np.dot(self.M,[b[0],b[1],1])\n",
" x2,y2,w2 = np.dot(self.M,[b[2],b[3],1])\n",
" x1 = max(int(x1),0)\n",
" y1 = max(int(y1),0)\n",
" x2 = min(int(x2),self.img_w)\n",
" y2 = min(int(y2),self.img_h)\n",
" aliged_boxes.append([x1,y1,x2,y2])\n",
" return aliged_boxes\n",
" def get_omography_matrix(self,rgb_points, thermal_points):\n",
" h, mask = cv2.findHomography(rgb_points, thermal_points, cv2.RANSAC)\n",
" self.M = h"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "auburn-deficit",
"metadata": {},
"outputs": [],
"source": [
"aligner = BoxAligner(img_h = 480,img_w = 640)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "provincial-example",
"metadata": {},
"outputs": [],
"source": [
"136/4"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "compliant-daily",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "elegant-bachelor",
"metadata": {},
"source": [
"### 自動校正計算"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "french-brunswick",
"metadata": {},
"outputs": [],
"source": [
"cv2.imshow('RGB frames',frame)\n",
"cv2.imshow('Thermal frames',thermal_frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "daily-brunswick",
"metadata": {},
"outputs": [],
"source": [
"thermal_face = cv2.selectROI(\"thermal face\",thermal_frame)\n",
"frame_face = cv2.selectROI(\"rgb face\",frame)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "settled-switzerland",
"metadata": {},
"outputs": [],
"source": [
"x1,y1,w1,h1 = thermal_face\n",
"pts_d = np.float32([[x1,y1],[x1+w1,y1],[x1,y1+h1],[x1+w1,y1+h1]])\n",
"x2,y2,w2,h2 = frame_face\n",
"pts_o = np.float32([[x2,y2],[x2+w2,y2],[x2,y2+h2],[x2+w2,y2+h2]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "center-beijing",
"metadata": {},
"outputs": [],
"source": [
"M = cv2.getPerspectiveTransform(pts_o,pts_d)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "challenging-leeds",
"metadata": {},
"outputs": [],
"source": [
"ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (640,480),None,None)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "owned-applicant",
"metadata": {},
"outputs": [],
"source": [
"rgb_box = np.array([[319, 75,1]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "comparable-faith",
"metadata": {},
"outputs": [],
"source": [
"rgb_box.dot(M)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "unsigned-cutting",
"metadata": {},
"outputs": [],
"source": [
"display_thermal = thermal_frame.copy()\n",
"x1 = int(232.65413534)\n",
"y1 = int(53.43137255)\n",
"x2 = int(360.28571429)\n",
"y2 = int(235.09803922)\n",
"display_thermal = cv2.rectangle(display_thermal, (x1,y1), (x2, y2), color, 2)\n",
"# display_thermal = cv2.rectangle(display_thermal, (x,y), (x+w, y+h), color, 2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "flush-differential",
"metadata": {},
"outputs": [],
"source": [
"cv2.imshow('dispaly thermal frames',display_thermal)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "markdown",
"id": "literary-narrative",
"metadata": {},
"source": [
"### 子序偵測 主序顯示"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ideal-investigator",
"metadata": {},
"outputs": [],
"source": [
"from threading import Thread\n",
"import time"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "written-rings",
"metadata": {},
"outputs": [],
"source": [
"class Checker:\n",
" def __init__(self,camera,detector,anti):\n",
" self.camera = camera\n",
" self.detector = detector\n",
" self.anti = anti\n",
" self.faces = []\n",
" self.isOpened = False\n",
" self.frame = None\n",
" self.thread = Thread(target=self.__job)\n",
" def start(self):\n",
" self.isOpened = True\n",
" self.thread.start()\n",
" def stop(self):\n",
" self.isOpened = False\n",
" def __job(self):\n",
" while(self.camera.isOpened and self.isOpened):\n",
" ret,frame = self.camera.capture()\n",
" if(ret):\n",
" self.frame = frame.copy()\n",
" new_faces = []\n",
" boxes, labels, probs = self.detector.predict(frame)\n",
" for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" face = {\"location\":box,\"score\":score,\"islive\":False}\n",
" if(score > 0.9):\n",
" scaled_box = self.anti.scale_box(img_h=480,img_w=640,box=box)\n",
" expanded_face = frame[scaled_box[1]:scaled_box[3],scaled_box[0]:scaled_box[2]].copy()\n",
" live_score = self.anti.predict(expanded_face)\n",
" if(live_score > 0.9):\n",
" face[\"islive\"]=True\n",
" new_faces.append(face)\n",
" self.faces = new_faces"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "seeing-plaza",
"metadata": {},
"outputs": [],
"source": [
"checker = Checker(camera=camera ,detector=detector,anti=anti)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "executive-winning",
"metadata": {},
"outputs": [],
"source": [
"checker.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ideal-december",
"metadata": {},
"outputs": [],
"source": [
"while(thermal.isOpened and camera.isOpened):\n",
" thermal_ret,thermal_frame, thermal_row = thermal.capture()\n",
" ret,frame = camera.capture()\n",
"# frame = checker.frame\n",
" ret = checker.isOpened\n",
" if(thermal_ret and ret):\n",
" for face in checker.faces:\n",
" box = face[\"location\"]\n",
" if(face[\"islive\"]):\n",
" color = (0, 255, 0)\n",
" else:\n",
" color = ( 0, 0, 255)\n",
" cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)\n",
" cv2.imshow('frames',np.column_stack((frame,thermal_frame)))\n",
" if(cv2.waitKey(1) == ord('q')):\n",
" break\n",
" else:\n",
" break\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "standard-inquiry",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "muslim-carry",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from PyQt5 import QtWidgets\n",
"from PyQt5.QtWidgets import *\n",
"from PyQt5.QtCore import *\n",
"from PyQt5.QtGui import *\n",
"\n",
"# import Ui_MainWindow as ui\n",
"import ui_test as ui\n",
"\n",
"from uvc_thermal import Thermal\n",
"from camera import Camera\n",
"from detector import FaceDetector\n",
"from anti import AntiSpoofing\n",
"from heater import Heater\n",
"from aligner import BoxAligner\n",
"from correcter import Correcter\n",
"from osd import OSD\n",
"\n",
"import cv2\n",
"import numpy as np\n",
"import time"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class Main(QMainWindow, ui.Ui_MainWindow):\n",
" def __init__(self):\n",
" super().__init__()\n",
" self.setupUi(self)\n",
" self.resize(640,480)\n",
" self.rgb_frame.setScaledContents(True)\n",
" self.thermal_frame.setScaledContents(True)\n",
" self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)\n",
" self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)\n",
" self.detector = FaceDetector(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn\" , input_size=(320,240))\n",
" # self.anti = AntiSpoofing(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn\")\n",
" self.aligner = BoxAligner(img_h = 480,img_w = 640)\n",
" self.heater = Heater(pwm_pin=12 , target_temp=35.0)\n",
" self.correcter = Correcter(model_path=\"../thermal-tk/20210421_correcter.sav\")\n",
" self.osd = OSD(local='Asia/Taipei', str_format='%Y-%m-%d %H:%M:%S')\n",
" self.thermal.start()\n",
" time.sleep(2)\n",
" self.camera.start()\n",
" time.sleep(2)\n",
" self.heater.start()\n",
" time.sleep(2)\n",
" self.osd.start()\n",
" time.sleep(2)\n",
" self.faces = []\n",
" self.timer = QTimer(self)\n",
" self.timer.timeout.connect(self.capture)\n",
" self.timer.start(10)\n",
"# self.painter = QPainter(self)\n",
" def capture(self):\n",
" ret, frame = self.camera.capture()\n",
" thermal_ret,thermal_frame, thermal_row = self.thermal.capture()\n",
" if(thermal_ret and ret):\n",
" blackbody_max = thermal_row[:,:34].max()\n",
" heater_temp = self.heater.last_temp\n",
" # 偵測人臉\n",
" str_time = self.osd.str_time\n",
" boxes, labels, probs = self.detector.predict(frame)\n",
" # 取出所有偵測的結果\n",
" frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n",
"# for i in range(boxes.shape[0]):\n",
"# box = boxes[i, :]\n",
"# score = probs[i]\n",
"# if(score > 0.9):\n",
"# color = (0, 255, 0)\n",
"# thermal_box = self.aligner.box_aligment([box])[0]\n",
"# face_frame = frame[box[1]:box[3],box[0]:box[2]].copy()\n",
"# Qface_frame=QImage(face_frame.data,face_frame.shape[1],face_frame.shape[0],face_frame.shape[1]*3,QImage.Format_RGB888)\n",
"# face_pixMap=QPixmap.fromImage(Qface_frame)\n",
" \n",
"# cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), color, 2)\n",
"# cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)\n",
"# face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)\n",
"# corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)\n",
"# cv2.putText(frame, \"{:.2f}->{:.2f}\".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)\n",
"# cv2.putText(thermal_frame, \"{:.2f}\".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)\n",
" \n",
"# self.face1.setPixmap(face_pixMap)\n",
"# self.temp1.setStyleSheet(\"color: rgb(0, 255, 0);\")\n",
"# self.temp1.setText(\"{:.2f}\".format(corrected_face_temp))\n",
"# self.time1.setText(str_time)\n",
" # 顯示至UI\n",
"# frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n",
" thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)\n",
" Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)\n",
" Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)\n",
" pixMap=QPixmap.fromImage(Qframe)\n",
" thermal_pixMap=QPixmap.fromImage(Qthermal_frame)\n",
" \n",
" qp = QPainter(pixMap)\n",
" thermal_qp = QPainter(thermal_pixMap)\n",
" for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" if(score > 0.9):\n",
" thermal_box = self.aligner.box_aligment([box])[0]\n",
" face_frame = frame[box[1]:box[3],box[0]:box[2]].copy()\n",
" \n",
" face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)\n",
" corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)\n",
" \n",
" face_rect = QRect(box[0], box[1], box[2]-box[0], box[3]-box[1])\n",
" thermal_rect = QRect(thermal_box[0]//4, thermal_box[1]//4, (thermal_box[2]//4 - thermal_box[0]//4), (thermal_box[3]//4 - thermal_box[1]//4))\n",
" \n",
" \n",
" \n",
" if(corrected_face_temp > 37.0):\n",
" pen = QPen(Qt.red, 3)\n",
" else:\n",
" pen = QPen(Qt.green, 3)\n",
" qp.setPen(pen)\n",
" thermal_qp.setPen(pen)\n",
" \n",
" face_crop = pixMap.copy(face_rect)\n",
" self.face1.setPixmap(face_crop)\n",
" self.temp1.setStyleSheet(\"color: rgb(0, 255, 0);\")\n",
" self.temp1.setText(\"{:.2f}\".format(corrected_face_temp))\n",
" self.time1.setText(str_time)\n",
" \n",
" qp.drawRect(face_rect)\n",
" qp.drawText(face_rect, Qt.AlignCenter , str(corrected_face_temp))\n",
" thermal_qp.drawRect(thermal_rect)\n",
" qp.end()\n",
" thermal_qp.end()\n",
" \n",
" self.rgb_frame.setPixmap(pixMap)\n",
" self.thermal_frame.setPixmap(thermal_pixMap)\n",
" self.update()\n",
" def closeEvent(self,event):\n",
" # 關閉程式後執行\n",
" self.thermal.stop()\n",
" self.camera.stop()\n",
" self.heater.stop()\n",
" self.osd.stop()\n",
" def max_temperature(self,box,thermal_row,black_h=0,thermal_height=120,thermal_width=160,image_height=480,image_width=640):\n",
" scale_ratio_h = thermal_height/image_height\n",
" scale_ratio_w = thermal_width/image_width\n",
" x1 = max(int(box[0]*scale_ratio_w),0)\n",
" y1 = max(int(box[1]*scale_ratio_h),black_h)\n",
" x2 = min(int(box[2]*scale_ratio_w),thermal_width)\n",
" y2 = min(int(box[3]*scale_ratio_h),thermal_height)\n",
" box_temperature = thermal_row[y1:y2,x1:x2]\n",
" if(box_temperature.size != 0):\n",
" return box_temperature.max()\n",
" else:\n",
" return 0\n",
"if __name__ == '__main__':\n",
" import sys\n",
" import os\n",
" app = QtWidgets.QApplication(sys.argv)\n",
" window = Main()\n",
" window.show()\n",
" sys.exit(app.exec_())\n",
"# status = app.exec_()\n",
"# os._exit(status)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
" def paintEvent(self, QPaintEvent):\n",
" # 繪圖事件\n",
" time.sleep(1/10)\n",
" ret, frame = self.camera.capture()\n",
" thermal_ret,thermal_frame, thermal_row = self.thermal.capture()\n",
" if(thermal_ret and ret):\n",
" blackbody_max = thermal_row[:,:34].max()\n",
" heater_temp = self.heater.last_temp\n",
" # 偵測人臉\n",
" boxes, labels, probs = self.detector.predict(frame)\n",
" # 取出所有偵測的結果\n",
" for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" if(score > 0.9):\n",
" #scaled_box = anti.scale_box(img_h=480,img_w=640,box=box)\n",
" #expanded_face = frame[scaled_box[1]:scaled_box[3],scaled_box[0]:scaled_box[2]].copy()\n",
" #live_score = anti.predict(expanded_face)\n",
" live_score = 0.9\n",
" if(live_score > 0.9):\n",
" color = (0, 255, 0)\n",
" else:\n",
" color = ( 0, 0, 255)\n",
" thermal_box = self.aligner.box_aligment([box])[0]\n",
" cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), color, 2)\n",
" cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)\n",
" face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)\n",
" corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)\n",
" cv2.putText(frame, \"{:.2f}->{:.2f}\".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)\n",
" cv2.putText(thermal_frame, \"{:.2f}\".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)\n",
" # 顯示至UI\n",
" frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n",
" thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)\n",
" Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)\n",
" Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)\n",
" pixMap=QPixmap.fromImage(Qframe)\n",
" thermal_pixMap=QPixmap.fromImage(Qthermal_frame)\n",
" self.label.setPixmap(pixMap)\n",
" self.label_2.setPixmap(thermal_pixMap)\n",
" self.update()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"color = (255,255,0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\"{}\".format(color)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aligner = BoxAligner(img_h = 480,img_w = 640)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aligner.box_aligment([[0,0,640,480]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import Ui_MainWindow as ui
from uvc_thermal import Thermal
from camera import Camera
from detector import FaceDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
    """Main window: captures paired RGB + thermal frames, runs face
    detection, and overlays blackbody-corrected face temperatures on both
    video feeds.

    NOTE(review): heavy work (device capture + model inference) runs inside
    paintEvent, which also calls self.update() at the end — this keeps the
    widget in a continuous repaint loop. A QTimer-driven capture slot would
    be the conventional Qt design (see the timer-based variant of this
    class elsewhere in the project).
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.resize(640, 480)
        # Let the preview labels stretch their pixmaps to fit.
        self.label.setScaledContents(True)
        self.label_2.setScaledContents(True)
        # Hardware / model pipeline: thermal sensor delivers a 160x120 raw
        # radiometric frame, the RGB camera delivers 640x480.
        self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
        self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
        self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn", input_size=(320, 240))
        # self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
        self.aligner = BoxAligner(img_h=480, img_w=640)
        self.heater = Heater(pwm_pin=12, target_temp=35.0)
        self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
        # Start device threads one at a time, pausing so each can settle.
        self.thermal.start()
        time.sleep(2)
        self.camera.start()
        time.sleep(2)
        self.heater.start()
        time.sleep(2)
        self.num_frame = 0
        # BUGFIX: removed `self.painter = QPainter(self)`. Constructing a
        # QPainter bound to a widget outside paintEvent is invalid in Qt
        # (emits "QPainter::begin: ... engine == 0" warnings) and the
        # attribute was never used — all drawing here goes through cv2 and
        # QPixmap instead.

    def paintEvent(self, QPaintEvent):
        """Repaint handler: grab one RGB + one thermal frame, annotate any
        detected faces with corrected temperatures, and push both frames to
        the preview labels."""
        ret, frame = self.camera.capture()
        thermal_ret, thermal_frame, thermal_row = self.thermal.capture()
        if thermal_ret and ret:
            self.num_frame += 1
            # Columns [0:34) of the raw thermal frame cover the blackbody
            # reference target; its peak reading anchors the correction.
            blackbody_max = thermal_row[:, :34].max()
            heater_temp = self.heater.last_temp
            # Detect faces (timed so inference latency shows in the UI).
            # if self.num_frame % 2 == 0:   # (optional frame-skipping, disabled)
            if True:
                stime = time.time()
                boxes, labels, probs = self.detector.predict(frame)
                processing_time = time.time() - stime
                self.label_3.setText(str(processing_time))
                # Walk every detection above the confidence threshold.
                for i in range(boxes.shape[0]):
                    box = boxes[i, :]
                    score = probs[i]
                    if score > 0.9:
                        # Anti-spoofing is stubbed out; with live_score fixed
                        # at 0.9 the `> 0.9` test is False, so boxes are
                        # always drawn red until the model is re-enabled.
                        # scaled_box = anti.scale_box(img_h=480, img_w=640, box=box)
                        # expanded_face = frame[scaled_box[1]:scaled_box[3], scaled_box[0]:scaled_box[2]].copy()
                        # live_score = anti.predict(expanded_face)
                        live_score = 0.9
                        if live_score > 0.9:
                            color = (0, 255, 0)   # green: live face (BGR)
                        else:
                            color = (0, 0, 255)   # red: suspected spoof (BGR)
                        # Map the RGB-space box into thermal coordinates;
                        # the aligner works at 640x480 while the thermal
                        # frame is 160x120, hence //4 when drawing.
                        thermal_box = self.aligner.box_aligment([box])[0]
                        cv2.rectangle(thermal_frame, (thermal_box[0] // 4, thermal_box[1] // 4), (thermal_box[2] // 4, thermal_box[3] // 4), color, 2)
                        cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
                        # black_h=32 keeps blackbody rows out of the face max.
                        face_temp = self.max_temperature(thermal_box, thermal_row, black_h=32)
                        corrected_face_temp = self.correcter.predict(x_detect_k=face_temp, x_black_k=blackbody_max, x_black_c=heater_temp)
                        # 0.0092: raw-count -> temperature scale factor
                        # (sensor-specific) — TODO confirm against sensor docs.
                        cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp * 0.0092, corrected_face_temp), ((box[0] + box[2]) // 2, (box[1] + box[3]) // 2), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 1, cv2.LINE_AA)
                        cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320 // 4, 64 // 4), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1, cv2.LINE_AA)
            # Display on the UI — Qt expects RGB byte order, cv2 uses BGR.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            thermal_frame = cv2.cvtColor(thermal_frame, cv2.COLOR_BGR2RGB)
            Qframe = QImage(frame.data, frame.shape[1], frame.shape[0], frame.shape[1] * 3, QImage.Format_RGB888)
            Qthermal_frame = QImage(thermal_frame.data, thermal_frame.shape[1], thermal_frame.shape[0], thermal_frame.shape[1] * 3, QImage.Format_RGB888)
            pixMap = QPixmap.fromImage(Qframe)
            thermal_pixMap = QPixmap.fromImage(Qthermal_frame)
            self.label.setPixmap(pixMap)
            self.label_2.setPixmap(thermal_pixMap)
            # Schedule the next repaint — this is what keeps capturing.
            self.update()

    def closeEvent(self, event):
        """Stop the hardware worker threads when the window closes."""
        self.thermal.stop()
        self.camera.stop()
        self.heater.stop()

    def max_temperature(self, box, thermal_row, black_h=0, thermal_height=120, thermal_width=160, image_height=480, image_width=640):
        """Return the hottest raw reading inside `box`, or 0 for an empty crop.

        `box` is in image (image_width x image_height) pixel coordinates and
        is scaled down onto the thermal grid. `black_h` clamps the top edge
        so the blackbody reference rows are excluded from the search.
        """
        scale_ratio_h = thermal_height / image_height
        scale_ratio_w = thermal_width / image_width
        x1 = max(int(box[0] * scale_ratio_w), 0)
        y1 = max(int(box[1] * scale_ratio_h), black_h)
        x2 = min(int(box[2] * scale_ratio_w), thermal_width)
        y2 = min(int(box[3] * scale_ratio_h), thermal_height)
        box_temperature = thermal_row[y1:y2, x1:x2]
        # Clamped or degenerate boxes can produce an empty slice; avoid the
        # ValueError that .max() would raise on it.
        if box_temperature.size != 0:
            return box_temperature.max()
        else:
            return 0
if __name__ == '__main__':
    import sys
    import os
    app = QtWidgets.QApplication(sys.argv)
    window = Main()
    window.show()
    status = app.exec_()
    # Give the hardware worker threads a moment to wind down after the Qt
    # event loop returns, then hard-exit: os._exit skips normal interpreter
    # teardown. NOTE(review): presumably a workaround for non-daemon capture
    # threads that would otherwise hang a clean sys.exit — confirm.
    time.sleep(1)
    # sys.exit(app.exec_())
    os._exit(status)
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
# import Ui_MainWindow as ui
import ui_test as ui
from uvc_thermal import Thermal
from camera import Camera
from detector import FaceDetector
# from mask_detector import FaceMaskDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
from osd import OSD
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.resize(640,480)
self.rgb_frame.setScaledContents(True)
self.thermal_frame.setScaledContents(True)
self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
# self.detector = FaceMaskDetector(model_path="/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn" , input_size=(640,640))
# self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
self.aligner = BoxAligner(img_h = 480,img_w = 640)
self.heater = Heater(pwm_pin=12 , target_temp=35.0)
self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
self.osd = OSD(local='Asia/Taipei', str_format='%Y-%m-%d %H:%M:%S')
self.thermal.start()
time.sleep(2)
self.camera.start()
time.sleep(2)
self.heater.start()
time.sleep(2)
self.osd.start()
time.sleep(2)
self.faces = [self.face1,self.face2,self.face3]
self.temps = [self.temp1,self.temp2,self.temp3]
self.times = [self.time1,self.time2,self.time3]
self.black_line = 34
self.timer = QTimer(self)
self.timer.timeout.connect(self.capture)
self.timer.start(10)
# self.painter = QPainter(self)
def capture(self):
ret, frame = self.camera.capture()
thermal_ret,thermal_frame, thermal_row = self.thermal.capture()
if(thermal_ret and ret):
blackbody_max = thermal_row[:,:self.black_line].max()
heater_temp = self.heater.last_temp
# 偵測人臉
str_time = self.osd.str_time
boxes, labels, probs = self.detector.predict(frame)
# 取出所有偵測的結果
frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
for i in range(boxes.shape[0]):
box = boxes[i, :]
score = probs[i]
if(score > 0.9):
# color = (255, 0, 0)
thermal_box = self.aligner.box_aligment([box])[0]
face_frame = frame[box[1]:box[3],box[0]:box[2]].copy()
Qface_frame=QImage(face_frame.data,face_frame.shape[1],face_frame.shape[0],face_frame.shape[1]*3,QImage.Format_RGB888)
face_pixMap=QPixmap.fromImage(Qface_frame)
face_temp = self.max_temperature(thermal_box,thermal_row,black_h=self.black_line)
corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
if(corrected_face_temp > 37.0):
color = (255, 0, 0)
elif(corrected_face_temp < 35.0):
color = (255, 255, 0)
else:
color = (0, 255, 0)
cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), color, 2)
cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, color, 1, cv2.LINE_AA)
cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)
j = i%len(self.faces)
self.faces[j].setPixmap(face_pixMap)
self.temps[j].setStyleSheet("color: rgb{};".format(color))
self.temps[j].setText("{:.2f}".format(corrected_face_temp))
self.times[j].setText(str_time)
# 顯示至UI
# frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
# thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
# 繪製黑體底部範圍線
Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)
Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
pixMap=QPixmap.fromImage(Qframe)
thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
qp = QPainter(thermal_pixMap)
pen = QPen(Qt.red, 3)
qp.setPen(pen)
qp.drawLine(0, self.black_line, 160, self.black_line)
qp.end()
self.rgb_frame.setPixmap(pixMap)
self.thermal_frame.setPixmap(thermal_pixMap)
self.update()
def closeEvent(self, event):
    """Qt close hook: shut down every background worker before the window exits."""
    # Keep the original shutdown order: thermal, camera, heater, OSD.
    for worker in (self.thermal, self.camera, self.heater, self.osd):
        worker.stop()
def max_temperature(self, box, thermal_row, black_h=0, thermal_height=120, thermal_width=160, image_height=480, image_width=640):
    """Project an RGB-space box onto the raw thermal array and return its hottest value.

    box: (x1, y1, x2, y2) in RGB-image pixel coordinates (image_width x image_height).
    thermal_row: 2-D array of raw thermal readings (thermal_height x thermal_width).
    black_h: rows above this index are excluded (blackbody reference strip).
    Returns 0 when the projected region is empty.
    """
    sx = thermal_width / image_width
    sy = thermal_height / image_height
    left = max(int(box[0] * sx), 0)
    top = max(int(box[1] * sy), black_h)
    right = min(int(box[2] * sx), thermal_width)
    bottom = min(int(box[3] * sy), thermal_height)
    region = thermal_row[top:bottom, left:right]
    return region.max() if region.size else 0
if __name__ == '__main__':
    import os
    import sys

    # Build the Qt application and main window, then enter the event loop.
    application = QtWidgets.QApplication(sys.argv)
    main_window = Main()
    main_window.show()
    exit_code = application.exec_()
    # os._exit terminates immediately, skipping interpreter cleanup.
    os._exit(exit_code)
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
# import Ui_MainWindow as ui
import ui_test as ui
from uvc_thermal import Thermal
from camera import Camera
from detector import FaceDetector
# from mask_detector import FaceMaskDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
from osd import OSD
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
    """Main window: fuses an RGB camera with a thermal camera to estimate face
    temperatures, rendering results with QPainter and filling rotating
    face/temperature/time label slots on the UI.
    """
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.resize(640,480)
        self.rgb_frame.setScaledContents(True)
        self.thermal_frame.setScaledContents(True)
        # Worker objects: thermal camera, RGB camera, face detector,
        # RGB->thermal box aligner, blackbody heater, temperature correcter, OSD clock.
        self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
        self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
        self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
        # self.detector = FaceMaskDetector(model_path="/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn" , input_size=(640,640))
        # self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
        self.aligner = BoxAligner(img_h = 480,img_w = 640)
        self.heater = Heater(pwm_pin=12 , target_temp=35.0)
        self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
        self.osd = OSD(local='Asia/Taipei', str_format='%Y-%m-%d %H:%M:%S')
        # Start worker threads; the sleeps give each device time to initialise.
        self.thermal.start()
        time.sleep(2)
        self.camera.start()
        time.sleep(2)
        self.heater.start()
        time.sleep(2)
        self.osd.start()
        time.sleep(2)
        # Rotating UI slots for up to three detected faces.
        self.faces = [self.face1,self.face2,self.face3]
        self.temps = [self.temp1,self.temp2,self.temp3]
        self.times = [self.time1,self.time2,self.time3]
        # Thermal-frame column/row bound of the blackbody reference region.
        self.black_line = 34
        # Poll both cameras every 10 ms.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.capture)
        self.timer.start(10)
        # self.painter = QPainter(self)
    def capture(self):
        """Timer callback: grab RGB + thermal frames, detect faces, compute
        corrected temperatures, and paint everything onto the UI labels."""
        ret, frame = self.camera.capture()
        thermal_ret,thermal_frame, thermal_row = self.thermal.capture()
        if(thermal_ret and ret):
            # Hottest raw reading inside the blackbody reference region.
            blackbody_max = thermal_row[:,:self.black_line].max()
            heater_temp = self.heater.last_temp
            # Detect faces
            str_time = self.osd.str_time
            boxes, labels, probs = self.detector.predict(frame)
            # Convert BGR -> RGB for QImage display
            frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)
            Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
            pixMap=QPixmap.fromImage(Qframe)
            thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
            # Create painter objects for both pixmaps
            qp = QPainter(pixMap)
            thermal_qp = QPainter(thermal_pixMap)
            # Iterate over every detection and draw it
            for i in range(boxes.shape[0]):
                box = boxes[i, :]
                score = probs[i]
                if(score > 0.9):
                    # Project the RGB box into thermal coordinates.
                    thermal_box = self.aligner.box_aligment([box])[0]
                    # NOTE(review): face_frame is computed but unused in this version.
                    face_frame = frame[box[1]:box[3],box[0]:box[2]].copy()
                    face_temp = self.max_temperature(thermal_box,thermal_row,black_h=self.black_line)
                    corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
                    face_rect = QRect(box[0], box[1], box[2]-box[0], box[3]-box[1])
                    # //4 maps 640x480 aligned coordinates onto the 160x120 thermal pixmap.
                    thermal_rect = QRect(thermal_box[0]//4, thermal_box[1]//4, (thermal_box[2]//4 - thermal_box[0]//4), (thermal_box[3]//4 - thermal_box[1]//4))
                    # Classify the temperature range: red = high, yellow = low, green = normal.
                    if(corrected_face_temp > 37.0):
                        pen = QPen(Qt.red, 3)
                        color = (255, 0, 0)
                    elif(corrected_face_temp < 35.0):
                        pen = QPen(Qt.yellow, 3)
                        color = (255, 255, 0)
                    else:
                        pen = QPen(Qt.green, 3)
                        color = (0, 255, 0)
                    # Show the face thumbnail in the next rotating UI slot.
                    j = i%len(self.faces)
                    face_crop = pixMap.copy(face_rect)
                    self.faces[j].setPixmap(face_crop)
                    self.temps[j].setStyleSheet("color: rgb{};".format(color))
                    self.temps[j].setText("{:.2f}".format(corrected_face_temp))
                    self.times[j].setText(str_time)
                    # Set the pen colour for both painters
                    qp.setPen(pen)
                    thermal_qp.setPen(pen)
                    # Draw onto the QPixmaps
                    qp.drawRect(face_rect)
                    qp.drawText(face_rect, Qt.AlignCenter , "{:.2f}".format(corrected_face_temp))
                    thermal_qp.drawRect(thermal_rect)
            # Draw the blackbody region boundary line and heater temperature
            thermal_qp.setPen(QPen(Qt.red, 3))
            thermal_qp.drawLine(0, self.black_line, 160, self.black_line)
            thermal_qp.setPen(QPen(Qt.black, 3))
            thermal_qp.drawText(QRect(0,0,160,self.black_line), Qt.AlignCenter , "{:.2f}".format(heater_temp))
            # Draw the OSD timestamp
            qp.setPen(QPen(Qt.black, 3))
            qp.drawText(QRect(0,0,640,20),Qt.AlignLeft,str_time)
            # Release the painters before handing the pixmaps to the labels
            qp.end()
            thermal_qp.end()
            # Set the pixmaps on the label frames
            self.rgb_frame.setPixmap(pixMap)
            self.thermal_frame.setPixmap(thermal_pixMap)
            self.update()
    def closeEvent(self,event):
        # Runs on shutdown: stop all worker threads.
        self.thermal.stop()
        self.camera.stop()
        self.heater.stop()
        self.osd.stop()
    def max_temperature(self,box,thermal_row,black_h=0,thermal_height=120,thermal_width=160,image_height=480,image_width=640):
        """Map an RGB-space box (x1,y1,x2,y2) onto the raw thermal array and return
        the hottest raw value inside it, excluding rows above black_h.
        Returns 0 when the projected region is empty."""
        scale_ratio_h = thermal_height/image_height
        scale_ratio_w = thermal_width/image_width
        x1 = max(int(box[0]*scale_ratio_w),0)
        y1 = max(int(box[1]*scale_ratio_h),black_h)
        x2 = min(int(box[2]*scale_ratio_w),thermal_width)
        y2 = min(int(box[3]*scale_ratio_h),thermal_height)
        box_temperature = thermal_row[y1:y2,x1:x2]
        if(box_temperature.size != 0):
            return box_temperature.max()
        else:
            return 0
if __name__ == '__main__':
    import sys
    import os
    app = QtWidgets.QApplication(sys.argv)
    window = Main()
    window.show()
    # sys.exit(app.exec_())
    status = app.exec_()
    # NOTE(review): os._exit skips interpreter cleanup — presumably chosen so
    # lingering worker threads cannot block process shutdown; confirm intent.
    os._exit(status)
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
# import Ui_MainWindow as ui
import ui_test as ui
from uvc_thermal import Thermal
from camera import Camera
# from detector import FaceDetector
from mask_detector import FaceMaskDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
from osd import OSD
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
    """Mask-aware variant of the temperature-screening window: uses
    FaceMaskDetector (face + mask scores) and draws with OpenCV instead of
    QPainter before handing frames to the UI labels.
    """
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.resize(640,480)
        self.rgb_frame.setScaledContents(True)
        self.thermal_frame.setScaledContents(True)
        # Worker objects: thermal camera, RGB camera, mask detector,
        # RGB->thermal box aligner, blackbody heater, temperature correcter, OSD clock.
        self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
        self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
        # self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
        self.detector = FaceMaskDetector(model_path="/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn" , input_size=(640,640))
        # self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
        self.aligner = BoxAligner(img_h = 480,img_w = 640)
        self.heater = Heater(pwm_pin=12 , target_temp=35.0)
        self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
        self.osd = OSD(local='Asia/Taipei', str_format='%Y-%m-%d %H:%M:%S')
        # Start worker threads; the sleeps give each device time to initialise.
        self.thermal.start()
        time.sleep(2)
        self.camera.start()
        time.sleep(2)
        self.heater.start()
        time.sleep(2)
        self.osd.start()
        time.sleep(2)
        # NOTE(review): unlike the other variant, this version keeps no label
        # slots here and always writes to face1/temp1/time1 below.
        self.faces = []
        # Poll both cameras every 10 ms.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.capture)
        self.timer.start(10)
        # self.painter = QPainter(self)
    def capture(self):
        """Timer callback: grab RGB + thermal frames, detect faces and mask
        wearing, compute corrected temperatures, and update the UI."""
        ret, frame = self.camera.capture()
        thermal_ret,thermal_frame, thermal_row = self.thermal.capture()
        if(thermal_ret and ret):
            # Hottest raw reading inside the blackbody reference region (first 34 columns).
            blackbody_max = thermal_row[:,:34].max()
            heater_temp = self.heater.last_temp
            # Detect faces
            str_time = self.osd.str_time
            boxes, labels, probs, mask_probs = self.detector.predict(frame)
            # Iterate over every detection (BGR -> RGB first for QImage display)
            frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            for i in range(boxes.shape[0]):
                box = boxes[i, :]
                score = probs[i]
                mask_score = mask_probs[i]
                if(score > 0.9):
                    # Project the RGB box into thermal coordinates.
                    thermal_box = self.aligner.box_aligment([box])[0]
                    face_frame = frame[box[1]:box[3],box[0]:box[2]].copy()
                    Qface_frame=QImage(face_frame.data,face_frame.shape[1],face_frame.shape[0],face_frame.shape[1]*3,QImage.Format_RGB888)
                    face_pixMap=QPixmap.fromImage(Qface_frame)
                    face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)
                    corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
                    # Box colour encodes mask status: green = masked, red = unmasked.
                    if(mask_score > 0.7):
                        bbox_color = (0, 255, 0)
                    else:
                        bbox_color = (255, 0, 0)
                    # Text colour encodes temperature range: red = high, yellow = low, green = normal.
                    if(corrected_face_temp > 37.0):
                        color = (255, 0, 0)
                    elif(corrected_face_temp < 35.0):
                        color = (255, 255, 0)
                    else:
                        color = (0, 255, 0)
                    # //4 maps 640x480 aligned coordinates onto the 160x120 thermal frame.
                    cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), bbox_color, 2)
                    cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), bbox_color, 2)
                    cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, color, 1, cv2.LINE_AA)
                    cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)
                    self.face1.setPixmap(face_pixMap)
                    self.temp1.setStyleSheet("color: rgb{};".format(color))
                    self.temp1.setText("{:.2f}".format(corrected_face_temp))
                    self.time1.setText(str_time)
            # Display on the UI
            Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)
            Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
            pixMap=QPixmap.fromImage(Qframe)
            thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
            self.rgb_frame.setPixmap(pixMap)
            self.thermal_frame.setPixmap(thermal_pixMap)
            self.update()
    def closeEvent(self,event):
        # Runs on shutdown: stop all worker threads.
        self.thermal.stop()
        self.camera.stop()
        self.heater.stop()
        self.osd.stop()
    def max_temperature(self,box,thermal_row,black_h=0,thermal_height=120,thermal_width=160,image_height=480,image_width=640):
        """Map an RGB-space box (x1,y1,x2,y2) onto the raw thermal array and return
        the hottest raw value inside it, excluding rows above black_h.
        Returns 0 when the projected region is empty."""
        scale_ratio_h = thermal_height/image_height
        scale_ratio_w = thermal_width/image_width
        x1 = max(int(box[0]*scale_ratio_w),0)
        y1 = max(int(box[1]*scale_ratio_h),black_h)
        x2 = min(int(box[2]*scale_ratio_w),thermal_width)
        y2 = min(int(box[3]*scale_ratio_h),thermal_height)
        box_temperature = thermal_row[y1:y2,x1:x2]
        if(box_temperature.size != 0):
            return box_temperature.max()
        else:
            return 0
if __name__ == '__main__':
    import sys
    import os
    app = QtWidgets.QApplication(sys.argv)
    window = Main()
    window.show()
    # sys.exit(app.exec_())
    status = app.exec_()
    # NOTE(review): os._exit skips interpreter cleanup — presumably chosen so
    # lingering worker threads cannot block process shutdown; confirm intent.
    os._exit(status)
# Grant device permissions for the UVC thermal camera and peripherals (temporary workaround).
# NOTE(review): blanket 777 chmods are insecure and reset on reboot — prefer udev rules.
sudo chmod -R 777 /dev/bus/usb/*
sudo chmod 777 /dev/video*
sudo chmod 777 /dev/spidev0.*
sudo chmod 777 /dev/gpio*
\ No newline at end of file
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout: a large video label on the left and a vertical
    column of four labels on the right, plus a status bar and a menu bar.
    (Generated code — regenerate from the .ui file rather than hand-editing.)"""
    def setupUi(self, MainWindow, window_size=(640,480)):
        # Build the widget tree; window_size is a manual addition to the
        # generated signature (width, height).
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(window_size[0],window_size[1])
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Left pane: the main RGB video label.
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
        self.label.setToolTipDuration(0)
        self.label.setStyleSheet("background-color: rgb(170, 0, 0);")
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        # Right pane: stacked info labels (thermal frame + text slots).
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setStyleSheet("background-color: rgb(170, 255, 0);")
        self.label_2.setObjectName("label_2")
        self.verticalLayout.addWidget(self.label_2)
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setObjectName("label_3")
        self.verticalLayout.addWidget(self.label_3)
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setStyleSheet("background-color: rgb(170, 255, 255);")
        self.label_4.setObjectName("label_4")
        self.verticalLayout.addWidget(self.label_4)
        self.label_5 = QtWidgets.QLabel(self.centralwidget)
        self.label_5.setObjectName("label_5")
        self.verticalLayout.addWidget(self.label_5)
        self.horizontalLayout.addLayout(self.verticalLayout)
        # Left pane gets 4x the horizontal stretch of the right pane.
        self.horizontalLayout.setStretch(0, 4)
        self.horizontalLayout.setStretch(1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1094, 21))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        MainWindow.setMenuBar(self.menubar)
        self.menubar.addAction(self.menu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Install translatable placeholder texts on all widgets.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "rgb theraml"))
        self.label_2.setText(_translate("MainWindow", "theraml frame"))
        self.label_3.setText(_translate("MainWindow", "TextLabel"))
        self.label_4.setText(_translate("MainWindow", "TextLabel"))
        self.label_5.setText(_translate("MainWindow", "TextLabel"))
        self.menu.setTitle(_translate("MainWindow", "選單"))
if __name__ == "__main__":
    # Standalone preview of the generated layout.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
import cv2
import numpy as np
class BoxAligner:
    """Projects face boxes from RGB-image coordinates into thermal-image coordinates.

    Uses a pre-calibrated 3x3 homography ``M`` (RGB -> thermal, homogeneous
    coordinates); ``get_omography_matrix`` can re-estimate it from matched
    point pairs.
    """

    def __init__(self, img_h, img_w):
        # Clipping bounds for the projected boxes.
        self.img_h = img_h
        self.img_w = img_w
        # Pre-calibrated RGB->thermal homography matrix.
        self.M = np.array([[ 8.13802980e-01, -2.63523694e-02, 9.30324875e+01],
                           [ 2.10292692e-02, 7.84319221e-01, 7.70246127e+01],
                           [ 1.48500793e-04, -1.53618915e-04, 1.00000000e+00]])

    def box_aligment(self, boxes):
        """Map each (x1, y1, x2, y2) box through the homography.

        Returns integer boxes clipped to [0, img_w] x [0, img_h].

        Fix: a homography produces homogeneous coordinates that must be
        normalised by the w component; the original code computed w1/w2 but
        never divided by them.
        """
        aligned_boxes = []
        for b in boxes:
            x1, y1, w1 = np.dot(self.M, [b[0], b[1], 1])
            x2, y2, w2 = np.dot(self.M, [b[2], b[3], 1])
            # Perspective division: homogeneous -> Cartesian coordinates.
            x1, y1 = x1 / w1, y1 / w1
            x2, y2 = x2 / w2, y2 / w2
            x1 = max(int(x1), 0)
            y1 = max(int(y1), 0)
            x2 = min(int(x2), self.img_w)
            y2 = min(int(y2), self.img_h)
            aligned_boxes.append([x1, y1, x2, y2])
        return aligned_boxes

    def get_omography_matrix(self, rgb_points, thermal_points):
        """Re-estimate the homography from matched RGB/thermal point sets via RANSAC."""
        h, mask = cv2.findHomography(rgb_points, thermal_points, cv2.RANSAC)
        self.M = h
\ No newline at end of file
import math
import MNN
import cv2
import numpy as np
class AntiSpoofing:
    """Face anti-spoofing (liveness) classifier backed by an MNN MiniFASNet model."""

    def __init__(self, model_path="../model/4_0_0_80x80_MiniFASNetV1SE.mnn"):
        self.interpreter = MNN.Interpreter(model_path)
        self.session = self.interpreter.createSession({'numThread':4})
        self.input_tensor = self.interpreter.getSessionInput(self.session)

    def predict(self, image):
        """Run the model on a face crop and return the probability of the
        'live' class (index 1 of the softmax output)."""
        image = cv2.resize(image, (80,80))
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        image = image.astype(np.float32)
        tmp_input = MNN.Tensor((1, 3, 80,80), MNN.Halide_Type_Float, image, MNN.Tensor_DimensionType_Caffe)
        self.input_tensor.copyFrom(tmp_input)
        self.interpreter.runSession(self.session)
        isLive = self.interpreter.getSessionOutput(self.session, "Reshape176").getData()
        isLive = self.softmax_py(isLive)
        return isLive[1]

    def softmax_py(self, logits_data):
        """Softmax over a sequence of logits, each rounded to 3 decimals.

        Fix: subtract the max logit before exponentiating so large logits do
        not overflow math.exp; the mathematical result is unchanged.
        """
        peak = max(logits_data)
        logits_exp = [math.exp(i - peak) for i in logits_data]
        sum_logits_exp = sum(logits_exp)
        softmax = [round(i/sum_logits_exp, 3) for i in logits_exp]
        return softmax

    def scale_box(self, img_h, img_w, box, scale=2):
        """Expand an (x1, y1, x2, y2) box by mirroring its extent outward
        (roughly `scale`x), clipped to the image bounds."""
        x1, y1, x2, y2 = box
        new_x1 = max(scale*x1 - x2, 0)
        new_y1 = max(scale*y1 - y2, 0)
        new_x2 = min(scale*x2 - x1, img_w)
        new_y2 = min(scale*y2 - y1, img_h)
        return [new_x1, new_y1, new_x2, new_y2]
\ No newline at end of file
from threading import Thread
import time, numpy as np, cv2
class Camera(object):
    """Threaded wrapper around cv2.VideoCapture that continuously grabs frames
    into a latest-frame buffer readable via capture()."""

    def __init__(self, width, height, framerate, log=None, channel=0, flip=None):
        # Fall back to the built-in print-based logger when none is supplied.
        self.__log = self.__log if log is None else log
        self.__isCaptured = False
        self.__frame = None
        self.isOpened = False
        self.width = width
        self.height = height
        self.framerate = framerate
        self.channel = channel
        self.flip = flip  # cv2.flip code (0/1/-1) or None for no flipping
        self.__thread = Thread(target=self.__job)

    def start(self):
        """Clear state and begin capturing on the background thread."""
        self.__isCaptured = False
        self.__frame = None
        self.isOpened = True
        self.__thread.start()

    def stop(self):
        """Signal the capture loop to exit and drop the last frame."""
        self.__isCaptured = False
        self.__frame = None
        self.isOpened = False

    def capture(self):
        """Return (has_frame, latest_frame) without blocking."""
        return self.__isCaptured, self.__frame

    def __job(self):
        # Background loop: read from the V4L2 device at ~framerate until stopped.
        w = self.width
        h = self.height
        fps = self.framerate
        dev = cv2.VideoCapture(self.channel, cv2.CAP_V4L2)
        while self.isOpened:
            if dev.isOpened():
                ret, frame = dev.read()
                if not ret:
                    break
                if(self.flip is not None):
                    self.__frame = cv2.flip(frame, self.flip)
                else:
                    # Fix: was `self.__frame = frame, 1`, which stored a
                    # (frame, 1) tuple instead of the frame itself.
                    self.__frame = frame
                self.__isCaptured = ret
            time.sleep(1 / fps)
        dev.release()
        self.__isCaptured = False
        self.__frame = None
        self.__log("camera stop")

    def __gstreamer(self, width, height, framerate, flip_method=2):
        """Build a GStreamer pipeline string (unused alternative to V4L2 capture)."""
        return (
            f'nvarguscamerasrc ! video/x-raw(memory:NVMM), ' +
            f'width=(int){width}, height=(int){height}, ' +
            f'format=(string)NV12, framerate=(fraction){framerate}/1 ! ' +
            f'nvvidconv flip-method={flip_method} ! ' +
            f'video/x-raw, width=(int){width}, height=(int){height}, format=(string)BGRx ! ' +
            'videoconvert ! video/x-raw, format=(string)BGR ! appsink')

    def __log(self, message):
        # Default logger: plain print.
        print(message)
import numpy as np
import os
from scipy.optimize import curve_fit
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import pickle
class Correcter(object):
    """Temperature-correction model mapping raw detector/blackbody readings to a
    corrected face temperature.

    The deployed model is loaded from a pickle file; ``fitting`` documents how
    it was trained and can regenerate one from the calibration log.
    """

    def __init__(self, model_path="./20210421_correcter.sav"):
        # SECURITY NOTE: pickle.load executes arbitrary code from the file —
        # only load model files from trusted sources.
        # Fix: use a context manager so the file handle is closed (was leaked).
        with open(model_path, 'rb') as f:
            self.clf = pickle.load(f)

    def mean_absolute_percentage_error(self, y_true, y_pred):
        """MAPE in percent; assumes y_true contains no zeros."""
        return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

    def fitting(self):
        """Train an SVR correction model from the augmented calibration log.

        Each log line: detect_raw blackbody_raw blackbody_temp detect_temp.
        The target is (blackbody_temp - detect_temp) from the two raw readings.
        NOTE(review): this trains on 2 features while ``predict`` passes 3 —
        the deployed pickle presumably uses 3 features; confirm before retraining.
        """
        x = []
        y = []
        # Fix: close the log file deterministically (was left open).
        with open(os.path.join("./logs/argumented_data.txt")) as log:
            lines = log.readlines()
        for line in lines:
            rowdata = line.strip('\n').split(' ')
            detect_rowdata = float(rowdata[0])
            blackbody_rowdata = float(rowdata[1])
            blackbody_temp = float(rowdata[2])
            detect_temp = float(rowdata[3])
            x.append([detect_rowdata, blackbody_rowdata])
            y.append(blackbody_temp - detect_temp)
        X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)
        clf = SVR(kernel='poly', degree=1, gamma=0.001)
        clf.fit(X_train, y_train)
        pred2 = clf.predict(X_test)
        r2 = r2_score(y_test, pred2)
        mse = mean_squared_error(y_test, pred2)
        mape = self.mean_absolute_percentage_error(y_test, pred2)
        print("kernel: {}, r_squre: {}, MSE: {}, MAPE: {}".format('rbf', r2, mse, mape))
        return clf

    def predict(self, x_detect_k, x_black_k, x_black_c):
        """Predict the corrected temperature from the raw face reading, the raw
        blackbody reading, and the heater (blackbody) temperature."""
        return self.clf.predict([[x_detect_k, x_black_k, x_black_c]])[0]
\ No newline at end of file
import math
import MNN
import cv2
import numpy as np
import torch
class FaceDetector:
    """Ultra-light SSD-style face detector running an MNN model.

    Prior (anchor) boxes are generated once at construction; for each frame the
    network's regression and score outputs are decoded against the priors and
    filtered with hard NMS into pixel-space boxes.
    """
    def __init__(self,model_path="../model/version-slim/slim-320.mnn",input_size=(320,240)):
        self.input_size = input_size  # (width, height) of the network input
        # Input normalisation: (pixel - 127) / 128.
        self.image_mean = np.array([127, 127, 127])
        self.image_std = 128.0
        self.iou_threshold = 0.3
        self.threshold = 0.7  # minimum face score
        # SSD box-decoding variances.
        self.center_variance = 0.1
        self.size_variance = 0.2
        # Anchor sizes per feature map, and the stride of each feature map.
        self.min_boxes = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
        self.strides = [8, 16, 32, 64]
        self.priors = self.define_img_size(self.input_size)
        self.interpreter = MNN.Interpreter(model_path)
        self.session = self.interpreter.createSession({'numThread':4})
        self.input_tensor = self.interpreter.getSessionInput(self.session)
    def predict(self,image_ori):
        """Run detection on a full image; returns (boxes Nx4 int32 pixel
        coordinates, labels, probs)."""
        image = cv2.resize(image_ori, self.input_size)
        image = (image - self.image_mean) / self.image_std
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        image = image.astype(np.float32)
        tmp_input = MNN.Tensor((1, 3, self.input_size[1], self.input_size[0]), MNN.Halide_Type_Float, image, MNN.Tensor_DimensionType_Caffe)
        self.input_tensor.copyFrom(tmp_input)
        self.interpreter.runSession(self.session)
        scores = self.interpreter.getSessionOutput(self.session, "scores").getData()
        boxes = self.interpreter.getSessionOutput(self.session, "boxes").getData()
        boxes = np.expand_dims(np.reshape(boxes, (-1, 4)), axis=0)
        scores = np.expand_dims(np.reshape(scores, (-1, 2)), axis=0)
        # Decode SSD offsets against the priors, convert to corner form, then NMS.
        boxes = self.convert_locations_to_boxes(boxes, self.priors, self.center_variance, self.size_variance)
        boxes = self.center_form_to_corner_form(boxes)
        boxes, labels, probs = self.decode(image_ori.shape[1], image_ori.shape[0], scores, boxes, self.threshold)
        return boxes, labels, probs
    def define_img_size(self,image_size):
        """Compute per-stride feature-map sizes for the input size and build priors."""
        shrinkage_list = []
        feature_map_w_h_list = []
        for size in image_size:
            feature_map = [math.ceil(size / stride) for stride in self.strides]
            feature_map_w_h_list.append(feature_map)
        for i in range(0, len(image_size)):
            shrinkage_list.append(self.strides)
        priors = self.generate_priors(feature_map_w_h_list, shrinkage_list, image_size, self.min_boxes)
        return priors
    def generate_priors(self,feature_map_list, shrinkage_list, image_size, min_boxes, clamp=True):
        """Generate SSD priors in center form (cx, cy, w, h), normalised to [0, 1]."""
        priors = []
        for index in range(0, len(feature_map_list[0])):
            scale_w = image_size[0] / shrinkage_list[0][index]
            scale_h = image_size[1] / shrinkage_list[1][index]
            for j in range(0, feature_map_list[1][index]):
                for i in range(0, feature_map_list[0][index]):
                    x_center = (i + 0.5) / scale_w
                    y_center = (j + 0.5) / scale_h
                    for min_box in min_boxes[index]:
                        w = min_box / image_size[0]
                        h = min_box / image_size[1]
                        priors.append([
                            x_center,
                            y_center,
                            w,
                            h
                        ])
        print("priors nums:{}".format(len(priors)))
        priors = torch.tensor(priors)
        if clamp:
            torch.clamp(priors, 0.0, 1.0, out=priors)
        return priors
    def decode(self,width, height, confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
        """Threshold per-class confidences, apply NMS, and scale normalised
        boxes to pixel coordinates; returns (boxes, labels, probs)."""
        boxes = boxes[0]
        confidences = confidences[0]
        picked_box_probs = []
        picked_labels = []
        # Class 0 is background, so start from class 1.
        for class_index in range(1, confidences.shape[1]):
            probs = confidences[:, class_index]
            mask = probs > prob_threshold
            probs = probs[mask]
            if probs.shape[0] == 0:
                continue
            subset_boxes = boxes[mask, :]
            box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
            box_probs = self.hard_nms(box_probs,
                                      iou_threshold=iou_threshold,
                                      top_k=top_k,
                                      )
            picked_box_probs.append(box_probs)
            picked_labels.extend([class_index] * box_probs.shape[0])
        if not picked_box_probs:
            return np.array([]), np.array([]), np.array([])
        picked_box_probs = np.concatenate(picked_box_probs)
        # Scale normalised [0, 1] coordinates to pixel space.
        picked_box_probs[:, 0] *= width
        picked_box_probs[:, 1] *= height
        picked_box_probs[:, 2] *= width
        picked_box_probs[:, 3] *= height
        return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4]
    def center_form_to_corner_form(self,locations):
        """Convert (cx, cy, w, h) boxes to (x1, y1, x2, y2) corner form."""
        return np.concatenate([locations[..., :2] - locations[..., 2:] / 2,
                               locations[..., :2] + locations[..., 2:] / 2], len(locations.shape) - 1)
    def convert_locations_to_boxes(self,locations, priors, center_variance,
                                   size_variance):
        """Convert regressional location results of SSD into boxes in the form of (center_x, center_y, h, w).
        The conversion:
            $$predicted\_center * center_variance = \frac {real\_center - prior\_center} {prior\_hw}$$
            $$exp(predicted\_hw * size_variance) = \frac {real\_hw} {prior\_hw}$$
        We do it in the inverse direction here.
        Args:
            locations (batch_size, num_priors, 4): the regression output of SSD. It will contain the outputs as well.
            priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes.
            center_variance: a float used to change the scale of center.
            size_variance: a float used to change of scale of size.
        Returns:
            boxes: priors: [[center_x, center_y, h, w]]. All the values
                are relative to the image size.
        """
        # priors can have one dimension less.
        if len(priors.shape) + 1 == len(locations.shape):
            priors = np.expand_dims(priors, 0)
        return np.concatenate([
            locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],
            np.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
        ], axis=len(locations.shape) - 1)
    def area_of(self,left_top, right_bottom):
        """Compute the areas of rectangles given two corners.
        Args:
            left_top (N, 2): left top corner.
            right_bottom (N, 2): right bottom corner.
        Returns:
            area (N): return the area.
        """
        hw = np.clip(right_bottom - left_top, 0.0, None)
        return hw[..., 0] * hw[..., 1]
    def iou_of(self,boxes0, boxes1, eps=1e-5):
        """Return intersection-over-union (Jaccard index) of boxes.
        Args:
            boxes0 (N, 4): ground truth boxes.
            boxes1 (N or 1, 4): predicted boxes.
            eps: a small number to avoid 0 as denominator.
        Returns:
            iou (N): IoU values.
        """
        overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
        overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
        overlap_area = self.area_of(overlap_left_top, overlap_right_bottom)
        area0 = self.area_of(boxes0[..., :2], boxes0[..., 2:])
        area1 = self.area_of(boxes1[..., :2], boxes1[..., 2:])
        return overlap_area / (area0 + area1 - overlap_area + eps)
    def hard_nms(self,box_scores, iou_threshold, top_k=-1, candidate_size=200):
        """
        Args:
            box_scores (N, 5): boxes in corner-form and probabilities.
            iou_threshold: intersection over union threshold.
            top_k: keep top_k results. If k <= 0, keep all the results.
            candidate_size: only consider the candidates with the highest scores.
        Returns:
            the kept rows of box_scores (K, 5), highest-scoring first.
        """
        scores = box_scores[:, -1]
        boxes = box_scores[:, :-1]
        picked = []
        # np.argsort is ascending, so take candidates from / pop off the tail.
        # _, indexes = scores.sort(descending=True)
        indexes = np.argsort(scores)
        # indexes = indexes[:candidate_size]
        indexes = indexes[-candidate_size:]
        while len(indexes) > 0:
            # current = indexes[0]
            current = indexes[-1]
            picked.append(current)
            if 0 < top_k == len(picked) or len(indexes) == 1:
                break
            current_box = boxes[current, :]
            # indexes = indexes[1:]
            indexes = indexes[:-1]
            rest_boxes = boxes[indexes, :]
            iou = self.iou_of(
                rest_boxes,
                np.expand_dims(current_box, axis=0),
            )
            # Drop every remaining candidate that overlaps the current pick too much.
            indexes = indexes[iou <= iou_threshold]
        return box_scores[picked, :]
\ No newline at end of file
import time
import os
import math
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
import RPi.GPIO as GPIO
import board
import busio
import numpy as np
import math
import cv2
from threading import Thread
class Heater(object):
    """PWM-controlled blackbody heater with NTC-thermistor feedback read
    through an MCP3008 ADC, running a proportional control loop on a thread."""

    def __init__(self, pwm_pin, target_temp):
        self.target_temp = target_temp   # setpoint in degrees Celsius
        self.pin = pwm_pin               # BCM pin number driving the heater PWM
        self.last_temp = 0.0             # most recent thermistor reading (deg C)
        self.isOpened = False
        self.thread = Thread(target=self.job)

    # Convert data to voltage level
    def ReadVolts(self, data, deci, input_volts=3.3):
        """Convert a 10-bit ADC reading to volts, rounded to `deci` decimals."""
        volts = (data * input_volts) / float(1023)
        volts = round(volts, deci)
        return volts

    def GetTemperature(self, data, input_volts=3.3):
        """Convert a 10-bit ADC reading to degrees Celsius via the NTC beta equation.

        Returns -1 when the reading is outside the thermistor's valid range.
        """
        Rp = 10000        # nominal thermistor resistance at 25 C
        T2 = 273.15 + 25  # reference temperature in Kelvin
        Bx = 3950         # beta coefficient
        Ka = 273.15
        vol = (data * input_volts) / float(1023)
        Rt = vol * 1000 / (input_volts - vol)
        # Fix: use <= so a zero reading (Rt == 0) returns the -1 sentinel
        # instead of raising ValueError from math.log(0).
        if((Rt / Rp) <= 0):
            return -1
        else:
            return 1 / (1 / T2 + math.log(Rt / Rp) / Bx) - Ka + 0.5

    def job(self):
        """Control loop: read the ADC, update last_temp, and steer PWM duty toward target_temp."""
        SPI_PORT = 0
        SPI_DEVICE = 0
        SIGNAL_CHANNEL = 0
        mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.pin, GPIO.OUT)
        p = GPIO.PWM(self.pin, 1000)
        duty_cycle = 100.0
        alpha = 1.0  # proportional gain
        p.start(duty_cycle)
        while self.isOpened:
            data = mcp.read_adc(SIGNAL_CHANNEL)
            if(data != 0 and data != 1023):  # skip open/short-circuit readings
                volts = (data * 5.0) / float(1023)
                temp = self.GetTemperature(data, 5.0)
                self.last_temp = temp
                # print(data,volts,temp,duty_cycle)
                # Proportional control, clamped to [0, 100] percent duty.
                duty_cycle -= alpha * (temp - self.target_temp)
                duty_cycle = 100.0 if duty_cycle > 100.0 else duty_cycle
                duty_cycle = 0.0 if duty_cycle < 0.0 else duty_cycle
                p.ChangeDutyCycle(duty_cycle)
            time.sleep(0.1)
        p.stop()

    def start(self):
        """Begin the control loop on the background thread."""
        self.isOpened = True
        self.thread.start()

    def stop(self):
        """Signal the control loop to exit (the thread finishes its current sleep)."""
        self.isOpened = False
\ No newline at end of file
from uvc_thermal import Thermal
from camera import Camera
from detector import FaceDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
import cv2
import numpy as np
import time
import sys
def max_temperature(box, thermal_row, black_h=0, thermal_height=120, thermal_width=160, image_height=480, image_width=640):
    """Project an RGB-space box onto the raw thermal array and return the
    hottest raw value inside it; rows above black_h are excluded and an empty
    projected region yields 0."""
    ratio_w = thermal_width / image_width
    ratio_h = thermal_height / image_height
    left = max(int(box[0] * ratio_w), 0)
    top = max(int(box[1] * ratio_h), black_h)
    right = min(int(box[2] * ratio_w), thermal_width)
    bottom = min(int(box[3] * ratio_h), thermal_height)
    patch = thermal_row[top:bottom, left:right]
    if patch.size:
        return patch.max()
    return 0
# Demo script: live face-temperature screening shown in an OpenCV window.
# Construct the worker objects (thermal camera, RGB camera, detector, optional
# anti-spoofing, RGB->thermal aligner, blackbody heater, temperature correcter).
thermal = Thermal(width=160, height=120, framerate=9, frame_width=640, frame_height=480, log=None)
camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
aligner = BoxAligner(img_h = 480,img_w = 640)
heater = Heater(pwm_pin=12 , target_temp=36.0)
correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
# Start the hardware threads; sleeps give each device time to initialise.
heater.start()
time.sleep(2)
thermal.start()
time.sleep(2)
camera.start()
time.sleep(2)
while(thermal.isOpened and camera.isOpened):
    thermal_ret,thermal_frame, thermal_row = thermal.capture()
    ret,frame = camera.capture()
    if(thermal_ret and ret):
        # Hottest raw reading inside the blackbody reference region (first 34 columns).
        blackbody_max = thermal_row[:,:34].max()
        heater_temp = heater.last_temp
        boxes, labels, probs = detector.predict(frame)
        for i in range(boxes.shape[0]):
            box = boxes[i, :]
            score = probs[i]
            if(score > 0.9):
                # Anti-spoofing disabled for now; hard-coded live score.
                #scaled_box = anti.scale_box(img_h=480,img_w=640,box=box)
                #expanded_face = frame[scaled_box[1]:scaled_box[3],scaled_box[0]:scaled_box[2]].copy()
                #live_score = anti.predict(expanded_face)
                live_score = 0.9
                if(live_score > 0.9):
                    color = (0, 255, 0)
                else:
                    color = ( 0, 0, 255)
                # Project the RGB box into thermal coordinates and draw both boxes.
                thermal_box = aligner.box_aligment([box])[0]
                cv2.rectangle(thermal_frame, (thermal_box[0], thermal_box[1]), (thermal_box[2], thermal_box[3]), color, 2)
                cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
                # cv2.rectangle(thermal_frame, (box[0], box[1]), (box[2], box[3]), color, 2)
                # max_temp = thermal_row[thermal_box[1]//4:thermal_box[3]//4,thermal_box[0]//4:thermal_box[2]//4]
                face_temp = max_temperature(thermal_box,thermal_row,black_h=32)
                # if(max_temp.size != 0):
                corrected_face_temp = correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
                # Annotate raw->corrected temperature at the box centre.
                cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)
        # Overlay the heater temperature and show both frames side by side.
        cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320, 64), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)
        cv2.imshow('frames',np.column_stack((frame,thermal_frame)))
        if(cv2.waitKey(1) == ord('q')):
            break
    else:
        break
# Shutdown: close windows and stop all hardware threads.
cv2.destroyAllWindows()
thermal.stop()
time.sleep(2)
camera.stop()
time.sleep(2)
heater.stop()
sys.exit()
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "connected-studio",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"from mask_detector import FaceMaskDetector"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "strategic-degree",
"metadata": {},
"outputs": [],
"source": [
"detector = FaceMaskDetector( model_path='/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn',input_size=(640, 640))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "concrete-configuration",
"metadata": {},
"outputs": [],
"source": [
"img = cv2.imread('/home/allen/Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/imgs/1.jpg')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "specified-center",
"metadata": {},
"outputs": [],
"source": [
"detector.predict(img)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "disabled-villa",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
import math
import MNN
import cv2
import numpy as np
import torch
class FaceMaskDetector:
    """MNN-based face + face-mask detector (SSD-style prior-box decoding).

    Wraps an MNN inference session around a RetinaFaceMask-style model and
    exposes a single ``predict`` call returning, per detection: the box, a
    class label, a face score, and a mask score.
    """
    def __init__(self,model_path="../model/version-slim/slim-320.mnn",input_size=(320,240)):
        """Load the MNN model and pre-compute the prior (anchor) boxes.

        Args:
            model_path: path to the .mnn model file.
            input_size: network input resolution as (width, height).
        """
        self.input_size = input_size
        # Per-channel mean subtracted from the RGB image before inference.
        self.image_mean = np.array([104, 117, 123])
        # NOTE(review): image_std is defined but never applied in predict() —
        # the model presumably expects mean subtraction only; confirm.
        self.image_std = 128.0
        self.iou_threshold = 0.3
        # Minimum face score for a detection to be kept.
        self.threshold = 0.8
        # SSD variances used when decoding box regressions against priors.
        self.center_variance = 0.1
        self.size_variance = 0.2
        # Anchor sizes (pixels) per feature-map level, and the level strides.
        self.min_boxes = [[16, 32], [64, 128], [256, 512]]
        self.strides = [8, 16, 32]
        # Priors are fixed for a given input size, so build them once.
        self.priors = self.define_img_size(self.input_size)
        self.interpreter = MNN.Interpreter(model_path)
        self.session = self.interpreter.createSession({'numThread':4})
        self.input_tensor = self.interpreter.getSessionInput(self.session)
    def predict(self,image_ori):
        """Run face/mask detection on a BGR image.

        Args:
            image_ori: BGR image (e.g. as returned by cv2.imread).

        Returns:
            Tuple (boxes, labels, probs, mask_probs): int32 corner-form boxes
            in original-image pixel coordinates, class labels, face scores,
            and the corresponding mask scores. All empty arrays when nothing
            passes the threshold.
        """
        image = cv2.cvtColor(image_ori, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, self.input_size)
        image = image.astype(float)
        image = (image - self.image_mean)
        # HWC -> CHW to match the Caffe-layout MNN tensor below.
        image = image.transpose((2, 0, 1))
        image = image.astype(np.float32)
        tmp_input = MNN.Tensor((1, 3, self.input_size[1], self.input_size[0]), MNN.Halide_Type_Float, image, MNN.Tensor_DimensionType_Caffe)
        self.input_tensor.copyFrom(tmp_input)
        self.interpreter.runSession(self.session)
        # Three output heads: mask score, face score, and box regressions.
        facemask_scores = self.interpreter.getSessionOutput(self.session, "facemask_score").getData()
        scores = self.interpreter.getSessionOutput(self.session, "face_score").getData()
        boxes = self.interpreter.getSessionOutput(self.session, "bbox").getData()
        # Flat output buffers -> (1, num_priors, k) arrays.
        boxes = np.expand_dims(np.reshape(boxes, (-1, 4)), axis=0)
        scores = np.expand_dims(np.reshape(scores, (-1, 2)), axis=0)
        facemask_scores = np.expand_dims(np.reshape(facemask_scores, (-1, 2)), axis=0)
        # Decode regressions against the priors, then to corner form.
        boxes = self.convert_locations_to_boxes(boxes, self.priors, self.center_variance, self.size_variance)
        boxes = self.center_form_to_corner_form(boxes)
        boxes, labels, probs, mask_probs = self.decode(image_ori.shape[1], image_ori.shape[0], scores, facemask_scores, boxes, self.threshold)
        return boxes, labels, probs, mask_probs
    def define_img_size(self,image_size):
        """Build prior boxes for a given (width, height) input size."""
        shrinkage_list = []
        feature_map_w_h_list = []
        for size in image_size:
            # Feature-map extent at each stride level along this dimension.
            feature_map = [math.ceil(size / stride) for stride in self.strides]
            feature_map_w_h_list.append(feature_map)
        # Same stride list is used as the shrinkage for both dimensions.
        for i in range(0, len(image_size)):
            shrinkage_list.append(self.strides)
        priors = self.generate_priors(feature_map_w_h_list, shrinkage_list, image_size, self.min_boxes)
        return priors
    def generate_priors(self,feature_map_list, shrinkage_list, image_size, min_boxes, clamp=True):
        """Enumerate center-form priors [cx, cy, w, h], normalized to [0, 1].

        Walks every cell of every feature-map level and emits one prior per
        anchor size configured for that level.

        NOTE(review): the result is a torch.Tensor that is later consumed by
        numpy code in convert_locations_to_boxes — confirm the interop is
        intended rather than converting with .numpy().
        """
        priors = []
        for index in range(0, len(feature_map_list[0])):
            scale_w = image_size[0] / shrinkage_list[0][index]
            scale_h = image_size[1] / shrinkage_list[1][index]
            for j in range(0, feature_map_list[1][index]):
                for i in range(0, feature_map_list[0][index]):
                    # Cell center in normalized image coordinates.
                    x_center = (i + 0.5) / scale_w
                    y_center = (j + 0.5) / scale_h
                    for min_box in min_boxes[index]:
                        w = min_box / image_size[0]
                        h = min_box / image_size[1]
                        priors.append([
                            x_center,
                            y_center,
                            w,
                            h
                        ])
        print("priors nums:{}".format(len(priors)))
        priors = torch.tensor(priors)
        if clamp:
            # Keep priors inside the image.
            torch.clamp(priors, 0.0, 1.0, out=priors)
        return priors
    def decode(self,width, height, confidences, mask_confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
        """Threshold, NMS-filter, and scale detections to pixel coordinates.

        Args:
            width, height: original image size used to scale normalized boxes.
            confidences: (1, num_priors, num_classes) face scores.
            mask_confidences: (1, num_priors, num_classes) mask scores.
            boxes: (1, num_priors, 4) corner-form boxes, normalized.
            prob_threshold: minimum face score to keep a detection.
            iou_threshold: NMS overlap threshold.
            top_k: keep at most top_k boxes per class (<= 0 keeps all).

        Returns:
            (boxes, labels, probs, mask_probs) — boxes as int32 pixel
            coordinates; all arrays empty when nothing survives.
        """
        # Strip the batch dimension.
        boxes = boxes[0]
        confidences = confidences[0]
        mask_confidences = mask_confidences[0]
        picked_box_probs = []
        picked_labels = []
        # Class 0 is background, so start from 1.
        for class_index in range(1, confidences.shape[1]):
            probs = confidences[:, class_index]
            mask_probs = mask_confidences[:, class_index]
            mask = probs > prob_threshold
            probs = probs[mask]
            mask_probs = mask_probs[mask]
            if probs.shape[0] == 0:
                continue
            subset_boxes = boxes[mask, :]
            # Columns: x1, y1, x2, y2, face score, mask score.
            box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1),mask_probs.reshape(-1, 1)], axis=1)
            box_probs = self.hard_nms(box_probs,
                                 iou_threshold=iou_threshold,
                                 top_k=top_k,
                                 )
            picked_box_probs.append(box_probs)
            picked_labels.extend([class_index] * box_probs.shape[0])
        if not picked_box_probs:
            return np.array([]), np.array([]), np.array([]), np.array([])
        picked_box_probs = np.concatenate(picked_box_probs)
        # Scale normalized coordinates to original-image pixels.
        picked_box_probs[:, 0] *= width
        picked_box_probs[:, 1] *= height
        picked_box_probs[:, 2] *= width
        picked_box_probs[:, 3] *= height
        return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4], picked_box_probs[:, 5]
    def center_form_to_corner_form(self,locations):
        """Convert [cx, cy, w, h] boxes to [x1, y1, x2, y2] corner form."""
        return np.concatenate([locations[..., :2] - locations[..., 2:] / 2,
                               locations[..., :2] + locations[..., 2:] / 2], len(locations.shape) - 1)
    def convert_locations_to_boxes(self,locations, priors, center_variance,
                                   size_variance):
        """Convert regressional location results of SSD into boxes in the form of (center_x, center_y, h, w).
        The conversion:
            $$predicted\_center * center_variance = \frac {real\_center - prior\_center} {prior\_hw}$$
            $$exp(predicted\_hw * size_variance) = \frac {real\_hw} {prior\_hw}$$
        We do it in the inverse direction here.
        Args:
            locations (batch_size, num_priors, 4): the regression output of SSD. It will contain the outputs as well.
            priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes.
            center_variance: a float used to change the scale of center.
            size_variance: a float used to change of scale of size.
        Returns:
            boxes:  priors: [[center_x, center_y, h, w]]. All the values
                are relative to the image size.
        """
        # priors can have one dimension less.
        if len(priors.shape) + 1 == len(locations.shape):
            priors = np.expand_dims(priors, 0)
        return np.concatenate([
            locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],
            np.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
        ], axis=len(locations.shape) - 1)
    def area_of(self,left_top, right_bottom):
        """Compute the areas of rectangles given two corners.
        Args:
            left_top (N, 2): left top corner.
            right_bottom (N, 2): right bottom corner.
        Returns:
            area (N): return the area.
        """
        # Clamp negative extents (non-overlapping corners) to zero area.
        hw = np.clip(right_bottom - left_top, 0.0, None)
        return hw[..., 0] * hw[..., 1]
    def iou_of(self,boxes0, boxes1, eps=1e-5):
        """Return intersection-over-union (Jaccard index) of boxes.
        Args:
            boxes0 (N, 4): ground truth boxes.
            boxes1 (N or 1, 4): predicted boxes.
            eps: a small number to avoid 0 as denominator.
        Returns:
            iou (N): IoU values.
        """
        overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
        overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
        overlap_area = self.area_of(overlap_left_top, overlap_right_bottom)
        area0 = self.area_of(boxes0[..., :2], boxes0[..., 2:])
        area1 = self.area_of(boxes1[..., :2], boxes1[..., 2:])
        return overlap_area / (area0 + area1 - overlap_area + eps)
    def hard_nms(self,box_scores, iou_threshold, top_k=-1, candidate_size=200):
        """Greedy non-maximum suppression on score-sorted candidates.
        Args:
            box_scores (N, 5+): boxes in corner-form followed by probabilities.
            iou_threshold: intersection over union threshold.
            top_k: keep top_k results. If k <= 0, keep all the results.
            candidate_size: only consider the candidates with the highest scores.
        Returns:
            The kept rows of box_scores (boxes plus their scores), not indexes.
        """
        scores = box_scores[:, 4]
        boxes = box_scores[:, :4]
        picked = []
        # _, indexes = scores.sort(descending=True)
        # np.argsort is ascending, so the best candidates sit at the END.
        indexes = np.argsort(scores)
        # indexes = indexes[:candidate_size]
        indexes = indexes[-candidate_size:]
        while len(indexes) > 0:
            # current = indexes[0]
            # Highest remaining score.
            current = indexes[-1]
            picked.append(current)
            if 0 < top_k == len(picked) or len(indexes) == 1:
                break
            current_box = boxes[current, :]
            # indexes = indexes[1:]
            indexes = indexes[:-1]
            rest_boxes = boxes[indexes, :]
            iou = self.iou_of(
                rest_boxes,
                np.expand_dims(current_box, axis=0),
            )
            # Drop everything overlapping the picked box too much.
            indexes = indexes[iou <= iou_threshold]
        return box_scores[picked, :]
\ No newline at end of file
from datetime import datetime
import pytz
import time
from threading import Thread
class OSD:
    """On-screen-display clock.

    Keeps a timezone-aware, formatted time string (``str_time``) refreshed
    roughly 10x per second in a background thread, for overlaying on video
    frames without per-frame timezone work.
    """
    def __init__(self,local='Asia/Taipei',str_format="%Y-%m-%d %H:%M:%S"):
        """Set up the timezone/format and prepare (but do not start) the thread.

        Args:
            local: IANA timezone name (e.g. 'Asia/Taipei').
            str_format: strftime format used to render ``str_time``.
        """
        # Remember the tz name so get_lcaol() can return it (bug fix: it was
        # never stored, so get_lcaol() raised AttributeError).
        self.local = local
        self.tz = pytz.timezone(local)
        self.utc = datetime.utcnow()
        self.local_time = pytz.utc.localize(self.utc, is_dst=None).astimezone(self.tz)
        self.str_format = str_format
        self.str_time = self.local_time.strftime(self.str_format)
        self.isOpened = False
        self.thread = Thread(target=self.update_job)
    def update_job(self):
        """Background loop: refresh str_time ~10x/s until stop() is called."""
        while(self.isOpened):
            self.utc = datetime.utcnow()
            self.local_time = pytz.utc.localize(self.utc, is_dst=None).astimezone(self.tz)
            self.str_time = self.local_time.strftime(self.str_format)
            time.sleep(0.1)
    def set_local(self,local):
        """Switch to another IANA timezone; takes effect on the next refresh."""
        self.local = local
        self.tz = pytz.timezone(local)
    def get_lcaol(self):
        # Name kept as-is (typo of "get_local") for backward compatibility.
        return self.local
    def set_str_format(self,str_format):
        """Change the strftime format used for str_time.

        Bug fix: previously the format string was passed through
        pytz.timezone(), which raised UnknownTimeZoneError for any real
        strftime format; store it directly instead.
        """
        self.str_format = str_format
    def get_str_format(self):
        """Return the current strftime format."""
        return self.str_format
    def start(self):
        """Mark the clock open and launch the refresh thread."""
        print("start OSD")
        self.isOpened = True
        self.thread.start()
    def stop(self):
        """Signal the refresh thread to exit (it stops within ~0.1 s)."""
        print("stop OSD")
        self.isOpened = False
\ No newline at end of file
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "alternative-throat",
"metadata": {},
"outputs": [],
"source": [
"from uvc_thermal import Thermal\n",
"from camera import Camera\n",
"from detector import FaceDetector\n",
"from anti import AntiSpoofing\n",
"from heater import Heater\n",
"from aligner import BoxAligner\n",
"from correcter import Correcter\n",
"\n",
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "direct-vulnerability",
"metadata": {},
"outputs": [],
"source": [
"def max_temperature(box,thermal_row,thermal_height=120,thermal_width=160,image_height=480,image_width=640):\n",
" scale_ratio_h = thermal_height/image_height\n",
" scale_ratio_w = thermal_width/image_width\n",
" x1 = max(int(box[0]*scale_ratio_w),0)\n",
" y1 = max(int(box[1]*scale_ratio_h),0)\n",
" x2 = min(int(box[2]*scale_ratio_w),thermal_width)\n",
" y2 = min(int(box[3]*scale_ratio_h),thermal_height)\n",
" box_temperature = thermal_row[y1:y2,x1:x2]\n",
" return box_temperature.max()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "pending-pressure",
"metadata": {},
"outputs": [],
"source": [
"thermal = Thermal(width=160, height=120, framerate=9, frame_width=640, frame_height=480, log=None)\n",
"camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)\n",
"detector = FaceDetector(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn\" , input_size=(320,240))\n",
"# anti = AntiSpoofing(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn\")\n",
"aligner = BoxAligner(img_h = 480,img_w = 640)\n",
"heater = Heater(pwm_pin=12 , target_temp=35.0)\n",
"correcter = Correcter(model_path=\"../thermal-tk/20210421_correcter.sav\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "moved-capacity",
"metadata": {},
"outputs": [],
"source": [
"thermal.start()\n",
"camera.start()\n",
"heater.start()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "automotive-married",
"metadata": {},
"outputs": [],
"source": [
"while(thermal.isOpened and camera.isOpened):\n",
" thermal_ret,thermal_frame, thermal_row = thermal.capture()\n",
" ret,frame = camera.capture()\n",
" if(thermal_ret and ret):\n",
" blackbody_max = thermal_row[:,:34].max()\n",
" heater_temp = heater.last_temp\n",
" boxes, labels, probs = detector.predict(frame)\n",
" for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" if(score > 0.9):\n",
"# scaled_box = anti.scale_box(img_h=480,img_w=640,box=box)\n",
"# expanded_face = frame[scaled_box[1]:scaled_box[3],scaled_box[0]:scaled_box[2]].copy()\n",
"# live_score = anti.predict(expanded_face)\n",
" live_score = 0.99\n",
" if(live_score > 0.9):\n",
" color = (0, 255, 0)\n",
" else:\n",
" color = ( 0, 0, 255)\n",
" thermal_box = aligner.box_aligment([box])[0]\n",
" cv2.rectangle(thermal_frame, (thermal_box[0], thermal_box[1]), (thermal_box[2], thermal_box[3]), color, 2)\n",
" cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)\n",
" face_temp = max_temperature(box,thermal_row)\n",
" corrected_face_temp = correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)\n",
" cv2.putText(frame, \"original: {:.2f},corrected: {:.2f}\".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)\n",
" cv2.imshow('frames',np.column_stack((frame,thermal_frame)))\n",
" if(cv2.waitKey(1) == ord('q')):\n",
" break\n",
" else:\n",
" break\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "finnish-lightning",
"metadata": {},
"outputs": [],
"source": [
"thermal.stop()\n",
"camera.stop()\n",
"heater.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "annoying-strike",
"metadata": {},
"outputs": [],
"source": [
"camera.isOpened"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "qualified-labor",
"metadata": {},
"outputs": [],
"source": [
"cv2.imwrite(\"rgb_frame_3.jpg\",frame)\n",
"cv2.imwrite(\"thermal_frame_3.jpg\",thermal_frame)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "laden-chuck",
"metadata": {},
"outputs": [],
"source": [
"def box_temperature(box,thermal_row,thermal_height=120,thermal_width=160,image_height=480,image_width=640):\n",
" scale_ratio_h = thermal_height/image_height\n",
" scale_ratio_w = thermal_width/image_width\n",
" print(scale_ratio_h,scale_ratio_w)\n",
" x1 = max(int(box[0]*scale_ratio_w),0)\n",
" y1 = max(int(box[1]*scale_ratio_h),0)\n",
" x2 = min(int(box[2]*scale_ratio_w),thermal_width)\n",
" y2 = min(int(box[3]*scale_ratio_h),thermal_height)\n",
" print(x1,y1,x2,y2)\n",
" return thermal_row[y1:y2,x1:x2]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "twenty-dakota",
"metadata": {},
"outputs": [],
"source": [
"x1 = max(int(thermal_box[0]//4),0)\n",
"y1 = max(int(thermal_box[1]//4),0)\n",
"x2 = min(int(thermal_box[2]//4),160)\n",
"y2 = min(int(thermal_box[3]//4),120)\n",
"print(x1,y1,x2,y2)\n",
"thermal_row_copy = thermal_row.copy()\n",
"thermal_row_copy[y1:y2,x1:x2]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "atlantic-toronto",
"metadata": {},
"outputs": [],
"source": [
"print(thermal_box[0]//4,int(thermal_box[0]*0.25))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "medieval-tactics",
"metadata": {},
"outputs": [],
"source": [
"cv2.imshow('thermal frames 1',thermal.raw_to_8bit(thermal_row_copy[y1:y2,x1:x2]))\n",
"cv2.imshow('thermal frames 2',thermal.raw_to_8bit(box_temperature(thermal_box,thermal_row_copy)))\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "talented-schedule",
"metadata": {},
"outputs": [],
"source": [
"(thermal_row_copy[y1:y2,x1:x2]*0.0092).mean()"
]
},
{
"cell_type": "markdown",
"id": "hearing-assault",
"metadata": {},
"source": [
"### 物件框校正"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "together-writer",
"metadata": {},
"outputs": [],
"source": [
"class BoxAligner:\n",
" def __init__(self,img_h,img_w):\n",
" self.img_h = img_h\n",
" self.img_w = img_w\n",
" self.M = np.array([[ 8.13802980e-01, -2.63523694e-02, 9.30324875e+01],\n",
" [ 2.10292692e-02, 7.84319221e-01, 7.70246127e+01],\n",
" [ 1.48500793e-04, -1.53618915e-04, 1.00000000e+00]])\n",
" def box_aligment(self,boxes):\n",
" aliged_boxes = []\n",
" for b in boxes:\n",
" x1,y1,w1 = np.dot(self.M,[b[0],b[1],1])\n",
" x2,y2,w2 = np.dot(self.M,[b[2],b[3],1])\n",
" x1 = max(int(x1),0)\n",
" y1 = max(int(y1),0)\n",
" x2 = min(int(x2),self.img_w)\n",
" y2 = min(int(y2),self.img_h)\n",
" aliged_boxes.append([x1,y1,x2,y2])\n",
" return aliged_boxes\n",
" def get_omography_matrix(self,rgb_points, thermal_points):\n",
" h, mask = cv2.findHomography(rgb_points, thermal_points, cv2.RANSAC)\n",
" self.M = h"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "auburn-deficit",
"metadata": {},
"outputs": [],
"source": [
"aligner = BoxAligner(img_h = 480,img_w = 640)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "provincial-example",
"metadata": {},
"outputs": [],
"source": [
"136/4"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "compliant-daily",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "elegant-bachelor",
"metadata": {},
"source": [
"### 自動校正計算"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "french-brunswick",
"metadata": {},
"outputs": [],
"source": [
"cv2.imshow('RGB frames',frame)\n",
"cv2.imshow('Thermal frames',thermal_frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "daily-brunswick",
"metadata": {},
"outputs": [],
"source": [
"thermal_face = cv2.selectROI(\"thermal face\",thermal_frame)\n",
"frame_face = cv2.selectROI(\"rgb face\",frame)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "settled-switzerland",
"metadata": {},
"outputs": [],
"source": [
"x1,y1,w1,h1 = thermal_face\n",
"pts_d = np.float32([[x1,y1],[x1+w1,y1],[x1,y1+h1],[x1+w1,y1+h1]])\n",
"x2,y2,w2,h2 = frame_face\n",
"pts_o = np.float32([[x2,y2],[x2+w2,y2],[x2,y2+h2],[x2+w2,y2+h2]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "center-beijing",
"metadata": {},
"outputs": [],
"source": [
"M = cv2.getPerspectiveTransform(pts_o,pts_d)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "challenging-leeds",
"metadata": {},
"outputs": [],
"source": [
"ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (640,480),None,None)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "owned-applicant",
"metadata": {},
"outputs": [],
"source": [
"rgb_box = np.array([[319, 75,1]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "comparable-faith",
"metadata": {},
"outputs": [],
"source": [
"rgb_box.dot(M)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "unsigned-cutting",
"metadata": {},
"outputs": [],
"source": [
"display_thermal = thermal_frame.copy()\n",
"x1 = int(232.65413534)\n",
"y1 = int(53.43137255)\n",
"x2 = int(360.28571429)\n",
"y2 = int(235.09803922)\n",
"display_thermal = cv2.rectangle(display_thermal, (x1,y1), (x2, y2), color, 2)\n",
"# display_thermal = cv2.rectangle(display_thermal, (x,y), (x+w, y+h), color, 2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "flush-differential",
"metadata": {},
"outputs": [],
"source": [
"cv2.imshow('dispaly thermal frames',display_thermal)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "markdown",
"id": "literary-narrative",
"metadata": {},
"source": [
"### 子序偵測 主序顯示"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ideal-investigator",
"metadata": {},
"outputs": [],
"source": [
"from threading import Thread\n",
"import time"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "written-rings",
"metadata": {},
"outputs": [],
"source": [
"class Checker:\n",
" def __init__(self,camera,detector,anti):\n",
" self.camera = camera\n",
" self.detector = detector\n",
" self.anti = anti\n",
" self.faces = []\n",
" self.isOpened = False\n",
" self.frame = None\n",
" self.thread = Thread(target=self.__job)\n",
" def start(self):\n",
" self.isOpened = True\n",
" self.thread.start()\n",
" def stop(self):\n",
" self.isOpened = False\n",
" def __job(self):\n",
" while(self.camera.isOpened and self.isOpened):\n",
" ret,frame = self.camera.capture()\n",
" if(ret):\n",
" self.frame = frame.copy()\n",
" new_faces = []\n",
" boxes, labels, probs = self.detector.predict(frame)\n",
" for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" face = {\"location\":box,\"score\":score,\"islive\":False}\n",
" if(score > 0.9):\n",
" scaled_box = self.anti.scale_box(img_h=480,img_w=640,box=box)\n",
" expanded_face = frame[scaled_box[1]:scaled_box[3],scaled_box[0]:scaled_box[2]].copy()\n",
" live_score = self.anti.predict(expanded_face)\n",
" if(live_score > 0.9):\n",
" face[\"islive\"]=True\n",
" new_faces.append(face)\n",
" self.faces = new_faces"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "seeing-plaza",
"metadata": {},
"outputs": [],
"source": [
"checker = Checker(camera=camera ,detector=detector,anti=anti)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "executive-winning",
"metadata": {},
"outputs": [],
"source": [
"checker.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ideal-december",
"metadata": {},
"outputs": [],
"source": [
"while(thermal.isOpened and camera.isOpened):\n",
" thermal_ret,thermal_frame, thermal_row = thermal.capture()\n",
" ret,frame = camera.capture()\n",
"# frame = checker.frame\n",
" ret = checker.isOpened\n",
" if(thermal_ret and ret):\n",
" for face in checker.faces:\n",
" box = face[\"location\"]\n",
" if(face[\"islive\"]):\n",
" color = (0, 255, 0)\n",
" else:\n",
" color = ( 0, 0, 255)\n",
" cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)\n",
" cv2.imshow('frames',np.column_stack((frame,thermal_frame)))\n",
" if(cv2.waitKey(1) == ord('q')):\n",
" break\n",
" else:\n",
" break\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "standard-inquiry",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "muslim-carry",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from PyQt5 import QtWidgets\n",
"from PyQt5.QtWidgets import *\n",
"from PyQt5.QtCore import *\n",
"from PyQt5.QtGui import *\n",
"\n",
"# import Ui_MainWindow as ui\n",
"import ui_test as ui\n",
"\n",
"from uvc_thermal import Thermal\n",
"from camera import Camera\n",
"from detector import FaceDetector\n",
"from anti import AntiSpoofing\n",
"from heater import Heater\n",
"from aligner import BoxAligner\n",
"from correcter import Correcter\n",
"from osd import OSD\n",
"\n",
"import cv2\n",
"import numpy as np\n",
"import time"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class Main(QMainWindow, ui.Ui_MainWindow):\n",
" def __init__(self):\n",
" super().__init__()\n",
" self.setupUi(self)\n",
" self.resize(640,480)\n",
" self.rgb_frame.setScaledContents(True)\n",
" self.thermal_frame.setScaledContents(True)\n",
" self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)\n",
" self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)\n",
" self.detector = FaceDetector(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn\" , input_size=(320,240))\n",
" # self.anti = AntiSpoofing(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn\")\n",
" self.aligner = BoxAligner(img_h = 480,img_w = 640)\n",
" self.heater = Heater(pwm_pin=12 , target_temp=35.0)\n",
" self.correcter = Correcter(model_path=\"../thermal-tk/20210421_correcter.sav\")\n",
" self.osd = OSD(local='Asia/Taipei', str_format='%Y-%m-%d %H:%M:%S')\n",
" self.thermal.start()\n",
" time.sleep(2)\n",
" self.camera.start()\n",
" time.sleep(2)\n",
" self.heater.start()\n",
" time.sleep(2)\n",
" self.osd.start()\n",
" time.sleep(2)\n",
" self.faces = []\n",
" self.timer = QTimer(self)\n",
" self.timer.timeout.connect(self.capture)\n",
" self.timer.start(10)\n",
"# self.painter = QPainter(self)\n",
" def capture(self):\n",
" ret, frame = self.camera.capture()\n",
" thermal_ret,thermal_frame, thermal_row = self.thermal.capture()\n",
" if(thermal_ret and ret):\n",
" blackbody_max = thermal_row[:,:34].max()\n",
" heater_temp = self.heater.last_temp\n",
" # 偵測人臉\n",
" str_time = self.osd.str_time\n",
" boxes, labels, probs = self.detector.predict(frame)\n",
" # 取出所有偵測的結果\n",
" frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n",
"# for i in range(boxes.shape[0]):\n",
"# box = boxes[i, :]\n",
"# score = probs[i]\n",
"# if(score > 0.9):\n",
"# color = (0, 255, 0)\n",
"# thermal_box = self.aligner.box_aligment([box])[0]\n",
"# face_frame = frame[box[1]:box[3],box[0]:box[2]].copy()\n",
"# Qface_frame=QImage(face_frame.data,face_frame.shape[1],face_frame.shape[0],face_frame.shape[1]*3,QImage.Format_RGB888)\n",
"# face_pixMap=QPixmap.fromImage(Qface_frame)\n",
" \n",
"# cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), color, 2)\n",
"# cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)\n",
"# face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)\n",
"# corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)\n",
"# cv2.putText(frame, \"{:.2f}->{:.2f}\".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)\n",
"# cv2.putText(thermal_frame, \"{:.2f}\".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)\n",
" \n",
"# self.face1.setPixmap(face_pixMap)\n",
"# self.temp1.setStyleSheet(\"color: rgb(0, 255, 0);\")\n",
"# self.temp1.setText(\"{:.2f}\".format(corrected_face_temp))\n",
"# self.time1.setText(str_time)\n",
" # 顯示至UI\n",
"# frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n",
" thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)\n",
" Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)\n",
" Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)\n",
" pixMap=QPixmap.fromImage(Qframe)\n",
" thermal_pixMap=QPixmap.fromImage(Qthermal_frame)\n",
" \n",
" qp = QPainter(pixMap)\n",
" thermal_qp = QPainter(thermal_pixMap)\n",
" for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" if(score > 0.9):\n",
" thermal_box = self.aligner.box_aligment([box])[0]\n",
" face_frame = frame[box[1]:box[3],box[0]:box[2]].copy()\n",
" \n",
" face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)\n",
" corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)\n",
" \n",
" face_rect = QRect(box[0], box[1], box[2]-box[0], box[3]-box[1])\n",
" thermal_rect = QRect(thermal_box[0]//4, thermal_box[1]//4, (thermal_box[2]//4 - thermal_box[0]//4), (thermal_box[3]//4 - thermal_box[1]//4))\n",
" \n",
" \n",
" \n",
" if(corrected_face_temp > 37.0):\n",
" pen = QPen(Qt.red, 3)\n",
" else:\n",
" pen = QPen(Qt.green, 3)\n",
" qp.setPen(pen)\n",
" thermal_qp.setPen(pen)\n",
" \n",
" face_crop = pixMap.copy(face_rect)\n",
" self.face1.setPixmap(face_crop)\n",
" self.temp1.setStyleSheet(\"color: rgb(0, 255, 0);\")\n",
" self.temp1.setText(\"{:.2f}\".format(corrected_face_temp))\n",
" self.time1.setText(str_time)\n",
" \n",
" qp.drawRect(face_rect)\n",
" qp.drawText(face_rect, Qt.AlignCenter , str(corrected_face_temp))\n",
" thermal_qp.drawRect(thermal_rect)\n",
" qp.end()\n",
" thermal_qp.end()\n",
" \n",
" self.rgb_frame.setPixmap(pixMap)\n",
" self.thermal_frame.setPixmap(thermal_pixMap)\n",
" self.update()\n",
" def closeEvent(self,event):\n",
" # 關閉程式後執行\n",
" self.thermal.stop()\n",
" self.camera.stop()\n",
" self.heater.stop()\n",
" self.osd.stop()\n",
" def max_temperature(self,box,thermal_row,black_h=0,thermal_height=120,thermal_width=160,image_height=480,image_width=640):\n",
" scale_ratio_h = thermal_height/image_height\n",
" scale_ratio_w = thermal_width/image_width\n",
" x1 = max(int(box[0]*scale_ratio_w),0)\n",
" y1 = max(int(box[1]*scale_ratio_h),black_h)\n",
" x2 = min(int(box[2]*scale_ratio_w),thermal_width)\n",
" y2 = min(int(box[3]*scale_ratio_h),thermal_height)\n",
" box_temperature = thermal_row[y1:y2,x1:x2]\n",
" if(box_temperature.size != 0):\n",
" return box_temperature.max()\n",
" else:\n",
" return 0\n",
"if __name__ == '__main__':\n",
" import sys\n",
" import os\n",
" app = QtWidgets.QApplication(sys.argv)\n",
" window = Main()\n",
" window.show()\n",
" sys.exit(app.exec_())\n",
"# status = app.exec_()\n",
"# os._exit(status)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
" def paintEvent(self, QPaintEvent):\n",
" # 繪圖事件\n",
" time.sleep(1/10)\n",
" ret, frame = self.camera.capture()\n",
" thermal_ret,thermal_frame, thermal_row = self.thermal.capture()\n",
" if(thermal_ret and ret):\n",
" blackbody_max = thermal_row[:,:34].max()\n",
" heater_temp = self.heater.last_temp\n",
" # 偵測人臉\n",
" boxes, labels, probs = self.detector.predict(frame)\n",
" # 取出所有偵測的結果\n",
" for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" if(score > 0.9):\n",
" #scaled_box = anti.scale_box(img_h=480,img_w=640,box=box)\n",
" #expanded_face = frame[scaled_box[1]:scaled_box[3],scaled_box[0]:scaled_box[2]].copy()\n",
" #live_score = anti.predict(expanded_face)\n",
" live_score = 0.9\n",
" if(live_score > 0.9):\n",
" color = (0, 255, 0)\n",
" else:\n",
" color = ( 0, 0, 255)\n",
" thermal_box = self.aligner.box_aligment([box])[0]\n",
" cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), color, 2)\n",
" cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)\n",
" face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)\n",
" corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)\n",
" cv2.putText(frame, \"{:.2f}->{:.2f}\".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)\n",
" cv2.putText(thermal_frame, \"{:.2f}\".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)\n",
" # 顯示至UI\n",
" frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n",
" thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)\n",
" Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)\n",
" Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)\n",
" pixMap=QPixmap.fromImage(Qframe)\n",
" thermal_pixMap=QPixmap.fromImage(Qthermal_frame)\n",
" self.label.setPixmap(pixMap)\n",
" self.label_2.setPixmap(thermal_pixMap)\n",
" self.update()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"color = (255,255,0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\"{}\".format(color)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aligner = BoxAligner(img_h = 480,img_w = 640)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aligner.box_aligment([[0,0,640,480]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout for the fever-screening main window.

    Left: one large label (``rgb_frame``) for the RGB camera stream.
    Right: the thermal-stream label plus three identical panels, each holding
    face / time / temperature labels.

    NOTE: generated from 'untitled.ui'; prefer regenerating with pyuic5 over
    hand edits.  The only manual change here is the "theraml" -> "thermal"
    label-text typo fix in retranslateUi(); apply it to the .ui file too.
    """
    def setupUi(self, MainWindow):
        """Build the widget tree and layouts for MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1095, 791)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Top-level layout: RGB view (left) | info column (right).
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.rgb_frame = QtWidgets.QLabel(self.centralwidget)
        self.rgb_frame.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
        self.rgb_frame.setToolTipDuration(0)
        self.rgb_frame.setStyleSheet("background-color: rgb(170, 0, 0);")
        self.rgb_frame.setAlignment(QtCore.Qt.AlignCenter)
        self.rgb_frame.setObjectName("rgb_frame")
        self.horizontalLayout.addWidget(self.rgb_frame)
        # Right-hand column: thermal image on top, then three face panels.
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.thermal_frame = QtWidgets.QLabel(self.centralwidget)
        self.thermal_frame.setStyleSheet("background-color: rgb(170, 255, 0);")
        self.thermal_frame.setObjectName("thermal_frame")
        self.verticalLayout.addWidget(self.thermal_frame)
        # Panel 1: face snapshot + timestamp + temperature.
        self.faceWidget1 = QtWidgets.QWidget(self.centralwidget)
        self.faceWidget1.setStyleSheet("border-color: rgb(85, 255, 255);")
        self.faceWidget1.setObjectName("faceWidget1")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.faceWidget1)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.face1 = QtWidgets.QLabel(self.faceWidget1)
        self.face1.setObjectName("face1")
        self.verticalLayout_2.addWidget(self.face1, 0, QtCore.Qt.AlignHCenter)
        self.time1 = QtWidgets.QLabel(self.faceWidget1)
        self.time1.setObjectName("time1")
        self.verticalLayout_2.addWidget(self.time1, 0, QtCore.Qt.AlignHCenter)
        self.temp1 = QtWidgets.QLabel(self.faceWidget1)
        self.temp1.setObjectName("temp1")
        self.verticalLayout_2.addWidget(self.temp1, 0, QtCore.Qt.AlignHCenter)
        self.verticalLayout.addWidget(self.faceWidget1)
        # Panel 2.
        self.faceWidget2 = QtWidgets.QWidget(self.centralwidget)
        self.faceWidget2.setStyleSheet("border-color: rgb(255, 0, 0);")
        self.faceWidget2.setObjectName("faceWidget2")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.faceWidget2)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.face2 = QtWidgets.QLabel(self.faceWidget2)
        self.face2.setObjectName("face2")
        self.verticalLayout_4.addWidget(self.face2, 0, QtCore.Qt.AlignHCenter)
        self.time2 = QtWidgets.QLabel(self.faceWidget2)
        self.time2.setObjectName("time2")
        self.verticalLayout_4.addWidget(self.time2, 0, QtCore.Qt.AlignHCenter)
        self.temp2 = QtWidgets.QLabel(self.faceWidget2)
        self.temp2.setObjectName("temp2")
        self.verticalLayout_4.addWidget(self.temp2, 0, QtCore.Qt.AlignHCenter)
        self.verticalLayout.addWidget(self.faceWidget2)
        # Panel 3.
        self.faceWidget3 = QtWidgets.QWidget(self.centralwidget)
        self.faceWidget3.setObjectName("faceWidget3")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.faceWidget3)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.face3 = QtWidgets.QLabel(self.faceWidget3)
        self.face3.setObjectName("face3")
        self.verticalLayout_5.addWidget(self.face3, 0, QtCore.Qt.AlignHCenter)
        self.time3 = QtWidgets.QLabel(self.faceWidget3)
        self.time3.setObjectName("time3")
        self.verticalLayout_5.addWidget(self.time3, 0, QtCore.Qt.AlignHCenter)
        self.temp3 = QtWidgets.QLabel(self.faceWidget3)
        self.temp3.setObjectName("temp3")
        self.verticalLayout_5.addWidget(self.temp3, 0, QtCore.Qt.AlignHCenter)
        self.verticalLayout.addWidget(self.faceWidget3)
        self.horizontalLayout.addLayout(self.verticalLayout)
        # RGB view gets 4x the horizontal space of the info column.
        self.horizontalLayout.setStretch(0, 4)
        self.horizontalLayout.setStretch(1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1095, 21))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        MainWindow.setMenuBar(self.menubar)
        self.menubar.addAction(self.menu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign all translatable label texts."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.rgb_frame.setText(_translate("MainWindow", "rgb"))
        # Typo fix: was "theraml frame".
        self.thermal_frame.setText(_translate("MainWindow", "thermal frame"))
        self.face1.setText(_translate("MainWindow", "人臉"))
        self.time1.setText(_translate("MainWindow", "時間"))
        self.temp1.setText(_translate("MainWindow", "溫度"))
        self.face2.setText(_translate("MainWindow", "人臉"))
        self.time2.setText(_translate("MainWindow", "時間"))
        self.temp2.setText(_translate("MainWindow", "溫度"))
        self.face3.setText(_translate("MainWindow", "人臉"))
        self.time3.setText(_translate("MainWindow", "時間"))
        self.temp3.setText(_translate("MainWindow", "溫度"))
        self.menu.setTitle(_translate("MainWindow", "選單"))
from threading import Thread
from datetime import datetime
import time, numpy as np
import cv2
from uvctypes import *
from multiprocessing import Queue
class Thermal(object):
    """Background-thread capture wrapper for a UVC thermal camera driven
    through libuvc (FLIR Lepton / PureThermal, per the VID/PID in uvctypes).

    Y16 frames arrive in a libuvc C callback, are pushed into a bounded
    queue, and a worker thread drains the queue and publishes the latest
    frame.  ``capture()`` returns ``(isCaptured, frame, frame2c)`` where
    ``frame`` is an 8-bit RGB visualisation resized to
    (frame_width, frame_height) and ``frame2c`` is the raw 16-bit array
    used for temperature conversion.
    """
    def __init__(self, width, height, framerate, frame_width, frame_height, log=None):
        # Fall back to the built-in print-based logger when none is supplied.
        self.__log = self.__log if log is None else log
        self.__isCaptured = False
        self.__frame = None      # latest 8-bit RGB visualisation
        self.__frame2c = None    # latest raw 16-bit frame
        self.isOpened = False
        self.isStopping = False
        self.isNotSleeping = True  # NOTE(review): never read inside this class
        self.width = width
        self.height = height
        self.frame_width = frame_width
        self.frame_height = frame_height
        self.framerate = framerate
        # Bounded queue between the libuvc callback and the worker thread.
        BUF_SIZE = 8
        self.q = Queue(BUF_SIZE)
        # Keep a reference to the ctypes callback so it is not garbage-collected
        # while libuvc still holds the function pointer.
        self.PTR_PY_FRAME_CALLBACK = CFUNCTYPE(None, POINTER(uvc_frame), c_void_p)(self.py_frame_callback)
        self.devh = None
        self.scale = 0.0092  # raw-count -> degrees C factor; see update_scale()
        self.__thread = Thread(target=self.__job)
    def start(self):
        """Reset published state and start the capture thread (first run)."""
        self.__isCaptured = False
        self.__frame = None
        self.__frame2c = None
        self.isOpened = True
        self.isStopping = False
        self.__thread.start()
    def restart(self):
        """Recreate and start the worker thread (a Thread can only run once)."""
        self.__isCaptured = False
        self.__frame = None
        self.__frame2c = None
        self.isOpened = True
        self.isStopping = False
        del self.__thread
        self.__thread = Thread(target=self.__job)
        self.__thread.start()
    def stop(self):
        """Ask the worker loop to exit and stop the libuvc stream."""
        self.__isCaptured = False
        self.__frame = None
        self.__frame2c = None
        self.isStopping = True
        libuvc.uvc_stop_streaming(self.devh)
    def capture(self):
        """Return ``(isCaptured, rgb_frame, raw_frame)`` for the newest frame."""
        return self.__isCaptured, self.__frame, self.__frame2c
    def doFFC(self):
        """Trigger a manual flat-field correction on the camera."""
        perform_manual_ffc(self.devh)
    def update_scale(self, raw_temp, c):
        """Recalibrate the raw->Celsius factor from a reference reading:
        raw value ``raw_temp`` is known to correspond to ``c`` degrees C."""
        self.scale = c / raw_temp
    def py_frame_callback(self, frame, userptr):
        """libuvc frame callback: copy the Y16 frame into the queue.

        Runs on a libuvc thread; deliberately does no uvc_* calls here.
        Short/partial transfers are dropped; a full queue drops the frame.
        """
        array_pointer = cast(frame.contents.data, POINTER(c_uint16 * (frame.contents.width * frame.contents.height)))
        data = np.frombuffer(
            array_pointer.contents, dtype=np.dtype(np.uint16)).reshape(frame.contents.height, frame.contents.width)
        if frame.contents.data_bytes != (2 * frame.contents.width * frame.contents.height):
            return
        if not self.q.full():
            self.q.put(data)
    def startStream(self):
        """Find, open and start streaming from the thermal UVC device.

        Prints status/diagnostics and applies default settings (auto FFC,
        high gain, radiometry enabled).
        """
        ctx = POINTER(uvc_context)()
        dev = POINTER(uvc_device)()
        self.devh = POINTER(uvc_device_handle)()
        ctrl = uvc_stream_ctrl()
        res = libuvc.uvc_init(byref(ctx), 0)
        if res < 0:
            print("uvc_init error")
        try:
            res = libuvc.uvc_find_device(ctx, byref(dev), PT_USB_VID, PT_USB_PID, 0)
            if res < 0:
                print("uvc_find_device error")
                exit(1)
            try:
                res = libuvc.uvc_open(dev, byref(self.devh))
                if res < 0:
                    print("uvc_open error")
                    exit(1)
                print("device opened!")
                print_device_info(self.devh)
                print_device_formats(self.devh)
                frame_formats = uvc_get_frame_formats_by_guid(self.devh, VS_FMT_GUID_Y16)
                if len(frame_formats) == 0:
                    print("device does not support Y16")
                    exit(1)
                libuvc.uvc_get_stream_ctrl_format_size(self.devh, byref(ctrl), UVC_FRAME_FORMAT_Y16,
                    frame_formats[0].wWidth, frame_formats[0].wHeight, int(1e7 / frame_formats[0].dwDefaultFrameInterval)
                )
                res = libuvc.uvc_start_streaming(self.devh, byref(ctrl), self.PTR_PY_FRAME_CALLBACK, None, 0)
                if res < 0:
                    print("uvc_start_streaming failed: {0}".format(res))
                    exit(1)
                print("done starting stream, displaying settings")
                print_shutter_info(self.devh)
                print("resetting settings to default")
                set_auto_ffc(self.devh)
                set_gain_high(self.devh)
                set_radiometry_control(self.devh)
                print("current settings")
                print_shutter_info(self.devh)
            except:
                # NOTE(review): this bare except also swallows the SystemExit
                # raised by the exit(1) calls above, so those exits never
                # actually terminate the process.  Preserved as-is.
                print('Failed to Open Device')
        except:
            print('Failed to Find Device')
            exit(1)
    def __job(self):
        """Worker loop: drain the queue, publish frames, periodic re-FFC."""
        duration = 300 * self.framerate  # frame budget between automatic FFC runs
        self.startStream()
        self.__log("Opened: {0}, Stopping: {1}, Duration: {2}".format(
            self.isOpened,
            self.isStopping,
            duration))
        perform_manual_ffc(self.devh)
        while self.isOpened and not self.isStopping:
            if (duration < 0):
                perform_manual_ffc(self.devh)
                duration = 300 * self.framerate
                time.sleep(1 / self.framerate)
            a = self.q.get(True, 500)  # blocks up to 500 s for the next frame
            b = a.copy()  # raw data
            c = b.copy()
            # b is normalised in place by raw_to_8bit; c keeps the raw counts.
            self.__frame = cv2.resize(self.raw_to_8bit(b), (self.frame_width, self.frame_height), interpolation=cv2.INTER_AREA)
            self.__frame2c = c
            self.__isCaptured = True
            duration = duration - 1
            time.sleep(1 / self.framerate)
        self.__log("thermal stop")
        self.__frame = None
        self.__frame2c = None
        self.__isCaptured = False
    def __k2c(self, value):
        """Convert centikelvin (Kelvin x 100) to degrees Celsius."""
        return (value - 27315) / 100.0
    def __k2f(self, value):
        """Convert centikelvin (Kelvin x 100) to degrees Fahrenheit.

        Bug fix: the original called the bare name ``k2c(value)``, which is
        undefined at module scope and raised NameError; route through
        ``self.__k2c`` instead.
        """
        return 1.8 * self.__k2c(value) + 32.0
    def __s2c(self, value):
        """Convert a raw sensor count to degrees Celsius via the linear scale."""
        return self.scale * value
    def __log(self, message):
        """Default logger: print to stdout (overridable via the ctor's ``log``)."""
        print(message)
    def raw_to_8bit(self, data):
        """Stretch a 16-bit frame to full range and return an 8-bit RGB image.

        NOTE: normalises ``data`` in place; callers pass a disposable copy.
        """
        cv2.normalize(data, data, 0, 65535, cv2.NORM_MINMAX)
        np.right_shift(data, 8, data)
        return cv2.cvtColor(np.uint8(data), cv2.COLOR_GRAY2RGB)
from ctypes import *
import ctypes
import platform
import gc
# Load the shared libuvc library for the host OS.
# NOTE(review): failure calls exit(1) during import, so merely importing this
# module on a machine without libuvc terminates the process.
try:
    if platform.system() == 'Darwin':
        libuvc = cdll.LoadLibrary("libuvc.dylib")
    elif platform.system() == 'Linux':
        libuvc = cdll.LoadLibrary("libuvc.so")
    else:
        # Windows / other platforms: rely on the default library search path.
        libuvc = cdll.LoadLibrary("libuvc")
except OSError:
    print("Error: could not find libuvc!")
    exit(1)
class uvc_context(Structure):
    # Mirrors libuvc's `struct uvc_context` (library/USB session state).
    # Field order and ctypes must match the C ABI exactly.
    _fields_ = [("usb_ctx", c_void_p),
                ("own_usb_ctx", c_uint8),
                ("open_devices", c_void_p),
                ("handler_thread", c_ulong),
                ("kill_handler_thread", c_int)]
class uvc_device(Structure):
    # Mirrors libuvc's `struct uvc_device` (one enumerated UVC device).
    _fields_ = [("ctx", POINTER(uvc_context)),
                ("ref", c_int),
                ("usb_dev", c_void_p)]
class uvc_stream_ctrl(Structure):
    # UVC video-streaming probe/commit control block; layout is defined by
    # the UVC specification, so do not reorder or retype fields.
    _fields_ = [("bmHint", c_uint16),
                ("bFormatIndex", c_uint8),
                ("bFrameIndex", c_uint8),
                ("dwFrameInterval", c_uint32),
                ("wKeyFrameRate", c_uint16),
                ("wPFrameRate", c_uint16),
                ("wCompQuality", c_uint16),
                ("wCompWindowSize", c_uint16),
                ("wDelay", c_uint16),
                ("dwMaxVideoFrameSize", c_uint32),
                ("dwMaxPayloadTransferSize", c_uint32),
                ("dwClockFrequency", c_uint32),
                ("bmFramingInfo", c_uint8),
                ("bPreferredVersion", c_uint8),
                ("bMinVersion", c_uint8),
                ("bMaxVersion", c_uint8),
                ("bInterfaceNumber", c_uint8)]
class uvc_format_desc(Structure):
    # Forward declaration: uvc_format_desc and uvc_frame_desc reference each
    # other, so their _fields_ are assigned after both classes exist (below).
    pass
class uvc_frame_desc(Structure):
    # Forward declaration; _fields_ assigned below.
    pass
# Frame descriptor: one supported resolution/rate for a given format.
uvc_frame_desc._fields_ = [
    ("parent", POINTER(uvc_format_desc)),
    ("prev", POINTER(uvc_frame_desc)),
    ("next", POINTER(uvc_frame_desc)),
    # /** Type of frame, such as JPEG frame or uncompressed frame */
    ("bDescriptorSubtype", c_uint), # enum uvc_vs_desc_subtype bDescriptorSubtype;
    # /** Index of the frame within the list of specs available for this format */
    ("bFrameIndex", c_uint8),
    ("bmCapabilities", c_uint8),
    # /** Image width */
    ("wWidth", c_uint16),
    # /** Image height */
    ("wHeight", c_uint16),
    # /** Bitrate of corresponding stream at minimal frame rate */
    ("dwMinBitRate", c_uint32),
    # /** Bitrate of corresponding stream at maximal frame rate */
    ("dwMaxBitRate", c_uint32),
    # /** Maximum number of bytes for a video frame */
    ("dwMaxVideoFrameBufferSize", c_uint32),
    # /** Default frame interval (in 100ns units) */
    ("dwDefaultFrameInterval", c_uint32),
    # /** Minimum frame interval for continuous mode (100ns units) */
    ("dwMinFrameInterval", c_uint32),
    # /** Maximum frame interval for continuous mode (100ns units) */
    ("dwMaxFrameInterval", c_uint32),
    # /** Granularity of frame interval range for continuous mode (100ns) */
    ("dwFrameIntervalStep", c_uint32),
    # /** Frame intervals */
    ("bFrameIntervalType", c_uint8),
    # /** number of bytes per line */
    ("dwBytesPerLine", c_uint32),
    # /** Available frame rates, zero-terminated (in 100ns units) */
    ("intervals", POINTER(c_uint32))]
# Format descriptor: one stream format (e.g. Y16) and its frame descriptors.
uvc_format_desc._fields_ = [
    ("parent", c_void_p),
    ("prev", POINTER(uvc_format_desc)),
    ("next", POINTER(uvc_format_desc)),
    # /** Type of image stream, such as JPEG or uncompressed. */
    ("bDescriptorSubtype", c_uint), # enum uvc_vs_desc_subtype bDescriptorSubtype;
    # /** Identifier of this format within the VS interface's format list */
    ("bFormatIndex", c_uint8),
    ("bNumFrameDescriptors", c_uint8),
    # /** Format specifier */
    ("guidFormat", c_char * 16), # union { uint8_t guidFormat[16]; uint8_t fourccFormat[4]; }
    # /** Format-specific data */
    ("bBitsPerPixel", c_uint8),
    # /** Default {uvc_frame_desc} to choose given this format */
    ("bDefaultFrameIndex", c_uint8),
    ("bAspectRatioX", c_uint8),
    ("bAspectRatioY", c_uint8),
    ("bmInterlaceFlags", c_uint8),
    ("bCopyProtect", c_uint8),
    ("bVariableSize", c_uint8),
    # /** Available frame specifications for this format */
    ("frame_descs", POINTER(uvc_frame_desc))]
class timeval(Structure):
    # C `struct timeval` (seconds + microseconds); used for capture timestamps.
    _fields_ = [("tv_sec", c_long), ("tv_usec", c_long)]
class uvc_frame(Structure):
    # Mirrors libuvc's `struct uvc_frame`; passed by pointer to frame callbacks.
    _fields_ = [# /** Image data for this frame */
                ("data", POINTER(c_uint8)),
                # /** Size of image data buffer */
                ("data_bytes", c_size_t),
                # /** Width of image in pixels */
                ("width", c_uint32),
                # /** Height of image in pixels */
                ("height", c_uint32),
                # /** Pixel data format */
                ("frame_format", c_uint), # enum uvc_frame_format frame_format
                # /** Number of bytes per horizontal line (undefined for compressed format) */
                ("step", c_size_t),
                # /** Frame number (may skip, but is strictly monotonically increasing) */
                ("sequence", c_uint32),
                # /** Estimate of system time when the device started capturing the image */
                ("capture_time", timeval),
                # /** Handle on the device that produced the image.
                # * @warning You must not call any uvc_* functions during a callback. */
                ("source", POINTER(uvc_device)),
                # /** Is the data buffer owned by the library?
                # * If 1, the data buffer can be arbitrarily reallocated by frame conversion
                # * functions.
                # * If 0, the data buffer will not be reallocated or freed by the library.
                # * Set this field to zero if you are supplying the buffer.
                # */
                ("library_owns_data", c_uint8)]
class lep_rad_flux_linear_params(Structure):
    # Lepton RAD flux-linear parameter block (8 x uint16 = 16 bytes);
    # read/written through the RAD extension unit.
    _fields_ = [('sceneEmissivity',c_uint16),
                ('TBkgK',c_uint16),
                ('tauWindow',c_uint16),
                ('TWindowK',c_uint16),
                ('tauAtm',c_uint16),
                ('TAtmK',c_uint16),
                ('reflWindow',c_uint16),
                ('TReflK',c_uint16)]
class lep_rbfo(Structure):
    # Lepton RBFO radiometry calibration constants (see set_rbfo/print_rbfo).
    _fields_ = [('RBFO_R',c_uint32),
                ('RBFO_B',c_uint32),
                ('RBFO_F',c_uint32),
                ('RBFO_O',c_int32),]
class uvc_device_handle(Structure):
    # Mirrors libuvc's `struct uvc_device_handle` for an open device.
    _fields_ = [("dev", POINTER(uvc_device)),
                ("prev", c_void_p),
                ("next", c_void_p),
                ("usb_devh", c_void_p),
                ("info", c_void_p),
                ("status_xfer", c_void_p),
                ("status_buf", c_ubyte * 32),
                ("status_cb", c_void_p),
                ("status_user_ptr", c_void_p),
                ("button_cb", c_void_p),
                ("button_user_ptr", c_void_p),
                ("streams", c_void_p),
                ("is_isight", c_ubyte)]
class lep_oem_sw_version(Structure):
    # Lepton OEM software version (GPP + DSP triples); see print_device_info.
    _fields_ = [("gpp_major", c_ubyte),
                ("gpp_minor", c_ubyte),
                ("gpp_build", c_ubyte),
                ("dsp_major", c_ubyte),
                ("dsp_minor", c_ubyte),
                ("dsp_build", c_ubyte),
                ("reserved", c_ushort)]
class lep_sys_shutter_mode(Structure):
    # Lepton SYS FFC shutter-mode object; layout notes and observed defaults
    # are documented in the comment block that follows this class.
    _fields_ = [("shutterMode", c_uint32),
                ("tempLockoutState", c_uint32),
                ("videoFreezeDuringFFC", c_uint32),
                ("ffcDesired", c_uint32),
                ("elapsedTimeSinceLastFfc", c_uint32),
                ("desiredFfcPeriod", c_uint32),
                ("explicitCmdToOpen", c_bool),
                ("desiredFfcTempDelta", c_uint16),
                ("imminentDelay", c_uint16)]
# LEP_SYS_FFC_SHUTTER_MODE_E shutterMode; /* defines current mode */
# LEP_SYS_SHUTTER_TEMP_LOCKOUT_STATE_E tempLockoutState;
# LEP_SYS_ENABLE_E videoFreezeDuringFFC;
# LEP_SYS_ENABLE_E ffcDesired; /* status of FFC desired */
# LEP_UINT32 elapsedTimeSinceLastFfc; /* in milliseconds x1 */
# LEP_UINT32 desiredFfcPeriod; /* in milliseconds x1 */
# LEP_BOOL explicitCmdToOpen; /* true or false */
# LEP_UINT16 desiredFfcTempDelta; /* in Kelvin x100 */
# LEP_UINT16 imminentDelay; /* in frame counts x1 */
#
# }LEP_SYS_FFC_SHUTTER_MODE_OBJ_T, *LEP_SYS_FFC_SHUTTER_MODE_OBJ_T_PTR;
# Original default shutter mode below is incorrect due to improper ctypes
# Incorrect Default Shutter Info: (1, 0, 0, 0, 1, 0, 1, 0, 48928)
# 1 shutterMode
# 0 tempLockoutState
# 0 videoFreezeDuringFFC
# 0 ffcDesired
# 1 elapsedTimeSinceLastFfc
# 0 desiredFfcPeriod
# True explicitCmdToOpen
# 0 desiredFfcTempDelta
# 48928 imminentDelay
# Correct Default Shutter Info According to IDD: (1, 0, 1, 0, 0, 180000, 0, 150, 52)
# 1 shutterMode
# 0 tempLockoutState
# 1 videoFreezeDuringFFC
# 0 ffcDesired
# 0 elapsedTimeSinceLastFfc
# 180000 desiredFfcPeriod
# False explicitCmdToOpen
# 150 desiredFfcTempDelta
# 52 imminentDelay
# Default Shutter Info According to Lepton on Bootup: (1, 0, 1, 0, 0, 180000, 1, 0, 150)
# 1 shutterMode
# 0 tempLockoutState
# 1 videoFreezeDuringFFC
# 0 ffcDesired
# 0 elapsedTimeSinceLastFfc
# 180000 desiredFfcPeriod
# True explicitCmdToOpen
# 0 desiredFfcTempDelta
# 150 imminentDelay
# Preset lep_sys_shutter_mode objects written by the set_*_ffc helpers.
# Positional arguments follow the _fields_ order of lep_sys_shutter_mode;
# the first argument is shutterMode (0=manual, 1=auto, 2=external).
explicitCmdToOpenVal = 1
desiredFfcTempDeltaVal = 0
imminentDelayVal = 150
sysShutterManual = lep_sys_shutter_mode(0, 0, 1, 0, 0, 180000, explicitCmdToOpenVal, desiredFfcTempDeltaVal, imminentDelayVal)
sysShutterAuto = lep_sys_shutter_mode(1, 0, 1, 0, 0, 180000, explicitCmdToOpenVal, desiredFfcTempDeltaVal, imminentDelayVal)
sysShutterExternal = lep_sys_shutter_mode(2, 0, 1, 0, 0, 180000, explicitCmdToOpenVal, desiredFfcTempDeltaVal, imminentDelayVal)
def call_extension_unit(devh, unit, control, data, size):
    """Read `size` bytes from a UVC extension-unit control into `data`."""
    result = libuvc.uvc_get_ctrl(devh, unit, control, data, size, 0x81)
    return result
def set_extension_unit(devh, unit, control, data, size):
    """Write `size` bytes from `data` to a UVC extension-unit control.

    NOTE(review): the request code 0x81 is passed for both the get and the
    set path, mirroring the original code -- confirm against libuvc's
    uvc_set_ctrl req_code semantics.
    """
    result = libuvc.uvc_set_ctrl(devh, unit, control, data, size, 0x81)
    return result
# USB vendor/product IDs matched by uvc_find_device (PT = PureThermal board,
# presumably -- verify against the target hardware).
PT_USB_VID = 0x1e4e
PT_USB_PID = 0x0100
# UVC extension-unit IDs used with call_extension_unit / set_extension_unit.
AGC_UNIT_ID = 3
OEM_UNIT_ID = 4
RAD_UNIT_ID = 5
SYS_UNIT_ID = 6
VID_UNIT_ID = 7
# Values from libuvc's `enum uvc_frame_format`.
UVC_FRAME_FORMAT_UYVY = 4
UVC_FRAME_FORMAT_I420 = 5
UVC_FRAME_FORMAT_RGB = 7
UVC_FRAME_FORMAT_BGR = 8
UVC_FRAME_FORMAT_Y16 = 13
# 16-byte UVC stream-format GUIDs; the first four bytes are the FOURCC that
# uvc_get_frame_formats_by_guid compares against guidFormat.
VS_FMT_GUID_GREY = create_string_buffer(
    b"Y8 \x00\x00\x10\x00\x80\x00\x00\xaa\x00\x38\x9b\x71", 16
)
VS_FMT_GUID_Y16 = create_string_buffer(
    b"Y16 \x00\x00\x10\x00\x80\x00\x00\xaa\x00\x38\x9b\x71", 16
)
VS_FMT_GUID_YUYV = create_string_buffer(
    b"UYVY\x00\x00\x10\x00\x80\x00\x00\xaa\x00\x38\x9b\x71", 16
)
VS_FMT_GUID_NV12 = create_string_buffer(
    b"NV12\x00\x00\x10\x00\x80\x00\x00\xaa\x00\x38\x9b\x71", 16
)
VS_FMT_GUID_YU12 = create_string_buffer(
    b"I420\x00\x00\x10\x00\x80\x00\x00\xaa\x00\x38\x9b\x71", 16
)
VS_FMT_GUID_BGR3 = create_string_buffer(
    b"\x7d\xeb\x36\xe4\x4f\x52\xce\x11\x9f\x53\x00\x20\xaf\x0b\xa7\x70", 16
)
VS_FMT_GUID_RGB565 = create_string_buffer(
    b"RGBP\x00\x00\x10\x00\x80\x00\x00\xaa\x00\x38\x9b\x71", 16
)
# Tell ctypes the linked-list head type returned by uvc_get_format_descs.
libuvc.uvc_get_format_descs.restype = POINTER(uvc_format_desc)
def print_device_info(devh):
    """Query and print firmware version, FLIR part number and serial number."""
    version = lep_oem_sw_version()
    call_extension_unit(devh, OEM_UNIT_ID, 9, byref(version), 8)
    print("Version gpp: {0}.{1}.{2} dsp: {3}.{4}.{5}".format(
        version.gpp_major, version.gpp_minor, version.gpp_build,
        version.dsp_major, version.dsp_minor, version.dsp_build,
    ))
    part_number = create_string_buffer(32)
    call_extension_unit(devh, OEM_UNIT_ID, 8, part_number, 32)
    print("FLIR part #: {0}".format(part_number.raw))
    serial_number = create_string_buffer(8)
    call_extension_unit(devh, SYS_UNIT_ID, 3, serial_number, 8)
    print("FLIR serial #: {0}".format(repr(serial_number.raw)))
def uvc_iter_formats(devh):
    """Yield each uvc_format_desc in the device's linked list of formats."""
    node = libuvc.uvc_get_format_descs(devh)
    while node:
        desc = node.contents
        yield desc
        node = desc.next
def uvc_iter_frames_for_format(devh, format_desc):
    """Yield each uvc_frame_desc attached to the given format descriptor.

    `devh` is unused; kept for interface symmetry with uvc_iter_formats.
    """
    node = format_desc.frame_descs
    while node:
        desc = node.contents
        yield desc
        node = desc.next
def print_device_formats(devh):
    """Print every advertised stream format with its frame sizes and rates."""
    for fmt in uvc_iter_formats(devh):
        print("format: {0}".format(fmt.guidFormat[0:4]))
        for frame in uvc_iter_frames_for_format(devh, fmt):
            fps = int(1e7 / frame.dwDefaultFrameInterval)
            print("  frame {0}x{1} @ {2}fps".format(frame.wWidth, frame.wHeight, fps))
def uvc_get_frame_formats_by_guid(devh, vs_fmt_guid):
    """Return the frame descriptors of the format whose FOURCC (first four
    GUID bytes) matches `vs_fmt_guid`, or an empty list when none match."""
    wanted = vs_fmt_guid[0:4]
    for fmt in uvc_iter_formats(devh):
        if fmt.guidFormat[0:4] == wanted:
            return list(uvc_iter_frames_for_format(devh, fmt))
    return []
def _apply_shutter_mode(devh, shutter_obj):
    """Write a lep_sys_shutter_mode preset to the SYS FFC shutter-mode control.

    Shared implementation for set_manual_ffc / set_auto_ffc / set_external_ffc,
    which previously triplicated this body and carried unused locals; the
    wrappers differ only in which preset struct they send.
    """
    sizeData = 32  # sizeof(lep_sys_shutter_mode)
    setSDK = 0x3D  # SYS module: FFC shutter-mode SET command
    controlID = (setSDK >> 2) + 1 #formula from Kurt Kiefer
    print('controlID: ' + str(controlID))
    set_extension_unit(devh, SYS_UNIT_ID, controlID, byref(shutter_obj), sizeData)
def set_manual_ffc(devh):
    """Put the flat-field-correction shutter into manual mode."""
    _apply_shutter_mode(devh, sysShutterManual)
def set_auto_ffc(devh):
    """Put the flat-field-correction shutter into automatic mode."""
    _apply_shutter_mode(devh, sysShutterAuto)
def set_external_ffc(devh):
    """Put the flat-field-correction shutter into external mode."""
    _apply_shutter_mode(devh, sysShutterExternal)
# Module-level scratch struct that print_shutter_info reads the GET result into.
shutter = lep_sys_shutter_mode()
def print_shutter_info(devh):
    """Read the SYS FFC shutter-mode object (GET command 0x3C) and print it."""
    getSDK = 0x3C
    controlID = (getSDK >> 2) + 1
    call_extension_unit(devh, SYS_UNIT_ID, controlID, byref(shutter), 32)
    print("Shutter Info:\n {0}\t shutterMode\n {1}\t tempLockoutState\n {2}\t videoFreezeDuringFFC\n\
 {3}\t ffcDesired\n {4}\t elapsedTimeSinceLastFfc\n {5}\t desiredFfcPeriod\n\
 {6}\t explicitCmdToOpen\n {7}\t desiredFfcTempDelta\n {8}\t imminentDelay\n".format(
        shutter.shutterMode, shutter.tempLockoutState, shutter.videoFreezeDuringFFC,
        shutter.ffcDesired, shutter.elapsedTimeSinceLastFfc, shutter.desiredFfcPeriod,
        shutter.explicitCmdToOpen, shutter.desiredFfcTempDelta, shutter.imminentDelay,
    ))
def print_flux_linear_parameters(devh):
    """Read the RAD flux-linear parameter block and print the first fields."""
    params = lep_rad_flux_linear_params()
    print('flux_linear_parameters')
    getSDK = 0xBC
    controlID = (getSDK >> 2) + 1
    call_extension_unit(devh, RAD_UNIT_ID, controlID, byref(params), 16)
    print('sceneEmissivity: ', params.sceneEmissivity, params.TBkgK, params.tauWindow)
def print_rbfo(devh):
    """Read and print the radiometry RBFO calibration constants."""
    params = lep_rbfo()
    controlID = (0x04 >> 2) + 1
    call_extension_unit(devh, RAD_UNIT_ID, controlID, byref(params), 16)
    print("RBFO Parameters -> R: {}, B: {}, F: {}, O:{}".format(
        params.RBFO_R, params.RBFO_B, params.RBFO_F, params.RBFO_O))
def set_rbfo(devh):
    """Write hard-coded RBFO radiometry calibration constants to the camera."""
    params = lep_rbfo()
    params.RBFO_R = 231159
    params.RBFO_B = 1428
    params.RBFO_F = 1
    params.RBFO_O = 6094
    controlID = (0x05 >> 2) + 1  # formula from Kurt Kiefer
    print('controlID: ' + str(controlID))
    set_extension_unit(devh, RAD_UNIT_ID, controlID, byref(params), 16)
def reboot(devh):
    """Issue the OEM reboot command (0x42) with an empty 1-byte payload."""
    payload = create_string_buffer(1)
    controlID = (0x42 >> 2) + 1  # formula from Kurt Kiefer
    print('controlID: ' + str(controlID))
    set_extension_unit(devh, OEM_UNIT_ID, controlID, payload, 1)
def perform_manual_ffc(devh):
    """Trigger one flat-field-correction (shutter) cycle immediately.

    Sends the SYS "run FFC" command (0x42) with an empty 1-byte payload.
    Cleanup: the original carried an unused local ``getSDK = 0x3D``; removed.
    """
    sizeData = 1
    payload = create_string_buffer(sizeData)
    runFFC = 0x42  # SYS module: run-FFC command
    controlID = (runFFC >> 2) + 1 #formula from Kurt Kiefer
    print('controlID: ' + str(controlID))
    set_extension_unit(devh, SYS_UNIT_ID, controlID, payload, sizeData)
def _set_gain_mode(devh, mode):
    """Write the SYS gain-mode control (0=HIGH, 1=LOW, 2=AUTO) and then run a
    manual FFC, as the original triplicated implementations did.

    NOTE(review): sizeData is 4 but the buffer is a 2-byte c_uint16 -- the
    transfer length and buffer size disagree; preserved as-is, verify on
    hardware before changing.
    """
    sizeData = 4
    gain_mode = (c_uint16)(mode)
    setGainSDK = 0x49
    controlID = (setGainSDK >> 2) + 1 #formula from Kurt Kiefer
    print('controlID: ' + str(controlID))
    set_extension_unit(devh, SYS_UNIT_ID, controlID, byref(gain_mode), sizeData)
    perform_manual_ffc(devh)
def set_gain_low(devh):
    """Select LOW gain mode (mode code 1)."""
    _set_gain_mode(devh, 1)
def set_gain_high(devh):
    """Select HIGH gain mode (mode code 0)."""
    _set_gain_mode(devh, 0)
def set_gain_auto(devh):
    """Let the camera select gain automatically (mode code 2)."""
    _set_gain_mode(devh, 2)
def set_radiometry_control(devh):
    """Enable the RAD radiometry control (SDK command 0x11).

    NOTE(review): sizeData is 4 but the buffer is a 2-byte c_uint16; the
    transfer length and buffer size disagree (same in the three setters
    below) -- confirm on hardware before changing.
    """
    sizeData = 4
    radiometry_mode = (c_uint16)(1) #0=disable, 1=enable
    setRadiometrySDK = 0x11
    controlID = (setRadiometrySDK >> 2) + 1 #formula from Kurt Kiefer
    print('controlID: ' + str(controlID))
    set_extension_unit(devh, RAD_UNIT_ID , controlID, byref(radiometry_mode), sizeData) #set_extension_unit(devh, unit, control, data, size)
    #perform_manual_ffc(devh)
def set_tlinear_control(devh):
    """Enable the RAD TLinear control (SDK command 0xC1)."""
    sizeData = 4
    tlinear_mode = (c_uint16)(1) #0=disable, 1=enable
    setTlinearSDK = 0xC1
    controlID = (setTlinearSDK >> 2) + 1 #formula from Kurt Kiefer
    print('controlID: ' + str(controlID))
    set_extension_unit(devh, RAD_UNIT_ID , controlID, byref(tlinear_mode), sizeData) #set_extension_unit(devh, unit, control, data, size)
    #perform_manual_ffc(devh)
def set_tshutter_control(devh):
    """Set the RAD TShutter mode (SDK command 0x25); 2 = LEP_RAD_TS_FIXED_MODE."""
    sizeData = 4
    tshutter_mode = (c_uint16)(2) # LEP_RAD_TS_USER_MODE = 0,LEP_RAD_TS_CAL_MODE,LEP_RAD_TS_FIXED_MODE,LEP_RAD_TS_END_TS_MODE
    setTShutterSDK = 0x25
    controlID = (setTShutterSDK >> 2) + 1 #formula from Kurt Kiefer
    print('controlID: ' + str(controlID))
    set_extension_unit(devh, RAD_UNIT_ID , controlID, byref(tshutter_mode), sizeData) #set_extension_unit(devh, unit, control, data, size)
    #perform_manual_ffc(devh)
def set_tlinear_auto_resolution(devh):
    """Enable RAD TLinear auto-resolution (SDK command 0xC9)."""
    sizeData = 4
    auto_resolution_mode = (c_uint16)(1) # LEP_RAD_DISABLE = 0,LEP_RAD_ENABLE,LEP_END_RAD_ENABLE
    setTLinear_Auto_ResolutionSDK = 0xC9
    controlID = (setTLinear_Auto_ResolutionSDK >> 2) + 1 #formula from Kurt Kiefer
    print('controlID: ' + str(controlID))
    set_extension_unit(devh, RAD_UNIT_ID , controlID, byref(auto_resolution_mode), sizeData)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment