Commit b1140b53 authored by YONG-LIN SU's avatar YONG-LIN SU

新增界面及IRM80x62鏡頭存取方法

parent ff09f4ca
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "useful-clone",
"metadata": {},
"outputs": [],
"source": [
"import serial\n",
"import time\n",
"import numpy as np\n",
"import cv2\n",
"import math\n",
"from threading import Thread\n",
"from datetime import datetime"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "every-sight",
"metadata": {},
"outputs": [],
"source": [
"# 獲取溫度影像\n",
"cmd_get_image = [0XEE,0xE1,0x01,0x55,0xFF,0xFC,0xFD,0xFF]\n",
"# 設定發射率\n",
"cmd_set_emissivity = [0XEE,0xB2,0x55,0xAA,None,0xFF,0xFC,0xFD,0xFF]\n",
"# 獲取發射率\n",
"cmd_get_emissivity = [0xEE,0xB5,0xFF,0xFC,0XFD,0xFF]\n",
"# 設置偏移\n",
"cmd_set_offset = [0xEE,0xB7,0x55,0xAA,None,0XFF,0xFC,0xFD,0xFF]\n",
"\n",
"ser = serial.Serial()\n",
"\n",
"ser.port = \"/dev/ttyUSB0\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "foster-affect",
"metadata": {},
"outputs": [],
"source": [
"class Thermal(object):\n",
" def __init__(self, width = 80, height = 62, framerate = 4, frame_width = 80*5, frame_height = 62*5, log=None, port=\"/dev/ttyUSB0\"):\n",
" self.__log = self.__log if log is None else log\n",
" self.__isCaptured = False\n",
" self.__frame = None\n",
" self.__frame2c = None\n",
" self.isOpened = False\n",
" self.isStopping = False\n",
" self.isNotSleeping=True\n",
" # comport 定義\n",
" self.ser = serial.Serial()\n",
" self.ser.port = port\n",
" #921600,N,8,1\n",
" self.ser.baudrate = 921600\n",
" self.ser.bytesize = serial.EIGHTBITS #number of bits per bytes\n",
" self.ser.parity = serial.PARITY_NONE #set parity check\n",
" self.ser.stopbits = serial.STOPBITS_ONE #number of stop bits\n",
" self.ser.timeout = 0.5 #non-block read 0.5s\n",
" self.ser.writeTimeout = 0.5 #timeout for write 0.5s\n",
" self.ser.xonxoff = False #disable software flow control\n",
" self.ser.rtscts = False #disable hardware (RTS/CTS) flow control\n",
" self.ser.dsrdtr = False #disable hardware (DSR/DTR) flow control\n",
" \n",
" self.width = width\n",
" self.height = height\n",
" self.frame_width = frame_width\n",
" self.frame_height = frame_height\n",
" self.framerate = framerate\n",
" self.__thread = Thread(target=self.__job)\n",
" def start(self):\n",
" self.__isCaptured = False\n",
" self.__frame = None\n",
" self.__frame2c = None\n",
" self.isOpened = True\n",
" self.isStopping = False\n",
" self.__thread.start()\n",
" def restart(self):\n",
" self.__isCaptured = False\n",
" self.__frame = None\n",
" self.__frame2c = None\n",
" self.isOpened = True\n",
" self.isStopping = False\n",
" del self.__thread\n",
" self.__thread = Thread(target=self.__job)\n",
" self.__thread.start()\n",
" def stop(self):\n",
" self.__isCaptured = False\n",
" self.__frame = None\n",
" self.__frame2c = None\n",
" self.isStopping = True\n",
" self.ser.close()\n",
" def capture(self):\n",
" return self.__isCaptured, self.__frame, self.__frame2c\n",
" def __job(self):\n",
" self.__log(\"Opened: {0}, Stopping: {1}\".format(\n",
" self.isOpened,\n",
" self.isStopping,\n",
" ))\n",
" \n",
" self.ser.open()\n",
" self.ser.flushInput() # flush input buffer\n",
" self.ser.flushOutput() # flush output buffer\n",
" \n",
" while self.isOpened and not self.isStopping and self.ser.isOpen():\n",
" \n",
" #write 8 byte data\n",
" self.ser.write(cmd_get_image)\n",
" #read 8 byte data\n",
" response = self.ser.read(9927)\n",
" a = self.bytes2temps(response)\n",
" if(a is None):\n",
" continue\n",
" b = a.copy() # 原始資料\n",
" c = b.copy()\n",
" self.__frame = cv2.resize(self.temp2image(b),(self.frame_width,self.frame_height), interpolation=cv2.INTER_AREA)\n",
" self.__frame2c = self.temp_reshape(c)\n",
" self.__isCaptured = True\n",
" time.sleep(0.15)\n",
" self.__log(\"thermal stop\")\n",
" self.__frame = None\n",
" self.__frame2c = None\n",
" self.__isCaptured = False\n",
" self.ser.close()\n",
" def __log(self, message):\n",
" print(message)\n",
" def bytes2temps(self,bdata):\n",
" data = list(bdata)\n",
" temperatures = np.zeros(80*62)\n",
" if(len(data) == 9927 and data[0] == 0xE1):\n",
" for i in range(4960):\n",
" htemp = data[i*2 + 1]\n",
" ltemp = data[i*2 + 2]\n",
" temperatures[i] = ((htemp*256 + ltemp) - 2731)/10\n",
" return temperatures\n",
" else:\n",
" return None\n",
" def temp_reshape(self,temperatures,height=62,width=80):\n",
" reshaped_temp = np.zeros((height,width))\n",
" for i in range(height):\n",
" for j in range(width):\n",
" reshaped_temp[i][j] = temperatures[width*i + j]\n",
" return reshaped_temp\n",
" \n",
" def temp2image(self,temperatures,width = 80,height = 62,scale = 1):\n",
" output = temperatures\n",
" # scaling\n",
" minValue = output.min()\n",
" maxValue = output.max()\n",
" output = output - minValue\n",
" output = output * 255/ (maxValue - minValue) # Now scaled to 0 - 255\n",
"\n",
" img = np.zeros((height,width,1))\n",
" for i in range(height):\n",
" for j in range(width):\n",
" img[i][j] = output[width*i + j]\n",
"\n",
" # apply colormap\n",
" #dim = (width*scale, height*scale)\n",
" img = img.astype(np.uint8)\n",
" #img = cv2.resize(img, dim, interpolation = cv2.INTER_LINEAR )\n",
" img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n",
" return img"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "occasional-voice",
"metadata": {},
"outputs": [],
"source": [
"t = Thermal(width = 80, height = 62, framerate = 4, frame_width = 80*5, frame_height = 62*5, log=None, port=\"/dev/ttyUSB0\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "activated-pitch",
"metadata": {},
"outputs": [],
"source": [
"t.start()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "numeric-claim",
"metadata": {},
"outputs": [],
"source": [
"t.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "affecting-copper",
"metadata": {},
"outputs": [],
"source": [
"t.capture()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "balanced-platinum",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "timely-alliance",
"metadata": {},
"outputs": [],
"source": [
"from camera import Camera\n",
"from detector import FaceDetector\n",
"\n",
"import cv2\n",
"import numpy as np\n",
"import time\n",
"import copy\n",
"\n",
"from tracker import Tracker"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "golden-strip",
"metadata": {},
"outputs": [],
"source": [
"camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=None)\n",
"detector = FaceDetector(model_path=\"./models/version-RFB/RFB-320.mnn\" , input_size=(320,240))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "correct-region",
"metadata": {},
"outputs": [],
"source": [
"camera.start()\n",
"time.sleep(2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "clear-rally",
"metadata": {},
"outputs": [],
"source": [
"# Create Object Tracker\n",
"tracker = Tracker(160, 30, 5, 100)\n",
"\n",
"# Variables initialization\n",
"skip_frame_count = 0\n",
"track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),\n",
" (0, 255, 255), (255, 0, 255), (255, 127, 255),\n",
" (127, 0, 255), (127, 0, 127)]\n",
"pause = False"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "premium-device",
"metadata": {},
"outputs": [],
"source": [
"# Infinite loop to process video frames\n",
"while(True):\n",
"    # Capture frame-by-frame\n",
"    ret, frame = camera.capture()\n",
"\n",
"    # Make copy of original frame\n",
"    orig_frame = copy.copy(frame)\n",
"\n",
"    # Skip initial frames that display logo\n",
"    if (skip_frame_count < 15):\n",
"        skip_frame_count += 1\n",
"        continue\n",
"\n",
"    # Detect faces and convert detections into centroids for the tracker\n",
"    boxes, labels, probs = detector.predict(frame)\n",
"    centers = []\n",
"    shapes = []\n",
"    temperatures = []\n",
"    temperature_locs = []\n",
"    for i in range(boxes.shape[0]):\n",
"        box = boxes[i, :]\n",
"        score = probs[i]\n",
"        if(score > 0.9):\n",
"            x1 = max(box[0], 0)\n",
"            y1 = max(box[1], 0)\n",
"            x2 = min(box[2], frame.shape[1])\n",
"            y2 = min(box[3], frame.shape[0])\n",
"            centers.append([(x1 + x2) / 2, (y1 + y2) / 2])\n",
"            shapes.append([(x2 - x1), (y2 - y1)])  # w,h\n",
"            temperatures.append(35.5)\n",
"            temperature_locs.append(33)\n",
"\n",
"    # If centroids are detected then track them\n",
"    if (len(centers) > 0):\n",
"\n",
"        # Track object using Kalman Filter\n",
"        tracker.Update(centers, shapes, temperatures, temperature_locs)\n",
"\n",
"        # For identified object tracks draw tracking line\n",
"        # Use various colors to indicate different track_id\n",
"        for i in range(len(tracker.tracks)):\n",
"            if (len(tracker.tracks[i].trace) > 1):\n",
"                for j in range(len(tracker.tracks[i].trace)-1):\n",
"                    # Draw trace line\n",
"                    x1 = tracker.tracks[i].trace[j][0][0]\n",
"                    y1 = tracker.tracks[i].trace[j][1][0]\n",
"                    x2 = tracker.tracks[i].trace[j+1][0][0]\n",
"                    y2 = tracker.tracks[i].trace[j+1][1][0]\n",
"                    clr = tracker.tracks[i].track_id % 9\n",
"                    cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),\n",
"                             track_colors[clr], 2)\n",
"\n",
"    # Display the resulting tracking frame\n",
"    cv2.imshow('Tracking', frame)\n",
"\n",
"    # Display the original frame\n",
"    cv2.imshow('Original', orig_frame)\n",
"\n",
"    # Slower the FPS\n",
"    cv2.waitKey(50)\n",
"\n",
"    # Check for key strokes\n",
"    k = cv2.waitKey(50) & 0xff\n",
"    if k == 27:  # 'esc' key has been pressed, exit program.\n",
"        break\n",
"    if k == 112:  # 'p' has been pressed. this will pause/resume the code.\n",
"        pause = not pause\n",
"        if (pause is True):\n",
"            print(\"Code is paused. Press 'p' to resume..\")\n",
"            while (pause is True):\n",
"                # stay in this loop until\n",
"                key = cv2.waitKey(30) & 0xff\n",
"                if key == 112:\n",
"                    pause = False\n",
"                    print(\"Resume code..!!\")\n",
"                    break\n",
"\n",
"# When everything done, release the capture\n",
"camera.stop()\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "intense-incident",
"metadata": {},
"outputs": [],
"source": [
"# Infinite loop to process video frames\n",
"while(True):\n",
" # Capture frame-by-frame\n",
" ret, frame = camera.capture()\n",
"\n",
" # Make copy of original frame\n",
" orig_frame = copy.copy(frame)\n",
"\n",
" # Skip initial frames that display logo\n",
" if (skip_frame_count < 15):\n",
" skip_frame_count += 1\n",
" continue\n",
"\n",
" # Detect and return centeroids of the objects in the frame\n",
" #centers = detector.Detect(frame)\n",
" boxes, labels, probs = detector.predict(frame)\n",
" centers = []\n",
" shapes = []\n",
" temperatures = []\n",
" temperature_locs = []\n",
" for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" if(score > 0.9):\n",
" x1 = max(box[0],0)\n",
" y1 = max(box[1],0)\n",
" x2 = min(box[2],frame.shape[1])\n",
" y2 = min(box[3],frame.shape[0])\n",
" centers.append([(x1+x2)/2,(y1+y2)/2])\n",
" shapes.append([(y2 - y1),(x2 - x1)]) # w,h\n",
" temperatures.append(35.5)\n",
" temperature_locs.append(33)\n",
" \n",
" \n",
" # If centroids are detected then track them\n",
" if (len(centers) > 0):\n",
"\n",
" # Track object using Kalman Filter\n",
" tracker.Update(centers, shapes, temperatures, temperature_locs)\n",
"\n",
" # For identified object tracks draw tracking line\n",
" # Use various colors to indicate different track_id\n",
" for i in range(len(tracker.tracks)):\n",
" if (len(tracker.tracks[i].trace) > 1):\n",
" cx,cy = tracker.tracks[i].prediction[0]\n",
" clr = tracker.tracks[i].track_id % 9\n",
" tx1 = cx - (tracker.tracks[0].w)/2\n",
" ty1 = cy - (tracker.tracks[0].h)/2\n",
" tx2 = cx + (tracker.tracks[0].w)/2\n",
" ty2 = cy + (tracker.tracks[0].h)/2\n",
" cv2.rectangle(frame, (int(tx1), int(ty1)), (int(tx2), int(ty2)), track_colors[clr], 2)\n",
" for j in range(len(tracker.tracks[i].trace)-1):\n",
" # Draw trace line\n",
" x1 = tracker.tracks[i].trace[j][0][0]\n",
" y1 = tracker.tracks[i].trace[j][1][0]\n",
" x2 = tracker.tracks[i].trace[j+1][0][0]\n",
" y2 = tracker.tracks[i].trace[j+1][1][0]\n",
" \n",
" \n",
" \n",
" clr = tracker.tracks[i].track_id % 9\n",
" \n",
" cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),\n",
" track_colors[clr], 2)\n",
"\n",
" # Display the resulting tracking frame\n",
" cv2.imshow('Tracking', frame)\n",
" # Display the original frame\n",
" cv2.imshow('Original', orig_frame)\n",
"\n",
" # Slower the FPS\n",
" cv2.waitKey(50)\n",
"\n",
" # Check for key strokes\n",
" k = cv2.waitKey(50) & 0xff\n",
" if k == 27: # 'esc' key has been pressed, exit program.\n",
" break\n",
" if k == 112: # 'p' has been pressed. this will pause/resume the code.\n",
" pause = not pause\n",
" if (pause is True):\n",
" print(\"Code is paused. Press 'p' to resume..\")\n",
" while (pause is True):\n",
" # stay in this loop until\n",
" key = cv2.waitKey(30) & 0xff\n",
" if key == 112:\n",
" pause = False\n",
" print(\"Resume code..!!\")\n",
" break\n",
"\n",
"# When everything done, release the capture\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "julian-wisconsin",
"metadata": {},
"outputs": [],
"source": [
"tracker.tracks[0].temp_loc"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "blond-lewis",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "intermediate-jordan",
"metadata": {},
"outputs": [],
"source": [
"boxes, labels, probs"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "peripheral-science",
"metadata": {},
"outputs": [],
"source": [
"centers = []\n",
"shapes = []\n",
"temperatures = []\n",
"temperature_locs = []\n",
"for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" if(score > 0.9):\n",
" x1 = max(box[0],0)\n",
" y1 = max(box[1],0)\n",
" x2 = min(box[2],frame.shape[1])\n",
" y2 = min(box[3],frame.shape[0])\n",
" centers.append([(x1+x2)/2,(y1+y2)/2])\n",
" shapes.append([(x2 - x1),(y2 - y1)]) # w,h\n",
" temperatures.append(35.5)\n",
" temperature_locs.append(33)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "signed-czech",
"metadata": {},
"outputs": [],
"source": [
"shapes"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "choice-snowboard",
"metadata": {},
"outputs": [],
"source": [
"tracker.Update(centers, shapes, temperatures, temperature_locs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "perceived-reservation",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
......@@ -45,7 +45,7 @@
"outputs": [],
"source": [
"thermal = Thermal(width=160, height=120, framerate=9, frame_width=640, frame_height=480, log=None)\n",
"camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)\n",
"camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=None)\n",
"detector = FaceDetector(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn\" , input_size=(320,240))\n",
"# anti = AntiSpoofing(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn\")\n",
"aligner = BoxAligner(img_h = 480,img_w = 640)\n",
......@@ -141,7 +141,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "extra-injection",
"id": "laden-chuck",
"metadata": {},
"outputs": [],
"source": [
......@@ -176,7 +176,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "parental-continent",
"id": "atlantic-toronto",
"metadata": {},
"outputs": [],
"source": [
......@@ -186,7 +186,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "simplified-universal",
"id": "medieval-tactics",
"metadata": {},
"outputs": [],
"source": [
......@@ -199,7 +199,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "revolutionary-repair",
"id": "talented-schedule",
"metadata": {},
"outputs": [],
"source": [
......@@ -528,7 +528,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
"version": "3.8.10"
}
},
"nbformat": 4,
......
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "political-chassis",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import os\n",
"from datetime import datetime\n",
"import numpy as np\n",
"# from IRM80x62_thermal import Thermal\n",
"from uvc_thermal import Thermal\n",
"from camera import Camera "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "improved-representative",
"metadata": {},
"outputs": [],
"source": [
"# thermal = Thermal(width = 80, height = 62, framerate = 4, frame_width = 640, frame_height = 480, log=None, port=\"/dev/ttyUSB0\")\n",
"\n",
"thermal = Thermal(width=160, height=120, framerate=9, frame_width=160*4, frame_height=120*4, log=None)\n",
"camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=None)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "light-wednesday",
"metadata": {},
"outputs": [],
"source": [
"thermal.start()\n",
"camera.start()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "comparative-neighbor",
"metadata": {},
"outputs": [],
"source": [
"while(True):\n",
" if(thermal.isOpened and camera.isOpened):\n",
" thermal_ret,thermal_frame, thermal_row = thermal.capture()\n",
" ret, frame = camera.capture()\n",
" cat_img = cv2.hconcat([frame,thermal_frame])\n",
" cv2.imshow(\"frame\",cat_img)\n",
" key = cv2.waitKey(33)\n",
" if(key == ord('q')):\n",
" break\n",
" elif(key == ord('c')):\n",
" dt_now = datetime.timestamp(datetime.now())\n",
" cv2.imwrite(os.path.join(\"./images\",\"frame\" + str(dt_now) + \".jpg\"),frame)\n",
" cv2.imwrite(os.path.join(\"./images\",\"thermal_frame\"+ str(dt_now) +\".jpg\"),thermal_frame)\n",
"\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "foster-owner",
"metadata": {},
"outputs": [],
"source": [
"thermal.stop()\n",
"camera.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "through-annotation",
"metadata": {},
"outputs": [],
"source": [
"cat_img = cv2.hconcat([frame,thermal_frame])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "contained-scanning",
"metadata": {},
"outputs": [],
"source": [
"thermal.stop()\n",
"camera.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "straight-substitute",
"metadata": {},
"outputs": [],
"source": [
"cv2.imshow(\"frame\",thermal_frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "amended-anime",
"metadata": {},
"outputs": [],
"source": [
"frame_roi = cv2.selectROI(\"frame\",frame)\n",
"cv2.destroyAllWindows()\n",
"thermal_roi = cv2.selectROI(\"thermal frame\",thermal_frame)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "destroyed-startup",
"metadata": {},
"outputs": [],
"source": [
"from aligner import BoxAligner"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "precise-input",
"metadata": {},
"outputs": [],
"source": [
"aligner = BoxAligner(img_h = 480,img_w = 640)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cultural-pasta",
"metadata": {},
"outputs": [],
"source": [
"rgb_points = []\n",
"thermal_points = []"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "vulnerable-pizza",
"metadata": {},
"outputs": [],
"source": [
"x,y,w,h = frame_roi\n",
"rgb_points.append([x,y])\n",
"rgb_points.append([x+w,y])\n",
"rgb_points.append([x,y+h])\n",
"rgb_points.append([x+w,y+h])\n",
"\n",
"x,y,w,h = thermal_roi\n",
"thermal_points.append([x,y])\n",
"thermal_points.append([x+w,y])\n",
"thermal_points.append([x,y+h])\n",
"thermal_points.append([x+w,y+h])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "automotive-stanford",
"metadata": {},
"outputs": [],
"source": [
"aligner.get_omography_matrix(np.array(rgb_points),np.array(thermal_points))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "stopped-marker",
"metadata": {},
"outputs": [],
"source": [
"np.array(rgb_points)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "arranged-engine",
"metadata": {},
"outputs": [],
"source": [
"np.array(thermal_points)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "removed-employer",
"metadata": {},
"outputs": [],
"source": [
"aligner.M"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "emerging-vocabulary",
"metadata": {},
"outputs": [],
"source": [
"np.array([[ 1.00000000e+00, 6.59371035e-17, -9.84556895e-14],\n",
" [ 0.00000000e+00, 1.00000000e+00, -4.92278448e-14],\n",
" [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "minor-prague",
"metadata": {},
"outputs": [],
"source": [
"196 233 285 356"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ready-healing",
"metadata": {},
"outputs": [],
"source": [
"np.dot(aligner.M,[139,193,1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "olive-attachment",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "innocent-payment",
"metadata": {},
"outputs": [],
"source": [
"# load image and crop face area\n",
"rgb_points = []\n",
"thermal_points = []\n",
"for root,dirs,files in os.walk(\"./images/\"):\n",
" for f in files:\n",
" if(f[:5] == \"frame\"):\n",
" frame_path = os.path.join(root,f)\n",
" thermal_path = os.path.join(root,f.replace(\"frame\",\"thermal_frame\"))\n",
" \n",
" frame = cv2.imread(frame_path)\n",
" thermal_frame = cv2.imread(thermal_path)\n",
" \n",
" frame_roi = cv2.selectROI(\"frame\",frame)\n",
" cv2.destroyAllWindows()\n",
" thermal_roi = cv2.selectROI(\"thermal frame\",thermal_frame)\n",
" cv2.destroyAllWindows()\n",
" \n",
" x,y,w,h = frame_roi\n",
" rgb_points.append([x,y])\n",
" rgb_points.append([x+w,y])\n",
" rgb_points.append([x,y+h])\n",
" rgb_points.append([x+w,y+h])\n",
"\n",
" x,y,w,h = thermal_roi\n",
" thermal_points.append([x,y])\n",
" thermal_points.append([x+w,y])\n",
" thermal_points.append([x,y+h])\n",
" thermal_points.append([x+w,y+h])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "played-oasis",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "permanent-kitty",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "false-shoot",
"metadata": {},
"outputs": [],
"source": [
"import serial\n",
"import time\n",
"import numpy as np\n",
"import cv2\n",
"import math\n",
"from threading import Thread\n",
"from datetime import datetime"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "specific-realtor",
"metadata": {},
"outputs": [],
"source": [
"# 獲取溫度影像\n",
"cmd_get_image = [0XEE,0xE1,0x01,0x55,0xFF,0xFC,0xFD,0xFF]\n",
"# 設定發射率\n",
"cmd_set_emissivity = [0XEE,0xB2,0x55,0xAA,None,0xFF,0xFC,0xFD,0xFF]\n",
"# 獲取發射率\n",
"cmd_get_emissivity = [0xEE,0xB5,0xFF,0xFC,0XFD,0xFF]\n",
"# 設置偏移\n",
"cmd_set_offset = [0xEE,0xB7,0x55,0xAA,None,0XFF,0xFC,0xFD,0xFF]\n",
"\n",
"ser = serial.Serial()\n",
"\n",
"ser.port = \"/dev/ttyUSB0\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "logical-princeton",
"metadata": {},
"outputs": [],
"source": [
"class Thermal(object):\n",
" def __init__(self, width = 80, height = 62, framerate = 4, frame_width = 80*5, frame_height = 62*5, log=None, port=\"/dev/ttyUSB0\"):\n",
" self.__log = self.__log if log is None else log\n",
" self.__isCaptured = False\n",
" self.__frame = None\n",
" self.__frame2c = None\n",
" self.isOpened = False\n",
" self.isStopping = False\n",
" self.isNotSleeping=True\n",
" # comport 定義\n",
" self.ser = serial.Serial()\n",
" self.ser.port = port\n",
" #921600,N,8,1\n",
" self.ser.baudrate = 921600\n",
" self.ser.bytesize = serial.EIGHTBITS #number of bits per bytes\n",
" self.ser.parity = serial.PARITY_NONE #set parity check\n",
" self.ser.stopbits = serial.STOPBITS_ONE #number of stop bits\n",
" self.ser.timeout = 0.5 #non-block read 0.5s\n",
" self.ser.writeTimeout = 0.5 #timeout for write 0.5s\n",
" self.ser.xonxoff = False #disable software flow control\n",
" self.ser.rtscts = False #disable hardware (RTS/CTS) flow control\n",
" self.ser.dsrdtr = False #disable hardware (DSR/DTR) flow control\n",
" \n",
" self.width = width\n",
" self.height = height\n",
" self.frame_width = frame_width\n",
" self.frame_height = frame_height\n",
" self.framerate = framerate\n",
" self.__thread = Thread(target=self.__job)\n",
" def start(self):\n",
" self.__isCaptured = False\n",
" self.__frame = None\n",
" self.__frame2c = None\n",
" self.isOpened = True\n",
" self.isStopping = False\n",
" self.__thread.start()\n",
" def restart(self):\n",
" self.__isCaptured = False\n",
" self.__frame = None\n",
" self.__frame2c = None\n",
" self.isOpened = True\n",
" self.isStopping = False\n",
" del self.__thread\n",
" self.__thread = Thread(target=self.__job)\n",
" self.__thread.start()\n",
" def stop(self):\n",
" self.__isCaptured = False\n",
" self.__frame = None\n",
" self.__frame2c = None\n",
" self.isStopping = True\n",
" self.ser.close()\n",
" def capture(self):\n",
" return self.__isCaptured, self.__frame, self.__frame2c\n",
" def __job(self):\n",
" self.__log(\"Opened: {0}, Stopping: {1}\".format(\n",
" self.isOpened,\n",
" self.isStopping,\n",
" ))\n",
" \n",
" self.ser.open()\n",
" self.ser.flushInput() # flush input buffer\n",
" self.ser.flushOutput() # flush output buffer\n",
" \n",
" while self.isOpened and not self.isStopping and self.ser.isOpen():\n",
" \n",
" #write 8 byte data\n",
" self.ser.write(cmd_get_image)\n",
" #read 8 byte data\n",
" response = self.ser.read(9927)\n",
" a = self.bytes2temps(response)\n",
" if(a is None):\n",
" continue\n",
" b = a.copy() # 原始資料\n",
" c = b.copy()\n",
" self.__frame = cv2.resize(self.temp2image(b),(self.frame_width,self.frame_height), interpolation=cv2.INTER_AREA)\n",
" self.__frame2c = self.temp_reshape(c)\n",
" self.__isCaptured = True\n",
" time.sleep(0.15)\n",
" self.__log(\"thermal stop\")\n",
" self.__frame = None\n",
" self.__frame2c = None\n",
" self.__isCaptured = False\n",
" self.ser.close()\n",
" def __log(self, message):\n",
" print(message)\n",
" def bytes2temps(self,bdata):\n",
" data = list(bdata)\n",
" temperatures = np.zeros(80*62)\n",
" if(len(data) == 9927 and data[0] == 0xE1):\n",
" for i in range(4960):\n",
" htemp = data[i*2 + 1]\n",
" ltemp = data[i*2 + 2]\n",
" temperatures[i] = ((htemp*256 + ltemp) - 2731)/10\n",
" return temperatures\n",
" else:\n",
" return None\n",
" def temp_reshape(self,temperatures,height=62,width=80):\n",
" reshaped_temp = np.zeros((height,width))\n",
" for i in range(height):\n",
" for j in range(width):\n",
" reshaped_temp[i][j] = temperatures[width*i + j]\n",
" return reshaped_temp\n",
" \n",
" def temp2image(self,temperatures,width = 80,height = 62,scale = 1):\n",
" output = temperatures\n",
" # scaling\n",
" minValue = output.min()\n",
" maxValue = output.max()\n",
" output = output - minValue\n",
" output = output * 255/ (maxValue - minValue) # Now scaled to 0 - 255\n",
"\n",
" img = np.zeros((height,width,1))\n",
" for i in range(height):\n",
" for j in range(width):\n",
" img[i][j] = output[width*i + j]\n",
"\n",
" # apply colormap\n",
" #dim = (width*scale, height*scale)\n",
" img = img.astype(np.uint8)\n",
" #img = cv2.resize(img, dim, interpolation = cv2.INTER_LINEAR )\n",
" img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n",
" return img"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "protective-concern",
"metadata": {},
"outputs": [],
"source": [
"t = Thermal(width = 80, height = 62, framerate = 4, frame_width = 80*5, frame_height = 62*5, log=None, port=\"/dev/ttyUSB0\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "juvenile-bottom",
"metadata": {},
"outputs": [],
"source": [
"t.start()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "random-lounge",
"metadata": {},
"outputs": [],
"source": [
"t.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "another-career",
"metadata": {},
"outputs": [],
"source": [
"t.capture()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "current-coast",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
import serial
import time
import numpy as np
import cv2
import math
from threading import Thread
from datetime import datetime
# IRM80x62 serial protocol command frames (lists of raw byte values).
# Request one temperature image frame
cmd_get_image = [0XEE,0xE1,0x01,0x55,0xFF,0xFC,0xFD,0xFF]
# Set emissivity (the None slot is presumably replaced with the emissivity
# byte before sending -- TODO confirm against the sensor protocol spec)
cmd_set_emissivity = [0XEE,0xB2,0x55,0xAA,None,0xFF,0xFC,0xFD,0xFF]
# Get the current emissivity
cmd_get_emissivity = [0xEE,0xB5,0xFF,0xFC,0XFD,0xFF]
# Set temperature offset (the None slot is presumably replaced with the
# offset byte before sending -- TODO confirm)
cmd_set_offset = [0xEE,0xB7,0x55,0xAA,None,0XFF,0xFC,0xFD,0xFF]
class Thermal(object):
def __init__(self, width = 80, height = 62, framerate = 4, frame_width = 80*5, frame_height = 62*5, log=None, port="/dev/ttyUSB0"):
self.__log = self.__log if log is None else log
self.__isCaptured = False
self.__frame = None
self.__frame2c = None
self.isOpened = False
self.isStopping = False
self.isNotSleeping=True
# comport 定義
self.ser = serial.Serial()
self.ser.port = port
#921600,N,8,1
self.ser.baudrate = 921600
self.ser.bytesize = serial.EIGHTBITS #number of bits per bytes
self.ser.parity = serial.PARITY_NONE #set parity check
self.ser.stopbits = serial.STOPBITS_ONE #number of stop bits
self.ser.timeout = 0.5 #non-block read 0.5s
self.ser.writeTimeout = 0.5 #timeout for write 0.5s
self.ser.xonxoff = False #disable software flow control
self.ser.rtscts = False #disable hardware (RTS/CTS) flow control
self.ser.dsrdtr = False #disable hardware (DSR/DTR) flow control
self.width = width
self.height = height
self.frame_width = frame_width
self.frame_height = frame_height
self.framerate = framerate
self.__thread = Thread(target=self.__job)
def start(self):
self.__isCaptured = False
self.__frame = None
self.__frame2c = None
self.isOpened = True
self.isStopping = False
self.__thread.start()
def restart(self):
self.__isCaptured = False
self.__frame = None
self.__frame2c = None
self.isOpened = True
self.isStopping = False
del self.__thread
self.__thread = Thread(target=self.__job)
self.__thread.start()
def stop(self):
self.__isCaptured = False
self.__frame = None
self.__frame2c = None
self.isStopping = True
self.ser.close()
def capture(self):
return self.__isCaptured, self.__frame, self.__frame2c
def __job(self):
self.__log("Opened: {0}, Stopping: {1}".format(
self.isOpened,
self.isStopping,
))
self.ser.open()
self.ser.flushInput() # flush input buffer
self.ser.flushOutput() # flush output buffer
while self.isOpened and not self.isStopping and self.ser.isOpen():
#write 8 byte data
self.ser.write(cmd_get_image)
#read 8 byte data
response = self.ser.read(9927)
a = self.bytes2temps(response)
if(a is None):
continue
b = a.copy() # 原始資料
c = b.copy()
self.__frame = cv2.resize(self.temp2image(b),(self.frame_width,self.frame_height), interpolation=cv2.INTER_AREA)
self.__frame2c = self.temp_reshape(c)
self.__isCaptured = True
#time.sleep(0.15)
self.__log("thermal stop")
self.__frame = None
self.__frame2c = None
self.__isCaptured = False
self.ser.close()
    def __log(self, message):
        """Default logger (stdout); __init__ replaces it with a caller-supplied callable when given."""
        print(message)
def bytes2temps(self,bdata):
data = list(bdata)
temperatures = np.zeros(80*62)
if(len(data) == 9927 and data[0] == 0xE1):
for i in range(4960):
htemp = data[i*2 + 1]
ltemp = data[i*2 + 2]
temperatures[i] = ((htemp*256 + ltemp) - 2731)/10
return temperatures
else:
return None
def temp_reshape(self,temperatures,height=62,width=80):
reshaped_temp = np.zeros((height,width))
for i in range(height):
for j in range(width):
reshaped_temp[i][j] = temperatures[width*i + j]
return reshaped_temp
def temp2image(self,temperatures,width = 80,height = 62,scale = 1):
output = temperatures
# scaling
minValue = output.min()
maxValue = output.max()
output = output - minValue
output = output * 255/ (maxValue - minValue) # Now scaled to 0 - 255
img = np.zeros((height,width,1))
for i in range(height):
for j in range(width):
img[i][j] = output[width*i + j]
# apply colormap
#dim = (width*scale, height*scale)
img = img.astype(np.uint8)
#img = cv2.resize(img, dim, interpolation = cv2.INTER_LINEAR )
img = cv2.applyColorMap(img, cv2.COLORMAP_JET)
return img
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import Ui_MainWindow as ui
from IRM80x62_thermal import Thermal
from camera import Camera
from detector import FaceDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
    """Main window: shows the RGB camera and IRM80x62 thermal streams side by
    side, detects faces, and overlays blackbody-corrected face temperatures."""
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.resize(640,480)
        self.label.setScaledContents(True)
        self.label_2.setScaledContents(True)
        # Hardware and model components; paths are relative to the working dir.
        self.thermal = Thermal(width = 80, height = 62, framerate = 4, frame_width = 640, frame_height = 480, log=None, port="/dev/ttyUSB0")
        self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
        self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
        # self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
        self.aligner = BoxAligner(img_h = 480,img_w = 640)
        self.heater = Heater(pwm_pin=12 , target_temp=36.0)
        self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
        # Start the worker threads with a settling pause between each.
        self.thermal.start()
        time.sleep(2)
        self.camera.start()
        time.sleep(2)
        self.heater.start()
        time.sleep(2)
        self.num_frame = 0
        self.painter = QPainter(self)
    def paintEvent(self, QPaintEvent):
        # Paint event: grab the latest camera + thermal frames and redraw the UI.
        ret, frame = self.camera.capture()
        thermal_ret,thermal_frame, thermal_row = self.thermal.capture()
        if(thermal_ret and ret):
            self.num_frame+=1
            # Blackbody reference: hottest pixel of the leftmost 34 columns
            # (where the heater plate sits in the thermal image).
            blackbody_max = thermal_row[:,:34].max()
            heater_temp = self.heater.last_temp
            #heater_temp = 36.5
            print(heater_temp)
            # Face detection
            # if(self.num_frame % 2 == 0):
            if(True):
                stime = time.time()
                boxes, labels, probs = self.detector.predict(frame)
                processing_time = time.time()-stime
                # NOTE(review): label_3 is not created by the generated UI
                # classes shown in this commit -- confirm it exists at runtime.
                self.label_3.setText(str(processing_time))
                # Walk every detection result
                for i in range(boxes.shape[0]):
                    box = boxes[i, :]
                    score = probs[i]
                    if(score > 0.9):
                        #scaled_box = anti.scale_box(img_h=480,img_w=640,box=box)
                        #expanded_face = frame[scaled_box[1]:scaled_box[3],scaled_box[0]:scaled_box[2]].copy()
                        #live_score = anti.predict(expanded_face)
                        # Anti-spoofing disabled: hard-coded to "live".
                        live_score = 0.9
                        if(live_score > 0.9):
                            color = (0, 255, 0)
                        else:
                            color = ( 0, 0, 255)
                        # Map the RGB-space box into thermal-image coordinates.
                        thermal_box = self.aligner.box_aligment([box])[0]
                        cv2.rectangle(thermal_frame, (thermal_box[0], thermal_box[1]), (thermal_box[2], thermal_box[3]), color, 2)
                        cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
                        face_temp = self.max_temperature(thermal_box,thermal_row)
                        corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
                        cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)
            # Overlay the heater-plate temperature on the thermal view.
            cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)
            # Push the frames to the UI labels.
            frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)
            Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
            pixMap=QPixmap.fromImage(Qframe)
            thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
            self.label.setPixmap(pixMap)
            self.label_2.setPixmap(thermal_pixMap)
        self.update()
    def closeEvent(self,event):
        # Runs on window close: stop all hardware worker threads.
        self.thermal.stop()
        self.camera.stop()
        self.heater.stop()
    def max_temperature(self,box,thermal_row,black_h=0,thermal_height=62,thermal_width=80,image_height=480,image_width=640):
        """Return the hottest value inside `box` (RGB-image coords) after scaling
        it into the thermal grid; 0 when the clipped box is empty.
        black_h excludes the top rows occupied by the blackbody reference."""
        scale_ratio_h = thermal_height/image_height
        scale_ratio_w = thermal_width/image_width
        x1 = max(int(box[0]*scale_ratio_w),0)
        y1 = max(int(box[1]*scale_ratio_h),black_h)
        x2 = min(int(box[2]*scale_ratio_w),thermal_width)
        y2 = min(int(box[3]*scale_ratio_h),thermal_height)
        box_temperature = thermal_row[y1:y2,x1:x2]
        if(box_temperature.size != 0):
            return box_temperature.max()
        else:
            return 0
if __name__ == '__main__':
    import os
    import sys

    # Launch the Qt application and show the main window.
    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = Main()
    main_window.show()
    exit_code = qt_app.exec_()
    # Give the hardware worker threads a moment to wind down.
    time.sleep(1)
    # sys.exit(app.exec_())
    # os._exit skips further Python cleanup so lingering non-daemon threads
    # cannot keep the process alive.
    os._exit(exit_code)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout: title row, two video panes (RGB + thermal),
    status labels, and two 3-slot face strips with temperature labels.
    Do not hand-edit beyond comments -- regenerate from mainwindow.ui."""
    def setupUi(self, MainWindow):
        # --- main window sizing ---
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1069, 615)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setAutoFillBackground(False)
        MainWindow.setStyleSheet("")
        # --- central widget and grid layouts ---
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
        self.centralwidget.setSizePolicy(sizePolicy)
        self.centralwidget.setStyleSheet("")
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.gridWidget = QtWidgets.QWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.gridWidget.sizePolicy().hasHeightForWidth())
        self.gridWidget.setSizePolicy(sizePolicy)
        self.gridWidget.setObjectName("gridWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.gridWidget)
        self.gridLayout.setObjectName("gridLayout")
        # --- title label (row 0, spans both columns) ---
        self.title = QtWidgets.QLabel(self.gridWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.title.sizePolicy().hasHeightForWidth())
        self.title.setSizePolicy(sizePolicy)
        self.title.setMinimumSize(QtCore.QSize(528, 0))
        font = QtGui.QFont()
        font.setFamily("標楷體")
        font.setPointSize(36)
        self.title.setFont(font)
        self.title.setStyleSheet("")
        self.title.setAlignment(QtCore.Qt.AlignCenter)
        self.title.setObjectName("title")
        self.gridLayout.addWidget(self.title, 0, 0, 1, 2)
        # --- "abnormal temperature" caption (row 2, right column) ---
        self.label_2 = QtWidgets.QLabel(self.gridWidget)
        font = QtGui.QFont()
        font.setFamily("標楷體")
        font.setPointSize(20)
        self.label_2.setFont(font)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 2, 1, 1, 1)
        # --- strip of 3 high-temperature face snapshots (row 3, right) ---
        self.heigh_face = QtWidgets.QHBoxLayout()
        self.heigh_face.setObjectName("heigh_face")
        self.heigh_face1 = QtWidgets.QLabel(self.gridWidget)
        self.heigh_face1.setText("")
        self.heigh_face1.setObjectName("heigh_face1")
        self.heigh_face.addWidget(self.heigh_face1)
        self.heigh_face2 = QtWidgets.QLabel(self.gridWidget)
        self.heigh_face2.setText("")
        self.heigh_face2.setObjectName("heigh_face2")
        self.heigh_face.addWidget(self.heigh_face2)
        self.heigh_face3 = QtWidgets.QLabel(self.gridWidget)
        self.heigh_face3.setText("")
        self.heigh_face3.setObjectName("heigh_face3")
        self.heigh_face.addWidget(self.heigh_face3)
        self.gridLayout.addLayout(self.heigh_face, 3, 1, 1, 1)
        # --- "normal temperature" caption (row 2, left column) ---
        self.label = QtWidgets.QLabel(self.gridWidget)
        font = QtGui.QFont()
        font.setFamily("標楷體")
        font.setPointSize(20)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 2, 0, 1, 1)
        # --- RGB camera pane (row 1, left) ---
        self.frame = QtWidgets.QLabel(self.gridWidget)
        self.frame.setStyleSheet("")
        self.frame.setText("")
        self.frame.setObjectName("frame")
        self.gridLayout.addWidget(self.frame, 1, 0, 1, 1)
        # --- strip of 3 normal-temperature face snapshots (row 3, left) ---
        self.normal_face = QtWidgets.QHBoxLayout()
        self.normal_face.setObjectName("normal_face")
        self.normal_face1 = QtWidgets.QLabel(self.gridWidget)
        self.normal_face1.setText("")
        self.normal_face1.setObjectName("normal_face1")
        self.normal_face.addWidget(self.normal_face1)
        self.normal_face2 = QtWidgets.QLabel(self.gridWidget)
        self.normal_face2.setText("")
        self.normal_face2.setObjectName("normal_face2")
        self.normal_face.addWidget(self.normal_face2)
        self.normal_face3 = QtWidgets.QLabel(self.gridWidget)
        self.normal_face3.setText("")
        self.normal_face3.setObjectName("normal_face3")
        self.normal_face.addWidget(self.normal_face3)
        self.gridLayout.addLayout(self.normal_face, 3, 0, 1, 1)
        # --- thermal camera pane (row 1, right) ---
        self.thermal_frame = QtWidgets.QLabel(self.gridWidget)
        self.thermal_frame.setStyleSheet("")
        self.thermal_frame.setText("")
        self.thermal_frame.setObjectName("thermal_frame")
        self.gridLayout.addWidget(self.thermal_frame, 1, 1, 1, 1)
        # --- temperature value labels under the normal-face strip (row 4, left) ---
        self.normal_temp = QtWidgets.QHBoxLayout()
        self.normal_temp.setObjectName("normal_temp")
        self.normal_temp1 = QtWidgets.QLabel(self.gridWidget)
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(20)
        self.normal_temp1.setFont(font)
        self.normal_temp1.setText("")
        self.normal_temp1.setAlignment(QtCore.Qt.AlignCenter)
        self.normal_temp1.setObjectName("normal_temp1")
        self.normal_temp.addWidget(self.normal_temp1)
        self.normal_temp2 = QtWidgets.QLabel(self.gridWidget)
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(20)
        self.normal_temp2.setFont(font)
        self.normal_temp2.setText("")
        self.normal_temp2.setAlignment(QtCore.Qt.AlignCenter)
        self.normal_temp2.setObjectName("normal_temp2")
        self.normal_temp.addWidget(self.normal_temp2)
        self.normal_temp3 = QtWidgets.QLabel(self.gridWidget)
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(20)
        self.normal_temp3.setFont(font)
        self.normal_temp3.setText("")
        self.normal_temp3.setAlignment(QtCore.Qt.AlignCenter)
        self.normal_temp3.setObjectName("normal_temp3")
        self.normal_temp.addWidget(self.normal_temp3)
        self.gridLayout.addLayout(self.normal_temp, 4, 0, 1, 1)
        # --- temperature value labels under the high-face strip (row 4, right) ---
        self.heigh_temp = QtWidgets.QHBoxLayout()
        self.heigh_temp.setObjectName("heigh_temp")
        self.heigh_temp1 = QtWidgets.QLabel(self.gridWidget)
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(20)
        self.heigh_temp1.setFont(font)
        self.heigh_temp1.setText("")
        self.heigh_temp1.setAlignment(QtCore.Qt.AlignCenter)
        self.heigh_temp1.setObjectName("heigh_temp1")
        self.heigh_temp.addWidget(self.heigh_temp1)
        self.heigh_temp2 = QtWidgets.QLabel(self.gridWidget)
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(20)
        self.heigh_temp2.setFont(font)
        self.heigh_temp2.setText("")
        self.heigh_temp2.setAlignment(QtCore.Qt.AlignCenter)
        self.heigh_temp2.setObjectName("heigh_temp2")
        self.heigh_temp.addWidget(self.heigh_temp2)
        self.heigh_temp3 = QtWidgets.QLabel(self.gridWidget)
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(20)
        self.heigh_temp3.setFont(font)
        self.heigh_temp3.setText("")
        self.heigh_temp3.setAlignment(QtCore.Qt.AlignCenter)
        self.heigh_temp3.setObjectName("heigh_temp3")
        self.heigh_temp.addWidget(self.heigh_temp3)
        self.gridLayout.addLayout(self.heigh_temp, 4, 1, 1, 1)
        # --- row weights: video panes (row 1) dominate the layout ---
        self.gridLayout.setRowStretch(0, 1)
        self.gridLayout.setRowStretch(1, 8)
        self.gridLayout.setRowStretch(2, 1)
        self.gridLayout.setRowStretch(3, 3)
        self.gridLayout_2.addWidget(self.gridWidget, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # --- menu bar, status bar, and the dual-camera-calibration action ---
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1069, 21))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.action = QtWidgets.QAction(MainWindow)
        self.action.setObjectName("action")
        self.menu.addAction(self.action)
        self.menubar.addAction(self.menu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Assign all user-visible strings (Traditional Chinese UI text).
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.title.setText(_translate("MainWindow", "AIGuardian人臉測溫系統"))
        self.label_2.setText(_translate("MainWindow", "溫度異常"))
        self.label.setText(_translate("MainWindow", "溫度正常"))
        self.menu.setTitle(_translate("MainWindow", "選單"))
        self.action.setText(_translate("MainWindow", "雙相機校正"))
if __name__ == "__main__":
    # Standalone preview harness emitted by pyuic5: shows the bare layout
    # without any camera/thermal logic attached.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout variant without the temperature-label rows or
    the calibration menu action.  Do not hand-edit beyond comments --
    regenerate from mainwindow.ui."""
    def setupUi(self, MainWindow):
        # --- main window sizing ---
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1069, 615)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setAutoFillBackground(False)
        MainWindow.setStyleSheet("")
        # --- central widget and grid layouts ---
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
        self.centralwidget.setSizePolicy(sizePolicy)
        self.centralwidget.setStyleSheet("")
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.gridWidget = QtWidgets.QWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.gridWidget.sizePolicy().hasHeightForWidth())
        self.gridWidget.setSizePolicy(sizePolicy)
        self.gridWidget.setObjectName("gridWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.gridWidget)
        self.gridLayout.setObjectName("gridLayout")
        # --- strip of 3 normal-temperature face snapshots (row 3, left) ---
        self.normal_face = QtWidgets.QHBoxLayout()
        self.normal_face.setObjectName("normal_face")
        self.normal_face1 = QtWidgets.QLabel(self.gridWidget)
        self.normal_face1.setText("")
        self.normal_face1.setObjectName("normal_face1")
        self.normal_face.addWidget(self.normal_face1)
        self.normal_face2 = QtWidgets.QLabel(self.gridWidget)
        self.normal_face2.setText("")
        self.normal_face2.setObjectName("normal_face2")
        self.normal_face.addWidget(self.normal_face2)
        self.normal_face3 = QtWidgets.QLabel(self.gridWidget)
        self.normal_face3.setText("")
        self.normal_face3.setObjectName("normal_face3")
        self.normal_face.addWidget(self.normal_face3)
        self.gridLayout.addLayout(self.normal_face, 3, 0, 1, 1)
        # --- RGB camera pane (row 1, left) ---
        self.frame = QtWidgets.QLabel(self.gridWidget)
        self.frame.setStyleSheet("")
        self.frame.setText("")
        self.frame.setObjectName("frame")
        self.gridLayout.addWidget(self.frame, 1, 0, 1, 1)
        # --- thermal camera pane (row 1, right) ---
        self.thermal_frame = QtWidgets.QLabel(self.gridWidget)
        self.thermal_frame.setStyleSheet("")
        self.thermal_frame.setText("")
        self.thermal_frame.setObjectName("thermal_frame")
        self.gridLayout.addWidget(self.thermal_frame, 1, 1, 1, 1)
        # --- strip of 3 high-temperature face snapshots (row 3, right) ---
        self.heigh_face = QtWidgets.QHBoxLayout()
        self.heigh_face.setObjectName("heigh_face")
        self.heigh_face1 = QtWidgets.QLabel(self.gridWidget)
        self.heigh_face1.setText("")
        self.heigh_face1.setObjectName("heigh_face1")
        self.heigh_face.addWidget(self.heigh_face1)
        self.heigh_face2 = QtWidgets.QLabel(self.gridWidget)
        self.heigh_face2.setText("")
        self.heigh_face2.setObjectName("heigh_face2")
        self.heigh_face.addWidget(self.heigh_face2)
        self.heigh_face3 = QtWidgets.QLabel(self.gridWidget)
        self.heigh_face3.setText("")
        self.heigh_face3.setObjectName("heigh_face3")
        self.heigh_face.addWidget(self.heigh_face3)
        self.gridLayout.addLayout(self.heigh_face, 3, 1, 1, 1)
        # --- captions (row 2) ---
        self.label_2 = QtWidgets.QLabel(self.gridWidget)
        font = QtGui.QFont()
        font.setFamily("標楷體")
        font.setPointSize(20)
        self.label_2.setFont(font)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 2, 1, 1, 1)
        self.label = QtWidgets.QLabel(self.gridWidget)
        font = QtGui.QFont()
        font.setFamily("標楷體")
        font.setPointSize(20)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 2, 0, 1, 1)
        # --- title label (row 0, spans both columns) ---
        self.title = QtWidgets.QLabel(self.gridWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.title.sizePolicy().hasHeightForWidth())
        self.title.setSizePolicy(sizePolicy)
        self.title.setMinimumSize(QtCore.QSize(528, 0))
        font = QtGui.QFont()
        font.setFamily("標楷體")
        font.setPointSize(36)
        self.title.setFont(font)
        self.title.setStyleSheet("")
        self.title.setAlignment(QtCore.Qt.AlignCenter)
        self.title.setObjectName("title")
        self.gridLayout.addWidget(self.title, 0, 0, 1, 2)
        # --- row weights: video panes (row 1) dominate the layout ---
        self.gridLayout.setRowStretch(0, 1)
        self.gridLayout.setRowStretch(1, 8)
        self.gridLayout.setRowStretch(2, 1)
        self.gridLayout.setRowStretch(3, 3)
        self.gridLayout_2.addWidget(self.gridWidget, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # --- menu bar and status bar ---
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1069, 21))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.menubar.addAction(self.menu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Assign all user-visible strings (Traditional Chinese UI text).
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label_2.setText(_translate("MainWindow", "溫度異常"))
        self.label.setText(_translate("MainWindow", "溫度正常"))
        self.title.setText(_translate("MainWindow", "AIGuardian人臉測溫系統"))
        self.menu.setTitle(_translate("MainWindow", "選單"))
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import Ui_MainWindow as ui
from uvc_thermal import Thermal
from camera import Camera
from detector import FaceDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
    """Camera-only debug variant of the main window: the thermal pipeline is
    commented out and only the RGB stream is displayed."""
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.resize(640,480)
        self.label.setScaledContents(True)
        self.label_2.setScaledContents(True)
        #self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
        self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
        #self.thermal.start()
        time.sleep(2)
        self.camera.start()
        time.sleep(2)
        self.num_frame = 0
        self.painter = QPainter(self)
    def paintEvent(self, QPaintEvent):
        # Paint event: grab the latest camera frame and redraw the UI.
        ret, frame = self.camera.capture()
        #thermal_ret,thermal_frame, thermal_row = self.thermal.capture()
        # Thermal capture stubbed out -- pretend it always succeeded.
        thermal_ret = True
        if(thermal_ret and ret):
            self.num_frame+=1
            # Push the frame to the UI label.
            frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            #thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)
            #Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
            pixMap=QPixmap.fromImage(Qframe)
            #thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
            self.label.setPixmap(pixMap)
            #self.label_2.setPixmap(thermal_pixMap)
        self.update()
    def closeEvent(self,event):
        # Runs on window close: stop the camera worker thread.
        #self.thermal.stop()
        self.camera.stop()
if __name__ == '__main__':
    import os
    import sys

    # Build the Qt application and display the camera-preview window.
    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = Main()
    main_window.show()
    exit_code = qt_app.exec_()
    # Let the camera worker thread settle before forcing the exit.
    time.sleep(1)
    # sys.exit(app.exec_())
    # os._exit bypasses normal interpreter teardown so a lingering non-daemon
    # thread cannot keep the process alive.
    os._exit(exit_code)
......@@ -6,7 +6,7 @@ from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import Ui_MainWindow as ui
import NewUi_MainWindow as ui
from uvc_thermal import Thermal
from camera import Camera
......@@ -24,15 +24,34 @@ class Main(QMainWindow, ui.Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.resize(640,480)
self.label.setScaledContents(True)
self.label_2.setScaledContents(True)
#self.resize(640,480)
self.showFullScreen()
# 設置自動填滿大小
self.frame.setScaledContents(True)
self.thermal_frame.setScaledContents(True)
self.normal_face1.setScaledContents(True)
self.normal_face2.setScaledContents(True)
self.normal_face3.setScaledContents(True)
self.heigh_face1.setScaledContents(True)
self.heigh_face2.setScaledContents(True)
self.heigh_face3.setScaledContents(True)
# 顯示人臉
self.normal_faces = [self.normal_face1, self.normal_face2 ,self.normal_face3]
self.normal_temps = [self.normal_temp1, self.normal_temp2 ,self.normal_temp3]
self.heigh_faces = [self.heigh_face1, self.heigh_face2, self.heigh_face3]
self.heigh_temps = [self.heigh_temp1, self.heigh_temp2, self.heigh_temp3]
self.normal_face_count = 0
self.heigh_face_count = 0
self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=None)
self.detector = FaceDetector(model_path="./models/version-RFB/RFB-320.mnn" , input_size=(320,240))
# self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
self.aligner = BoxAligner(img_h = 480,img_w = 640)
self.heater = Heater(pwm_pin=12 , target_temp=35.0)
self.heater = Heater(pwm_pin=12 , target_temp=36.0)
self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
self.thermal.start()
time.sleep(2)
......@@ -42,6 +61,11 @@ class Main(QMainWindow, ui.Ui_MainWindow):
time.sleep(2)
self.num_frame = 0
self.painter = QPainter(self)
self.alert_frequency = 1 # s
self.last_alert = time.time()
# self.multiTracker = cv2.MultiTracker_create()
# self.success = False
def paintEvent(self, QPaintEvent):
# 繪圖事件
ret, frame = self.camera.capture()
......@@ -50,13 +74,38 @@ class Main(QMainWindow, ui.Ui_MainWindow):
self.num_frame+=1
blackbody_max = thermal_row[:,:34].max()
heater_temp = self.heater.last_temp
#heater_temp = 40.0
# 偵測人臉
# if(self.num_frame % 2 == 0):
alert_image = False
if(time.time() - self.last_alert > self.alert_frequency):
alert_image = True
self.last_alert = time.time()
if(True):
stime = time.time()
boxes, labels, probs = self.detector.predict(frame)
processing_time = time.time()-stime
self.label_3.setText(str(processing_time))
# for i in range(boxes.shape[0]):
# box = boxes[i, :]
# x1 = max(box[0],0)
# y1 = max(box[1],0)
# x2 = min(box[2],frame.shape[1])
# y2 = min(box[3],frame.shape[0])
# self.success, bb = self.multiTracker.update(frame)
# print(self.success, bb)
# if(len(bb) == 0):
# self.success = False
# if(self.success):
# # draw tracked objects
# for i, newbox in enumerate(bb):
# p1 = (int(newbox[0]), int(newbox[1]))
# p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
# cv2.rectangle(frame, p1, p2, (0,0,0), 2, -1)
# 取出所有偵測的結果
for i in range(boxes.shape[0]):
box = boxes[i, :]
......@@ -71,12 +120,43 @@ class Main(QMainWindow, ui.Ui_MainWindow):
else:
color = ( 0, 0, 255)
thermal_box = self.aligner.box_aligment([box])[0]
x1 = max(box[0],0)
y1 = max(box[1],0)
x2 = min(box[2],frame.shape[1])
y2 = min(box[3],frame.shape[0])
# if(not self.success):
# self.multiTracker.add(cv2.TrackerTLD(), frame, (x1,y1,x2 - x1,y2 - y1))
face_frame = frame[y1:y2,x1:x2].copy()
face_frame=cv2.cvtColor(face_frame,cv2.COLOR_BGR2RGB)
Qface_frame=QImage(face_frame.data,face_frame.shape[1],face_frame.shape[0],face_frame.shape[1]*3,QImage.Format_RGB888)
face_pixMap=QPixmap.fromImage(Qface_frame)
cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), color, 2)
cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)
corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)
#cv2.putText(frame, "{:.2f}->{:.2f}".format(face_temp*0.0092,corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)
cv2.putText(frame, "{:.2f}".format(corrected_face_temp), ((box[0]+box[2])//2, (box[1]+box[3])//2), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)
cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)
# 判斷溫度是否過高
if(alert_image):
if(corrected_face_temp < 37.0):
self.normal_face_count+=1
index = self.normal_face_count % 3
self.normal_faces[index].setPixmap(face_pixMap)
self.normal_temps[index].setText("{:.2f}".format(corrected_face_temp))
else:
self.heigh_face_count+=1
index = self.heigh_face_count % 3
self.heigh_faces[index].setPixmap(face_pixMap)
self.heigh_temps[index].setText("{:.2f}".format(corrected_face_temp))
# 顯示至UI
frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
......@@ -84,8 +164,10 @@ class Main(QMainWindow, ui.Ui_MainWindow):
Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
pixMap=QPixmap.fromImage(Qframe)
thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
self.label.setPixmap(pixMap)
self.label_2.setPixmap(thermal_pixMap)
self.frame.setPixmap(pixMap)
self.thermal_frame.setPixmap(thermal_pixMap)
self.update()
def closeEvent(self,event):
# 關閉程式後執行
......
#!/usr/bin/env python
# coding: utf-8
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import NewUi_MainWindow as ui
from uvc_thermal import Thermal
from camera import Camera
from detector import FaceDetector
from anti import AntiSpoofing
from heater import Heater
from aligner import BoxAligner
from correcter import Correcter
from tracker import Tracker
import cv2
import numpy as np
import time
class Main(QMainWindow, ui.Ui_MainWindow):
    def __init__(self):
        """Build the full-screen tracking UI and start all hardware workers."""
        super().__init__()
        self.setupUi(self)
        #self.resize(640,480)
        self.showFullScreen()
        # Make display widgets scale their pixmaps to fill the layout cells.
        self.frame.setScaledContents(True)
        self.thermal_frame.setScaledContents(True)
        self.normal_face1.setScaledContents(True)
        self.normal_face2.setScaledContents(True)
        self.normal_face3.setScaledContents(True)
        self.heigh_face1.setScaledContents(True)
        self.heigh_face2.setScaledContents(True)
        self.heigh_face3.setScaledContents(True)
        # Rotating 3-slot strips for normal / high temperature face snapshots.
        self.normal_faces = [self.normal_face1, self.normal_face2 ,self.normal_face3]
        self.heigh_faces = [self.heigh_face1, self.heigh_face2, self.heigh_face3]
        self.normal_face_count = 0
        self.heigh_face_count = 0
        # Load the hardware and model components.
        self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
        self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=None)
        self.detector = FaceDetector(model_path="./models/version-RFB/RFB-320.mnn" , input_size=(320,240))
        # self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
        self.aligner = BoxAligner(img_h = 480,img_w = 640)
        self.heater = Heater(pwm_pin=12 , target_temp=40.0)
        self.correcter = Correcter(model_path="../thermal-tk/20210421_correcter.sav")
        # Create Object Tracker
        self.tracker = Tracker(50, 10, 5, 0)
        # Variables initialization
        self.skip_frame_count = 0
        self.track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                             (0, 255, 255), (255, 0, 255), (255, 127, 255),
                             (127, 0, 255), (127, 0, 127)]
        self.last_tracker_id = []
        # Start worker threads with a settling pause between each.
        self.thermal.start()
        time.sleep(2)
        self.camera.start()
        time.sleep(2)
        self.heater.start()
        time.sleep(2)
        self.num_frame = 0
        self.painter = QPainter(self)
        # self.multiTracker = cv2.MultiTracker_create()
        # self.success = False
    def paintEvent(self, QPaintEvent):
        # Paint event: detect faces, feed them to the Kalman tracker, and
        # redraw both video panes plus the face snapshot strips.
        ret, frame = self.camera.capture()
        thermal_ret,thermal_frame, thermal_row = self.thermal.capture()
        if(thermal_ret and ret):
            self.num_frame+=1
            # Blackbody reference: hottest pixel of the leftmost 34 columns.
            blackbody_max = thermal_row[:,:34].max()
            heater_temp = self.heater.last_temp
            #heater_temp = 40.0
            # Face detection
            # if(self.num_frame % 2 == 0):
            if(True):
                stime = time.time()
                boxes, labels, probs = self.detector.predict(frame)
                processing_time = time.time()-stime
                # Collect every confident detection and add it to the tracker.
                centers = []
                shapes = []
                temperatures = []
                temperature_locs = []
                for i in range(boxes.shape[0]):
                    box = boxes[i, :]
                    score = probs[i]
                    if(score > 0.9):
                        # Map the RGB-space box into thermal-image coordinates.
                        thermal_box = self.aligner.box_aligment([box])[0]
                        x1 = max(box[0],0)
                        y1 = max(box[1],0)
                        x2 = min(box[2],frame.shape[1])
                        y2 = min(box[3],frame.shape[0])
                        centers.append([(x1+x2)/2,(y1+y2)/2])
                        shapes.append([(y2 - y1),(x2 - x1)]) # h,w
                        face_temp = self.max_temperature(thermal_box,thermal_row,black_h=32)
                        corrected_face_temp = self.correcter.predict(x_detect_k= face_temp, x_black_k= blackbody_max, x_black_c= heater_temp)
                        #temperatures.append("{:.2f}->{:.2f}".format(face_temp*0.0092,corrected_face_temp))
                        temperatures.append(corrected_face_temp)
                        temperature_locs.append(thermal_box)
                if (len(centers) > 0):
                    # Track object using Kalman Filter
                    self.tracker.Update(centers, shapes, temperatures, temperature_locs)
                    # For identified object tracks draw tracking line
                    # Use various colors to indicate different track_id
                    temp_tracker_id = []
                    for i in range(len(self.tracker.tracks)):
                        if (len(self.tracker.tracks[i].trace) > 1):
                            cx,cy = self.tracker.tracks[i].prediction[0]
                            tid = self.tracker.tracks[i].track_id
                            # NOTE(review): tracks[0].w/h is used while drawing
                            # track i -- looks like it should be tracks[i]; confirm.
                            tx1 = cx - (self.tracker.tracks[0].w)/2
                            ty1 = cy - (self.tracker.tracks[0].h)/2
                            tx2 = cx + (self.tracker.tracks[0].w)/2
                            ty2 = cy + (self.tracker.tracks[0].h)/2
                            thermal_box = self.aligner.box_aligment([[tx1,ty1,tx2,ty2]])[0]
                            #cv2.rectangle(frame, (int(tx1), int(ty1)), (int(tx2), int(ty2)), track_colors[clr], 2)
                            color = (0, 255, 0)
                            # NOTE(review): y1/y2/x1/x2 here are leftovers from
                            # the LAST detection-loop iteration, not this track's
                            # box -- the snapshot may crop the wrong face; confirm.
                            face_frame = frame[y1:y2,x1:x2].copy()
                            face_frame=cv2.cvtColor(face_frame,cv2.COLOR_BGR2RGB)
                            Qface_frame=QImage(face_frame.data,face_frame.shape[1],face_frame.shape[0],face_frame.shape[1]*3,QImage.Format_RGB888)
                            face_pixMap=QPixmap.fromImage(Qface_frame)
                            cv2.rectangle(thermal_frame, (thermal_box[0]//4, thermal_box[1]//4), (thermal_box[2]//4, thermal_box[3]//4), color, 2)
                            cv2.rectangle(frame, (int(tx1), int(ty1)), (int(tx2), int(ty2)), color, 2)
                            cv2.putText(frame, "{:.2f}".format(self.tracker.tracks[i].temp), (int(cx),int(cy)), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 255, 255), 1, cv2.LINE_AA)
                            # Route the snapshot by temperature; only newly seen
                            # track ids get a slot, so each face is logged once.
                            temp_tracker_id.append(self.tracker.tracks[i].track_id)
                            if(not self.tracker.tracks[i].track_id in self.last_tracker_id):
                                if(self.tracker.tracks[i].temp < 37.0):
                                    self.normal_face_count+=1
                                    index = self.normal_face_count % 3
                                    self.normal_faces[index].setPixmap(face_pixMap)
                                else:
                                    self.heigh_face_count+=1
                                    index = self.heigh_face_count % 3
                                    self.heigh_faces[index].setPixmap(face_pixMap)
                    self.last_tracker_id = temp_tracker_id
                # Overlay the heater-plate temperature on the thermal view.
                cv2.putText(thermal_frame, "{:.2f}".format(heater_temp), (320//4, 64//4), cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 0), 1, cv2.LINE_AA)
            # Push the frames to the UI labels.
            frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            thermal_frame=cv2.cvtColor(thermal_frame,cv2.COLOR_BGR2RGB)
            Qframe=QImage(frame.data,frame.shape[1],frame.shape[0],frame.shape[1]*3,QImage.Format_RGB888)
            Qthermal_frame=QImage(thermal_frame.data,thermal_frame.shape[1],thermal_frame.shape[0],thermal_frame.shape[1]*3,QImage.Format_RGB888)
            pixMap=QPixmap.fromImage(Qframe)
            thermal_pixMap=QPixmap.fromImage(Qthermal_frame)
            self.frame.setPixmap(pixMap)
            self.thermal_frame.setPixmap(thermal_pixMap)
        self.update()
def closeEvent(self,event):
# 關閉程式後執行
self.thermal.stop()
self.camera.stop()
self.heater.stop()
def max_temperature(self,box,thermal_row,black_h=0,thermal_height=120,thermal_width=160,image_height=480,image_width=640):
scale_ratio_h = thermal_height/image_height
scale_ratio_w = thermal_width/image_width
x1 = max(int(box[0]*scale_ratio_w),0)
y1 = max(int(box[1]*scale_ratio_h),black_h)
x2 = min(int(box[2]*scale_ratio_w),thermal_width)
y2 = min(int(box[3]*scale_ratio_h),thermal_height)
box_temperature = thermal_row[y1:y2,x1:x2]
if(box_temperature.size != 0):
return box_temperature.max()
else:
return 0
if __name__ == '__main__':
    import sys
    import os
    # Launch the Qt application and enter the UI event loop.
    app = QtWidgets.QApplication(sys.argv)
    window = Main()
    window.show()
    exit_code = app.exec_()
    time.sleep(1)  # give worker threads a moment to wind down
    # Hard exit: lingering capture/heater threads can otherwise keep the
    # interpreter alive. (sys.exit(app.exec_()) is the conventional form.)
    os._exit(exit_code)
......@@ -31,7 +31,7 @@ class Main(QMainWindow, ui.Ui_MainWindow):
self.rgb_frame.setScaledContents(True)
self.thermal_frame.setScaledContents(True)
self.thermal = Thermal(width=160, height=120, framerate=9, frame_width=160, frame_height=120, log=None)
self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)
self.camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=None)
self.detector = FaceDetector(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn" , input_size=(320,240))
# self.detector = FaceMaskDetector(model_path="/home/allen/RetinaFaceMask/mnn_models/RetinaFaceMask_mobileNet0.25.mnn" , input_size=(640,640))
# self.anti = AntiSpoofing(model_path="../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn")
......@@ -64,6 +64,7 @@ class Main(QMainWindow, ui.Ui_MainWindow):
if(thermal_ret and ret):
blackbody_max = thermal_row[:,:self.black_line].max()
heater_temp = self.heater.last_temp
heater_temp = 36.5
# 偵測人臉
str_time = self.osd.str_time
boxes, labels, probs = self.detector.predict(frame)
......
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "amber-cookbook",
"metadata": {},
"outputs": [],
"source": [
"import cv2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "stuck-origin",
"metadata": {},
"outputs": [],
"source": [
"tracker = cv2.TrackerGOTURN_create()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "associate-learning",
"metadata": {},
"outputs": [],
"source": [
"# Create MultiTracker object\n",
"multiTracker = cv2.MultiTracker_create()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "naked-german",
"metadata": {},
"outputs": [],
"source": [
"r,f = multiTracker.update(None)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "innovative-burns",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(f)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "authentic-disclosure",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
......@@ -5,9 +5,20 @@ class BoxAligner:
def __init__(self,img_h,img_w):
self.img_h = img_h
self.img_w = img_w
self.M = np.array([[ 8.13802980e-01, -2.63523694e-02, 9.30324875e+01],
[ 2.10292692e-02, 7.84319221e-01, 7.70246127e+01],
[ 1.48500793e-04, -1.53618915e-04, 1.00000000e+00]])
# 120*160 usb theraml
# self.M = np.array([[ 8.13802980e-01, -2.63523694e-02, 9.30324875e+01],
# [ 2.10292692e-02, 7.84319221e-01, 7.70246127e+01],
# [ 1.48500793e-04, -1.53618915e-04, 1.00000000e+00]])
self.M = np.array([[ 9.71130799e-01, 1.34161454e-02, 1.56875961e-01],
[-2.63476275e-03, 9.22184449e-01, 5.15727605e-01],
[-3.24243700e-05, -1.80459430e-04, 1.00000000e+00]])
# IRM 80*62
# self.M = np.array([[ 9.85839634e-01, 7.87864361e-01, -1.84091518e+02],
# [-2.12006655e-02, 2.12582929e+00, -2.34772300e+02],
# [-1.22150683e-03, 1.38217520e-03, 1.00000000e+00]])
def box_aligment(self,boxes):
aliged_boxes = []
for b in boxes:
......@@ -21,4 +32,4 @@ class BoxAligner:
return aliged_boxes
def get_omography_matrix(self,rgb_points, thermal_points):
h, mask = cv2.findHomography(rgb_points, thermal_points, cv2.RANSAC)
self.M = h
\ No newline at end of file
self.M = h
......@@ -37,7 +37,7 @@ class Camera(object):
if(self.flip is not None):
self.__frame = cv2.flip(frame, self.flip)
else:
self.__frame = frame, 1
self.__frame = frame
self.__isCaptured = ret
time.sleep(1 / fps)
dev.release()
......
'''
File name : common.py
File Description : Common debug functions
Author : Srini Ananthakrishnan
Date created : 07/14/2017
Date last modified: 07/14/2017
Python Version : 2.7
'''
def dprint(*args, **kwargs):
    """No-op debug print.

    Accepts the same arguments as the built-in ``print`` but discards
    them; re-enable the commented call below to get debug output.

    Args:
        args : values that would be printed
        kwargs : keyword options forwarded to ``print``
    Return:
        None.
    """
    # print(*args, **kwargs)  # uncomment to enable debug output
    return None
......@@ -63,10 +63,12 @@ class Correcter(object):
return clf
def predict(self,x_detect_k,x_black_k,x_black_c):
# return x_black_c - self.clf.predict([[x_detect_k,x_black_k]])[0]
return self.clf.predict([[x_detect_k,x_black_k,x_black_c]])[0]
# SVR公式
return 0.0092*x_detect_k
#return self.clf.predict([[x_detect_k,x_black_k,x_black_c]])[0]
# a = 1.6806e-05
# b = -0.1183
# c = 238.9117
# p_black_c = a*pow(x_black_k,2)+b*x_black_k+c
# indiv = x_black_c - p_black_c
# return a*pow(x_detect_k,2)+b*x_detect_k+c+indiv
\ No newline at end of file
# return a*pow(x_detect_k,2)+b*x_detect_k+c+indiv
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "timely-alliance",
"metadata": {},
"outputs": [],
"source": [
"from camera import Camera\n",
"from detector import FaceDetector\n",
"\n",
"import cv2\n",
"import numpy as np\n",
"import time\n",
"import copy\n",
"\n",
"from tracker import Tracker"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "golden-strip",
"metadata": {},
"outputs": [],
"source": [
"camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=None)\n",
"detector = FaceDetector(model_path=\"./models/version-RFB/RFB-320.mnn\" , input_size=(320,240))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "correct-region",
"metadata": {},
"outputs": [],
"source": [
"camera.start()\n",
"time.sleep(2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "clear-rally",
"metadata": {},
"outputs": [],
"source": [
"# Create Object Tracker\n",
"tracker = Tracker(160, 30, 5, 100)\n",
"\n",
"# Variables initialization\n",
"skip_frame_count = 0\n",
"track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),\n",
" (0, 255, 255), (255, 0, 255), (255, 127, 255),\n",
" (127, 0, 255), (127, 0, 127)]\n",
"pause = False"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "premium-device",
"metadata": {},
"outputs": [],
"source": [
"\n",
"\n",
"# Infinite loop to process video frames\n",
"while(True):\n",
" # Capture frame-by-frame\n",
" ret, frame = camera.capture()\n",
"\n",
" # Make copy of original frame\n",
" orig_frame = copy.copy(frame)\n",
"\n",
" # Skip initial frames that display logo\n",
" if (skip_frame_count < 15):\n",
" skip_frame_count += 1\n",
" continue\n",
"\n",
" # Detect and return centeroids of the objects in the frame\n",
" #centers = detector.Detect(frame)\n",
" boxes, labels, probs = detector.predict(frame)\n",
"\n",
" # If centroids are detected then track them\n",
" if (len(centers) > 0):\n",
"\n",
" # Track object using Kalman Filter\n",
" tracker.Update(centers)\n",
"\n",
" # For identified object tracks draw tracking line\n",
" # Use various colors to indicate different track_id\n",
" for i in range(len(tracker.tracks)):\n",
" if (len(tracker.tracks[i].trace) > 1):\n",
" for j in range(len(tracker.tracks[i].trace)-1):\n",
" # Draw trace line\n",
" x1 = tracker.tracks[i].trace[j][0][0]\n",
" y1 = tracker.tracks[i].trace[j][1][0]\n",
" x2 = tracker.tracks[i].trace[j+1][0][0]\n",
" y2 = tracker.tracks[i].trace[j+1][1][0]\n",
" clr = tracker.tracks[i].track_id % 9\n",
" cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),\n",
" track_colors[clr], 2)\n",
"\n",
" # Display the resulting tracking frame\n",
" cv2.imshow('Tracking', frame)\n",
"\n",
" # Display the original frame\n",
" cv2.imshow('Original', orig_frame)\n",
"\n",
" # Slower the FPS\n",
" cv2.waitKey(50)\n",
"\n",
" # Check for key strokes\n",
" k = cv2.waitKey(50) & 0xff\n",
" if k == 27: # 'esc' key has been pressed, exit program.\n",
" break\n",
" if k == 112: # 'p' has been pressed. this will pause/resume the code.\n",
" pause = not pause\n",
" if (pause is True):\n",
" print(\"Code is paused. Press 'p' to resume..\")\n",
" while (pause is True):\n",
" # stay in this loop until\n",
" key = cv2.waitKey(30) & 0xff\n",
" if key == 112:\n",
" pause = False\n",
" print(\"Resume code..!!\")\n",
" break\n",
"\n",
"# When everything done, release the capture\n",
"cap.release()\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "intense-incident",
"metadata": {},
"outputs": [],
"source": [
"# Infinite loop to process video frames\n",
"while(True):\n",
" # Capture frame-by-frame\n",
" ret, frame = camera.capture()\n",
"\n",
" # Make copy of original frame\n",
" orig_frame = copy.copy(frame)\n",
"\n",
" # Skip initial frames that display logo\n",
" if (skip_frame_count < 15):\n",
" skip_frame_count += 1\n",
" continue\n",
"\n",
" # Detect and return centeroids of the objects in the frame\n",
" #centers = detector.Detect(frame)\n",
" boxes, labels, probs = detector.predict(frame)\n",
" centers = []\n",
" shapes = []\n",
" temperatures = []\n",
" temperature_locs = []\n",
" for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" if(score > 0.9):\n",
" x1 = max(box[0],0)\n",
" y1 = max(box[1],0)\n",
" x2 = min(box[2],frame.shape[1])\n",
" y2 = min(box[3],frame.shape[0])\n",
" centers.append([(x1+x2)/2,(y1+y2)/2])\n",
" shapes.append([(y2 - y1),(x2 - x1)]) # w,h\n",
" temperatures.append(35.5)\n",
" temperature_locs.append(33)\n",
" \n",
" \n",
" # If centroids are detected then track them\n",
" if (len(centers) > 0):\n",
"\n",
" # Track object using Kalman Filter\n",
" tracker.Update(centers, shapes, temperatures, temperature_locs)\n",
"\n",
" # For identified object tracks draw tracking line\n",
" # Use various colors to indicate different track_id\n",
" for i in range(len(tracker.tracks)):\n",
" if (len(tracker.tracks[i].trace) > 1):\n",
" cx,cy = tracker.tracks[i].prediction[0]\n",
" clr = tracker.tracks[i].track_id % 9\n",
" tx1 = cx - (tracker.tracks[0].w)/2\n",
" ty1 = cy - (tracker.tracks[0].h)/2\n",
" tx2 = cx + (tracker.tracks[0].w)/2\n",
" ty2 = cy + (tracker.tracks[0].h)/2\n",
" cv2.rectangle(frame, (int(tx1), int(ty1)), (int(tx2), int(ty2)), track_colors[clr], 2)\n",
" for j in range(len(tracker.tracks[i].trace)-1):\n",
" # Draw trace line\n",
" x1 = tracker.tracks[i].trace[j][0][0]\n",
" y1 = tracker.tracks[i].trace[j][1][0]\n",
" x2 = tracker.tracks[i].trace[j+1][0][0]\n",
" y2 = tracker.tracks[i].trace[j+1][1][0]\n",
" \n",
" \n",
" \n",
" clr = tracker.tracks[i].track_id % 9\n",
" \n",
" cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),\n",
" track_colors[clr], 2)\n",
"\n",
" # Display the resulting tracking frame\n",
" cv2.imshow('Tracking', frame)\n",
" # Display the original frame\n",
" cv2.imshow('Original', orig_frame)\n",
"\n",
" # Slower the FPS\n",
" cv2.waitKey(50)\n",
"\n",
" # Check for key strokes\n",
" k = cv2.waitKey(50) & 0xff\n",
" if k == 27: # 'esc' key has been pressed, exit program.\n",
" break\n",
" if k == 112: # 'p' has been pressed. this will pause/resume the code.\n",
" pause = not pause\n",
" if (pause is True):\n",
" print(\"Code is paused. Press 'p' to resume..\")\n",
" while (pause is True):\n",
" # stay in this loop until\n",
" key = cv2.waitKey(30) & 0xff\n",
" if key == 112:\n",
" pause = False\n",
" print(\"Resume code..!!\")\n",
" break\n",
"\n",
"# When everything done, release the capture\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "julian-wisconsin",
"metadata": {},
"outputs": [],
"source": [
"tracker.tracks[0].temp_loc"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "blond-lewis",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "intermediate-jordan",
"metadata": {},
"outputs": [],
"source": [
"boxes, labels, probs"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "peripheral-science",
"metadata": {},
"outputs": [],
"source": [
"centers = []\n",
"shapes = []\n",
"temperatures = []\n",
"temperature_locs = []\n",
"for i in range(boxes.shape[0]):\n",
" box = boxes[i, :]\n",
" score = probs[i]\n",
" if(score > 0.9):\n",
" x1 = max(box[0],0)\n",
" y1 = max(box[1],0)\n",
" x2 = min(box[2],frame.shape[1])\n",
" y2 = min(box[3],frame.shape[0])\n",
" centers.append([(x1+x2)/2,(y1+y2)/2])\n",
" shapes.append([(x2 - x1),(y2 - y1)]) # w,h\n",
" temperatures.append(35.5)\n",
" temperature_locs.append(33)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "signed-czech",
"metadata": {},
"outputs": [],
"source": [
"shapes"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "choice-snowboard",
"metadata": {},
"outputs": [],
"source": [
"tracker.Update(centers, shapes, temperatures, temperature_locs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "perceived-reservation",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
......@@ -5,4 +5,6 @@ sudo chmod 777 /dev/video*
sudo chmod 777 /dev/spidev0.*
sudo chmod 777 /dev/gpio*
\ No newline at end of file
sudo chmod 777 /dev/gpio*
sudo chmod 777 /dev/ttyUSB0
'''
File name : kalman_filter.py
File Description : Kalman Filter Algorithm Implementation
Author : Srini Ananthakrishnan
Date created : 07/14/2017
Date last modified: 07/16/2017
Python Version : 2.7
'''
# Import python libraries
import numpy as np
class KalmanFilter(object):
    """Kalman Filter tracking the estimated state of the system and the
    variance (uncertainty) of that estimate.

    ``predict`` and ``correct`` implement the standard predict/update
    cycle. Reference: https://en.wikipedia.org/wiki/Kalman_filter

    NOTE(review): the state vector ``u`` is 2x1 and the observation
    ``b`` is initialized to (0, 255); F = [[1, dt], [0, 1]] couples the
    two state components as position/velocity, while the caller (see
    tracker usage) feeds (x, y) centroids — confirm the intended model.
    """

    def __init__(self):
        """Initialize the filter matrices and state.

        Args:
            None
        Return:
            None
        """
        self.dt = 0.005  # delta time between steps (fixed)

        self.A = np.array([[1, 0], [0, 1]])  # observation matrix (identity)
        self.u = np.zeros((2, 1))  # state vector (previous estimate)

        # observation vector; initialized to an arbitrary default
        self.b = np.array([[0], [255]])

        self.P = np.diag((3.0, 3.0))  # state covariance matrix
        self.F = np.array([[1.0, self.dt], [0.0, 1.0]])  # state transition mat

        self.Q = np.eye(self.u.shape[0])  # process noise covariance
        self.R = np.eye(self.b.shape[0])  # observation noise covariance
        # last predicted/corrected state, reused when a detection is missing
        self.lastResult = np.array([[0], [255]])

    def predict(self):
        """Predict state vector u and covariance P.

        Equations:
            u'_{k|k-1} = F u'_{k-1|k-1}
            P_{k|k-1} = F P_{k-1|k-1} F.T + Q

        NOTE(review): the predicted state is passed through ``np.round``,
        snapping estimates to whole pixels each step — kept as-is since
        downstream code may rely on integer coordinates.

        Return:
            vector of predicted state estimate
        """
        # Predicted state estimate (rounded to integers, see note above)
        self.u = np.round(np.dot(self.F, self.u))
        # Predicted estimate covariance
        self.P = np.dot(self.F, np.dot(self.P, self.F.T)) + self.Q
        self.lastResult = self.u  # cache last predicted result
        return self.u

    def correct(self, b, flag):
        """Correct (update) state vector u and covariance P.

        Equations:
            C = A P_{k|k-1} A.T + R
            K_{k} = P_{k|k-1} A.T C^-1
            u'_{k|k} = u'_{k|k-1} + K_{k}(b_{k} - A u'_{k|k-1})
            P_{k|k} = P_{k|k-1} - K_{k} C K.T

        Args:
            b: vector of observations
            flag: if true, update using the detection ``b``; otherwise
                reuse the last predicted result (no detection this frame)
        Return:
            corrected state vector u
        """
        if not flag:  # no detection: update against the last prediction
            self.b = self.lastResult
        else:  # update using the supplied detection
            self.b = b
        # Innovation covariance and Kalman gain
        C = np.dot(self.A, np.dot(self.P, self.A.T)) + self.R
        K = np.dot(self.P, np.dot(self.A.T, np.linalg.inv(C)))
        # State update (rounded to integers, matching predict())
        self.u = np.round(self.u + np.dot(K, (self.b - np.dot(self.A,
                                                              self.u))))
        self.P = self.P - np.dot(K, np.dot(C, K.T))
        self.lastResult = self.u
        return self.u
......@@ -200,4 +200,4 @@ class FaceMaskDetector:
)
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :]
\ No newline at end of file
return box_scores[picked, :]
'''
File name : object_tracking.py
File Description : Multi Object Tracker Using Kalman Filter
and Hungarian Algorithm
Author : Srini Ananthakrishnan
Date created : 07/14/2017
Date last modified: 07/16/2017
Python Version : 2.7
'''
# Import python libraries
import cv2
import copy
from detectors import Detectors
from tracker import Tracker
def main():
    """Main function for multi object tracking.

    Reads frames from a video file, detects object centroids, tracks them
    with a Kalman-filter tracker, and draws per-track trace lines.

    Usage:
        $ python objectTracking.py
    Pre-requisite:
        - Numpy
        - SciPy
        - OpenCV for Python
    Args:
        None
    Return:
        None
    """
    # Create opencv video capture object
    cap = cv2.VideoCapture('data/TrackingBugs.mp4')

    # Create Object Detector
    detector = Detectors()

    # Create Object Tracker
    tracker = Tracker(160, 30, 5, 100)

    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False

    # Loop to process video frames
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        # BUG FIX: stop cleanly when the stream ends or a read fails;
        # previously a None frame propagated into copy/Detect and the
        # loop never terminated on end-of-file.
        if not ret:
            break

        # Make copy of original frame
        orig_frame = copy.copy(frame)

        # Skip initial frames that display logo
        if (skip_frame_count < 15):
            skip_frame_count += 1
            continue

        # Detect and return centroids of the objects in the frame
        centers = detector.Detect(frame)

        # If centroids are detected then track them
        if (len(centers) > 0):

            # Track object using Kalman Filter
            tracker.Update(centers)

            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace)-1):
                        # Draw trace line between consecutive positions
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j+1][0][0]
                        y2 = tracker.tracks[i].trace[j+1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),
                                 track_colors[clr], 2)

            # Display the resulting tracking frame
            cv2.imshow('Tracking', frame)

        # Display the original frame
        cv2.imshow('Original', orig_frame)

        # Slow the playback down (NOTE: together with the key poll below
        # this waits ~100 ms per frame)
        cv2.waitKey(50)

        # Check for key strokes
        k = cv2.waitKey(50) & 0xff
        if k == 27:  # 'esc' key has been pressed, exit program.
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if (pause is True):
                print("Code is paused. Press 'p' to resume..")
                while (pause is True):
                    # stay in this loop until resumed
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    main()  # entry point: run the multi-object tracking demo
......@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "alternative-throat",
"metadata": {},
"outputs": [],
......@@ -21,7 +21,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "direct-vulnerability",
"metadata": {},
"outputs": [],
......@@ -39,13 +39,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "pending-pressure",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"priors nums:4420\n"
]
}
],
"source": [
"thermal = Thermal(width=160, height=120, framerate=9, frame_width=640, frame_height=480, log=None)\n",
"camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=1)\n",
"camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=None)\n",
"detector = FaceDetector(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/version-RFB/RFB-320.mnn\" , input_size=(320,240))\n",
"# anti = AntiSpoofing(model_path=\"../Ultra-Light-Fast-Generic-Face-Detector-1MB/MNN/model/4_0_0_80x80_MiniFASNetV1SE.mnn\")\n",
"aligner = BoxAligner(img_h = 480,img_w = 640)\n",
......@@ -55,10 +63,63 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "moved-capacity",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"device opened!\n",
"Version gpp: 3.1.51 dsp: 3.1.51\n",
"FLIR part #: b'500-0726-01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n",
"FLIR serial #: b'@\\xb8-\\x00\\x00\\x00\\x00\\x00'\n",
"format: b'UYVY'\n",
" frame 160x120 @ 9fps\n",
"format: b'Y16 '\n",
" frame 160x120 @ 9fps\n",
" frame 160x122 @ 9fps\n",
"format: b'Y8 '\n",
" frame 160x120 @ 9fps\n",
"format: b'RGBP'\n",
" frame 160x120 @ 9fps\n",
"format: b'}\\xeb6\\xe4'\n",
" frame 160x120 @ 9fps\n",
"done starting stream, displaying settings\n",
"Shutter Info:\n",
" 1\t shutterMode\n",
" 0\t tempLockoutState\n",
" 1\t videoFreezeDuringFFC\n",
" 0\t ffcDesired\n",
" 81996\t elapsedTimeSinceLastFfc\n",
" 180000\t desiredFfcPeriod\n",
" True\t explicitCmdToOpen\n",
" 0\t desiredFfcTempDelta\n",
" 150\t imminentDelay\n",
"\n",
"resetting settings to default\n",
"controlID: 16\n",
"controlID: 19\n",
"controlID: 17\n",
"controlID: 5\n",
"current settings\n",
"Shutter Info:\n",
" 1\t shutterMode\n",
" 0\t tempLockoutState\n",
" 1\t videoFreezeDuringFFC\n",
" 1\t ffcDesired\n",
" 82034\t elapsedTimeSinceLastFfc\n",
" 180000\t desiredFfcPeriod\n",
" True\t explicitCmdToOpen\n",
" 0\t desiredFfcTempDelta\n",
" 150\t imminentDelay\n",
"\n",
"Opened: True, Stopping: False, Duration: 2700\n",
"controlID: 17\n"
]
}
],
"source": [
"thermal.start()\n",
"camera.start()\n",
......@@ -528,7 +589,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
"version": "3.8.10"
}
},
"nbformat": 4,
......
......@@ -292,7 +292,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
"version": "3.8.10"
}
},
"nbformat": 4,
......
'''
File name : tracker.py
File Description : Tracker Using Kalman Filter & Hungarian Algorithm
Author : Srini Ananthakrishnan
Date created : 07/14/2017
Date last modified: 07/16/2017
Python Version : 2.7
'''
# Import python libraries
import numpy as np
from kalman_filter import KalmanFilter
from common import dprint
from scipy.optimize import linear_sum_assignment
class Track(object):
    """A single tracked object: Kalman-filter state plus per-face metadata.

    Attributes:
        None
    """

    def __init__(self, prediction, trackIdCount):
        """Initialize a new track.

        Args:
            prediction: predicted centroid (x, y) of the object to track
            trackIdCount: identifier assigned to this track
        Return:
            None
        """
        # Identity of this track and its Kalman filter instance.
        self.track_id = trackIdCount
        self.KF = KalmanFilter()
        # Predicted centroid (x, y) and motion history.
        self.prediction = np.asarray(prediction)
        self.skipped_frames = 0  # consecutive frames with no detection
        self.trace = []          # trace path (position history)
        # Bounding-box size, filled in by the tracker on update.
        self.h = 0
        self.w = 0
        # Temperature readout, its location, and alarm state.
        self.temp = 0.0
        self.temp_loc = None
        self.temp_isAlarmed = False
        # Face-identity bookkeeping.
        self.whois = "unknown"
        self.known = False
        self.face_file = ""
class Tracker(object):
    """Tracker class that updates track vectors of objects tracked.

    Attributes:
        None
    """

    def __init__(self, dist_thresh, max_frames_to_skip, max_trace_length,
                 trackIdCount):
        """Initialize variables used by Tracker class.

        Args:
            dist_thresh: distance threshold. When exceeded, the assignment is
                discarded and the detection may start a new track
            max_frames_to_skip: maximum allowed consecutive frames for which a
                track may go undetected before it is removed
            max_trace_length: trace path history length
            trackIdCount: starting identification number for track objects
        Return:
            None
        """
        self.dist_thresh = dist_thresh
        self.max_frames_to_skip = max_frames_to_skip
        self.max_trace_length = max_trace_length
        self.tracks = []
        self.trackIdCount = trackIdCount

    def Update(self, detections, frames_shape, temperature, temperature_loc):
        """Update tracks vector using following steps:
            - Create tracks if no tracks vector found
            - Calculate cost using sum of square distance
              between predicted vs detected centroids
            - Using Hungarian Algorithm assign the correct
              detected measurements to predicted tracks
              https://en.wikipedia.org/wiki/Hungarian_algorithm
            - Identify tracks with no assignment, if any
            - If tracks are not detected for long time, remove them
            - Now look for un_assigned detects
            - Start new tracks
            - Update KalmanFilter state, lastResults and tracks trace
        Args:
            detections: detected centroids (column vectors) of objects to track
            frames_shape: per-detection (height, width) of the detected region
            temperature: per-detection measured temperature
            temperature_loc: per-detection location of the temperature reading
        Return:
            None
        """
        # Create tracks if no tracks vector found
        if len(self.tracks) == 0:
            for i in range(len(detections)):
                track = Track(detections[i], self.trackIdCount)
                track.h = frames_shape[i][0]
                track.w = frames_shape[i][1]
                track.temp = temperature[i]
                track.temp_loc = temperature_loc[i]
                self.trackIdCount += 1
                self.tracks.append(track)

        # Calculate cost using (halved) Euclidean distance between
        # predicted vs detected centroids
        N = len(self.tracks)
        M = len(detections)
        cost = np.zeros(shape=(N, M))  # Cost matrix
        for i in range(N):
            for j in range(M):
                try:
                    diff = self.tracks[i].prediction - detections[j]
                    distance = np.sqrt(diff[0][0] * diff[0][0] +
                                       diff[1][0] * diff[1][0])
                    cost[i][j] = 0.5 * distance  # average the squared error
                except Exception:
                    # Malformed prediction/detection pair: leave cost at 0.
                    # (Kept best-effort, as in the original, but no longer a
                    # bare except that would swallow KeyboardInterrupt.)
                    pass

        # Using Hungarian Algorithm assign the correct detected measurements
        # to predicted tracks; -1 marks an unassigned track.
        assignment = [-1] * N
        row_ind, col_ind = linear_sum_assignment(cost)
        for r, c in zip(row_ind, col_ind):
            assignment[r] = c

        # Identify tracks with no assignment, if any
        un_assigned_tracks = []
        for i in range(len(assignment)):
            if assignment[i] != -1:
                # Check cost distance threshold: if cost is very high,
                # un_assign (discard) the match for this track.
                if cost[i][assignment[i]] > self.dist_thresh:
                    assignment[i] = -1
                    un_assigned_tracks.append(i)
            else:
                self.tracks[i].skipped_frames += 1

        # If tracks are not detected for long time, remove them.
        # BUG FIX: delete in descending index order so earlier deletions do
        # not shift the positions of the remaining indices (the original
        # ascending loop could delete the wrong track / raise on the last one).
        del_tracks = [i for i in range(len(self.tracks))
                      if self.tracks[i].skipped_frames > self.max_frames_to_skip]
        for idx in sorted(del_tracks, reverse=True):
            if idx < len(self.tracks):
                del self.tracks[idx]
                del assignment[idx]
            else:
                dprint("ERROR: id is greater than length of tracks")

        # Now look for un_assigned detects
        un_assigned_detects = [i for i in range(len(detections))
                               if i not in assignment]

        # Start new tracks
        for det_idx in un_assigned_detects:
            track = Track(detections[det_idx], self.trackIdCount)
            track.h = frames_shape[det_idx][0]
            track.w = frames_shape[det_idx][1]
            # BUG FIX: evaluate the "hotter reading" condition once so that
            # temp and temp_loc are updated together (the original updated
            # temp first and then re-tested the condition, so temp_loc was
            # never updated alongside temp).
            if track.temp < temperature[det_idx]:
                track.temp = temperature[det_idx]
                track.temp_loc = temperature_loc[det_idx]
            self.trackIdCount += 1
            self.tracks.append(track)

        # Update KalmanFilter state, lastResults and tracks trace
        for i in range(len(assignment)):
            self.tracks[i].KF.predict()
            if assignment[i] != -1:
                self.tracks[i].skipped_frames = 0
                self.tracks[i].prediction = self.tracks[i].KF.correct(
                    detections[assignment[i]], 1)
                self.tracks[i].h = frames_shape[assignment[i]][0]
                self.tracks[i].w = frames_shape[assignment[i]][1]
                # Same single-evaluation fix as above: keep peak temperature
                # and its location in sync.
                if self.tracks[i].temp < temperature[assignment[i]]:
                    self.tracks[i].temp = temperature[assignment[i]]
                    self.tracks[i].temp_loc = temperature_loc[assignment[i]]
            else:
                # No detection this frame: correct with a zero measurement
                # flagged as "not detected" (second argument 0).
                self.tracks[i].prediction = self.tracks[i].KF.correct(
                    np.array([[0], [0]]), 0)
            # Trim the trace to max_trace_length entries.
            # BUG FIX: slice-delete the oldest entries; the original deleted
            # by a shifting index j, which removed the wrong elements.
            excess = len(self.tracks[i].trace) - self.max_trace_length
            if excess > 0:
                del self.tracks[i].trace[:excess]
            self.tracks[i].trace.append(self.tracks[i].prediction)
            self.tracks[i].KF.lastResult = self.tracks[i].prediction
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "political-chassis",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import os\n",
"from datetime import datetime\n",
"import numpy as np\n",
"# from IRM80x62_thermal import Thermal\n",
"from uvc_thermal import Thermal\n",
"from camera import Camera "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "improved-representative",
"metadata": {},
"outputs": [],
"source": [
"# thermal = Thermal(width = 80, height = 62, framerate = 4, frame_width = 640, frame_height = 480, log=None, port=\"/dev/ttyUSB0\")\n",
"\n",
"thermal = Thermal(width=160, height=120, framerate=9, frame_width=160*4, frame_height=120*4, log=None)\n",
"camera = Camera(width=640, height=480, framerate=30, log=None, channel=0, flip=None)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "light-wednesday",
"metadata": {},
"outputs": [],
"source": [
"thermal.start()\n",
"camera.start()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "comparative-neighbor",
"metadata": {},
"outputs": [],
"source": [
"while(True):\n",
" if(thermal.isOpened and camera.isOpened):\n",
" thermal_ret,thermal_frame, thermal_row = thermal.capture()\n",
" ret, frame = camera.capture()\n",
" cat_img = cv2.hconcat([frame,thermal_frame])\n",
" cv2.imshow(\"frame\",cat_img)\n",
" key = cv2.waitKey(33)\n",
" if(key == ord('q')):\n",
" break\n",
" elif(key == ord('c')):\n",
" dt_now = datetime.timestamp(datetime.now())\n",
" cv2.imwrite(os.path.join(\"./images\",\"frame\" + str(dt_now) + \".jpg\"),frame)\n",
" cv2.imwrite(os.path.join(\"./images\",\"thermal_frame\"+ str(dt_now) +\".jpg\"),thermal_frame)\n",
"\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "foster-owner",
"metadata": {},
"outputs": [],
"source": [
"thermal.stop()\n",
"camera.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "through-annotation",
"metadata": {},
"outputs": [],
"source": [
"cat_img = cv2.hconcat([frame,thermal_frame])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "contained-scanning",
"metadata": {},
"outputs": [],
"source": [
"thermal.stop()\n",
"camera.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "straight-substitute",
"metadata": {},
"outputs": [],
"source": [
"cv2.imshow(\"frame\",thermal_frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "amended-anime",
"metadata": {},
"outputs": [],
"source": [
"frame_roi = cv2.selectROI(\"frame\",frame)\n",
"cv2.destroyAllWindows()\n",
"thermal_roi = cv2.selectROI(\"thermal frame\",thermal_frame)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "destroyed-startup",
"metadata": {},
"outputs": [],
"source": [
"from aligner import BoxAligner"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "precise-input",
"metadata": {},
"outputs": [],
"source": [
"aligner = BoxAligner(img_h = 480,img_w = 640)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cultural-pasta",
"metadata": {},
"outputs": [],
"source": [
"rgb_points = []\n",
"thermal_points = []"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "vulnerable-pizza",
"metadata": {},
"outputs": [],
"source": [
"x,y,w,h = frame_roi\n",
"rgb_points.append([x,y])\n",
"rgb_points.append([x+w,y])\n",
"rgb_points.append([x,y+h])\n",
"rgb_points.append([x+w,y+h])\n",
"\n",
"x,y,w,h = thermal_roi\n",
"thermal_points.append([x,y])\n",
"thermal_points.append([x+w,y])\n",
"thermal_points.append([x,y+h])\n",
"thermal_points.append([x+w,y+h])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "automotive-stanford",
"metadata": {},
"outputs": [],
"source": [
"aligner.get_omography_matrix(np.array(rgb_points),np.array(thermal_points))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "stopped-marker",
"metadata": {},
"outputs": [],
"source": [
"np.array(rgb_points)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "arranged-engine",
"metadata": {},
"outputs": [],
"source": [
"np.array(thermal_points)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "removed-employer",
"metadata": {},
"outputs": [],
"source": [
"aligner.M"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "emerging-vocabulary",
"metadata": {},
"outputs": [],
"source": [
"np.array([[ 1.00000000e+00, 6.59371035e-17, -9.84556895e-14],\n",
" [ 0.00000000e+00, 1.00000000e+00, -4.92278448e-14],\n",
" [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "minor-prague",
"metadata": {},
"outputs": [],
"source": [
"196 233 285 356"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ready-healing",
"metadata": {},
"outputs": [],
"source": [
"np.dot(aligner.M,[139,193,1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "olive-attachment",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "innocent-payment",
"metadata": {},
"outputs": [],
"source": [
"# load image and crop face area\n",
"rgb_points = []\n",
"thermal_points = []\n",
"for root,dirs,files in os.walk(\"./images/\"):\n",
" for f in files:\n",
" if(f[:5] == \"frame\"):\n",
" frame_path = os.path.join(root,f)\n",
" thermal_path = os.path.join(root,f.replace(\"frame\",\"thermal_frame\"))\n",
" \n",
" frame = cv2.imread(frame_path)\n",
" thermal_frame = cv2.imread(thermal_path)\n",
" \n",
" frame_roi = cv2.selectROI(\"frame\",frame)\n",
" cv2.destroyAllWindows()\n",
" thermal_roi = cv2.selectROI(\"thermal frame\",thermal_frame)\n",
" cv2.destroyAllWindows()\n",
" \n",
" x,y,w,h = frame_roi\n",
" rgb_points.append([x,y])\n",
" rgb_points.append([x+w,y])\n",
" rgb_points.append([x,y+h])\n",
" rgb_points.append([x+w,y+h])\n",
"\n",
" x,y,w,h = thermal_roi\n",
" thermal_points.append([x,y])\n",
" thermal_points.append([x+w,y])\n",
" thermal_points.append([x,y+h])\n",
" thermal_points.append([x+w,y+h])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "played-oasis",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "historical-stone",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment