Commit 9690e1f2 authored by Bruce's avatar Bruce

新增車牌矯正功能

parent 7a4535ec
......@@ -8,6 +8,7 @@ sys.path.append(root_path)
# from retina_utils.TrtRetinaGrayPlate import TrtRetinaPlate as TrtRetinaGrayPlate
# from retina_utils.TrtRetinaGrayPlateSHM import TrtRetinaPlateSHM as TrtRetinaGrayPlateSHM
from retina_utils.alignment import align
from retina_utils.TrtRetinaColorPlate import TrtRetinaPlate as TrtRetinaColorPlate
from retina_utils.TrtRetinaColorPlateSHM import TrtRetinaPlateSHM as TrtRetinaColorPlateSHM
......@@ -46,6 +47,7 @@ class TrtRetinaPlateThread(threading.Thread):
self.conf_th = conf_th
self.trt_plate = None # to be created when run
self.running = False
self.alignment = align
def run(self):
"""Run until 'running' flag is set to False by main thread.
......@@ -76,8 +78,14 @@ class TrtRetinaPlateThread(threading.Thread):
boxes = output[:,:4].astype(int)
confs = output[:,4]
lands = output[:,5:].astype(int)
loc = boxes[0]
crop_img = info.img[loc[1]:loc[3],loc[0]:loc[2]]
loc = boxes[0] # 請考慮以後可能是 多個 batch ouptut
land = output[:,5:][0] # 請考慮以後可能是 多個 batch ouptut
if self.alignment is not None:
crop_img = align(info.img,land,padding=5)
if crop_img is None:
crop_img = info.img[loc[1]:loc[3],loc[0]:loc[2]]
else:
crop_img = info.img[loc[1]:loc[3],loc[0]:loc[2]]
# 過濾物件
plateInfo = PlateInfo(info.track_id, info.img ,crop_img)
self.plates_q.put(plateInfo)
......@@ -204,4 +212,4 @@ class EZLPR(object):
def __del__(self):
self.trtRetinaPlateThread.stop()
self.trtCtcOcrThread.stop()
\ No newline at end of file
self.trtCtcOcrThread.stop()
import numpy as np
import cv2
def align(image, landmarks, padding):
    """Perspective-correct a licence plate from its four corner landmarks.

    Args:
        image: source frame (numpy array as read by OpenCV).
        landmarks: flat sequence of 8 values ordered
            left-top, left-bottom, right-top, right-bottom (x, y pairs).
        padding: extra pixels added to the warped output width
            (passed through to four_point_transform).

    Returns:
        The warped plate image, or None when the transform fails —
        callers use None as the signal to fall back to a plain
        bounding-box crop.
    """
    try:
        src_points = np.array(
            [landmarks[0:2], landmarks[2:4], landmarks[4:6], landmarks[6:8]],
            dtype="float32",
        )
        return four_point_transform(image, src_points, padding)
    except Exception:
        # Best-effort by design: any failure (malformed landmarks,
        # degenerate geometry) yields None instead of crashing the
        # detection thread.
        return None
def order_points(pts):
    """Reorder detector corner points into OpenCV's expected order.

    The plate detector emits corners as:
        pts[0]=left-top, pts[1]=left-bottom, pts[2]=right-top, pts[3]=right-bottom
    cv2.getPerspectiveTransform wants:
        top-left, top-right, bottom-right, bottom-left

    Args:
        pts: array-like of shape (4, 2).

    Returns:
        float32 ndarray of shape (4, 2) in tl, tr, br, bl order.
    """
    rect = np.zeros((4, 2), dtype="float32")
    rect[0] = pts[0]  # top-left
    rect[1] = pts[2]  # top-right
    rect[2] = pts[3]  # bottom-right
    rect[3] = pts[1]  # bottom-left
    return rect
def four_point_transform(image, pts, padding):
    """Warp the quadrilateral `pts` in `image` to an axis-aligned rectangle.

    Args:
        image: source image (numpy array).
        pts: four corner points in detector order (see order_points).
        padding: extra pixels appended to the output width.

    Returns:
        The perspective-warped crop of the quadrilateral.
    """
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    def _edge(p, q):
        # Euclidean distance between two corner points.
        return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

    # Output size: the longer of each pair of opposite edges, so the
    # warped plate is never shrunk below its widest/tallest span.
    maxWidth = max(int(_edge(br, bl)), int(_edge(tr, tl)))
    maxHeight = max(int(_edge(tr, br)), int(_edge(tl, bl)))

    # Destination corners in tl, tr, br, bl order — a flat rectangle.
    dst = np.array(
        [[0, 0],
         [maxWidth - 1, 0],
         [maxWidth - 1, maxHeight - 1],
         [0, maxHeight - 1]],
        dtype="float32",
    )

    M = cv2.getPerspectiveTransform(rect, dst)
    # NOTE(review): padding widens only the output canvas (right side,
    # filled with border pixels); height gets no padding and the dst
    # points ignore it — confirm this asymmetry is intentional.
    return cv2.warpPerspective(image, M, (maxWidth + padding, maxHeight))
def align_with_template(image, landmarks, padding):
    """Map a licence plate onto a fixed plate template via its corners.

    Args:
        image: source frame (numpy array).
        landmarks: flat sequence of 8 values ordered
            left-top, left-bottom, right-top, right-bottom (x, y pairs).
        padding: pixels of margin kept around the plate in the crop.

    Returns:
        The template-mapped plate crop, or False when the mapping fails.
        NOTE(review): failure value is False here but None in align() —
        presumably callers test `is False`; verify before unifying.
    """
    try:
        coords = list(map(int, landmarks))
        src_points = np.array(
            [coords[0:2], coords[2:4], coords[4:6], coords[6:8]],
            dtype="float32",
        )
        return map_to_template(image, src_points, padding)
    except Exception as e:
        print(e)
        return False
def map_to_template(image, pts, padding):
    """Perspective-map the plate quadrilateral onto a fixed 1191x1685
    template image, then crop the plate region plus `padding` pixels.

    Args:
        image: source frame (numpy array).
        pts: four corner points in detector order (see order_points).
        padding: margin in pixels kept around the template plate area.

    Returns:
        The cropped, template-aligned plate image.
    """
    src = order_points(pts)

    # Plate corner positions inside the template, in detector order:
    # left-top, left-bottom, right-top, right-bottom
    # (flat form: [79,866, 79,1589, 1126,866, 1126,1589]).
    lt = [79, 866]
    lb = [79, 1589]
    rt = [1126, 866]
    rb = [1126, 1589]

    # Destination must be tl, tr, br, bl to match order_points' output.
    dst = np.array([lt, rt, rb, lb], dtype="float32")

    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(image, M, (1191, 1685))

    # Crop the plate area (lt -> rb) with a uniform margin.
    x1, y1 = lt
    x2, y2 = rb
    return warped[y1 - padding:y2 + padding, x1 - padding:x2 + padding]
\ No newline at end of file
......@@ -30,10 +30,10 @@ yolo = TrtYOLOSHM()
easyLPR=EZLPR()
#source="/home/tx2/Videos/20220504173000.mp4"
#source="/home/tx2/Videos/Kevin_Car.MOV"
source="/home/tx2/Videos/Kevin_Car.MOV"
#source = "../Ch17-20220314-190000.mp4"
source="rtsp://192.168.5.218/txg/01"
#source="rtsp://192.168.5.218/txg/01"
#source="rtsp://140.120.65.31/live.sdp"
cam=Camera(1,source,encoder = "h265",width=1920,height=1080)
......@@ -123,9 +123,8 @@ try:
InputLPR_span,stime=time_span(stime) # !
for index,platNum in easyLPR.out():
for index,platNum,vechile_img,plate_img in easyLPR.out():
if index in CT.objects:
print(index, platNum)
CT.objects[index].update_lpr_candis(platNum)
OutputLPR_span,stime=time_span(stime) # !
......@@ -143,7 +142,7 @@ try:
total_span=time.time()-staic_stime
sys.stdout.write(f"\rObject Count:{CT.nextObjectID}, Fps:{int(np.mean(FPS))}")
#sys.stdout.write(f"\rObject Count:{CT.nextObjectID}, Fps:{int(np.mean(FPS))}")
# sys.stdout.write(f"\rObject Count:{CT.nextObjectID}, Fps:{int(1/(total_span))}"
# f",yolo_span:{int((yolo_span/total_span)*100)}%"
......
......@@ -170,7 +170,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"id": "f5590c70",
"metadata": {},
"outputs": [
......@@ -178,7 +178,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"30.0\n"
"60.0\n"
]
},
{
......@@ -188,7 +188,7 @@
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-3-4323a410c5ab>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0;31m#VioWri.Frame = frame\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 34\u001b[0;31m \u001b[0mwriter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mframe\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 35\u001b[0m \u001b[0;31m# cv2.imshow(\"1\",frame)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0;31m# key=cv2.waitKey(1)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-2-4323a410c5ab>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0;31m#VioWri.Frame = frame\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 34\u001b[0;31m \u001b[0mwriter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mframe\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 35\u001b[0m \u001b[0;31m# cv2.imshow(\"1\",frame)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0;31m# key=cv2.waitKey(1)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
......
{
"cells": [
{
"cell_type": "code",
"execution_count": 7,
"id": "3ec02b9e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"False\n"
]
}
],
"source": [
"import numpy as np\n",
"\n",
"a=False\n",
"\n",
"if a is False :\n",
" print(a)"
]
},
{
"cell_type": "code",
"execution_count": 4,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment