Commit d03d2cf2 authored by YONG-LIN SU

20220621 modify camera queue

parent 68633af4
@@ -7,7 +7,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import cv2"
+"import cv2\n",
+"import time\n",
+"import subprocess"
 ]
 },
 {
@@ -18,7 +20,7 @@
 "outputs": [],
 "source": [
 "source = \"rtsp://192.168.5.218/txg/01\"\n",
-"framerate = 60"
+"framerate = 15"
 ]
 },
 {
@@ -29,7 +31,7 @@
 "outputs": [],
 "source": [
 "gst_format = f'rtspsrc location={source} latency=0 protocols=tcp ! ' \n",
-"gst_format += 'rtph265depay ! h265parse ! nvv4l2decoder skip-frames=1 ! nvvidconv ! '\n",
+"gst_format += 'rtph265depay ! h265parse ! nvv4l2decoder skip-frames=1 enable-max-performance=1 enable-frame-type-reporting=1 ! nvvidconv ! '\n",
 "gst_format += f' video/x-raw,format=BGRx ! videorate ! video/x-raw,framerate={framerate}/1 ! videoconvert ! video/x-raw,format=BGR ! '\n",
 "gst_format += 'appsink sync=false'"
 ]
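
For context, the assembled gst_format string targets OpenCV's GStreamer backend. A minimal sketch of the capture cell that the later dev.read() calls assume, provided the local OpenCV build was compiled with GStreamer support (check cv2.getBuildInformation()):

import cv2

# Open the appsink-terminated pipeline through the GStreamer backend.
dev = cv2.VideoCapture(gst_format, cv2.CAP_GSTREAMER)
if not dev.isOpened():
    raise RuntimeError("failed to open GStreamer pipeline")
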
@@ -51,11 +53,26 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"last_frame = dev.read()[1]\n",
 "while(dev.isOpened()):\n",
+"    stime = time.time()\n",
 "    ret,frame = dev.read()\n",
+"    print(\"read frame spend\", time.time() - stime)\n",
 "    if(ret):\n",
+"        # frame-to-frame comparison\n",
+"\n",
+"        stime = time.time()\n",
+"        diff = (last_frame - frame).sum()\n",
+"        print(\"frame-to-frame diff\", diff, \"spend\", time.time() - stime)\n",
+"        last_frame = frame\n",
+"\n",
+"        # Laplacian blur estimation\n",
+"        stime = time.time()\n",
+"        fuzzyValue = getImageVar(frame)\n",
+"        print(\"blurriness\", fuzzyValue, \"spend\", time.time() - stime)\n",
 "        cv2.imshow(\"frame\", frame)\n",
-"        if(cv2.waitKey(1000//framerate) == ord('q')):\n",
+"#         if(cv2.waitKey(1000//framerate) == ord('q')):\n",
+"        if(cv2.waitKey(1) == ord('q')):\n",
 "            break\n",
 "    else:\n",
 "        break\n",
@@ -78,7 +95,13 @@
 "id": "f6c5d62c",
 "metadata": {},
 "outputs": [],
-"source": []
+"source": [
+"# return the Laplacian-operator blur score of the given image (higher = sharper)\n",
+"def getImageVar(image):\n",
+"    img2gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
+"    imageVar = cv2.Laplacian(img2gray, cv2.CV_64F).var()\n",
+"    return imageVar"
+]
 },
 {
 "cell_type": "code",
......
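
getImageVar, added above, is the classic variance-of-Laplacian sharpness measure: higher variance means more edge response, i.e. a sharper frame. Two notes: frames from the appsink are BGR, so cv2.COLOR_BGR2GRAY is the strictly correct flag (the grayscale result is close enough either way), and any usable cutoff is scene-dependent. A usage sketch with a hypothetical threshold:

BLUR_THRESHOLD = 100.0  # hypothetical cutoff; tune per camera and scene

if getImageVar(frame) < BLUR_THRESHOLD:
    print("frame looks blurry; consider skipping it for recognition")
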
@@ -31,7 +31,7 @@ class TrtRetinaPlateThread(threading.Thread):
     main thread is the consumer.
     """
-    def __init__(self, condition, vehicles_q, plates_q, conf_th=0.7, budget=10):
+    def __init__(self, condition, vehicles_q, plates_q, conf_th=0.9, budget=10):
         """__init__
         # Arguments
             condition: the condition variable used to notify main
......
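
For context, the docstring above describes a condition-variable producer/consumer handoff: the worker thread fills a queue and notifies, and the main thread consumes. A minimal sketch of that pattern with hypothetical names, not the project's actual implementation:

import threading, queue

condition = threading.Condition()
plates_q = queue.Queue()

def worker():
    while True:
        plate = detect_plate()       # hypothetical inference call
        with condition:
            plates_q.put(plate)
            condition.notify()       # wake the consuming main thread

# main thread (consumer)
with condition:
    while plates_q.empty():
        condition.wait()
    plate = plates_q.get()
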
@@ -153,7 +153,9 @@ class TrtCTCOCRThread(threading.Thread):
 resultInfo = ResultInfo(info.track_id, plate, info.vechile_img, info.plate_img)
-if(resultInfo.plate is not None and resultInfo.plate_img is not None):
+plate_w, plate_h = resultInfo.plate_img.shape[:2]
+if(resultInfo.plate is not None and resultInfo.plate_img is not None and plate_w*plate_h >= 50*30):
     fullscreen_save_path = os.path.join(self.save_path, "fullscreen", "{}_{}.jpg".format(resultInfo.track_id, resultInfo.plate))
     vehicle_save_path = os.path.join(self.save_path, "vehicles", "{}_{}.jpg".format(resultInfo.track_id, resultInfo.plate))
     plate_save_path = os.path.join(self.save_path, "plates", "{}_{}.jpg".format(resultInfo.track_id, resultInfo.plate))
......
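
Two notes on the new size filter above: shape[:2] of an image is (height, width), so plate_w actually receives the height (harmless for the area product, but worth knowing), and the unpacking runs before the None check, so a None plate_img would raise. A reordered sketch:

if resultInfo.plate is not None and resultInfo.plate_img is not None:
    plate_h, plate_w = resultInfo.plate_img.shape[:2]  # shape is (rows, cols)
    if plate_w * plate_h >= 50 * 30:  # skip plates smaller than ~50x30 px
        ...  # build the fullscreen/vehicle/plate save paths as above
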
@@ -2,6 +2,7 @@ from AlprTritonClient.yolo import TrtYOLO
 from AlprTritonClient.yolo_shared_memory import TrtYOLOSHM
 from AlprTritonClient.EZLPR import EZLPR
+# from AlprTritonClient.EZLPRDataCollection import EZLPR
 from VideoAnalysis.SystemModule import DisplayMeta
 from VideoAnalysis.SystemModule import TrafficFlowMeta
@@ -34,13 +35,13 @@ def time_span(stime):
 yolo = TrtYOLOSHM()
 easyLPR=EZLPR()
-#source="/home/tx2/Videos/20220504173000.mp4"
+# source="/home/aaeon/20220504173000.mp4"
+# source="/home/aaeon/Kevin_Car.MOV"
-#source = "../Ch17-20220314-190000.mp4"
+# source = "../Ch17-20220314-190000.mp4"
 source="rtsp://192.168.5.218/txg/01"
 # source="rtsp://140.120.65.31/live.sdp"
-cam=Camera(1,source,encoder = "h265",width=1920,height=1080, use_gstr=True, framerate=60, skip_frames=1)
+cam=Camera(1,source,encoder = "h265",width=1920,height=1080, use_gstr=True, framerate=15, skip_frames=0)
 # cam=Camera(1,source,encoder = "h264",width=1920,height=1080)
 camera_id=1
@@ -69,11 +70,11 @@ exist_track_obj = []
 # to run GUI event loop
 plt.ion()
 fig_scale = 2
-fig, ax_list = plt.subplots(maxSize, 2, figsize=(4*fig_scale, maxSize*fig_scale))
+fig, ax_list = plt.subplots(maxSize, 2, figsize=(2*fig_scale, maxSize*fig_scale))
 ax_cache_imgs = []
 ax_cache_backgrounds = []
 ax_list = ax_list.ravel()
-fake_img = np.zeros((640,480))
+fake_img = np.zeros((480,640))
 update_count = 0
 for i in range(len(ax_list)):
@@ -164,7 +165,8 @@ try:
 # Process EZLPR
 if obj.object_id in LPR_target and not obj.lock_plate_num:
     obj.update_lpr_time()
-    easyLPR.put(obj.ID,frame[y1:y2,x1:x2,:].copy())
+    # easyLPR.put(obj.ID, frame.copy(), frame[y1:y2,x1:x2,:].copy())
+    easyLPR.put(obj.ID,frame[y1:y2,x1:x2,:].copy())
 InputLPR_span,stime=time_span(stime) # !
@@ -195,8 +197,9 @@
 ax_list[ax_index*2+1].title.set_text("plate num {}".format(platNum))
 # set the displayed images
-ax_cache_imgs[ax_index*2].set_data(vechile_img)
-ax_cache_imgs[ax_index*2+1].set_data(plate_img)
+# bgr 2 rgb
+ax_cache_imgs[ax_index*2].set_data(vechile_img[:, :, ::-1])
+ax_cache_imgs[ax_index*2+1].set_data(plate_img[:, :, ::-1])
 # restore background
 fig.canvas.restore_region(ax_cache_backgrounds[ax_index*2])
......
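
The display hunk above also fixes the channel order: OpenCV frames are BGR while matplotlib expects RGB. frame[:, :, ::-1] is a zero-copy view with the channels reversed; an equivalent explicit conversion would be:

rgb = cv2.cvtColor(vechile_img, cv2.COLOR_BGR2RGB)  # same pixels, returns a new array
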
from AlprTritonClient.yolo import TrtYOLO
from AlprTritonClient.yolo_shared_memory import TrtYOLOSHM
from AlprTritonClient.EZLPR import EZLPR
# from AlprTritonClient.EZLPRDataCollection import EZLPR
from VideoAnalysis.SystemModule import DisplayMeta
from VideoAnalysis.SystemModule import TrafficFlowMeta
from VideoAnalysis.CentroidTrack import CentroidTracker
#from VideoAnalysis.SystemModule import InfluxdbRecordThread
from VideoAnalysis.Camera_Jetson import Camera
import cv2
import sys
import time
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import queue
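# Helper: returns (elapsed_seconds, new_start) so callers can chain per-stage
# timings, e.g. `yolo_span, stime = time_span(stime)`.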
def time_span(stime):
    span=time.time()-stime
    stime=time.time()
    return span,stime
'''
Init
'''
#yolo = TrtYOLO()
# yolo triton server shared memory
yolo = TrtYOLOSHM()
easyLPR=EZLPR()
# source="/home/aaeon/20220504173000.mp4"
# source="/home/aaeon/Kevin_Car.MOV"
# source = "../Ch17-20220314-190000.mp4"
source="rtsp://192.168.5.218/txg/01"
# source="rtsp://140.120.65.31/live.sdp"
cam=Camera(1,source,encoder = "h265",width=1920,height=1080, use_gstr=True, framerate=30, skip_frames=0)
# cam=Camera(1,source,encoder = "h264",width=1920,height=1080)
camera_id=1
break_flag=False
displayMeta=DisplayMeta(camera_id,None)
CT=CentroidTracker(camera_id = camera_id ,maxDisappeared = 10, max_dist = 300)
TFMeta=TrafficFlowMeta(width=1920,height=1080,polyPoints = [[658 , 307],[1302 , 804],[1149 , 910],[531 , 340]] )
# [[422,961],[230,667],[492,608],[826,802]]
# points in clockwise order: top-left, top-right, bottom-right, bottom-left
#InfluxWriter=InfluxdbRecordThread("127.0.0.1","admin","Ecom84253675","TrafficRecord")
#region Display vehicle crops and plate-recognition results via matplotlib. Note: this display path drags down the FPS.
# queue of plate-recognition results to display
maxSize = 5
ax_index = 0
lpr_results_queue = queue.Queue(maxSize)
# used to filter out objects that are already being displayed
exist_track_obj = []
# initialize the plt figure
# to run GUI event loop
plt.ion()
fig_scale = 2
fig, ax_list = plt.subplots(maxSize, 2, figsize=(2*fig_scale, maxSize*fig_scale))
ax_cache_imgs = []
ax_cache_backgrounds = []
ax_list = ax_list.ravel()
fake_img = np.zeros((480,640))
update_count = 0
for i in range(len(ax_list)):
    # img = ax_list[i].imshow(fake_img, vmin=-1, vmax=1, interpolation="None", cmap="RdBu")
    img = ax_list[i].imshow(fake_img, vmin=-1, vmax=1, interpolation="nearest", cmap="RdBu")
    ax_cache_imgs.append(img)
    ax_list[i].axis('off')
fig.canvas.draw() # note that the first draw comes before setting data
for i in range(len(ax_list)):
    axbackground = fig.canvas.copy_from_bbox(ax_list[i].bbox)
    ax_cache_backgrounds.append(axbackground)
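# Blitting setup: the figure is drawn once, each Axes' clean background is cached
# via copy_from_bbox, and later updates only restore_region + draw_artist the
# changed images instead of redrawing the whole figure.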
plt.show(block=False)
#endregion
'''
prepare
'''
cam.start()
#InfluxWriter.start()
clsName={'2.0':'car','0.0':'person','3.0':'motorbike','5.0':'bus','7.0':'truck'}
LPR_target=['2.0','3.0','5.0','7.0']
FPS=list()
is_display=True # flag: whether to show frames on screen
frameCount = 0
inferencePerFrame = 3
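# Run detection only on every inferencePerFrame-th frame; skipped frames still
# get the OSD overlay and display below, trading detection latency for FPS.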
'''
start main
'''
try:
    if is_display:
        cv2.namedWindow("1",cv2.WINDOW_NORMAL)
        cv2.resizeWindow("1",1280,720)
    while True:
        if break_flag:
            break
        while not cam.resultQueue.empty():
            if is_display:
                displayMeta.clear()
            ret,frame=cam.resultQueue.get()
            if (ret):
                frameCount+=1
                # inference skip
                if(frameCount % inferencePerFrame != 0):
                    if is_display and len(FPS) > 0:
                        displayMeta.frame = TFMeta.osd(frame) # the traffic-flow overlay is slow; consider commenting it out
                        displayMeta.rect_param.append((50,50,400,1))
                        displayMeta.text_param.append(f"Object Count:{CT.nextObjectID}, Fps:{int(FPS[-1])}")
                        displayMeta.draw()
                    if is_display and displayMeta.frame is not None:
                        cv2.imshow("1",displayMeta.frame)
                        key=cv2.waitKey(1)
                        if key==ord('q'):
                            break_flag=True
                            break
                    continue
                stime=time.time()
                static_stime=stime
                displayMeta.frame=frame
                stime=time.time()
                results=yolo.detect(frame,0.25)
                yolo_span,stime=time_span(stime) # !
                obj_bbox_xyxys =[]
                obj_crop_imgs = []
                obj_ids=[]
                obj_names=[]
                for cls,box in results:
                    x1,y1,x2,y2=box
                    w,h=x2-x1,y2-y1
                    rect=(x1,y1,w,h)
                    obj_bbox_xyxys.append([x1,y1,x2,y2])
                    #obj_crop_imgs.append(frame[y1:y2,x1:x2,:].copy())
                    obj_crop_imgs.append(None)
                    obj_names.append(clsName[str(cls)] if str(cls) in clsName else str(cls))
                    obj_ids.append(str(cls))
                processResult_span,stime=time_span(stime) # !
                objects = CT.update(obj_bbox_xyxys,obj_crop_imgs,obj_ids,obj_names)
                tracker_span,stime=time_span(stime) # !
                TFMeta.UpdateTrafficFlow(objects)
                TF_span,stime=time_span(stime) # !
                if is_display:
                    displayMeta.frame = TFMeta.osd(frame) # the traffic-flow overlay is slow; consider commenting it out
                for index,obj in objects.items():
                    # draw trajectory
                    if obj.rect is not None:
                        x1,y1,x2,y2=obj.rect
                        x,y,w,h=obj.rect[0],obj.rect[1],obj.rect[2]-obj.rect[0],obj.rect[3]-obj.rect[1]
                        if is_display:
                            displayMeta.line_param.append(obj.trajectory)
                            displayMeta.rect_param.append([x,y,w,h])
                            objname=f"{obj.ID}_{obj.object_name}"
                            displayMeta.text_param.append(objname+"_"+ obj.lpr_plate_num if obj.lpr_plate_num else objname)
                        # Process EZLPR
                        if obj.object_id in LPR_target and not obj.lock_plate_num:
                            obj.update_lpr_time()
                            # easyLPR.put(obj.ID, frame.copy(), frame[y1:y2,x1:x2,:].copy())
                            easyLPR.put(obj.ID,frame[y1:y2,x1:x2,:].copy())
                InputLPR_span,stime=time_span(stime) # !
                for out in easyLPR.out():
                    index,platNum,vechile_img, plate_img = out
                    if index in CT.objects:
                        CT.objects[index].update_lpr_candis(platNum)
                    # queue the most recent recognition results for display
                    if is_display:
                        update_count+=1
                        if(index not in exist_track_obj):
                            if(lpr_results_queue.full()):
                                qindex, _, _, _ = lpr_results_queue.get_nowait()
                                exist_track_obj.remove(qindex)
                            lpr_results_queue.put(out)
                            exist_track_obj.append(index)
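                # Bounded display buffer: when lpr_results_queue is full, the oldest
                # entry is evicted and its track id removed from exist_track_obj so
                # the same vehicle can be queued for display again later.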
                # show the most recent recognition results
                if(is_display):
                    qsize = lpr_results_queue.qsize()
                    if(qsize > 0):
                        for i in range(qsize):
                            index,platNum,vechile_img, plate_img = lpr_results_queue.get()
                            print(index,platNum)
                            # update the subplot titles
                            ax_list[ax_index*2].title.set_text("track id {}".format(index))
                            ax_list[ax_index*2+1].title.set_text("plate num {}".format(platNum))
                            # set the displayed images
                            # bgr 2 rgb
                            ax_cache_imgs[ax_index*2].set_data(vechile_img[:, :, ::-1])
                            ax_cache_imgs[ax_index*2+1].set_data(plate_img[:, :, ::-1])
                            # restore background
                            fig.canvas.restore_region(ax_cache_backgrounds[ax_index*2])
                            fig.canvas.restore_region(ax_cache_backgrounds[ax_index*2+1])
                            # draw img
                            ax_list[ax_index*2].draw_artist(ax_cache_imgs[ax_index*2])
                            ax_list[ax_index*2+1].draw_artist(ax_cache_imgs[ax_index*2+1])
                            ax_index+=1
                            if(ax_index >= maxSize):
                                ax_index = 0
                        # redraw everything
                        # fig.canvas.draw()
                        fig.canvas.flush_events()
                OutputLPR_span,stime=time_span(stime) # !
                for obj in CT.GetDisposeBuffer():
                    pass
                    # InfluxWriter.put(obj)
                FPS.append(1/(time.time()-static_stime))
                if is_display:
                    displayMeta.rect_param.append((50,50,400,1))
                    displayMeta.text_param.append(f"Object Count:{CT.nextObjectID}, Fps:{int(FPS[-1])}")
                    displayMeta.draw()
                draw_span,stime=time_span(stime) # !
                total_span=time.time()-static_stime
                sys.stdout.write(f"\rObject Count:{CT.nextObjectID}, Fps:{int(np.mean(FPS))}")
                # sys.stdout.write(f"\rObject Count:{CT.nextObjectID}, Fps:{int(1/(total_span))}"
                #                  f",yolo_span:{int((yolo_span/total_span)*100)}%"
                #                  f",draw_span:{int((draw_span/total_span)*100)}%"
                #                  f",tracker_span:{int((tracker_span/total_span)*100)}%"
                #                  f",TF_span:{int((TF_span/total_span)*100)}%"
                #                  f",InputLPR_span:{int((InputLPR_span/total_span)*100)}%"
                #                  f",OutputLPR_span:{int((OutputLPR_span/total_span)*100)}%")
                # sys.stdout.write(f"\rCTDispose_Buffer:{CT.disposeBuffer.qsize()}"
                #                  f"Influx_Buffer:{InfluxWriter.ObjectMetaUploadQueue.qsize()}")
                if is_display:
                    cv2.imshow("1",displayMeta.frame)
                    key=cv2.waitKey(1)
                    if key==ord('q'):
                        break_flag=True
                        break
except Exception as e:
    raise
finally:
    cam.stop()
    del (cam)
    #del (InfluxWriter)
    del (easyLPR)
    cv2.destroyAllWindows()
@@ -112,11 +112,11 @@ class Camera(object):
         if self.encoder=="h265":
             print("h265")
             return (
-                f'rtspsrc location={source} latency=0 protocols=tcp ! ' +
+                f'rtspsrc location={source} latency=0 protocols=tcp ! queue ! ' +
                 # 'watchdog timeout=10000 !'+
                 f'rtph265depay ! h265parse ! nvv4l2decoder skip-frames={skip_frames} ! nvvidconv ! '+
                 f' video/x-raw,format=BGRx ! videorate ! video/x-raw,framerate={framerate}/1 ! videoconvert ! video/x-raw,format=BGR ! '+
-                'appsink sync=false'
+                'queue ! appsink sync=false max-buffers=1 drop=True'
             )
         elif self.encoder=="h264":
             print("h264")
......
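
The queue elements added above give the receive, decode, and sink stages their own streaming threads, and max-buffers=1 with dropping enabled makes the appsink keep only the newest decoded frame, so a slow consumer no longer builds up latency. GStreamer booleans are conventionally lowercase (drop=true), though the parser also accepts True. A standalone sketch of the same low-latency tail, here with a hypothetical software-decoded h264 source:

# Hypothetical minimal pipeline illustrating the latency-controlling appsink options.
pipeline = (f'rtspsrc location={source} latency=0 protocols=tcp ! queue ! '
            'rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! video/x-raw,format=BGR ! '
            'queue ! appsink sync=false max-buffers=1 drop=true')
cap = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
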
@@ -7,7 +7,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import cv2"
+"import cv2\n",
+"import time\n",
+"import subprocess"
 ]
 },
 {
@@ -18,7 +20,7 @@
 "outputs": [],
 "source": [
 "source = \"rtsp://192.168.5.218/txg/01\"\n",
-"framerate = 60"
+"framerate = 15"
 ]
 },
 {
@@ -29,7 +31,7 @@
 "outputs": [],
 "source": [
 "gst_format = f'rtspsrc location={source} latency=0 protocols=tcp ! ' \n",
-"gst_format += 'rtph265depay ! h265parse ! nvv4l2decoder skip-frames=2 ! nvvidconv ! '\n",
+"gst_format += 'rtph265depay ! h265parse ! nvv4l2decoder skip-frames=1 enable-max-performance=1 enable-frame-type-reporting=1 ! nvvidconv ! '\n",
 "gst_format += f' video/x-raw,format=BGRx ! videorate ! video/x-raw,framerate={framerate}/1 ! videoconvert ! video/x-raw,format=BGR ! '\n",
 "gst_format += 'appsink sync=false'"
 ]
@@ -51,11 +53,26 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"last_frame = dev.read()[1]\n",
 "while(dev.isOpened()):\n",
+"    stime = time.time()\n",
 "    ret,frame = dev.read()\n",
+"    print(\"read frame spend\", time.time() - stime)\n",
 "    if(ret):\n",
+"        # frame-to-frame comparison\n",
+"\n",
+"        stime = time.time()\n",
+"        diff = (last_frame - frame).sum()\n",
+"        print(\"frame-to-frame diff\", diff, \"spend\", time.time() - stime)\n",
+"        last_frame = frame\n",
+"\n",
+"        # Laplacian blur estimation\n",
+"        stime = time.time()\n",
+"        fuzzyValue = getImageVar(frame)\n",
+"        print(\"blurriness\", fuzzyValue, \"spend\", time.time() - stime)\n",
 "        cv2.imshow(\"frame\", frame)\n",
-"        if(cv2.waitKey(1000//framerate) == ord('q')):\n",
+"#         if(cv2.waitKey(1000//framerate) == ord('q')):\n",
+"        if(cv2.waitKey(1) == ord('q')):\n",
 "            break\n",
 "    else:\n",
 "        break\n",
@@ -78,7 +95,13 @@
 "id": "f6c5d62c",
 "metadata": {},
 "outputs": [],
-"source": []
+"source": [
+"# return the Laplacian-operator blur score of the given image (higher = sharper)\n",
+"def getImageVar(image):\n",
+"    img2gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
+"    imageVar = cv2.Laplacian(img2gray, cv2.CV_64F).var()\n",
+"    return imageVar"
+]
 },
 {
 "cell_type": "code",
......