Commit eb45b60d authored by Bruce's avatar Bruce

rm pycache

parents dc3fe5c1 68633af4
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "dc84920a",
"metadata": {},
"outputs": [],
"source": [
"import cv2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0bc9ac0c",
"metadata": {},
"outputs": [],
"source": [
"source = \"rtsp://192.168.5.218/txg/01\"\n",
"framerate = 60"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f7607446",
"metadata": {},
"outputs": [],
"source": [
"gst_format = f'rtspsrc location={source} latency=0 protocols=tcp ! ' \n",
"gst_format += 'rtph265depay ! h265parse ! nvv4l2decoder skip-frames=1 ! nvvidconv ! '\n",
"gst_format += f' video/x-raw,format=BGRx ! videorate ! video/x-raw,framerate={framerate}/1 ! videoconvert ! video/x-raw,format=BGR ! '\n",
"gst_format += 'appsink sync=false'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9d9df82f",
"metadata": {},
"outputs": [],
"source": [
"dev = cv2.VideoCapture(gst_format, cv2.CAP_GSTREAMER)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf294fdb",
"metadata": {},
"outputs": [],
"source": [
"while(dev.isOpened()):\n",
" ret,frame = dev.read()\n",
" if(ret):\n",
" cv2.imshow(\"frame\", frame)\n",
" if(cv2.waitKey(1000//framerate) == ord('q')):\n",
" break\n",
" else:\n",
" break\n",
" \n",
"dev.release()\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d377c68",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "f6c5d62c",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "84ef06e2",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
# import common
# from retina_utils.TrtRetinaPlate import TrtRetinaPlate
import sys
import os
root_path=os.path.dirname(__file__)
sys.path.append(root_path)
# from retina_utils.TrtRetinaGrayPlate import TrtRetinaPlate as TrtRetinaGrayPlate
# from retina_utils.TrtRetinaGrayPlateSHM import TrtRetinaPlateSHM as TrtRetinaGrayPlateSHM
from retina_utils.TrtRetinaColorPlate import TrtRetinaPlate as TrtRetinaColorPlate
from retina_utils.TrtRetinaColorPlateSHM import TrtRetinaPlateSHM as TrtRetinaColorPlateSHM
from ocr_utils.TrtCtcOcr import TrtCtcOcr
from ocr_utils.TrtCtcOcrSHM import TrtCtcOcrSHM
import cv2
import numpy as np
import time
import threading, queue
class TrtRetinaPlateThread(threading.Thread):
    """Worker thread running TensorRT RetinaPlate license-plate detection.

    Continuously takes VehicleInfo items from the input queue
    (``vehicles_q``), runs the TRT plate-detection engine on the vehicle
    crop, and puts a PlateInfo (carrying the cropped plate image) onto
    ``plates_q`` for the downstream OCR thread. This thread acts as the
    producer while the main thread is the consumer.
    """
    def __init__(self, condition, vehicles_q, plates_q, conf_th=0.9, budget=10):
        """__init__
        # Arguments
            condition: condition variable intended to notify the main
                thread about new detection results (currently unused;
                the notify code below is commented out)
            vehicles_q: input Queue of VehicleInfo objects to detect on
            plates_q: output Queue receiving PlateInfo objects
            conf_th: confidence threshold for detection
            budget: unused; kept for interface compatibility
        """
        threading.Thread.__init__(self)
        self.condition = condition
        self.vehicles_q = vehicles_q
        self.plates_q = plates_q
        self.conf_th = conf_th
        self.trt_plate = None  # engine is created inside run(); see note there
        self.running = False
    def run(self):
        """Run until the 'running' flag is set to False by the main thread.

        NOTE: the CUDA context is created here, i.e. inside the thread
        which calls CUDA kernels. In other words, creating the CUDA
        context in __init__() doesn't work.
        """
        global s_img, s_boxes, s_confs, s_lands
        print('TrtThread: loading the TRT RetinaPlate engine...')
        # Alternative engines kept for reference:
        # self.trt_plate = TrtRetinaGrayPlate((320,320))
        # self.trt_plate = TrtRetinaGrayPlateSHM((320,320))
        self.trt_plate = TrtRetinaColorPlateSHM((320,320))
        print('TrtThread: TRT RetinaPlate start running...')
        self.running = True
        while self.running:
            if(self.vehicles_q.empty()):
                time.sleep(1)  # idle poll; avoids busy-spinning on an empty queue
                continue
            info = self.vehicles_q.get()
            if(not isinstance(info, VehicleInfo)):
                continue
            if(info.vechile_img is None):
                continue
            # detect() returns one row per detection:
            # columns 0-3 box coords, column 4 confidence, columns 5+ landmarks
            output = self.trt_plate.detect(info.vechile_img, self.conf_th)
            if(output.shape[0] > 0):
                boxes = output[:,:4].astype(int)
                confs = output[:,4]
                lands = output[:,5:].astype(int)
                # only the first detection row is used for the crop
                loc = boxes[0]
                crop_img = info.vechile_img[loc[1]:loc[3],loc[0]:loc[2]]
                # filter objects
                plateInfo = PlateInfo(info.track_id, info.img, info.vechile_img, crop_img)
                self.plates_q.put(plateInfo)
                # with self.condition:
                #     s_img, s_boxes, s_confs, s_lands = info.img, boxes, confs, lands
                #     self.condition.notify()
        # release the engine inside the same thread that created it
        del self.trt_plate
        print('TrtThread: stopped...')
    def stop(self):
        """Signal the run loop to exit and wait for the thread to finish."""
        self.running = False
        self.join()
class TrtCTCOCRThread(threading.Thread):
    """Worker thread running TensorRT CTC OCR on detected plate crops.

    Takes PlateInfo items from ``plates_q``, runs the TRT OCR engine on
    the plate crop, writes sample images (fullscreen frame, vehicle crop,
    plate crop) under ``save_path``, and puts a ResultInfo onto
    ``results_q`` for the main thread to consume.
    """
    def __init__(self, condition, plates_q, results_q, budget=10):
        """__init__
        # Arguments
            condition: condition variable intended to notify the main
                thread about new results (currently unused; the notify
                code below is commented out)
            plates_q: input Queue of PlateInfo objects to recognize
            results_q: output Queue receiving ResultInfo objects
            budget: unused; kept for interface compatibility
        """
        threading.Thread.__init__(self)
        self.condition = condition
        self.plates_q = plates_q
        self.results_q = results_q
        self.trt_ocr = None  # engine is created inside run(); see note there
        self.running = False
        # sample dump root; expected to contain fullscreen/, vehicles/, plates/
        # subdirectories — TODO confirm they are created elsewhere
        self.save_path = "/media/aaeon/Data/samples"
    def run(self):
        """Run until the 'running' flag is set to False by the main thread.

        NOTE: the CUDA context is created here, i.e. inside the thread
        which calls CUDA kernels. In other words, creating the CUDA
        context in __init__() doesn't work.
        """
        # global s_img, s_boxes, s_confs, s_lands
        print('TrtThread: loading the TRT CTC OCR engine...')
        # Alternative engine kept for reference:
        # self.trt_ocr = TrtCtcOcr()
        self.trt_ocr = TrtCtcOcrSHM()
        print('TrtThread: TRT CTC OCR start running...')
        self.running = True
        while self.running:
            if(self.plates_q.empty()):
                time.sleep(1)  # idle poll; avoids busy-spinning on an empty queue
                continue
            info = self.plates_q.get()
            if(not isinstance(info, PlateInfo)):
                continue
            if(info.plate_img is None):
                continue
            # stime = time.time()
            try:
                plate = self.trt_ocr.recognition(info.plate_img)
            except Exception as e:
                # best-effort: OCR failure yields an empty plate string
                print(e)
                plate = ""
            # print("Resnet32 CTC OCR 480*16 shared memory inference time", time.time() - stime)
            resultInfo = ResultInfo(info.track_id, plate, info.vechile_img, info.plate_img)
            if(resultInfo.plate is not None and resultInfo.plate_img is not None):
                # persist samples named "<track_id>_<plate>.jpg" for later inspection
                fullscreen_save_path = os.path.join(self.save_path, "fullscreen", "{}_{}.jpg".format(resultInfo.track_id, resultInfo.plate))
                vehicle_save_path = os.path.join(self.save_path, "vehicles", "{}_{}.jpg".format(resultInfo.track_id, resultInfo.plate))
                plate_save_path = os.path.join(self.save_path, "plates", "{}_{}.jpg".format(resultInfo.track_id, resultInfo.plate))
                cv2.imwrite(fullscreen_save_path, info.img)
                cv2.imwrite(vehicle_save_path, resultInfo.vechile_img)
                cv2.imwrite(plate_save_path, resultInfo.plate_img)
            self.results_q.put(resultInfo)
            # with self.condition:
            #     s_img, s_boxes, s_confs, s_lands = img, boxes, confs, lands
            #     self.condition.notify()
        # release the engine inside the same thread that created it
        del self.trt_ocr
        print('TrtThread: stopped...')
    def stop(self):
        """Signal the run loop to exit and wait for the thread to finish."""
        self.running = False
        self.join()
class VehicleInfo(object):
    """Container pairing a tracker id with the full frame and the vehicle crop.

    Note: the 'vechile' spelling is kept as-is for compatibility with the
    rest of the pipeline, which reads this attribute by that name.
    """
    def __init__(self, track_id, img, vechile_img):
        self.track_id, self.img, self.vechile_img = track_id, img, vechile_img
class PlateInfo(object):
    """Container for a detected plate: tracker id, frame, vehicle crop, plate crop.

    Note: the 'vechile' spelling is kept as-is for compatibility with the
    rest of the pipeline, which reads this attribute by that name.
    """
    def __init__(self, track_id, img, vechile_img, plate_img):
        self.track_id, self.img = track_id, img
        self.vechile_img, self.plate_img = vechile_img, plate_img
class ResultInfo(object):
    """Final pipeline output: tracker id, recognized plate text, and the images.

    Note: the 'vechile' spelling is kept as-is for compatibility with the
    rest of the pipeline, which reads this attribute by that name.
    """
    def __init__(self, track_id, plate, vechile_img, plate_img):
        self.track_id, self.plate = track_id, plate
        self.vechile_img, self.plate_img = vechile_img, plate_img
class EZLPR(object):
    """Facade for the LPR pipeline: vehicle crops in, (id, plate, images) out.

    Wires three queues through two TRT worker threads:
    vehiclesQueue -> TrtRetinaPlateThread -> platesQueue ->
    TrtCTCOCRThread -> resultsQueue. Both worker threads are started
    immediately on construction.
    """
    def __init__(self):
        self.vehiclesQueue = queue.Queue()
        self.platesQueue = queue.Queue()
        self.resultsQueue = queue.Queue()
        condition = threading.Condition()  # condition is currently unused by the workers
        self.trtRetinaPlateThread = TrtRetinaPlateThread(condition, self.vehiclesQueue, self.platesQueue)
        self.trtCtcOcrThread = TrtCTCOCRThread(condition, self.platesQueue, self.resultsQueue)
        self.trtRetinaPlateThread.start()  # start the child threads right away
        self.trtCtcOcrThread.start()
    def put(self, track_id, img, crop_img):
        """Submit a vehicle crop (with its full frame and tracker id) for recognition."""
        vehicleInfo = VehicleInfo(track_id = track_id, img = img, vechile_img = crop_img)
        self.vehiclesQueue.put(vehicleInfo)
    def out(self):
        """Yield (track_id, plate, vehicle_img, plate_img) for every finished result.

        Drains the results queue; returns immediately when it is empty.
        """
        while not self.resultsQueue.empty():
            resultInfo = self.resultsQueue.get()
            yield resultInfo.track_id, resultInfo.plate, resultInfo.vechile_img, resultInfo.plate_img
    def __del__(self):
        # stop() joins each worker thread, so destruction blocks until both exit
        self.trtRetinaPlateThread.stop()
        self.trtCtcOcrThread.stop()
\ No newline at end of file
...@@ -38,10 +38,10 @@ easyLPR=EZLPR() ...@@ -38,10 +38,10 @@ easyLPR=EZLPR()
# source="/home/aaeon/Kevin_Car.MOV" # source="/home/aaeon/Kevin_Car.MOV"
#source = "../Ch17-20220314-190000.mp4" #source = "../Ch17-20220314-190000.mp4"
# source="rtsp://192.168.5.218/txg/01" source="rtsp://192.168.5.218/txg/01"
source="rtsp://140.120.65.31/live.sdp" # source="rtsp://140.120.65.31/live.sdp"
# cam=Camera(1,source,encoder = "h265",width=1920,height=1080) cam=Camera(1,source,encoder = "h265",width=1920,height=1080, use_gstr=True, framerate=60, skip_frames=1)
cam=Camera(1,source,encoder = "h264",width=1920,height=1080) # cam=Camera(1,source,encoder = "h264",width=1920,height=1080)
camera_id=1 camera_id=1
break_flag=False break_flag=False
......
...@@ -4,7 +4,32 @@ from queue import Queue ...@@ -4,7 +4,32 @@ from queue import Queue
import time, numpy as np, cv2 import time, numpy as np, cv2
class Camera(object): class Camera(object):
def __init__(self,_id,video_path,encoder=None, width=1280, height=720, framerate=30, log=None, maxsize=10 , use_gstr=True): """
jetson平台相機物件,透過threading去處理影像,並儲放於Queue之中,並支援多種格式
Args:
video_path: 來源包含影片、串流
encoder: 解碼器選擇,h264或h265
width: 輸出影像的寬
height: 輸出影像的高
framerate: 輸出影像的更新率
log: 自定義log 函數
maxsize: queue(buffer)最大存放大小
use_gstr: 是否使用gstreamer後台
skip_frames: 是否跳過部分幀,1 => 全部解碼, 2 => 只解碼 non-ref frames,3 => 只解碼 key-frames
"""
def __init__(self,_id,video_path,encoder=None, width=1280, height=720, framerate=30, log=None, maxsize=10 , use_gstr=True, skip_frames=1):
self.ID = _id self.ID = _id
self.__log = self.__log if log is None else log self.__log = self.__log if log is None else log
self.__isCaptured = False self.__isCaptured = False
...@@ -16,6 +41,7 @@ class Camera(object): ...@@ -16,6 +41,7 @@ class Camera(object):
self.framerate = framerate self.framerate = framerate
self.encoder = encoder self.encoder = encoder
self.use_gstr = use_gstr self.use_gstr = use_gstr
self.skip_frames = skip_frames
self.__thread = Thread(target=self.__job) self.__thread = Thread(target=self.__job)
self.resultQueue = Queue(maxsize=maxsize) self.resultQueue = Queue(maxsize=maxsize)
def start(self): def start(self):
...@@ -34,7 +60,7 @@ class Camera(object): ...@@ -34,7 +60,7 @@ class Camera(object):
h = self.height h = self.height
fps = self.framerate fps = self.framerate
if self.use_gstr: if self.use_gstr:
source = self.__gstreamer(self.video_path,w, h, fps) source = self.__gstreamer(self.video_path,w, h, fps, skip_frames = self.skip_frames)
while self.isOpened: while self.isOpened:
try: try:
if self.use_gstr: if self.use_gstr:
...@@ -67,7 +93,7 @@ class Camera(object): ...@@ -67,7 +93,7 @@ class Camera(object):
self.__frame = None self.__frame = None
self.__log("camera stop") self.__log("camera stop")
def __gstreamer(self, source=0,width=1280, height=720, framerate=30, flip_method=2): def __gstreamer(self, source=0,width=1280, height=720, framerate=30, flip_method=2, skip_frames = 1):
if source==0: if source==0:
# Camera Device Stream # Camera Device Stream
return ( return (
...@@ -86,15 +112,16 @@ class Camera(object): ...@@ -86,15 +112,16 @@ class Camera(object):
if self.encoder=="h265": if self.encoder=="h265":
print("h265") print("h265")
return ( return (
f'rtspsrc location={source} ! ' + f'rtspsrc location={source} latency=0 protocols=tcp ! ' +
# 'watchdog timeout=10000 !'+ # 'watchdog timeout=10000 !'+
'rtph265depay ! h265parse ! nvv4l2decoder ! nvvidconv ! '+ f'rtph265depay ! h265parse ! nvv4l2decoder skip-frames={skip_frames} ! nvvidconv ! '+
'video/x-raw,format=BGRx ! videoconvert ! video/x-raw,format=BGR ! appsink' f' video/x-raw,format=BGRx ! videorate ! video/x-raw,framerate={framerate}/1 ! videoconvert ! video/x-raw,format=BGR ! '+
'appsink sync=false'
) )
elif self.encoder=="h264": elif self.encoder=="h264":
print("h264") print("h264")
gst_str = ('rtspsrc location={} latency={} ! ' gst_str = ('rtspsrc location={} latency={} ! '
'rtph264depay ! h264parse ! omxh264dec ! ' f'rtph264depay ! h264parse ! nvv4l2decoder skip-frames={skip_frames}! '
'nvvidconv ! ' 'nvvidconv ! '
'video/x-raw, width=(int){}, height=(int){}, ' 'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! ' 'format=(string)BGRx ! '
......
import cv2

# Smoke test: try to open an RTSP H.265 stream through a Jetson GStreamer
# pipeline (nvv4l2decoder) and report whether the source could be opened.

source = "rtsp://192.168.5.218/txg/01"
framerate = 30
skipframes = 0  # nvv4l2decoder skip-frames: 0 decodes every frame

# Build the GStreamer pipeline: TCP RTSP source -> H.265 depay/parse ->
# HW decode -> rate-limit to `framerate` -> convert to BGR for OpenCV.
gst_format = f'rtspsrc location={source} latency=0 protocols=tcp ! '
gst_format += f'rtph265depay ! h265parse ! nvv4l2decoder skip-frames={skipframes} ! nvvidconv ! '
gst_format += f' video/x-raw,format=BGRx ! videorate ! video/x-raw,framerate={framerate}/1 ! videoconvert ! video/x-raw,format=BGR ! '
gst_format += 'appsink sync=false'

dev = cv2.VideoCapture(gst_format, cv2.CAP_GSTREAMER)
if(dev.isOpened()):
    print("start stream")  # fixed typo: was "strat stream"
else:
    print("Open this source failed")  # fixed typo: was "faild"
dev.release()
\ No newline at end of file
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "dc84920a",
"metadata": {},
"outputs": [],
"source": [
"import cv2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0bc9ac0c",
"metadata": {},
"outputs": [],
"source": [
"source = \"rtsp://192.168.5.218/txg/01\"\n",
"framerate = 60"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f7607446",
"metadata": {},
"outputs": [],
"source": [
"gst_format = f'rtspsrc location={source} latency=0 protocols=tcp ! ' \n",
"gst_format += 'rtph265depay ! h265parse ! nvv4l2decoder skip-frames=2 ! nvvidconv ! '\n",
"gst_format += f' video/x-raw,format=BGRx ! videorate ! video/x-raw,framerate={framerate}/1 ! videoconvert ! video/x-raw,format=BGR ! '\n",
"gst_format += 'appsink sync=false'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9d9df82f",
"metadata": {},
"outputs": [],
"source": [
"dev = cv2.VideoCapture(gst_format, cv2.CAP_GSTREAMER)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf294fdb",
"metadata": {},
"outputs": [],
"source": [
"while(dev.isOpened()):\n",
" ret,frame = dev.read()\n",
" if(ret):\n",
" cv2.imshow(\"frame\", frame)\n",
" if(cv2.waitKey(1000//framerate) == ord('q')):\n",
" break\n",
" else:\n",
" break\n",
" \n",
"dev.release()\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d377c68",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "f6c5d62c",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "84ef06e2",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment