'迁移'
|
|
@ -0,0 +1,10 @@
|
|||
node_modules
|
||||
.DS_Store
|
||||
dist
|
||||
dist.zip
|
||||
.cache
|
||||
.turbo
|
||||
.idea
|
||||
.vscode
|
||||
__pycache__
|
||||
models
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
# minio_client.py
|
||||
from minio import Minio
|
||||
import os
|
||||
|
||||
class MinioUploader:
    """Thin wrapper around a MinIO client for uploading single files."""

    def __init__(self, config):
        """Build the client from a config dict and make sure the bucket exists.

        Expected keys: 'Endpoint', 'AccessKey', 'SecretKey', 'UseSSL',
        'BucketName'.
        """
        self.bucket = config['BucketName']
        self.client = Minio(
            config['Endpoint'],
            access_key=config['AccessKey'],
            secret_key=config['SecretKey'],
            secure=config['UseSSL']
        )
        # Auto-create the bucket on first use (no-op when it already exists).
        if not self.client.bucket_exists(self.bucket):
            self.client.make_bucket(self.bucket)

    def upload_file(self, file_path, object_name=None):
        """Upload *file_path*; the object name defaults to the file's basename."""
        target = object_name or os.path.basename(file_path)
        self.client.fput_object(
            bucket_name=self.bucket,
            object_name=target,
            file_path=file_path,
        )
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
import paho.mqtt.client as mqtt
|
||||
|
||||
# MQTT broker connection settings
BROKER = '175.27.168.120'
PORT = 6011
# Topic to subscribe to (device-specific OSD feed).
TOPIC = "thing/product/1581F8HGX254V00A0BUY/osd"
|
||||
|
||||
|
||||
# 消息接收回调
|
||||
def on_message(client, userdata, msg):
    """Message callback: print every received payload with its topic."""
    print(f"Received message: '{msg.payload.decode()}' from topic '{msg.topic}'")
|
||||
|
||||
|
||||
# 连接成功回调
|
||||
def on_connect(client, userdata, flags, rc):
    """Connection callback: subscribe to TOPIC on success, report errors."""
    if rc != 0:
        print(f"Connection failed with error code {rc}")
        return
    print("Connected!")
    client.subscribe(TOPIC, qos=1)  # subscribe once the broker accepts us
|
||||
|
||||
|
||||
# Create the client and register callbacks.
client = mqtt.Client()
client.username_pw_set("username", "my_password")
client.on_connect = on_connect
client.on_message = on_message

# Connect to the broker (60s keepalive).
client.connect(BROKER, PORT, 60)

# Listen for messages forever.
client.loop_forever()  # blocking loop
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
import torch
|
||||
|
||||
|
||||
def get_default_config():
    """Build and return the application's default configuration dict.

    Sections: 'rtmp' (source stream), 'push' (re-publish stream), 'model',
    'predict' (YOLO inference), 'task' (job metadata + APIs), 'mqtt'
    (drone telemetry), 'minio' (object storage).

    The CUDA probe is performed once up front instead of four separate
    torch.cuda.is_available() calls, so every GPU-dependent setting is
    guaranteed to agree.
    """
    cuda_ok = torch.cuda.is_available()
    return {
        'rtmp': {
            'url': "rtmp://localhost:1935/live/14",
            'max_reconnect_attempts': 20,  # generous reconnect budget
            'reconnect_delay': 1,          # initial back-off, seconds
            'buffer_size': 1,
            'timeout_ms': 5000,
            'gpu_decode': True             # try hardware decode
        },
        'push': {
            'enable_push': True,
            'url': 'rtmp://localhost:1935/live/13',
            'format': 'flv',
            'video_codec': 'h264_nvenc' if cuda_ok else 'libx264',  # HW encode when possible
            'pixel_format': 'bgr24',
            'preset': 'p1' if cuda_ok else 'ultrafast',  # NVENC preset vs x264 preset
            'framerate': 30,
            'gpu_acceleration': True,  # enable hardware acceleration
            'tune': 'll',              # low-latency tuning
            'zerolatency': 1,
            'delay': 0,
            'rc': 'cbr_ld_hq',         # constant bitrate, low delay, high quality
            'bufsize': '500k'          # small buffer to cut latency
        },
        'model': {
            'path': 'yolo11x.pt',  # keep the large model
            'download_url': 'https://github.com/ultralytics/assets/releases/download/v8.3.0/yolov8x.pt'
        },
        'predict': {
            'conf_thres': 0.25,  # confidence threshold
            'iou_thres': 0.45,
            'imgsz': 1280,       # initial inference size
            'line_width': 1,
            'font_size': 18,
            'font': "E:\\tsgz\\fonts\\PingFangSC-Medium.ttf",
            'device': 'cuda:0' if cuda_ok else 'cpu',
            'frame_skip': 1,  # initial frame-skip value
            'half': True      # FP16 inference (effective on CUDA only)
        },
        'task': {
            'taskname': '',  # folder name
            'taskid': '',
            'tag': {},
            'aiid': '',
            'res_api': 'http://123.132.248.154:6033/api/DaHuaAi/AddImg',
            'api': 'http://123.132.248.154:6033/'
        },
        'mqtt': {
            'enable': True,
            'broker': '175.27.168.120',
            'port': 6011,
            'topic': 'thing/product/1581F8HGX254V00A0BUY/osd',
            'client_id': 'yolo_detection_client',  # client id (should be unique)
            'username': 'sdhc',   # optional
            'password': None,     # optional
            'keepalive': 60       # keepalive seconds
        },
        'minio': {
            "UseSSL": False,
            "Endpoint": "175.27.168.120:6013",
            "AccessKey": "minioadmin",
            "SecretKey": "minioadmin",
            "BucketName": "test"
        }
    }
|
||||
|
|
@ -0,0 +1,635 @@
|
|||
import datetime
|
||||
import gc
|
||||
import json
|
||||
import os
|
||||
import queue
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
|
||||
import cv2
|
||||
import paho.mqtt.client as mqtt
|
||||
import requests
|
||||
import torch
|
||||
from ultralytics import YOLO
|
||||
|
||||
from _minio import MinioUploader
|
||||
from ffmpegStreamer import FFmpegStreamer
|
||||
from log import logger
|
||||
import global_data as gd
|
||||
from mapping_cn import class_mapping_cn as cmc
|
||||
|
||||
class DetectionThread(threading.Thread):
|
||||
def __init__(self, config):
    """Worker thread: pull an RTMP feed, run YOLO, push/upload the results.

    *config* is the application config dict (sections 'rtmp', 'push',
    'predict', 'task', 'mqtt', 'minio' — see get_default_config).
    """
    super().__init__()
    self.config = config
    self.running = True            # main-loop flag, cleared by stop()
    self.model = None              # YOLO model, loaded in initialize_resources()
    self.cap = None                # cv2.VideoCapture for the RTMP source
    self.streamer = None           # FFmpegStreamer push thread
    self.frame_count = 0
    self.frame_skip_counter = 0    # frames dropped by the latency guard
    self.reconnect_attempts = 0
    self.last_frame_time = time.time()
    self.fps = 0                   # EMA-smoothed read FPS
    self.detections_count = 0
    self.stop_event = threading.Event()
    # RTMP source parameters.
    self.rtmp_url = config['rtmp']['url']
    self.max_reconnect_attempts = config['rtmp']['max_reconnect_attempts']
    self.reconnect_delay = config['rtmp']['reconnect_delay']
    self.buffer_size = config['rtmp']['buffer_size']
    self.timeout_ms = config['rtmp']['timeout_ms']
    # Task metadata used for uploads/API calls.
    self.taskname = config['task']['taskname']
    self.taskid = config['task']['taskid']
    self.tag = config['task']['tag']
    self.aiid = config['task']['aiid']
    self.last_log_time = time.time()
    self.daemon = True
    # Adaptive-latency knobs (seconds).
    self.frame_skip = 0
    self.target_latency = 0.05
    self.max_processing_time = 0.033
    self.last_processing_time = 0
    self.prev_frame = None          # last processed frame (for frame differencing)
    self.prev_results = None
    self.processing_times = []      # per-frame timings, flushed every 50 frames
    self.avg_process_time = 0.033   # EMA of frame processing time (~30fps start)
    self.last_status_update = 0
    self.last_fps = 0               # FPS snapshot reported over WebSocket
    self.original_width = 0         # filled in by initialize_rtmp()
    self.original_height = 0
    self.imgsz = config['predict']['imgsz']
    self.minio_uploader = MinioUploader(config['minio'])

    # Upload queue and worker thread.
    self.upload_queue = queue.Queue(maxsize=50)
    self.upload_thread = None
    self.upload_active = False
    self.upload_interval = 2        # min seconds between queued uploads
    self.last_upload_time = 0
    self.res_api = config['task']['res_api']

    # MQTT (drone telemetry) configuration.
    self.mqtt_config = config.get('mqtt', {})
    self.mqtt_enabled = self.mqtt_config.get('enable', False)
    self.mqtt_topic = self.mqtt_config.get('topic', 'drone/data')
    self.mqtt_client = None
    self.mqtt_connected = False
    self.latest_drone_data = None   # most recent telemetry payload
    self.mqtt_data_lock = threading.Lock()
|
||||
|
||||
def on_mqtt_connect(self, client, userdata, flags, rc):
|
||||
if rc == 0:
|
||||
client.subscribe(self.mqtt_topic)
|
||||
self.mqtt_connected = True
|
||||
# logger.info("MQTT连接成功")
|
||||
# logger.info(f"已订阅主题: {self.mqtt_topic}")
|
||||
else:
|
||||
logger.error(f"MQTT连接失败,错误码: {rc}")
|
||||
self.mqtt_connected = False
|
||||
|
||||
def on_mqtt_message(self, client, userdata, msg):
    """paho on_message callback: cache the newest telemetry payload."""
    try:
        payload = json.loads(msg.payload.decode())
        with self.mqtt_data_lock:
            self.latest_drone_data = payload
            logger.debug(f"收到MQTT消息: {payload}")
    except Exception as e:
        logger.error(f"解析MQTT消息失败: {str(e)}")
|
||||
|
||||
def start_mqtt_client(self):
    """Create, wire up and start the background MQTT client.

    Returns True when the client network loop was started, False otherwise.
    """
    if not self.mqtt_enabled:
        logger.info("MQTT功能未启用")
        return False
    try:
        logger.info("启动MQTT客户端...")
        client_id = self.mqtt_config.get('client_id', 'yolo_detection')
        self.mqtt_client = mqtt.Client(client_id=client_id)
        self.mqtt_client.on_connect = self.on_mqtt_connect
        self.mqtt_client.on_message = self.on_mqtt_message

        # Credentials are optional; only set them when both keys exist.
        has_creds = 'username' in self.mqtt_config and 'password' in self.mqtt_config
        if has_creds:
            self.mqtt_client.username_pw_set(
                self.mqtt_config['username'],
                self.mqtt_config['password']
            )

        broker = self.mqtt_config['broker']
        port = self.mqtt_config.get('port', 1883)
        keepalive = self.mqtt_config.get('keepalive', 60)
        self.mqtt_client.connect(broker, port, keepalive)

        self.mqtt_client.loop_start()
        logger.info("MQTT客户端已启动")
        return True
    except Exception as e:
        logger.error(f"启动MQTT客户端失败: {str(e)}")
        return False
|
||||
|
||||
def stop_mqtt_client(self):
|
||||
if self.mqtt_client:
|
||||
try:
|
||||
self.mqtt_client.loop_stop()
|
||||
self.mqtt_client.disconnect()
|
||||
logger.info("MQTT客户端已停止")
|
||||
except Exception as e:
|
||||
logger.error(f"停止MQTT客户端失败: {str(e)}")
|
||||
finally:
|
||||
self.mqtt_client = None
|
||||
self.mqtt_connected = False
|
||||
|
||||
def run(self):
    """Main loop: read frames, run inference, push/upload/report results.

    Exits when stop() is called, the stream permanently fails, or an
    unhandled exception escapes; cleanup() always runs on the way out.
    """
    global detection_active
    try:
        logger.info("启动优化检测线程")
        self.initialize_resources()
        logger.info("资源初始化完成")

        if self.mqtt_enabled:
            self.start_mqtt_client()

        # Start the background upload worker.
        self.upload_active = True
        self.upload_thread = threading.Thread(target=self._upload_worker, daemon=True)
        self.upload_thread.start()
        logger.info("图片上传线程已启动")

        # Warm up the model with dummy input.
        # NOTE(review): initialize_resources() already warms the model —
        # this second warm-up appears redundant; confirm before removing.
        logger.info("预热模型...")
        dummy_input = torch.zeros(1, 3, self.imgsz, self.imgsz)
        dummy_input = dummy_input.to(self.config['predict']['device'])
        if self.config['predict'].get('half', False) and 'cuda' in self.config['predict']['device']:
            dummy_input = dummy_input.half()

        for _ in range(5):
            self.model.predict(dummy_input)
        logger.info("模型预热完成")

        while self.running and not self.stop_event.is_set():
            start_time = time.perf_counter()

            # Read the next frame; a failed read triggers reconnect logic.
            ret, frame = self.cap.read()
            logger.debug(f'读取帧结果: {ret}')
            if not ret:
                self.handle_reconnect()
                continue

            # Keep frame resolution consistent with the opened stream.
            if frame.shape[1] != self.original_width or frame.shape[0] != self.original_height:
                logger.warning(
                    f"帧分辨率不匹配: 预期 {self.original_width}x{self.original_height}, 实际 {frame.shape[1]}x{frame.shape[0]}")
                frame = cv2.resize(frame, (self.original_width, self.original_height))

            # EMA-smoothed FPS; snapshot to last_fps at most twice a second.
            current_time = time.time()
            time_diff = current_time - self.last_frame_time
            if time_diff > 0:
                self.fps = 0.9 * self.fps + 0.1 / time_diff
                if current_time - self.last_status_update > 0.5:
                    self.last_fps = self.fps
                    self.last_status_update = current_time
            self.last_frame_time = current_time

            # Latency guard: skip inference when this iteration is already late.
            processing_time = time.perf_counter() - start_time
            if processing_time > self.target_latency:
                self.frame_skip_counter += 1
                self.frame_count += 1
                continue

            # Throughput guard: drop up to 2 consecutive frames when FPS dips.
            if self.fps < 15 and self.frame_skip < 2:
                self.frame_skip += 1
                self.frame_count += 1
                continue
            self.frame_skip = 0

            # Inference (identical calls either way; prev_frame only gates the
            # shape-consistency check for frame differencing).
            if self.prev_frame is not None:
                if self.prev_frame.shape != frame.shape:
                    logger.warning(f"前一帧分辨率不匹配: 预期 {frame.shape}, 实际 {self.prev_frame.shape}")
                    self.prev_frame = cv2.resize(self.prev_frame, (frame.shape[1], frame.shape[0]))
                results = self.model(
                    frame,
                    stream=False,
                    verbose=False,
                    conf=self.config['predict']['conf_thres'],
                    iou=self.config['predict']['iou_thres'],
                    imgsz=self.imgsz,
                    device=self.config['predict']['device'],
                    half=self.config['predict'].get('half', False)
                )
                self.prev_results = results
            else:
                results = self.model(
                    frame,
                    stream=False,
                    verbose=False,
                    conf=self.config['predict']['conf_thres'],
                    iou=self.config['predict']['iou_thres'],
                    imgsz=self.imgsz,
                    device=self.config['predict']['device'],
                    half=self.config['predict'].get('half', False)
                )
            # Keep a copy so later mutation of `frame` cannot alias prev_frame.
            # NOTE(review): source indentation was lost in this dump; this line
            # is placed at loop level (updates every iteration) — confirm.
            self.prev_frame = frame.copy()

            # Annotate the frame and extract per-detection metadata.
            annotated_frame, detection_data = self.process_results(frame, results)
            self.detections_count = len(detection_data)

            # Queue an upload only when targets were found and the
            # per-upload interval has elapsed.
            if len(detection_data) > 0 and (current_time - self.last_upload_time >= self.upload_interval):
                try:
                    timestamp = int(current_time * 1000)
                    filename = f"DJI_{timestamp}.jpg"
                    self.upload_queue.put({
                        "image": annotated_frame.copy(),
                        "filename": filename,
                        "detection_data": detection_data,
                        "timestamp": current_time
                    })
                    self.last_upload_time = current_time
                except Exception as e:
                    logger.error(f"添加上传任务失败: {e}")

            # Forward the annotated frame to the push-stream thread.
            if self.config['push']['enable_push'] and self.streamer:
                try:
                    self.streamer.add_frame(annotated_frame)
                except queue.Full:
                    logger.warning("推流队列已满,丢弃帧")
            # Throttle WebSocket updates to once per second.
            if current_time - self.last_log_time >= 1:
                self.send_to_websocket(detection_data)
                self.last_log_time = current_time

            self.frame_count += 1
            self.reconnect_attempts = 0

            # Performance bookkeeping (EMA + window of raw timings).
            elapsed = time.perf_counter() - start_time
            self.processing_times.append(elapsed)
            self.avg_process_time = self.avg_process_time * 0.9 + elapsed * 0.1

            # Every 50 frames: report and adapt the frame-skip setting.
            # NOTE(review): skipped iterations bump frame_count without
            # appending a timing, so processing_times could be empty here
            # (division by zero) — verify.
            if self.frame_count % 50 == 0:
                avg_time = sum(self.processing_times) / len(self.processing_times)
                logger.info(f"帧处理耗时: {avg_time * 1000:.2f}ms | 平均FPS: {1.0 / avg_time:.1f}")
                self.processing_times = []

                # Dynamically tune the configured frame skip.
                if avg_time > 0.15:
                    self.config['predict']['frame_skip'] = min(4, self.config['predict']['frame_skip'] + 1)
                    logger.info(f"增加跳帧至 {self.config['predict']['frame_skip']}")
                elif avg_time < 0.05 and self.config['predict']['frame_skip'] > 0:
                    self.config['predict']['frame_skip'] = max(0, self.config['predict']['frame_skip'] - 1)
                    logger.info(f"减少跳帧至 {self.config['predict']['frame_skip']}")

        logger.info("检测线程主循环结束")
    except Exception as e:
        logger.error(f"检测线程异常: {str(e)}")
        logger.error(traceback.format_exc())
    finally:
        self.cleanup()
        logger.info("检测线程已安全停止")
        gd.set_value('detection_active', False)
|
||||
|
||||
def _upload_worker(self):
    """Background worker: save queued frames, upload to MinIO, notify the API.

    Runs until upload_active is cleared AND the queue drains; a None item
    acts as a poison pill (pushed by cleanup()) and stops it immediately.
    """
    logger.info("上传工作线程启动")
    output_dir = "output_frames"
    os.makedirs(output_dir, exist_ok=True)

    while self.upload_active or not self.upload_queue.empty():
        try:
            task = self.upload_queue.get(timeout=1.0)
            if task is None:  # poison pill
                break

            start_time = time.time()
            image = task["image"]
            filename = task["filename"]
            detection_data = task["detection_data"]
            filepath = os.path.join(output_dir, filename)

            # Persist the annotated frame locally before uploading.
            cv2.imwrite(filepath, image)

            try:
                foldername = self.config['task']['taskname']
                # Bug fix: the object key previously used a literal
                # placeholder instead of the frame's filename, so every
                # upload wrote to the same object key.
                object_path = f"{foldername}/{filename}"
                self.minio_uploader.upload_file(filepath, object_path)

                payload = {
                    "taskid": self.taskid,
                    "path": object_path,
                    "tag": detection_data,
                    "aiid": self.aiid,
                }

                # Attach the latest drone telemetry when MQTT is live.
                if self.mqtt_enabled and self.mqtt_connected:
                    with self.mqtt_data_lock:
                        if self.latest_drone_data:
                            payload["drone_info"] = self.latest_drone_data
                            logger.info("添加无人机数据到API调用")

                headers = {"Content-Type": "application/json"}
                response = requests.post(self.res_api, json=payload, headers=headers)

                if response.status_code == 200:
                    logger.info(f"已上传帧至 MinIO: {object_path} | 耗时: {time.time() - start_time:.2f}s")
                else:
                    logger.warning(f"API调用失败: {response.status_code} - {response.text}")
            except Exception as e:
                logger.error(f"上传/API调用失败: {e}")
            finally:
                # Always remove the temp file, even when the upload failed.
                try:
                    os.remove(filepath)
                except OSError:
                    pass
        except queue.Empty:
            continue
        except Exception as e:
            logger.error(f"上传任务处理异常: {e}")

    logger.info("上传工作线程已停止")
|
||||
|
||||
def process_results(self, frame, results):
|
||||
result = results[0]
|
||||
annotated_frame = []
|
||||
# for result in results:
|
||||
annotated_frame = result.plot(
|
||||
img=frame,
|
||||
line_width=self.config['predict']['line_width'],
|
||||
# font_size=self.config['predict']['font_size'],
|
||||
font= self.config['predict']['font'],
|
||||
conf=True,
|
||||
labels=True,
|
||||
probs=True
|
||||
)
|
||||
if annotated_frame.shape != frame.shape:
|
||||
logger.warning(f"渲染帧分辨率不匹配: 输入 {frame.shape}, 输出 {annotated_frame.shape}")
|
||||
annotated_frame = cv2.resize(annotated_frame, (frame.shape[1], frame.shape[0]))
|
||||
# break
|
||||
|
||||
detection_data = []
|
||||
# for result in results:
|
||||
# print("result",result)
|
||||
boxes = result.boxes
|
||||
for box in boxes:
|
||||
class_id = int(box.cls)
|
||||
confidence = float(box.conf)
|
||||
class_name_obj = self.tag.get(str(class_id), None)
|
||||
if class_name_obj is None:
|
||||
class_name_obj = cmc.get(str(class_id), None)
|
||||
|
||||
if float(box.conf) < class_name_obj['reliability']:
|
||||
continue
|
||||
|
||||
detection_data.append({
|
||||
'class_id': class_id,
|
||||
'class_name': class_name_obj['name'],
|
||||
'confidence': confidence,
|
||||
'box': box.xyxy[0].tolist(),
|
||||
'reliability': class_name_obj['reliability']
|
||||
})
|
||||
return annotated_frame, detection_data
|
||||
|
||||
def initialize_resources(self):
    """Initialize all resources: model (download if missing), RTMP, streamer."""
    # Load the model: download it from the task API when the local copy
    # is missing, else fall back to a default model file.
    model_path = self.config['model']['path']
    download_url = self.config['task']['api'] + model_path
    _model_path = r"models/" + model_path

    if not Path(os.path.join('models', model_path)).exists():
        logger.info(r"模型不存在,开始下载模型")
        try:
            # Derive the directory component from a backslash-style path.
            # NOTE(review): this only handles Windows-style separators and
            # the first path segment — verify against real model paths.
            model_dir = model_path.split("\\")[0]
            if ".pt" in model_dir:
                model_dir = ""
            model_save_path = r"models/" + model_dir

            model_dir = os.path.dirname(model_save_path)
            if model_dir != '' and not os.path.exists(model_save_path):
                os.makedirs(model_save_path)

            # Stream the download in 1 MiB chunks, logging every 10%.
            response = requests.get(download_url, stream=True)
            response.raise_for_status()

            total_size = int(response.headers.get('content-length', 0))
            block_size = 1024 * 1024
            downloaded = 0
            progress = 0

            with open(os.path.join('models', model_path), 'wb') as f:
                for data in response.iter_content(block_size):
                    downloaded += len(data)
                    f.write(data)

                    new_progress = int(downloaded * 100 / total_size) if total_size > 0 else 0
                    if new_progress != progress and (new_progress % 10 == 0 or downloaded == total_size):
                        logger.info(f"下载进度: {new_progress}% ({downloaded}/{total_size} 字节)")
                        progress = new_progress

            logger.info(f"模型下载成功: {model_save_path} ({downloaded} 字节)")
        except Exception as e:
            logger.error(f"下载模型失败: {str(e)}")
            # Fall back to the bundled default model when nothing usable exists.
            if not Path(model_path).exists():
                logger.error("没有可用的模型文件,使用默认模型")
                _model_path = r"models/yolov8n.pt"

    logger.info(f"加载模型...{_model_path}")
    self.model = YOLO(_model_path).to(self.config['predict']['device'])

    # Optional FP16 inference (CUDA devices only).
    if self.config['predict'].get('half', False) and 'cuda' in self.config['predict']['device']:
        self.model = self.model.half()
        logger.info("启用半精度推理")

    # Warm up with a few dummy predictions so the first real frame is fast.
    logger.info("模型预热...")
    dummy_input = torch.zeros(1, 3, self.imgsz, self.imgsz).to(self.config['predict']['device'])
    if self.config['predict'].get('half', False) and 'cuda' in self.config['predict']['device']:
        dummy_input = dummy_input.half()

    for _ in range(5):
        self.model.predict(dummy_input)
    logger.info(f"模型加载成功 | 设备: {self.config['predict']['device'].upper()}")

    # Open the RTMP source and remember its geometry/frame rate.
    self.original_width, self.original_height, fps = self.initialize_rtmp()
    self.fps = fps  # kept for the push-stream thread created next

    # Start the push-stream thread.
    self.start_streamer()
|
||||
|
||||
def initialize_rtmp(self):
    """Open the RTMP source and return (frame_width, frame_height, fps).

    Raises IOError when the stream cannot be opened.
    """
    logger.info(f"连接RTMP: {self.rtmp_url}")
    self.cap = cv2.VideoCapture()

    # Tune the capture before opening.
    # NOTE(review): some OpenCV backends ignore set() calls made before
    # open() — confirm these take effect with CAP_FFMPEG.
    self.cap.set(cv2.CAP_PROP_BUFFERSIZE, self.buffer_size)
    self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'H264'))
    self.cap.set(cv2.CAP_PROP_FPS, 30)

    # Opt into hardware-accelerated decode when configured.
    if self.config['rtmp'].get('gpu_decode', False):
        try:
            self.cap.set(cv2.CAP_PROP_HW_ACCELERATION, cv2.VIDEO_ACCELERATION_ANY)
            logger.info("启用硬件加速解码")
        except:
            logger.warning("硬件解码不可用,使用软件解码")

    # Try to connect via the FFmpeg backend.
    if not self.cap.open(self.rtmp_url, cv2.CAP_FFMPEG):
        logger.error(f"连接RTMP失败: {self.rtmp_url}")
        raise IOError(f"无法连接RTMP流 ({self.rtmp_url})")

    # Read back the stream's actual properties.
    frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = self.cap.get(cv2.CAP_PROP_FPS) or 30  # fall back to 30 when unreported

    logger.info(f"视频属性: {frame_width}x{frame_height} @ {fps}fps")
    return frame_width, frame_height, fps
|
||||
|
||||
def handle_reconnect(self):
    """Linear back-off reconnect for the RTMP source; rebuilds the streamer.

    Gives up (clears self.running) after max_reconnect_attempts failures.
    """
    self.reconnect_attempts += 1
    if self.reconnect_attempts >= self.max_reconnect_attempts:
        logger.error("达到最大重连次数")
        self.running = False
        return

    # Back-off grows with the attempt count, capped at 10 seconds.
    delay = min(10, self.reconnect_attempts * self.reconnect_delay)
    logger.warning(f"流中断,{delay}秒后重连 ({self.reconnect_attempts}/{self.max_reconnect_attempts})")

    # Tear down the push stream before sleeping.
    self.stop_streamer()

    # Release the capture handle.
    if self.cap:
        self.cap.release()
        self.cap = None
    time.sleep(delay)

    try:
        # Re-open the source and reset the frame-differencing state.
        width, height, fps = self.initialize_rtmp()
        self.original_width = width
        self.original_height = height
        self.prev_frame = None
        self.prev_results = None

        # Bring the push stream back up.
        self.start_streamer()

        logger.info("重连成功")
    except Exception as e:
        logger.error(f"重连异常: {str(e)}")
|
||||
|
||||
def send_to_websocket(self, detection_data):
|
||||
"""高效发送WebSocket数据"""
|
||||
try:
|
||||
now = datetime.datetime.now()
|
||||
time_str = now.strftime("%H:%M:%S")
|
||||
simplified_data = [{
|
||||
'class_id': d['class_id'],
|
||||
'class_name': d['class_name'],
|
||||
'confidence': round(d['confidence'], 2),
|
||||
'box': [round(float(c), 1) for c in d['box']],
|
||||
} for d in detection_data]
|
||||
|
||||
self.config['socketIO'].emit('detection_results', {
|
||||
'detections': simplified_data,
|
||||
'timestamp': time.time_ns() // 1000000,
|
||||
'fps': round(self.last_fps, 1),
|
||||
'frame_count': self.frame_count,
|
||||
'taskid': self.taskid,
|
||||
'time_str': time_str
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"WebSocket发送错误: {str(e)}")
|
||||
|
||||
def start_streamer(self):
|
||||
"""启动新的推流线程"""
|
||||
if self.config['push']['enable_push']:
|
||||
try:
|
||||
self.streamer = FFmpegStreamer(self.config, self.fps, self.original_width, self.original_height)
|
||||
self.streamer.start()
|
||||
logger.info("推流线程已启动")
|
||||
except Exception as e:
|
||||
logger.error(f"启动推流线程失败: {str(e)}")
|
||||
|
||||
def stop_streamer(self):
|
||||
"""停止推流线程并清理资源"""
|
||||
if self.streamer:
|
||||
try:
|
||||
self.streamer.stop()
|
||||
self.streamer = None
|
||||
logger.info("推流线程已停止")
|
||||
except Exception as e:
|
||||
logger.error(f"停止推流线程失败: {str(e)}")
|
||||
|
||||
def stop(self):
|
||||
"""停止检测线程"""
|
||||
self.stop_event.set()
|
||||
self.running = False
|
||||
|
||||
def cleanup(self):
    """Release every resource owned by this thread (MQTT, upload worker,
    capture, streamer, model, CUDA cache). Best-effort: failures are
    swallowed so teardown always completes."""
    logger.info("清理资源...")
    # Stop the MQTT client first so no new telemetry arrives.
    if self.mqtt_enabled:
        self.stop_mqtt_client()

    # Stop the upload worker: clear the flag, push a poison pill, then join.
    self.upload_active = False
    if self.upload_thread and self.upload_thread.is_alive():
        try:
            self.upload_queue.put(None, timeout=0.5)
        except:
            pass
        self.upload_thread.join(2.0)
        if self.upload_thread.is_alive():
            logger.warning("上传线程未能正常停止")

    logger.info("上传资源清理完成")

    # Release the capture stream.
    if self.cap:
        try:
            self.cap.release()
        except:
            pass
        self.cap = None
        logger.info("视频流已释放")

    # Stop the push-stream thread.
    if self.streamer:
        self.streamer.stop()
        self.streamer = None
        logger.info("推流已停止")

    # Drop model references so GPU memory can actually be reclaimed.
    if self.model:
        try:
            if hasattr(self.model, 'predictor'):
                del self.model.predictor
            if hasattr(self.model, 'model'):
                del self.model.model
            del self.model
        except:
            pass
        self.model = None
        logger.info("模型已释放")

    # Flush the CUDA allocator cache and run a GC pass.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()

    logger.info("资源清理完成")
|
||||
|
|
@ -0,0 +1,569 @@
|
|||
import errno
|
||||
import threading
|
||||
import queue
|
||||
import time
|
||||
import os
|
||||
import cv2
|
||||
import numpy as np
|
||||
import subprocess
|
||||
import torch
|
||||
from log import logger
|
||||
|
||||
class FFmpegStreamer(threading.Thread):
|
||||
"""独立推流线程 - 避免阻塞检测线程"""
|
||||
|
||||
def __init__(self, config, fps, frame_width, frame_height):
    """Push-stream thread: feeds raw frames to an FFmpeg subprocess.

    config: app config dict — only the 'push' section is kept.
    fps / frame_width / frame_height: geometry of the incoming frames.
    """
    super().__init__()
    self.config = config['push']
    self.fps = fps
    self.original_width = frame_width
    self.original_height = frame_height

    # Output uses the original resolution (no scaling applied).
    self.scaled_width = frame_width
    self.scaled_height = frame_height

    self.queue = queue.Queue(maxsize=10)  # bounded hand-off queue
    self.running = True
    self.process = None                    # FFmpeg subprocess handle
    self.process_lock = threading.Lock()   # guards process start/stop
    self.process_starting = threading.Event()  # suppresses concurrent starts
    self.daemon = True
    self.last_drop_warning = 0
    self.frame_buffer = None

    # Statistics.
    self.total_frames_sent = 0
    self.total_frames_dropped = 0
    self.frame_counter = 0
    self.start_time = time.time()
    self.avg_process_time = 0.033  # initial EMA = 30fps frame interval (1/30)

    # Restart / back-off parameters.
    self.max_restarts = 5
    self.restart_count = 0
    self.min_restart_delay = 0.5
    self.max_restart_delay = 5.0
    self.last_restart_time = 0

    # Restart coordination state.
    self.restart_lock = threading.Lock()
    self.restart_scheduled = False
    self.last_restart_attempt = 0
    self.pipe_broken = False  # set when a write hits a broken pipe

    # Precomputed raw-frame size (BGR24 = 3 bytes/pixel).
    self.buffer_size = self.scaled_width * self.scaled_height * 3
    logger.info(
        f"推流线程初始化 | 分辨率: {self.scaled_width}x{self.scaled_height} | 缓冲区: {self.buffer_size // 1024}KB")
|
||||
|
||||
def run(self):
    """Main push loop: pull frames off the queue and pipe them to FFmpeg.

    A None item on the queue is the stop signal. Relies on process_frame /
    write_frame / stop_ffmpeg / log_statistics defined elsewhere in this
    class (not visible in this chunk).
    """
    logger.info(f"启动推流线程 | 分辨率: {self.scaled_width}x{self.scaled_height} | FPS: {self.fps}")

    # Make sure FFmpeg is up before consuming frames.
    self.start_ffmpeg()

    while self.running:
        frame_start = time.perf_counter()

        try:
            # Scale the queue timeout with the recent processing time.
            timeout = max(0.1, min(1.0, self.avg_process_time * 2))
            frame = self.queue.get(timeout=timeout)

            if frame is None:  # stop signal
                logger.info("接收到停止信号")
                break

            # Restart FFmpeg if the subprocess died.
            if self.process_failed():
                logger.warning("检测到进程失效,尝试重启")
                self.safe_restart_ffmpeg()

            # Prepare the frame for the pipe (helper not visible here).
            processed_frame = self.process_frame(frame)

            # Write to FFmpeg's stdin; failures are counted, not raised.
            write_success = self.write_frame(processed_frame)

            if write_success:
                self.total_frames_sent += 1
                self.pipe_broken = False  # clear the broken-pipe flag
            else:
                self.total_frames_dropped += 1
                # Fast restart after a failed write.
                if self.process_failed():
                    self.safe_restart_ffmpeg()
        except queue.Empty:
            # Idle: run the periodic liveness probe.
            self.process_heartbeat()
            continue
        except Exception as e:
            logger.error(f"处理帧时发生未知错误: {str(e)}", exc_info=True)
            self.total_frames_dropped += 1

        # EMA-smooth the per-frame processing time.
        elapsed = time.perf_counter() - frame_start
        self.avg_process_time = self.avg_process_time * 0.9 + elapsed * 0.1

    # Teardown and final stats.
    self.stop_ffmpeg()
    self.log_statistics()
    logger.info("推流线程已停止")
|
||||
|
||||
def safe_restart_ffmpeg(self):
    """Restart FFmpeg with debouncing and exponential back-off.

    Returns True when a new process was started; False when the restart
    was skipped (too soon, too many attempts) or failed.
    """
    with self.restart_lock:
        current_time = time.time()

        # Debounce: at most one restart attempt every 2 seconds.
        if current_time - self.last_restart_attempt < 2.0:
            logger.debug("跳过重启:距离上次尝试时间过短")
            return False

        self.last_restart_attempt = current_time

        # Give up permanently once the restart budget is exhausted.
        self.restart_count += 1
        if self.restart_count > self.max_restarts:
            logger.error(f"达到最大重启次数 {self.max_restarts},停止推流")
            self.running = False
            return False

        # Exponential back-off, capped at max_restart_delay.
        delay = min(
            self.max_restart_delay,
            self.min_restart_delay * (2 ** (self.restart_count - 1))
        )

        logger.info(f"准备重启FFmpeg ({self.restart_count}/{self.max_restarts}), {delay:.1f}秒后执行...")
        # NOTE(review): this sleep runs while restart_lock is held, so any
        # other caller of safe_restart_ffmpeg blocks for the whole delay.
        time.sleep(delay)
        self.last_restart_time = current_time

        try:
            return self.start_ffmpeg()
        except Exception as e:
            logger.error(f"重启失败: {str(e)}")
            return False
|
||||
|
||||
def process_failed(self):
|
||||
"""检查进程状态(带状态缓存)"""
|
||||
if not self.process:
|
||||
return True
|
||||
|
||||
try:
|
||||
# 检查进程是否已终止
|
||||
if self.process.poll() is not None:
|
||||
return True
|
||||
|
||||
# 额外检查进程是否真实存在
|
||||
try:
|
||||
# 使用psutil检查进程状态(如果可用)
|
||||
import psutil
|
||||
if not psutil.pid_exists(self.process.pid):
|
||||
return True
|
||||
process = psutil.Process(self.process.pid)
|
||||
if process.status() == psutil.STATUS_ZOMBIE:
|
||||
return True
|
||||
except ImportError:
|
||||
# 如果没有psutil,使用简单检查
|
||||
if os.name == 'posix' and not os.path.exists(f"/proc/{self.process.pid}"):
|
||||
return True
|
||||
|
||||
return False
|
||||
except Exception:
|
||||
return True
|
||||
|
||||
def process_heartbeat(self):
|
||||
"""空闲时的心跳检测"""
|
||||
# 每5秒检测一次进程状态
|
||||
if time.time() - getattr(self, '_last_heartbeat', 0) > 5:
|
||||
if self.process_failed():
|
||||
logger.warning("心跳检测发现进程失效,尝试重启")
|
||||
self.safe_restart_ffmpeg()
|
||||
self._last_heartbeat = time.time()
|
||||
|
||||
def start_ffmpeg(self):
    """Safely (re)start the FFmpeg subprocess.

    Returns True on success, False when a start was already in progress,
    the process died immediately, or launching raised.
    """
    # Suppress concurrent starts from multiple threads.
    if self.process_starting.is_set():
        logger.debug("FFmpeg已在启动中,跳过重复启动")
        return False

    self.process_starting.set()
    success = False
    try:
        with self.process_lock:
            # Ensure any existing process is shut down first.
            if self.process:
                self.stop_ffmpeg()

            logger.info(f"启动FFmpeg推流进程 | 目标地址: {self.config['url']}")

            # Build the FFmpeg argv.
            command = self.build_ffmpeg_command()

            # Launch: raw frames go in via stdin, stderr is monitored.
            self.process = subprocess.Popen(
                command,
                stdin=subprocess.PIPE,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.PIPE,
                bufsize=0  # unbuffered pipe
            )

            # Start a daemon thread that drains/monitors stderr.
            threading.Thread(
                target=self.monitor_ffmpeg,
                daemon=True,
                name="FFmpegMonitor"
            ).start()

            logger.info(f"FFmpeg启动成功 PID: {self.process.pid}")
            self.restart_count = 0  # reset the restart budget
            self.pipe_broken = False  # clear the broken-pipe flag

            # Give the process a moment to stabilize.
            time.sleep(0.5)

            # Detect an immediate crash (bad args, unreachable URL, ...).
            if self.process_failed():
                logger.error("FFmpeg启动后立即退出")
                # Best-effort dump of FFmpeg's stderr for diagnosis.
                try:
                    err_output = self.process.stderr.read()
                    if err_output:
                        logger.error(f"FFmpeg错误输出: {err_output.decode('utf-8', errors='ignore')}")
                except:
                    pass
                self.process = None
                return False

            success = True
    except Exception as e:
        logger.error(f"启动FFmpeg失败: {str(e)}", exc_info=True)
        self.process = None
    finally:
        self.process_starting.clear()
        # NOTE(review): `return` inside `finally` overrides any in-flight
        # exception (including SystemExit/KeyboardInterrupt) — flagged,
        # intentionally left unchanged here.
        return success
|
||||
|
||||
def build_ffmpeg_command(self):
    """Assemble the FFmpeg command line for raw-frame stdin streaming.

    Input is rawvideo read from stdin at the scaled resolution and
    configured FPS; output is encoded with the configured codec/preset
    and pushed to ``self.config['url']``. CUDA hardware-acceleration
    flags are spliced in right after the program name when enabled and
    a CUDA device is available.
    """
    width, height = self.scaled_width, self.scaled_height

    # Input side: raw frames piped in via stdin.
    input_args = [
        '-y', '-an',  # overwrite output, no audio
        '-loglevel', 'warning',  # keep FFmpeg's own log noise down
        '-f', 'rawvideo',
        '-vcodec', 'rawvideo',
        '-pix_fmt', self.config.get('pixel_format', 'bgr24'),
        '-s', f'{width}x{height}',
        '-r', str(self.fps),
        '-i', '-',  # read from standard input
    ]

    # Output side: encoder, rate control, container and push target.
    output_args = [
        '-c:v', self.config['video_codec'],
        '-pix_fmt', 'yuv420p',
        '-preset', self.config['preset'],
        '-g', str(int(self.fps * 1.5)),  # keyframe interval
        '-crf', str(self.config.get('crf', 23)),  # quality factor
        '-b:v', self.config.get('bitrate', '2000k'),  # bitrate target
        '-bufsize', self.config.get('bufsize', '1000k'),  # rate-control buffer
        '-threads', '0',  # let FFmpeg pick the thread count
        '-f', self.config['format'],
        '-fflags', 'nobuffer',  # minimize latency buffering
        self.config['url'],
    ]

    command = ['ffmpeg'] + input_args + output_args

    # Optional CUDA hardware acceleration (input options, so they must
    # precede '-i'; index 1 is immediately after 'ffmpeg').
    if self.config.get('gpu_acceleration', False) and torch.cuda.is_available():
        command[1:1] = [
            '-hwaccel', 'cuda',
            '-hwaccel_output_format', 'cuda',
            '-extra_hw_frames', '8'
        ]
        logger.info("启用CUDA硬件加速")
    logger.debug(f"FFmpeg命令: {' '.join(command)}")
    return command
|
||||
|
||||
def monitor_ffmpeg(self):
    """Watch FFmpeg's stderr stream until the worker or process stops.

    Reads stderr line by line and forwards each decoded line to
    process_ffmpeg_output(); exits when the worker is stopped, the
    FFmpeg process terminates, or reading fails.
    """
    logger.info("启动FFmpeg监控线程")
    try:
        stderr = self.process.stderr
        while self.running and self.process and self.process.poll() is None:
            try:
                line = stderr.readline()
                if line:
                    line = line.decode('utf-8', errors='ignore').strip()
                    self.process_ffmpeg_output(line)
                else:
                    # Empty read: the process may have exited.
                    if self.process.poll() is not None:
                        break
                    # Still alive, just no output yet - avoid busy-spinning.
                    time.sleep(0.1)
            except Exception as e:
                logger.error(f"监控线程读取错误: {str(e)}")
                break
    except Exception as e:
        logger.error(f"监控线程异常: {str(e)}", exc_info=True)
    finally:
        logger.info("FFmpeg监控线程已退出")
|
||||
|
||||
def process_ffmpeg_output(self, line):
    """Classify one line of FFmpeg stderr output and log it appropriately.

    Error lines get extra diagnostics; a detected broken pipe marks the
    stream as broken and schedules a restart. Progress lines
    (frame=/fps=) are suppressed to avoid log spam.
    """
    if not line:
        return

    error_tags = ('[error]', 'Failed', 'Error')
    warning_tags = ('[warning]', 'WARNING')

    if any(tag in line for tag in error_tags):
        logger.error(f"FFmpeg ERROR: {line}")

        # Handle the critical error cases explicitly.
        if 'Connection refused' in line:
            logger.error("目标服务器拒绝连接,请检查服务器状态")
        elif 'Cannot open connection' in line:
            logger.error("无法建立网络连接,检查网络和防火墙")
        elif 'Broken pipe' in line:
            logger.critical("检测到底层管道破裂")
            self.pipe_broken = True  # mark the pipe as broken
            # Schedule an immediate restart.
            self.schedule_restart()
        return

    if any(tag in line for tag in warning_tags):
        logger.warning(f"FFmpeg WARN: {line}")
    elif 'frame=' in line and 'fps=' in line:
        # Ignore progress output to keep the log clean.
        pass
    else:
        logger.debug(f"FFmpeg INFO: {line}")
|
||||
|
||||
def schedule_restart(self):
    """Schedule a safe FFmpeg restart on a background daemon thread.

    The ``restart_scheduled`` flag prevents multiple restarts from being
    queued at once; it is cleared once the restart attempt finishes.
    """
    if self.restart_scheduled:
        return  # a restart is already pending

    self.restart_scheduled = True

    def _run_restart():
        try:
            if self.running:
                self.safe_restart_ffmpeg()
        except Exception as e:
            logger.error(f"安全重启失败: {str(e)}")
        finally:
            self.restart_scheduled = False

    threading.Thread(target=_run_restart, daemon=True).start()
|
||||
|
||||
def write_frame(self, frame_data):
    """Write one raw frame buffer to FFmpeg's stdin, with one retry.

    Returns True on a successful write. On a broken pipe or a fatal OS
    error the broken-pipe flag is set / a restart is scheduled and
    False is returned. EAGAIN (pipe temporarily full) is retried once
    after a short sleep.
    """
    if not self.process:
        return False

    # If the pipe is already broken, fail fast and trigger a restart.
    if self.pipe_broken:
        logger.warning("管道已破裂,跳过写入并触发重启")
        self.schedule_restart()
        return False

    max_retries = 1  # keep retries low to bound per-frame latency
    for attempt in range(max_retries + 1):
        try:
            # Check the process state first.
            if self.process_failed():
                return False

            # Attempt the write under the process lock.
            with self.process_lock:
                if self.process and self.process.stdin and self.process.poll() is None:
                    self.process.stdin.write(frame_data)
                    self.process.stdin.flush()
                    return True
                else:
                    logger.debug("写入失败 - 管道已关闭")
                    return False

        except BrokenPipeError:
            logger.warning(f"管道破裂 (尝试 {attempt + 1}/{max_retries + 1})")
            self.pipe_broken = True  # mark the pipe as broken
            self.schedule_restart()
            return False
        except OSError as e:
            # Special-case EAGAIN (non-blocking pipe temporarily full):
            # back off briefly and retry instead of restarting.
            if e.errno == errno.EAGAIN:
                logger.warning("资源暂时不可用,等待后重试")
                time.sleep(0.05)
            else:
                logger.error(f"系统级写入错误: {os.strerror(e.errno)}")
                self.schedule_restart()
                return False
        except Exception as e:
            logger.error(f"写入异常: {str(e)}")
            self.schedule_restart()
            return False

        # Brief, growing backoff before the next attempt.
        time.sleep(0.02 * (attempt + 1))

    return False
|
||||
|
||||
def stop_ffmpeg(self):
    """Stop the FFmpeg process safely, escalating through three layers.

    1. Close stdin so FFmpeg can flush and exit on its own.
    2. Send SIGTERM and wait briefly for a graceful exit.
    3. SIGKILL as a last resort.

    All steps are best-effort: failures are logged/ignored so the caller
    can always proceed. ``self.process`` is cleared on return.

    Fix: bare ``except:`` clauses were narrowed to ``except Exception:``
    so KeyboardInterrupt/SystemExit can still propagate during shutdown.
    """
    with self.process_lock:
        if not self.process:
            return

        pid = self.process.pid
        logger.info(f"停止FFmpeg进程 PID: {pid}...")

        # Layer 1: close the input stream (lets FFmpeg finish cleanly).
        if self.process.stdin:
            try:
                self.process.stdin.close()
            except Exception:
                pass

        # Layer 2: ask the process to terminate (SIGTERM).
        try:
            self.process.terminate()
        except Exception:
            pass

        # Wait for a graceful exit.
        try:
            self.process.wait(timeout=1.0)
        except subprocess.TimeoutExpired:
            # Layer 3: force-kill the process (SIGKILL).
            try:
                logger.warning(f"强制终止FFmpeg进程 PID: {pid}")
                self.process.kill()
                self.process.wait(timeout=0.5)
            except Exception:
                pass
        except Exception:
            pass

        # Verify the process actually terminated.
        if self.process.poll() is None:
            logger.warning(f"FFmpeg进程 PID: {pid} 未能正常终止")

        self.process = None
        logger.info(f"FFmpeg进程 PID: {pid} 已停止")
|
||||
|
||||
def process_frame(self, frame):
    """Normalize a frame to uint8 BGR at the target size; return raw bytes.

    Resizes only when the scaled size differs from the original size,
    converts BGRA and grayscale inputs to 3-channel BGR, and returns the
    byte buffer expected by FFmpeg's rawvideo stdin.
    """
    target_size = (self.scaled_width, self.scaled_height)
    source_size = (self.original_width, self.original_height)

    # Rescale only when needed, using a high-quality filter.
    if target_size != source_size:
        frame = cv2.resize(frame, target_size,
                           interpolation=cv2.INTER_LANCZOS4)

    # Force 8-bit pixel data.
    if frame.dtype != np.uint8:
        frame = frame.astype(np.uint8)

    # Collapse alpha / expand grayscale to 3-channel BGR.
    ndim = len(frame.shape)
    if ndim == 3 and frame.shape[2] == 4:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)
    elif ndim == 2:
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)

    return frame.tobytes()
|
||||
|
||||
def add_frame(self, frame):
    """Enqueue a frame for streaming with adaptive backpressure.

    When the queue is under ~80% capacity the frame is enqueued
    directly. When it is full, up to half the backlog (capped at 5
    frames) is evicted to make room.

    Fix: ``total_frames_dropped`` now advances by the number of frames
    actually lost — previously it incremented by exactly 1 regardless of
    how many frames were evicted, and counted the new frame as dropped
    even when it was successfully enqueued.

    Returns:
        bool: True if the frame was enqueued, False if it was dropped.
    """
    # While the pipe is broken there is no consumer; drop immediately.
    if self.pipe_broken:
        logger.debug("管道破裂状态,跳过帧添加")
        self.total_frames_dropped += 1
        return False

    try:
        queue_size = self.queue.qsize()
        queue_capacity = self.queue.maxsize

        # Fast path: the queue has comfortable headroom.
        if queue_size < queue_capacity * 0.8:
            self.queue.put_nowait(frame)
            return True

        # Queue is getting full - try once, then evict and retry.
        try:
            self.queue.put_nowait(frame)
            return True
        except queue.Full:
            # Evict a small batch of stale frames to make room.
            evicted = 0
            for _ in range(min(5, queue_size // 2)):
                try:
                    self.queue.get_nowait()
                    evicted += 1
                except queue.Empty:
                    break

            try:
                self.queue.put_nowait(frame)
                # Only the evicted frames were lost; the new one got in.
                self.total_frames_dropped += evicted
                return True
            except queue.Full:
                # The evicted frames plus the new frame were all lost.
                self.total_frames_dropped += evicted + 1
                return False

    except Exception as e:
        logger.debug(f"帧处理错误: {str(e)}")
        self.total_frames_dropped += 1
        return False
|
||||
|
||||
def log_statistics(self):
    """Log a summary of streaming performance since ``start_time``.

    Reports duration, frame totals, drop rate, average FPS and restart
    count. Does nothing when no frames were processed at all.
    """
    elapsed = time.time() - self.start_time
    sent = self.total_frames_sent
    dropped = self.total_frames_dropped
    total = sent + dropped

    if total <= 0:
        return

    avg_fps = sent / elapsed
    drop_rate = (dropped / total) * 100

    summary = (
        "推流统计结果:\n"
        f" 持续时间: {elapsed:.1f}秒\n"
        f" 总帧数: {total}\n"
        f" 成功发送: {sent}\n"
        f" 丢弃帧数: {dropped}\n"
        f" 丢帧率: {drop_rate:.1f}%\n"
        f" 平均FPS: {avg_fps:.1f}\n"
        f" 重启次数: {self.restart_count}"
    )
    logger.info(summary)
|
||||
|
||||
def stop(self):
    """Stop the streaming thread gracefully.

    Idempotent: returns immediately if already stopped. Clears the
    running flag, wakes a blocked queue consumer with sentinel entries,
    shuts down FFmpeg, and waits up to 3 seconds for the thread to exit
    (skipped when called from the thread itself to avoid self-join).
    """
    if not self.running:
        return

    logger.info("停止推流线程...")
    self.running = False

    # Push a sentinel so a blocked queue consumer wakes up.
    try:
        self.queue.put(None, block=False)
    except:
        pass

    # Tear down the FFmpeg process.
    self.stop_ffmpeg()

    # Second sentinel in case the first was consumed already.
    try:
        self.queue.put(None, block=False)
    except:
        pass

    # Wait for the worker thread to finish (never join ourselves).
    if self.is_alive() and threading.current_thread() != self:
        self.join(3.0)
        if self.is_alive():
            logger.warning("推流线程未能及时停止")

    logger.info("推流线程已停止")
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
# globalvar.py
|
||||
|
||||
# Module-level shared-state registry. Initialized eagerly so that
# set_value/get_value work even if _init() was never called (previously
# they raised NameError in that case).
_global_dict = {}


def _init():
    """Reset the shared global dictionary to an empty state."""
    global _global_dict
    _global_dict = {}


def set_value(name, value):
    """Store *value* under key *name* in the shared dictionary."""
    _global_dict[name] = value


def get_value(name, defValue=None):
    """Return the value stored under *name*, or *defValue* if absent."""
    try:
        return _global_dict[name]
    except KeyError:
        return defValue
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
import logging
|
||||
from logging.handlers import RotatingFileHandler
|
||||
|
||||
|
||||
# 配置日志处理
|
||||
def setup_logger():
    """Build the application logger with console + rotating-file output.

    Keeps disk I/O bounded by rotating the log file at 5 MB with three
    backups, and silences chatty third-party loggers.
    """
    log = logging.getLogger("YOLOv8 Optimized")
    log.setLevel(logging.INFO)

    # Drop any handlers left over from a previous call.
    for old_handler in list(log.handlers):
        log.removeHandler(old_handler)

    # Shared format for console and file output.
    fmt = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')

    # Console output with low latency.
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    log.addHandler(stream_handler)

    # Rotating file output: capped size, a few backups, opened eagerly.
    rotating_handler = RotatingFileHandler(
        'yolo_detection.log',
        maxBytes=5 * 1024 * 1024,  # cap each log file at 5MB
        backupCount=3,
        encoding='utf-8',
        delay=False,  # open the file immediately
    )
    rotating_handler.setFormatter(fmt)
    log.addHandler(rotating_handler)

    # Quiet down noisy framework loggers.
    for noisy in ("werkzeug", "engineio", "socketio"):
        logging.getLogger(noisy).setLevel(logging.WARNING)

    return log


logger = setup_logger()
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
# 主程序入口
|
||||
import os
|
||||
import signal
|
||||
import threading
|
||||
import torch
|
||||
from log import logger
|
||||
|
||||
import global_data as gd
|
||||
from server import socketio, app
|
||||
|
||||
# Initialize the shared global-state registry before anything uses it.
gd._init()

gd.set_value('detection_thread', None)
gd.set_value('detection_active', False)
gd.set_value('stop_event', threading.Event())
gd.set_value('mqtt_client', None)
gd.set_value('latest_drone_data', None)
gd.set_value('mqtt_data_lock', threading.Lock())

if __name__ == '__main__':
    logger.info("启动优化版YOLOv8服务")
    logger.info(f"PyTorch版本: {torch.__version__}, CUDA可用: {torch.cuda.is_available()}")

    def graceful_exit(signum, frame):
        """Signal handler: stop the detection thread, then exit the process."""
        logger.info("收到退出信号,停止服务...")
        detection_active = gd.get_value('detection_active')
        detection_thread = gd.get_value('detection_thread')
        if detection_active and detection_thread:
            detection_thread.stop()
            detection_thread.join(5.0)
        logger.info("服务已退出")
        # os._exit skips atexit handlers on purpose so the blocking
        # server loop cannot prevent shutdown.
        os._exit(0)

    signal.signal(signal.SIGINT, graceful_exit)
    signal.signal(signal.SIGTERM, graceful_exit)

    # Start the Socket.IO server.
    # BUG FIX: debug was True while the adjacent comment claimed debug
    # mode was disabled; the Werkzeug interactive debugger must not run
    # in a deployed service.
    socketio.run(app,
                 host='0.0.0.0',
                 port=9309,
                 debug=False,  # debug mode disabled
                 use_reloader=False,
                 allow_unsafe_werkzeug=True)
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
# Compact class-id -> Chinese-label mapping with per-class confidence
# thresholds. (Fix: dropped the redundant `dict({...})` wrapper, which
# built a literal dict and then copied it.)
class_mapping_cn = {
    '0': {
        "name": "汽车",
        "reliability": 0.5
    },
    '1': {
        "name": "卡车",
        "reliability": 0.5
    },
    '2': {
        "name": "公交车",
        "reliability": 0.5
    },
    '3': {
        "name": "商用车",
        "reliability": 0.5
    },
    '4': {
        "name": "大货车",
        "reliability": 0.5
    }
}
|
||||
|
|
@ -0,0 +1,132 @@
|
|||
import torch
|
||||
from flask import Flask, jsonify, request, render_template
|
||||
from flask_socketio import SocketIO
|
||||
from config import get_default_config
|
||||
from detectionThread import DetectionThread
|
||||
import global_data as gd
|
||||
from log import logger
|
||||
from mapping_cn import class_mapping_cn
|
||||
|
||||
# Flask application setup: static assets served from /static.
app = Flask(__name__, static_url_path='/static')
# Socket.IO server riding on the Flask app; threading async mode so it
# cooperates with the background detection thread.
socketio = SocketIO(app,
                    cors_allowed_origins="*",
                    async_mode='threading',
                    allow_unsafe_werkzeug=True,
                    max_http_buffer_size=5 * 1024 * 1024)  # enlarged WebSocket buffer
|
||||
|
||||
|
||||
# ======================= Flask路由 =======================
|
||||
@app.route('/', methods=['GET'])
def main():
    """Serve the video-player landing page."""
    template_name = "flv2.html"
    return render_template(template_name)
|
||||
|
||||
|
||||
@app.route('/start_detection', methods=['POST'])
def start_detection():
    """Start the detection thread, applying overrides from the JSON body.

    Returns 400 when detection is already running. Recognized JSON keys:
    rtmp_url, push_url, taskname, tag, taskid, imgsz, frame_skip,
    model_name, aiid, device.
    """
    detection_active = gd.get_value('detection_active')
    if detection_active:
        return jsonify({"status": "error", "message": "检测已在运行"}), 400

    config = get_default_config()
    config['socketIO'] = socketio

    # Apply per-request configuration overrides.
    body = request.json
    if body:
        # Source RTMP address.
        if 'rtmp_url' in body:
            config['rtmp']['url'] = body['rtmp_url']
        # Push (output) address.
        if 'push_url' in body and body['push_url'] is not None:
            config['push']['url'] = body['push_url']
        # MinIO folder name.
        if 'taskname' in body:
            config['task']['taskname'] = body['taskname']
        # Class-label mapping.
        # BUG FIX: was `request.json['tag'] is not {}` — an identity
        # comparison against a fresh dict literal, which is always True.
        # Use truthiness so an empty/missing tag falls back to defaults.
        if body.get('tag'):
            config['task']['tag'] = body['tag']
        else:
            config['task']['tag'] = class_mapping_cn

        if 'taskid' in body:
            config['task']['taskid'] = body['taskid']
        # Performance tuning: clamp inference size to a sane range.
        if 'imgsz' in body:
            config['predict']['imgsz'] = max(128, min(1920, body['imgsz']))
        if 'frame_skip' in body:
            config['predict']['frame_skip'] = body['frame_skip']
        if 'model_name' in body:
            config['model']['path'] = body['model_name']
        # BUG FIX: the guard checked 'aiid' but read body['AlgoId'],
        # raising KeyError whenever only 'aiid' was supplied.
        if 'aiid' in body:
            config['task']['aiid'] = body['aiid']
        if 'device' in body:
            # BUG FIX: `x == "cuda:0" or "cpu"` was always truthy due to
            # operator precedence; validate with a membership test so
            # invalid device strings fall back to auto-detection.
            if body['device'] in ("cuda:0", "cpu"):
                config['predict']['device'] = body['device']
            else:
                config['predict']['device'] = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # Create and start the detection thread.
    detection_thread = DetectionThread(config)
    gd.set_value('detection_thread', detection_thread)
    detection_thread.start()
    gd.set_value('detection_active', True)
    return jsonify({
        "status": "success",
        "message": "目标检测已启动"
    })
|
||||
|
||||
|
||||
@app.route('/stop_detection', methods=['POST'])
def stop_detection():
    """Stop the running detection thread and report the outcome."""
    active = gd.get_value('detection_active')
    worker = gd.get_value('detection_thread')
    if not (active and worker):
        return jsonify({"status": "error", "message": "检测未运行"}), 400

    # Ask the thread to stop, then give it a bounded grace period.
    worker.stop()
    worker.join(3.0)

    if worker.is_alive():
        logger.warning("检测线程未在规定时间停止")
    else:
        logger.info("检测线程已停止")

    gd.set_value('detection_active', False)
    gd.set_value('detection_thread', None)
    return jsonify({
        "status": "success",
        "message": "目标检测已停止"
    })
|
||||
|
||||
|
||||
@app.route('/status', methods=['GET'])
def get_status():
    """Report whether detection is running, plus live runtime metrics."""
    active = gd.get_value('detection_active')
    worker = gd.get_value('detection_thread')
    if not (active and worker):
        return jsonify({"active": False})

    status = {
        "active": True,
        "fps": round(worker.last_fps, 1),  # smoothed FPS value
        "frame_count": worker.frame_count,
        "detections_count": worker.detections_count,
        "rtmp_url": worker.rtmp_url,
        "reconnect_attempts": worker.reconnect_attempts,
    }
    if torch.cuda.is_available():
        status['gpu_memory'] = torch.cuda.memory_allocated() // (1024 * 1024)
    return jsonify(status)
|
||||
|
||||
|
||||
# WebSocket事件
|
||||
@socketio.on('connect')
def handle_connect():
    """Log each new Socket.IO client connection."""
    sid = request.sid
    logger.info(f"Socket客户端已连接: {sid}")
|
||||
|
||||
|
||||
@socketio.on('disconnect')
def handle_disconnect():
    """Log each Socket.IO client disconnection."""
    sid = request.sid
    logger.info(f"Socket客户端断开: {sid}")
|
||||
|
After Width: | Height: | Size: 5.0 MiB |
|
After Width: | Height: | Size: 5.1 MiB |
|
After Width: | Height: | Size: 5.1 MiB |
|
After Width: | Height: | Size: 8.7 MiB |
|
After Width: | Height: | Size: 11 MiB |
|
After Width: | Height: | Size: 5.6 MiB |
|
After Width: | Height: | Size: 5.8 MiB |
|
After Width: | Height: | Size: 5.6 MiB |
|
After Width: | Height: | Size: 5.6 MiB |
|
After Width: | Height: | Size: 5.7 MiB |
|
After Width: | Height: | Size: 5.8 MiB |
|
After Width: | Height: | Size: 5.7 MiB |
|
After Width: | Height: | Size: 5.7 MiB |
|
After Width: | Height: | Size: 5.5 MiB |
|
After Width: | Height: | Size: 5.4 MiB |