Fixed the video recording feature; removed redundant front-end code that was causing stuttering
parent 4932cb18fc
commit c88845dd6e
@ -17,6 +17,8 @@ import logging
from collections import deque
import gc
from matplotlib.colors import LinearSegmentedColormap
from scipy import ndimage
from scipy.interpolate import griddata

try:
from .base_device import BaseDevice
@ -118,7 +120,9 @@ class FemtoBoltManager(BaseDevice):

# 自定义彩虹色 colormap(参考testfemtobolt.py)
colors = ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
self.custom_cmap = LinearSegmentedColormap.from_list("custom_cmap", colors)
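As a side note, a minimal standalone sketch (with made-up normalized depth values) of how this repeated-colour LinearSegmentedColormap turns values in [0, 1] into RGB triples:

import numpy as np
from matplotlib.colors import LinearSegmentedColormap

# Repeating the colour cycle makes the gradient wrap several times across [0, 1],
# which produces the banded "rainbow" look used for the depth view.
colors = ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue'] * 4
cmap = LinearSegmentedColormap.from_list("custom_cmap", colors)

normalized = np.array([0.0, 0.25, 0.5, 0.75, 1.0])   # hypothetical normalized depths
rgba = cmap(normalized)                               # shape (5, 4), float RGBA in [0, 1]
rgb = (rgba[:, :3] * 255).astype(np.uint8)
print(rgb)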
|
||||
|
||||
self.logger.info("FemtoBolt管理器初始化完成")
|
||||
@ -491,6 +495,135 @@ class FemtoBoltManager(BaseDevice):
|
||||
|
||||
frame_count = 0
|
||||
|
||||
try:
|
||||
while self.is_streaming:
|
||||
# 发送频率限制
|
||||
now = time.time()
|
||||
if now - self._last_send_time < self._min_send_interval:
|
||||
time.sleep(0.001)
|
||||
continue
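This throttle simply skips loop iterations until a minimum interval has elapsed. A simplified sketch of the same pattern, with a hypothetical interval and a placeholder work function:

import time

min_send_interval = 1.0 / 30      # hypothetical cap of roughly 30 emits per second
last_send_time = 0.0

def send_frame():                  # placeholder for the real encode/emit work
    pass

for _ in range(100):               # stand-in for `while self.is_streaming`
    now = time.time()
    if now - last_send_time < min_send_interval:
        time.sleep(0.001)          # yield briefly instead of busy-waiting
        continue
    send_frame()
    last_send_time = now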
|
||||
|
||||
if self.device_handle and self._socketio:
|
||||
try:
|
||||
capture = self.device_handle.update()
|
||||
if capture is not None:
|
||||
try:
|
||||
ret, depth_image = capture.get_depth_image()
|
||||
if ret and depth_image is not None:
|
||||
# 确保二维数据
|
||||
# if depth_image.ndim == 3 and depth_image.shape[2] == 1:
|
||||
# depth_image = depth_image[:, :, 0]
|
||||
|
||||
# 使用My_TestFemtobolt.py的原始算法处理深度数据
|
||||
depth = depth_image.copy()
|
||||
|
||||
# 深度数据过滤 (根据depth_range参数动态设置)
|
||||
depth[depth > self.depth_range_max] = 0
|
||||
depth[depth < self.depth_range_min] = 0
|
||||
|
||||
# 裁剪感兴趣区域 (与My_TestFemtobolt.py完全一致)
|
||||
# depth = depth[50:200, 50:210]
|
||||
|
||||
|
||||
# 使用 np.ma.masked_equal() 来屏蔽深度图中的零值 (与My_TestFemtobolt.py完全一致)
|
||||
depth_masked = np.ma.masked_equal(depth, 0)
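A small self-contained example of the range filter plus zero-masking used here, on made-up millimetre depth values:

import numpy as np

depth = np.array([[0, 850, 1200],
                  [1400, 1000, 950]], dtype=np.uint16)   # hypothetical depths in mm
depth_min, depth_max = 900, 1300

depth[depth > depth_max] = 0       # out-of-range pixels become 0
depth[depth < depth_min] = 0

depth_masked = np.ma.masked_equal(depth, 0)   # masked entries are ignored when rendering
print(depth_masked)                # 850 and 1400 are masked along with the original 0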
|
||||
|
||||
# 创建RGB背景图像
|
||||
rows, cols = depth.shape
|
||||
bg_rgb = np.ones((rows, cols, 3), dtype=np.uint8) * 128 # 灰色背景
|
||||
|
||||
# 绘制白色栅格线 (与My_TestFemtobolt.py完全一致)
|
||||
cell_size = 50
|
||||
grid_color = (255, 255, 255)
|
||||
for x in range(0, cols, cell_size):
|
||||
cv2.line(bg_rgb, (x, 0), (x, rows), grid_color, 1)
|
||||
for y in range(0, rows, cell_size):
|
||||
cv2.line(bg_rgb, (0, y), (cols, y), grid_color, 1)
|
||||
|
||||
# 处理有效深度数据 - 添加平滑处理算法
|
||||
valid_mask = ~depth_masked.mask if hasattr(depth_masked, 'mask') else (depth > 0)
|
||||
|
||||
if np.any(valid_mask):
|
||||
# 创建平滑的深度图像 (类似等高线效果)
|
||||
depth_smooth = self._apply_contour_smoothing(depth, valid_mask)
|
||||
|
||||
# 归一化平滑后的深度值到[0,1]范围
|
||||
valid_depth_smooth = depth_smooth[valid_mask]
|
||||
depth_normalized = (valid_depth_smooth.astype(np.float32) - self.depth_range_min) / (self.depth_range_max - self.depth_range_min)
|
||||
depth_normalized = np.clip(depth_normalized, 0, 1)
|
||||
|
||||
# 应用自定义colormap (与My_TestFemtobolt.py使用相同的colormap)
|
||||
rgba = self.custom_cmap(depth_normalized)
|
||||
rgb_values = (rgba[:, :3] * 255).astype(np.uint8)
|
||||
|
||||
# 将彩色深度值应用到背景图像上
|
||||
bg_rgb[valid_mask] = rgb_values
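A condensed sketch of the colouring step as a whole (filter, mask, normalise, apply the colormap, paint onto the grey background), using a synthetic depth frame:

import numpy as np
from matplotlib.colors import LinearSegmentedColormap

cmap = LinearSegmentedColormap.from_list(
    "custom_cmap", ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue'] * 4)

depth_min, depth_max = 900, 1300
depth = np.random.randint(0, 1500, size=(120, 160)).astype(np.float32)  # synthetic frame
depth[(depth < depth_min) | (depth > depth_max)] = 0

valid_mask = depth > 0
bg_rgb = np.ones((*depth.shape, 3), dtype=np.uint8) * 128        # grey background

normalized = np.clip((depth[valid_mask] - depth_min) / (depth_max - depth_min), 0, 1)
rgb = (cmap(normalized)[:, :3] * 255).astype(np.uint8)            # (N, 3) colours
bg_rgb[valid_mask] = rgb                                          # paint only valid pixels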
|
||||
|
||||
depth_colored_final = bg_rgb
|
||||
# 裁剪宽度
|
||||
height, width = depth_colored_final.shape[:2]
|
||||
target_width = height // 2
|
||||
if width > target_width:
|
||||
left = (width - target_width) // 2
|
||||
right = left + target_width
|
||||
depth_colored_final = depth_colored_final[:, left:right]
|
||||
|
||||
# 推送SocketIO
|
||||
success, buffer = cv2.imencode('.jpg', depth_colored_final, self._encode_param)
|
||||
if success and self._socketio:
|
||||
jpg_as_text = base64.b64encode(memoryview(buffer).tobytes()).decode('utf-8')
|
||||
self._socketio.emit('femtobolt_frame', {
|
||||
'depth_image': jpg_as_text,
|
||||
'frame_count': frame_count,
|
||||
'timestamp': now,
|
||||
'fps': self.actual_fps,
|
||||
'device_id': self.device_id,
|
||||
'depth_range': {
|
||||
'min': self.depth_range_min,
|
||||
'max': self.depth_range_max
|
||||
}
|
||||
}, namespace='/devices')
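The frame is JPEG-encoded, base64-encoded and pushed over Socket.IO. A minimal sketch of the encode step, assuming a placeholder image and a hypothetical JPEG quality setting (the emit call itself is as shown above):

import base64
import cv2
import numpy as np

frame = np.zeros((480, 240, 3), dtype=np.uint8)              # placeholder BGR image
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 80]            # hypothetical quality setting

success, buffer = cv2.imencode('.jpg', frame, encode_param)
if success:
    jpg_as_text = base64.b64encode(buffer.tobytes()).decode('utf-8')
    payload = {'depth_image': jpg_as_text, 'timestamp': 0.0}
    # then: socketio.emit('femtobolt_frame', payload, namespace='/devices')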
|
||||
frame_count += 1
|
||||
self._last_send_time = now
|
||||
|
||||
# 更新统计
|
||||
self._update_statistics()
|
||||
else:
|
||||
time.sleep(0.005)
|
||||
except Exception as e:
|
||||
# 捕获处理过程中出现异常,记录并继续
|
||||
self.logger.error(f"FemtoBolt捕获处理错误: {e}")
|
||||
finally:
|
||||
# 无论处理成功与否,都应释放capture以回收内存
|
||||
try:
|
||||
if hasattr(capture, 'release'):
|
||||
capture.release()
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
time.sleep(0.005)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f'FemtoBolt帧推送失败: {e}')
|
||||
time.sleep(0.05)
|
||||
|
||||
# 降低空转CPU
|
||||
time.sleep(0.001)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"FemtoBolt流处理异常: {e}")
|
||||
finally:
|
||||
self.is_streaming = False
|
||||
self.logger.info("FemtoBolt流工作线程结束")
|
||||
|
||||
def _streaming_worker_bak(self):
|
||||
"""
|
||||
流处理工作线程
|
||||
"""
|
||||
self.logger.info("FemtoBolt流工作线程启动")
|
||||
|
||||
frame_count = 0
|
||||
|
||||
try:
|
||||
while self.is_streaming:
|
||||
# 发送频率限制
|
||||
@ -680,6 +813,46 @@ class FemtoBoltManager(BaseDevice):
|
||||
except Exception as e:
|
||||
self.logger.error(f"发送深度数据失败: {e}")
|
||||
|
||||
def _apply_contour_smoothing(self, depth_image: np.ndarray, valid_mask: np.ndarray) -> np.ndarray:
|
||||
"""
|
||||
应用等高线平滑处理算法 (参考My_TestFemtobolt.py的contourf效果)
|
||||
|
||||
Args:
|
||||
depth_image: 原始深度图像
|
||||
valid_mask: 有效像素掩码
|
||||
|
||||
Returns:
|
||||
np.ndarray: 平滑处理后的深度图像
|
||||
"""
|
||||
try:
|
||||
# 创建平滑后的深度图像副本
|
||||
depth_smooth = depth_image.copy().astype(np.float32)
|
||||
|
||||
# 对有效区域进行高斯平滑 (模拟等高线的平滑效果)
|
||||
if np.any(valid_mask):
|
||||
# 使用高斯滤波进行平滑处理
|
||||
sigma = 1.5 # 平滑程度,可调节
|
||||
depth_smooth = ndimage.gaussian_filter(depth_smooth, sigma=sigma)
|
||||
|
||||
# 使用双边滤波进一步平滑,保持边缘
|
||||
# 注意:cv2.bilateralFilter只支持8位无符号整数和32位浮点数
|
||||
# 将深度值归一化到0-255范围用于双边滤波
|
||||
depth_min, depth_max = np.min(depth_smooth[valid_mask]), np.max(depth_smooth[valid_mask])
|
||||
if depth_max > depth_min:
|
||||
depth_normalized = ((depth_smooth - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
|
||||
depth_bilateral = cv2.bilateralFilter(depth_normalized, d=9, sigmaColor=75, sigmaSpace=75)
|
||||
# 将结果转换回原始深度范围
|
||||
depth_smooth = (depth_bilateral.astype(np.float32) / 255.0 * (depth_max - depth_min) + depth_min)
|
||||
|
||||
# 对无效区域保持原值
|
||||
depth_smooth[~valid_mask] = depth_image[~valid_mask]
|
||||
|
||||
return depth_smooth
|
||||
|
||||
except Exception as e:
|
||||
self.logger.warning(f"平滑处理失败,使用原始深度图像: {e}")
|
||||
return depth_image.astype(np.float32)
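A compact standalone version of the same Gaussian-plus-bilateral smoothing, applied to synthetic depth data:

import cv2
import numpy as np
from scipy import ndimage

depth = np.random.randint(900, 1300, size=(150, 160)).astype(np.float32)  # synthetic depths
valid_mask = depth > 0

smooth = ndimage.gaussian_filter(depth, sigma=1.5)             # first pass: Gaussian blur

d_min, d_max = smooth[valid_mask].min(), smooth[valid_mask].max()
if d_max > d_min:
    # bilateralFilter needs uint8 (or float32), so work on a normalised 8-bit copy
    norm8 = ((smooth - d_min) / (d_max - d_min) * 255).astype(np.uint8)
    bilateral = cv2.bilateralFilter(norm8, d=9, sigmaColor=75, sigmaSpace=75)
    smooth = bilateral.astype(np.float32) / 255.0 * (d_max - d_min) + d_min

smooth[~valid_mask] = depth[~valid_mask]                       # leave invalid pixels untouched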
|
||||
|
||||
def _update_statistics(self):
|
||||
"""
|
||||
更新性能统计
|
||||
|
@ -16,8 +16,10 @@ import logging
|
||||
import json
|
||||
import base64
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any
|
||||
from typing import Optional, Dict, Any, List
|
||||
import sys
|
||||
import psutil
|
||||
import gc
|
||||
|
||||
try:
|
||||
from .camera_manager import CameraManager
|
||||
@ -48,41 +50,118 @@ class RecordingManager:
|
||||
|
||||
# 录制状态
|
||||
self.sync_recording = False
|
||||
self.is_recording = False
|
||||
self.recording_stop_event = threading.Event()
|
||||
|
||||
# 会话信息
|
||||
self.current_session_id = None
|
||||
self.current_patient_id = None
|
||||
self.current_patient_id = None
|
||||
self.recording_start_time = None
|
||||
|
||||
# 视频写入器
|
||||
self.feet_video_writer = None
|
||||
self.screen_video_writer = None
|
||||
self.femtobolt_video_writer = None
|
||||
|
||||
# 录制线程
|
||||
self.feet_recording_thread = None
|
||||
self.screen_recording_thread = None
|
||||
|
||||
self.femtobolt_recording_thread = None
|
||||
|
||||
# 屏幕录制参数
|
||||
self.screen_fps = 20
|
||||
self.screen_fps = 25 # 与VideoWriter的fps保持一致
|
||||
self.screen_region = None
|
||||
self.camera_region = None
|
||||
self.femtobolt_region = None
|
||||
|
||||
# 屏幕尺寸
|
||||
self.screen_size = pyautogui.size()
|
||||
|
||||
# 输出目录
|
||||
self.screen_output_dir = None
|
||||
self.camera_output_dir = None
|
||||
self.femtobolt_output_dir = None
|
||||
|
||||
# 视频参数
|
||||
self.MAX_FRAME_SIZE = (1280, 720) # 最大帧尺寸
|
||||
|
||||
# CPU监控和性能优化参数
|
||||
self.cpu_threshold = 80.0 # CPU使用率阈值
|
||||
self.memory_threshold = 85.0 # 内存使用率阈值
|
||||
self.adaptive_fps = True # 是否启用自适应帧率
|
||||
self.min_fps = 10 # 最小帧率
|
||||
self.max_fps = 30 # 最大帧率
|
||||
self.current_fps = self.screen_fps # 当前动态帧率
|
||||
self.performance_check_interval = 30 # 性能检查间隔(帧数)
|
||||
self.frame_skip_count = 0 # 跳帧计数
|
||||
self.last_performance_check = 0 # 上次性能检查时间
|
||||
|
||||
# 日志
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
self.logger.info("录制管理器初始化完成")
|
||||
|
||||
def start_recording(self, session_id: str, patient_id: str) -> Dict[str, Any]:
|
||||
def _check_system_performance(self):
|
||||
"""
|
||||
检查系统性能指标
|
||||
|
||||
Returns:
|
||||
Dict: 包含CPU和内存使用率的字典
|
||||
"""
|
||||
try:
|
||||
cpu_percent = psutil.cpu_percent(interval=0.1)
|
||||
memory_info = psutil.virtual_memory()
|
||||
memory_percent = memory_info.percent
|
||||
|
||||
return {
|
||||
'cpu_percent': cpu_percent,
|
||||
'memory_percent': memory_percent,
|
||||
'available_memory_mb': memory_info.available / (1024 * 1024)
|
||||
}
|
||||
except Exception as e:
|
||||
self.logger.warning(f"性能检查失败: {e}")
|
||||
return {'cpu_percent': 0, 'memory_percent': 0, 'available_memory_mb': 0}
|
||||
|
||||
def _adjust_recording_performance(self, performance_data):
|
||||
"""
|
||||
根据系统性能调整录制参数
|
||||
|
||||
Args:
|
||||
performance_data: 性能数据字典
|
||||
"""
|
||||
if not self.adaptive_fps:
|
||||
return
|
||||
|
||||
cpu_percent = performance_data.get('cpu_percent', 0)
|
||||
memory_percent = performance_data.get('memory_percent', 0)
|
||||
|
||||
# 根据CPU使用率调整帧率
|
||||
if cpu_percent > self.cpu_threshold:
|
||||
# CPU使用率过高,降低帧率
|
||||
self.current_fps = max(self.min_fps, self.current_fps - 2)
|
||||
self.frame_skip_count += 1
|
||||
self.logger.warning(f"CPU使用率过高({cpu_percent:.1f}%),降低帧率至{self.current_fps}fps")
|
||||
elif cpu_percent < self.cpu_threshold - 20 and self.current_fps < self.max_fps:
|
||||
# CPU使用率较低,可以适当提高帧率
|
||||
self.current_fps = min(self.max_fps, self.current_fps + 1)
|
||||
self.logger.info(f"CPU使用率正常({cpu_percent:.1f}%),提高帧率至{self.current_fps}fps")
|
||||
|
||||
# 内存使用率过高时强制垃圾回收
|
||||
if memory_percent > self.memory_threshold:
|
||||
gc.collect()
|
||||
self.logger.warning(f"内存使用率过高({memory_percent:.1f}%),执行垃圾回收")
|
||||
|
||||
def start_recording(self, session_id: str, patient_id: str, screen_location: List[int], camera_location: List[int], femtobolt_location: List[int], recording_types: List[str] = None) -> Dict[str, Any]:
|
||||
"""
|
||||
启动同步录制
|
||||
|
||||
Args:
|
||||
session_id: 检测会话ID
|
||||
patient_id: 患者ID
|
||||
screen_location: 屏幕录制区域 [x, y, w, h]
|
||||
camera_location: 相机录制区域 [x, y, w, h]
|
||||
femtobolt_location: FemtoBolt录制区域 [x, y, w, h]
|
||||
recording_types: 录制类型列表 ['screen', 'feet', 'femtobolt'],默认全部录制
|
||||
|
||||
Returns:
|
||||
Dict: 录制启动状态和信息
|
||||
@ -94,7 +173,8 @@ class RecordingManager:
|
||||
'recording_start_time': None,
|
||||
'video_paths': {
|
||||
'feet_video': None,
|
||||
'screen_video': None
|
||||
'screen_video': None,
|
||||
'femtobolt_video': None
|
||||
},
|
||||
'message': ''
|
||||
}
|
||||
@ -105,24 +185,53 @@ class RecordingManager:
|
||||
result['message'] = f'已在录制中,当前会话ID: {self.current_session_id}'
|
||||
return result
|
||||
|
||||
# 设置默认录制类型
|
||||
recording_types = ['screen', 'feet', 'femtobolt']
|
||||
# recording_types = ['screen']
|
||||
|
||||
|
||||
|
||||
# 验证录制区域参数(仅对启用的录制类型进行验证)
|
||||
if 'screen' in recording_types:
|
||||
if not screen_location or not isinstance(screen_location, list) or len(screen_location) != 4:
|
||||
result['success'] = False
|
||||
result['message'] = '屏幕录制区域参数无效或缺失,必须是包含4个元素的数组[x, y, w, h]'
|
||||
return result
|
||||
|
||||
if 'feet' in recording_types:
|
||||
if not camera_location or not isinstance(camera_location, list) or len(camera_location) != 4:
|
||||
result['success'] = False
|
||||
result['message'] = '相机录制区域参数无效或缺失,必须是包含4个元素的数组[x, y, w, h]'
|
||||
return result
|
||||
|
||||
if 'femtobolt' in recording_types:
|
||||
if not femtobolt_location or not isinstance(femtobolt_location, list) or len(femtobolt_location) != 4:
|
||||
result['success'] = False
|
||||
result['message'] = 'FemtoBolt录制区域参数无效或缺失,必须是包含4个元素的数组[x, y, w, h]'
|
||||
return result
|
||||
|
||||
# 设置录制参数
|
||||
self.current_session_id = session_id
|
||||
self.current_patient_id = patient_id
|
||||
self.screen_region = tuple(screen_location) # [x, y, w, h] -> (x, y, w, h)
|
||||
self.camera_region = tuple(camera_location) # [x, y, w, h] -> (x, y, w, h)
|
||||
self.femtobolt_region = tuple(femtobolt_location) # [x, y, w, h] -> (x, y, w, h)
|
||||
self.recording_start_time = datetime.now()
|
||||
data_base_path = os.path.join('data', 'patients', patient_id, session_id)
|
||||
# 创建存储目录
|
||||
|
||||
# 创建主存储目录
|
||||
if getattr(sys, 'frozen', False):
|
||||
# 打包后的exe文件路径
|
||||
exe_dir = os.path.dirname(sys.executable)
|
||||
base_path = os.path.join(exe_dir, 'data', 'patients', patient_id, session_id)
|
||||
else:
|
||||
base_path = os.path.join('data', 'patients', patient_id, session_id)
|
||||
|
||||
|
||||
try:
|
||||
os.makedirs(base_path, exist_ok=True)
|
||||
self.logger.info(f'录制目录创建成功: {base_path}')
|
||||
|
||||
# 设置目录权限
|
||||
self._set_directory_permissions(base_path)
|
||||
os.makedirs(base_path, exist_ok=True)
|
||||
|
||||
except Exception as dir_error:
|
||||
self.logger.error(f'创建录制目录失败: {base_path}, 错误: {dir_error}')
|
||||
@ -133,78 +242,119 @@ class RecordingManager:
|
||||
# 定义视频文件路径
|
||||
feet_video_path = os.path.join(base_path, 'feet.mp4')
|
||||
screen_video_path = os.path.join(base_path, 'screen.mp4')
|
||||
femtobolt_video_path = os.path.join(base_path, 'femtobolt.mp4')
|
||||
|
||||
result['video_paths']['feet_video'] = feet_video_path
|
||||
result['video_paths']['screen_video'] = screen_video_path
|
||||
result['video_paths']['femtobolt_video'] = femtobolt_video_path
|
||||
|
||||
# 准备数据库更新信息,返回给调用方统一处理
|
||||
result['database_updates'] = {
|
||||
'session_id': session_id,
|
||||
'status': 'recording',
|
||||
'video_paths': {
|
||||
'normal_video_path': os.path.join(base_path, 'feet.mp4'),
|
||||
'screen_video_path': os.path.join(base_path, 'screen.mp4'),
|
||||
'femtobolt_video_path': os.path.join(base_path, 'femtobolt.mp4')
|
||||
}
|
||||
}
|
||||
self.logger.debug(f'数据库更新信息已准备 - 会话ID: {session_id}')
|
||||
|
||||
# 更新数据库中的视频路径
|
||||
if self.db_manager:
|
||||
# 视频编码参数 - 使用更兼容的编解码器
|
||||
# 尝试多种编解码器以确保兼容性
|
||||
try:
|
||||
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # 更兼容的编解码器
|
||||
except:
|
||||
try:
|
||||
# 更新会话状态为录制中
|
||||
if not self.db_manager.update_session_status(session_id, 'recording'):
|
||||
self.logger.error(f'更新会话状态为录制中失败 - 会话ID: {session_id}')
|
||||
|
||||
# 更新视频文件路径
|
||||
self.db_manager.update_session_normal_video_path(session_id, os.path.join(data_base_path, 'feet.mp4'))
|
||||
self.db_manager.update_session_screen_video_path(session_id, os.path.join(data_base_path, 'screen.mp4'))
|
||||
|
||||
self.logger.debug(f'数据库视频路径更新成功 - 会话ID: {session_id}')
|
||||
except Exception as db_error:
|
||||
self.logger.error(f'更新数据库视频路径失败: {db_error}')
|
||||
fourcc = cv2.VideoWriter_fourcc(*'XVID') # 备选编解码器
|
||||
except:
|
||||
fourcc = cv2.VideoWriter_fourcc(*'MJPG') # 最后备选
|
||||
fps = 25 #正常帧率
|
||||
|
||||
# 视频编码参数
|
||||
fourcc = cv2.VideoWriter_fourcc(*'avc1')
|
||||
fps = 30
|
||||
|
||||
|
||||
# 初始化足部视频写入器
|
||||
if self.camera_manager and self.camera_manager.is_connected:
|
||||
target_width, target_height = self.MAX_FRAME_SIZE
|
||||
self.feet_video_writer = cv2.VideoWriter(
|
||||
feet_video_path, fourcc, fps, (target_width, target_height)
|
||||
# 根据录制类型选择性地初始化视频写入器
|
||||
self.screen_video_writer = None
|
||||
self.femtobolt_video_writer = None
|
||||
self.feet_video_writer = None
|
||||
|
||||
if 'screen' in recording_types:
|
||||
self.screen_video_writer = cv2.VideoWriter(
|
||||
screen_video_path, fourcc, fps, (self.screen_region[2], self.screen_region[3])
|
||||
)
|
||||
|
||||
if self.feet_video_writer.isOpened():
|
||||
self.logger.info(f'脚部视频写入器初始化成功: {feet_video_path}')
|
||||
|
||||
if 'femtobolt' in recording_types:
|
||||
self.femtobolt_video_writer = cv2.VideoWriter(
|
||||
femtobolt_video_path, fourcc, fps, (self.femtobolt_region[2], self.femtobolt_region[3])
|
||||
)
|
||||
|
||||
if 'feet' in recording_types:
|
||||
self.feet_video_writer = cv2.VideoWriter(
|
||||
feet_video_path, fourcc, fps, (self.camera_region[2], self.camera_region[3])
|
||||
)
|
||||
|
||||
# 检查视频写入器状态(仅检查启用的录制类型)
|
||||
# 检查足部视频写入器
|
||||
if 'feet' in recording_types:
|
||||
if self.feet_video_writer and self.feet_video_writer.isOpened():
|
||||
self.logger.info(f'足部视频写入器初始化成功: {feet_video_path}')
|
||||
else:
|
||||
self.logger.error(f'脚部视频写入器初始化失败: {feet_video_path}')
|
||||
self.logger.error(f'足部视频写入器初始化失败: {feet_video_path}')
|
||||
else:
|
||||
self.logger.warning('相机设备未启用,跳过脚部视频写入器初始化')
|
||||
self.logger.info('足部录制功能已禁用')
|
||||
|
||||
# 初始化屏幕录制写入器
|
||||
# record_size = self.screen_region[2:4] if self.screen_region else self.screen_size
|
||||
# print('屏幕写入器的宽高..............',record_size)
|
||||
# self.screen_video_writer = cv2.VideoWriter(
|
||||
# screen_video_path, fourcc, fps, (self.screen_size[0],self.screen_size[1])
|
||||
# )
|
||||
|
||||
# 检查屏幕视频写入器状态(仅在初始化时)
|
||||
if self.screen_video_writer and self.screen_video_writer.isOpened():
|
||||
self.logger.info(f'屏幕视频写入器初始化成功: {screen_video_path}')
|
||||
elif self.screen_video_writer:
|
||||
self.logger.error(f'屏幕视频写入器初始化失败: {screen_video_path}')
|
||||
# 检查屏幕视频写入器
|
||||
if 'screen' in recording_types:
|
||||
if self.screen_video_writer and self.screen_video_writer.isOpened():
|
||||
self.logger.info(f'屏幕视频写入器初始化成功: {screen_video_path}')
|
||||
else:
|
||||
self.logger.error(f'屏幕视频写入器初始化失败: {screen_video_path}')
|
||||
else:
|
||||
self.logger.info('屏幕录制功能暂时禁用')
|
||||
self.logger.info('屏幕录制功能已禁用')
|
||||
|
||||
# 检查FemtoBolt视频写入器
|
||||
if 'femtobolt' in recording_types:
|
||||
if self.femtobolt_video_writer and self.femtobolt_video_writer.isOpened():
|
||||
self.logger.info(f'FemtoBolt视频写入器初始化成功: {femtobolt_video_path}')
|
||||
else:
|
||||
self.logger.error(f'FemtoBolt视频写入器初始化失败: {femtobolt_video_path}')
|
||||
else:
|
||||
self.logger.info('FemtoBolt录制功能已禁用')
|
||||
|
||||
# 重置停止事件
|
||||
self.recording_stop_event.clear()
|
||||
self.sync_recording = True
|
||||
|
||||
# 启动录制线程
|
||||
if self.feet_video_writer:
|
||||
# 根据录制类型启动对应的录制线程
|
||||
if 'feet' in recording_types and self.feet_video_writer and self.feet_video_writer.isOpened():
|
||||
self.feet_recording_thread = threading.Thread(
|
||||
target=self._feet_recording_thread,
|
||||
target=self._generic_recording_thread,
|
||||
args=('camera', self.camera_region, feet_video_path, self.feet_video_writer),
|
||||
daemon=True,
|
||||
name='FeetRecordingThread'
|
||||
)
|
||||
self.feet_recording_thread.start()
|
||||
self.logger.info(f'足部录制线程已启动 - 区域: {self.camera_region}, 输出文件: {feet_video_path}')
|
||||
|
||||
# if self.screen_video_writer:
|
||||
# self.screen_recording_thread = threading.Thread(
|
||||
# target=self._screen_recording_thread,
|
||||
# daemon=True,
|
||||
# name='ScreenRecordingThread'
|
||||
# )
|
||||
# self.screen_recording_thread.start()
|
||||
if 'screen' in recording_types and self.screen_video_writer and self.screen_video_writer.isOpened():
|
||||
self.screen_recording_thread = threading.Thread(
|
||||
target=self._generic_recording_thread,
|
||||
args=('screen', self.screen_region, screen_video_path, self.screen_video_writer),
|
||||
daemon=True,
|
||||
name='ScreenRecordingThread'
|
||||
)
|
||||
self.screen_recording_thread.start()
|
||||
self.logger.info(f'屏幕录制线程已启动 - 区域: {self.screen_region}, 输出文件: {screen_video_path}')
|
||||
|
||||
if 'femtobolt' in recording_types and self.femtobolt_video_writer and self.femtobolt_video_writer.isOpened():
|
||||
self.femtobolt_recording_thread = threading.Thread(
|
||||
target=self._generic_recording_thread,
|
||||
args=('femtobolt', self.femtobolt_region, femtobolt_video_path, self.femtobolt_video_writer),
|
||||
daemon=True,
|
||||
name='FemtoBoltRecordingThread'
|
||||
)
|
||||
self.femtobolt_recording_thread.start()
|
||||
self.logger.info(f'FemtoBolt录制线程已启动 - 区域: {self.femtobolt_region}, 输出文件: {femtobolt_video_path}')
|
||||
|
||||
result['success'] = True
|
||||
result['recording_start_time'] = self.recording_start_time.isoformat()
|
||||
@ -251,22 +401,25 @@ class RecordingManager:
|
||||
self.recording_stop_event.set()
|
||||
|
||||
# 等待录制线程结束
|
||||
if self.feet_recording_thread and self.feet_recording_thread.is_alive():
|
||||
if hasattr(self, 'feet_recording_thread') and self.feet_recording_thread and self.feet_recording_thread.is_alive():
|
||||
self.feet_recording_thread.join(timeout=5.0)
|
||||
|
||||
if self.screen_recording_thread and self.screen_recording_thread.is_alive():
|
||||
if hasattr(self, 'screen_recording_thread') and self.screen_recording_thread and self.screen_recording_thread.is_alive():
|
||||
self.screen_recording_thread.join(timeout=5.0)
|
||||
|
||||
if hasattr(self, 'femtobolt_recording_thread') and self.femtobolt_recording_thread and self.femtobolt_recording_thread.is_alive():
|
||||
self.femtobolt_recording_thread.join(timeout=5.0)
|
||||
|
||||
# 清理视频写入器
|
||||
self._cleanup_video_writers()
|
||||
|
||||
# 更新数据库状态
|
||||
if self.db_manager and self.current_session_id:
|
||||
try:
|
||||
self.db_manager.update_session_status(self.current_session_id, 'completed')
|
||||
self.logger.info(f'会话状态已更新为完成 - 会话ID: {self.current_session_id}')
|
||||
except Exception as db_error:
|
||||
self.logger.error(f'更新数据库状态失败: {db_error}')
|
||||
# 准备数据库更新信息,返回给调用方统一处理
|
||||
if self.current_session_id:
|
||||
result['database_updates'] = {
|
||||
'session_id': self.current_session_id,
|
||||
'status': 'completed'
|
||||
}
|
||||
self.logger.info(f'数据库更新信息已准备 - 会话ID: {self.current_session_id}')
|
||||
|
||||
result['success'] = True
|
||||
result['message'] = '录制已停止'
|
||||
@ -284,156 +437,108 @@ class RecordingManager:
|
||||
|
||||
return result
|
||||
|
||||
def _feet_recording_thread(self):
|
||||
"""足部视频录制线程"""
|
||||
consecutive_failures = 0
|
||||
max_consecutive_failures = 10
|
||||
recording_frame_count = 0
|
||||
|
||||
self.logger.info(f"足部录制线程已启动 - 会话ID: {self.current_session_id}")
|
||||
self.logger.info(f"视频写入器状态: {self.feet_video_writer.isOpened() if self.feet_video_writer else 'None'}")
|
||||
def _generic_recording_thread(self, recording_type, region, output_file_name, video_writer):
|
||||
"""
|
||||
通用录制线程,支持屏幕、相机和FemtoBolt录制
|
||||
|
||||
Args:
|
||||
recording_type: 录制类型 ('screen', 'camera', 'femtobolt')
|
||||
region: 录制区域 (x, y, width, height)
|
||||
output_file_name: 输出文件名
|
||||
video_writer: 视频写入器对象
|
||||
"""
|
||||
try:
|
||||
# 使用与屏幕录制相同的帧率控制
|
||||
target_fps = 30 # 目标帧率
|
||||
self.logger.info(f'{recording_type}录制线程启动 - 区域: {region}, 输出文件: {output_file_name}')
|
||||
frame_count = 0
|
||||
# 使用当前动态帧率,支持自适应帧率调整
|
||||
target_fps = self.current_fps
|
||||
frame_interval = 1.0 / target_fps
|
||||
last_frame_time = time.time()
|
||||
|
||||
if not video_writer or not video_writer.isOpened():
|
||||
self.logger.error(f'{recording_type}视频写入器初始化失败: {output_file_name}')
|
||||
return
|
||||
|
||||
# 验证并解包region参数
|
||||
if not region or len(region) != 4:
|
||||
self.logger.error(f'{recording_type}录制区域参数无效: {region}')
|
||||
return
|
||||
|
||||
x, y, w, h = region
|
||||
self.logger.info(f'{recording_type}录制区域解包成功: x={x}, y={y}, w={w}, h={h}')
|
||||
|
||||
while self.sync_recording and not self.recording_stop_event.is_set():
|
||||
current_time = time.time()
|
||||
|
||||
# 检查是否到了下一帧的时间
|
||||
if current_time - last_frame_time >= frame_interval:
|
||||
if self.feet_video_writer:
|
||||
# 从相机管理器的全局缓存获取最新帧
|
||||
frame, frame_timestamp = self.camera_manager._get_latest_frame_from_cache('camera')
|
||||
try:
|
||||
current_time = time.time()
|
||||
|
||||
# 定期检查系统性能并调整录制参数
|
||||
if frame_count % self.performance_check_interval == 0 and frame_count > 0:
|
||||
performance_data = self._check_system_performance()
|
||||
self._adjust_recording_performance(performance_data)
|
||||
# 更新帧率间隔
|
||||
target_fps = self.current_fps
|
||||
frame_interval = 1.0 / target_fps
|
||||
self.logger.debug(f'{recording_type}性能检查完成,当前帧率: {target_fps}fps')
|
||||
|
||||
# 控制帧率
|
||||
if current_time - last_frame_time < frame_interval:
|
||||
time.sleep(0.001)
|
||||
continue
|
||||
|
||||
frame = None
|
||||
|
||||
# 获取帧数据 - 从屏幕截图生成
|
||||
screenshot = pyautogui.screenshot(region=(x, y, w, h))
|
||||
frame = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2BGR)
|
||||
frame = cv2.resize(frame, (w, h))
|
||||
|
||||
# 写入视频帧
|
||||
if frame is not None:
|
||||
video_writer.write(frame)
|
||||
frame_count += 1
|
||||
|
||||
if frame is not None:
|
||||
self.logger.debug(f"成功获取帧 - 尺寸: {frame.shape}, 数据类型: {frame.dtype}, 时间戳: {frame_timestamp}")
|
||||
|
||||
# 检查视频写入器状态
|
||||
if not self.feet_video_writer.isOpened():
|
||||
self.logger.error(f"脚部视频写入器已关闭,无法写入帧 - 会话ID: {self.current_session_id}")
|
||||
break
|
||||
|
||||
try:
|
||||
# 调整帧尺寸到目标大小
|
||||
resized_frame = cv2.resize(frame, self.MAX_FRAME_SIZE)
|
||||
|
||||
# 写入录制文件
|
||||
write_success = self.feet_video_writer.write(resized_frame)
|
||||
|
||||
if write_success is False:
|
||||
self.logger.error(f"视频帧写入返回False - 可能写入失败")
|
||||
consecutive_failures += 1
|
||||
else:
|
||||
consecutive_failures = 0
|
||||
recording_frame_count += 1
|
||||
|
||||
except Exception as write_error:
|
||||
self.logger.error(f"写入脚部视频帧异常: {write_error}")
|
||||
consecutive_failures += 1
|
||||
if consecutive_failures >= 10:
|
||||
self.logger.error("连续写入失败次数过多,停止录制")
|
||||
break
|
||||
else:
|
||||
# 如果没有获取到帧,写入上一帧或黑色帧来保持帧率
|
||||
consecutive_failures += 1
|
||||
if consecutive_failures <= 3:
|
||||
self.logger.warning(f"录制线程无法从缓存获取帧 (连续失败{consecutive_failures}次)")
|
||||
elif consecutive_failures == max_consecutive_failures:
|
||||
self.logger.error(f"录制线程连续失败{max_consecutive_failures}次,可能缓存无数据或推流已停止")
|
||||
|
||||
last_frame_time = current_time
|
||||
else:
|
||||
self.logger.error("足部视频写入器未初始化")
|
||||
break
|
||||
|
||||
# 短暂休眠避免CPU占用过高
|
||||
time.sleep(0.01)
|
||||
|
||||
# 检查连续失败情况
|
||||
if consecutive_failures >= max_consecutive_failures:
|
||||
self.logger.error(f"连续失败次数达到上限({max_consecutive_failures}),停止录制")
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f'足部录制线程异常: {e}')
|
||||
finally:
|
||||
self.logger.info(f"足部录制线程已结束 - 会话ID: {self.current_session_id}, 总录制帧数: {recording_frame_count}")
|
||||
# 确保视频写入器被正确关闭
|
||||
if self.feet_video_writer:
|
||||
self.feet_video_writer.release()
|
||||
self.feet_video_writer = None
|
||||
self.logger.debug("足部视频写入器已释放")
|
||||
|
||||
def _screen_recording_thread(self):
|
||||
"""屏幕录制线程"""
|
||||
self.logger.info(f"屏幕录制线程已启动 - 会话ID: {self.current_session_id}")
|
||||
recording_frame_count = 0
|
||||
|
||||
try:
|
||||
# 使用与足部录制相同的帧率控制
|
||||
target_fps = 30 # 目标帧率
|
||||
frame_interval = 1.0 / target_fps
|
||||
last_frame_time = time.time()
|
||||
# 如果没有获取到帧,短暂等待
|
||||
time.sleep(0.01)
|
||||
|
||||
last_frame_time = current_time
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f'{recording_type}录制线程错误: {e}')
|
||||
time.sleep(0.1)
|
||||
|
||||
self.logger.info(f'{recording_type}录制线程结束,总帧数: {frame_count}')
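A self-contained sketch of the capture loop implemented by _generic_recording_thread (grab the region with pyautogui, convert RGB to BGR, pace to the target fps), with hypothetical region and output values:

import time
import cv2
import numpy as np
import pyautogui

x, y, w, h = 0, 0, 640, 480                               # hypothetical capture region
fps = 25
writer = cv2.VideoWriter('region.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))

interval = 1.0 / fps
last = 0.0
end = time.time() + 5.0                                   # record for roughly 5 seconds
while time.time() < end:
    now = time.time()
    if now - last < interval:
        time.sleep(0.001)                                  # pace the loop to the target fps
        continue
    shot = pyautogui.screenshot(region=(x, y, w, h))       # PIL image in RGB order
    frame = cv2.cvtColor(np.array(shot), cv2.COLOR_RGB2BGR)
    writer.write(cv2.resize(frame, (w, h)))
    last = now

writer.release()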
|
||||
|
||||
while self.sync_recording and not self.recording_stop_event.is_set():
|
||||
current_time = time.time()
|
||||
|
||||
# 检查是否到了下一帧的时间
|
||||
if current_time - last_frame_time >= frame_interval:
|
||||
try:
|
||||
# 截取屏幕self.screen_size
|
||||
if self.screen_size:
|
||||
# print('获取截图的时候屏幕写入器的宽高..............',self.screen_region)
|
||||
width, height = self.screen_size
|
||||
screenshot = pyautogui.screenshot(region=(0, 0, width, height))
|
||||
else:
|
||||
# print('screen_region方法没找到。。。。。。。。。。。。。。。。。')
|
||||
screenshot = pyautogui.screenshot()
|
||||
|
||||
# 转换为numpy数组
|
||||
frame = np.array(screenshot)
|
||||
|
||||
# 转换颜色格式 (RGB -> BGR)
|
||||
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
|
||||
|
||||
# 写入视频文件
|
||||
if self.screen_video_writer and self.screen_video_writer.isOpened():
|
||||
self.screen_video_writer.write(frame)
|
||||
recording_frame_count += 1
|
||||
|
||||
last_frame_time = current_time
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"屏幕录制异常: {e}")
|
||||
|
||||
# 短暂休眠避免CPU占用过高
|
||||
time.sleep(0.01)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f'屏幕录制线程异常: {e}')
|
||||
self.logger.error(f'{recording_type}录制线程异常: {e}')
|
||||
finally:
|
||||
self.logger.info(f"屏幕录制线程已结束 - 会话ID: {self.current_session_id}, 总录制帧数: {recording_frame_count}")
|
||||
# 确保视频写入器被正确关闭
|
||||
if self.screen_video_writer:
|
||||
self.screen_video_writer.release()
|
||||
self.screen_video_writer = None
|
||||
self.logger.debug("屏幕视频写入器已释放")
|
||||
# 清理资源
|
||||
if video_writer:
|
||||
try:
|
||||
video_writer.release()
|
||||
self.logger.info(f'{recording_type}视频写入器已释放')
|
||||
except Exception as e:
|
||||
self.logger.error(f'释放{recording_type}视频写入器失败: {e}')
|
||||
|
||||
def _cleanup_video_writers(self):
|
||||
"""清理视频写入器"""
|
||||
try:
|
||||
if self.feet_video_writer:
|
||||
if hasattr(self, 'feet_video_writer') and self.feet_video_writer:
|
||||
self.feet_video_writer.release()
|
||||
self.feet_video_writer = None
|
||||
self.logger.debug("足部视频写入器已清理")
|
||||
|
||||
if self.screen_video_writer:
|
||||
if hasattr(self, 'screen_video_writer') and self.screen_video_writer:
|
||||
self.screen_video_writer.release()
|
||||
self.screen_video_writer = None
|
||||
self.logger.debug("屏幕视频写入器已清理")
|
||||
|
||||
if hasattr(self, 'femtobolt_video_writer') and self.femtobolt_video_writer:
|
||||
self.femtobolt_video_writer.release()
|
||||
self.femtobolt_video_writer = None
|
||||
self.logger.debug("FemtoBolt视频写入器已清理")
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"清理视频写入器失败: {e}")
|
||||
@ -487,6 +592,30 @@ class RecordingManager:
|
||||
|
||||
return True
|
||||
|
||||
def set_recording_regions(self, screen_region=None, camera_region=None, femtobolt_region=None):
|
||||
"""
|
||||
设置三个录制区域
|
||||
|
||||
Args:
|
||||
screen_region: 屏幕录制区域 (x, y, width, height)
|
||||
camera_region: 相机录制区域 (x, y, width, height)
|
||||
femtobolt_region: FemtoBolt录制区域 (x, y, width, height)
|
||||
"""
|
||||
if self.sync_recording:
|
||||
self.logger.warning("录制进行中,无法更改区域设置")
|
||||
return False
|
||||
|
||||
self.screen_region = screen_region
|
||||
self.camera_region = camera_region
|
||||
self.femtobolt_region = femtobolt_region
|
||||
|
||||
self.logger.info(f'录制区域已设置:')
|
||||
self.logger.info(f' 屏幕区域: {screen_region}')
|
||||
self.logger.info(f' 相机区域: {camera_region}')
|
||||
self.logger.info(f' FemtoBolt区域: {femtobolt_region}')
|
||||
|
||||
return True
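A hedged usage example of setting the three regions before recording starts (region values are hypothetical):

from devices.screen_recorder import RecordingManager

manager = RecordingManager()
manager.set_recording_regions(
    screen_region=(0, 0, 1920, 1080),    # (x, y, width, height)
    camera_region=(0, 0, 640, 480),
    femtobolt_region=(0, 0, 640, 480),
)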
|
||||
|
||||
def get_status(self):
|
||||
"""获取录制状态"""
|
||||
return {
|
||||
|
backend/main.py (115 changed lines)
@ -32,29 +32,6 @@ from devices.femtobolt_manager import FemtoBoltManager
|
||||
from devices.device_coordinator import DeviceCoordinator
|
||||
from devices.screen_recorder import RecordingManager
|
||||
from devices.utils.config_manager import ConfigManager
|
||||
# # 导入设备管理器
|
||||
# try:
|
||||
# from devices.camera_manager import CameraManager
|
||||
# from devices.imu_manager import IMUManager
|
||||
# from devices.pressure_manager import PressureManager
|
||||
# from devices.femtobolt_manager import FemtoBoltManager
|
||||
# from devices.device_coordinator import DeviceCoordinator
|
||||
# from devices.screen_recorder import RecordingManager
|
||||
# from devices.utils.config_manager import ConfigManager
|
||||
# except ImportError:
|
||||
# # 如果上面的导入失败,尝试直接导入
|
||||
# # from camera_manager import CameraManager
|
||||
# import imu_manager
|
||||
# import pressure_manager
|
||||
# import femtobolt_manager
|
||||
# import device_coordinator
|
||||
# from utils import config_manager
|
||||
|
||||
# IMUManager = imu_manager.IMUManager
|
||||
# PressureManager = pressure_manager.PressureManager
|
||||
# FemtoBoltManager = femtobolt_manager.FemtoBoltManager
|
||||
# DeviceCoordinator = device_coordinator.DeviceCoordinator
|
||||
# ConfigManager = config_manager.ConfigManager
|
||||
|
||||
|
||||
class AppServer:
|
||||
@ -149,7 +126,6 @@ class AppServer:
|
||||
self.app,
|
||||
cors_allowed_origins='*',
|
||||
async_mode='threading',
|
||||
#async_mode='eventlet',
|
||||
logger=False,
|
||||
engineio_logger=False,
|
||||
ping_timeout=60,
|
||||
@ -295,10 +271,14 @@ class AppServer:
|
||||
# 检查是否在允许的目录内
|
||||
if not os.path.commonpath([data_dir, file_path]) == data_dir:
|
||||
return jsonify({'error': '访问被拒绝'}), 403
|
||||
|
||||
self.logger.info(f'静态文件: {file_path}')
|
||||
# 返回文件
|
||||
from flask import send_file
|
||||
return send_file(file_path)
|
||||
# 为视频文件设置正确的MIME类型
|
||||
if file_path.lower().endswith(('.mp4', '.webm', '.avi', '.mov')):
|
||||
return send_file(file_path, mimetype='video/mp4')
|
||||
else:
|
||||
return send_file(file_path)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f'静态文件服务错误: {e}')
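A minimal Flask sketch of this static-file route: confine paths to the data directory with os.path.commonpath and pick the MIME type by extension. The route name and directory are hypothetical:

import os
from flask import Flask, jsonify, send_file

app = Flask(__name__)
DATA_DIR = os.path.abspath('data')            # hypothetical data root

@app.route('/files/<path:relpath>')
def serve_file(relpath):
    file_path = os.path.abspath(os.path.join(DATA_DIR, relpath))
    # reject anything that escapes the data directory
    if os.path.commonpath([DATA_DIR, file_path]) != DATA_DIR:
        return jsonify({'error': 'access denied'}), 403
    # give browsers a playable MIME type for recorded videos
    if file_path.lower().endswith(('.mp4', '.webm', '.avi', '.mov')):
        return send_file(file_path, mimetype='video/mp4')
    return send_file(file_path)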
|
||||
@ -976,6 +956,16 @@ class AppServer:
|
||||
data = flask_request.get_json()
|
||||
patient_id = data.get('patient_id')
|
||||
creator_id = data.get('creator_id')
|
||||
screen_location = data.get('screen_location') # [0,0,1920,1080]
|
||||
camera_location = data.get('camera_location') # [0,0,640,480]
|
||||
femtobolt_location = data.get('femtobolt_location') # [0,0,640,480]
|
||||
|
||||
# 添加调试输出
|
||||
self.logger.info(f'接收到的参数 - patient_id: {patient_id}, creator_id: {creator_id}')
|
||||
self.logger.info(f'接收到的位置参数 - screen_location: {screen_location}, type: {type(screen_location)}')
|
||||
self.logger.info(f'接收到的位置参数 - camera_location: {camera_location}, type: {type(camera_location)}')
|
||||
self.logger.info(f'接收到的位置参数 - femtobolt_location: {femtobolt_location}, type: {type(femtobolt_location)}')
|
||||
|
||||
if not patient_id or not creator_id:
|
||||
return jsonify({'success': False, 'error': '缺少患者ID或创建人ID'}), 400
|
||||
|
||||
@ -985,7 +975,26 @@ class AppServer:
|
||||
# 开始同步录制
|
||||
recording_response = None
|
||||
try:
|
||||
recording_response = self.recording_manager.start_recording(session_id, patient_id)
|
||||
recording_response = self.recording_manager.start_recording(session_id, patient_id,screen_location,camera_location,femtobolt_location)
|
||||
|
||||
# 处理录制管理器返回的数据库更新信息
|
||||
if recording_response and recording_response.get('success') and 'database_updates' in recording_response:
|
||||
db_updates = recording_response['database_updates']
|
||||
try:
|
||||
# 更新会话状态
|
||||
if not self.db_manager.update_session_status(db_updates['session_id'], db_updates['status']):
|
||||
self.logger.error(f'更新会话状态失败 - 会话ID: {db_updates["session_id"]}, 状态: {db_updates["status"]}')
|
||||
|
||||
# 更新视频文件路径
|
||||
video_paths = db_updates['video_paths']
|
||||
self.db_manager.update_session_normal_video_path(db_updates['session_id'], video_paths['normal_video_path'])
|
||||
self.db_manager.update_session_screen_video_path(db_updates['session_id'], video_paths['screen_video_path'])
|
||||
self.db_manager.update_session_femtobolt_video_path(db_updates['session_id'], video_paths['femtobolt_video_path'])
|
||||
|
||||
self.logger.info(f'数据库更新成功 - 会话ID: {db_updates["session_id"]}')
|
||||
except Exception as db_error:
|
||||
self.logger.error(f'处理数据库更新失败: {db_error}')
|
||||
|
||||
except Exception as rec_e:
|
||||
self.logger.error(f'开始同步录制失败: {rec_e}')
|
||||
|
||||
@ -1009,37 +1018,45 @@ class AppServer:
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': '缺少会话ID'
|
||||
}), 400
|
||||
}), 400
|
||||
|
||||
data = flask_request.get_json()
|
||||
video_data = data['videoData']
|
||||
mime_type = data.get('mimeType', 'video/webm;codecs=vp9') # 默认webm格式
|
||||
import base64
|
||||
# 验证base64视频数据格式
|
||||
if not video_data.startswith('data:video/'):
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'message': '无效的视频数据格式'
|
||||
}), 400
|
||||
try:
|
||||
header, encoded = video_data.split(',', 1)
|
||||
video_bytes = base64.b64decode(encoded)
|
||||
except Exception as e:
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'message': f'视频数据解码失败: {str(e)}'
|
||||
}), 400
|
||||
# 获取请求数据中的duration参数
|
||||
data = flask_request.get_json() or {}
|
||||
duration = data.get('duration')
|
||||
|
||||
# 如果提供了duration,更新到数据库
|
||||
if duration is not None and isinstance(duration, (int, float)):
|
||||
try:
|
||||
self.db_manager.update_session_duration(session_id, int(duration))
|
||||
self.logger.info(f'更新会话持续时间: {session_id} -> {duration}秒')
|
||||
except Exception as duration_error:
|
||||
self.logger.error(f'更新会话持续时间失败: {duration_error}')
|
||||
|
||||
# 停止同步录制,传递视频数据
|
||||
try:
|
||||
restrt = self.recording_manager.stop_recording(session_id)
|
||||
self.logger.info(f'停止录制结果: {restrt}')
|
||||
|
||||
# 处理录制管理器返回的数据库更新信息
|
||||
if restrt and restrt.get('success') and 'database_updates' in restrt:
|
||||
db_updates = restrt['database_updates']
|
||||
try:
|
||||
# 更新会话状态
|
||||
success = self.db_manager.update_session_status(db_updates['session_id'], db_updates['status'])
|
||||
self.logger.info(f'会话状态已更新为: {db_updates["status"]} - 会话ID: {db_updates["session_id"]}')
|
||||
except Exception as db_error:
|
||||
self.logger.error(f'处理停止录制的数据库更新失败: {db_error}')
|
||||
success = False
|
||||
else:
|
||||
# 如果录制管理器没有返回数据库更新信息,则手动更新
|
||||
success = self.db_manager.update_session_status(session_id, 'completed')
|
||||
|
||||
except Exception as rec_e:
|
||||
self.logger.error(f'停止同步录制失败: {rec_e}', exc_info=True)
|
||||
# 即使录制停止失败,也尝试更新数据库状态
|
||||
success = self.db_manager.update_session_status(session_id, 'completed')
|
||||
raise
|
||||
|
||||
# 更新会话状态为已完成
|
||||
success = self.db_manager.update_session_status(session_id, 'completed')
|
||||
|
||||
if success:
|
||||
self.logger.info(f'检测会话已停止 - 会话ID: {session_id}')
|
||||
return jsonify({
|
||||
|
backend/test_screen_recorder_performance.py (new file, 137 lines)
@ -0,0 +1,137 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
屏幕录制性能测试脚本
|
||||
用于测试屏幕录制的CPU优化功能
|
||||
"""
|
||||
|
||||
import time
|
||||
import logging
|
||||
import threading
|
||||
from devices.screen_recorder import RecordingManager
|
||||
|
||||
# 配置日志
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
|
||||
def test_screen_recording_performance():
|
||||
"""
|
||||
测试屏幕录制性能优化功能
|
||||
"""
|
||||
print("开始屏幕录制性能测试...")
|
||||
|
||||
# 创建录制管理器
|
||||
recording_manager = RecordingManager()
|
||||
|
||||
# 配置性能参数(更严格的阈值用于测试)
|
||||
recording_manager.configure_performance_settings(
|
||||
cpu_threshold=70.0, # 降低CPU阈值以便测试
|
||||
memory_threshold=80.0,
|
||||
adaptive_fps=True,
|
||||
min_fps=10,
|
||||
max_fps=30
|
||||
)
|
||||
|
||||
try:
|
||||
# 启动屏幕录制
|
||||
print("启动屏幕录制...")
|
||||
result = recording_manager.start_recording(
|
||||
session_id="test_session_001",
|
||||
patient_id="test_patient",
|
||||
screen_location=[0, 0, 1920, 1080], # 全屏录制
|
||||
camera_location=[0, 0, 640, 480], # 默认相机区域
|
||||
femtobolt_location=[0, 0, 640, 480], # 默认FemtoBolt区域
|
||||
recording_types=["screen"] # 只录制屏幕
|
||||
)
|
||||
|
||||
if not result['success']:
|
||||
print(f"录制启动失败: {result['message']}")
|
||||
return
|
||||
|
||||
print("录制已启动,开始性能监控...")
|
||||
|
||||
# 监控性能状态
|
||||
def monitor_performance():
|
||||
for i in range(30): # 监控30秒
|
||||
time.sleep(1)
|
||||
status = recording_manager.get_status()
|
||||
|
||||
if status['recording']:
|
||||
perf = status['performance']
|
||||
print(f"[{i+1:2d}s] CPU: {perf['cpu_percent']:.1f}% | "
|
||||
f"内存: {perf['memory_percent']:.1f}% | "
|
||||
f"当前帧率: {status['current_fps']:.1f}fps | "
|
||||
f"跳帧: {status['frame_skip_count']}")
|
||||
|
||||
# 如果CPU或内存超过阈值,显示警告
|
||||
if perf['cpu_percent'] > perf['cpu_threshold']:
|
||||
print(f" ⚠️ CPU使用率超过阈值 ({perf['cpu_threshold']}%)")
|
||||
if perf['memory_percent'] > perf['memory_threshold']:
|
||||
print(f" ⚠️ 内存使用率超过阈值 ({perf['memory_threshold']}%)")
|
||||
else:
|
||||
print("录制已停止")
|
||||
break
|
||||
|
||||
# 在单独线程中监控性能
|
||||
monitor_thread = threading.Thread(target=monitor_performance)
|
||||
monitor_thread.start()
|
||||
|
||||
# 等待监控完成
|
||||
monitor_thread.join()
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\n用户中断测试")
|
||||
except Exception as e:
|
||||
print(f"测试过程中发生错误: {e}")
|
||||
finally:
|
||||
# 停止录制
|
||||
print("停止录制...")
|
||||
stop_result = recording_manager.stop_recording()
|
||||
if stop_result['success']:
|
||||
print(f"录制已停止,视频文件: {stop_result.get('video_files', [])}")
|
||||
else:
|
||||
print(f"停止录制失败: {stop_result['message']}")
|
||||
|
||||
def test_performance_configuration():
|
||||
"""
|
||||
测试性能配置功能
|
||||
"""
|
||||
print("\n测试性能配置功能...")
|
||||
|
||||
recording_manager = RecordingManager()
|
||||
|
||||
# 测试各种配置
|
||||
print("测试CPU阈值配置:")
|
||||
recording_manager.configure_performance_settings(cpu_threshold=60)
|
||||
recording_manager.configure_performance_settings(cpu_threshold=40) # 应该被限制到50
|
||||
recording_manager.configure_performance_settings(cpu_threshold=100) # 应该被限制到95
|
||||
|
||||
print("\n测试帧率配置:")
|
||||
recording_manager.configure_performance_settings(min_fps=15, max_fps=25)
|
||||
recording_manager.configure_performance_settings(min_fps=35, max_fps=20) # min > max,应该调整
|
||||
|
||||
print("\n当前状态:")
|
||||
status = recording_manager.get_status()
|
||||
perf = status['performance']
|
||||
print(f"CPU阈值: {perf['cpu_threshold']}%")
|
||||
print(f"内存阈值: {perf['memory_threshold']}%")
|
||||
print(f"自适应帧率: {status['adaptive_fps_enabled']}")
|
||||
print(f"当前帧率: {status['current_fps']}fps")
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("屏幕录制性能测试")
|
||||
print("=" * 50)
|
||||
|
||||
# 测试配置功能
|
||||
test_performance_configuration()
|
||||
|
||||
# 询问是否进行实际录制测试
|
||||
response = input("\n是否进行实际录制测试?(y/n): ")
|
||||
if response.lower() in ['y', 'yes', '是']:
|
||||
test_screen_recording_performance()
|
||||
else:
|
||||
print("跳过录制测试")
|
||||
|
||||
print("\n测试完成")
|
(One file diff suppressed because it is too large.)
backend/tests/My_TestFemtobolt.py (new file, 139 lines)
@ -0,0 +1,139 @@
|
||||
import os
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib.colors import LinearSegmentedColormap, ListedColormap
|
||||
|
||||
# 设置matplotlib支持中文显示
|
||||
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans'] # 用来正常显示中文标签
|
||||
plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号
|
||||
|
||||
|
||||
class FemtoBoltDynamicViewer:
|
||||
def __init__(self, depth_min=900, depth_max=1300):
|
||||
self.depth_min = depth_min
|
||||
self.depth_max = depth_max
|
||||
|
||||
# 使用display_x.py的原始颜色映射算法
|
||||
colors = ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
|
||||
self.mcmap = LinearSegmentedColormap.from_list("custom_cmap", colors)
|
||||
|
||||
# SDK 设备句柄和配置
|
||||
self.device_handle = None
|
||||
self.pykinect = None
|
||||
self.config = None
|
||||
|
||||
def _load_sdk(self):
|
||||
"""加载并初始化 FemtoBolt SDK"""
|
||||
import pykinect_azure as pykinect
|
||||
base_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
dll_path = os.path.join(base_dir, "..", "dll", "femtobolt", "bin", "k4a.dll")
|
||||
self.pykinect = pykinect
|
||||
self.pykinect.initialize_libraries(track_body=False, module_k4a_path=dll_path)
|
||||
|
||||
def _configure_device(self):
|
||||
"""配置 FemtoBolt 深度相机"""
|
||||
self.config = self.pykinect.default_configuration
|
||||
self.config.depth_mode = self.pykinect.K4A_DEPTH_MODE_NFOV_UNBINNED
|
||||
self.config.camera_fps = self.pykinect.K4A_FRAMES_PER_SECOND_15
|
||||
self.config.synchronized_images_only = False
|
||||
self.config.color_resolution = 0
|
||||
self.device_handle = self.pykinect.start_device(config=self.config)
|
||||
|
||||
|
||||
|
||||
def run(self):
|
||||
"""运行实时深度数据可视化 - 融合display_x.py原始算法"""
|
||||
self._load_sdk()
|
||||
self._configure_device()
|
||||
|
||||
plt.ion() # 打开交互模式
|
||||
plt.figure(figsize=(7, 7)) # 使用display_x.py的图形设置
|
||||
print("FemtoBolt 深度相机启动成功,关闭窗口或 Ctrl+C 退出")
|
||||
print(f"深度范围: {self.depth_min} - {self.depth_max} mm")
|
||||
|
||||
try:
|
||||
frame_count = 0
|
||||
while True:
|
||||
capture = self.device_handle.update()
|
||||
if capture is None:
|
||||
continue
|
||||
ret, depth_image = capture.get_depth_image()
|
||||
if not ret or depth_image is None:
|
||||
continue
|
||||
|
||||
# 使用display_x.py的原始算法处理深度数据
|
||||
depth = depth_image.copy()
|
||||
|
||||
# 深度数据过滤 (根据输入参数动态设置)
|
||||
depth[depth > self.depth_max] = 0
|
||||
depth[depth < self.depth_min] = 0
|
||||
|
||||
# 裁剪感兴趣区域 (与display_x.py完全一致)
|
||||
depth = depth[50:200, 50:210]
|
||||
|
||||
# 背景图 (与display_x.py完全一致)
|
||||
background = np.ones_like(depth) * 0.5 # 设定灰色背景
|
||||
|
||||
# 使用 np.ma.masked_equal() 来屏蔽深度图中的零值 (与display_x.py完全一致)
|
||||
depth = np.ma.masked_equal(depth, 0)
|
||||
|
||||
# 绘制背景 (与display_x.py完全一致)
|
||||
plt.imshow(background, origin='lower', cmap='gray', alpha=0.3)
|
||||
|
||||
# 绘制白色栅格线,并将其置于底层 (与display_x.py完全一致)
|
||||
plt.grid(True, which='both', axis='both', color='white', linestyle='-', linewidth=1, zorder=0)
|
||||
|
||||
# 绘制等高线图并设置原点在左下角 (根据输入参数动态设置)
|
||||
# 通过设置 zorder 来控制它们的层级。例如,设置 zorder=2 或更大的值来确保它们位于栅格线之上。
|
||||
plt.contourf(depth, levels=100, cmap=self.mcmap, vmin=self.depth_min, vmax=self.depth_max, origin='upper', zorder=2)
|
||||
|
||||
# 更新显示 (与display_x.py完全一致)
|
||||
plt.pause(0.1) # 暂停0.1秒
|
||||
plt.draw() # 重绘图像
|
||||
plt.clf() # 清除当前图像
|
||||
|
||||
frame_count += 1
|
||||
if frame_count % 30 == 0: # 每30帧打印一次信息
|
||||
print(f"已处理 {frame_count} 帧")
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\n检测到退出信号,结束程序")
|
||||
except Exception as e:
|
||||
print(f"运行时错误: {e}")
|
||||
finally:
|
||||
# 清理资源
|
||||
if self.device_handle:
|
||||
try:
|
||||
if hasattr(self.device_handle, 'stop'):
|
||||
self.device_handle.stop()
|
||||
if hasattr(self.device_handle, 'close'):
|
||||
self.device_handle.close()
|
||||
except Exception as e:
|
||||
print(f"设备关闭时出现错误: {e}")
|
||||
plt.ioff() # 关闭交互模式
|
||||
plt.close('all')
|
||||
print("程序已安全退出")
|
||||
|
||||
def save_current_frame(self, filename="depth_frame.png"):
|
||||
"""保存当前帧到文件"""
|
||||
try:
|
||||
plt.savefig(filename, dpi=150, bbox_inches='tight')
|
||||
print(f"当前帧已保存到: {filename}")
|
||||
except Exception as e:
|
||||
print(f"保存帧失败: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# 创建查看器实例
|
||||
viewer = FemtoBoltDynamicViewer(depth_min=700, depth_max=1000)
|
||||
|
||||
print("=" * 50)
|
||||
print("FemtoBolt 深度相机动态可视化测试")
|
||||
print("基于 display_x.py 算法的实时成像")
|
||||
print("=" * 50)
|
||||
|
||||
# 运行可视化
|
||||
viewer.run()
|
backend/tests/display_x.py (new file, 70 lines)
@ -0,0 +1,70 @@
|
||||
import cv2
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import pdb
|
||||
|
||||
import os
|
||||
from matplotlib.colors import LinearSegmentedColormap,ListedColormap
|
||||
from matplotlib.animation import FuncAnimation, FFMpegWriter
|
||||
|
||||
# 指定文件夹路径
|
||||
folder_path = 'datas'
|
||||
|
||||
# 获取文件夹中的所有文件
|
||||
files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
|
||||
|
||||
# 根据文件的修改时间排序
|
||||
sorted_files = sorted(files, key=lambda x: os.path.getmtime(os.path.join(folder_path, x)))
|
||||
|
||||
|
||||
# 创建一个自定义的 colormap
|
||||
colors = ['red', 'yellow', 'green', 'blue']
|
||||
|
||||
# 自定义颜色
|
||||
colors = ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
|
||||
mcmap = LinearSegmentedColormap.from_list("custom_cmap", colors)
|
||||
|
||||
plt.figure(figsize=(7, 7))
|
||||
# 打印排序后的文件名
|
||||
for file in sorted_files:
|
||||
data = np.load(os.path.join(folder_path,file))
|
||||
depth = data['arr1']
|
||||
points = data['arr2']
|
||||
color_image = data['arr3']
|
||||
|
||||
h,w,_ = color_image.shape
|
||||
points = points.reshape((h,w,3))
|
||||
|
||||
depth[depth > 1300] = 0
|
||||
depth[depth < 900] = 0
|
||||
depth = depth[50:200,50:210]
|
||||
|
||||
# 背景图
|
||||
background = np.ones_like(depth) * 0.5 # 设定灰色背景
|
||||
|
||||
# 使用 np.ma.masked_equal() 来屏蔽深度图中的零值。masked_array 中的值不会被绘制,从而避免了零值的显示。
|
||||
depth = np.ma.masked_equal(depth, 0)
|
||||
|
||||
# 绘制背景
|
||||
plt.imshow(background, origin='lower', cmap='gray', alpha=0.3)
|
||||
# 绘制白色栅格线,并将其置于底层
|
||||
plt.grid(True, which='both', axis='both', color='white', linestyle='-', linewidth=1, zorder=0)
|
||||
if False:
|
||||
plt.subplot(1,2,1)
|
||||
plt.imshow(depth, cmap='plasma', vmin=1000, vmax=1200)
|
||||
plt.subplot(1,2,2)
|
||||
# 绘制等高线图并设置原点在左下角
|
||||
# 通过设置 zorder 来控制它们的层级。例如,设置 zorder=2 或更大的值来确保它们位于栅格线之上。
|
||||
plt.contourf(depth, levels=200, cmap=mcmap,vmin=900, vmax=1300,origin='upper',zorder=2)
|
||||
plt.pause(0.1) # 暂停0.1秒
|
||||
plt.draw() # 重绘图像
|
||||
plt.clf() # 清除当前图像
|
||||
#plt.show()
|
||||
|
||||
|
||||
|
||||
|
@ -1,3 +0,0 @@
|
||||
# 数据目录
|
||||
# 此文件用于确保 data 目录在版本控制中被保留
|
||||
# 实际的数据文件会被 .gitignore 忽略
|
@ -108,6 +108,9 @@ function createWindow() {
|
||||
backgroundColor: '#000000'
|
||||
});
|
||||
|
||||
// 窗口创建后立即最大化
|
||||
mainWindow.maximize();
|
||||
|
||||
// 开发环境加载本地服务器,生产环境加载打包后的文件
|
||||
const isDev = process.env.NODE_ENV === 'development';
|
||||
if (isDev) {
|
||||
|
@ -13,7 +13,7 @@ api.interceptors.request.use(
|
||||
if (window.electronAPI) {
|
||||
config.baseURL = window.electronAPI.getBackendUrl()
|
||||
} else {
|
||||
config.baseURL = 'http://192.168.1.58:5000'
|
||||
config.baseURL = 'http://localhost:5000'
|
||||
}
|
||||
|
||||
// 只为需要发送数据的请求设置Content-Type
|
||||
@ -637,7 +637,7 @@ export const getBackendUrl = () => {
|
||||
if (window.electronAPI) {
|
||||
return window.electronAPI.getBackendUrl()
|
||||
} else {
|
||||
return 'http://192.168.1.58:5000'
|
||||
return 'http://localhost:5000'
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -550,11 +550,10 @@
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, reactive, computed, onMounted, onUnmounted, nextTick } from 'vue'
|
||||
import { ref, computed, onMounted, onUnmounted, nextTick } from 'vue'
|
||||
import { ElMessage } from 'element-plus'
|
||||
import { useRouter, useRoute } from 'vue-router'
|
||||
import { io } from 'socket.io-client'
|
||||
import html2canvas from 'html2canvas'
|
||||
import Header from '@/views/Header.vue'
|
||||
import { useAuthStore } from '../stores/index.js'
|
||||
import * as echarts from 'echarts'
|
||||
@ -582,7 +581,7 @@ const videoImgRef =ref(null) // 视频流图片ref
|
||||
let mediaRecorder = null
|
||||
let recordedChunks = []
|
||||
let recordingStream = null
|
||||
let currentMimeType = null // 当前录制的视频格式
|
||||
|
||||
|
||||
// 患者信息(从页面获取或通过API获取)
|
||||
const patientInfo = ref({
|
||||
@ -1343,15 +1342,6 @@ function updateHeadPoseMaxValues(headPose) {
|
||||
)
|
||||
}
|
||||
|
||||
// // 输出当前最值(用于调试)
|
||||
// console.log('📊 当前头部姿态最值:', {
|
||||
// rotationLeft: headPoseMaxValues.value.rotationLeftMax.toFixed(1),
|
||||
// rotationRight: headPoseMaxValues.value.rotationRightMax.toFixed(1),
|
||||
// tiltLeft: headPoseMaxValues.value.tiltLeftMax.toFixed(1),
|
||||
// tiltRight: headPoseMaxValues.value.tiltRightMax.toFixed(1),
|
||||
// pitchUp: headPoseMaxValues.value.pitchUpMax.toFixed(1),
|
||||
// pitchDown: headPoseMaxValues.value.pitchDownMax.toFixed(1)
|
||||
// })
|
||||
} catch (error) {
|
||||
console.error('❌ 更新头部姿态最值失败:', error)
|
||||
}
|
||||
@ -1465,16 +1455,7 @@ function handlePressureData(data) {
|
||||
if (pressureData.pressure_zones) {
|
||||
footPressure.value = pressureData.pressure_zones
|
||||
}
|
||||
// 显示平衡分析
|
||||
// if (pressureData.balance_analysis) {
|
||||
// const balance = pressureData.balance_analysis
|
||||
// console.log(' 平衡分析:')
|
||||
// console.log(` 平衡比例: ${(balance.balance_ratio * 100).toFixed(1)}%`)
|
||||
// console.log(` 压力中心偏移: ${balance.pressure_center_offset}%`)
|
||||
// console.log(` 平衡状态: ${balance.balance_status}`)
|
||||
// console.log(` 左足前后比: ${(balance.left_front_ratio * 100).toFixed(1)}%`)
|
||||
// console.log(` 右足前后比: ${(balance.right_front_ratio * 100).toFixed(1)}%`)
|
||||
// }
|
||||
|
||||
|
||||
// 处理压力图片
|
||||
if (pressureData.pressure_image) {
|
||||
@ -1746,281 +1727,10 @@ async function sendDetectionData(data) {
|
||||
}
|
||||
}
|
||||
|
||||
// 更新会话视频路径
|
||||
async function updateSessionVideoPath(sessionId, videoPath) {
|
||||
try {
|
||||
const response = await fetch(`${BACKEND_URL}/api/sessions/${sessionId}/video-path`, {
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
videoPath: videoPath
|
||||
})
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`HTTP ${response.status}: ${response.statusText}`)
|
||||
}
|
||||
|
||||
const result = await response.json()
|
||||
|
||||
if (result.success) {
|
||||
console.log('📹 会话视频路径更新成功:', videoPath)
|
||||
return result
|
||||
} else {
|
||||
throw new Error(result.message || '更新失败')
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
console.error('💥 更新会话视频路径失败:', error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
// 开始录像
|
||||
async function startRecording() {
|
||||
try {
|
||||
console.log('🎬 开始录像...')
|
||||
|
||||
// 获取要录制的区域
|
||||
const targetElement = document.getElementById('detectare')
|
||||
if (!targetElement) {
|
||||
throw new Error('未找到录制区域')
|
||||
}
|
||||
|
||||
// 使用getDisplayMedia API录制屏幕区域
|
||||
// 注意:由于浏览器限制,我们使用captureStream方式
|
||||
const canvas = document.createElement('canvas')
|
||||
const ctx = canvas.getContext('2d')
|
||||
|
||||
// 设置canvas尺寸
|
||||
const rect = targetElement.getBoundingClientRect()
|
||||
canvas.width = rect.width
|
||||
canvas.height = rect.height
|
||||
|
||||
// 创建录制流
|
||||
recordingStream = canvas.captureStream(30) // 30fps
|
||||
|
||||
// 初始化MediaRecorder
|
||||
// 尝试使用mp4格式,如果不支持则回退到webm
|
||||
let mimeType = 'video/mp4;codecs=avc1.42E01E,mp4a.40.2'
|
||||
if (!MediaRecorder.isTypeSupported(mimeType)) {
|
||||
mimeType = 'video/webm;codecs=vp9'
|
||||
console.log('⚠️ 浏览器不支持MP4录制,使用WebM格式')
|
||||
} else {
|
||||
console.log('✅ 使用MP4格式录制')
|
||||
}
|
||||
|
||||
mediaRecorder = new MediaRecorder(recordingStream, {
|
||||
mimeType: mimeType
|
||||
})
|
||||
|
||||
// 保存当前使用的格式
|
||||
currentMimeType = mimeType
|
||||
|
||||
recordedChunks = []
|
||||
|
||||
mediaRecorder.ondataavailable = (event) => {
|
||||
if (event.data.size > 0) {
|
||||
recordedChunks.push(event.data)
|
||||
}
|
||||
}
|
||||
|
||||
// mediaRecorder.onstop = async () => {
|
||||
// console.log('🎬 录像停止,开始保存...')
|
||||
// await saveRecording()
|
||||
// }
|
||||
|
||||
// 开始录制
|
||||
mediaRecorder.start(1000) // 每秒收集一次数据
|
||||
startTimer()
|
||||
isRecording.value = true
|
||||
|
||||
// 开始定期捕获目标区域
|
||||
startCapturingArea(targetElement, canvas, ctx)
|
||||
|
||||
console.log('✅ 录像已开始')
|
||||
|
||||
} catch (error) {
|
||||
console.error('❌ 开始录像失败:', error)
|
||||
ElMessage.error(`开始录像失败: ${error.message}`)
|
||||
}
|
||||
}
|
||||
|
||||
// 定期捕获区域内容到canvas
|
||||
function startCapturingArea(element, canvas, ctx) {
|
||||
const captureFrame = () => {
|
||||
if (!isRecording.value) return
|
||||
|
||||
// 使用html2canvas捕获元素
|
||||
html2canvas(element, {
|
||||
useCORS: true,
|
||||
allowTaint: true,
|
||||
backgroundColor: '#1E1E1E',
|
||||
scale: 1,
|
||||
logging: false,
|
||||
width: canvas.width,
|
||||
height: canvas.height
|
||||
}).then(capturedCanvas => {
|
||||
// 将捕获的内容绘制到录制canvas上
|
||||
ctx.clearRect(0, 0, canvas.width, canvas.height)
|
||||
ctx.drawImage(capturedCanvas, 0, 0, canvas.width, canvas.height)
|
||||
|
||||
// 继续下一帧
|
||||
if (isRecording.value) {
|
||||
setTimeout(captureFrame, 1000 / 30) // 30fps
|
||||
}
|
||||
}).catch(error => {
|
||||
console.error('捕获帧失败:', error)
|
||||
if (isRecording.value) {
|
||||
setTimeout(captureFrame, 1000 / 30)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
captureFrame()
|
||||
}

// Stop recording
function stopRecording() {
  try {
    console.log('🛑 停止录像...')

    if (mediaRecorder && mediaRecorder.state === 'recording') {
      // Listen for the stop event so the recording is saved automatically afterwards
      mediaRecorder.addEventListener('stop', () => {
        console.log('📹 录像数据准备完成,开始保存...')
        saveRecording()
      }, { once: true })

      mediaRecorder.stop()
    } else {
      // Nothing is recording, but if there is unsaved data, save it directly
      if (recordedChunks.length > 0) {
        console.log('📹 发现未保存的录像数据,开始保存...')
        saveRecording()
      }
    }

    if (recordingStream) {
      recordingStream.getTracks().forEach(track => track.stop())
      recordingStream = null
    }

    isRecording.value = false
    console.log('✅ 录像已停止')

  } catch (error) {
    console.error('❌ 停止录像失败:', error)
    ElMessage.error(`停止录像失败: ${error.message}`)
  }
}
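
// Minimal sketch (editorial assumption, not part of the commit): wrapping the recorder's
// 'stop' event in a Promise so a caller could `await` the final data before saving.
function stopRecorderAndWait(recorder) {
  return new Promise((resolve) => {
    if (!recorder || recorder.state !== 'recording') {
      resolve()
      return
    }
    recorder.addEventListener('stop', () => resolve(), { once: true })
    recorder.stop()
  })
}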

// Save the recording
async function saveRecording() {
  try {
    if (recordedChunks.length === 0) {
      throw new Error('没有录制数据')
    }
    // Validate the required patient information
    if (!patientInfo.value.id || !patientInfo.value.name || !patientInfo.value.sessionId) {
      throw new Error(`缺少必需的患者信息: ID=${patientInfo.value.id}, 姓名=${patientInfo.value.name}, 会话ID=${patientInfo.value.sessionId}`)
    }

    console.log('📝 准备保存录像,患者信息:', {
      id: patientInfo.value.id,
      name: patientInfo.value.name,
      sessionId: patientInfo.value.sessionId
    })

    // Create the video blob, using the format that was actually recorded
    const blob = new Blob(recordedChunks, { type: currentMimeType || 'video/webm' })
    console.log('📹 录像数据大小:', (blob.size / 1024 / 1024).toFixed(2), 'MB')

    // Convert to base64
    const reader = new FileReader()
    reader.readAsDataURL(blob)

    reader.onload = async () => {
      try {
        const base64Data = reader.result

        // await fetch(`${BACKEND_URL}/api/recordings/save`
        // Call the backend API to save the recording
        const response = await fetch(`${BACKEND_URL}/api/detection/${patientInfo.value.sessionId}/stop`, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json'
          },
          body: JSON.stringify({
            patientId: patientInfo.value.id,
            patientName: patientInfo.value.name,
            sessionId: patientInfo.value.sessionId,
            videoData: base64Data,
            mimeType: currentMimeType || 'video/webm;codecs=vp9'
          })
        })
        if (!response.ok) {
          throw new Error(`HTTP ${response.status}: ${response.statusText}`)
        }
        const result = await response.json()
        if (result.success) {
          console.log('🎬 录像保存成功:', result.filepath)
          ElMessage.success({
            message: `录像保存成功!文件路径: ${result.filepath}`,
            duration: 5000
          })
          isRecording.value = false
          resetTimer()
          // Update the session's video path
          if (patientInfo.value.sessionId) {
            try {
              await updateSessionVideoPath(patientInfo.value.sessionId, result.filepath)
            } catch (error) {
              console.error('更新会话视频路径失败:', error)
            }
          }
          // Clear the recorded data to avoid saving it twice
          recordedChunks.length = 0
          console.log('🧹 录像数据已清空')

          // After the recording is saved, clear the session ID to formally end the session
          // patientInfo.value.sessionId = null
          console.log('✅ 会话正式结束,会话ID已清空')
          // Reset the diagnosis info form and show the result dialog
          diagnosticForm.value = {
            diagnosis_info: '',
            treatment_info: '',
            suggestion_info: ''
          }
          resDialogVisible.value = true
        } else {
          throw new Error(result.message || '保存失败')
        }

      } catch (error) {
        console.error('💥 保存录像失败:', error)
        ElMessage.error({
          message: `保存录像失败: ${error.message}`,
          duration: 5000
        })
        // Even if saving fails, end the session to avoid inconsistent state
        // patientInfo.value.sessionId = null
        console.log('⚠️ 录像保存失败,但会话已结束,会话ID已清空')
      }
    }

    reader.onerror = () => {
      console.error('❌ 读取录像数据失败')
      ElMessage.error('读取录像数据失败')
    }

  } catch (error) {
    console.error('❌ 保存录像失败:', error)
    ElMessage.error(`保存录像失败: ${error.message}`)
    console.log('⚠️ 录像保存失败,但会话已结束,会话ID已清空')
  }
}
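
// Minimal sketch (assumption, not part of the commit): a Promise-based wrapper around
// FileReader so the base64 conversion above could be awaited instead of using callbacks.
function blobToBase64(blob) {
  return new Promise((resolve, reject) => {
    const reader = new FileReader()
    reader.onload = () => resolve(reader.result)
    reader.onerror = () => reject(new Error('读取录像数据失败'))
    reader.readAsDataURL(blob)
  })
}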


@ -2044,6 +1754,8 @@ async function handleStartStop() {

async function startDetection() {
  try {
    console.log('🚀 正在开始检测...')
    isRecording.value = true
    startTimer()
    // Validate patient information
    if (!patientInfo.value || !patientInfo.value.id) {
      throw new Error('缺少患者信息,无法开始检测')

@ -2061,29 +1773,10 @@ async function startDetection() {

        patient_id: patientInfo.value.id,
        // Additional detection parameters can be added here
        creator_id: creatorId.value,
        screen_location: {
          x: screen_location.x,
          y: screen_location.y,
          w: screen_location.width,
          h: screen_location.height
        },
        femtobolt_location: {
          x: femtobolt_location.x,
          y: femtobolt_location.y,
          w: femtobolt_location.width,
          h: femtobolt_location.height
        },
        camera_location: {
          x: camera_location.x,
          y: camera_location.y,
          w: camera_location.width,
          h: camera_location.height
        }

        // settings: JSON.stringify({
        //   frequency: 30, // sampling frequency
        //   // other settings
        // })
        screen_location: [Math.round(screen_location.x), Math.round(screen_location.y), Math.round(screen_location.width), Math.round(screen_location.height)],
        camera_location: [Math.round(camera_location.x), Math.round(camera_location.y), Math.round(camera_location.width), Math.round(camera_location.height)],
        femtobolt_location: [Math.round(femtobolt_location.x), Math.round(femtobolt_location.y), Math.round(femtobolt_location.width), Math.round(femtobolt_location.height)],

      })
    })
    if (!response.ok) {

@ -2099,9 +1792,8 @@ async function startDetection() {

      patientInfo.value.sessionId = result.session_id
      patientInfo.value.detectionStartTime = Date.now()
      console.log('✅ 检测会话创建成功,会话ID:', patientInfo.value.sessionId)

      isStart.value = true
      startRecording()

      isStart.value = true
      ElMessage.success('检测已开始')
    } else {
      throw new Error(result.message || '开始检测失败')
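
// Minimal sketch (illustrative only, outside the diff hunks above): the [x, y, w, h]
// arrays sent in the request body can be built from a DOMRect-like object with a small
// helper; rectToArray is a hypothetical name, not part of this commit.
function rectToArray(rect) {
  return [
    Math.round(rect.x),
    Math.round(rect.y),
    Math.round(rect.width),
    Math.round(rect.height)
  ]
}
// e.g. screen_location: rectToArray(screen_location)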

@ -2118,17 +1810,27 @@ async function startDetection() {

async function stopDetection() {
  try {
    console.log('🛑 停止检测,会话ID:', patientInfo.value.sessionId)

    resetTimer()
    // Compute the detection duration
    let duration = 0
    if (patientInfo.value.detectionStartTime) {
      duration = Math.floor((Date.now() - patientInfo.value.detectionStartTime) / 1000)
    }

    // If recording is in progress, stop it
    if (isRecording.value) {
      stopRecording()
    // Call the backend API to stop the detection
    const response = await fetch(`${BACKEND_URL}/api/detection/${patientInfo.value.sessionId}/stop`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        duration: duration
      })
    })
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`)
    }
    isRecording.value = false
    isStart.value = false

  } catch (error) {

@ -2292,15 +1994,6 @@ const calibrationClick = async () => {
}

const cameraSubmit = async () => {

  // let data = {
  //   "imu": {"device_type": "real", "port": "COM7", "baudrate": 9600},
  //   "pressure": {"device_type": "real", "port": "COM8", "baudrate": 115200},
  //   "camera": {"device_index": 0, "width": 1280, "height": 720, "fps": 30},
  //   "femtobolt": {"color_resolution": "1080P", "depth_mode": "NFOV_UNBINNED", "fps": 15}
  // }

  //
  const response = await fetch(`${BACKEND_URL}/api/config/devices/all`, {
    method: 'POST',
    headers: {

@ -2344,7 +2037,7 @@ const getDevicesInit = async () => {
}

onMounted(() => {
  // wholeBodyRef.value

  console.log(wholeBodyRef.value.getBoundingClientRect())
  console.log(videoImgRef.value.getBoundingClientRect())
  // Load patient information

@ -30,7 +30,7 @@

  </div>
  <div class="content-center">
    <video ref="videoPlayerRef" :src=" BACKEND_URL+'/' + item.screen_video_path" controls width="100%" height="100%">
    <video ref="videoPlayerRef" :src=" BACKEND_URL+'/' + item.screen_video_path.replace(/\\/g, '/')" controls width="100%" height="100%">
      您的浏览器不支持视频播放
    </video>
    <img src="@/assets/big.png" alt="" class="bigImgBox" @click="bigImgClick(item)">

@ -70,8 +70,8 @@

  <div class="content-right-bottom-content">
    <div v-for="(item2, index2) in item.latest_detection_data" :key="index2" class="content-right-bottom-content-box">
      <div class="content-right-bottom-img">
        <img :src="BACKEND_URL+'/' + item2.screen_image" style="width:100% ;height: 100%;cursor: pointer;" alt=""
          @click="showImage(BACKEND_URL+'/' + item2.screen_image)">
        <img :src="BACKEND_URL+'/' + item2.screen_image.replace(/\\/g, '/')" style="width:100% ;height: 100%;cursor: pointer;" alt=""
          @click="showImage(BACKEND_URL+'/' + item2.screen_image.replace(/\\/g, '/'))">

      </div>
      <div style="margin-top: 15px;">

@ -404,7 +404,9 @@ function showImage(row){ // 显示大屏图片

  }, 300)
}
function bigImgClick(row) {
  videoUrl.value = BACKEND_URL + '/' + row.normal_video_path
  // Convert Windows-path backslashes to forward slashes for the web URL
  const webPath = row.normal_video_path.replace(/\\/g, '/')
  videoUrl.value = BACKEND_URL + '/' + webPath
  dialogVideoVisible.value = true
}
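
// Minimal sketch (hypothetical helper, not part of the commit): the backslash-to-slash
// conversion repeated in the template bindings and in bigImgClick could be centralized.
function toWebUrl(path) {
  return `${BACKEND_URL}/${String(path).replace(/\\/g, '/')}`
}
// e.g. videoUrl.value = toWebUrl(row.normal_video_path)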