修改了深度相机算法,修改系统bug
This commit is contained in:
parent
c88845dd6e
commit
05da4eab8e
@ -5,16 +5,19 @@ block_cipher = None
|
||||
|
||||
a = Analysis(
|
||||
['main.py'],
|
||||
pathex=[],
|
||||
pathex=['D:/Trae_space/pyKinectAzure'],
|
||||
binaries=[
|
||||
('dll/femtobolt/bin/k4a.dll', 'dll/femtobolt/bin'), # K4A动态库
|
||||
('dll/femtobolt/bin/k4arecord.dll', 'dll/femtobolt/bin'), # K4A录制库
|
||||
('dll/femtobolt/bin/depthengine_2_0.dll', 'dll/femtobolt/bin'), # 深度引擎
|
||||
('dll/femtobolt/bin/OrbbecSDK.dll', 'dll/femtobolt/bin'), # Orbbec SDK
|
||||
('dll/femtobolt/bin/ob_usb.dll', 'dll/femtobolt/bin'), # Orbbec USB库
|
||||
('dll/femtobolt/bin/live555.dll', 'dll/femtobolt/bin'), # Live555库
|
||||
('dll/femtobolt/bin/OrbbecSDKConfig_v1.0.xml', 'dll/femtobolt/bin'), # Orbbec配置文件 ('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSense传感器库
|
||||
('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSenseUsb库
|
||||
# FemtoBolt相关库文件
|
||||
('dll/femtobolt/k4a.dll', 'dll/femtobolt'), # K4A动态库
|
||||
('dll/femtobolt/k4arecord.dll', 'dll/femtobolt'), # K4A录制库
|
||||
('dll/femtobolt/depthengine_2_0.dll', 'dll/femtobolt'), # 深度引擎
|
||||
('dll/femtobolt/OrbbecSDK.dll', 'dll/femtobolt'), # Orbbec SDK
|
||||
('dll/femtobolt/k4a.lib', 'dll/femtobolt'), # K4A静态库
|
||||
('dll/femtobolt/k4arecord.lib', 'dll/femtobolt'), # K4A录制静态库
|
||||
('dll/femtobolt/k4arecorder.exe', 'dll/femtobolt'), # K4A录制工具
|
||||
('dll/femtobolt/k4aviewer.exe', 'dll/femtobolt'), # K4A查看器
|
||||
# SMiTSense相关库文件
|
||||
('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSense传感器库
|
||||
('dll/smitsense/Wrapper.dll', 'dll/smitsense'), # SMiTSense传感器库包装类
|
||||
],
|
||||
hiddenimports=[
|
||||
@ -38,6 +41,13 @@ a = Analysis(
|
||||
'base64',
|
||||
'psutil',
|
||||
'pykinect_azure',
|
||||
'pykinect_azure.k4a',
|
||||
'pykinect_azure.k4abt',
|
||||
'pykinect_azure.k4arecord',
|
||||
'pykinect_azure.pykinect',
|
||||
'pykinect_azure.utils',
|
||||
'pykinect_azure._k4a',
|
||||
'pykinect_azure._k4abt',
|
||||
'pyserial',
|
||||
'requests',
|
||||
'yaml',
|
||||
|
@ -33,16 +33,19 @@ block_cipher = None
|
||||
|
||||
a = Analysis(
|
||||
['main.py'],
|
||||
pathex=[],
|
||||
pathex=['D:/Trae_space/pyKinectAzure'],
|
||||
binaries=[
|
||||
('dll/femtobolt/bin/k4a.dll', 'dll/femtobolt/bin'), # K4A动态库
|
||||
('dll/femtobolt/bin/k4arecord.dll', 'dll/femtobolt/bin'), # K4A录制库
|
||||
('dll/femtobolt/bin/depthengine_2_0.dll', 'dll/femtobolt/bin'), # 深度引擎
|
||||
('dll/femtobolt/bin/OrbbecSDK.dll', 'dll/femtobolt/bin'), # Orbbec SDK
|
||||
('dll/femtobolt/bin/ob_usb.dll', 'dll/femtobolt/bin'), # Orbbec USB库
|
||||
('dll/femtobolt/bin/live555.dll', 'dll/femtobolt/bin'), # Live555库
|
||||
('dll/femtobolt/bin/OrbbecSDKConfig_v1.0.xml', 'dll/femtobolt/bin'), # Orbbec配置文件 ('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSense传感器库
|
||||
('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSenseUsb库
|
||||
# FemtoBolt相关库文件
|
||||
('dll/femtobolt/k4a.dll', 'dll/femtobolt'), # K4A动态库
|
||||
('dll/femtobolt/k4arecord.dll', 'dll/femtobolt'), # K4A录制库
|
||||
('dll/femtobolt/depthengine_2_0.dll', 'dll/femtobolt'), # 深度引擎
|
||||
('dll/femtobolt/OrbbecSDK.dll', 'dll/femtobolt'), # Orbbec SDK
|
||||
('dll/femtobolt/k4a.lib', 'dll/femtobolt'), # K4A静态库
|
||||
('dll/femtobolt/k4arecord.lib', 'dll/femtobolt'), # K4A录制静态库
|
||||
('dll/femtobolt/k4arecorder.exe', 'dll/femtobolt'), # K4A录制工具
|
||||
('dll/femtobolt/k4aviewer.exe', 'dll/femtobolt'), # K4A查看器
|
||||
# SMiTSense相关库文件
|
||||
('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSense传感器库
|
||||
('dll/smitsense/Wrapper.dll', 'dll/smitsense'), # SMiTSense传感器库包装类
|
||||
],
|
||||
hiddenimports=[
|
||||
@ -66,6 +69,13 @@ a = Analysis(
|
||||
'base64',
|
||||
'psutil',
|
||||
'pykinect_azure',
|
||||
'pykinect_azure.k4a',
|
||||
'pykinect_azure.k4abt',
|
||||
'pykinect_azure.k4arecord',
|
||||
'pykinect_azure.pykinect',
|
||||
'pykinect_azure.utils',
|
||||
'pykinect_azure._k4a',
|
||||
'pykinect_azure._k4abt',
|
||||
'pyserial',
|
||||
'requests',
|
||||
'yaml',
|
||||
@ -266,6 +276,30 @@ def copy_config_files():
|
||||
else:
|
||||
print(f"⚠️ 配置文件不存在: {config_file}")
|
||||
|
||||
def install_build_dependencies():
|
||||
"""安装打包依赖"""
|
||||
print("检查并安装打包依赖...")
|
||||
|
||||
try:
|
||||
# 安装打包依赖
|
||||
cmd = [sys.executable, '-m', 'pip', 'install', '-r', 'requirements_build.txt']
|
||||
print(f"执行命令: {' '.join(cmd)}")
|
||||
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, encoding='utf-8', errors='ignore')
|
||||
|
||||
if result.returncode == 0:
|
||||
print("✓ 依赖安装成功!")
|
||||
return True
|
||||
else:
|
||||
print(f"⚠️ 依赖安装警告: {result.stderr}")
|
||||
print("继续打包过程...")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"⚠️ 依赖安装失败: {e}")
|
||||
print("继续打包过程...")
|
||||
return True
|
||||
|
||||
def main():
|
||||
"""主函数"""
|
||||
print("=" * 60)
|
||||
@ -280,6 +314,10 @@ def main():
|
||||
input("按回车键退出...")
|
||||
return
|
||||
|
||||
# 安装打包依赖
|
||||
install_build_dependencies()
|
||||
print()
|
||||
|
||||
try:
|
||||
# 清理构建目录
|
||||
clean_build_dirs()
|
||||
|
@ -21,9 +21,10 @@ height = 720
|
||||
fps = 30
|
||||
|
||||
[FEMTOBOLT]
|
||||
algorithm_type = plt
|
||||
color_resolution = 1080P
|
||||
depth_mode = NFOV_UNBINNED
|
||||
fps = 30
|
||||
depth_mode = NFOV_2X2BINNED
|
||||
camera_fps = 15
|
||||
depth_range_min = 1200
|
||||
depth_range_max = 1500
|
||||
|
||||
|
@ -276,7 +276,7 @@ class DatabaseManager:
|
||||
diagnosis_info TEXT, -- 诊断信息
|
||||
treatment_info TEXT, -- 处理信息
|
||||
suggestion_info TEXT, -- 建议信息
|
||||
status TEXT DEFAULT 'created', -- 会话状态(created/running/diagnosed/completed)
|
||||
status TEXT DEFAULT 'created', -- 会话状态(created/running/checked/diagnosed/completed)
|
||||
created_at TIMESTAMP, -- 记录创建时间
|
||||
FOREIGN KEY (patient_id) REFERENCES patients (id), -- 患者表外键约束
|
||||
FOREIGN KEY (creator_id) REFERENCES users (id) -- 用户表外键约束
|
||||
@ -668,14 +668,29 @@ class DatabaseManager:
|
||||
cursor = conn.cursor()
|
||||
|
||||
try:
|
||||
if status in ['completed', 'stopped', 'error']:
|
||||
# 首先获取会话对应的患者ID
|
||||
cursor.execute('SELECT patient_id FROM detection_sessions WHERE id = ?', (session_id,))
|
||||
result = cursor.fetchone()
|
||||
if not result:
|
||||
logger.error(f'会话不存在: {session_id}')
|
||||
return False
|
||||
|
||||
patient_id = result[0]
|
||||
china_time = self.get_china_time()
|
||||
|
||||
if status in ['checked', 'stopped','complated', 'error']:
|
||||
# 使用中国时区时间
|
||||
china_time = self.get_china_time()
|
||||
cursor.execute('''
|
||||
UPDATE detection_sessions SET
|
||||
status = ?, end_time = ?
|
||||
WHERE id = ?
|
||||
''', (status, china_time, session_id))
|
||||
|
||||
# 同步更新患者表的updated_at时间
|
||||
cursor.execute('''
|
||||
UPDATE patients SET updated_at = ?
|
||||
WHERE id = ?
|
||||
''', (china_time, patient_id))
|
||||
else:
|
||||
cursor.execute('''
|
||||
UPDATE detection_sessions SET
|
||||
@ -683,8 +698,10 @@ class DatabaseManager:
|
||||
WHERE id = ?
|
||||
''', (status, session_id))
|
||||
|
||||
|
||||
|
||||
conn.commit()
|
||||
logger.info(f'更新会话状态: {session_id} -> {status}')
|
||||
logger.info(f'更新会话状态: {session_id} -> {status},同时更新患者 {patient_id} 的updated_at时间')
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
|
@ -128,6 +128,16 @@ class BaseDevice(ABC):
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def reload_config(self) -> bool:
|
||||
"""
|
||||
重新加载设备配置
|
||||
|
||||
Returns:
|
||||
bool: 重新加载是否成功
|
||||
"""
|
||||
pass
|
||||
|
||||
def set_socketio(self, socketio):
|
||||
"""
|
||||
设置SocketIO实例
|
||||
|
@ -101,7 +101,10 @@ class CameraManager(BaseDevice):
|
||||
bool: 初始化是否成功
|
||||
"""
|
||||
try:
|
||||
self.logger.info(f"正在初始化相机设备 {self.device_index}...")
|
||||
self.logger.info(f"正在初始化相机设备...")
|
||||
|
||||
# 使用构造函数中已加载的配置,避免并发读取配置文件
|
||||
self.logger.info(f"使用已加载配置: device_index={self.device_index}, resolution={self.width}x{self.height}, fps={self.fps}")
|
||||
|
||||
# 尝试多个后端(Windows下优先MSMF/DShow)
|
||||
backends = [cv2.CAP_MSMF, cv2.CAP_DSHOW, cv2.CAP_ANY]
|
||||
@ -563,6 +566,52 @@ class CameraManager(BaseDevice):
|
||||
except Exception as e:
|
||||
self.logger.error(f"断开相机连接失败: {e}")
|
||||
|
||||
def reload_config(self) -> bool:
|
||||
"""
|
||||
重新加载设备配置
|
||||
|
||||
Returns:
|
||||
bool: 重新加载是否成功
|
||||
"""
|
||||
try:
|
||||
self.logger.info("正在重新加载相机配置...")
|
||||
|
||||
|
||||
|
||||
# 获取最新配置
|
||||
config = self.config_manager.get_device_config('camera')
|
||||
|
||||
# 更新配置属性
|
||||
self.device_index = config.get('device_index', 0)
|
||||
self.width = config.get('width', 1280)
|
||||
self.height = config.get('height', 720)
|
||||
self.fps = config.get('fps', 30)
|
||||
self.buffer_size = config.get('buffer_size', 1)
|
||||
self.fourcc = config.get('fourcc', 'MJPG')
|
||||
self._tx_max_width = int(config.get('tx_max_width', 640))
|
||||
|
||||
# 更新帧缓存队列大小
|
||||
frame_cache_len = int(config.get('frame_cache_len', 2))
|
||||
if frame_cache_len != self.frame_cache.maxsize:
|
||||
# 清空旧队列
|
||||
while not self.frame_cache.empty():
|
||||
try:
|
||||
self.frame_cache.get_nowait()
|
||||
except queue.Empty:
|
||||
break
|
||||
# 创建新队列
|
||||
self.frame_cache = queue.Queue(maxsize=frame_cache_len)
|
||||
|
||||
# 更新设备信息
|
||||
self.device_id = f"camera_{self.device_index}"
|
||||
|
||||
self.logger.info(f"相机配置重新加载成功 - 设备索引: {self.device_index}, 分辨率: {self.width}x{self.height}, FPS: {self.fps}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"重新加载相机配置失败: {e}")
|
||||
return False
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
清理资源
|
||||
|
@ -17,8 +17,11 @@ import logging
|
||||
from collections import deque
|
||||
import gc
|
||||
from matplotlib.colors import LinearSegmentedColormap
|
||||
import matplotlib.pyplot as plt
|
||||
import matplotlib
|
||||
from scipy import ndimage
|
||||
from scipy.interpolate import griddata
|
||||
import io
|
||||
|
||||
try:
|
||||
from .base_device import BaseDevice
|
||||
@ -71,9 +74,11 @@ class FemtoBoltManager(BaseDevice):
|
||||
self.sdk_initialized = False
|
||||
|
||||
# 设备配置
|
||||
self.algorithm_type = self.config.get('algorithm_type', 'opencv')
|
||||
self.color_resolution = self.config.get('color_resolution', '1080P')
|
||||
self.depth_mode = self.config.get('depth_mode', 'NFOV_UNBINNED')
|
||||
self.fps = self.config.get('fps', 15)
|
||||
self.depth_mode = self.config.get('depth_mode', 'NFOV_2X2BINNED')
|
||||
self.color_format = self.config.get('color_format', 'COLOR_BGRA32')
|
||||
self.fps = self.config.get('camera_fps', 20)
|
||||
self.depth_range_min = self.config.get('depth_range_min', 500)
|
||||
self.depth_range_max = self.config.get('depth_range_max', 4500)
|
||||
self.synchronized_images_only = self.config.get('synchronized_images_only', False)
|
||||
@ -107,7 +112,7 @@ class FemtoBoltManager(BaseDevice):
|
||||
self._last_send_time = 0
|
||||
|
||||
# 编码参数缓存(避免每帧创建数组)
|
||||
self._encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), int(self.config.get('jpeg_quality', 80))]
|
||||
self._encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), int(self.config.get('jpeg_quality', 60))]
|
||||
|
||||
# 预计算伽马LUT(避免每帧计算)
|
||||
self._gamma_lut = None
|
||||
@ -120,18 +125,223 @@ class FemtoBoltManager(BaseDevice):
|
||||
|
||||
# 自定义彩虹色 colormap(参考testfemtobolt.py)
|
||||
colors = ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
|
||||
self.custom_cmap = LinearSegmentedColormap.from_list("custom_cmap", colors)
|
||||
|
||||
self.logger.info("FemtoBolt管理器初始化完成")
|
||||
# 设置matplotlib为非交互模式
|
||||
matplotlib.use('Agg')
|
||||
|
||||
# 创建matplotlib图形对象(复用以提高性能)
|
||||
self.fig, self.ax = plt.subplots(figsize=(7, 7))
|
||||
self.ax.set_aspect('equal')
|
||||
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
|
||||
self.logger.info(f"FemtoBolt设备配置完成 - 算法类型: {self.algorithm_type}, 深度模式: {self.depth_mode}, FPS: {self.fps}")
|
||||
|
||||
|
||||
def _update_gamma_lut(self):
|
||||
"""更新伽马校正查找表"""
|
||||
if self._current_gamma != self.gamma_value:
|
||||
self._gamma_lut = np.array([((i / 255.0) ** self.gamma_value) * 255 for i in range(256)]).astype("uint8")
|
||||
self._gamma_lut = np.array([((i / 255.0) ** (1.0 / self.gamma_value)) * 255 for i in range(256)], dtype=np.uint8)
|
||||
self._current_gamma = self.gamma_value
|
||||
|
||||
def _generate_contour_image_opencv(self, depth):
|
||||
"""优化的等高线图像生成(增强梯度变化清晰度)"""
|
||||
try:
|
||||
# 深度数据过滤(与原始函数完全一致)
|
||||
depth_filtered = depth.copy()
|
||||
depth_filtered[depth_filtered > 1100] = 0
|
||||
depth_filtered[depth_filtered < 500] = 0
|
||||
|
||||
# 创建输出图像
|
||||
height, width = depth_filtered.shape
|
||||
|
||||
# 背景图(与原始函数一致:灰色背景,alpha=0.3效果)
|
||||
background_gray = int(0.5 * 255 * 0.3 + 255 * (1 - 0.3)) # 模拟灰色背景alpha混合
|
||||
output = np.ones((height, width, 3), dtype=np.uint8) * background_gray
|
||||
|
||||
# 绘制白色网格线(与原始函数grid效果一致)
|
||||
grid_spacing = max(height // 20, width // 20, 10) # 自适应网格间距
|
||||
for x in range(0, width, grid_spacing):
|
||||
cv2.line(output, (x, 0), (x, height-1), (255, 255, 255), 1)
|
||||
for y in range(0, height, grid_spacing):
|
||||
cv2.line(output, (0, y), (width-1, y), (255, 255, 255), 1)
|
||||
|
||||
# 使用masked数据(与原始函数np.ma.masked_equal逻辑一致)
|
||||
valid_mask = depth_filtered > 0
|
||||
if np.any(valid_mask):
|
||||
# 将深度值映射到500-1100范围(与原始函数vmin=500, vmax=1100一致)
|
||||
depth_for_contour = depth_filtered.copy().astype(np.float32)
|
||||
depth_for_contour[~valid_mask] = np.nan # 无效区域设为NaN
|
||||
|
||||
# 增加等高线层级数量以获得更细腻的梯度变化(从100增加到200)
|
||||
levels = np.linspace(500, 1100, 201) # 200个等高线层级,提升梯度细腻度
|
||||
|
||||
# 创建等高线边界增强图像
|
||||
contour_edges = np.zeros((height, width), dtype=np.uint8)
|
||||
|
||||
# 为每个像素分配等高线层级
|
||||
for i in range(len(levels) - 1):
|
||||
level_min = levels[i]
|
||||
level_max = levels[i + 1]
|
||||
|
||||
# 创建当前层级的掩码
|
||||
level_mask = (depth_filtered >= level_min) & (depth_filtered < level_max)
|
||||
|
||||
if np.any(level_mask):
|
||||
# 增强颜色映射算法 - 使用非线性映射增强对比度
|
||||
color_val = (level_min - 500) / (1100 - 500)
|
||||
color_val = np.clip(color_val, 0, 1)
|
||||
|
||||
# 应用Gamma校正增强对比度(gamma=0.8增强中间色调)
|
||||
color_val_enhanced = np.power(color_val, 0.8)
|
||||
|
||||
# 应用自定义colormap
|
||||
color = self.custom_cmap(color_val_enhanced)[:3]
|
||||
color_bgr = (np.array(color) * 255).astype(np.uint8)
|
||||
|
||||
# 赋值颜色(BGR格式)
|
||||
output[level_mask, 0] = color_bgr[2] # B
|
||||
output[level_mask, 1] = color_bgr[1] # G
|
||||
output[level_mask, 2] = color_bgr[0] # R
|
||||
|
||||
# 检测等高线边界(每10个层级检测一次主要等高线)
|
||||
if i % 10 == 0:
|
||||
# 使用形态学操作检测边界
|
||||
kernel = np.ones((3, 3), np.uint8)
|
||||
dilated = cv2.dilate(level_mask.astype(np.uint8), kernel, iterations=1)
|
||||
eroded = cv2.erode(level_mask.astype(np.uint8), kernel, iterations=1)
|
||||
edge = dilated - eroded
|
||||
contour_edges = cv2.bitwise_or(contour_edges, edge)
|
||||
|
||||
# 增强等高线边界
|
||||
if np.any(contour_edges):
|
||||
# 对等高线边界进行轻微扩展
|
||||
kernel = np.ones((2, 2), np.uint8)
|
||||
contour_edges = cv2.dilate(contour_edges, kernel, iterations=1)
|
||||
|
||||
# 在等高线边界处增强对比度
|
||||
edge_mask = contour_edges > 0
|
||||
if np.any(edge_mask):
|
||||
# 增强边界处的颜色对比度
|
||||
for c in range(3):
|
||||
channel = output[:, :, c].astype(np.float32)
|
||||
# 对边界像素应用对比度增强
|
||||
channel[edge_mask] = np.clip(channel[edge_mask] * 1.2, 0, 255)
|
||||
output[:, :, c] = channel.astype(np.uint8)
|
||||
|
||||
# 减少过度平滑处理以保持清晰度
|
||||
# 仅应用轻微的降噪处理,保持梯度边界清晰
|
||||
output = cv2.bilateralFilter(output, 3, 20, 20) # 减少滤波强度
|
||||
|
||||
# 裁剪宽度(与原始函数保持一致)
|
||||
target_width = height // 2
|
||||
if width > target_width:
|
||||
left = (width - target_width) // 2
|
||||
right = left + target_width
|
||||
output = output[:, left:right]
|
||||
|
||||
# 最终锐化处理增强细节
|
||||
# 使用USM锐化增强等高线细节
|
||||
gaussian = cv2.GaussianBlur(output, (0, 0), 1.0)
|
||||
output = cv2.addWeighted(output, 1.5, gaussian, -0.5, 0)
|
||||
output = np.clip(output, 0, 255).astype(np.uint8)
|
||||
|
||||
return output
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"优化等高线生成失败: {e}")
|
||||
return None
|
||||
|
||||
def _create_grid_background(self, height, width):
|
||||
"""创建网格背景缓存"""
|
||||
bg = np.ones((height, width, 3), dtype=np.uint8) * 128
|
||||
# 绘制白色网格线
|
||||
grid_spacing = 50
|
||||
for x in range(0, width, grid_spacing):
|
||||
cv2.line(bg, (x, 0), (x, height-1), (255, 255, 255), 1)
|
||||
for y in range(0, height, grid_spacing):
|
||||
cv2.line(bg, (0, y), (width-1, y), (255, 255, 255), 1)
|
||||
|
||||
self._grid_bg = bg
|
||||
self._grid_size = (height, width)
|
||||
|
||||
def _generate_contour_image_plt(self, depth):
|
||||
"""使用matplotlib生成等高线图像(完全采用display_x.py的逻辑)"""
|
||||
try:
|
||||
# 清除之前的绘图
|
||||
self.ax.clear()
|
||||
|
||||
# 深度数据过滤(与display_x.py完全一致)
|
||||
depth[depth > 1100] = 0
|
||||
depth[depth < 500] = 0
|
||||
|
||||
# 背景图(与display_x.py完全一致)
|
||||
background = np.ones_like(depth) * 0.5 # 设定灰色背景
|
||||
|
||||
# 使用 np.ma.masked_equal() 来屏蔽深度图中的零值(与display_x.py完全一致)
|
||||
depth = np.ma.masked_equal(depth, 0)
|
||||
|
||||
# 绘制背景(与display_x.py完全一致)
|
||||
self.ax.imshow(background, origin='lower', cmap='gray', alpha=0.3)
|
||||
|
||||
# 绘制白色栅格线,并将其置于底层(网格密度加大一倍)
|
||||
self.ax.grid(True, which='both', axis='both', color='white', linestyle='-', linewidth=0.5, zorder=0)
|
||||
self.ax.minorticks_on()
|
||||
self.ax.grid(True, which='minor', axis='both', color='white', linestyle='-', linewidth=0.3, zorder=0)
|
||||
|
||||
# 隐藏坐标轴
|
||||
# self.ax.set_xticks([])
|
||||
# self.ax.set_yticks([])
|
||||
|
||||
# 绘制等高线图并设置原点在上方(与display_x.py完全一致)
|
||||
import time
|
||||
start_time = time.perf_counter()
|
||||
self.ax.contourf(depth, levels=100, cmap=self.custom_cmap, vmin=500, vmax=1100, origin='upper', zorder=2)
|
||||
contourf_time = time.perf_counter() - start_time
|
||||
# self.logger.info(f"contourf绘制耗时: {contourf_time*1000:.2f}ms")
|
||||
|
||||
|
||||
# 将matplotlib图形转换为numpy数组
|
||||
buf = io.BytesIO()
|
||||
savefig_start = time.perf_counter()
|
||||
savefig_start = time.perf_counter()
|
||||
self.fig.savefig(buf, format='png',bbox_inches='tight', pad_inches=0, dpi=75)
|
||||
savefig_time = time.perf_counter() - savefig_start
|
||||
# self.logger.info(f"savefig保存耗时: {savefig_time*1000:.2f}ms")
|
||||
|
||||
buf_start = time.perf_counter()
|
||||
buf.seek(0)
|
||||
|
||||
# 读取PNG数据并转换为OpenCV格式
|
||||
img_array = np.frombuffer(buf.getvalue(), dtype=np.uint8)
|
||||
buf.close()
|
||||
buf_time = time.perf_counter() - buf_start
|
||||
# self.logger.info(f"缓冲区操作耗时: {buf_time*1000:.2f}ms")
|
||||
|
||||
# 解码PNG图像
|
||||
decode_start = time.perf_counter()
|
||||
img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
|
||||
decode_time = time.perf_counter() - decode_start
|
||||
# self.logger.info(f"PNG解码耗时: {decode_time*1000:.2f}ms")
|
||||
# return img
|
||||
if img is not None:
|
||||
# 裁剪宽度(与原逻辑保持一致)
|
||||
height, width = img.shape[:2]
|
||||
target_width = round(height // 2)
|
||||
if width > target_width:
|
||||
left = (width - target_width) // 2
|
||||
right = left + target_width
|
||||
img = img[:, left:right]
|
||||
return img
|
||||
else:
|
||||
self.logger.error("无法解码matplotlib生成的PNG图像")
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"生成等高线图像失败: {e}")
|
||||
return None
|
||||
|
||||
def initialize(self) -> bool:
|
||||
"""
|
||||
@ -143,6 +353,9 @@ class FemtoBoltManager(BaseDevice):
|
||||
try:
|
||||
self.logger.info("正在初始化FemtoBolt设备...")
|
||||
|
||||
# 使用构造函数中已加载的配置,避免并发读取配置文件
|
||||
self.logger.info(f"使用已加载配置: algorithm_type={self.algorithm_type}, fps={self.fps}, depth_mode={self.depth_mode}")
|
||||
|
||||
# 初始化SDK
|
||||
if not self._initialize_sdk():
|
||||
raise Exception("SDK初始化失败")
|
||||
@ -159,7 +372,7 @@ class FemtoBoltManager(BaseDevice):
|
||||
self.device_info.update({
|
||||
'color_resolution': self.color_resolution,
|
||||
'depth_mode': self.depth_mode,
|
||||
'fps': self.fps,
|
||||
'camera_fps': self.fps,
|
||||
'depth_range': f"{self.depth_range_min}-{self.depth_range_max}mm"
|
||||
})
|
||||
|
||||
@ -187,10 +400,9 @@ class FemtoBoltManager(BaseDevice):
|
||||
real_pykinect = pykinect
|
||||
self.logger.info("成功导入pykinect_azure库")
|
||||
except ImportError as e:
|
||||
self.logger.warning(f"无法导入pykinect_azure库,使用模拟模式: {e}")
|
||||
self.pykinect = self._create_mock_pykinect()
|
||||
self.sdk_initialized = True
|
||||
return True
|
||||
self.logger.error(f"无法导入pykinect_azure库: {e}")
|
||||
self.sdk_initialized = False
|
||||
return False
|
||||
|
||||
# 查找并初始化SDK路径
|
||||
sdk_initialized = False
|
||||
@ -209,8 +421,9 @@ class FemtoBoltManager(BaseDevice):
|
||||
continue
|
||||
|
||||
if not sdk_initialized:
|
||||
self.logger.info('未找到真实SDK,使用模拟模式')
|
||||
self.pykinect = self._create_mock_pykinect()
|
||||
self.logger.error('未找到真实SDK,初始化失败')
|
||||
self.sdk_initialized = False
|
||||
return False
|
||||
|
||||
self.sdk_initialized = True
|
||||
return True
|
||||
@ -225,84 +438,12 @@ class FemtoBoltManager(BaseDevice):
|
||||
if platform.system() == "Windows":
|
||||
# 优先使用Orbbec SDK K4A Wrapper(与azure_kinect_image_example.py一致)
|
||||
base_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
dll_path = os.path.join(base_dir,"..", "dll","femtobolt","bin", "k4a.dll")
|
||||
dll_path = os.path.join(base_dir,"..", "dll","femtobolt", "k4a.dll")
|
||||
self.logger.info(f"FemtoBolt SDK路径: {dll_path}")
|
||||
sdk_paths.append(dll_path)
|
||||
return sdk_paths
|
||||
|
||||
def _create_mock_pykinect(self):
|
||||
"""
|
||||
创建模拟pykinect_azure(用于测试)
|
||||
|
||||
Returns:
|
||||
Mock pykinect对象
|
||||
"""
|
||||
class MockPyKinect:
|
||||
def __init__(self):
|
||||
self.default_configuration = self._create_mock_config()
|
||||
|
||||
def initialize_libraries(self, track_body=False, module_k4a_path=None):
|
||||
pass
|
||||
|
||||
def start_device(self, config=None):
|
||||
return MockDevice()
|
||||
|
||||
def _create_mock_config(self):
|
||||
class MockConfig:
|
||||
def __init__(self):
|
||||
self.depth_mode = 'NFOV_UNBINNED'
|
||||
self.camera_fps = 15
|
||||
self.synchronized_images_only = False
|
||||
self.color_resolution = 0
|
||||
return MockConfig()
|
||||
|
||||
# 添加常量
|
||||
K4A_DEPTH_MODE_NFOV_UNBINNED = 'NFOV_UNBINNED'
|
||||
K4A_FRAMES_PER_SECOND_15 = 15
|
||||
|
||||
class MockDevice:
|
||||
def __init__(self):
|
||||
self.is_started = True
|
||||
|
||||
def update(self):
|
||||
return MockCapture()
|
||||
|
||||
def stop(self):
|
||||
self.is_started = False
|
||||
|
||||
def close(self):
|
||||
pass
|
||||
|
||||
class MockCapture:
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def get_depth_image(self):
|
||||
# 生成模拟深度图像
|
||||
height, width = 480, 640
|
||||
depth_image = np.full((height, width), 2000, dtype=np.uint16)
|
||||
|
||||
# 添加人体轮廓
|
||||
center_x = width // 2
|
||||
center_y = height // 2
|
||||
|
||||
# 头部
|
||||
cv2.circle(depth_image, (center_x, center_y - 100), 40, 1500, -1)
|
||||
# 身体
|
||||
cv2.rectangle(depth_image, (center_x - 50, center_y - 60),
|
||||
(center_x + 50, center_y + 100), 1600, -1)
|
||||
# 手臂
|
||||
cv2.rectangle(depth_image, (center_x - 80, center_y - 40),
|
||||
(center_x - 50, center_y + 20), 1700, -1)
|
||||
cv2.rectangle(depth_image, (center_x + 50, center_y - 40),
|
||||
(center_x + 80, center_y + 20), 1700, -1)
|
||||
|
||||
return True, depth_image
|
||||
|
||||
def get_color_image(self):
|
||||
return None
|
||||
|
||||
return MockPyKinect()
|
||||
|
||||
|
||||
def _configure_device(self) -> bool:
|
||||
"""
|
||||
@ -317,12 +458,12 @@ class FemtoBoltManager(BaseDevice):
|
||||
|
||||
# 配置FemtoBolt设备参数
|
||||
self.femtobolt_config = self.pykinect.default_configuration
|
||||
self.femtobolt_config.depth_mode = self.pykinect.K4A_DEPTH_MODE_NFOV_UNBINNED
|
||||
self.femtobolt_config.depth_mode = self.pykinect.K4A_DEPTH_MODE_NFOV_2X2BINNED
|
||||
self.femtobolt_config.color_format = self.pykinect.K4A_IMAGE_FORMAT_COLOR_BGRA32
|
||||
self.femtobolt_config.color_resolution = self.pykinect.K4A_COLOR_RESOLUTION_720P
|
||||
self.femtobolt_config.camera_fps = self.pykinect.K4A_FRAMES_PER_SECOND_15
|
||||
self.femtobolt_config.synchronized_images_only = False
|
||||
self.femtobolt_config.color_resolution = 0
|
||||
|
||||
self.logger.info(f"FemtoBolt设备配置完成 - 深度模式: {self.depth_mode}, FPS: {self.fps}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
@ -340,17 +481,12 @@ class FemtoBoltManager(BaseDevice):
|
||||
# 启动FemtoBolt设备
|
||||
self.logger.info(f'尝试启动FemtoBolt设备...')
|
||||
|
||||
if hasattr(self.pykinect, 'start_device'):
|
||||
# 真实设备模式
|
||||
self.device_handle = self.pykinect.start_device(config=self.femtobolt_config)
|
||||
if self.device_handle:
|
||||
self.logger.info('✓ FemtoBolt深度相机初始化成功!')
|
||||
else:
|
||||
raise Exception('设备启动返回None')
|
||||
# 启动真实设备
|
||||
self.device_handle = self.pykinect.start_device(config=self.femtobolt_config)
|
||||
if self.device_handle:
|
||||
self.logger.info('✓ FemtoBolt深度相机初始化成功!')
|
||||
else:
|
||||
# 模拟设备模式
|
||||
self.device_handle = self.pykinect.start_device(config=self.femtobolt_config)
|
||||
self.logger.info('✓ FemtoBolt深度相机模拟模式启动成功!')
|
||||
raise Exception('设备启动返回None')
|
||||
|
||||
# 等待设备稳定
|
||||
time.sleep(1.0)
|
||||
@ -510,63 +646,16 @@ class FemtoBoltManager(BaseDevice):
|
||||
try:
|
||||
ret, depth_image = capture.get_depth_image()
|
||||
if ret and depth_image is not None:
|
||||
# 确保二维数据
|
||||
# if depth_image.ndim == 3 and depth_image.shape[2] == 1:
|
||||
# depth_image = depth_image[:, :, 0]
|
||||
|
||||
# 使用My_TestFemtobolt.py的原始算法处理深度数据
|
||||
depth = depth_image.copy()
|
||||
|
||||
# 深度数据过滤 (根据depth_range参数动态设置)
|
||||
depth[depth > self.depth_range_max] = 0
|
||||
depth[depth < self.depth_range_min] = 0
|
||||
|
||||
# 裁剪感兴趣区域 (与My_TestFemtobolt.py完全一致)
|
||||
# depth = depth[50:200, 50:210]
|
||||
|
||||
|
||||
# 使用 np.ma.masked_equal() 来屏蔽深度图中的零值 (与My_TestFemtobolt.py完全一致)
|
||||
depth_masked = np.ma.masked_equal(depth, 0)
|
||||
|
||||
# 创建RGB背景图像
|
||||
rows, cols = depth.shape
|
||||
bg_rgb = np.ones((rows, cols, 3), dtype=np.uint8) * 128 # 灰色背景
|
||||
|
||||
# 绘制白色栅格线 (与My_TestFemtobolt.py完全一致)
|
||||
cell_size = 50
|
||||
grid_color = (255, 255, 255)
|
||||
for x in range(0, cols, cell_size):
|
||||
cv2.line(bg_rgb, (x, 0), (x, rows), grid_color, 1)
|
||||
for y in range(0, rows, cell_size):
|
||||
cv2.line(bg_rgb, (0, y), (cols, y), grid_color, 1)
|
||||
|
||||
# 处理有效深度数据 - 添加平滑处理算法
|
||||
valid_mask = ~depth_masked.mask if hasattr(depth_masked, 'mask') else (depth > 0)
|
||||
|
||||
if np.any(valid_mask):
|
||||
# 创建平滑的深度图像 (类似等高线效果)
|
||||
depth_smooth = self._apply_contour_smoothing(depth, valid_mask)
|
||||
|
||||
# 归一化平滑后的深度值到[0,1]范围
|
||||
valid_depth_smooth = depth_smooth[valid_mask]
|
||||
depth_normalized = (valid_depth_smooth.astype(np.float32) - self.depth_range_min) / (self.depth_range_max - self.depth_range_min)
|
||||
depth_normalized = np.clip(depth_normalized, 0, 1)
|
||||
|
||||
# 应用自定义colormap (与My_TestFemtobolt.py使用相同的colormap)
|
||||
rgba = self.custom_cmap(depth_normalized)
|
||||
rgb_values = (rgba[:, :3] * 255).astype(np.uint8)
|
||||
|
||||
# 将彩色深度值应用到背景图像上
|
||||
bg_rgb[valid_mask] = rgb_values
|
||||
|
||||
depth_colored_final = bg_rgb
|
||||
# 裁剪宽度
|
||||
height, width = depth_colored_final.shape[:2]
|
||||
target_width = height // 2
|
||||
if width > target_width:
|
||||
left = (width - target_width) // 2
|
||||
right = left + target_width
|
||||
depth_colored_final = depth_colored_final[:, left:right]
|
||||
# 根据配置选择不同的等高线生成方法
|
||||
if self.algorithm_type == 'plt':
|
||||
depth_colored_final = self._generate_contour_image_plt(depth_image)
|
||||
elif self.algorithm_type == 'opencv':
|
||||
depth_colored_final = self._generate_contour_image_opencv(depth_image)
|
||||
|
||||
if depth_colored_final is None:
|
||||
# 如果等高线生成失败,跳过这一帧
|
||||
continue
|
||||
|
||||
# 推送SocketIO
|
||||
success, buffer = cv2.imencode('.jpg', depth_colored_final, self._encode_param)
|
||||
@ -601,7 +690,7 @@ class FemtoBoltManager(BaseDevice):
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
time.sleep(0.005)
|
||||
time.sleep(0.001)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f'FemtoBolt帧推送失败: {e}')
|
||||
@ -616,242 +705,6 @@ class FemtoBoltManager(BaseDevice):
|
||||
self.is_streaming = False
|
||||
self.logger.info("FemtoBolt流工作线程结束")
|
||||
|
||||
def _streaming_worker_bak(self):
|
||||
"""
|
||||
流处理工作线程
|
||||
"""
|
||||
self.logger.info("FemtoBolt流工作线程启动")
|
||||
|
||||
frame_count = 0
|
||||
|
||||
try:
|
||||
while self.is_streaming:
|
||||
# 发送频率限制
|
||||
now = time.time()
|
||||
if now - self._last_send_time < self._min_send_interval:
|
||||
time.sleep(0.001)
|
||||
continue
|
||||
|
||||
if self.device_handle and self._socketio:
|
||||
try:
|
||||
capture = self.device_handle.update()
|
||||
if capture is not None:
|
||||
try:
|
||||
ret, depth_image = capture.get_depth_image()
|
||||
if ret and depth_image is not None:
|
||||
# 确保二维数据
|
||||
if depth_image.ndim == 3 and depth_image.shape[2] == 1:
|
||||
depth_image = depth_image[:, :, 0]
|
||||
|
||||
rows, cols = depth_image.shape[:2]
|
||||
# 生成或复用网格背景
|
||||
if (self._grid_bg is None) or (self._grid_size != (rows, cols)):
|
||||
bg = np.ones((rows, cols, 3), dtype=np.uint8) * 128
|
||||
cell_size = 50
|
||||
grid_color = (255, 255, 255)
|
||||
grid = np.zeros_like(bg)
|
||||
for x in range(0, cols, cell_size):
|
||||
cv2.line(grid, (x, 0), (x, rows), grid_color, 1)
|
||||
for y in range(0, rows, cell_size):
|
||||
cv2.line(grid, (0, y), (cols, y), grid_color, 1)
|
||||
mask_grid = (grid.sum(axis=2) > 0)
|
||||
bg[mask_grid] = grid[mask_grid]
|
||||
self._grid_bg = bg
|
||||
self._grid_size = (rows, cols)
|
||||
|
||||
background = self._grid_bg.copy()
|
||||
|
||||
# 生成深度掩码,仅保留指定范围内的像素
|
||||
mask_valid = (depth_image >= self.depth_range_min) & (depth_image <= self.depth_range_max)
|
||||
depth_clipped = np.clip(depth_image, self.depth_range_min, self.depth_range_max)
|
||||
normed = (depth_clipped.astype(np.float32) - self.depth_range_min) / (self.depth_range_max - self.depth_range_min)
|
||||
|
||||
# 反转映射,保证颜色方向与之前一致
|
||||
normed = 1.0 - normed
|
||||
|
||||
# 应用自定义 colormap,将深度值映射到 RGB
|
||||
rgba = self.custom_cmap(normed)
|
||||
rgb = (rgba[..., :3] * 255).astype(np.uint8)
|
||||
|
||||
# 叠加:在背景上覆盖彩色深度图(掩码处不覆盖,保留灰色背景+网格)
|
||||
depth_colored_final = background.copy()
|
||||
depth_colored_final[mask_valid] = rgb[mask_valid]
|
||||
|
||||
# 裁剪宽度
|
||||
height, width = depth_colored_final.shape[:2]
|
||||
target_width = height // 2
|
||||
if width > target_width:
|
||||
left = (width - target_width) // 2
|
||||
right = left + target_width
|
||||
depth_colored_final = depth_colored_final[:, left:right]
|
||||
|
||||
# 推送SocketIO
|
||||
success, buffer = cv2.imencode('.jpg', depth_colored_final, self._encode_param)
|
||||
if success and self._socketio:
|
||||
jpg_as_text = base64.b64encode(memoryview(buffer).tobytes()).decode('utf-8')
|
||||
self._socketio.emit('femtobolt_frame', {
|
||||
'depth_image': jpg_as_text,
|
||||
'frame_count': frame_count,
|
||||
'timestamp': now,
|
||||
'fps': self.actual_fps,
|
||||
'device_id': self.device_id,
|
||||
'depth_range': {
|
||||
'min': self.depth_range_min,
|
||||
'max': self.depth_range_max
|
||||
}
|
||||
}, namespace='/devices')
|
||||
frame_count += 1
|
||||
self._last_send_time = now
|
||||
|
||||
# 更新统计
|
||||
self._update_statistics()
|
||||
else:
|
||||
time.sleep(0.005)
|
||||
except Exception as e:
|
||||
# 捕获处理过程中出现异常,记录并继续
|
||||
self.logger.error(f"FemtoBolt捕获处理错误: {e}")
|
||||
finally:
|
||||
# 无论处理成功与否,都应释放capture以回收内存:contentReference[oaicite:3]{index=3}
|
||||
try:
|
||||
if hasattr(capture, 'release'):
|
||||
capture.release()
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
time.sleep(0.005)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f'FemtoBolt帧推送失败: {e}')
|
||||
time.sleep(0.05)
|
||||
|
||||
# 降低空转CPU
|
||||
time.sleep(0.001)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"FemtoBolt流处理异常: {e}")
|
||||
finally:
|
||||
self.is_streaming = False
|
||||
self.logger.info("FemtoBolt流工作线程结束")
|
||||
|
||||
def _process_depth_image(self, depth_image) -> np.ndarray:
|
||||
"""
|
||||
处理深度图像(采用testfemtobolt.py的渲染方式)
|
||||
"""
|
||||
try:
|
||||
if not isinstance(depth_image, np.ndarray):
|
||||
self.logger.error(f"输入的深度图像不是numpy数组: {type(depth_image)}")
|
||||
return np.zeros((480, 640, 3), dtype=np.uint8)
|
||||
|
||||
# 确保二维数据
|
||||
if depth_image.ndim == 3 and depth_image.shape[2] == 1:
|
||||
depth_image = depth_image[:, :, 0]
|
||||
|
||||
h, w = depth_image.shape
|
||||
|
||||
# 生成灰色背景和白色网格(参考testfemtobolt.py)
|
||||
background = np.full((h, w, 3), 128, dtype=np.uint8) # 灰色背景
|
||||
# 绘制网格线
|
||||
for x in range(0, w, 50): # 每50像素一条竖线
|
||||
cv2.line(background, (x, 0), (x, h-1), (255, 255, 255), 1)
|
||||
for y in range(0, h, 50): # 每50像素一条横线
|
||||
cv2.line(background, (0, y), (w-1, y), (255, 255, 255), 1)
|
||||
|
||||
# 生成深度掩码,仅保留指定范围内的像素
|
||||
mask_valid = (depth_image >= self.depth_range_min) & (depth_image <= self.depth_range_max)
|
||||
depth_clipped = np.clip(depth_image, self.depth_range_min, self.depth_range_max)
|
||||
normed = (depth_clipped.astype(np.float32) - self.depth_range_min) / (self.depth_range_max - self.depth_range_min)
|
||||
|
||||
# 反转映射,保证颜色方向与之前一致
|
||||
normed = 1.0 - normed
|
||||
|
||||
# 应用自定义 colormap,将深度值映射到 RGB
|
||||
rgba = self.custom_cmap(normed)
|
||||
rgb = (rgba[..., :3] * 255).astype(np.uint8)
|
||||
|
||||
# 叠加:在背景上覆盖彩色深度图(掩码处不覆盖,保留灰色背景+网格)
|
||||
final_img = background.copy()
|
||||
final_img[mask_valid] = rgb[mask_valid]
|
||||
|
||||
# 裁剪宽度(保持原有功能)
|
||||
height, width = final_img.shape[:2]
|
||||
target_width = height // 2
|
||||
if width > target_width:
|
||||
left = (width - target_width) // 2
|
||||
right = left + target_width
|
||||
final_img = final_img[:, left:right]
|
||||
|
||||
return final_img
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"处理深度图像失败: {e}")
|
||||
return np.zeros((480, 640, 3), dtype=np.uint8)
|
||||
|
||||
def _send_depth_data(self, depth_image: np.ndarray, color_image: Optional[np.ndarray] = None):
|
||||
try:
|
||||
_, depth_buffer = cv2.imencode('.jpg', depth_image, self._encode_param)
|
||||
depth_data = base64.b64encode(memoryview(depth_buffer).tobytes()).decode('utf-8')
|
||||
|
||||
send_data = {
|
||||
'timestamp': time.time(),
|
||||
'frame_count': self.frame_count,
|
||||
'depth_image': depth_data,
|
||||
'fps': self.actual_fps,
|
||||
'device_id': self.device_id,
|
||||
'depth_range': {
|
||||
'min': self.depth_range_min,
|
||||
'max': self.depth_range_max
|
||||
},
|
||||
'last_update': time.strftime('%H:%M:%S')
|
||||
}
|
||||
|
||||
if color_image is not None:
|
||||
_, color_buffer = cv2.imencode('.jpg', color_image, self._encode_param)
|
||||
color_data = base64.b64encode(memoryview(color_buffer).tobytes()).decode('utf-8')
|
||||
send_data['color_image'] = color_data
|
||||
|
||||
self._socketio.emit('femtobolt_frame', send_data, namespace='/devices')
|
||||
except Exception as e:
|
||||
self.logger.error(f"发送深度数据失败: {e}")
|
||||
|
||||
def _apply_contour_smoothing(self, depth_image: np.ndarray, valid_mask: np.ndarray) -> np.ndarray:
|
||||
"""
|
||||
应用等高线平滑处理算法 (参考My_TestFemtobolt.py的contourf效果)
|
||||
|
||||
Args:
|
||||
depth_image: 原始深度图像
|
||||
valid_mask: 有效像素掩码
|
||||
|
||||
Returns:
|
||||
np.ndarray: 平滑处理后的深度图像
|
||||
"""
|
||||
try:
|
||||
# 创建平滑后的深度图像副本
|
||||
depth_smooth = depth_image.copy().astype(np.float32)
|
||||
|
||||
# 对有效区域进行高斯平滑 (模拟等高线的平滑效果)
|
||||
if np.any(valid_mask):
|
||||
# 使用高斯滤波进行平滑处理
|
||||
sigma = 1.5 # 平滑程度,可调节
|
||||
depth_smooth = ndimage.gaussian_filter(depth_smooth, sigma=sigma)
|
||||
|
||||
# 使用双边滤波进一步平滑,保持边缘
|
||||
# 注意:cv2.bilateralFilter只支持8位无符号整数和32位浮点数
|
||||
# 将深度值归一化到0-255范围用于双边滤波
|
||||
depth_min, depth_max = np.min(depth_smooth[valid_mask]), np.max(depth_smooth[valid_mask])
|
||||
if depth_max > depth_min:
|
||||
depth_normalized = ((depth_smooth - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
|
||||
depth_bilateral = cv2.bilateralFilter(depth_normalized, d=9, sigmaColor=75, sigmaSpace=75)
|
||||
# 将结果转换回原始深度范围
|
||||
depth_smooth = (depth_bilateral.astype(np.float32) / 255.0 * (depth_max - depth_min) + depth_min)
|
||||
|
||||
# 对无效区域保持原值
|
||||
depth_smooth[~valid_mask] = depth_image[~valid_mask]
|
||||
|
||||
return depth_smooth
|
||||
|
||||
except Exception as e:
|
||||
self.logger.warning(f"平滑处理失败,使用原始深度图像: {e}")
|
||||
return depth_image.astype(np.float32)
|
||||
|
||||
def _update_statistics(self):
|
||||
"""
|
||||
@ -911,93 +764,7 @@ class FemtoBoltManager(BaseDevice):
|
||||
})
|
||||
return status
|
||||
|
||||
def capture_body_image(self, save_path: Optional[str] = None) -> Optional[np.ndarray]:
|
||||
"""
|
||||
捕获身体图像
|
||||
|
||||
Args:
|
||||
save_path: 保存路径(可选)
|
||||
|
||||
Returns:
|
||||
Optional[np.ndarray]: 捕获的图像,失败返回None
|
||||
"""
|
||||
try:
|
||||
if not self.is_connected or not self.device_handle:
|
||||
self.logger.error("FemtoBolt设备未连接")
|
||||
return None
|
||||
|
||||
capture = self.device_handle.get_capture()
|
||||
if not capture:
|
||||
self.logger.error("无法获取FemtoBolt捕获")
|
||||
return None
|
||||
|
||||
depth_image = capture.get_depth_image()
|
||||
if depth_image is None:
|
||||
self.logger.error("无法获取深度图像")
|
||||
capture.release()
|
||||
return None
|
||||
|
||||
# 处理深度图像
|
||||
processed_image = self._process_depth_image(depth_image)
|
||||
|
||||
if save_path:
|
||||
cv2.imwrite(save_path, processed_image)
|
||||
self.logger.info(f"身体图像已保存到: {save_path}")
|
||||
|
||||
capture.release()
|
||||
return processed_image
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"捕获身体图像异常: {e}")
|
||||
return None
|
||||
|
||||
def get_latest_depth_frame(self) -> Optional[np.ndarray]:
|
||||
"""
|
||||
获取最新深度帧
|
||||
|
||||
Returns:
|
||||
Optional[np.ndarray]: 最新深度帧,无帧返回None
|
||||
"""
|
||||
return self.last_depth_frame.copy() if self.last_depth_frame is not None else None
|
||||
|
||||
def get_latest_color_frame(self) -> Optional[np.ndarray]:
|
||||
"""
|
||||
获取最新彩色帧
|
||||
|
||||
Returns:
|
||||
Optional[np.ndarray]: 最新彩色帧,无帧返回None
|
||||
"""
|
||||
return self.last_color_frame.copy() if self.last_color_frame is not None else None
|
||||
|
||||
def collect_body_pose_data(self) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
采集身体姿态数据(兼容原接口)
|
||||
|
||||
Returns:
|
||||
Optional[Dict[str, Any]]: 身体姿态数据
|
||||
"""
|
||||
# 这里可以集成姿态估计算法
|
||||
# 目前返回模拟数据
|
||||
if not self.last_depth_frame is not None:
|
||||
return None
|
||||
|
||||
# 模拟身体姿态数据
|
||||
mock_keypoints = [
|
||||
{'name': 'head', 'x': 320, 'y': 100, 'confidence': 0.9},
|
||||
{'name': 'neck', 'x': 320, 'y': 150, 'confidence': 0.8},
|
||||
{'name': 'left_shoulder', 'x': 280, 'y': 160, 'confidence': 0.7},
|
||||
{'name': 'right_shoulder', 'x': 360, 'y': 160, 'confidence': 0.7},
|
||||
{'name': 'left_hip', 'x': 300, 'y': 300, 'confidence': 0.6},
|
||||
{'name': 'right_hip', 'x': 340, 'y': 300, 'confidence': 0.6}
|
||||
]
|
||||
|
||||
return {
|
||||
'timestamp': time.time(),
|
||||
'keypoints': mock_keypoints,
|
||||
'balance_score': np.random.uniform(0.7, 0.9),
|
||||
'center_of_mass': {'x': 320, 'y': 240},
|
||||
'device_id': self.device_id
|
||||
}
|
||||
|
||||
|
||||
def _cleanup_device(self):
|
||||
"""
|
||||
@ -1039,6 +806,51 @@ class FemtoBoltManager(BaseDevice):
|
||||
except Exception as e:
|
||||
self.logger.error(f"断开FemtoBolt设备连接失败: {e}")
|
||||
|
||||
def reload_config(self) -> bool:
|
||||
"""
|
||||
重新加载设备配置
|
||||
|
||||
Returns:
|
||||
bool: 重新加载是否成功
|
||||
"""
|
||||
try:
|
||||
self.logger.info("正在重新加载FemtoBolt配置...")
|
||||
|
||||
|
||||
# 获取最新配置
|
||||
self.config = self.config_manager.get_device_config('femtobolt')
|
||||
|
||||
# 更新配置属性
|
||||
self.algorithm_type = self.config.get('algorithm_type', 'opencv')
|
||||
self.color_resolution = self.config.get('color_resolution', '1080P')
|
||||
self.depth_mode = self.config.get('depth_mode', 'NFOV_2X2BINNED')
|
||||
self.color_format = self.config.get('color_format', 'COLOR_BGRA32')
|
||||
self.fps = self.config.get('camera_fps', 20)
|
||||
self.depth_range_min = self.config.get('depth_range_min', 500)
|
||||
self.depth_range_max = self.config.get('depth_range_max', 4500)
|
||||
self.synchronized_images_only = self.config.get('synchronized_images_only', False)
|
||||
|
||||
# 更新图像处理参数
|
||||
self.contrast_factor = self.config.get('contrast_factor', 1.2)
|
||||
self.gamma_value = self.config.get('gamma_value', 0.8)
|
||||
self.use_pseudo_color = self.config.get('use_pseudo_color', True)
|
||||
|
||||
# 更新缓存队列大小
|
||||
cache_size = self.config.get('frame_cache_size', 10)
|
||||
if cache_size != self.depth_frame_cache.maxlen:
|
||||
self.depth_frame_cache = deque(maxlen=cache_size)
|
||||
self.color_frame_cache = deque(maxlen=cache_size)
|
||||
|
||||
# 更新gamma查找表
|
||||
self._update_gamma_lut()
|
||||
|
||||
self.logger.info(f"FemtoBolt配置重新加载成功 - 算法: {self.algorithm_type}, 分辨率: {self.color_resolution}, FPS: {self.fps}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"重新加载FemtoBolt配置失败: {e}")
|
||||
return False
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
清理资源
|
||||
@ -1047,6 +859,12 @@ class FemtoBoltManager(BaseDevice):
|
||||
self.stop_streaming()
|
||||
self._cleanup_device()
|
||||
|
||||
# 清理matplotlib图形对象
|
||||
if hasattr(self, 'fig') and self.fig is not None:
|
||||
plt.close(self.fig)
|
||||
self.fig = None
|
||||
self.ax = None
|
||||
|
||||
self.depth_frame_cache.clear()
|
||||
self.color_frame_cache.clear()
|
||||
self.last_depth_frame = None
|
||||
|
@ -307,6 +307,9 @@ class IMUManager(BaseDevice):
|
||||
try:
|
||||
self.logger.info(f"正在初始化IMU设备...")
|
||||
|
||||
# 使用构造函数中已加载的配置,避免并发读取配置文件
|
||||
self.logger.info(f"使用已加载配置: port={self.port}, baudrate={self.baudrate}, device_type={self.device_type}")
|
||||
|
||||
# 根据配置选择真实设备或模拟设备
|
||||
# 优先使用device_type配置,如果没有则使用use_mock配置(向后兼容)
|
||||
use_real_device = (self.device_type == 'real') or (not self.use_mock)
|
||||
@ -586,6 +589,45 @@ class IMUManager(BaseDevice):
|
||||
except Exception as e:
|
||||
self.logger.error(f"断开IMU设备连接失败: {e}")
|
||||
|
||||
def reload_config(self) -> bool:
|
||||
"""
|
||||
重新加载设备配置
|
||||
|
||||
Returns:
|
||||
bool: 重新加载是否成功
|
||||
"""
|
||||
try:
|
||||
self.logger.info("正在重新加载IMU配置...")
|
||||
|
||||
|
||||
|
||||
# 获取最新配置
|
||||
config = self.config_manager.get_device_config('imu')
|
||||
|
||||
# 更新配置属性
|
||||
self.port = config.get('port', 'COM7')
|
||||
self.baudrate = config.get('baudrate', 9600)
|
||||
self.device_type = config.get('device_type', 'mock')
|
||||
self.use_mock = config.get('use_mock', False)
|
||||
|
||||
# 更新数据缓存队列大小
|
||||
buffer_size = config.get('buffer_size', 100)
|
||||
if buffer_size != self.data_buffer.maxlen:
|
||||
# 保存当前数据
|
||||
current_data = list(self.data_buffer)
|
||||
# 创建新缓冲区
|
||||
self.data_buffer = deque(maxlen=buffer_size)
|
||||
# 恢复数据(保留最新的数据)
|
||||
for data in current_data[-buffer_size:]:
|
||||
self.data_buffer.append(data)
|
||||
|
||||
self.logger.info(f"IMU配置重新加载成功 - 端口: {self.port}, 波特率: {self.baudrate}, 设备类型: {self.device_type}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"重新加载IMU配置失败: {e}")
|
||||
return False
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
清理资源
|
||||
|
@ -738,7 +738,10 @@ class PressureManager(BaseDevice):
|
||||
bool: 初始化是否成功
|
||||
"""
|
||||
try:
|
||||
self.logger.info(f"正在初始化压力板设备 - 类型: {self.device_type}")
|
||||
self.logger.info(f"正在初始化压力板设备...")
|
||||
|
||||
# 使用构造函数中已加载的配置,避免并发读取配置文件
|
||||
self.logger.info(f"使用已加载配置: device_type={self.device_type}, stream_interval={self.stream_interval}")
|
||||
|
||||
# 根据设备类型创建设备实例
|
||||
if self.device_type == 'real':
|
||||
@ -970,6 +973,31 @@ class PressureManager(BaseDevice):
|
||||
self.logger.error(f"断开压力板设备连接失败: {e}")
|
||||
return False
|
||||
|
||||
def reload_config(self) -> bool:
|
||||
"""
|
||||
重新加载压力板配置
|
||||
|
||||
Returns:
|
||||
bool: 配置重新加载是否成功
|
||||
"""
|
||||
try:
|
||||
self.logger.info("正在重新加载压力板配置...")
|
||||
|
||||
# 重新获取配置
|
||||
new_config = self.config_manager.get_device_config('pressure')
|
||||
|
||||
# 更新配置属性
|
||||
self.config = new_config
|
||||
self.device_type = new_config.get('device_type', 'mock')
|
||||
self.stream_interval = new_config.get('stream_interval', 0.1)
|
||||
|
||||
self.logger.info(f"压力板配置重新加载成功 - 设备类型: {self.device_type}, 流间隔: {self.stream_interval}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"重新加载压力板配置失败: {e}")
|
||||
return False
|
||||
|
||||
def cleanup(self) -> None:
|
||||
"""清理资源"""
|
||||
try:
|
||||
|
@ -18,7 +18,7 @@ import base64
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, List
|
||||
import sys
|
||||
import psutil
|
||||
# 移除psutil导入,不再需要性能监控
|
||||
import gc
|
||||
|
||||
try:
|
||||
@ -68,8 +68,12 @@ class RecordingManager:
|
||||
self.screen_recording_thread = None
|
||||
self.femtobolt_recording_thread = None
|
||||
|
||||
# 屏幕录制参数
|
||||
self.screen_fps = 25 # 与VideoWriter的fps保持一致
|
||||
# 独立的录制参数配置
|
||||
self.screen_fps = 25 # 屏幕录制帧率
|
||||
self.camera_fps = 20 # 相机录制帧率
|
||||
self.femtobolt_fps = 15 # FemtoBolt录制帧率
|
||||
|
||||
# 录制区域
|
||||
self.screen_region = None
|
||||
self.camera_region = None
|
||||
self.femtobolt_region = None
|
||||
@ -85,71 +89,151 @@ class RecordingManager:
|
||||
# 视频参数
|
||||
self.MAX_FRAME_SIZE = (1280, 720) # 最大帧尺寸
|
||||
|
||||
# CPU监控和性能优化参数
|
||||
self.cpu_threshold = 80.0 # CPU使用率阈值
|
||||
self.memory_threshold = 85.0 # 内存使用率阈值
|
||||
self.adaptive_fps = True # 是否启用自适应帧率
|
||||
self.min_fps = 10 # 最小帧率
|
||||
self.max_fps = 30 # 最大帧率
|
||||
self.current_fps = self.screen_fps # 当前动态帧率
|
||||
self.performance_check_interval = 30 # 性能检查间隔(帧数)
|
||||
self.frame_skip_count = 0 # 跳帧计数
|
||||
self.last_performance_check = 0 # 上次性能检查时间
|
||||
# 独立的帧率控制参数
|
||||
self.screen_current_fps = self.screen_fps
|
||||
self.camera_current_fps = self.camera_fps
|
||||
self.femtobolt_current_fps = self.femtobolt_fps
|
||||
|
||||
# 区域大小阈值配置 - 根据实际录制场景优化
|
||||
self.SMALL_REGION_THRESHOLD = 400 * 300 # 小区域阈值 (120,000像素)
|
||||
self.MEDIUM_REGION_THRESHOLD = 800 * 600 # 中等区域阈值 (480,000像素)
|
||||
self.LARGE_REGION_THRESHOLD = 1600 * 900 # 大区域阈值 (1,440,000像素)
|
||||
|
||||
# 基于区域大小的帧率配置 - 大幅降低帧率以减小文件大小
|
||||
self.fps_config = {
|
||||
'small': {'screen': 12, 'camera': 25, 'femtobolt': 20}, # 小区域:低帧率
|
||||
'medium': {'screen': 10, 'camera': 22, 'femtobolt': 18}, # 中等区域:更低帧率
|
||||
'large': {'screen': 8, 'camera': 18, 'femtobolt': 15}, # 大区域:很低帧率
|
||||
'xlarge': {'screen': 6, 'camera': 15, 'femtobolt': 12} # 超大区域:极低帧率
|
||||
}
|
||||
|
||||
# 移除CPU监控和性能优化参数,使用固定帧率控制
|
||||
|
||||
# 录制同步控制
|
||||
self.recording_sync_barrier = None # 同步屏障
|
||||
self.recording_threads = {} # 录制线程字典
|
||||
self.recording_start_sync = threading.Event() # 录制开始同步事件
|
||||
self.global_recording_start_time = None # 全局录制开始时间
|
||||
|
||||
# 日志
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
self.logger.info("录制管理器初始化完成")
|
||||
|
||||
def _check_system_performance(self):
|
||||
"""
|
||||
检查系统性能指标
|
||||
|
||||
Returns:
|
||||
Dict: 包含CPU和内存使用率的字典
|
||||
"""
|
||||
try:
|
||||
cpu_percent = psutil.cpu_percent(interval=0.1)
|
||||
memory_info = psutil.virtual_memory()
|
||||
memory_percent = memory_info.percent
|
||||
|
||||
return {
|
||||
'cpu_percent': cpu_percent,
|
||||
'memory_percent': memory_percent,
|
||||
'available_memory_mb': memory_info.available / (1024 * 1024)
|
||||
}
|
||||
except Exception as e:
|
||||
self.logger.warning(f"性能检查失败: {e}")
|
||||
return {'cpu_percent': 0, 'memory_percent': 0, 'available_memory_mb': 0}
|
||||
# 移除系统性能检查方法
|
||||
|
||||
def _adjust_recording_performance(self, performance_data):
|
||||
def _calculate_region_size_category(self, region):
|
||||
"""
|
||||
根据系统性能调整录制参数
|
||||
根据录制区域大小计算区域类别
|
||||
|
||||
Args:
|
||||
performance_data: 性能数据字典
|
||||
region: 录制区域 (x, y, width, height)
|
||||
|
||||
Returns:
|
||||
str: 区域大小类别 ('small', 'medium', 'large', 'xlarge')
|
||||
"""
|
||||
if not self.adaptive_fps:
|
||||
return
|
||||
if not region or len(region) != 4:
|
||||
return 'medium' # 默认中等大小
|
||||
|
||||
cpu_percent = performance_data.get('cpu_percent', 0)
|
||||
memory_percent = performance_data.get('memory_percent', 0)
|
||||
_, _, width, height = region
|
||||
area = width * height
|
||||
|
||||
# 根据CPU使用率调整帧率
|
||||
if cpu_percent > self.cpu_threshold:
|
||||
# CPU使用率过高,降低帧率
|
||||
self.current_fps = max(self.min_fps, self.current_fps - 2)
|
||||
self.frame_skip_count += 1
|
||||
self.logger.warning(f"CPU使用率过高({cpu_percent:.1f}%),降低帧率至{self.current_fps}fps")
|
||||
elif cpu_percent < self.cpu_threshold - 20 and self.current_fps < self.max_fps:
|
||||
# CPU使用率较低,可以适当提高帧率
|
||||
self.current_fps = min(self.max_fps, self.current_fps + 1)
|
||||
self.logger.info(f"CPU使用率正常({cpu_percent:.1f}%),提高帧率至{self.current_fps}fps")
|
||||
if area <= self.SMALL_REGION_THRESHOLD:
|
||||
return 'small'
|
||||
elif area <= self.MEDIUM_REGION_THRESHOLD:
|
||||
return 'medium'
|
||||
elif area <= self.LARGE_REGION_THRESHOLD:
|
||||
return 'large'
|
||||
else:
|
||||
return 'xlarge'
|
||||
|
||||
def _set_adaptive_fps_by_region(self, recording_type, region):
|
||||
"""
|
||||
根据录制区域大小设置自适应帧率
|
||||
|
||||
# 内存使用率过高时强制垃圾回收
|
||||
if memory_percent > self.memory_threshold:
|
||||
gc.collect()
|
||||
self.logger.warning(f"内存使用率过高({memory_percent:.1f}%),执行垃圾回收")
|
||||
Args:
|
||||
recording_type: 录制类型 ('screen', 'camera', 'femtobolt')
|
||||
region: 录制区域 (x, y, width, height)
|
||||
"""
|
||||
size_category = self._calculate_region_size_category(region)
|
||||
target_fps = self.fps_config[size_category][recording_type]
|
||||
|
||||
# 计算区域面积用于日志
|
||||
_, _, width, height = region
|
||||
area = width * height
|
||||
|
||||
if recording_type == 'screen':
|
||||
self.screen_current_fps = target_fps
|
||||
elif recording_type == 'camera':
|
||||
self.camera_current_fps = target_fps
|
||||
elif recording_type == 'femtobolt':
|
||||
self.femtobolt_current_fps = target_fps
|
||||
|
||||
self.logger.info(f"{recording_type}录制区域解包成功: x={region[0]}, y={region[1]}, w={width}, h={height}")
|
||||
self.logger.info(f"{recording_type}录制区域分析: 面积={area:,}像素, 类别={size_category}, 优化帧率={target_fps}fps")
|
||||
|
||||
# 如果是大区域,提示将启用性能优化
|
||||
if size_category in ['large', 'xlarge']:
|
||||
self.logger.info(f"{recording_type}大区域检测: 将启用降采样和压缩优化以提升性能")
|
||||
|
||||
# 移除动态性能调整方法,使用固定帧率控制
|
||||
|
||||
def _optimize_frame_for_large_region(self, frame, region, recording_type):
|
||||
"""
|
||||
为大区域录制优化帧数据
|
||||
|
||||
Args:
|
||||
frame: 原始帧数据
|
||||
region: 录制区域
|
||||
recording_type: 录制类型
|
||||
|
||||
Returns:
|
||||
优化后的帧数据
|
||||
"""
|
||||
if frame is None:
|
||||
return None
|
||||
|
||||
size_category = self._calculate_region_size_category(region)
|
||||
|
||||
# 对所有区域进行优化处理以减小文件大小
|
||||
_, _, width, height = region
|
||||
|
||||
# 根据区域大小进行不同程度的降采样
|
||||
if size_category == 'xlarge': #screen录屏·超大区域
|
||||
# 超大区域:降采样到50%,极大减小文件大小
|
||||
new_width = int(width * 1)
|
||||
new_height = int(height * 1)
|
||||
quality = 95 # 较高质量压缩
|
||||
elif size_category == 'large':
|
||||
# 大区域:降采样到60%,显著优化文件大小
|
||||
new_width = int(width * 1)
|
||||
new_height = int(height * 1)
|
||||
quality = 95 # 较高质量压缩
|
||||
elif size_category == 'medium': #足部视频录屏·中等区域
|
||||
# 中等区域:降采样到75%,适度优化
|
||||
new_width = int(width * 1)
|
||||
new_height = int(height * 1)
|
||||
quality = 100 # 较高质量压缩
|
||||
else: # small
|
||||
# 小区域:降采样到85%,轻度优化
|
||||
new_width = int(width * 1)
|
||||
new_height = int(height * 1)
|
||||
quality = 100 # 较高质量压缩
|
||||
|
||||
# 应用降采样
|
||||
frame = cv2.resize(frame, (new_width, new_height), interpolation=cv2.INTER_AREA)
|
||||
self.logger.debug(f"{recording_type}区域降采样({size_category}): {width}x{height} -> {new_width}x{new_height}")
|
||||
|
||||
# 应用激进的JPEG压缩以进一步减小文件大小
|
||||
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
|
||||
_, encoded_frame = cv2.imencode('.jpg', frame, encode_param)
|
||||
frame = cv2.imdecode(encoded_frame, cv2.IMREAD_COLOR)
|
||||
|
||||
# 重要:将帧尺寸调整回VideoWriter期望的原始尺寸
|
||||
# 这样可以保持压缩优化的同时确保与VideoWriter兼容
|
||||
frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_LINEAR)
|
||||
|
||||
return frame
|
||||
|
||||
def start_recording(self, session_id: str, patient_id: str, screen_location: List[int], camera_location: List[int], femtobolt_location: List[int], recording_types: List[str] = None) -> Dict[str, Any]:
|
||||
"""
|
||||
@ -186,7 +270,7 @@ class RecordingManager:
|
||||
return result
|
||||
|
||||
# 设置默认录制类型
|
||||
recording_types = ['screen', 'feet', 'femtobolt']
|
||||
recording_types = ['screen', 'feet']
|
||||
# recording_types = ['screen']
|
||||
|
||||
|
||||
@ -216,9 +300,23 @@ class RecordingManager:
|
||||
self.screen_region = tuple(screen_location) # [x, y, w, h] -> (x, y, w, h)
|
||||
self.camera_region = tuple(camera_location) # [x, y, w, h] -> (x, y, w, h)
|
||||
self.femtobolt_region = tuple(femtobolt_location) # [x, y, w, h] -> (x, y, w, h)
|
||||
self.recording_start_time = datetime.now()
|
||||
data_base_path = os.path.join('data', 'patients', patient_id, session_id)
|
||||
|
||||
# 根据录制区域大小设置自适应帧率
|
||||
if 'screen' in recording_types:
|
||||
self._set_adaptive_fps_by_region('screen', self.screen_region)
|
||||
if 'feet' in recording_types:
|
||||
self._set_adaptive_fps_by_region('camera', self.camera_region)
|
||||
if 'femtobolt' in recording_types:
|
||||
self._set_adaptive_fps_by_region('femtobolt', self.femtobolt_region)
|
||||
|
||||
# 设置录制同步
|
||||
active_recording_count = len([t for t in recording_types if t in ['screen', 'feet', 'femtobolt']])
|
||||
self.recording_sync_barrier = threading.Barrier(active_recording_count)
|
||||
self.recording_start_sync.clear()
|
||||
self.global_recording_start_time = None
|
||||
|
||||
self.recording_start_time = datetime.now()
|
||||
db_base_path = os.path.join('data', 'patients', patient_id, session_id)
|
||||
# 创建主存储目录
|
||||
if getattr(sys, 'frozen', False):
|
||||
# 打包后的exe文件路径
|
||||
@ -242,56 +340,56 @@ class RecordingManager:
|
||||
# 定义视频文件路径
|
||||
feet_video_path = os.path.join(base_path, 'feet.mp4')
|
||||
screen_video_path = os.path.join(base_path, 'screen.mp4')
|
||||
femtobolt_video_path = os.path.join(base_path, 'femtobolt.mp4')
|
||||
|
||||
result['video_paths']['feet_video'] = feet_video_path
|
||||
result['video_paths']['screen_video'] = screen_video_path
|
||||
result['video_paths']['femtobolt_video'] = femtobolt_video_path
|
||||
femtobolt_video_path = os.path.join(base_path, 'femtobolt.mp4')
|
||||
|
||||
|
||||
# 准备数据库更新信息,返回给调用方统一处理
|
||||
result['database_updates'] = {
|
||||
'session_id': session_id,
|
||||
'status': 'recording',
|
||||
'video_paths': {
|
||||
'normal_video_path': os.path.join(base_path, 'feet.mp4'),
|
||||
'screen_video_path': os.path.join(base_path, 'screen.mp4'),
|
||||
'femtobolt_video_path': os.path.join(base_path, 'femtobolt.mp4')
|
||||
'normal_video_path': os.path.join(db_base_path, 'feet.mp4'),
|
||||
'screen_video_path': os.path.join(db_base_path, 'screen.mp4'),
|
||||
'femtobolt_video_path': os.path.join(db_base_path, 'femtobolt.mp4')
|
||||
}
|
||||
}
|
||||
self.logger.debug(f'数据库更新信息已准备 - 会话ID: {session_id}')
|
||||
|
||||
# 视频编码参数 - 使用更兼容的编解码器
|
||||
# 尝试多种编解码器以确保兼容性
|
||||
# 视频编码参数 - 使用浏览器兼容的H.264编解码器
|
||||
# 优先使用H.264编码器以确保浏览器兼容性
|
||||
try:
|
||||
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # 更兼容的编解码器
|
||||
fourcc = cv2.VideoWriter_fourcc(*'avc1') # H.264编码器,浏览器兼容性最好
|
||||
except:
|
||||
try:
|
||||
fourcc = cv2.VideoWriter_fourcc(*'XVID') # 备选编解码器
|
||||
fourcc = cv2.VideoWriter_fourcc(*'H264') # 备选H.264编码器
|
||||
except:
|
||||
fourcc = cv2.VideoWriter_fourcc(*'MJPG') # 最后备选
|
||||
fps = 25 #正常帧率
|
||||
try:
|
||||
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # 备选编解码器
|
||||
except:
|
||||
fourcc = cv2.VideoWriter_fourcc(*'MJPG') # 最后备选
|
||||
|
||||
|
||||
|
||||
# 根据录制类型选择性地初始化视频写入器
# 根据录制类型选择性地初始化视频写入器,使用各自的自适应帧率
self.screen_video_writer = None
self.femtobolt_video_writer = None
self.feet_video_writer = None

if 'screen' in recording_types:
self.screen_video_writer = cv2.VideoWriter(
screen_video_path, fourcc, fps, (self.screen_region[2], self.screen_region[3])
screen_video_path, fourcc, self.screen_current_fps, (self.screen_region[2], self.screen_region[3])
)
self.logger.info(f'屏幕视频写入器使用帧率: {self.screen_current_fps}fps')

if 'femtobolt' in recording_types:
self.femtobolt_video_writer = cv2.VideoWriter(
femtobolt_video_path, fourcc, fps, (self.femtobolt_region[2], self.femtobolt_region[3])
femtobolt_video_path, fourcc, self.femtobolt_current_fps, (self.femtobolt_region[2], self.femtobolt_region[3])
)
self.logger.info(f'FemtoBolt视频写入器使用帧率: {self.femtobolt_current_fps}fps')

if 'feet' in recording_types:
self.feet_video_writer = cv2.VideoWriter(
feet_video_path, fourcc, fps, (self.camera_region[2], self.camera_region[3])
)
feet_video_path, fourcc, self.camera_current_fps, (self.camera_region[2], self.camera_region[3])
)
self.logger.info(f'足部视频写入器使用帧率: {self.camera_current_fps}fps')

# 检查视频写入器状态(仅检查启用的录制类型)
# 检查足部视频写入器
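One pitfall with the writers above: `cv2.VideoWriter.write()` can silently drop frames whose dimensions differ from the `(width, height)` the writer was created with, which later shows up as a too-low frame count rather than an error. A short guard, names illustrative:

```python
import cv2

def write_frame_checked(writer, frame, expected_size):
    """Resize to the writer's (w, h) before writing; size mismatches are not reported by OpenCV."""
    h, w = frame.shape[:2]
    if (w, h) != expected_size:
        frame = cv2.resize(frame, expected_size)
    writer.write(frame)
```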
@@ -334,7 +432,7 @@ class RecordingManager:
name='FeetRecordingThread'
)
self.feet_recording_thread.start()
self.logger.info(f'足部录制线程已启动 - 区域: {self.camera_region}, 输出文件: {feet_video_path}')
# self.logger.info(f'足部录制线程已启动 - 区域: {self.camera_region}, 输出文件: {feet_video_path}')

if 'screen' in recording_types and self.screen_video_writer and self.screen_video_writer.isOpened():
self.screen_recording_thread = threading.Thread(
@@ -344,7 +442,7 @@ class RecordingManager:
name='ScreenRecordingThread'
)
self.screen_recording_thread.start()
self.logger.info(f'屏幕录制线程已启动 - 区域: {self.screen_region}, 输出文件: {screen_video_path}')
# self.logger.info(f'屏幕录制线程已启动 - 区域: {self.screen_region}, 输出文件: {screen_video_path}')

if 'femtobolt' in recording_types and self.femtobolt_video_writer and self.femtobolt_video_writer.isOpened():
self.femtobolt_recording_thread = threading.Thread(
@@ -354,7 +452,7 @@ class RecordingManager:
name='FemtoBoltRecordingThread'
)
self.femtobolt_recording_thread.start()
self.logger.info(f'FemtoBolt录制线程已启动 - 区域: {self.femtobolt_region}, 输出文件: {femtobolt_video_path}')
# self.logger.info(f'FemtoBolt录制线程已启动 - 区域: {self.femtobolt_region}, 输出文件: {femtobolt_video_path}')

result['success'] = True
result['recording_start_time'] = self.recording_start_time.isoformat()
@@ -396,19 +494,51 @@ class RecordingManager:
result['message'] = '当前没有进行录制'
return result

# 记录停止时间,确保所有录制线程同时结束
recording_stop_time = time.time()
self.logger.info(f'开始停止录制,停止时间: {recording_stop_time}')

# 设置停止标志
self.sync_recording = False
self.recording_stop_event.set()

# 等待录制线程结束
# 收集活跃的录制线程
active_threads = []
if hasattr(self, 'feet_recording_thread') and self.feet_recording_thread and self.feet_recording_thread.is_alive():
self.feet_recording_thread.join(timeout=5.0)

active_threads.append(('feet', self.feet_recording_thread))
if hasattr(self, 'screen_recording_thread') and self.screen_recording_thread and self.screen_recording_thread.is_alive():
self.screen_recording_thread.join(timeout=5.0)

active_threads.append(('screen', self.screen_recording_thread))
if hasattr(self, 'femtobolt_recording_thread') and self.femtobolt_recording_thread and self.femtobolt_recording_thread.is_alive():
self.femtobolt_recording_thread.join(timeout=5.0)
active_threads.append(('femtobolt', self.femtobolt_recording_thread))

# 同时等待所有录制线程结束
self.logger.info(f'等待 {len(active_threads)} 个录制线程结束')
for thread_name, thread in active_threads:
thread.join(timeout=5.0)
if thread.is_alive():
self.logger.warning(f'{thread_name}录制线程未能在超时时间内结束')
else:
self.logger.info(f'{thread_name}录制线程已结束')
# 计算实际录制时长并记录详细信息
if self.global_recording_start_time:
actual_recording_duration = recording_stop_time - self.global_recording_start_time
self.logger.info(f'录制时长统计:')
self.logger.info(f' 全局开始时间: {self.global_recording_start_time}')
self.logger.info(f' 全局结束时间: {recording_stop_time}')
self.logger.info(f' 实际录制时长: {actual_recording_duration:.3f}秒')

# 记录各录制类型的预期帧数
for thread_name, thread in active_threads:
if thread_name == 'screen':
expected_frames = int(actual_recording_duration * self.screen_current_fps)
self.logger.info(f' 屏幕录制预期帧数: {expected_frames}帧 (帧率{self.screen_current_fps}fps)')
elif thread_name == 'feet':
expected_frames = int(actual_recording_duration * self.camera_current_fps)
self.logger.info(f' 足部录制预期帧数: {expected_frames}帧 (帧率{self.camera_current_fps}fps)')
elif thread_name == 'femtobolt':
expected_frames = int(actual_recording_duration * self.femtobolt_current_fps)
self.logger.info(f' FemtoBolt录制预期帧数: {expected_frames}帧 (帧率{self.femtobolt_current_fps}fps)')

# 清理视频写入器
self._cleanup_video_writers()
@@ -417,7 +547,7 @@ class RecordingManager:
if self.current_session_id:
result['database_updates'] = {
'session_id': self.current_session_id,
'status': 'completed'
'status': 'checked'
}
self.logger.info(f'数据库更新信息已准备 - 会话ID: {self.current_session_id}')
@@ -450,12 +580,37 @@ class RecordingManager:
"""
try:
self.logger.info(f'{recording_type}录制线程启动 - 区域: {region}, 输出文件: {output_file_name}')
frame_count = 0
# 使用当前动态帧率,支持自适应帧率调整
target_fps = self.current_fps
frame_count = 0

# 根据录制类型获取对应的自适应帧率
if recording_type == 'screen':
target_fps = self.screen_current_fps
elif recording_type == 'camera':
target_fps = self.camera_current_fps
elif recording_type == 'femtobolt':
target_fps = self.femtobolt_current_fps
else:
target_fps = 25  # 默认帧率

frame_interval = 1.0 / target_fps
last_frame_time = time.time()

self.logger.info(f'{recording_type}录制线程使用帧率: {target_fps}fps')

# 等待所有录制线程准备就绪
if self.recording_sync_barrier:
self.recording_sync_barrier.wait()

# 第一个到达的线程设置全局开始时间
if self.global_recording_start_time is None:
self.global_recording_start_time = time.time()
self.recording_start_sync.set()
else:
self.recording_start_sync.wait()

# 所有线程从相同时间点开始录制
recording_start_time = self.global_recording_start_time

if not video_writer or not video_writer.isOpened():
self.logger.error(f'{recording_type}视频写入器初始化失败: {output_file_name}')
return
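A detail worth flagging in the start-synchronization above: after `Barrier.wait()` returns, more than one thread can still observe `global_recording_start_time is None` before any of them assigns it. `Barrier.wait()` already hands each thread a unique index, which gives a race-free way to elect the thread that stamps the start time; a minimal sketch (not the project's implementation):

```python
import threading
import time

def wait_for_synchronized_start(barrier, start_event, shared):
    """Sketch: all recording threads begin counting from the same instant."""
    if barrier.wait() == 0:                   # exactly one caller receives index 0
        shared['global_start'] = time.time()  # that thread stamps the start time
        start_event.set()                     # release the others
    else:
        start_event.wait()                    # wait until the start time exists
    return shared['global_start']
```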
@@ -472,19 +627,12 @@ class RecordingManager:
try:
current_time = time.time()

# 定期检查系统性能并调整录制参数
if frame_count % self.performance_check_interval == 0 and frame_count > 0:
performance_data = self._check_system_performance()
self._adjust_recording_performance(performance_data)
# 更新帧率间隔
target_fps = self.current_fps
frame_interval = 1.0 / target_fps
self.logger.debug(f'{recording_type}性能检查完成,当前帧率: {target_fps}fps')

# 控制帧率
if current_time - last_frame_time < frame_interval:
time.sleep(0.001)
continue
# 严格的帧率控制 - 确保按照设定的fps精确录制
elapsed_time = current_time - last_frame_time
if elapsed_time < frame_interval:
sleep_time = frame_interval - elapsed_time
time.sleep(sleep_time)
current_time = time.time()  # 重新获取时间

frame = None
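The strict pacing above sleeps off the remainder of each frame interval, which keeps the average close to `target_fps` but lets small scheduling errors accumulate over a long recording. A common alternative is to pace against an absolute next-frame deadline; a brief illustrative sketch:

```python
import time

def paced_loop(target_fps, grab_and_write, stop_event):
    """Sketch: one frame per interval against an absolute schedule, so jitter does not drift the frame count."""
    interval = 1.0 / target_fps
    next_deadline = time.monotonic()
    while not stop_event.is_set():
        grab_and_write()                        # capture + VideoWriter.write()
        next_deadline += interval
        delay = next_deadline - time.monotonic()
        if delay > 0:
            time.sleep(delay)
        else:
            next_deadline = time.monotonic()    # fell behind; resync instead of bursting
```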
@@ -493,15 +641,20 @@ class RecordingManager:
frame = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2BGR)
frame = cv2.resize(frame, (w, h))

# 对所有区域录制进行优化以减小文件大小
frame = self._optimize_frame_for_large_region(frame, region, recording_type)

# 写入视频帧
if frame is not None:
video_writer.write(frame)
frame_count += 1

# # 每100帧记录一次进度
# if frame_count % 100 == 0:
# elapsed_recording_time = current_time - recording_start_time
# self.logger.debug(f'{recording_type}录制进度: {frame_count}帧, 已录制{elapsed_recording_time:.1f}秒, 目标帧率{target_fps}fps')
else:
# 如果没有获取到帧,短暂等待
time.sleep(0.01)
self.logger.warning(f'{recording_type}获取帧失败,跳过此帧')

last_frame_time = current_time
@@ -509,7 +662,23 @@ class RecordingManager:
self.logger.error(f'{recording_type}录制线程错误: {e}')
time.sleep(0.1)

self.logger.info(f'{recording_type}录制线程结束,总帧数: {frame_count}')
# 计算录制统计信息
if self.global_recording_start_time:
total_recording_time = time.time() - self.global_recording_start_time
actual_fps = frame_count / total_recording_time if total_recording_time > 0 else 0
expected_frames = int(total_recording_time * target_fps)

self.logger.info(f'{recording_type}录制线程结束统计:')
self.logger.info(f' 实际录制帧数: {frame_count}帧')
self.logger.info(f' 预期录制帧数: {expected_frames}帧')
self.logger.info(f' 目标帧率: {target_fps}fps')
self.logger.info(f' 实际平均帧率: {actual_fps:.2f}fps')
self.logger.info(f' 录制时长: {total_recording_time:.3f}秒')

if abs(frame_count - expected_frames) > target_fps * 0.1:  # 如果帧数差异超过0.1秒的帧数
self.logger.warning(f'{recording_type}帧数异常: 实际{frame_count}帧 vs 预期{expected_frames}帧,差异{frame_count - expected_frames}帧')
else:
self.logger.info(f'{recording_type}录制线程结束,总帧数: {frame_count}')

except Exception as e:
self.logger.error(f'{recording_type}录制线程异常: {e}')
@ -672,8 +841,7 @@ class RecordingManager:
|
||||
'foot_data': detection_data.get('foot_data'),
|
||||
'foot_data_image': None,
|
||||
'foot_image': None,
|
||||
'screen_image': None,
|
||||
|
||||
'screen_image': None,
|
||||
'timestamp': timestamp
|
||||
}
|
||||
|
||||
@ -682,8 +850,7 @@ class RecordingManager:
|
||||
image_fields = [
|
||||
('body_image', 'body'),
|
||||
('foot_image', 'foot'),
|
||||
('foot_data_image', 'foot_data'),
|
||||
('screen_image', 'screen')
|
||||
('foot_data_image', 'foot_data')
|
||||
]
|
||||
|
||||
for field, prefix in image_fields:
|
||||
@ -706,11 +873,16 @@ class RecordingManager:
|
||||
f.write(image_data)
|
||||
|
||||
# 更新数据字典中的图片路径
|
||||
|
||||
data[field] = str(os.path.join('data', 'patients', patient_id, session_id, timestamp, filename))
|
||||
self.logger.debug(f'{field}保存成功: {filename}')
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f'保存{field}失败: {e}')
|
||||
# 屏幕截图
|
||||
screen_image = self._capture_screen_image(data_dir,timestamp)
|
||||
if screen_image:
|
||||
data['screen_image'] = str(os.path.join('data', 'patients', patient_id, session_id, timestamp, screen_image))
|
||||
|
||||
self.logger.debug(f'数据保存完成: {session_id}, 时间戳: {timestamp}')
|
||||
|
||||
@ -718,182 +890,42 @@ class RecordingManager:
|
||||
self.logger.error(f'数据保存失败: {e}')
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def _collect_body_pose_data(self) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
从FemtoBolt深度相机采集身体姿态数据
|
||||
|
||||
Returns:
|
||||
Dict: 身体姿态数据字典
|
||||
"""
|
||||
try:
|
||||
if self.femtobolt_manager and hasattr(self.femtobolt_manager, 'get_pose_data'):
|
||||
pose_data = self.femtobolt_manager.get_pose_data()
|
||||
return pose_data
|
||||
else:
|
||||
self.logger.warning('FemtoBolt管理器未连接或不支持姿态数据采集')
|
||||
return None
|
||||
except Exception as e:
|
||||
self.logger.error(f'采集身体姿态数据失败: {e}')
|
||||
return None
|
||||
|
||||
def _capture_body_image(self, data_dir) -> Optional[str]:
|
||||
def _capture_screen_image(self, data_dir,timestamp) -> Optional[str]:
|
||||
"""
|
||||
从FemtoBolt深度相机采集身体截图
|
||||
|
||||
Args:
|
||||
data_dir: 数据存储目录
|
||||
|
||||
Returns:
|
||||
str: 身体截图文件的相对路径
|
||||
"""
|
||||
try:
|
||||
if self.femtobolt_manager and hasattr(self.femtobolt_manager, 'get_latest_frame'):
|
||||
frame = self.femtobolt_manager.get_latest_frame()
|
||||
if frame is not None:
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')[:-3]
|
||||
filename = f'body_{timestamp}.jpg'
|
||||
file_path = data_dir / filename
|
||||
|
||||
# 保存图像
|
||||
cv2.imwrite(str(file_path), frame)
|
||||
|
||||
# 返回相对路径
|
||||
return str(filename)
|
||||
else:
|
||||
self.logger.warning('FemtoBolt相机未获取到有效帧')
|
||||
return None
|
||||
else:
|
||||
self.logger.warning('FemtoBolt管理器未连接或不支持图像采集')
|
||||
return None
|
||||
except Exception as e:
|
||||
self.logger.error(f'采集身体截图失败: {e}')
|
||||
return None
|
||||
|
||||
def _collect_foot_pressure_data(self) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
从压力传感器采集足部压力数据
|
||||
|
||||
Returns:
|
||||
Dict: 足部压力数据字典
|
||||
"""
|
||||
try:
|
||||
if self.pressure_manager and hasattr(self.pressure_manager, 'get_pressure_data'):
|
||||
pressure_data = self.pressure_manager.get_pressure_data()
|
||||
return pressure_data
|
||||
else:
|
||||
self.logger.warning('压力传感器管理器未连接或不支持压力数据采集')
|
||||
return None
|
||||
except Exception as e:
|
||||
self.logger.error(f'采集足部压力数据失败: {e}')
|
||||
return None
|
||||
|
||||
def _generate_foot_pressure_image(self, data_dir) -> Optional[str]:
|
||||
"""
|
||||
生成足底压力数据图
|
||||
|
||||
Args:
|
||||
data_dir: 数据存储目录
|
||||
|
||||
Returns:
|
||||
str: 足底压力数据图文件的相对路径
|
||||
"""
|
||||
try:
|
||||
if self.pressure_manager and hasattr(self.pressure_manager, 'generate_pressure_heatmap'):
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')[:-3]
|
||||
filename = f'foot_pressure_{timestamp}.jpg'
|
||||
file_path = data_dir / filename
|
||||
|
||||
# 生成压力热力图
|
||||
success = self.pressure_manager.generate_pressure_heatmap(str(file_path))
|
||||
|
||||
if success and file_path.exists():
|
||||
# 返回相对路径
|
||||
return str(file_path.relative_to(Path.cwd()))
|
||||
else:
|
||||
self.logger.warning('足底压力数据图生成失败')
|
||||
return None
|
||||
else:
|
||||
self.logger.warning('压力传感器管理器未连接或不支持压力图生成')
|
||||
return None
|
||||
except Exception as e:
|
||||
self.logger.error(f'生成足底压力数据图失败: {e}')
|
||||
return None
|
||||
|
||||
def _capture_screen_image(self, data_dir) -> Optional[str]:
|
||||
"""
|
||||
采集屏幕截图
|
||||
采集屏幕截图,根据screen_region 进行截图
|
||||
|
||||
Args:
|
||||
data_dir: 数据存储目录路径
|
||||
|
||||
|
||||
Returns:
|
||||
str: 截图文件的相对路径,失败返回None
|
||||
"""
|
||||
try:
|
||||
# 截取屏幕
|
||||
if self.screen_size:
|
||||
width, height = self.screen_size
|
||||
screenshot = pyautogui.screenshot(region=(0, 0, width, height))
|
||||
# 截取屏幕
|
||||
if self.screen_region:
|
||||
# 使用指定区域截图
|
||||
x, y, width, height = self.screen_region
|
||||
screenshot = pyautogui.screenshot(region=(x, y, width, height))
|
||||
else:
|
||||
# 全屏截图
|
||||
screenshot = pyautogui.screenshot()
|
||||
|
||||
# 保存截图
|
||||
from pathlib import Path
|
||||
image_path = Path(data_dir) / 'screen_image.png'
|
||||
screenshot.save(str(image_path))
|
||||
screen_filename = f'screen_{timestamp}.jpg'
|
||||
image_path = Path(data_dir) / screen_filename
|
||||
screenshot.save(str(image_path), quality=95, optimize=True)
|
||||
|
||||
# # 返回相对路径
|
||||
# abs_image_path = image_path.resolve()
|
||||
# abs_cwd = Path.cwd().resolve()
|
||||
# relative_path = abs_image_path.relative_to(abs_cwd)
|
||||
|
||||
return str('screen_image.png')
|
||||
return screen_filename
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f'屏幕截图失败: {e}')
|
||||
return None
|
||||
|
||||
def _capture_foot_image(self, data_dir) -> Optional[str]:
|
||||
"""
|
||||
采集足部视频截图
|
||||
|
||||
Args:
|
||||
data_dir: 数据存储目录路径
|
||||
|
||||
Returns:
|
||||
str: 截图文件的相对路径,失败返回None
|
||||
"""
|
||||
try:
|
||||
if not self.camera_manager or not self.camera_manager.is_connected:
|
||||
self.logger.warning('相机设备未连接,无法采集足部截图')
|
||||
return None
|
||||
|
||||
# 从相机管理器获取最新帧
|
||||
frame, frame_timestamp = self.camera_manager._get_latest_frame_from_cache('camera')
|
||||
|
||||
if frame is None:
|
||||
self.logger.warning('无法从相机获取帧数据')
|
||||
return None
|
||||
|
||||
# 调整帧尺寸
|
||||
resized_frame = cv2.resize(frame, self.MAX_FRAME_SIZE)
|
||||
|
||||
# 保存截图
|
||||
from pathlib import Path
|
||||
image_path = Path(data_dir) / 'foot_image.png'
|
||||
cv2.imwrite(str(image_path), resized_frame)
|
||||
|
||||
# 返回相对路径
|
||||
# abs_image_path = image_path.resolve()
|
||||
# abs_cwd = Path.cwd().resolve()
|
||||
# relative_path = abs_image_path.relative_to(abs_cwd)
|
||||
|
||||
return str(image_path)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f'足部截图失败: {e}')
|
||||
return None
|
||||
|
||||
|
||||
|
||||
# 保持向后兼容的ScreenRecorder类
|
||||
|
@@ -19,13 +19,18 @@ device_index = 1
width = 1280
height = 720
fps = 30
buffer_size = 1
fourcc = MJPG

[FEMTOBOLT]
algorithm_type = opencv
color_resolution = 1080P
depth_mode = NFOV_UNBINNED
fps = 30
depth_range_min = 1400
depth_range_max = 1700
depth_mode = NFOV_2X2BINNED
camera_fps = 20
depth_range_min = 1000
depth_range_max = 1400
fps = 15
synchronized_images_only = False

[DEVICES]
imu_device_type = real
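For reference, the `[FEMTOBOLT]` keys above are plain `configparser` values; `get_femtobolt_config()` in the next hunk reads them with typed getters and fallbacks. A minimal standalone read would look roughly like this (the file name is an assumption):

```python
import configparser

cfg = configparser.ConfigParser()
cfg.read('config.ini', encoding='utf-8')   # assumed location of the settings file

depth_mode = cfg.get('FEMTOBOLT', 'depth_mode', fallback='NFOV_UNBINNED')
camera_fps = cfg.getint('FEMTOBOLT', 'camera_fps', fallback=15)
depth_min = cfg.getint('FEMTOBOLT', 'depth_range_min', fallback=500)
depth_max = cfg.getint('FEMTOBOLT', 'depth_range_max', fallback=4500)
sync_only = cfg.getboolean('FEMTOBOLT', 'synchronized_images_only', fallback=False)
```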
|
@@ -184,9 +184,10 @@ class ConfigManager:
Dict[str, Any]: FemtoBolt配置
"""
return {
'algorithm_type': self.config.get('FEMTOBOLT', 'algorithm_type', fallback='opencv'),
'color_resolution': self.config.get('FEMTOBOLT', 'color_resolution', fallback='1080P'),
'depth_mode': self.config.get('FEMTOBOLT', 'depth_mode', fallback='NFOV_UNBINNED'),
'fps': self.config.getint('FEMTOBOLT', 'fps', fallback=15),
'camera_fps': self.config.getint('FEMTOBOLT', 'camera_fps', fallback=15),
'depth_range_min': self.config.getint('FEMTOBOLT', 'depth_range_min', fallback=500),
'depth_range_max': self.config.getint('FEMTOBOLT', 'depth_range_max', fallback=4500),
'synchronized_images_only': self.config.getboolean('FEMTOBOLT', 'synchronized_images_only', fallback=False)
@ -489,12 +490,14 @@ class ConfigManager:
|
||||
"""
|
||||
try:
|
||||
# 验证必需参数
|
||||
if 'algorithm_type' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'algorithm_type', config_data['algorithm_type'])
|
||||
if 'color_resolution' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'color_resolution', config_data['color_resolution'])
|
||||
if 'depth_mode' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'depth_mode', config_data['depth_mode'])
|
||||
if 'fps' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'fps', str(config_data['fps']))
|
||||
if 'camera_fps' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'camera_fps', str(config_data['camera_fps']))
|
||||
if 'depth_range_min' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'depth_range_min', str(config_data['depth_range_min']))
|
||||
if 'depth_range_max' in config_data:
|
||||
@ -530,6 +533,153 @@ class ConfigManager:
|
||||
'femtobolt': self.get_device_config('femtobolt')
|
||||
}
|
||||
|
||||
def _batch_update_device_configs(self, configs: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
|
||||
"""
|
||||
批量更新设备配置(内部方法)
|
||||
|
||||
Args:
|
||||
configs: 所有设备配置数据
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: 包含results和errors的字典
|
||||
"""
|
||||
results = {}
|
||||
errors = []
|
||||
|
||||
try:
|
||||
# IMU配置
|
||||
if 'imu' in configs:
|
||||
try:
|
||||
config_data = configs['imu']
|
||||
if 'device_type' in config_data:
|
||||
self.set_config_value('DEVICES', 'imu_device_type', config_data['device_type'])
|
||||
if 'use_mock' in config_data:
|
||||
self.set_config_value('DEVICES', 'imu_use_mock', str(config_data['use_mock']))
|
||||
if 'port' in config_data:
|
||||
self.set_config_value('DEVICES', 'imu_port', config_data['port'])
|
||||
if 'baudrate' in config_data:
|
||||
self.set_config_value('DEVICES', 'imu_baudrate', str(config_data['baudrate']))
|
||||
|
||||
results['imu'] = {
|
||||
'success': True,
|
||||
'message': 'IMU配置更新成功',
|
||||
'config': config_data
|
||||
}
|
||||
self.logger.info(f"IMU配置已更新: {config_data}")
|
||||
except Exception as e:
|
||||
error_msg = f'设置IMU配置失败: {str(e)}'
|
||||
results['imu'] = {'success': False, 'message': error_msg}
|
||||
errors.append(f"IMU: {error_msg}")
|
||||
self.logger.error(error_msg)
|
||||
|
||||
# 压力板配置
|
||||
if 'pressure' in configs:
|
||||
try:
|
||||
config_data = configs['pressure']
|
||||
if 'device_type' in config_data:
|
||||
self.set_config_value('DEVICES', 'pressure_device_type', config_data['device_type'])
|
||||
if 'use_mock' in config_data:
|
||||
self.set_config_value('DEVICES', 'pressure_use_mock', str(config_data['use_mock']))
|
||||
if 'port' in config_data:
|
||||
self.set_config_value('DEVICES', 'pressure_port', config_data['port'])
|
||||
if 'baudrate' in config_data:
|
||||
self.set_config_value('DEVICES', 'pressure_baudrate', str(config_data['baudrate']))
|
||||
|
||||
results['pressure'] = {
|
||||
'success': True,
|
||||
'message': '压力板配置更新成功',
|
||||
'config': config_data
|
||||
}
|
||||
self.logger.info(f"压力板配置已更新: {config_data}")
|
||||
except Exception as e:
|
||||
error_msg = f'设置压力板配置失败: {str(e)}'
|
||||
results['pressure'] = {'success': False, 'message': error_msg}
|
||||
errors.append(f"压力板: {error_msg}")
|
||||
self.logger.error(error_msg)
|
||||
|
||||
# 相机配置
|
||||
if 'camera' in configs:
|
||||
try:
|
||||
config_data = configs['camera']
|
||||
if 'device_index' in config_data:
|
||||
self.set_config_value('CAMERA', 'device_index', str(config_data['device_index']))
|
||||
if 'width' in config_data:
|
||||
self.set_config_value('CAMERA', 'width', str(config_data['width']))
|
||||
if 'height' in config_data:
|
||||
self.set_config_value('CAMERA', 'height', str(config_data['height']))
|
||||
if 'fps' in config_data:
|
||||
self.set_config_value('CAMERA', 'fps', str(config_data['fps']))
|
||||
if 'buffer_size' in config_data:
|
||||
self.set_config_value('CAMERA', 'buffer_size', str(config_data['buffer_size']))
|
||||
if 'fourcc' in config_data:
|
||||
self.set_config_value('CAMERA', 'fourcc', config_data['fourcc'])
|
||||
if 'tx_max_width' in config_data:
|
||||
self.set_config_value('CAMERA', 'tx_max_width', str(config_data['tx_max_width']))
|
||||
|
||||
results['camera'] = {
|
||||
'success': True,
|
||||
'message': '相机配置更新成功',
|
||||
'config': config_data
|
||||
}
|
||||
self.logger.info(f"相机配置已更新: {config_data}")
|
||||
except Exception as e:
|
||||
error_msg = f'设置相机配置失败: {str(e)}'
|
||||
results['camera'] = {'success': False, 'message': error_msg}
|
||||
errors.append(f"相机: {error_msg}")
|
||||
self.logger.error(error_msg)
|
||||
|
||||
# FemtoBolt配置
|
||||
if 'femtobolt' in configs:
|
||||
try:
|
||||
config_data = configs['femtobolt']
|
||||
if 'algorithm_type' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'algorithm_type', config_data['algorithm_type'])
|
||||
if 'color_resolution' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'color_resolution', config_data['color_resolution'])
|
||||
if 'depth_mode' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'depth_mode', config_data['depth_mode'])
|
||||
if 'color_format' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'color_format', config_data['color_format'])
|
||||
if 'camera_fps' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'camera_fps', str(config_data['camera_fps']))
|
||||
if 'depth_range_min' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'depth_range_min', str(config_data['depth_range_min']))
|
||||
if 'depth_range_max' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'depth_range_max', str(config_data['depth_range_max']))
|
||||
if 'synchronized_images_only' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'synchronized_images_only', str(config_data['synchronized_images_only']))
|
||||
if 'send_fps' in config_data:
|
||||
self.set_config_value('FEMTOBOLT', 'send_fps', str(config_data['send_fps']))
|
||||
|
||||
results['femtobolt'] = {
|
||||
'success': True,
|
||||
'message': 'FemtoBolt配置更新成功',
|
||||
'config': config_data
|
||||
}
|
||||
self.logger.info(f"FemtoBolt配置已更新: {config_data}")
|
||||
except Exception as e:
|
||||
error_msg = f'设置FemtoBolt配置失败: {str(e)}'
|
||||
results['femtobolt'] = {'success': False, 'message': error_msg}
|
||||
errors.append(f"FemtoBolt: {error_msg}")
|
||||
self.logger.error(error_msg)
|
||||
|
||||
# 一次性保存所有配置
|
||||
if results: # 只有在有配置更新时才保存
|
||||
self.save_config()
|
||||
self.logger.info("所有设备配置已批量保存")
|
||||
|
||||
return {
|
||||
'results': results,
|
||||
'errors': errors
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"批量更新设备配置失败: {e}")
|
||||
return {
|
||||
'results': results,
|
||||
'errors': [f"批量更新失败: {str(e)}"]
|
||||
}
|
||||
|
||||
def set_all_device_configs(self, configs: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
|
||||
"""
|
||||
批量设置所有设备配置
|
||||
@ -550,31 +700,12 @@ class ConfigManager:
|
||||
results = {}
|
||||
errors = []
|
||||
|
||||
# 逐个设置每个设备的配置
|
||||
if 'imu' in configs:
|
||||
result = self.set_imu_config(configs['imu'])
|
||||
results['imu'] = result
|
||||
if not result['success']:
|
||||
errors.append(f"IMU: {result['message']}")
|
||||
|
||||
if 'pressure' in configs:
|
||||
result = self.set_pressure_config(configs['pressure'])
|
||||
results['pressure'] = result
|
||||
if not result['success']:
|
||||
errors.append(f"压力板: {result['message']}")
|
||||
|
||||
if 'camera' in configs:
|
||||
result = self.set_camera_config(configs['camera'])
|
||||
results['camera'] = result
|
||||
if not result['success']:
|
||||
errors.append(f"相机: {result['message']}")
|
||||
|
||||
if 'femtobolt' in configs:
|
||||
result = self.set_femtobolt_config(configs['femtobolt'])
|
||||
results['femtobolt'] = result
|
||||
if not result['success']:
|
||||
errors.append(f"FemtoBolt: {result['message']}")
|
||||
|
||||
# 批量更新所有设备配置
|
||||
result = self._batch_update_device_configs(configs)
|
||||
results = result['results']
|
||||
errors = result['errors']
|
||||
# 参数保存后,重新加载一下参数
|
||||
self.reload_config()
|
||||
# 如果有错误,返回部分成功的结果
|
||||
if errors:
|
||||
self.logger.warning(f"部分设备配置设置失败: {'; '.join(errors)}")
|
||||
|
BIN  backend/dll/femtobolt/Log/OrbbecSDK.log.txt  Normal file (Binary file not shown.)
BIN  backend/dll/femtobolt/OrbbecSDK.dll  Normal file (Binary file not shown.)
Binary file not shown. (several additional binary files changed; one file diff suppressed because it is too large, Load Diff)
BIN  backend/dll/femtobolt/k4a.dll  Normal file (Binary file not shown.)
BIN  backend/dll/femtobolt/k4a.lib  Normal file (Binary file not shown.)
BIN  backend/dll/femtobolt/k4arecord.dll  Normal file (Binary file not shown.)
BIN  backend/dll/femtobolt/k4arecorder.exe  Normal file (Binary file not shown.)
BIN  backend/dll/femtobolt/k4aviewer.exe  Normal file (Binary file not shown.)
@@ -271,12 +271,16 @@ class AppServer:
# 检查是否在允许的目录内
if not os.path.commonpath([data_dir, file_path]) == data_dir:
return jsonify({'error': '访问被拒绝'}), 403
self.logger.info(f'静态文件: {file_path}')

# 返回文件
from flask import send_file
# 为视频文件设置正确的MIME类型
# 为视频文件设置正确的MIME类型和响应头
if file_path.lower().endswith(('.mp4', '.webm', '.avi', '.mov')):
return send_file(file_path, mimetype='video/mp4')
response = send_file(file_path, mimetype='video/mp4')
# 添加支持视频流播放的响应头
response.headers['Accept-Ranges'] = 'bytes'
response.headers['Content-Type'] = 'video/mp4'
return response
else:
return send_file(file_path)
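Two observations on the branch above: every matched extension (including `.webm`, `.avi`, `.mov`) is served as `video/mp4`, and setting `Accept-Ranges: bytes` only advertises range support; it does not by itself produce the `206 Partial Content` responses that `<video>` seeking relies on. Flask's `send_file` can handle ranges when called with `conditional=True`; a hedged sketch of the same branch using that flag:

```python
from flask import send_file

def serve_media(file_path):
    """Sketch: let Flask answer HTTP Range requests itself for video files."""
    if file_path.lower().endswith(('.mp4', '.webm', '.avi', '.mov')):
        # conditional=True makes send_file honor Range / If-Range headers
        # and return 206 responses, which browser video seeking needs.
        return send_file(file_path, mimetype='video/mp4', conditional=True)
    return send_file(file_path)
```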
@ -868,6 +872,23 @@ class AppServer:
|
||||
self.stop_device_push_data()
|
||||
time.sleep(1) # 等待停止完成
|
||||
|
||||
# 为每个设备管理器重新加载配置
|
||||
self.logger.info("重新加载设备配置...")
|
||||
reload_results = []
|
||||
for device_name, manager in self.device_managers.items():
|
||||
if manager is not None and hasattr(manager, 'reload_config'):
|
||||
try:
|
||||
success = manager.reload_config()
|
||||
reload_results.append(f"{device_name}: {'成功' if success else '失败'}")
|
||||
self.logger.info(f"{device_name}设备配置重新加载{'成功' if success else '失败'}")
|
||||
except Exception as e:
|
||||
reload_results.append(f"{device_name}: 异常 - {str(e)}")
|
||||
self.logger.error(f"{device_name}设备配置重新加载异常: {e}")
|
||||
else:
|
||||
reload_results.append(f"{device_name}: 跳过(管理器未初始化或不支持reload_config)")
|
||||
|
||||
self.logger.info(f"配置重新加载结果: {'; '.join(reload_results)}")
|
||||
|
||||
# 重新启动设备数据推送
|
||||
self.start_device_push_data()
|
||||
self.logger.info("设备配置更新并重启数据推送完成")
|
||||
@ -875,7 +896,8 @@ class AppServer:
|
||||
# 通过SocketIO通知前端重启完成
|
||||
self.socketio.emit('device_restart_complete', {
|
||||
'status': 'success',
|
||||
'message': '设备重启完成'
|
||||
'message': '设备重启完成',
|
||||
'reload_results': reload_results
|
||||
}, namespace='/devices')
|
||||
|
||||
except Exception as restart_error:
|
||||
@ -960,12 +982,7 @@ class AppServer:
|
||||
camera_location = data.get('camera_location') # [0,0,640,480]
|
||||
femtobolt_location = data.get('femtobolt_location') # [0,0,640,480]
|
||||
|
||||
# 添加调试输出
|
||||
self.logger.info(f'接收到的参数 - patient_id: {patient_id}, creator_id: {creator_id}')
|
||||
self.logger.info(f'接收到的位置参数 - screen_location: {screen_location}, type: {type(screen_location)}')
|
||||
self.logger.info(f'接收到的位置参数 - camera_location: {camera_location}, type: {type(camera_location)}')
|
||||
self.logger.info(f'接收到的位置参数 - femtobolt_location: {femtobolt_location}, type: {type(femtobolt_location)}')
|
||||
|
||||
|
||||
if not patient_id or not creator_id:
|
||||
return jsonify({'success': False, 'error': '缺少患者ID或创建人ID'}), 400
|
||||
|
||||
@ -1179,6 +1196,7 @@ class AppServer:
|
||||
|
||||
# 获取请求数据
|
||||
data = flask_request.get_json() or {}
|
||||
# print(f"接收到的data数据: {data}")
|
||||
patient_id = data.get('patient_id')
|
||||
|
||||
# 如果没有提供patient_id,从会话信息中获取
|
||||
|
@ -11,6 +11,7 @@ scipy
|
||||
|
||||
# Computer vision and machine learning
|
||||
opencv-python
|
||||
pykinect-azure
|
||||
# mediapipe # Not compatible with Python 3.13 yet
|
||||
# torch # May have compatibility issues with Python 3.13
|
||||
# torchvision # May have compatibility issues with Python 3.13
|
||||
|
@ -17,7 +17,7 @@ python-dateutil==2.8.2
|
||||
PyInstaller>=6.10.0
|
||||
|
||||
# Optional - only if available
|
||||
# pykinect_azure # Comment out if not available
|
||||
pykinect_azure # Azure Kinect SDK for Python
|
||||
|
||||
# System utilities
|
||||
colorama==0.4.6
|
||||
|
@ -1,137 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
屏幕录制性能测试脚本
|
||||
用于测试屏幕录制的CPU优化功能
|
||||
"""
|
||||
|
||||
import time
|
||||
import logging
|
||||
import threading
|
||||
from devices.screen_recorder import RecordingManager
|
||||
|
||||
# 配置日志
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
|
||||
def test_screen_recording_performance():
|
||||
"""
|
||||
测试屏幕录制性能优化功能
|
||||
"""
|
||||
print("开始屏幕录制性能测试...")
|
||||
|
||||
# 创建录制管理器
|
||||
recording_manager = RecordingManager()
|
||||
|
||||
# 配置性能参数(更严格的阈值用于测试)
|
||||
recording_manager.configure_performance_settings(
|
||||
cpu_threshold=70.0, # 降低CPU阈值以便测试
|
||||
memory_threshold=80.0,
|
||||
adaptive_fps=True,
|
||||
min_fps=10,
|
||||
max_fps=30
|
||||
)
|
||||
|
||||
try:
|
||||
# 启动屏幕录制
|
||||
print("启动屏幕录制...")
|
||||
result = recording_manager.start_recording(
|
||||
session_id="test_session_001",
|
||||
patient_id="test_patient",
|
||||
screen_location=[0, 0, 1920, 1080], # 全屏录制
|
||||
camera_location=[0, 0, 640, 480], # 默认相机区域
|
||||
femtobolt_location=[0, 0, 640, 480], # 默认FemtoBolt区域
|
||||
recording_types=["screen"] # 只录制屏幕
|
||||
)
|
||||
|
||||
if not result['success']:
|
||||
print(f"录制启动失败: {result['message']}")
|
||||
return
|
||||
|
||||
print("录制已启动,开始性能监控...")
|
||||
|
||||
# 监控性能状态
|
||||
def monitor_performance():
|
||||
for i in range(30): # 监控30秒
|
||||
time.sleep(1)
|
||||
status = recording_manager.get_status()
|
||||
|
||||
if status['recording']:
|
||||
perf = status['performance']
|
||||
print(f"[{i+1:2d}s] CPU: {perf['cpu_percent']:.1f}% | "
|
||||
f"内存: {perf['memory_percent']:.1f}% | "
|
||||
f"当前帧率: {status['current_fps']:.1f}fps | "
|
||||
f"跳帧: {status['frame_skip_count']}")
|
||||
|
||||
# 如果CPU或内存超过阈值,显示警告
|
||||
if perf['cpu_percent'] > perf['cpu_threshold']:
|
||||
print(f" ⚠️ CPU使用率超过阈值 ({perf['cpu_threshold']}%)")
|
||||
if perf['memory_percent'] > perf['memory_threshold']:
|
||||
print(f" ⚠️ 内存使用率超过阈值 ({perf['memory_threshold']}%)")
|
||||
else:
|
||||
print("录制已停止")
|
||||
break
|
||||
|
||||
# 在单独线程中监控性能
|
||||
monitor_thread = threading.Thread(target=monitor_performance)
|
||||
monitor_thread.start()
|
||||
|
||||
# 等待监控完成
|
||||
monitor_thread.join()
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\n用户中断测试")
|
||||
except Exception as e:
|
||||
print(f"测试过程中发生错误: {e}")
|
||||
finally:
|
||||
# 停止录制
|
||||
print("停止录制...")
|
||||
stop_result = recording_manager.stop_recording()
|
||||
if stop_result['success']:
|
||||
print(f"录制已停止,视频文件: {stop_result.get('video_files', [])}")
|
||||
else:
|
||||
print(f"停止录制失败: {stop_result['message']}")
|
||||
|
||||
def test_performance_configuration():
|
||||
"""
|
||||
测试性能配置功能
|
||||
"""
|
||||
print("\n测试性能配置功能...")
|
||||
|
||||
recording_manager = RecordingManager()
|
||||
|
||||
# 测试各种配置
|
||||
print("测试CPU阈值配置:")
|
||||
recording_manager.configure_performance_settings(cpu_threshold=60)
|
||||
recording_manager.configure_performance_settings(cpu_threshold=40) # 应该被限制到50
|
||||
recording_manager.configure_performance_settings(cpu_threshold=100) # 应该被限制到95
|
||||
|
||||
print("\n测试帧率配置:")
|
||||
recording_manager.configure_performance_settings(min_fps=15, max_fps=25)
|
||||
recording_manager.configure_performance_settings(min_fps=35, max_fps=20) # min > max,应该调整
|
||||
|
||||
print("\n当前状态:")
|
||||
status = recording_manager.get_status()
|
||||
perf = status['performance']
|
||||
print(f"CPU阈值: {perf['cpu_threshold']}%")
|
||||
print(f"内存阈值: {perf['memory_threshold']}%")
|
||||
print(f"自适应帧率: {status['adaptive_fps_enabled']}")
|
||||
print(f"当前帧率: {status['current_fps']}fps")
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("屏幕录制性能测试")
|
||||
print("=" * 50)
|
||||
|
||||
# 测试配置功能
|
||||
test_performance_configuration()
|
||||
|
||||
# 询问是否进行实际录制测试
|
||||
response = input("\n是否进行实际录制测试?(y/n): ")
|
||||
if response.lower() in ['y', 'yes', '是']:
|
||||
test_screen_recording_performance()
|
||||
else:
|
||||
print("跳过录制测试")
|
||||
|
||||
print("\n测试完成")
|
File diff suppressed because it is too large
Load Diff
@@ -29,15 +29,16 @@ class FemtoBoltDynamicViewer:
"""加载并初始化 FemtoBolt SDK"""
import pykinect_azure as pykinect
base_dir = os.path.dirname(os.path.abspath(__file__))
dll_path = os.path.join(base_dir, "..", "dll", "femtobolt", "bin", "k4a.dll")
dll_path = os.path.join(base_dir, "..", "dll", "femtobolt", "k4a.dll")
self.pykinect = pykinect
self.pykinect.initialize_libraries(track_body=False, module_k4a_path=dll_path)

def _configure_device(self):
"""配置 FemtoBolt 深度相机"""
self.config = self.pykinect.default_configuration
self.config.depth_mode = self.pykinect.K4A_DEPTH_MODE_NFOV_UNBINNED
self.config.camera_fps = self.pykinect.K4A_FRAMES_PER_SECOND_15
self.config.depth_mode = self.pykinect.K4A_DEPTH_MODE_NFOV_2X2BINNED
self.config.color_format = self.pykinect.K4A_IMAGE_FORMAT_COLOR_BGRA32
self.config.color_resolution = self.pykinect.K4A_COLOR_RESOLUTION_720P
self.config.synchronized_images_only = False
self.config.color_resolution = 0
self.device_handle = self.pykinect.start_device(config=self.config)
@@ -72,7 +73,7 @@ class FemtoBoltDynamicViewer:
depth[depth < self.depth_min] = 0

# 裁剪感兴趣区域 (与display_x.py完全一致)
depth = depth[50:200, 50:210]
# depth = depth[50:200, 50:210]

# 背景图 (与display_x.py完全一致)
background = np.ones_like(depth) * 0.5  # 设定灰色背景
@@ -88,7 +89,7 @@ class FemtoBoltDynamicViewer:

# 绘制等高线图并设置原点在左下角 (根据输入参数动态设置)
# 通过设置 zorder 来控制它们的层级。例如,设置 zorder=2 或更大的值来确保它们位于栅格线之上。
plt.contourf(depth, levels=100, cmap=self.mcmap, vmin=self.depth_min, vmax=self.depth_max, origin='upper', zorder=2)
plt.contourf(depth, levels=50, cmap=self.mcmap, vmin=self.depth_min, vmax=self.depth_max, origin='upper', zorder=2)

# 更新显示 (与display_x.py完全一致)
plt.pause(0.1)  # 暂停0.1秒
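The new `algorithm_type = opencv` option in `[FEMTOBOLT]` (and the frontend's "OpenCV(效率高)" radio button) points to an OpenCV rendering path alongside the Matplotlib contour view shown above, but that path is not visible in this diff. A rough sketch of the usual OpenCV approach, normalizing the clipped depth band into a color map; every name and choice here is an assumption:

```python
import cv2
import numpy as np

def depth_to_color(depth, depth_min, depth_max):
    """Sketch: render a depth frame with OpenCV instead of matplotlib.contourf."""
    d = depth.astype(np.float32)
    mask = (d >= depth_min) & (d <= depth_max)         # keep only the configured band
    d = np.clip(d, depth_min, depth_max)
    norm = ((d - depth_min) / max(depth_max - depth_min, 1) * 255).astype(np.uint8)
    color = cv2.applyColorMap(norm, cv2.COLORMAP_JET)  # any OpenCV colormap works
    color[~mask] = (128, 128, 128)                     # grey background outside the band
    return color
```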
93  backend/tests/display_x copy.py  Normal file
@ -0,0 +1,93 @@
|
||||
import cv2
|
||||
|
||||
import pykinect_azure as pykinect
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import pdb
|
||||
|
||||
import os
|
||||
from matplotlib.colors import LinearSegmentedColormap,ListedColormap
|
||||
from matplotlib.animation import FuncAnimation, FFMpegWriter
|
||||
|
||||
|
||||
# Initialize the library, if the library is not found, add the library path as argument
|
||||
pykinect.initialize_libraries()
|
||||
|
||||
# Modify camera configuration
|
||||
device_config = pykinect.default_configuration
|
||||
device_config.color_format = pykinect.K4A_IMAGE_FORMAT_COLOR_BGRA32
|
||||
device_config.color_resolution = pykinect.K4A_COLOR_RESOLUTION_720P
|
||||
device_config.depth_mode = pykinect.K4A_DEPTH_MODE_NFOV_2X2BINNED
|
||||
|
||||
# Start device
|
||||
device = pykinect.start_device(config=device_config)
|
||||
|
||||
# 创建一个自定义的 colormap
|
||||
colors = ['red', 'yellow', 'green', 'blue']
|
||||
|
||||
# 自定义颜色
|
||||
colors = ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
|
||||
mcmap = LinearSegmentedColormap.from_list("custom_cmap", colors)
|
||||
|
||||
# 使用交互模式减少闪烁
|
||||
plt.ion()
|
||||
fig, ax = plt.subplots(figsize=(7, 7))
|
||||
cv2.namedWindow('Transformed color',cv2.WINDOW_NORMAL)
|
||||
|
||||
framei = 0
|
||||
while True:
|
||||
# Get capture
|
||||
capture = device.update()
|
||||
|
||||
# Get depth image
|
||||
depth_ret, depth = capture.get_depth_image()
|
||||
|
||||
# Get the color image in the depth camera axis
|
||||
ret_color, color_image = capture.get_color_image()
|
||||
|
||||
if not ret_color or not depth_ret:
|
||||
continue
|
||||
|
||||
h,w,_ = color_image.shape
|
||||
|
||||
depth[depth > 1100] = 0
|
||||
depth[depth < 500] = 0
|
||||
# depth = depth[50:200,50:210]
|
||||
|
||||
# 背景图
|
||||
background = np.ones_like(depth) * 0.5 # 设定灰色背景
|
||||
|
||||
# 使用 np.ma.masked_equal() 来屏蔽深度图中的零值。masked_array 中的值不会被绘制,从而避免了零值的显示。
|
||||
depth = np.ma.masked_equal(depth, 0)
|
||||
|
||||
# 清除轴内容而不是整个图形
|
||||
ax.clear()
|
||||
|
||||
# 绘制背景
|
||||
ax.imshow(background, origin='lower', cmap='gray', alpha=0.3)
|
||||
# 绘制白色栅格线,并将其置于底层
|
||||
ax.grid(True, which='both', axis='both', color='white', linestyle='-', linewidth=1, zorder=0)
|
||||
|
||||
# 绘制等高线图并设置原点在左下角
|
||||
# 通过设置 zorder 来控制它们的层级。例如,设置 zorder=2 或更大的值来确保它们位于栅格线之上。
|
||||
ax.contourf(depth, levels=100, cmap=mcmap,vmin=500, vmax=1100,origin='upper',zorder=2)
|
||||
|
||||
# 使用更高效的绘图更新方式
|
||||
plt.draw()
|
||||
plt.pause(0.001) # 极短暂停时间
|
||||
|
||||
# 显示彩色图像
|
||||
cv2.imshow('Transformed color', color_image)
|
||||
|
||||
# Press q key to stop
|
||||
if cv2.waitKey(1) == ord('q'):
|
||||
break
|
||||
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
|
||||
|
||||
|
@ -1,70 +0,0 @@
|
||||
import cv2
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import pdb
|
||||
|
||||
import os
|
||||
from matplotlib.colors import LinearSegmentedColormap,ListedColormap
|
||||
from matplotlib.animation import FuncAnimation, FFMpegWriter
|
||||
|
||||
# 指定文件夹路径
|
||||
folder_path = 'datas'
|
||||
|
||||
# 获取文件夹中的所有文件
|
||||
files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
|
||||
|
||||
# 根据文件的修改时间排序
|
||||
sorted_files = sorted(files, key=lambda x: os.path.getmtime(os.path.join(folder_path, x)))
|
||||
|
||||
|
||||
# 创建一个自定义的 colormap
|
||||
colors = ['red', 'yellow', 'green', 'blue']
|
||||
|
||||
# 自定义颜色
|
||||
colors = ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
|
||||
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
|
||||
mcmap = LinearSegmentedColormap.from_list("custom_cmap", colors)
|
||||
|
||||
plt.figure(figsize=(7, 7))
|
||||
# 打印排序后的文件名
|
||||
for file in sorted_files:
|
||||
data = np.load(os.path.join(folder_path,file))
|
||||
depth = data['arr1']
|
||||
points = data['arr2']
|
||||
color_image = data['arr3']
|
||||
|
||||
h,w,_ = color_image.shape
|
||||
points = points.reshape((h,w,3))
|
||||
|
||||
depth[depth > 1300] = 0
|
||||
depth[depth < 900] = 0
|
||||
depth = depth[50:200,50:210]
|
||||
|
||||
# 背景图
|
||||
background = np.ones_like(depth) * 0.5 # 设定灰色背景
|
||||
|
||||
# 使用 np.ma.masked_equal() 来屏蔽深度图中的零值。masked_array 中的值不会被绘制,从而避免了零值的显示。
|
||||
depth = np.ma.masked_equal(depth, 0)
|
||||
|
||||
# 绘制背景
|
||||
plt.imshow(background, origin='lower', cmap='gray', alpha=0.3)
|
||||
# 绘制白色栅格线,并将其置于底层
|
||||
plt.grid(True, which='both', axis='both', color='white', linestyle='-', linewidth=1, zorder=0)
|
||||
if False:
|
||||
plt.subplot(1,2,1)
|
||||
plt.imshow(depth, cmap='plasma', vmin=1000, vmax=1200)
|
||||
plt.subplot(1,2,2)
|
||||
# 绘制等高线图并设置原点在左下角
|
||||
# 通过设置 zorder 来控制它们的层级。例如,设置 zorder=2 或更大的值来确保它们位于栅格线之上。
|
||||
plt.contourf(depth, levels=200, cmap=mcmap,vmin=900, vmax=1300,origin='upper',zorder=2)
|
||||
plt.pause(0.1) # 暂停0.1秒
|
||||
plt.draw() # 重绘图像
|
||||
plt.clf() # 清除当前图像
|
||||
#plt.show()
|
||||
|
||||
|
||||
|
||||
|
@@ -35,7 +35,7 @@ chart_dpi = 300
export_format = csv

[SECURITY]
secret_key = 739bbbe1b291cd966ef91d7752701958bf6d3e48c7b41e3872a7281d45403685
secret_key = 332fe6a0e5b58a60e61eeee09cad362a7c47051202db7fa334256c2527371ecf
session_timeout = 3600
max_login_attempts = 5
|
||||
|
@ -507,7 +507,12 @@
|
||||
</el-radio-group>
|
||||
</el-form-item>
|
||||
<div class="cameraFormTitle">深度相机</div>
|
||||
|
||||
<el-form-item label="算法类型">
|
||||
<el-radio-group v-model="cameraForm.femtobolt.algorithm_type">
|
||||
<el-radio value="opencv">OpenCV(效率高)</el-radio>
|
||||
<el-radio value="plt">Matplotlib(精度高)</el-radio>
|
||||
</el-radio-group>
|
||||
</el-form-item>
|
||||
<el-form-item label="距离范围">
|
||||
<div >
|
||||
<el-input v-model="cameraForm.femtobolt.depth_range_min" placeholder="请输入最小值" style="width: 216px;" />
|
||||
@ -516,6 +521,7 @@
|
||||
</div>
|
||||
|
||||
</el-form-item>
|
||||
|
||||
<div class="cameraFormTitle">头部IMU</div>
|
||||
<el-form-item label="IMU串口号">
|
||||
<el-select v-model="cameraForm.imu.port" placeholder="请选择">
|
||||
@ -664,6 +670,7 @@ const cameraForm = ref({ // 相机参数
|
||||
device_index: '', // 序号
|
||||
},
|
||||
femtobolt:{
|
||||
algorithm_type: '', // 算法类型
|
||||
depth_mode: '', // 相机模式
|
||||
depth_range_min: '', // 距离范围最小值
|
||||
depth_range_max: '', // 距离范围最大值
|
||||
@@ -804,6 +811,14 @@ const startTimer = () => {
// 转换为秒并四舍五入
seconds.value = Math.round(elapsed / 1000);

// 检测时长超过阈值自动停止检测(原注释写 10 分钟/600 秒,但当前判断条件为 60 秒)
if (seconds.value >= 60) {
console.log('⏰ 检测时长超过10分钟,自动停止检测');
ElMessage.warning('检测时长已达到10分钟,自动停止检测');
stopDetection();
return;
}

// 触发闪烁效果
blinkState.value = !blinkState.value;
}, 1000);
@ -881,6 +896,7 @@ function cameraUpdate() { // 相机设置数据更新弹框
|
||||
device_index: '', // 序号
|
||||
},
|
||||
femtobolt:{
|
||||
algorithm_type: '', // 算法类型
|
||||
depth_mode: '', // 相机模式
|
||||
depth_range_min: '', // 距离范围最小值
|
||||
depth_range_max: '', // 距离范围最大值
|
||||
@ -1049,18 +1065,13 @@ function connectWebSocket() {
|
||||
tempInfo.value.camera_frame = data
|
||||
displayFrame(data.image)
|
||||
})
|
||||
// devicesSocket.on('video_frame', (data) => {
|
||||
// frameCount++
|
||||
// displayFrame(data.image)
|
||||
// })
|
||||
|
||||
|
||||
devicesSocket.on('femtobolt_frame', (data) => {
|
||||
tempInfo.value.femtobolt_frame = data
|
||||
displayDepthCameraFrame(data.depth_image || data.image)
|
||||
})
|
||||
// devicesSocket.on('depth_camera_frame', (data) => {
|
||||
// displayDepthCameraFrame(data.depth_image || data.image)
|
||||
// })
|
||||
|
||||
|
||||
devicesSocket.on('imu_data', (data) => {
|
||||
tempInfo.value.imu_data = data
|
||||
@ -1520,163 +1531,86 @@ async function handleDiagnosticInfo(status) {
|
||||
}
|
||||
}
|
||||
|
||||
// 检测数据采集功能
|
||||
async function handleDataCollection() {
|
||||
if (dataCollectionLoading.value) return
|
||||
try {
|
||||
dataCollectionLoading.value = true
|
||||
|
||||
// 显示进度提示
|
||||
ElMessage.info('正在采集检测数据...')
|
||||
|
||||
// 检查是否有活跃的会话ID
|
||||
if (!patientInfo.value.sessionId) {
|
||||
throw new Error('请先开始检测再进行数据采集')
|
||||
}
|
||||
|
||||
// 调用后端API采集检测数据
|
||||
const response = await fetch(`${BACKEND_URL}/api/detection/${patientInfo.value.sessionId}/collect`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
// patient_id: patientInfo.value.id,
|
||||
// timestamp: Date.now()
|
||||
head_pose: {},
|
||||
body_pose: {},
|
||||
foot_data: {}
|
||||
})
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`HTTP ${response.status}: ${response.statusText}`)
|
||||
}
|
||||
|
||||
const result = await response.json()
|
||||
|
||||
if (result.success) {
|
||||
// 显示成功消息
|
||||
ElMessage.success({
|
||||
message: `检测数据采集成功!数据ID: ${result.dataId}`,
|
||||
duration: 5000
|
||||
})
|
||||
|
||||
console.log('✅ 检测数据采集成功:', result)
|
||||
|
||||
// 更新历史数据表格
|
||||
if (result.data) {
|
||||
historyData.value.unshift({
|
||||
id: result.dataId,
|
||||
rotLeft: result.data.rotLeft || '-',
|
||||
rotRight: result.data.rotRight || '-',
|
||||
tiltLeft: result.data.tiltLeft || '-',
|
||||
tiltRight: result.data.tiltRight || '-',
|
||||
pitchDown: result.data.pitchDown || '-',
|
||||
pitchUp: result.data.pitchUp || '-'
|
||||
})
|
||||
|
||||
// 保持最多显示10条记录
|
||||
if (historyData.value.length > 10) {
|
||||
historyData.value = historyData.value.slice(0, 10)
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
throw new Error(result.message || '数据采集失败')
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
console.error('❌ 检测数据采集失败:', error)
|
||||
|
||||
// 根据错误类型显示不同的错误消息
|
||||
let errorMessage = '检测数据采集失败'
|
||||
if (error.message.includes('网络连接失败')) {
|
||||
errorMessage = '网络连接失败,请检查后端服务是否正常运行'
|
||||
} else if (error.message.includes('服务器错误')) {
|
||||
errorMessage = error.message
|
||||
} else {
|
||||
errorMessage = `检测数据采集失败: ${error.message}`
|
||||
}
|
||||
|
||||
ElMessage.error({
|
||||
message: errorMessage,
|
||||
duration: 5000
|
||||
})
|
||||
|
||||
} finally {
|
||||
dataCollectionLoading.value = false
|
||||
}
|
||||
}
|
||||
|
||||
// 保存检测数据
|
||||
|
||||
async function saveDetectionData() {
|
||||
console.log(tempInfo.value)
|
||||
return
|
||||
console.log(tempInfo.value)
|
||||
if (screenshotLoading.value) return
|
||||
|
||||
try {
|
||||
screenshotLoading.value = true
|
||||
// 显示保存进度
|
||||
ElMessage.info('正在保存截图...')
|
||||
ElMessage.info('正在保存检测截图数据...')
|
||||
|
||||
// 检查是否有活跃的会话ID
|
||||
if (!patientInfo.value.sessionId) {
|
||||
throw new Error('请先开始检测再进行截图')
|
||||
throw new Error('请先开始检测再进行数据保存')
|
||||
}
|
||||
const base64 = 'data:image/jpeg;base64,'
|
||||
|
||||
let body_image = ""
|
||||
if(tempInfo.value.femtobolt_frame != null
|
||||
&& tempInfo.value.femtobolt_frame.depth_image != null){
|
||||
body_image = base64 + tempInfo.value.femtobolt_frame.depth_image
|
||||
}
|
||||
|
||||
let pressure_image = ""
|
||||
let foot_data = ""
|
||||
if(tempInfo.value.pressure_data != null
|
||||
&& tempInfo.value.pressure_data.foot_pressure != null
|
||||
&& tempInfo.value.pressure_data.foot_pressure.pressure_image != null){
|
||||
pressure_image = base64 + tempInfo.value.pressure_data.foot_pressure.pressure_image
|
||||
pressure_image = tempInfo.value.pressure_data.foot_pressure.pressure_image
|
||||
foot_data = tempInfo.value.pressure_data.foot_pressure.pressure_zones
|
||||
}
|
||||
|
||||
let foot_image=""
|
||||
if(tempInfo.value.camera_frame != null
|
||||
&& tempInfo.value.camera_frame.image != null ){
|
||||
foot_image=base64 + tempInfo.value.camera_frame.image
|
||||
}
|
||||
|
||||
let head_pose={}
|
||||
if(tempInfo.value.imu_data != null ){
|
||||
head_pose=tempInfo.value.imu_data
|
||||
}
|
||||
let screen_location = contenGridRef.value.getBoundingClientRect()
|
||||
// 调用后端API保存截图
|
||||
const result = await sendDetectionData({
|
||||
// patientId: patientInfo.value.id,
|
||||
// patientName: patientInfo.value.name,
|
||||
// sessionId: patientInfo.value.sessionId,
|
||||
// head_pose: tempInfo.value.imu_data,
|
||||
// body_data: femtobolt_frame,
|
||||
// foot_image: pressure_data,
|
||||
const result = await sendDetectionData({
|
||||
|
||||
session_id: patientInfo.value.sessionId,
|
||||
patient_id: patientInfo.value.id,
|
||||
|
||||
sessionId: patientInfo.value.sessionId,
|
||||
patientId: patientInfo.value.id,
|
||||
head_pose:head_pose,
|
||||
body_pose:null,
|
||||
body_image: body_image,
|
||||
|
||||
body_pose:tempInfo.value.femtobolt_frame,
|
||||
body_image:base64 + tempInfo.value.femtobolt_frame,
|
||||
|
||||
foot_data:tempInfo.value.femtobolt_frame,
|
||||
foot_data_image:base64+tempInfo.value.femtobolt_frame.image,
|
||||
screen_image:null,
|
||||
camera_data: base64+ tempInfo.value.camera_frame.image,
|
||||
})
|
||||
|
||||
foot_data:foot_data,
|
||||
foot_image:foot_image,
|
||||
foot_data_image:pressure_image,
|
||||
screen_image:null
|
||||
|
||||
})
|
||||
|
||||
// 显示成功消息和文件路径
|
||||
ElMessage.success({
|
||||
message: `截图保存成功!`,
|
||||
message: `检测数据保存成功!`,
|
||||
duration: 5000
|
||||
})
|
||||
|
||||
console.log('✅ 截图保存成功:', result.filepath)
|
||||
|
||||
} catch (error) {
|
||||
console.error('❌ 截图失败:', error)
|
||||
console.error('❌ 检测数据保存失败:', error)
|
||||
|
||||
// 根据错误类型显示不同的错误消息
|
||||
let errorMessage = '截图失败'
|
||||
let errorMessage = '检测数据保存失败'
|
||||
if (error.message.includes('网络连接失败')) {
|
||||
errorMessage = '网络连接失败,请检查后端服务是否正常运行'
|
||||
} else if (error.message.includes('服务器错误')) {
|
||||
errorMessage = error.message
|
||||
} else if (error.message.includes('未找到截图区域')) {
|
||||
errorMessage = '截图区域不存在,请刷新页面重试'
|
||||
} else if (error.message.includes('未找到检测数据区域')) {
|
||||
errorMessage = '检测数据区域不存在,请刷新页面重试'
|
||||
} else if (error.message.includes('未找到检测数据')) {
|
||||
errorMessage = '检测数据不存在,请刷新页面重试'
|
||||
} else {
|
||||
errorMessage = `截图失败: ${error.message}`
|
||||
errorMessage = `检测数据保存失败: ${error.message}`
|
||||
}
|
||||
|
||||
ElMessage.error({
|
||||
@ -1727,13 +1661,6 @@ async function sendDetectionData(data) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
// 处理开始/停止按钮点击
|
||||
async function handleStartStop() {
|
||||
if (!isConnected.value) {
|
||||
@ -1763,6 +1690,7 @@ async function startDetection() {
|
||||
let screen_location = contenGridRef.value.getBoundingClientRect()
|
||||
let femtobolt_location = wholeBodyRef.value.getBoundingClientRect()
|
||||
let camera_location = videoImgRef.value.getBoundingClientRect()
|
||||
let titile_height = 24
|
||||
// 调用后端API开始检测
|
||||
const response = await fetch(`${BACKEND_URL}/api/detection/start`, {
|
||||
method: 'POST',
|
||||
@ -1772,10 +1700,10 @@ async function startDetection() {
|
||||
body: JSON.stringify({
|
||||
patient_id: patientInfo.value.id,
|
||||
// 可以添加其他检测参数
|
||||
creator_id: creatorId.value,
|
||||
screen_location:[Math.round(screen_location.x), Math.round(screen_location.y), Math.round(screen_location.width), Math.round(screen_location.height)],
|
||||
camera_location:[Math.round(camera_location.x), Math.round(camera_location.y), Math.round(camera_location.width), Math.round(camera_location.height)],
|
||||
femtobolt_location:[Math.round(femtobolt_location.x), Math.round(femtobolt_location.y), Math.round(femtobolt_location.width), Math.round(femtobolt_location.height)],
|
||||
creator_id: creatorId.value,
|
||||
screen_location:[Math.round(screen_location.x), Math.round(screen_location.y) + titile_height, Math.round(screen_location.width), Math.round(screen_location.height-titile_height)],
|
||||
camera_location:[Math.round(camera_location.x), Math.round(camera_location.y)+ titile_height, Math.round(camera_location.width), Math.round(camera_location.height-titile_height)],
|
||||
femtobolt_location:[Math.round(femtobolt_location.x), Math.round(femtobolt_location.y) + titile_height, Math.round(femtobolt_location.width), Math.round(femtobolt_location.height-titile_height)],
|
||||
|
||||
})
|
||||
})
|
||||
|
@ -6,7 +6,7 @@
|
||||
<div class="nav-container-title" @click="goBack">
|
||||
<img src="@/assets/svg/goback.svg" alt="">
|
||||
<div style="margin-left: 20px;">
|
||||
患者档案
|
||||
患者档案 - {{ patient?.name || '未知' }} (ID: {{ patient?.id || '未知' }})
|
||||
</div>
|
||||
</div>
|
||||
<div class="nav-container-info">
|
||||
@ -30,7 +30,7 @@
|
||||
|
||||
</div>
|
||||
<div class="content-center">
|
||||
<video ref="videoPlayerRef" :src=" BACKEND_URL+'/' + item.screen_video_path.replace(/\\/g, '/')" controls width="100%" height="100%">
|
||||
<video ref="videoPlayerRef" :src="item.screen_video_path ? BACKEND_URL+'/' + item.screen_video_path.replace(/\\/g, '/') : ''" controls width="100%" height="100%">
|
||||
您的浏览器不支持视频播放
|
||||
</video>
|
||||
<img src="@/assets/big.png" alt="" class="bigImgBox" @click="bigImgClick(item)">
|
||||
@ -70,8 +70,8 @@
|
||||
<div class="content-right-bottom-content">
|
||||
<div v-for="(item2, index2) in item.latest_detection_data" :key="index2" class="content-right-bottom-content-box">
|
||||
<div class="content-right-bottom-img">
|
||||
<img :src="BACKEND_URL+'/' + item2.screen_image.replace(/\\/g, '/')" style="width:100% ;height: 100%;cursor: pointer;" alt=""
|
||||
@click="showImage(BACKEND_URL+'/' + item2.screen_image.replace(/\\/g, '/'))">
|
||||
<img :src="item2.screen_image ? BACKEND_URL+'/' + item2.screen_image.replace(/\\/g, '/') : ''" style="width:100% ;height: 100%;cursor: pointer;" alt=""
|
||||
@click="item2.screen_image ? showImage(BACKEND_URL+'/' + item2.screen_image.replace(/\\/g, '/')) : null">
|
||||
|
||||
</div>
|
||||
<div style="margin-top: 15px;">
|
||||
|
@ -1,21 +0,0 @@
|
||||
# 平衡体态检测系统安装包
|
||||
|
||||
## 目录结构
|
||||
- `backend/` - 后端程序文件
|
||||
- `frontend/` - 前端Electron应用
|
||||
- `启动系统.bat` - 系统启动脚本
|
||||
|
||||
## 使用方法
|
||||
1. 双击 `启动系统.bat` 启动系统
|
||||
2. 系统会自动启动后端服务和前端界面
|
||||
3. 如果需要单独启动,可以直接运行前端exe文件
|
||||
|
||||
## 系统要求
|
||||
- Windows 10 或更高版本
|
||||
- 至少4GB内存
|
||||
- 支持USB设备连接
|
||||
|
||||
## 注意事项
|
||||
- 首次启动可能需要较长时间
|
||||
- 确保防火墙允许程序访问网络
|
||||
- 如遇问题请查看日志文件
|