解决冲突

This commit is contained in:
limengnan 2025-09-01 18:16:56 +08:00
commit 62cf394af2
41 changed files with 34482 additions and 6494 deletions

View File

@ -5,16 +5,19 @@ block_cipher = None
a = Analysis(
['main.py'],
pathex=[],
pathex=['D:/Trae_space/pyKinectAzure'],
binaries=[
('dll/femtobolt/bin/k4a.dll', 'dll/femtobolt/bin'), # K4A动态库
('dll/femtobolt/bin/k4arecord.dll', 'dll/femtobolt/bin'), # K4A录制库
('dll/femtobolt/bin/depthengine_2_0.dll', 'dll/femtobolt/bin'), # 深度引擎
('dll/femtobolt/bin/OrbbecSDK.dll', 'dll/femtobolt/bin'), # Orbbec SDK
('dll/femtobolt/bin/ob_usb.dll', 'dll/femtobolt/bin'), # Orbbec USB库
('dll/femtobolt/bin/live555.dll', 'dll/femtobolt/bin'), # Live555库
('dll/femtobolt/bin/OrbbecSDKConfig_v1.0.xml', 'dll/femtobolt/bin'), # Orbbec配置文件 ('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSense传感器库
('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSenseUsb库
# FemtoBolt相关库文件
('dll/femtobolt/k4a.dll', 'dll/femtobolt'), # K4A动态库
('dll/femtobolt/k4arecord.dll', 'dll/femtobolt'), # K4A录制库
('dll/femtobolt/depthengine_2_0.dll', 'dll/femtobolt'), # 深度引擎
('dll/femtobolt/OrbbecSDK.dll', 'dll/femtobolt'), # Orbbec SDK
('dll/femtobolt/k4a.lib', 'dll/femtobolt'), # K4A静态库
('dll/femtobolt/k4arecord.lib', 'dll/femtobolt'), # K4A录制静态库
('dll/femtobolt/k4arecorder.exe', 'dll/femtobolt'), # K4A录制工具
('dll/femtobolt/k4aviewer.exe', 'dll/femtobolt'), # K4A查看器
# SMiTSense相关库文件
('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSense传感器库
('dll/smitsense/Wrapper.dll', 'dll/smitsense'), # SMiTSense传感器库包装类
],
hiddenimports=[
@ -38,6 +41,13 @@ a = Analysis(
'base64',
'psutil',
'pykinect_azure',
'pykinect_azure.k4a',
'pykinect_azure.k4abt',
'pykinect_azure.k4arecord',
'pykinect_azure.pykinect',
'pykinect_azure.utils',
'pykinect_azure._k4a',
'pykinect_azure._k4abt',
'pyserial',
'requests',
'yaml',

View File

@ -33,16 +33,19 @@ block_cipher = None
a = Analysis(
['main.py'],
pathex=[],
pathex=['D:/Trae_space/pyKinectAzure'],
binaries=[
('dll/femtobolt/bin/k4a.dll', 'dll/femtobolt/bin'), # K4A动态库
('dll/femtobolt/bin/k4arecord.dll', 'dll/femtobolt/bin'), # K4A录制库
('dll/femtobolt/bin/depthengine_2_0.dll', 'dll/femtobolt/bin'), # 深度引擎
('dll/femtobolt/bin/OrbbecSDK.dll', 'dll/femtobolt/bin'), # Orbbec SDK
('dll/femtobolt/bin/ob_usb.dll', 'dll/femtobolt/bin'), # Orbbec USB库
('dll/femtobolt/bin/live555.dll', 'dll/femtobolt/bin'), # Live555库
('dll/femtobolt/bin/OrbbecSDKConfig_v1.0.xml', 'dll/femtobolt/bin'), # Orbbec配置文件 ('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSense传感器库
('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSenseUsb库
# FemtoBolt相关库文件
('dll/femtobolt/k4a.dll', 'dll/femtobolt'), # K4A动态库
('dll/femtobolt/k4arecord.dll', 'dll/femtobolt'), # K4A录制库
('dll/femtobolt/depthengine_2_0.dll', 'dll/femtobolt'), # 深度引擎
('dll/femtobolt/OrbbecSDK.dll', 'dll/femtobolt'), # Orbbec SDK
('dll/femtobolt/k4a.lib', 'dll/femtobolt'), # K4A静态库
('dll/femtobolt/k4arecord.lib', 'dll/femtobolt'), # K4A录制静态库
('dll/femtobolt/k4arecorder.exe', 'dll/femtobolt'), # K4A录制工具
('dll/femtobolt/k4aviewer.exe', 'dll/femtobolt'), # K4A查看器
# SMiTSense相关库文件
('dll/smitsense/SMiTSenseUsb-F3.0.dll', 'dll/smitsense'), # SMiTSense传感器库
('dll/smitsense/Wrapper.dll', 'dll/smitsense'), # SMiTSense传感器库包装类
],
hiddenimports=[
@ -66,6 +69,13 @@ a = Analysis(
'base64',
'psutil',
'pykinect_azure',
'pykinect_azure.k4a',
'pykinect_azure.k4abt',
'pykinect_azure.k4arecord',
'pykinect_azure.pykinect',
'pykinect_azure.utils',
'pykinect_azure._k4a',
'pykinect_azure._k4abt',
'pyserial',
'requests',
'yaml',
@ -266,6 +276,30 @@ def copy_config_files():
else:
print(f"⚠️ 配置文件不存在: {config_file}")
def install_build_dependencies():
    """安装打包依赖"""
    # Best-effort: every path returns True so the packaging pipeline keeps
    # going even when pip fails or is unavailable.
    print("检查并安装打包依赖...")
    pip_cmd = [sys.executable, '-m', 'pip', 'install', '-r', 'requirements_build.txt']
    try:
        print(f"执行命令: {' '.join(pip_cmd)}")
        completed = subprocess.run(
            pip_cmd, capture_output=True, text=True, encoding='utf-8', errors='ignore'
        )
    except Exception as e:
        print(f"⚠️ 依赖安装失败: {e}")
        print("继续打包过程...")
        return True
    if completed.returncode == 0:
        print("✓ 依赖安装成功!")
    else:
        print(f"⚠️ 依赖安装警告: {completed.stderr}")
        print("继续打包过程...")
    return True
def main():
"""主函数"""
print("=" * 60)
@ -280,6 +314,10 @@ def main():
input("按回车键退出...")
return
# 安装打包依赖
install_build_dependencies()
print()
try:
# 清理构建目录
clean_build_dirs()

View File

@ -21,9 +21,10 @@ height = 720
fps = 30
[FEMTOBOLT]
algorithm_type = plt
color_resolution = 1080P
depth_mode = NFOV_UNBINNED
fps = 30
depth_mode = NFOV_2X2BINNED
camera_fps = 15
depth_range_min = 1200
depth_range_max = 1500

View File

@ -276,7 +276,7 @@ class DatabaseManager:
diagnosis_info TEXT, -- 诊断信息
treatment_info TEXT, -- 处理信息
suggestion_info TEXT, -- 建议信息
status TEXT DEFAULT 'created', -- 会话状态created/running/diagnosed/completed)
status TEXT DEFAULT 'created', -- 会话状态created/running/checked/diagnosed/completed)
created_at TIMESTAMP, -- 记录创建时间
FOREIGN KEY (patient_id) REFERENCES patients (id), -- 患者表外键约束
FOREIGN KEY (creator_id) REFERENCES users (id) -- 用户表外键约束
@ -668,14 +668,29 @@ class DatabaseManager:
cursor = conn.cursor()
try:
if status in ['completed', 'stopped', 'error']:
# 首先获取会话对应的患者ID
cursor.execute('SELECT patient_id FROM detection_sessions WHERE id = ?', (session_id,))
result = cursor.fetchone()
if not result:
logger.error(f'会话不存在: {session_id}')
return False
patient_id = result[0]
china_time = self.get_china_time()
if status in ['checked', 'stopped','complated', 'error']:
# 使用中国时区时间
china_time = self.get_china_time()
cursor.execute('''
UPDATE detection_sessions SET
status = ?, end_time = ?
WHERE id = ?
''', (status, china_time, session_id))
# 同步更新患者表的updated_at时间
cursor.execute('''
UPDATE patients SET updated_at = ?
WHERE id = ?
''', (china_time, patient_id))
else:
cursor.execute('''
UPDATE detection_sessions SET
@ -683,8 +698,10 @@ class DatabaseManager:
WHERE id = ?
''', (status, session_id))
conn.commit()
logger.info(f'更新会话状态: {session_id} -> {status}')
logger.info(f'更新会话状态: {session_id} -> {status},同时更新患者 {patient_id} 的updated_at时间')
return True
except Exception as e:

View File

@ -128,6 +128,16 @@ class BaseDevice(ABC):
"""
pass
@abstractmethod
def reload_config(self) -> bool:
    """
    Reload the device configuration from the configuration source.

    Concrete device managers re-read their section of the config and apply
    it to the live instance.

    Returns:
        bool: True if the configuration was reloaded successfully.
    """
    pass
def set_socketio(self, socketio):
"""
设置SocketIO实例

View File

@ -101,7 +101,10 @@ class CameraManager(BaseDevice):
bool: 初始化是否成功
"""
try:
self.logger.info(f"正在初始化相机设备 {self.device_index}...")
self.logger.info(f"正在初始化相机设备...")
# 使用构造函数中已加载的配置,避免并发读取配置文件
self.logger.info(f"使用已加载配置: device_index={self.device_index}, resolution={self.width}x{self.height}, fps={self.fps}")
# 尝试多个后端Windows下优先MSMF/DShow
backends = [cv2.CAP_MSMF, cv2.CAP_DSHOW, cv2.CAP_ANY]
@ -563,6 +566,52 @@ class CameraManager(BaseDevice):
except Exception as e:
self.logger.error(f"断开相机连接失败: {e}")
def reload_config(self) -> bool:
    """
    Re-read the camera section of the configuration and apply it in place.

    Returns:
        bool: True when every setting was applied, False on any error.
    """
    try:
        self.logger.info("正在重新加载相机配置...")
        cfg = self.config_manager.get_device_config('camera')
        # Scalar settings, with the same defaults used at construction time.
        self.device_index = cfg.get('device_index', 0)
        self.width = cfg.get('width', 1280)
        self.height = cfg.get('height', 720)
        self.fps = cfg.get('fps', 30)
        self.buffer_size = cfg.get('buffer_size', 1)
        self.fourcc = cfg.get('fourcc', 'MJPG')
        self._tx_max_width = int(cfg.get('tx_max_width', 640))
        # Resize the frame cache only when the configured length changed:
        # drain the old queue, then swap in a fresh one.
        cache_len = int(cfg.get('frame_cache_len', 2))
        if cache_len != self.frame_cache.maxsize:
            while True:
                try:
                    self.frame_cache.get_nowait()
                except queue.Empty:
                    break
            self.frame_cache = queue.Queue(maxsize=cache_len)
        # Device id tracks the (possibly new) index.
        self.device_id = f"camera_{self.device_index}"
        self.logger.info(f"相机配置重新加载成功 - 设备索引: {self.device_index}, 分辨率: {self.width}x{self.height}, FPS: {self.fps}")
        return True
    except Exception as e:
        self.logger.error(f"重新加载相机配置失败: {e}")
        return False
def cleanup(self):
"""
清理资源

View File

@ -17,6 +17,11 @@ import logging
from collections import deque
import gc
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.pyplot as plt
import matplotlib
from scipy import ndimage
from scipy.interpolate import griddata
import io
try:
from .base_device import BaseDevice
@ -69,9 +74,11 @@ class FemtoBoltManager(BaseDevice):
self.sdk_initialized = False
# 设备配置
self.algorithm_type = self.config.get('algorithm_type', 'opencv')
self.color_resolution = self.config.get('color_resolution', '1080P')
self.depth_mode = self.config.get('depth_mode', 'NFOV_UNBINNED')
self.fps = self.config.get('fps', 15)
self.depth_mode = self.config.get('depth_mode', 'NFOV_2X2BINNED')
self.color_format = self.config.get('color_format', 'COLOR_BGRA32')
self.fps = self.config.get('camera_fps', 20)
self.depth_range_min = self.config.get('depth_range_min', 500)
self.depth_range_max = self.config.get('depth_range_max', 4500)
self.synchronized_images_only = self.config.get('synchronized_images_only', False)
@ -105,7 +112,7 @@ class FemtoBoltManager(BaseDevice):
self._last_send_time = 0
# 编码参数缓存(避免每帧创建数组)
self._encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), int(self.config.get('jpeg_quality', 80))]
self._encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), int(self.config.get('jpeg_quality', 60))]
# 预计算伽马LUT避免每帧计算
self._gamma_lut = None
@ -118,16 +125,223 @@ class FemtoBoltManager(BaseDevice):
# 自定义彩虹色 colormap参考testfemtobolt.py
colors = ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
self.custom_cmap = LinearSegmentedColormap.from_list("custom_cmap", colors)
self.logger.info("FemtoBolt管理器初始化完成")
# 设置matplotlib为非交互模式
matplotlib.use('Agg')
# 创建matplotlib图形对象复用以提高性能
self.fig, self.ax = plt.subplots(figsize=(7, 7))
self.ax.set_aspect('equal')
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
self.logger.info(f"FemtoBolt设备配置完成 - 算法类型: {self.algorithm_type}, 深度模式: {self.depth_mode}, FPS: {self.fps}")
def _update_gamma_lut(self):
"""更新伽马校正查找表"""
if self._current_gamma != self.gamma_value:
self._gamma_lut = np.array([((i / 255.0) ** self.gamma_value) * 255 for i in range(256)]).astype("uint8")
self._gamma_lut = np.array([((i / 255.0) ** (1.0 / self.gamma_value)) * 255 for i in range(256)], dtype=np.uint8)
self._current_gamma = self.gamma_value
def _generate_contour_image_opencv(self, depth):
    """Generate a contour-style rendering of a depth frame with OpenCV.

    Pixels outside the 500-1100 mm band are discarded, the rest are bucketed
    into 200 contour levels coloured via self.custom_cmap, drawn over a grey
    gridded background, edge-enhanced, centre-cropped to width = height // 2
    and finally USM-sharpened.

    NOTE(review): the 500/1100 thresholds are hard-coded here rather than
    taken from self.depth_range_min/max — confirm that is intended.

    Args:
        depth: 2-D depth array in millimetres (not modified; a copy is used).

    Returns:
        BGR uint8 image, or None on failure.
    """
    try:
        # Work on a copy; zero out everything outside the 500-1100 mm band.
        depth_filtered = depth.copy()
        depth_filtered[depth_filtered > 1100] = 0
        depth_filtered[depth_filtered < 500] = 0
        # Output canvas: grey simulating a 0.5-grey background at alpha 0.3.
        height, width = depth_filtered.shape
        background_gray = int(0.5 * 255 * 0.3 + 255 * (1 - 0.3))
        output = np.ones((height, width, 3), dtype=np.uint8) * background_gray
        # White grid lines with an adaptive spacing (matches the plt grid).
        grid_spacing = max(height // 20, width // 20, 10)
        for x in range(0, width, grid_spacing):
            cv2.line(output, (x, 0), (x, height-1), (255, 255, 255), 1)
        for y in range(0, height, grid_spacing):
            cv2.line(output, (0, y), (width-1, y), (255, 255, 255), 1)
        # Only pixels that survived the band filter are coloured.
        valid_mask = depth_filtered > 0
        if np.any(valid_mask):
            # NOTE(review): depth_for_contour is computed but never used below.
            depth_for_contour = depth_filtered.copy().astype(np.float32)
            depth_for_contour[~valid_mask] = np.nan
            # 200 contour levels across the 500-1100 mm band.
            levels = np.linspace(500, 1100, 201)
            contour_edges = np.zeros((height, width), dtype=np.uint8)
            for i in range(len(levels) - 1):
                level_min = levels[i]
                level_max = levels[i + 1]
                # Pixels belonging to this contour band.
                level_mask = (depth_filtered >= level_min) & (depth_filtered < level_max)
                if np.any(level_mask):
                    # Normalised position in the band, gamma-boosted (0.8)
                    # to lift mid-tone contrast before colormap lookup.
                    color_val = (level_min - 500) / (1100 - 500)
                    color_val = np.clip(color_val, 0, 1)
                    color_val_enhanced = np.power(color_val, 0.8)
                    color = self.custom_cmap(color_val_enhanced)[:3]
                    color_bgr = (np.array(color) * 255).astype(np.uint8)
                    # Colormap yields RGB; write channels swapped for BGR.
                    output[level_mask, 0] = color_bgr[2]  # B
                    output[level_mask, 1] = color_bgr[1]  # G
                    output[level_mask, 2] = color_bgr[0]  # R
                    # Every 10th level contributes a morphological edge
                    # (dilate - erode) to the contour-edge mask.
                    if i % 10 == 0:
                        kernel = np.ones((3, 3), np.uint8)
                        dilated = cv2.dilate(level_mask.astype(np.uint8), kernel, iterations=1)
                        eroded = cv2.erode(level_mask.astype(np.uint8), kernel, iterations=1)
                        edge = dilated - eroded
                        contour_edges = cv2.bitwise_or(contour_edges, edge)
            # Thicken edges slightly, then boost contrast (x1.2) on them.
            if np.any(contour_edges):
                kernel = np.ones((2, 2), np.uint8)
                contour_edges = cv2.dilate(contour_edges, kernel, iterations=1)
                edge_mask = contour_edges > 0
                if np.any(edge_mask):
                    for c in range(3):
                        channel = output[:, :, c].astype(np.float32)
                        channel[edge_mask] = np.clip(channel[edge_mask] * 1.2, 0, 255)
                        output[:, :, c] = channel.astype(np.uint8)
            # Light bilateral denoise only — keeps gradient edges crisp.
            output = cv2.bilateralFilter(output, 3, 20, 20)
        # Centre-crop to a 1:2 (w:h) aspect, same as the plt path.
        target_width = height // 2
        if width > target_width:
            left = (width - target_width) // 2
            right = left + target_width
            output = output[:, left:right]
        # Unsharp-mask sharpening to restore contour detail.
        gaussian = cv2.GaussianBlur(output, (0, 0), 1.0)
        output = cv2.addWeighted(output, 1.5, gaussian, -0.5, 0)
        output = np.clip(output, 0, 255).astype(np.uint8)
        return output
    except Exception as e:
        self.logger.error(f"优化等高线生成失败: {e}")
        return None
def _create_grid_background(self, height, width):
    """Build and cache a mid-grey canvas with a white 50-px grid.

    The result is stored on the instance (self._grid_bg / self._grid_size)
    so the streaming loop can reuse it instead of redrawing every frame.
    """
    step = 50
    white = (255, 255, 255)
    canvas = np.full((height, width, 3), 128, dtype=np.uint8)
    for col in range(0, width, step):
        cv2.line(canvas, (col, 0), (col, height - 1), white, 1)
    for row in range(0, height, step):
        cv2.line(canvas, (0, row), (width - 1, row), white, 1)
    self._grid_bg = canvas
    self._grid_size = (height, width)
def _generate_contour_image_plt(self, depth):
    """Render the depth frame as a filled-contour image via matplotlib.

    Mirrors display_x.py: depth values outside [500, 1100] mm are zeroed,
    zeros are masked out, and the remaining surface is drawn with
    self.custom_cmap over a grey gridded background.  The figure is
    rasterized to PNG in memory, decoded with OpenCV, and centre-cropped
    to width = height // 2.

    Args:
        depth: 2-D depth array in millimetres.  NOTE(review): it is
            filtered in place, so the caller's array is modified.

    Returns:
        BGR uint8 image, or None on failure.
    """
    try:
        # Reuse the cached figure; just wipe the previous frame's axes.
        self.ax.clear()
        # Keep only the 500-1100 mm band (same thresholds as display_x.py).
        depth[depth > 1100] = 0
        depth[depth < 500] = 0
        # Grey backdrop; masking zeros keeps filtered pixels grey.
        background = np.ones_like(depth) * 0.5
        depth = np.ma.masked_equal(depth, 0)
        self.ax.imshow(background, origin='lower', cmap='gray', alpha=0.3)
        # White major + minor grid lines behind the contours.
        self.ax.grid(True, which='both', axis='both', color='white', linestyle='-', linewidth=0.5, zorder=0)
        self.ax.minorticks_on()
        self.ax.grid(True, which='minor', axis='both', color='white', linestyle='-', linewidth=0.3, zorder=0)
        self.ax.contourf(depth, levels=100, cmap=self.custom_cmap, vmin=500, vmax=1100, origin='upper', zorder=2)
        # Rasterize to PNG in memory, then decode with OpenCV.
        # (The duplicated perf_counter timing scaffolding from the previous
        # revision was dead code — its results were only used by
        # commented-out log lines — and has been removed.)
        buf = io.BytesIO()
        self.fig.savefig(buf, format='png', bbox_inches='tight', pad_inches=0, dpi=75)
        buf.seek(0)
        img_array = np.frombuffer(buf.getvalue(), dtype=np.uint8)
        buf.close()
        img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
        if img is None:
            self.logger.error("无法解码matplotlib生成的PNG图像")
            return None
        # Centre-crop to a 1:2 (w:h) aspect, matching the opencv path.
        height, width = img.shape[:2]
        target_width = round(height // 2)
        if width > target_width:
            left = (width - target_width) // 2
            right = left + target_width
            img = img[:, left:right]
        return img
    except Exception as e:
        self.logger.error(f"生成等高线图像失败: {e}")
        return None
def initialize(self) -> bool:
"""
@ -139,6 +353,9 @@ class FemtoBoltManager(BaseDevice):
try:
self.logger.info("正在初始化FemtoBolt设备...")
# 使用构造函数中已加载的配置,避免并发读取配置文件
self.logger.info(f"使用已加载配置: algorithm_type={self.algorithm_type}, fps={self.fps}, depth_mode={self.depth_mode}")
# 初始化SDK
if not self._initialize_sdk():
raise Exception("SDK初始化失败")
@ -155,7 +372,7 @@ class FemtoBoltManager(BaseDevice):
self.device_info.update({
'color_resolution': self.color_resolution,
'depth_mode': self.depth_mode,
'fps': self.fps,
'camera_fps': self.fps,
'depth_range': f"{self.depth_range_min}-{self.depth_range_max}mm"
})
@ -183,10 +400,9 @@ class FemtoBoltManager(BaseDevice):
real_pykinect = pykinect
self.logger.info("成功导入pykinect_azure库")
except ImportError as e:
self.logger.warning(f"无法导入pykinect_azure库使用模拟模式: {e}")
self.pykinect = self._create_mock_pykinect()
self.sdk_initialized = True
return True
self.logger.error(f"无法导入pykinect_azure库: {e}")
self.sdk_initialized = False
return False
# 查找并初始化SDK路径
sdk_initialized = False
@ -205,8 +421,9 @@ class FemtoBoltManager(BaseDevice):
continue
if not sdk_initialized:
self.logger.info('未找到真实SDK使用模拟模式')
self.pykinect = self._create_mock_pykinect()
self.logger.error('未找到真实SDK初始化失败')
self.sdk_initialized = False
return False
self.sdk_initialized = True
return True
@ -221,84 +438,12 @@ class FemtoBoltManager(BaseDevice):
if platform.system() == "Windows":
# 优先使用Orbbec SDK K4A Wrapper与azure_kinect_image_example.py一致
base_dir = os.path.dirname(os.path.abspath(__file__))
dll_path = os.path.join(base_dir,"..", "dll","femtobolt","bin", "k4a.dll")
dll_path = os.path.join(base_dir,"..", "dll","femtobolt", "k4a.dll")
self.logger.info(f"FemtoBolt SDK路径: {dll_path}")
sdk_paths.append(dll_path)
return sdk_paths
def _create_mock_pykinect(self):
    """
    Create a mock pykinect_azure object for testing without hardware.

    Returns:
        A pykinect-like object whose start_device() yields a device that
        produces synthetic 480x640 depth frames containing a rough human
        silhouette.
    """
    class MockPyKinect:
        def __init__(self):
            self.default_configuration = self._create_mock_config()

        def initialize_libraries(self, track_body=False, module_k4a_path=None):
            # No-op: the real call loads the K4A native libraries.
            pass

        def start_device(self, config=None):
            return MockDevice()

        def _create_mock_config(self):
            class MockConfig:
                def __init__(self):
                    # Defaults mirroring pykinect's default_configuration.
                    self.depth_mode = 'NFOV_UNBINNED'
                    self.camera_fps = 15
                    self.synchronized_images_only = False
                    self.color_resolution = 0
            return MockConfig()

        # Constants mirroring the pykinect_azure module-level names.
        K4A_DEPTH_MODE_NFOV_UNBINNED = 'NFOV_UNBINNED'
        K4A_FRAMES_PER_SECOND_15 = 15

    class MockDevice:
        def __init__(self):
            self.is_started = True

        def update(self):
            return MockCapture()

        def stop(self):
            self.is_started = False

        def close(self):
            pass

    class MockCapture:
        def __init__(self):
            pass

        def get_depth_image(self):
            # Synthetic depth image: flat 2000 mm backdrop...
            height, width = 480, 640
            depth_image = np.full((height, width), 2000, dtype=np.uint16)
            # ...with a crude human silhouette drawn closer to the camera.
            center_x = width // 2
            center_y = height // 2
            # Head
            cv2.circle(depth_image, (center_x, center_y - 100), 40, 1500, -1)
            # Torso
            cv2.rectangle(depth_image, (center_x - 50, center_y - 60),
                          (center_x + 50, center_y + 100), 1600, -1)
            # Arms
            cv2.rectangle(depth_image, (center_x - 80, center_y - 40),
                          (center_x - 50, center_y + 20), 1700, -1)
            cv2.rectangle(depth_image, (center_x + 50, center_y - 40),
                          (center_x + 80, center_y + 20), 1700, -1)
            # Matches the real API's (ret, image) return convention.
            return True, depth_image

        def get_color_image(self):
            return None

    return MockPyKinect()
def _configure_device(self) -> bool:
"""
@ -313,12 +458,12 @@ class FemtoBoltManager(BaseDevice):
# 配置FemtoBolt设备参数
self.femtobolt_config = self.pykinect.default_configuration
self.femtobolt_config.depth_mode = self.pykinect.K4A_DEPTH_MODE_NFOV_UNBINNED
self.femtobolt_config.depth_mode = self.pykinect.K4A_DEPTH_MODE_NFOV_2X2BINNED
self.femtobolt_config.color_format = self.pykinect.K4A_IMAGE_FORMAT_COLOR_BGRA32
self.femtobolt_config.color_resolution = self.pykinect.K4A_COLOR_RESOLUTION_720P
self.femtobolt_config.camera_fps = self.pykinect.K4A_FRAMES_PER_SECOND_15
self.femtobolt_config.synchronized_images_only = False
self.femtobolt_config.color_resolution = 0
self.logger.info(f"FemtoBolt设备配置完成 - 深度模式: {self.depth_mode}, FPS: {self.fps}")
return True
except Exception as e:
@ -336,17 +481,12 @@ class FemtoBoltManager(BaseDevice):
# 启动FemtoBolt设备
self.logger.info(f'尝试启动FemtoBolt设备...')
if hasattr(self.pykinect, 'start_device'):
# 真实设备模式
self.device_handle = self.pykinect.start_device(config=self.femtobolt_config)
if self.device_handle:
self.logger.info('✓ FemtoBolt深度相机初始化成功!')
else:
raise Exception('设备启动返回None')
# 启动真实设备
self.device_handle = self.pykinect.start_device(config=self.femtobolt_config)
if self.device_handle:
self.logger.info('✓ FemtoBolt深度相机初始化成功!')
else:
# 模拟设备模式
self.device_handle = self.pykinect.start_device(config=self.femtobolt_config)
self.logger.info('✓ FemtoBolt深度相机模拟模式启动成功!')
raise Exception('设备启动返回None')
# 等待设备稳定
time.sleep(1.0)
@ -506,52 +646,17 @@ class FemtoBoltManager(BaseDevice):
try:
ret, depth_image = capture.get_depth_image()
if ret and depth_image is not None:
# 确保二维数据
if depth_image.ndim == 3 and depth_image.shape[2] == 1:
depth_image = depth_image[:, :, 0]
rows, cols = depth_image.shape[:2]
# 生成或复用网格背景
if (self._grid_bg is None) or (self._grid_size != (rows, cols)):
bg = np.ones((rows, cols, 3), dtype=np.uint8) * 128
cell_size = 50
grid_color = (255, 255, 255)
grid = np.zeros_like(bg)
for x in range(0, cols, cell_size):
cv2.line(grid, (x, 0), (x, rows), grid_color, 1)
for y in range(0, rows, cell_size):
cv2.line(grid, (0, y), (cols, y), grid_color, 1)
mask_grid = (grid.sum(axis=2) > 0)
bg[mask_grid] = grid[mask_grid]
self._grid_bg = bg
self._grid_size = (rows, cols)
background = self._grid_bg.copy()
# 生成深度掩码,仅保留指定范围内的像素
mask_valid = (depth_image >= self.depth_range_min) & (depth_image <= self.depth_range_max)
depth_clipped = np.clip(depth_image, self.depth_range_min, self.depth_range_max)
normed = (depth_clipped.astype(np.float32) - self.depth_range_min) / (self.depth_range_max - self.depth_range_min)
# 反转映射,保证颜色方向与之前一致
normed = 1.0 - normed
# 应用自定义 colormap将深度值映射到 RGB
rgba = self.custom_cmap(normed)
rgb = (rgba[..., :3] * 255).astype(np.uint8)
# 叠加:在背景上覆盖彩色深度图(掩码处不覆盖,保留灰色背景+网格)
depth_colored_final = background.copy()
depth_colored_final[mask_valid] = rgb[mask_valid]
# 裁剪宽度
height, width = depth_colored_final.shape[:2]
target_width = height // 2
if width > target_width:
left = (width - target_width) // 2
right = left + target_width
depth_colored_final = depth_colored_final[:, left:right]
# 根据配置选择不同的等高线生成方法
if self.algorithm_type == 'plt':
depth_colored_final = self._generate_contour_image_plt(depth_image)
elif self.algorithm_type == 'opencv':
depth_colored_final = self._generate_contour_image_opencv(depth_image)
if depth_colored_final is None:
# 如果等高线生成失败,跳过这一帧
continue
# 推送SocketIO
success, buffer = cv2.imencode('.jpg', depth_colored_final, self._encode_param)
if success and self._socketio:
@ -585,7 +690,7 @@ class FemtoBoltManager(BaseDevice):
except Exception:
pass
else:
time.sleep(0.005)
time.sleep(0.001)
except Exception as e:
self.logger.error(f'FemtoBolt帧推送失败: {e}')
@ -599,87 +704,8 @@ class FemtoBoltManager(BaseDevice):
finally:
self.is_streaming = False
self.logger.info("FemtoBolt流工作线程结束")
def _process_depth_image(self, depth_image) -> np.ndarray:
    """
    Render a depth frame the same way as testfemtobolt.py.

    Pixels within [self.depth_range_min, self.depth_range_max] are coloured
    via self.custom_cmap (direction inverted) over a grey 50-px-grid
    background, then the image is centre-cropped to width = height // 2.

    Args:
        depth_image: 2-D (or HxWx1) depth array in millimetres.

    Returns:
        np.ndarray: BGR-ordered uint8 image; a black 480x640 image on error.
    """
    try:
        if not isinstance(depth_image, np.ndarray):
            self.logger.error(f"输入的深度图像不是numpy数组: {type(depth_image)}")
            return np.zeros((480, 640, 3), dtype=np.uint8)
        # Squeeze a trailing singleton channel so the data is 2-D.
        if depth_image.ndim == 3 and depth_image.shape[2] == 1:
            depth_image = depth_image[:, :, 0]
        h, w = depth_image.shape
        # Grey background with white grid lines every 50 px.
        background = np.full((h, w, 3), 128, dtype=np.uint8)
        for x in range(0, w, 50):
            cv2.line(background, (x, 0), (x, h-1), (255, 255, 255), 1)
        for y in range(0, h, 50):
            cv2.line(background, (0, y), (w-1, y), (255, 255, 255), 1)
        # Mask of pixels inside the configured depth band.
        mask_valid = (depth_image >= self.depth_range_min) & (depth_image <= self.depth_range_max)
        depth_clipped = np.clip(depth_image, self.depth_range_min, self.depth_range_max)
        normed = (depth_clipped.astype(np.float32) - self.depth_range_min) / (self.depth_range_max - self.depth_range_min)
        # Invert so the colour direction matches previous renderings.
        normed = 1.0 - normed
        # Colormap lookup: depth -> RGB bytes.
        rgba = self.custom_cmap(normed)
        rgb = (rgba[..., :3] * 255).astype(np.uint8)
        # Composite: coloured depth over background; masked-out pixels keep
        # the grey + grid backdrop.
        final_img = background.copy()
        final_img[mask_valid] = rgb[mask_valid]
        # Centre-crop to a 1:2 (w:h) aspect.
        height, width = final_img.shape[:2]
        target_width = height // 2
        if width > target_width:
            left = (width - target_width) // 2
            right = left + target_width
            final_img = final_img[:, left:right]
        return final_img
    except Exception as e:
        self.logger.error(f"处理深度图像失败: {e}")
        return np.zeros((480, 640, 3), dtype=np.uint8)
def _send_depth_data(self, depth_image: np.ndarray, color_image: Optional[np.ndarray] = None):
    """
    Encode the frame(s) as JPEG + base64 and emit them over SocketIO.

    Emits a 'femtobolt_frame' event on the '/devices' namespace carrying the
    depth image, frame counters, the configured depth range, and optionally
    a colour image.

    Args:
        depth_image: Rendered depth visualisation to publish.
        color_image: Optional colour frame; included only when provided.
    """
    try:
        # JPEG-encode then base64 so the payload is JSON-safe.
        _, depth_buffer = cv2.imencode('.jpg', depth_image, self._encode_param)
        depth_data = base64.b64encode(memoryview(depth_buffer).tobytes()).decode('utf-8')
        send_data = {
            'timestamp': time.time(),
            'frame_count': self.frame_count,
            'depth_image': depth_data,
            'fps': self.actual_fps,
            'device_id': self.device_id,
            'depth_range': {
                'min': self.depth_range_min,
                'max': self.depth_range_max
            },
            'last_update': time.strftime('%H:%M:%S')
        }
        if color_image is not None:
            _, color_buffer = cv2.imencode('.jpg', color_image, self._encode_param)
            color_data = base64.b64encode(memoryview(color_buffer).tobytes()).decode('utf-8')
            send_data['color_image'] = color_data
        self._socketio.emit('femtobolt_frame', send_data, namespace='/devices')
    except Exception as e:
        self.logger.error(f"发送深度数据失败: {e}")
def _update_statistics(self):
"""
更新性能统计
@ -738,93 +764,7 @@ class FemtoBoltManager(BaseDevice):
})
return status
def capture_body_image(self, save_path: Optional[str] = None) -> Optional[np.ndarray]:
    """
    Capture and render a single body (depth) image.

    Args:
        save_path: Optional file path; when given, the rendered image is
            also written to disk with cv2.imwrite.

    Returns:
        Optional[np.ndarray]: The rendered image, or None on failure.
    """
    capture = None
    try:
        if not self.is_connected or not self.device_handle:
            self.logger.error("FemtoBolt设备未连接")
            return None
        capture = self.device_handle.get_capture()
        if not capture:
            self.logger.error("无法获取FemtoBolt捕获")
            return None
        # BUG FIX: get_depth_image() returns (ret, image) — see the stream
        # worker and the mock capture — the previous code treated the tuple
        # itself as the image.
        ret, depth_image = capture.get_depth_image()
        if not ret or depth_image is None:
            self.logger.error("无法获取深度图像")
            return None
        processed_image = self._process_depth_image(depth_image)
        if save_path:
            cv2.imwrite(save_path, processed_image)
            self.logger.info(f"身体图像已保存到: {save_path}")
        return processed_image
    except Exception as e:
        self.logger.error(f"捕获身体图像异常: {e}")
        return None
    finally:
        # Release the capture on every exit path (including exceptions).
        if capture:
            try:
                capture.release()
            except Exception:
                pass
def get_latest_depth_frame(self) -> Optional[np.ndarray]:
    """Return a defensive copy of the most recent depth frame, or None."""
    frame = self.last_depth_frame
    if frame is None:
        return None
    return frame.copy()
def get_latest_color_frame(self) -> Optional[np.ndarray]:
    """Return a defensive copy of the most recent colour frame, or None."""
    frame = self.last_color_frame
    return None if frame is None else frame.copy()
def collect_body_pose_data(self) -> Optional[Dict[str, Any]]:
    """Collect body-pose data (compatible with the legacy interface).

    No pose-estimation algorithm is wired in yet; when a depth frame is
    available this returns fixed mock keypoints plus a random balance
    score, otherwise None.
    """
    # Guard clause replaces the old `not ... is not None` double negative.
    if self.last_depth_frame is None:
        return None
    # Fixed mock skeleton.
    mock_keypoints = [
        {'name': 'head', 'x': 320, 'y': 100, 'confidence': 0.9},
        {'name': 'neck', 'x': 320, 'y': 150, 'confidence': 0.8},
        {'name': 'left_shoulder', 'x': 280, 'y': 160, 'confidence': 0.7},
        {'name': 'right_shoulder', 'x': 360, 'y': 160, 'confidence': 0.7},
        {'name': 'left_hip', 'x': 300, 'y': 300, 'confidence': 0.6},
        {'name': 'right_hip', 'x': 340, 'y': 300, 'confidence': 0.6}
    ]
    return {
        'timestamp': time.time(),
        'keypoints': mock_keypoints,
        'balance_score': np.random.uniform(0.7, 0.9),
        'center_of_mass': {'x': 320, 'y': 240},
        'device_id': self.device_id
    }
def _cleanup_device(self):
"""
@ -866,6 +806,51 @@ class FemtoBoltManager(BaseDevice):
except Exception as e:
self.logger.error(f"断开FemtoBolt设备连接失败: {e}")
def reload_config(self) -> bool:
    """
    Re-read the FemtoBolt section of the configuration and apply it.

    Returns:
        bool: True when the settings were applied, False on any error.
    """
    try:
        self.logger.info("正在重新加载FemtoBolt配置...")
        cfg = self.config_manager.get_device_config('femtobolt')
        self.config = cfg
        # Apply every scalar setting with its construction-time default.
        for attr, key, default in (
            ('algorithm_type', 'algorithm_type', 'opencv'),
            ('color_resolution', 'color_resolution', '1080P'),
            ('depth_mode', 'depth_mode', 'NFOV_2X2BINNED'),
            ('color_format', 'color_format', 'COLOR_BGRA32'),
            ('fps', 'camera_fps', 20),
            ('depth_range_min', 'depth_range_min', 500),
            ('depth_range_max', 'depth_range_max', 4500),
            ('synchronized_images_only', 'synchronized_images_only', False),
            ('contrast_factor', 'contrast_factor', 1.2),
            ('gamma_value', 'gamma_value', 0.8),
            ('use_pseudo_color', 'use_pseudo_color', True),
        ):
            setattr(self, attr, cfg.get(key, default))
        # Recreate both frame caches only when the size actually changed.
        new_len = cfg.get('frame_cache_size', 10)
        if new_len != self.depth_frame_cache.maxlen:
            self.depth_frame_cache = deque(maxlen=new_len)
            self.color_frame_cache = deque(maxlen=new_len)
        # Gamma may have changed, so refresh the LUT.
        self._update_gamma_lut()
        self.logger.info(f"FemtoBolt配置重新加载成功 - 算法: {self.algorithm_type}, 分辨率: {self.color_resolution}, FPS: {self.fps}")
        return True
    except Exception as e:
        self.logger.error(f"重新加载FemtoBolt配置失败: {e}")
        return False
def cleanup(self):
"""
清理资源
@ -874,6 +859,12 @@ class FemtoBoltManager(BaseDevice):
self.stop_streaming()
self._cleanup_device()
# 清理matplotlib图形对象
if hasattr(self, 'fig') and self.fig is not None:
plt.close(self.fig)
self.fig = None
self.ax = None
self.depth_frame_cache.clear()
self.color_frame_cache.clear()
self.last_depth_frame = None

View File

@ -307,6 +307,9 @@ class IMUManager(BaseDevice):
try:
self.logger.info(f"正在初始化IMU设备...")
# 使用构造函数中已加载的配置,避免并发读取配置文件
self.logger.info(f"使用已加载配置: port={self.port}, baudrate={self.baudrate}, device_type={self.device_type}")
# 根据配置选择真实设备或模拟设备
# 优先使用device_type配置如果没有则使用use_mock配置向后兼容
use_real_device = (self.device_type == 'real') or (not self.use_mock)
@ -586,6 +589,45 @@ class IMUManager(BaseDevice):
except Exception as e:
self.logger.error(f"断开IMU设备连接失败: {e}")
def reload_config(self) -> bool:
    """
    Re-read the IMU section of the configuration and apply it.

    Returns:
        bool: True when the settings were applied, False on any error.
    """
    try:
        self.logger.info("正在重新加载IMU配置...")
        cfg = self.config_manager.get_device_config('imu')
        # Serial-link settings (defaults match the constructor).
        self.port = cfg.get('port', 'COM7')
        self.baudrate = cfg.get('baudrate', 9600)
        self.device_type = cfg.get('device_type', 'mock')
        self.use_mock = cfg.get('use_mock', False)
        # Resize the sample buffer only when requested.  deque(iterable,
        # maxlen=n) keeps the newest n samples — identical to the old
        # copy-and-truncate logic.
        wanted = cfg.get('buffer_size', 100)
        if wanted != self.data_buffer.maxlen:
            self.data_buffer = deque(self.data_buffer, maxlen=wanted)
        self.logger.info(f"IMU配置重新加载成功 - 端口: {self.port}, 波特率: {self.baudrate}, 设备类型: {self.device_type}")
        return True
    except Exception as e:
        self.logger.error(f"重新加载IMU配置失败: {e}")
        return False
def cleanup(self):
"""
清理资源

View File

@ -738,7 +738,10 @@ class PressureManager(BaseDevice):
bool: 初始化是否成功
"""
try:
self.logger.info(f"正在初始化压力板设备 - 类型: {self.device_type}")
self.logger.info(f"正在初始化压力板设备...")
# 使用构造函数中已加载的配置,避免并发读取配置文件
self.logger.info(f"使用已加载配置: device_type={self.device_type}, stream_interval={self.stream_interval}")
# 根据设备类型创建设备实例
if self.device_type == 'real':
@ -970,6 +973,31 @@ class PressureManager(BaseDevice):
self.logger.error(f"断开压力板设备连接失败: {e}")
return False
def reload_config(self) -> bool:
    """
    Re-read the pressure-plate section of the configuration and apply it.

    Returns:
        bool: True when the settings were applied, False on any error.
    """
    try:
        self.logger.info("正在重新加载压力板配置...")
        fresh = self.config_manager.get_device_config('pressure')
        # Keep the raw dict, then pull out the two live settings.
        self.config = fresh
        self.device_type = fresh.get('device_type', 'mock')
        self.stream_interval = fresh.get('stream_interval', 0.1)
        self.logger.info(f"压力板配置重新加载成功 - 设备类型: {self.device_type}, 流间隔: {self.stream_interval}")
        return True
    except Exception as e:
        self.logger.error(f"重新加载压力板配置失败: {e}")
        return False
def cleanup(self) -> None:
"""清理资源"""
try:

File diff suppressed because it is too large Load Diff

View File

@ -19,13 +19,18 @@ device_index = 1
width = 1280
height = 720
fps = 30
buffer_size = 1
fourcc = MJPG
[FEMTOBOLT]
algorithm_type = opencv
color_resolution = 1080P
depth_mode = NFOV_UNBINNED
fps = 30
depth_range_min = 1400
depth_range_max = 1700
depth_mode = NFOV_2X2BINNED
camera_fps = 20
depth_range_min = 1000
depth_range_max = 1400
fps = 15
synchronized_images_only = False
[DEVICES]
imu_device_type = real

View File

@ -184,9 +184,10 @@ class ConfigManager:
Dict[str, Any]: FemtoBolt配置
"""
return {
'algorithm_type': self.config.get('FEMTOBOLT', 'algorithm_type', fallback='opencv'),
'color_resolution': self.config.get('FEMTOBOLT', 'color_resolution', fallback='1080P'),
'depth_mode': self.config.get('FEMTOBOLT', 'depth_mode', fallback='NFOV_UNBINNED'),
'fps': self.config.getint('FEMTOBOLT', 'fps', fallback=15),
'camera_fps': self.config.getint('FEMTOBOLT', 'camera_fps', fallback=15),
'depth_range_min': self.config.getint('FEMTOBOLT', 'depth_range_min', fallback=500),
'depth_range_max': self.config.getint('FEMTOBOLT', 'depth_range_max', fallback=4500),
'synchronized_images_only': self.config.getboolean('FEMTOBOLT', 'synchronized_images_only', fallback=False)
@ -489,12 +490,14 @@ class ConfigManager:
"""
try:
# 验证必需参数
if 'algorithm_type' in config_data:
self.set_config_value('FEMTOBOLT', 'algorithm_type', config_data['algorithm_type'])
if 'color_resolution' in config_data:
self.set_config_value('FEMTOBOLT', 'color_resolution', config_data['color_resolution'])
if 'depth_mode' in config_data:
self.set_config_value('FEMTOBOLT', 'depth_mode', config_data['depth_mode'])
if 'fps' in config_data:
self.set_config_value('FEMTOBOLT', 'fps', str(config_data['fps']))
if 'camera_fps' in config_data:
self.set_config_value('FEMTOBOLT', 'camera_fps', str(config_data['camera_fps']))
if 'depth_range_min' in config_data:
self.set_config_value('FEMTOBOLT', 'depth_range_min', str(config_data['depth_range_min']))
if 'depth_range_max' in config_data:
@ -530,6 +533,153 @@ class ConfigManager:
'femtobolt': self.get_device_config('femtobolt')
}
def _batch_update_device_configs(self, configs: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
    """
    批量更新设备配置(内部方法)

    Applies each device's recognised options to the config store via
    ``set_config_value`` and saves ONCE at the end, instead of once per
    device. The four per-device branches of the original implementation
    were near-identical, so they are table-driven here: each spec lists
    (device key, Chinese label for messages, ini section, field mappings).

    Args:
        configs: 所有设备配置数据, keyed by device name
            ('imu', 'pressure', 'camera', 'femtobolt').

    Returns:
        Dict[str, Any]: ``{'results': ..., 'errors': ...}`` — per-device
        result dicts with ``success``/``message`` (plus the echoed
        ``config`` on success) and a flat list of error strings.
    """
    results = {}
    errors = []
    try:
        # Field mapping: (key in config_data, ini option name, str() needed).
        # Non-string values (numbers, booleans) must be stringified because
        # configparser only stores strings.
        specs = [
            ('imu', 'IMU', 'DEVICES', [
                ('device_type', 'imu_device_type', False),
                ('use_mock', 'imu_use_mock', True),
                ('port', 'imu_port', False),
                ('baudrate', 'imu_baudrate', True),
            ]),
            ('pressure', '压力板', 'DEVICES', [
                ('device_type', 'pressure_device_type', False),
                ('use_mock', 'pressure_use_mock', True),
                ('port', 'pressure_port', False),
                ('baudrate', 'pressure_baudrate', True),
            ]),
            ('camera', '相机', 'CAMERA', [
                ('device_index', 'device_index', True),
                ('width', 'width', True),
                ('height', 'height', True),
                ('fps', 'fps', True),
                ('buffer_size', 'buffer_size', True),
                ('fourcc', 'fourcc', False),
                ('tx_max_width', 'tx_max_width', True),
            ]),
            ('femtobolt', 'FemtoBolt', 'FEMTOBOLT', [
                ('algorithm_type', 'algorithm_type', False),
                ('color_resolution', 'color_resolution', False),
                ('depth_mode', 'depth_mode', False),
                ('color_format', 'color_format', False),
                ('camera_fps', 'camera_fps', True),
                ('depth_range_min', 'depth_range_min', True),
                ('depth_range_max', 'depth_range_max', True),
                ('synchronized_images_only', 'synchronized_images_only', True),
                ('send_fps', 'send_fps', True),
            ]),
        ]
        for device, label, section, fields in specs:
            if device not in configs:
                continue
            config_data = configs[device]
            try:
                # Only keys present in the payload are written; absent keys
                # leave the stored value untouched.
                for key, option, needs_str in fields:
                    if key in config_data:
                        value = config_data[key]
                        self.set_config_value(section, option, str(value) if needs_str else value)
                results[device] = {
                    'success': True,
                    'message': f'{label}配置更新成功',
                    'config': config_data
                }
                self.logger.info(f"{label}配置已更新: {config_data}")
            except Exception as e:
                error_msg = f'设置{label}配置失败: {str(e)}'
                results[device] = {'success': False, 'message': error_msg}
                errors.append(f"{label}: {error_msg}")
                self.logger.error(error_msg)

        # 一次性保存所有配置 — only persist when something was updated.
        if results:
            self.save_config()
            self.logger.info("所有设备配置已批量保存")

        return {
            'results': results,
            'errors': errors
        }
    except Exception as e:
        self.logger.error(f"批量更新设备配置失败: {e}")
        return {
            'results': results,
            'errors': [f"批量更新失败: {str(e)}"]
        }
def set_all_device_configs(self, configs: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
"""
批量设置所有设备配置
@ -550,31 +700,12 @@ class ConfigManager:
results = {}
errors = []
# 逐个设置每个设备的配置
if 'imu' in configs:
result = self.set_imu_config(configs['imu'])
results['imu'] = result
if not result['success']:
errors.append(f"IMU: {result['message']}")
if 'pressure' in configs:
result = self.set_pressure_config(configs['pressure'])
results['pressure'] = result
if not result['success']:
errors.append(f"压力板: {result['message']}")
if 'camera' in configs:
result = self.set_camera_config(configs['camera'])
results['camera'] = result
if not result['success']:
errors.append(f"相机: {result['message']}")
if 'femtobolt' in configs:
result = self.set_femtobolt_config(configs['femtobolt'])
results['femtobolt'] = result
if not result['success']:
errors.append(f"FemtoBolt: {result['message']}")
# 批量更新所有设备配置
result = self._batch_update_device_configs(configs)
results = result['results']
errors = result['errors']
# 参数保存后,重新加载一下参数
self.reload_config()
# 如果有错误,返回部分成功的结果
if errors:
self.logger.warning(f"部分设备配置设置失败: {'; '.join(errors)}")

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -32,29 +32,6 @@ from devices.femtobolt_manager import FemtoBoltManager
from devices.device_coordinator import DeviceCoordinator
from devices.screen_recorder import RecordingManager
from devices.utils.config_manager import ConfigManager
# # 导入设备管理器
# try:
# from devices.camera_manager import CameraManager
# from devices.imu_manager import IMUManager
# from devices.pressure_manager import PressureManager
# from devices.femtobolt_manager import FemtoBoltManager
# from devices.device_coordinator import DeviceCoordinator
# from devices.screen_recorder import RecordingManager
# from devices.utils.config_manager import ConfigManager
# except ImportError:
# # 如果上面的导入失败,尝试直接导入
# # from camera_manager import CameraManager
# import imu_manager
# import pressure_manager
# import femtobolt_manager
# import device_coordinator
# from utils import config_manager
# IMUManager = imu_manager.IMUManager
# PressureManager = pressure_manager.PressureManager
# FemtoBoltManager = femtobolt_manager.FemtoBoltManager
# DeviceCoordinator = device_coordinator.DeviceCoordinator
# ConfigManager = config_manager.ConfigManager
class AppServer:
@ -149,7 +126,6 @@ class AppServer:
self.app,
cors_allowed_origins='*',
async_mode='threading',
#async_mode='eventlet',
logger=False,
engineio_logger=False,
ping_timeout=60,
@ -298,7 +274,15 @@ class AppServer:
# 返回文件
from flask import send_file
return send_file(file_path)
# 为视频文件设置正确的MIME类型和响应头
if file_path.lower().endswith(('.mp4', '.webm', '.avi', '.mov')):
response = send_file(file_path, mimetype='video/mp4')
# 添加支持视频流播放的响应头
response.headers['Accept-Ranges'] = 'bytes'
response.headers['Content-Type'] = 'video/mp4'
return response
else:
return send_file(file_path)
except Exception as e:
self.logger.error(f'静态文件服务错误: {e}')
@ -888,6 +872,23 @@ class AppServer:
self.stop_device_push_data()
time.sleep(1) # 等待停止完成
# 为每个设备管理器重新加载配置
self.logger.info("重新加载设备配置...")
reload_results = []
for device_name, manager in self.device_managers.items():
if manager is not None and hasattr(manager, 'reload_config'):
try:
success = manager.reload_config()
reload_results.append(f"{device_name}: {'成功' if success else '失败'}")
self.logger.info(f"{device_name}设备配置重新加载{'成功' if success else '失败'}")
except Exception as e:
reload_results.append(f"{device_name}: 异常 - {str(e)}")
self.logger.error(f"{device_name}设备配置重新加载异常: {e}")
else:
reload_results.append(f"{device_name}: 跳过管理器未初始化或不支持reload_config")
self.logger.info(f"配置重新加载结果: {'; '.join(reload_results)}")
# 重新启动设备数据推送
self.start_device_push_data()
self.logger.info("设备配置更新并重启数据推送完成")
@ -895,7 +896,8 @@ class AppServer:
# 通过SocketIO通知前端重启完成
self.socketio.emit('device_restart_complete', {
'status': 'success',
'message': '设备重启完成'
'message': '设备重启完成',
'reload_results': reload_results
}, namespace='/devices')
except Exception as restart_error:
@ -976,6 +978,11 @@ class AppServer:
data = flask_request.get_json()
patient_id = data.get('patient_id')
creator_id = data.get('creator_id')
screen_location = data.get('screen_location') # [0,0,1920,1080]
camera_location = data.get('camera_location') # [0,0,640,480]
femtobolt_location = data.get('femtobolt_location') # [0,0,640,480]
if not patient_id or not creator_id:
return jsonify({'success': False, 'error': '缺少患者ID或创建人ID'}), 400
@ -985,7 +992,26 @@ class AppServer:
# 开始同步录制
recording_response = None
try:
recording_response = self.recording_manager.start_recording(session_id, patient_id)
recording_response = self.recording_manager.start_recording(session_id, patient_id,screen_location,camera_location,femtobolt_location)
# 处理录制管理器返回的数据库更新信息
if recording_response and recording_response.get('success') and 'database_updates' in recording_response:
db_updates = recording_response['database_updates']
try:
# 更新会话状态
if not self.db_manager.update_session_status(db_updates['session_id'], db_updates['status']):
self.logger.error(f'更新会话状态失败 - 会话ID: {db_updates["session_id"]}, 状态: {db_updates["status"]}')
# 更新视频文件路径
video_paths = db_updates['video_paths']
self.db_manager.update_session_normal_video_path(db_updates['session_id'], video_paths['normal_video_path'])
self.db_manager.update_session_screen_video_path(db_updates['session_id'], video_paths['screen_video_path'])
self.db_manager.update_session_femtobolt_video_path(db_updates['session_id'], video_paths['femtobolt_video_path'])
self.logger.info(f'数据库更新成功 - 会话ID: {db_updates["session_id"]}')
except Exception as db_error:
self.logger.error(f'处理数据库更新失败: {db_error}')
except Exception as rec_e:
self.logger.error(f'开始同步录制失败: {rec_e}')
@ -1009,37 +1035,45 @@ class AppServer:
return jsonify({
'success': False,
'error': '缺少会话ID'
}), 400
}), 400
data = flask_request.get_json()
video_data = data['videoData']
mime_type = data.get('mimeType', 'video/webm;codecs=vp9') # 默认webm格式
import base64
# 验证base64视频数据格式
if not video_data.startswith('data:video/'):
return jsonify({
'success': False,
'message': '无效的视频数据格式'
}), 400
try:
header, encoded = video_data.split(',', 1)
video_bytes = base64.b64decode(encoded)
except Exception as e:
return jsonify({
'success': False,
'message': f'视频数据解码失败: {str(e)}'
}), 400
# 获取请求数据中的duration参数
data = flask_request.get_json() or {}
duration = data.get('duration')
# 如果提供了duration更新到数据库
if duration is not None and isinstance(duration, (int, float)):
try:
self.db_manager.update_session_duration(session_id, int(duration))
self.logger.info(f'更新会话持续时间: {session_id} -> {duration}')
except Exception as duration_error:
self.logger.error(f'更新会话持续时间失败: {duration_error}')
# 停止同步录制,传递视频数据
try:
restrt = self.recording_manager.stop_recording(session_id)
self.logger.info(f'停止录制结果: {restrt}')
# 处理录制管理器返回的数据库更新信息
if restrt and restrt.get('success') and 'database_updates' in restrt:
db_updates = restrt['database_updates']
try:
# 更新会话状态
success = self.db_manager.update_session_status(db_updates['session_id'], db_updates['status'])
self.logger.info(f'会话状态已更新为: {db_updates["status"]} - 会话ID: {db_updates["session_id"]}')
except Exception as db_error:
self.logger.error(f'处理停止录制的数据库更新失败: {db_error}')
success = False
else:
# 如果录制管理器没有返回数据库更新信息,则手动更新
success = self.db_manager.update_session_status(session_id, 'completed')
except Exception as rec_e:
self.logger.error(f'停止同步录制失败: {rec_e}', exc_info=True)
# 即使录制停止失败,也尝试更新数据库状态
success = self.db_manager.update_session_status(session_id, 'completed')
raise
# 更新会话状态为已完成
success = self.db_manager.update_session_status(session_id, 'completed')
if success:
self.logger.info(f'检测会话已停止 - 会话ID: {session_id}')
return jsonify({
@ -1162,6 +1196,7 @@ class AppServer:
# 获取请求数据
data = flask_request.get_json() or {}
# print(f"接收到的data数据: {data}")
patient_id = data.get('patient_id')
# 如果没有提供patient_id从会话信息中获取

View File

@ -11,6 +11,7 @@ scipy
# Computer vision and machine learning
opencv-python
pykinect-azure
# mediapipe # Not compatible with Python 3.13 yet
# torch # May have compatibility issues with Python 3.13
# torchvision # May have compatibility issues with Python 3.13

View File

@ -17,7 +17,7 @@ python-dateutil==2.8.2
PyInstaller>=6.10.0
# Optional - only if available
# pykinect_azure # Comment out if not available
pykinect_azure # Azure Kinect SDK for Python
# System utilities
colorama==0.4.6

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,140 @@
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, ListedColormap
# 设置matplotlib支持中文显示
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans'] # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号
class FemtoBoltDynamicViewer:
    """Real-time contour visualization of FemtoBolt depth frames.

    Depth samples outside [depth_min, depth_max] (millimetres) are blanked,
    and the remaining values are rendered as a filled contour plot over a
    grey background with a white grid, mirroring the display_x.py algorithm.
    """

    def __init__(self, depth_min=900, depth_max=1300):
        # Valid depth window in millimetres; everything outside is discarded.
        self.depth_min = depth_min
        self.depth_max = depth_max
        # Repeating rainbow palette (same scheme as display_x.py) so the
        # contour bands cycle through the colours several times.
        colors = ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
                  'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
                  'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
                  'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
        self.mcmap = LinearSegmentedColormap.from_list("custom_cmap", colors)
        # SDK module, device handle and configuration are created lazily by
        # _load_sdk() / _configure_device() when run() starts.
        self.device_handle = None
        self.pykinect = None
        self.config = None

    def _load_sdk(self):
        """Load and initialise the FemtoBolt SDK via pykinect_azure.

        Uses the k4a.dll shipped in ../dll/femtobolt relative to this file,
        so the viewer works without a system-wide SDK install.
        """
        import pykinect_azure as pykinect
        base_dir = os.path.dirname(os.path.abspath(__file__))
        dll_path = os.path.join(base_dir, "..", "dll", "femtobolt", "k4a.dll")
        self.pykinect = pykinect
        self.pykinect.initialize_libraries(track_body=False, module_k4a_path=dll_path)

    def _configure_device(self):
        """Configure and start the FemtoBolt depth camera (depth stream only)."""
        self.config = self.pykinect.default_configuration
        self.config.depth_mode = self.pykinect.K4A_DEPTH_MODE_NFOV_2X2BINNED
        self.config.color_format = self.pykinect.K4A_IMAGE_FORMAT_COLOR_BGRA32
        self.config.synchronized_images_only = False
        # Fix: the original set color_resolution to 720P and immediately
        # overwrote it with 0; only the effective depth-only setting
        # (colour stream disabled) is kept.
        self.config.color_resolution = 0
        self.device_handle = self.pykinect.start_device(config=self.config)

    def run(self):
        """Run the live depth visualization loop (display_x.py algorithm).

        Blocks until Ctrl+C or a runtime error; always releases the device
        and closes the matplotlib windows on exit.
        """
        self._load_sdk()
        self._configure_device()

        plt.ion()  # interactive mode: one window refreshed in place
        plt.figure(figsize=(7, 7))

        print("FemtoBolt 深度相机启动成功,关闭窗口或 Ctrl+C 退出")
        print(f"深度范围: {self.depth_min} - {self.depth_max} mm")

        try:
            frame_count = 0
            while True:
                capture = self.device_handle.update()
                if capture is None:
                    continue
                ret, depth_image = capture.get_depth_image()
                if not ret or depth_image is None:
                    continue

                # Work on a copy so the SDK's frame buffer is never mutated.
                depth = depth_image.copy()
                # Blank out samples outside the configured depth window.
                depth[depth > self.depth_max] = 0
                depth[depth < self.depth_min] = 0
                # depth = depth[50:200, 50:210]  # optional region-of-interest crop
                # Grey background the contours are drawn over.
                background = np.ones_like(depth) * 0.5
                # Mask zeros so the filtered-out pixels are not plotted at all.
                depth = np.ma.masked_equal(depth, 0)
                plt.imshow(background, origin='lower', cmap='gray', alpha=0.3)
                # White grid at the lowest z-order; contours sit above it (zorder=2).
                plt.grid(True, which='both', axis='both', color='white', linestyle='-', linewidth=1, zorder=0)
                plt.contourf(depth, levels=50, cmap=self.mcmap, vmin=self.depth_min, vmax=self.depth_max, origin='upper', zorder=2)
                # Refresh, then clear the figure for the next frame.
                plt.pause(0.1)
                plt.draw()
                plt.clf()

                frame_count += 1
                if frame_count % 30 == 0:  # progress heartbeat every 30 frames
                    print(f"已处理 {frame_count}")
        except KeyboardInterrupt:
            print("\n检测到退出信号,结束程序")
        except Exception as e:
            print(f"运行时错误: {e}")
        finally:
            # Best-effort shutdown: stop/close the device if the handle
            # exposes those methods, then tear down matplotlib state.
            if self.device_handle:
                try:
                    if hasattr(self.device_handle, 'stop'):
                        self.device_handle.stop()
                    if hasattr(self.device_handle, 'close'):
                        self.device_handle.close()
                except Exception as e:
                    print(f"设备关闭时出现错误: {e}")
            plt.ioff()
            plt.close('all')
            print("程序已安全退出")

    def save_current_frame(self, filename="depth_frame.png"):
        """Save the current matplotlib figure to *filename* (PNG by default)."""
        try:
            plt.savefig(filename, dpi=150, bbox_inches='tight')
            # Fix: report the actual destination path; the original printed a
            # garbled placeholder instead of the file name.
            print(f"当前帧已保存到: {filename}")
        except Exception as e:
            print(f"保存帧失败: {e}")
if __name__ == "__main__":
    # Build the viewer with the depth window used for this test run (mm).
    viewer = FemtoBoltDynamicViewer(depth_min=700, depth_max=1000)

    banner = "=" * 50
    print(banner)
    print("FemtoBolt 深度相机动态可视化测试")
    print("基于 display_x.py 算法的实时成像")
    print(banner)

    # Blocks until the window is closed or Ctrl+C is pressed.
    viewer.run()

View File

@ -0,0 +1,93 @@
import cv2
import pykinect_azure as pykinect
import matplotlib.pyplot as plt
import numpy as np
import pdb
import os
from matplotlib.colors import LinearSegmentedColormap,ListedColormap
from matplotlib.animation import FuncAnimation, FFMpegWriter

# Initialize the library, if the library is not found, add the library path as argument
pykinect.initialize_libraries()

# Modify camera configuration: BGRA colour at 720p, 2x2-binned narrow-FOV depth.
device_config = pykinect.default_configuration
device_config.color_format = pykinect.K4A_IMAGE_FORMAT_COLOR_BGRA32
device_config.color_resolution = pykinect.K4A_COLOR_RESOLUTION_720P
device_config.depth_mode = pykinect.K4A_DEPTH_MODE_NFOV_2X2BINNED

# Start device
device = pykinect.start_device(config=device_config)

# Build a custom colormap (this first list is immediately replaced below).
colors = ['red', 'yellow', 'green', 'blue']
# Custom colours: the rainbow sequence repeats four times so contour bands cycle.
colors = ['fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
          'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
          'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue',
          'fuchsia', 'red', 'yellow', 'lime', 'cyan', 'blue']
mcmap = LinearSegmentedColormap.from_list("custom_cmap", colors)

# Interactive mode keeps one window updating in place, reducing flicker.
plt.ion()
fig, ax = plt.subplots(figsize=(7, 7))

cv2.namedWindow('Transformed color',cv2.WINDOW_NORMAL)
framei = 0  # frame counter (currently unused)
while True:
    # Get capture
    capture = device.update()
    # Get depth image
    depth_ret, depth = capture.get_depth_image()
    # Get the color image in the depth camera axis
    ret_color, color_image = capture.get_color_image()
    if not ret_color or not depth_ret:
        continue
    h,w,_ = color_image.shape
    # Blank out depth samples outside the 500-1100 mm window of interest.
    depth[depth > 1100] = 0
    depth[depth < 500] = 0
    # depth = depth[50:200,50:210]
    # Grey background image the contours are drawn over.
    background = np.ones_like(depth) * 0.5
    # Mask zeros with np.ma.masked_equal(): masked values are never plotted,
    # so the filtered-out pixels stay invisible.
    depth = np.ma.masked_equal(depth, 0)
    # Clear only the axes (not the whole figure) between frames.
    ax.clear()
    # Draw the grey background with the origin at the lower-left corner.
    ax.imshow(background, origin='lower', cmap='gray', alpha=0.3)
    # White grid lines at the lowest z-order (zorder=0) so contours cover them.
    ax.grid(True, which='both', axis='both', color='white', linestyle='-', linewidth=1, zorder=0)
    # Filled contours above the grid (zorder=2); origin='upper' flips vertically.
    ax.contourf(depth, levels=100, cmap=mcmap,vmin=500, vmax=1100,origin='upper',zorder=2)
    # Refresh the matplotlib window with a minimal pause.
    plt.draw()
    plt.pause(0.001)
    # Show the colour image in the OpenCV window.
    cv2.imshow('Transformed color', color_image)
    # Press q key to stop
    if cv2.waitKey(1) == ord('q'):
        break

cv2.destroyAllWindows()

View File

@ -35,7 +35,7 @@ chart_dpi = 300
export_format = csv
[SECURITY]
secret_key = 739bbbe1b291cd966ef91d7752701958bf6d3e48c7b41e3872a7281d45403685
secret_key = 332fe6a0e5b58a60e61eeee09cad362a7c47051202db7fa334256c2527371ecf
session_timeout = 3600
max_login_attempts = 5

View File

@ -1,3 +0,0 @@
# 数据目录
# 此文件用于确保 data 目录在版本控制中被保留
# 实际的数据文件会被 .gitignore 忽略

View File

@ -108,6 +108,9 @@ function createWindow() {
backgroundColor: '#000000'
});
// 窗口创建后立即最大化
mainWindow.maximize();
// 开发环境加载本地服务器,生产环境加载打包后的文件
const isDev = process.env.NODE_ENV === 'development';
if (isDev) {

View File

@ -13,7 +13,7 @@ api.interceptors.request.use(
if (window.electronAPI) {
config.baseURL = window.electronAPI.getBackendUrl()
} else {
config.baseURL = 'http://192.168.1.65:5000'
config.baseURL = 'http://localhost:5000'
}
// 只为需要发送数据的请求设置Content-Type
@ -637,7 +637,7 @@ export const getBackendUrl = () => {
if (window.electronAPI) {
return window.electronAPI.getBackendUrl()
} else {
return 'http://192.168.1.65:5000'
return 'http://localhost:5000'
}
}

View File

@ -507,7 +507,12 @@
</el-radio-group>
</el-form-item>
<div class="cameraFormTitle">深度相机</div>
<el-form-item label="算法类型">
<el-radio-group v-model="cameraForm.femtobolt.algorithm_type">
<el-radio value="opencv">OpenCV(效率高)</el-radio>
<el-radio value="plt">Matplotlib精度高</el-radio>
</el-radio-group>
</el-form-item>
<el-form-item label="距离范围">
<div >
<el-input v-model="cameraForm.femtobolt.depth_range_min" placeholder="请输入最小值" style="width: 216px;" />
@ -516,6 +521,7 @@
</div>
</el-form-item>
<div class="cameraFormTitle">头部IMU</div>
<el-form-item label="IMU串口号">
<el-select v-model="cameraForm.imu.port" placeholder="请选择">
@ -550,11 +556,10 @@
</template>
<script setup>
import { ref, reactive, computed, onMounted, onUnmounted, nextTick } from 'vue'
import { ref, computed, onMounted, onUnmounted, nextTick } from 'vue'
import { ElMessage } from 'element-plus'
import { useRouter, useRoute } from 'vue-router'
import { io } from 'socket.io-client'
import html2canvas from 'html2canvas'
import Header from '@/views/Header.vue'
import { useAuthStore } from '../stores/index.js'
import * as echarts from 'echarts'
@ -582,7 +587,7 @@ const videoImgRef =ref(null) // 视频流图片ref
let mediaRecorder = null
let recordedChunks = []
let recordingStream = null
let currentMimeType = null //
// API
const patientInfo = ref({
@ -665,6 +670,7 @@ const cameraForm = ref({ // 相机参数
device_index: '', //
},
femtobolt:{
algorithm_type: '', //
depth_mode: '', //
depth_range_min: '', //
depth_range_max: '', //
@ -805,6 +811,14 @@ const startTimer = () => {
//
seconds.value = Math.round(elapsed / 1000);
// 10600
if (seconds.value >= 60) {
console.log('⏰ 检测时长超过10分钟自动停止检测');
ElMessage.warning('检测时长已达到10分钟自动停止检测');
stopDetection();
return;
}
//
blinkState.value = !blinkState.value;
}, 1000);
@ -882,6 +896,7 @@ function cameraUpdate() { // 相机设置数据更新弹框
device_index: '', //
},
femtobolt:{
algorithm_type: '', //
depth_mode: '', //
depth_range_min: '', //
depth_range_max: '', //
@ -1050,18 +1065,13 @@ function connectWebSocket() {
tempInfo.value.camera_frame = data
displayFrame(data.image)
})
// devicesSocket.on('video_frame', (data) => {
// frameCount++
// displayFrame(data.image)
// })
devicesSocket.on('femtobolt_frame', (data) => {
tempInfo.value.femtobolt_frame = data
displayDepthCameraFrame(data.depth_image || data.image)
})
// devicesSocket.on('depth_camera_frame', (data) => {
// displayDepthCameraFrame(data.depth_image || data.image)
// })
devicesSocket.on('imu_data', (data) => {
tempInfo.value.imu_data = data
@ -1343,15 +1353,6 @@ function updateHeadPoseMaxValues(headPose) {
)
}
// //
// console.log('📊 姿:', {
// rotationLeft: headPoseMaxValues.value.rotationLeftMax.toFixed(1),
// rotationRight: headPoseMaxValues.value.rotationRightMax.toFixed(1),
// tiltLeft: headPoseMaxValues.value.tiltLeftMax.toFixed(1),
// tiltRight: headPoseMaxValues.value.tiltRightMax.toFixed(1),
// pitchUp: headPoseMaxValues.value.pitchUpMax.toFixed(1),
// pitchDown: headPoseMaxValues.value.pitchDownMax.toFixed(1)
// })
} catch (error) {
console.error('❌ 更新头部姿态最值失败:', error)
}
@ -1465,16 +1466,7 @@ function handlePressureData(data) {
if (pressureData.pressure_zones) {
footPressure.value = pressureData.pressure_zones
}
//
// if (pressureData.balance_analysis) {
// const balance = pressureData.balance_analysis
// console.log(' :')
// console.log(` : ${(balance.balance_ratio * 100).toFixed(1)}%`)
// console.log(` : ${balance.pressure_center_offset}%`)
// console.log(` : ${balance.balance_status}`)
// console.log(` : ${(balance.left_front_ratio * 100).toFixed(1)}%`)
// console.log(` : ${(balance.right_front_ratio * 100).toFixed(1)}%`)
// }
//
if (pressureData.pressure_image) {
@ -1539,163 +1531,86 @@ async function handleDiagnosticInfo(status) {
}
}
//
// Collect one detection-data sample for the current session via the backend API.
// Requires an active session (patientInfo.sessionId); guards against re-entry
// with dataCollectionLoading. On success the new record is prepended to the
// local history table (capped at 10 rows).
async function handleDataCollection() {
  if (dataCollectionLoading.value) return
  try {
    dataCollectionLoading.value = true
    // Inform the user the collection has started.
    ElMessage.info('正在采集检测数据...')
    // A session ID is mandatory — collection only makes sense mid-detection.
    if (!patientInfo.value.sessionId) {
      throw new Error('请先开始检测再进行数据采集')
    }
    // Call the backend collect endpoint for this session.
    const response = await fetch(`${BACKEND_URL}/api/detection/${patientInfo.value.sessionId}/collect`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        // patient_id: patientInfo.value.id,
        // timestamp: Date.now()
        head_pose: {},
        body_pose: {},
        foot_data: {}
      })
    })
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`)
    }
    const result = await response.json()
    if (result.success) {
      // Surface the new data ID to the user.
      ElMessage.success({
        message: `检测数据采集成功数据ID: ${result.dataId}`,
        duration: 5000
      })
      console.log('✅ 检测数据采集成功:', result)
      // Prepend the collected sample to the in-page history table;
      // missing fields are rendered as '-'.
      if (result.data) {
        historyData.value.unshift({
          id: result.dataId,
          rotLeft: result.data.rotLeft || '-',
          rotRight: result.data.rotRight || '-',
          tiltLeft: result.data.tiltLeft || '-',
          tiltRight: result.data.tiltRight || '-',
          pitchDown: result.data.pitchDown || '-',
          pitchUp: result.data.pitchUp || '-'
        })
        // Keep only the 10 most recent rows.
        if (historyData.value.length > 10) {
          historyData.value = historyData.value.slice(0, 10)
        }
      }
    } else {
      throw new Error(result.message || '数据采集失败')
    }
  } catch (error) {
    console.error('❌ 检测数据采集失败:', error)
    // Map known failure modes to friendlier user-facing messages.
    let errorMessage = '检测数据采集失败'
    if (error.message.includes('网络连接失败')) {
      errorMessage = '网络连接失败,请检查后端服务是否正常运行'
    } else if (error.message.includes('服务器错误')) {
      errorMessage = error.message
    } else {
      errorMessage = `检测数据采集失败: ${error.message}`
    }
    ElMessage.error({
      message: errorMessage,
      duration: 5000
    })
  } finally {
    // Always release the re-entry guard.
    dataCollectionLoading.value = false
  }
}
//
async function saveDetectionData() {
console.log(tempInfo.value)
return
console.log(tempInfo.value)
if (screenshotLoading.value) return
try {
screenshotLoading.value = true
//
ElMessage.info('正在保存截图...')
ElMessage.info('正在保存检测截图数据...')
// ID
if (!patientInfo.value.sessionId) {
throw new Error('请先开始检测再进行截图')
throw new Error('请先开始检测再进行数据保存')
}
const base64 = 'data:image/jpeg;base64,'
let body_image = ""
if(tempInfo.value.femtobolt_frame != null
&& tempInfo.value.femtobolt_frame.depth_image != null){
body_image = base64 + tempInfo.value.femtobolt_frame.depth_image
}
let pressure_image = ""
let foot_data = ""
if(tempInfo.value.pressure_data != null
&& tempInfo.value.pressure_data.foot_pressure != null
&& tempInfo.value.pressure_data.foot_pressure.pressure_image != null){
pressure_image = base64 + tempInfo.value.pressure_data.foot_pressure.pressure_image
pressure_image = tempInfo.value.pressure_data.foot_pressure.pressure_image
foot_data = tempInfo.value.pressure_data.foot_pressure.pressure_zones
}
let foot_image=""
if(tempInfo.value.camera_frame != null
&& tempInfo.value.camera_frame.image != null ){
foot_image=base64 + tempInfo.value.camera_frame.image
}
let head_pose={}
if(tempInfo.value.imu_data != null ){
head_pose=tempInfo.value.imu_data
}
let screen_location = contenGridRef.value.getBoundingClientRect()
// API
const result = await sendDetectionData({
// patientId: patientInfo.value.id,
// patientName: patientInfo.value.name,
// sessionId: patientInfo.value.sessionId,
// head_pose: tempInfo.value.imu_data,
// body_data: femtobolt_frame,
// foot_image: pressure_data,
const result = await sendDetectionData({
session_id: patientInfo.value.sessionId,
patient_id: patientInfo.value.id,
sessionId: patientInfo.value.sessionId,
patientId: patientInfo.value.id,
head_pose:head_pose,
body_pose:null,
body_image: body_image,
body_pose:tempInfo.value.femtobolt_frame,
body_image:base64 + tempInfo.value.femtobolt_frame,
foot_data:tempInfo.value.femtobolt_frame,
foot_data_image:base64+tempInfo.value.femtobolt_frame.image,
screen_image:null,
camera_data: base64+ tempInfo.value.camera_frame.image,
})
foot_data:foot_data,
foot_image:foot_image,
foot_data_image:pressure_image,
screen_image:null
})
//
ElMessage.success({
message: `截图保存成功!`,
message: `检测数据保存成功!`,
duration: 5000
})
console.log('✅ 截图保存成功:', result.filepath)
} catch (error) {
console.error('❌ 截图失败:', error)
console.error('❌ 检测数据保存失败:', error)
//
let errorMessage = '截图失败'
let errorMessage = '检测数据保存失败'
if (error.message.includes('网络连接失败')) {
errorMessage = '网络连接失败,请检查后端服务是否正常运行'
} else if (error.message.includes('服务器错误')) {
errorMessage = error.message
} else if (error.message.includes('未找到截图区域')) {
errorMessage = '截图区域不存在,请刷新页面重试'
} else if (error.message.includes('未找到检测数据区域')) {
errorMessage = '检测数据区域不存在,请刷新页面重试'
} else if (error.message.includes('未找到检测数据')) {
errorMessage = '检测数据不存在,请刷新页面重试'
} else {
errorMessage = `截图失败: ${error.message}`
errorMessage = `检测数据保存失败: ${error.message}`
}
ElMessage.error({
@ -1746,284 +1661,6 @@ async function sendDetectionData(data) {
}
}
//
async function updateSessionVideoPath(sessionId, videoPath) {
try {
const response = await fetch(`${BACKEND_URL}/api/sessions/${sessionId}/video-path`, {
method: 'PUT',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
videoPath: videoPath
})
})
if (!response.ok) {
throw new Error(`HTTP ${response.status}: ${response.statusText}`)
}
const result = await response.json()
if (result.success) {
console.log('📹 会话视频路径更新成功:', videoPath)
return result
} else {
throw new Error(result.message || '更新失败')
}
} catch (error) {
console.error('💥 更新会话视频路径失败:', error)
throw error
}
}
//
// Begin recording the on-screen detection area (#detectare).
// The element is mirrored onto an offscreen canvas by startCapturingArea,
// and that canvas stream is recorded with MediaRecorder, preferring MP4
// and falling back to WebM when the browser cannot encode MP4.
// Side effects: assigns the module-scope recordingStream, mediaRecorder,
// currentMimeType and recordedChunks, starts the UI timer and sets
// isRecording to true.
async function startRecording() {
  try {
    console.log('🎬 开始录像...')

    const target = document.getElementById('detectare')
    if (!target) {
      throw new Error('未找到录制区域')
    }

    // Offscreen canvas sized to the capture area; frames are painted onto
    // it by startCapturingArea and streamed from it at 30fps.
    const canvas = document.createElement('canvas')
    const ctx = canvas.getContext('2d')
    const bounds = target.getBoundingClientRect()
    canvas.width = bounds.width
    canvas.height = bounds.height
    recordingStream = canvas.captureStream(30) // 30fps

    // Prefer MP4; not every browser supports encoding it, so fall back
    // to WebM/VP9 when unavailable.
    let mimeType = 'video/mp4;codecs=avc1.42E01E,mp4a.40.2'
    if (MediaRecorder.isTypeSupported(mimeType)) {
      console.log('✅ 使用MP4格式录制')
    } else {
      mimeType = 'video/webm;codecs=vp9'
      console.log('⚠️ 浏览器不支持MP4录制使用WebM格式')
    }

    mediaRecorder = new MediaRecorder(recordingStream, { mimeType })
    currentMimeType = mimeType // remembered so saveRecording can report it

    recordedChunks = []
    mediaRecorder.ondataavailable = (event) => {
      if (event.data.size > 0) {
        recordedChunks.push(event.data)
      }
    }

    mediaRecorder.start(1000) // flush a data chunk every second
    startTimer()
    isRecording.value = true
    startCapturingArea(target, canvas, ctx)
    console.log('✅ 录像已开始')
  } catch (error) {
    console.error('❌ 开始录像失败:', error)
    ElMessage.error(`开始录像失败: ${error.message}`)
  }
}
// canvas
// Continuously paint the recorded DOM element onto the recording canvas.
// Each frame is rasterized with html2canvas and blitted into the canvas
// that backs the MediaRecorder stream; the loop reschedules itself at
// ~30fps while isRecording stays true. A failed frame is logged and the
// loop continues, so one bad capture does not kill the recording.
function startCapturingArea(element, canvas, ctx) {
  const frameDelay = 1000 / 30 // ~30fps

  const captureFrame = () => {
    if (!isRecording.value) return

    html2canvas(element, {
      useCORS: true,
      allowTaint: true,
      backgroundColor: '#1E1E1E',
      scale: 1,
      logging: false,
      width: canvas.width,
      height: canvas.height
    })
      .then((snapshot) => {
        ctx.clearRect(0, 0, canvas.width, canvas.height)
        ctx.drawImage(snapshot, 0, 0, canvas.width, canvas.height)
        if (isRecording.value) {
          setTimeout(captureFrame, frameDelay)
        }
      })
      .catch((error) => {
        console.error('捕获帧失败:', error)
        if (isRecording.value) {
          setTimeout(captureFrame, frameDelay)
        }
      })
  }

  captureFrame()
}
//
// Stop the active screen recording and trigger saving of captured data.
// If MediaRecorder is still running, saving is deferred until its 'stop'
// event fires so the final buffered chunk is included; otherwise any
// already-collected chunks are saved immediately. Also stops and releases
// the canvas capture stream and clears the isRecording flag.
function stopRecording() {
  try {
    console.log('🛑 停止录像...')

    const recorderActive = mediaRecorder && mediaRecorder.state === 'recording'
    if (recorderActive) {
      // Wait for the recorder to flush its last chunk before saving.
      mediaRecorder.addEventListener(
        'stop',
        () => {
          console.log('📹 录像数据准备完成,开始保存...')
          saveRecording()
        },
        { once: true }
      )
      mediaRecorder.stop()
    } else if (recordedChunks.length > 0) {
      console.log('📹 发现未保存的录像数据,开始保存...')
      saveRecording()
    }

    if (recordingStream) {
      recordingStream.getTracks().forEach((track) => track.stop())
      recordingStream = null
    }

    isRecording.value = false
    console.log('✅ 录像已停止')
  } catch (error) {
    console.error('❌ 停止录像失败:', error)
    ElMessage.error(`停止录像失败: ${error.message}`)
  }
}
//
// Persist the captured recording to the backend.
// Validates that a patient and session are bound, packs the recorded
// chunks into a Blob, base64-encodes it with FileReader and POSTs it to
// the detection-stop endpoint. On success the session's video path is
// updated, the recorded chunks are cleared (so a later stop cannot
// re-save stale data) and the diagnostic result dialog is opened.
// NOTE(review): the whole video is base64-encoded in memory — fine for
// short clips, but long recordings may need a chunked/streamed upload.
async function saveRecording() {
  try {
    if (recordedChunks.length === 0) {
      throw new Error('没有录制数据')
    }
    // Patient identity and session binding are mandatory for the upload.
    if (!patientInfo.value.id || !patientInfo.value.name || !patientInfo.value.sessionId) {
      throw new Error(`缺少必需的患者信息: ID=${patientInfo.value.id}, 姓名=${patientInfo.value.name}, 会话ID=${patientInfo.value.sessionId}`)
    }
    console.log('📝 准备保存录像,患者信息:', {
      id: patientInfo.value.id,
      name: patientInfo.value.name,
      sessionId: patientInfo.value.sessionId
    })
    // BUGFIX: the Blob type was hard-coded to 'video/webm' even when the
    // recorder negotiated MP4 (see startRecording); use the actual
    // recording MIME type so the data-URL prefix matches the payload.
    const blob = new Blob(recordedChunks, { type: currentMimeType || 'video/webm' })
    console.log('📹 录像数据大小:', (blob.size / 1024 / 1024).toFixed(2), 'MB')
    // Convert the Blob to a base64 data URL for the JSON upload.
    const reader = new FileReader()
    reader.readAsDataURL(blob)
    reader.onload = async () => {
      try {
        const base64Data = reader.result
        const response = await fetch(`${BACKEND_URL}/api/detection/${patientInfo.value.sessionId}/stop`, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json'
          },
          body: JSON.stringify({
            patientId: patientInfo.value.id,
            patientName: patientInfo.value.name,
            sessionId: patientInfo.value.sessionId,
            videoData: base64Data,
            mimeType: currentMimeType || 'video/webm;codecs=vp9'
          })
        })
        if (!response.ok) {
          throw new Error(`HTTP ${response.status}: ${response.statusText}`)
        }
        const result = await response.json()
        if (result.success) {
          console.log('🎬 录像保存成功:', result.filepath)
          ElMessage.success({
            message: `录像保存成功!文件路径: ${result.filepath}`,
            duration: 5000
          })
          isRecording.value = false
          resetTimer()
          // Record the saved file path on the session; failure here is
          // non-fatal (the video itself is already stored).
          if (patientInfo.value.sessionId) {
            try {
              await updateSessionVideoPath(patientInfo.value.sessionId, result.filepath)
            } catch (error) {
              console.error('更新会话视频路径失败:', error)
            }
          }
          // Drop the buffered chunks so they can't be saved twice.
          recordedChunks.length = 0
          console.log('🧹 录像数据已清空')
          console.log('✅ 会话正式结束会话ID已清空')
          // Reset the diagnostic form and open the result dialog.
          diagnosticForm.value = {
            diagnosis_info: '',
            treatment_info: '',
            suggestion_info: ''
          }
          resDialogVisible.value = true
        } else {
          throw new Error(result.message || '保存失败')
        }
      } catch (error) {
        console.error('💥 保存录像失败:', error)
        ElMessage.error({
          message: `保存录像失败: ${error.message}`,
          duration: 5000
        })
        console.log('⚠️ 录像保存失败但会话已结束会话ID已清空')
      }
    }
    reader.onerror = () => {
      console.error('❌ 读取录像数据失败')
      ElMessage.error('读取录像数据失败')
    }
  } catch (error) {
    console.error('❌ 保存录像失败:', error)
    ElMessage.error(`保存录像失败: ${error.message}`)
    console.log('⚠️ 录像保存失败但会话已结束会话ID已清空')
  }
}
// /
async function handleStartStop() {
if (!isConnected.value) {
@ -2044,6 +1681,8 @@ async function handleStartStop() {
async function startDetection() {
try {
console.log('🚀 正在开始检测...')
isRecording.value = true
startTimer()
//
if (!patientInfo.value || !patientInfo.value.id) {
throw new Error('缺少患者信息,无法开始检测')
@ -2051,6 +1690,7 @@ async function startDetection() {
let screen_location = contenGridRef.value.getBoundingClientRect()
let femtobolt_location = wholeBodyRef.value.getBoundingClientRect()
let camera_location = videoImgRef.value.getBoundingClientRect()
let titile_height = 24
// API
const response = await fetch(`${BACKEND_URL}/api/detection/start`, {
method: 'POST',
@ -2060,30 +1700,11 @@ async function startDetection() {
body: JSON.stringify({
patient_id: patientInfo.value.id,
//
creator_id: creatorId.value,
screen_location:{
x:screen_location.x,
y:screen_location.y,
w:screen_location.width,
h:screen_location.height
},
femtobolt_location :{
x:femtobolt_location.x,
y:femtobolt_location.y,
w:femtobolt_location.width,
h:femtobolt_location.height
},
camera_location :{
x:camera_location.x,
y:camera_location.y,
w:camera_location.width,
h:camera_location.height
}
// settings: JSON.stringify({
// frequency: 30, //
// //
// })
creator_id: creatorId.value,
screen_location:[Math.round(screen_location.x), Math.round(screen_location.y) + titile_height, Math.round(screen_location.width), Math.round(screen_location.height-titile_height)],
camera_location:[Math.round(camera_location.x), Math.round(camera_location.y)+ titile_height, Math.round(camera_location.width), Math.round(camera_location.height-titile_height)],
femtobolt_location:[Math.round(femtobolt_location.x), Math.round(femtobolt_location.y) + titile_height, Math.round(femtobolt_location.width), Math.round(femtobolt_location.height-titile_height)],
})
})
if (!response.ok) {
@ -2099,9 +1720,8 @@ async function startDetection() {
patientInfo.value.sessionId = result.session_id
patientInfo.value.detectionStartTime = Date.now()
console.log('✅ 检测会话创建成功会话ID:', patientInfo.value.sessionId)
isStart.value = true
startRecording()
isStart.value = true
ElMessage.success('检测已开始')
} else {
throw new Error(result.message || '开始检测失败')
@ -2118,17 +1738,27 @@ async function startDetection() {
async function stopDetection() {
try {
console.log('🛑 停止检测会话ID:', patientInfo.value.sessionId)
resetTimer()
//
let duration = 0
if (patientInfo.value.detectionStartTime) {
duration = Math.floor((Date.now() - patientInfo.value.detectionStartTime) / 1000)
}
//
if (isRecording.value) {
stopRecording()
// API
const response = await fetch(`${BACKEND_URL}/api/detection/${patientInfo.value.sessionId}/stop`, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
duration: duration
})
})
if (!response.ok) {
throw new Error(`HTTP ${response.status}: ${response.statusText}`)
}
isRecording.value = false
isStart.value = false
} catch (error) {
@ -2292,15 +1922,6 @@ const calibrationClick = async () => {
}
const cameraSubmit = async () => {
// let data = {
// "imu": {"device_type": "real", "port": "COM7", "baudrate": 9600},
// "pressure": {"device_type": "real", "port": "COM8", "baudrate": 115200},
// "camera": {"device_index": 0, "width": 1280, "height": 720, "fps": 30},
// "femtobolt": {"color_resolution": "1080P", "depth_mode": "NFOV_UNBINNED", "fps": 15}
// }
//
const response = await fetch(`${BACKEND_URL}/api/config/devices/all`, {
method: 'POST',
headers: {
@ -2344,7 +1965,7 @@ const getDevicesInit = async () => {
}
onMounted(() => {
// wholeBodyRef.value
console.log(wholeBodyRef.value.getBoundingClientRect())
console.log(videoImgRef.value.getBoundingClientRect())
//

View File

@ -6,7 +6,7 @@
<div class="nav-container-title" @click="goBack">
<img src="@/assets/svg/goback.svg" alt="">
<div style="margin-left: 20px;">
患者档案
患者档案 - {{ patient?.name || '未知' }} (ID: {{ patient?.id || '未知' }})
</div>
</div>
<div class="nav-container-info">
@ -30,7 +30,7 @@
</div>
<div class="content-center">
<video ref="videoPlayerRef" :src=" BACKEND_URL+'/' + item.screen_video_path" controls width="100%" height="100%">
<video ref="videoPlayerRef" :src="item.screen_video_path ? BACKEND_URL+'/' + item.screen_video_path.replace(/\\/g, '/') : ''" controls width="100%" height="100%">
您的浏览器不支持视频播放
</video>
<img src="@/assets/big.png" alt="" class="bigImgBox" @click="bigImgClick(item)">
@ -70,8 +70,8 @@
<div class="content-right-bottom-content">
<div v-for="(item2, index2) in item.latest_detection_data" :key="index2" class="content-right-bottom-content-box">
<div class="content-right-bottom-img">
<img :src="BACKEND_URL+'/' + item2.screen_image" style="width:100% ;height: 100%;cursor: pointer;" alt=""
@click="showImage(BACKEND_URL+'/' + item2.screen_image)">
<img :src="item2.screen_image ? BACKEND_URL+'/' + item2.screen_image.replace(/\\/g, '/') : ''" style="width:100% ;height: 100%;cursor: pointer;" alt=""
@click="item2.screen_image ? showImage(BACKEND_URL+'/' + item2.screen_image.replace(/\\/g, '/')) : null">
</div>
<div style="margin-top: 15px;">
@ -441,7 +441,9 @@ function showImage(row){ // 显示大屏图片
}, 300)
}
// Open the full-screen playback dialog for a detection record.
// Normalizes Windows-style backslashes in the stored path to forward
// slashes so the resulting URL is valid in the browser.
// BUGFIX: guard against a missing normal_video_path — elsewhere in this
// component the video/image paths are null-checked before use, but this
// handler called .replace() unconditionally and would throw a TypeError
// for records without a recorded video.
function bigImgClick(row) {
  if (!row || !row.normal_video_path) {
    return
  }
  // Convert the Windows path separators to a web-friendly URL path.
  const webPath = row.normal_video_path.replace(/\\/g, '/')
  videoUrl.value = BACKEND_URL + '/' + webPath
  dialogVideoVisible.value = true
}

View File

@ -1,21 +0,0 @@
# 平衡体态检测系统安装包
## 目录结构
- `backend/` - 后端程序文件
- `frontend/` - 前端Electron应用
- `启动系统.bat` - 系统启动脚本
## 使用方法
1. 双击 `启动系统.bat` 启动系统
2. 系统会自动启动后端服务和前端界面
3. 如果需要单独启动可以直接运行前端exe文件
## 系统要求
- Windows 10 或更高版本
- 至少4GB内存
- 支持USB设备连接
## 注意事项
- 首次启动可能需要较长时间
- 确保防火墙允许程序访问网络
- 如遇问题请查看日志文件