This commit is contained in:
root 2025-08-22 09:17:47 +08:00
commit 40eb774f1b
9 changed files with 255 additions and 192 deletions

2
.gitignore vendored
View File

@ -21420,3 +21420,5 @@ frontend/src/renderer/src/services/api.js
frontend/src/renderer/src/services/api.js frontend/src/renderer/src/services/api.js
frontend/src/renderer/src/services/api.js frontend/src/renderer/src/services/api.js
frontend/src/renderer/src/services/api.js frontend/src/renderer/src/services/api.js
frontend/src/renderer/src/services/api.js
backend/devices/utils/config.ini

View File

@ -67,7 +67,6 @@ class RealIMUDevice:
if 'head_pose_offset' in calibration: if 'head_pose_offset' in calibration:
self.head_pose_offset = calibration['head_pose_offset'] self.head_pose_offset = calibration['head_pose_offset']
logger.debug(f'应用IMU校准数据: {self.head_pose_offset}') logger.debug(f'应用IMU校准数据: {self.head_pose_offset}')
def apply_calibration(self, raw_data: Dict[str, Any]) -> Dict[str, Any]: def apply_calibration(self, raw_data: Dict[str, Any]) -> Dict[str, Any]:
"""应用校准:将当前姿态减去初始偏移,得到相对于初始姿态的变化量""" """应用校准:将当前姿态减去初始偏移,得到相对于初始姿态的变化量"""
if not raw_data or 'head_pose' not in raw_data: if not raw_data or 'head_pose' not in raw_data:
@ -76,15 +75,14 @@ class RealIMUDevice:
# 应用校准偏移 # 应用校准偏移
calibrated_data = raw_data.copy() calibrated_data = raw_data.copy()
head_pose = raw_data['head_pose'].copy() head_pose = raw_data['head_pose'].copy()
angle=head_pose['rotation'] - self.head_pose_offset['rotation']
# 减去基准值(零点偏移) # 减去基准值(零点偏移)
head_pose['rotation'] = head_pose['rotation'] - self.head_pose_offset['rotation'] head_pose['rotation'] = ((angle + 180) % 360) - 180
head_pose['tilt'] = head_pose['tilt'] - self.head_pose_offset['tilt'] head_pose['tilt'] = head_pose['tilt'] - self.head_pose_offset['tilt']
head_pose['pitch'] = head_pose['pitch'] - self.head_pose_offset['pitch'] head_pose['pitch'] = head_pose['pitch'] - self.head_pose_offset['pitch']
calibrated_data['head_pose'] = head_pose calibrated_data['head_pose'] = head_pose
return calibrated_data return calibrated_data
@staticmethod @staticmethod
def _checksum(data: bytes) -> int: def _checksum(data: bytes) -> int:
return sum(data[:-1]) & 0xFF return sum(data[:-1]) & 0xFF
@ -115,7 +113,7 @@ class RealIMUDevice:
'yaw': yaw, 'yaw': yaw,
'temperature': temp 'temperature': temp
} }
# logger.debug(f'解析姿态角包: roll={roll}, pitch={pitch}, yaw={yaw}, temp={temp}') # print(f'解析姿态角包: roll={roll}, pitch={pitch}, yaw={yaw}, temp={temp}')
return self.last_data return self.last_data
else: else:
# logger.debug(f'忽略的数据包类型: 0x{packet_type:02X}') # logger.debug(f'忽略的数据包类型: 0x{packet_type:02X}')
@ -356,37 +354,16 @@ class IMUManager(BaseDevice):
self.logger.info('开始IMU快速零点校准...') self.logger.info('开始IMU快速零点校准...')
# 收集校准样本 # 直接读取一次原始数据作为校准偏移量
calibration_samples = [] raw_data = self.imu_device.read_data(apply_calibration=False)
sample_count = 50 # 减少样本数量以加快校准速度 if not raw_data or 'head_pose' not in raw_data:
return {'status': 'error', 'error': '无法读取IMU原始数据'}
for i in range(sample_count): # 使用当前姿态作为零点偏移
try:
# 读取原始数据(不应用校准)
raw_data = self.imu_device.read_data(apply_calibration=False)
if raw_data and 'head_pose' in raw_data:
calibration_samples.append(raw_data['head_pose'])
time.sleep(0.02) # 20ms间隔
except Exception as e:
self.logger.warning(f'校准样本采集失败: {e}')
continue
if len(calibration_samples) < sample_count * 0.7:
return {
'status': 'error',
'error': f'校准样本不足: {len(calibration_samples)}/{sample_count}'
}
# 计算平均值作为零点偏移
rotation_sum = sum(sample['rotation'] for sample in calibration_samples)
tilt_sum = sum(sample['tilt'] for sample in calibration_samples)
pitch_sum = sum(sample['pitch'] for sample in calibration_samples)
count = len(calibration_samples)
self.head_pose_offset = { self.head_pose_offset = {
'rotation': rotation_sum / count, 'rotation': raw_data['head_pose']['rotation'],
'tilt': tilt_sum / count, 'tilt': raw_data['head_pose']['tilt'],
'pitch': pitch_sum / count 'pitch': raw_data['head_pose']['pitch']
} }
# 应用校准到设备 # 应用校准到设备
@ -396,8 +373,7 @@ class IMUManager(BaseDevice):
self.logger.info(f'IMU快速校准完成: {self.head_pose_offset}') self.logger.info(f'IMU快速校准完成: {self.head_pose_offset}')
return { return {
'status': 'success', 'status': 'success',
'head_pose_offset': self.head_pose_offset, 'head_pose_offset': self.head_pose_offset
'samples_used': count
} }
except Exception as e: except Exception as e:
@ -504,8 +480,8 @@ class IMUManager(BaseDevice):
if data: if data:
# 缓存数据 # 缓存数据
self.data_buffer.append(data) # self.data_buffer.append(data)
self.last_valid_data = data # self.last_valid_data = data
# 发送数据到前端 # 发送数据到前端
if self._socketio: if self._socketio:

View File

@ -947,7 +947,7 @@ class AppServer:
self.logger.error(f'校准设备失败: {e}') self.logger.error(f'校准设备失败: {e}')
return jsonify({'success': False, 'error': str(e)}), 500 return jsonify({'success': False, 'error': str(e)}), 500
@self.app.route('/api/devices/imu/calibrate', methods=['POST']) @self.app.route('/api/devices/calibrate/imu', methods=['POST'])
def calibrate_imu(): def calibrate_imu():
"""校准IMU""" """校准IMU"""
try: try:

View File

@ -1,79 +1,53 @@
import ctypes from PIL import Image
import time import colorsys
import numpy as np
# === DLL 加载 === def get_unique_colors(image_path):
dll = ctypes.WinDLL(r"D:\BodyBalanceEvaluation\backend\dll\smitsense\SMiTSenseUsbWrapper.dll") img = Image.open(image_path).convert("RGB")
unique_colors = list(set(img.getdata()))
return unique_colors
# === DLL 函数声明 === def get_representative_colors(colors, n=12):
dll.SMiTSenseUsb_Init.argtypes = [ctypes.c_int] # 按亮度排序并均匀抽取
dll.SMiTSenseUsb_Init.restype = ctypes.c_int def brightness(rgb):
r, g, b = rgb
return 0.2126*r + 0.7152*g + 0.0722*b
dll.SMiTSenseUsb_ScanDevices.argtypes = [ctypes.POINTER(ctypes.c_int)] colors.sort(key=brightness)
dll.SMiTSenseUsb_ScanDevices.restype = ctypes.c_int total = len(colors)
if total <= n:
return colors
step = total / n
return [colors[int(i*step)] for i in range(n)]
dll.SMiTSenseUsb_OpenAndStart.argtypes = [ def sort_colors_by_hue(colors):
ctypes.c_int, # 转为 HSV并按色相排序
ctypes.POINTER(ctypes.c_uint16), def rgb_to_hue(rgb):
ctypes.POINTER(ctypes.c_uint16) r, g, b = [x/255.0 for x in rgb]
] h, s, v = colorsys.rgb_to_hsv(r, g, b)
dll.SMiTSenseUsb_OpenAndStart.restype = ctypes.c_int return h
return sorted(colors, key=rgb_to_hue)
dll.SMiTSenseUsb_GetLatestFrame.argtypes = [ def show_color_preview(colors, width=600, height_per_color=50):
ctypes.POINTER(ctypes.c_uint16), n = len(colors)
ctypes.c_int height = n * height_per_color
] img = Image.new("RGB", (width, height))
dll.SMiTSenseUsb_GetLatestFrame.restype = ctypes.c_int
dll.SMiTSenseUsb_StopAndClose.argtypes = [] for i, color in enumerate(colors):
dll.SMiTSenseUsb_StopAndClose.restype = ctypes.c_int for y in range(i*height_per_color, (i+1)*height_per_color):
for x in range(width):
img.putpixel((x, y), color)
# === 初始化设备 === img.show()
ret = dll.SMiTSenseUsb_Init(0)
if ret != 0:
raise RuntimeError(f"Init failed: {ret}")
count = ctypes.c_int() if __name__ == "__main__":
ret = dll.SMiTSenseUsb_ScanDevices(ctypes.byref(count)) image_path = r"D:\项目资料\技术文档资料\中康项目资料\11.png"
if ret != 0 or count.value == 0:
raise RuntimeError("No devices found")
# 打开设备 colors = get_unique_colors(image_path)
rows = ctypes.c_uint16() rep_colors = get_representative_colors(colors, 12)
cols = ctypes.c_uint16() sorted_colors = sort_colors_by_hue(rep_colors)
ret = dll.SMiTSenseUsb_OpenAndStart(0, ctypes.byref(rows), ctypes.byref(cols))
if ret != 0:
raise RuntimeError("OpenAndStart failed")
rows_val, cols_val = rows.value, cols.value print("12 个代表性颜色(按彩虹顺序):")
frame_size = rows_val * cols_val for i, color in enumerate(sorted_colors, 1):
buf_type = ctypes.c_uint16 * frame_size print(f"{i}: {color}")
buf = buf_type()
# 创建一个 NumPy 数组视图,复用内存 show_color_preview(sorted_colors)
data_array = np.ctypeslib.as_array(buf).reshape((rows_val, cols_val))
print(f"设备已打开: {rows_val}x{cols_val}")
try:
while True:
ret = dll.SMiTSenseUsb_GetLatestFrame(buf, frame_size)
time.sleep(1)
# while True:
# ret = dll.SMiTSenseUsb_GetLatestFrame(buf, frame_size)
# if ret == 0:
# # data_array 已经复用缓冲区内存,每次直接访问即可
# # 例如打印最大值和前5行前5列的数据
# print("最大压力值:", data_array.max())
# print("前5x5数据:\n", data_array[:5, :5])
# else:
# print("读取数据帧失败")
# time.sleep(1) # 每秒读取一帧
except KeyboardInterrupt:
print("退出中...")
finally:
dll.SMiTSenseUsb_StopAndClose()
print("设备已关闭")

View File

@ -59,7 +59,7 @@ def parse_packet(data):
# else: # else:
# return f"未知包类型: {packet_type:#04x}" # return f"未知包类型: {packet_type:#04x}"
def read_imu(port='COM6', baudrate=9600): def read_imu(port='COM8', baudrate=9600):
ser = serial.Serial(port, baudrate, timeout=1) ser = serial.Serial(port, baudrate, timeout=1)
buffer = bytearray() buffer = bytearray()

View File

@ -0,0 +1,94 @@
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import pykinect_azure as pykinect
class FemtoBoltContourViewer:
    """Live contour-plot viewer for a FemtoBolt (k4a-compatible) depth camera.

    Reads depth frames in a loop and renders the depth band
    [depth_min, depth_max] (millimeters, presumably — TODO confirm the
    device's depth units) as a discrete filled-contour map via matplotlib.
    """

    def __init__(self, depth_min=900, depth_max=1100):
        """Store the depth window and build the discrete colormap.

        Args:
            depth_min: Lower bound of the depth band to visualize.
            depth_max: Upper bound of the depth band to visualize.
        """
        self.depth_min = depth_min
        self.depth_max = depth_max
        # Custom discrete rainbow palette so contour bands are clearly
        # distinguishable (8 bands, dark blue -> dark red).
        colors = [
            'darkblue', 'blue', 'cyan', 'lime', 'yellow',
            'orange', 'red', 'darkred'
        ]
        self.cmap = ListedColormap(colors)
        # Set lazily by _load_sdk()/_configure_device(); None until run().
        self.device_handle = None
        self.pykinect = None
        self.config = None

    def _load_sdk(self):
        """Load and initialize the FemtoBolt SDK from the bundled k4a.dll."""
        base_dir = os.path.dirname(os.path.abspath(__file__))
        # DLL path is resolved relative to this file: ../dll/femtobolt/bin/k4a.dll
        dll_path = os.path.join(base_dir, "..", "dll", "femtobolt", "bin", "k4a.dll")
        self.pykinect = pykinect
        self.pykinect.initialize_libraries(track_body=False, module_k4a_path=dll_path)

    def _configure_device(self):
        """Configure and start the FemtoBolt depth camera (depth-only, 15 FPS)."""
        self.config = self.pykinect.default_configuration
        self.config.depth_mode = self.pykinect.K4A_DEPTH_MODE_NFOV_UNBINNED
        self.config.camera_fps = self.pykinect.K4A_FRAMES_PER_SECOND_15
        # Depth-only pipeline: no color stream, no frame synchronization.
        self.config.synchronized_images_only = False
        self.config.color_resolution = 0
        self.device_handle = self.pykinect.start_device(config=self.config)

    def run(self):
        """Main loop: grab depth frames and redraw the contour plot until the
        window is closed or the user presses Ctrl+C."""
        self._load_sdk()
        self._configure_device()
        plt.ion()  # interactive mode so the figure updates without blocking
        fig, ax = plt.subplots(figsize=(7, 7))
        print("FemtoBolt 深度相机启动成功,关闭窗口或 Ctrl+C 退出")
        # One contour level boundary per colormap color -> len(colors) bands.
        levels = np.linspace(self.depth_min, self.depth_max, len(self.cmap.colors) + 1)
        try:
            while plt.fignum_exists(fig.number):  # stop once the window is closed
                capture = self.device_handle.update()
                if capture is None:
                    continue
                ret, depth_image = capture.get_depth_image()
                if not ret or depth_image is None:
                    continue
                depth = depth_image.astype(np.uint16)
                # Zero out everything outside the depth window; zeros are
                # masked below so only the band of interest is drawn.
                depth[depth > self.depth_max] = 0
                depth[depth < self.depth_min] = 0
                # Crop to the top-left 350x350 region of the frame
                # (presumably the subject's area of interest — TODO confirm).
                depth = depth[0:350, 0:350]
                # Mask invalid (zeroed) pixels so contourf skips them.
                depth_masked = np.ma.masked_equal(depth, 0)
                # Uniform mid-gray backdrop behind the contours.
                background = np.ones_like(depth) * 0.5
                ax.clear()
                ax.imshow(background, origin='lower', cmap='gray', alpha=0.3)
                ax.grid(True, which='both', axis='both', color='white',
                        linestyle='--', linewidth=1, zorder=0)
                ax.contourf(depth_masked, levels=levels, cmap=self.cmap,
                            vmin=self.depth_min, vmax=self.depth_max,
                            origin='upper', zorder=2)
                plt.pause(0.05)  # yields to the GUI event loop (~20 Hz redraw)
        except KeyboardInterrupt:
            print("检测到退出信号,结束程序")
        finally:
            # Always stop/close the device if it was opened.
            if self.device_handle:
                self.device_handle.stop()
                self.device_handle.close()
            # NOTE(review): diff rendering lost the original indentation here;
            # plt.close placed at finally level (close figure regardless) — confirm.
            plt.close(fig)
if __name__ == "__main__":
    # Script entry point: open the depth camera and run the live viewer
    # over the 900–1100 depth band until the window closes.
    FemtoBoltContourViewer(depth_min=900, depth_max=1100).run()

View File

@ -610,16 +610,22 @@ export const historyAPI = {
// 删除检测数据记录 // 删除检测数据记录
detectionDelById(id) { detectionDelById(id) {
return api.delete(`/api/detection/data/${id}`, {}) return api.delete(`/api/detection/data/${id}`, {})
},
// 删除检测会话及其相关的检测数据
sessionsDelById(id) {
return api.delete(`api/detection/sessions/${id}`, {})
}, },
// 获取检测会话历史 // 获取检测会话历史
sessionById(id) { sessionById(id) {
return api.get(`/api/history/sessions/${id}`) return api.get(`/api/history/sessions/${id}`)
}, },
// detectionLatestById(id) {
// return api.get(`/api/detection/data/detail/${id}`)
// },
detectionLatestById(id) { //获取最新的检测数据
detectionLatestList(id) {
return api.get(`/api/detection/data/detail/${id}/latest`)
},
//根据主键ID查询检测数据详情
detectionById(id) {
return api.get(`/api/detection/data/detail/${id}`) return api.get(`/api/detection/data/detail/${id}`)
}, },

View File

@ -64,7 +64,7 @@
<div :style="{ color: femtoboltStatus == '已连接' ? '#00CC33' : '#808080' }" style="font-size: 14px;"> <div :style="{ color: femtoboltStatus == '已连接' ? '#00CC33' : '#808080' }" style="font-size: 14px;">
{{ femtoboltStatus }}</div> {{ femtoboltStatus }}</div>
</div> </div>
<div style="display: flex;justify-content: center;height: 100%;padding-top: 0px;"> <div ref="wholeBodyRef" style="display: flex;justify-content: center;height: 100%;padding-top: 0px;">
<!-- 使用深度相机视频流替换静态图片 --> <!-- 使用深度相机视频流替换静态图片 -->
<img :src="(femtoboltStatus === '已连接' && depthCameraImgSrc) ? depthCameraImgSrc : noImageSvg" alt="深度相机视频流" <img :src="(femtoboltStatus === '已连接' && depthCameraImgSrc) ? depthCameraImgSrc : noImageSvg" alt="深度相机视频流"
style="width: 100%;height: calc(100% - 40px);object-fit:contain;background:#323232;"> style="width: 100%;height: calc(100% - 40px);object-fit:contain;background:#323232;">
@ -323,7 +323,7 @@
</div> </div>
<!-- 视频模块 --> <!-- 视频模块 -->
<div class="module-card" style="height: 50%;"> <div class="module-card" style="height: 50%;">
<div style="display: flex;margin-bottom: 15px;"> <div style="display: flex;">
<div class="module-header"> <div class="module-header">
<div class="module-title"> <div class="module-title">
<div class="module-title-bg"> <div class="module-title-bg">
@ -334,9 +334,12 @@
<div :style="{ color: cameraStatus == '已连接' ? '#00CC33' : '#808080' }" style="font-size: 14px;">{{ cameraStatus }}</div> <div :style="{ color: cameraStatus == '已连接' ? '#00CC33' : '#808080' }" style="font-size: 14px;">{{ cameraStatus }}</div>
</div> </div>
</div> </div>
<div ref="videoImgRef" style="width: 100%;height: calc(100% - 40px)">
<img :src="(cameraStatus === '已连接' && rtspImgSrc) ? rtspImgSrc : noImageSvg" alt=""
style="width: 100%;height: calc(100%);object-fit:contain;background:#323232;" />
</div>
<!-- 使用img元素显示视频流优化的Data URL方案 --> <!-- 使用img元素显示视频流优化的Data URL方案 -->
<img :src="(cameraStatus === '已连接' && rtspImgSrc) ? rtspImgSrc : noImageSvg" alt=""
style="width: 100%;height: calc(100% - 80px);object-fit:contain;background:#323232;" />
</div> </div>
</div> </div>
</div> </div>
@ -569,6 +572,9 @@ const dataCollectionLoading = ref(false)
const isRecording = ref(false) const isRecording = ref(false)
const cameraDialogVisible =ref(false) // const cameraDialogVisible =ref(false) //
const wholeBodyRef = ref(null) // 姿ref
const videoImgRef =ref(null) // ref
// //
let mediaRecorder = null let mediaRecorder = null
let recordedChunks = [] let recordedChunks = []
@ -908,6 +914,7 @@ const editPatient = () => {
} }
dialogVisible.value = true dialogVisible.value = true
} }
const tempInfo = ref({})
// WebSocket // WebSocket
function connectWebSocket() { function connectWebSocket() {
try { try {
@ -1037,25 +1044,29 @@ function connectWebSocket() {
// //
devicesSocket.on('camera_frame', (data) => { devicesSocket.on('camera_frame', (data) => {
frameCount++ frameCount++
tempInfo.value.camera_frame = data
displayFrame(data.image) displayFrame(data.image)
}) })
devicesSocket.on('video_frame', (data) => { // devicesSocket.on('video_frame', (data) => {
frameCount++ // frameCount++
displayFrame(data.image) // displayFrame(data.image)
}) // })
devicesSocket.on('femtobolt_frame', (data) => { devicesSocket.on('femtobolt_frame', (data) => {
tempInfo.value.femtobolt_frame = data
displayDepthCameraFrame(data.depth_image || data.image) displayDepthCameraFrame(data.depth_image || data.image)
}) })
devicesSocket.on('depth_camera_frame', (data) => { // devicesSocket.on('depth_camera_frame', (data) => {
displayDepthCameraFrame(data.depth_image || data.image) // displayDepthCameraFrame(data.depth_image || data.image)
}) // })
devicesSocket.on('imu_data', (data) => { devicesSocket.on('imu_data', (data) => {
tempInfo.value.imu_data = data
handleIMUData(data) handleIMUData(data)
}) })
devicesSocket.on('pressure_data', (data) => { devicesSocket.on('pressure_data', (data) => {
tempInfo.value.pressure_data = data
handlePressureData(data) handlePressureData(data)
}) })
@ -1634,11 +1645,12 @@ async function saveDetectionData() {
patientId: patientInfo.value.id, patientId: patientInfo.value.id,
patientName: patientInfo.value.name, patientName: patientInfo.value.name,
sessionId: patientInfo.value.sessionId, sessionId: patientInfo.value.sessionId,
head_pose: {}, head_data: imu_data,
body_pose: {}, body_data: femtobolt_frame,
foot_data: {} foot_data: pressure_data,
camera_data: camera_frame,
}) })
tempInfo.value
// //
ElMessage.success({ ElMessage.success({
message: `截图保存成功!`, message: `截图保存成功!`,
@ -2286,6 +2298,9 @@ const getDevicesInit = async () => {
} }
onMounted(() => { onMounted(() => {
// wholeBodyRef.value
console.log(wholeBodyRef.value.getBoundingClientRect())
console.log(videoImgRef.value.getBoundingClientRect())
// //
loadPatientInfo() loadPatientInfo()
@ -2301,6 +2316,7 @@ onMounted(() => {
}) })
onUnmounted(() => { onUnmounted(() => {
if (timerId.value) { if (timerId.value) {
clearInterval(timerId.value); clearInterval(timerId.value);
} }

View File

@ -26,7 +26,7 @@
<div class="content-left-text2">{{ item.created_at }}</div> <div class="content-left-text2">{{ item.created_at }}</div>
<div class="content-left-text3" v-if="index==0">最近会诊</div> <div class="content-left-text3" v-if="index==0">最近会诊</div>
<div class="content-left-text3" v-else>{{ getDayNum(item.created_at,index) }}</div> <div class="content-left-text3" v-else>{{ getDayNum(item.created_at,index) }}</div>
<!-- <el-button type="danger" style="margin-top: 20px;" @click="deleteClick(item,index)">删除</el-button> --> <el-button type="danger" style="margin-top: 20px;" @click="sessionsDelById(item,index)">删除</el-button>
</div> </div>
<div class="content-center"> <div class="content-center">
@ -75,11 +75,11 @@
</div> </div>
<div style="margin-top: 15px;"> <div style="margin-top: 15px;">
<div @click="patientdetails(item)"> <div @click="patientdetails(item2)">
<img src="@/assets/svg/datalist.svg" alt="" style="cursor: pointer;" title="查看详情"> <img src="@/assets/svg/datalist.svg" alt="" style="cursor: pointer;" title="查看详情">
</div> </div>
<div> <div>
<img src="@/assets/svg/del.svg" alt="" style="cursor: pointer;" title="删除" @click="deleteClick(item2,index)"> <img src="@/assets/svg/del.svg" alt="" style="cursor: pointer;" title="删除" @click="deleteClick(item,item2,index)">
</div> </div>
</div> </div>
</div> </div>
@ -435,6 +435,10 @@ function getDayNum(date2,index){
// //
const daysDiff = Math.floor(timeDiff / (1000 * 3600 * 24)); const daysDiff = Math.floor(timeDiff / (1000 * 3600 * 24));
if(daysDiff == 0){
return "当天";
}
return daysDiff + "天"; return daysDiff + "天";
} }
@ -544,6 +548,7 @@ const deleteScreenshot = async (screenshot) => {
} }
} }
const exportReport = async (record) => { const exportReport = async (record) => {
try { try {
ElMessage.info('正在生成报告...') ElMessage.info('正在生成报告...')
@ -712,8 +717,7 @@ function editClick(row,index) {
} }
// //
function patientdetails(row) { function patientdetails(row) {
// historyAPI.sessionById(row.id).then(res => { detectionById(row)
// })
detailsDialogVisible.value = true detailsDialogVisible.value = true
} }
async function handleDiagnosticInfo(status) { async function handleDiagnosticInfo(status) {
@ -776,8 +780,42 @@ const playNewVideo = () => {
videoPlayerRef.value[index].play() videoPlayerRef.value[index].play()
} }
} }
function detectionById(row) {
historyAPI.detectionById(row.id).then((response)=>{
if(response.success){
debugger
}
}).catch(()=>{
const deleteClick = async (row) => { })
}
const deleteClick = async (row,row2) => {
detectionLatestList(row.id)
ElMessageBox.confirm(
'确定义删除此条数据?',
'提示',
{
confirmButtonText: '确定',
cancelButtonText: '取消',
type: 'warning',
}
).then(() => {
historyAPI.detectionDelById(row2.id).then((response)=>{
if(response.success){
ElMessage.success({
message: response.message,
duration: 5000
});
sessionsInit()
// detectionLatestList(row.id)
}
}).catch(()=>{
})
})
}
const sessionsDelById = async (row) => {
ElMessageBox.confirm( ElMessageBox.confirm(
'确定义删除此条数据?', '确定义删除此条数据?',
@ -788,7 +826,7 @@ const deleteClick = async (row) => {
type: 'warning', type: 'warning',
} }
).then(() => { ).then(() => {
historyAPI.detectionDelById(row.id).then((response)=>{ historyAPI.sessionsDelById(row.id).then((response)=>{
if(response.success){ if(response.success){
ElMessage.success({ ElMessage.success({
message: response.message, message: response.message,
@ -796,63 +834,20 @@ const deleteClick = async (row) => {
}); });
sessionsInit() sessionsInit()
} }
}).catch(()=>{ }).catch(()=>{
}) })
}) })
}
const detectionLatestList = async(id)=>{
// try { const response = await historyAPI.detectionLatestList(id)
// // ID console.log(response)
// if (!row.id) { // historyAPI.detectionLatestList(id).then((response)=>{
// throw new Error('Id'); // if(response.success){
// console.log(response)
// } // }
// }).catch(()=>{
// // // })
// await ElMessageBox.confirm(
// '?',
// '',
// {
// confirmButtonText: '',
// cancelButtonText: '',
// type: 'warning',
// }
// );
// // API
// const response = await fetch(`${BACKEND_URL}/api/detection/data/${row.id}`, {
// method: 'DELETE',
// headers: {
// 'Content-Type': 'application/json'
// },
// });
// if (!response.ok) {
// throw new Error(`HTTP ${response.status}: ${response.statusText}`);
// }
// const result = await response.json();
// if (result.success) {
// ElMessage.success({
// message: '',
// duration: 5000
// });
// } else {
// throw new Error(result.message || '');
// }
// } catch (error) {
// //
// if (error === 'cancel' || error === 'close') {
// return;
// }
// ElMessage.error({
// message: error.message || '',
// duration: 5000
// });
// }
} }
// // // //
// const togglePlayPause = () => { // const togglePlayPause = () => {