365 lines
12 KiB
TypeScript
365 lines
12 KiB
TypeScript
'use client'
|
|
|
|
import React, { useEffect, useRef, useState } from 'react'
|
|
import {
|
|
Engine,
|
|
Scene,
|
|
Vector3,
|
|
HemisphericLight,
|
|
ArcRotateCamera,
|
|
Color3,
|
|
Color4,
|
|
AbstractMesh,
|
|
Nullable,
|
|
ImportMeshAsync,
|
|
HighlightLayer,
|
|
Mesh,
|
|
InstancedMesh,
|
|
Animation,
|
|
CubicEase,
|
|
EasingFunction,
|
|
Matrix,
|
|
Viewport
|
|
} from '@babylonjs/core'
|
|
import '@babylonjs/loaders'
|
|
|
|
import LoadingSpinner from '../ui/LoadingSpinner'
|
|
|
|
|
|
/**
 * Props for the Babylon.js model viewer component.
 */
export interface ModelViewerProps {
  /** URL/path of the GLTF/GLB model to load into the scene. */
  modelPath: string
  /**
   * Called once the model has finished importing. Receives the imported
   * meshes and the hierarchy bounding box of the root mesh (world min/max).
   */
  onModelLoaded?: (modelData: {
    meshes: AbstractMesh[]
    boundingBox: {
      min: { x: number; y: number; z: number }
      max: { x: number; y: number; z: number }
    }
  }) => void
  /** Called with a human-readable message when loading fails or the model has no geometry. */
  onError?: (error: string) => void
  /**
   * Sensor_ID to focus on. When set, the viewer finds the matching "IfcSensor"
   * mesh (by gltf extras Sensor_ID/SERIAL_NUMBER), highlights it and animates
   * the camera to it. Null/empty clears the focus.
   */
  focusSensorId?: string | null
  /**
   * Optional custom overlay renderer. `anchor` is the screen-space position
   * (pixels) of the focused mesh, or null when nothing is focused; `info`
   * carries the focused mesh name and the requested sensor id.
   */
  renderOverlay?: (params: { anchor: { left: number; top: number } | null; info?: { name?: string; sensorId?: string } | null }) => React.ReactNode
}
|
|
|
|
const ModelViewer: React.FC<ModelViewerProps> = ({
|
|
modelPath,
|
|
onModelLoaded,
|
|
onError,
|
|
focusSensorId,
|
|
renderOverlay,
|
|
}) => {
|
|
const canvasRef = useRef<HTMLCanvasElement>(null)
|
|
const engineRef = useRef<Nullable<Engine>>(null)
|
|
const sceneRef = useRef<Nullable<Scene>>(null)
|
|
const [isLoading, setIsLoading] = useState(false)
|
|
const [loadingProgress, setLoadingProgress] = useState(0)
|
|
const [showModel, setShowModel] = useState(false)
|
|
const isInitializedRef = useRef(false)
|
|
const isDisposedRef = useRef(false)
|
|
const importedMeshesRef = useRef<AbstractMesh[]>([])
|
|
const highlightLayerRef = useRef<HighlightLayer | null>(null)
|
|
const chosenMeshRef = useRef<AbstractMesh | null>(null)
|
|
const [overlayPos, setOverlayPos] = useState<{ left: number; top: number } | null>(null)
|
|
const [overlayData, setOverlayData] = useState<{ name?: string; sensorId?: string } | null>(null)
|
|
const [modelReady, setModelReady] = useState(false)
|
|
|
|
|
|
useEffect(() => {
|
|
isDisposedRef.current = false
|
|
isInitializedRef.current = false
|
|
return () => {
|
|
isDisposedRef.current = true
|
|
}
|
|
}, [])
|
|
|
|
useEffect(() => {
|
|
if (!canvasRef.current || isInitializedRef.current) return
|
|
|
|
const canvas = canvasRef.current
|
|
const engine = new Engine(canvas, true)
|
|
engineRef.current = engine
|
|
|
|
engine.runRenderLoop(() => {
|
|
if (!isDisposedRef.current && sceneRef.current) {
|
|
sceneRef.current.render()
|
|
}
|
|
})
|
|
|
|
const scene = new Scene(engine)
|
|
sceneRef.current = scene
|
|
|
|
scene.clearColor = new Color4(0.1, 0.1, 0.15, 1)
|
|
|
|
const camera = new ArcRotateCamera('camera', 0, Math.PI / 3, 20, Vector3.Zero(), scene)
|
|
camera.attachControl(canvas, true)
|
|
camera.lowerRadiusLimit = 2
|
|
camera.upperRadiusLimit = 200
|
|
camera.wheelDeltaPercentage = 0.01
|
|
camera.panningSensibility = 50
|
|
camera.angularSensibilityX = 1000
|
|
camera.angularSensibilityY = 1000
|
|
|
|
const ambientLight = new HemisphericLight('ambientLight', new Vector3(0, 1, 0), scene)
|
|
ambientLight.intensity = 0.4
|
|
ambientLight.diffuse = new Color3(0.7, 0.7, 0.8)
|
|
ambientLight.specular = new Color3(0.2, 0.2, 0.3)
|
|
ambientLight.groundColor = new Color3(0.3, 0.3, 0.4)
|
|
|
|
const keyLight = new HemisphericLight('keyLight', new Vector3(1, 1, 0), scene)
|
|
keyLight.intensity = 0.6
|
|
keyLight.diffuse = new Color3(1, 1, 0.9)
|
|
keyLight.specular = new Color3(1, 1, 0.9)
|
|
|
|
const fillLight = new HemisphericLight('fillLight', new Vector3(-1, 0.5, -1), scene)
|
|
fillLight.intensity = 0.3
|
|
fillLight.diffuse = new Color3(0.8, 0.8, 1)
|
|
|
|
const hl = new HighlightLayer('highlight-layer', scene)
|
|
highlightLayerRef.current = hl
|
|
|
|
const handleResize = () => {
|
|
if (!isDisposedRef.current) {
|
|
engine.resize()
|
|
}
|
|
}
|
|
window.addEventListener('resize', handleResize)
|
|
|
|
isInitializedRef.current = true
|
|
|
|
return () => {
|
|
isDisposedRef.current = true
|
|
isInitializedRef.current = false
|
|
window.removeEventListener('resize', handleResize)
|
|
|
|
highlightLayerRef.current?.dispose()
|
|
highlightLayerRef.current = null
|
|
if (engineRef.current) {
|
|
engineRef.current.dispose()
|
|
engineRef.current = null
|
|
}
|
|
sceneRef.current = null
|
|
}
|
|
}, [])
|
|
|
|
useEffect(() => {
|
|
if (!isInitializedRef.current || !modelPath || isDisposedRef.current) {
|
|
return
|
|
}
|
|
|
|
const loadModel = async () => {
|
|
if (!sceneRef.current || isDisposedRef.current) {
|
|
return
|
|
}
|
|
|
|
const oldMeshes = sceneRef.current.meshes.slice();
|
|
oldMeshes.forEach(m => m.dispose());
|
|
|
|
setIsLoading(true)
|
|
setLoadingProgress(0)
|
|
setShowModel(false)
|
|
console.log('Loading GLTF model:', modelPath)
|
|
|
|
// UI элемент загрузчика (есть эффект замедленности)
|
|
const progressInterval = setInterval(() => {
|
|
setLoadingProgress(prev => {
|
|
if (prev >= 90) {
|
|
clearInterval(progressInterval)
|
|
return 90
|
|
}
|
|
return prev + Math.random() * 15
|
|
})
|
|
}, 100)
|
|
|
|
try {
|
|
const result = await ImportMeshAsync(modelPath, sceneRef.current)
|
|
|
|
importedMeshesRef.current = result.meshes
|
|
|
|
clearInterval(progressInterval)
|
|
setLoadingProgress(100)
|
|
|
|
console.log('GLTF Model loaded successfully!')
|
|
console.log('[ModelViewer] ImportMeshAsync result:', result)
|
|
if (result.meshes.length > 0) {
|
|
|
|
const boundingBox = result.meshes[0].getHierarchyBoundingVectors()
|
|
const size = boundingBox.max.subtract(boundingBox.min)
|
|
const maxDimension = Math.max(size.x, size.y, size.z)
|
|
|
|
const camera = sceneRef.current!.activeCamera as ArcRotateCamera
|
|
camera.radius = maxDimension * 2
|
|
camera.target = result.meshes[0].position
|
|
|
|
importedMeshesRef.current = result.meshes
|
|
setModelReady(true)
|
|
|
|
onModelLoaded?.({
|
|
meshes: result.meshes,
|
|
boundingBox: {
|
|
min: boundingBox.min,
|
|
max: boundingBox.max
|
|
}
|
|
})
|
|
|
|
// Плавное появление модели
|
|
setTimeout(() => {
|
|
if (!isDisposedRef.current) {
|
|
setShowModel(true)
|
|
setIsLoading(false)
|
|
}
|
|
}, 500)
|
|
} else {
|
|
console.warn('No meshes found in model')
|
|
onError?.('No geometry found in model')
|
|
setIsLoading(false)
|
|
}
|
|
} catch (error) {
|
|
clearInterval(progressInterval)
|
|
console.error('Error loading GLTF model:', error)
|
|
onError?.(`Failed to load model: ${error}`)
|
|
setIsLoading(false)
|
|
}
|
|
}
|
|
|
|
// Загрузка модлеи начинается после появления спиннера
|
|
requestIdleCallback(() => loadModel(), { timeout: 50 })
|
|
}, [modelPath, onError, onModelLoaded])
|
|
|
|
useEffect(() => {
|
|
if (!sceneRef.current || isDisposedRef.current || !modelReady) return
|
|
|
|
const sensorId = (focusSensorId ?? '').trim()
|
|
if (!sensorId) {
|
|
console.log('[ModelViewer] Focus cleared (no Sensor_ID provided)')
|
|
|
|
highlightLayerRef.current?.removeAllMeshes()
|
|
chosenMeshRef.current = null
|
|
setOverlayPos(null)
|
|
setOverlayData(null)
|
|
return
|
|
}
|
|
|
|
const allMeshes = importedMeshesRef.current || []
|
|
const sensorMeshes = allMeshes.filter((m: any) => ((m.id ?? '').includes('IfcSensor') || (m.name ?? '').includes('IfcSensor')))
|
|
|
|
const chosen = sensorMeshes.find((m: any) => {
|
|
try {
|
|
const meta: any = (m as any)?.metadata
|
|
const extras: any = meta?.gltf?.extras ?? meta?.extras ?? (m as any)?.extras
|
|
const sid = extras?.Sensor_ID ?? extras?.sensor_id ?? extras?.SERIAL_NUMBER ?? extras?.serial_number
|
|
if (sid == null) return false
|
|
return String(sid).trim() === sensorId
|
|
} catch {
|
|
return false
|
|
}
|
|
})
|
|
|
|
console.log('[ModelViewer] Sensor focus', {
|
|
requested: sensorId,
|
|
totalImportedMeshes: allMeshes.length,
|
|
totalSensorMeshes: sensorMeshes.length,
|
|
chosen: chosen ? { id: chosen.id, name: chosen.name, uniqueId: chosen.uniqueId, parent: chosen.parent?.name } : null,
|
|
source: 'result.meshes',
|
|
})
|
|
|
|
const scene = sceneRef.current!
|
|
|
|
if (chosen) {
|
|
const camera = scene.activeCamera as ArcRotateCamera
|
|
const bbox = (typeof chosen.getHierarchyBoundingVectors === 'function')
|
|
? chosen.getHierarchyBoundingVectors()
|
|
: { min: chosen.getBoundingInfo().boundingBox.minimumWorld, max: chosen.getBoundingInfo().boundingBox.maximumWorld }
|
|
const center = bbox.min.add(bbox.max).scale(0.5)
|
|
const size = bbox.max.subtract(bbox.min)
|
|
const maxDimension = Math.max(size.x, size.y, size.z)
|
|
const targetRadius = Math.max(camera.lowerRadiusLimit ?? 2, maxDimension * 1.5)
|
|
|
|
scene.stopAnimation(camera)
|
|
|
|
const ease = new CubicEase()
|
|
ease.setEasingMode(EasingFunction.EASINGMODE_EASEINOUT)
|
|
const frameRate = 60
|
|
const durationMs = 600
|
|
const totalFrames = Math.round((durationMs / 1000) * frameRate)
|
|
|
|
Animation.CreateAndStartAnimation('camTarget', camera, 'target', frameRate, totalFrames, camera.target.clone(), center.clone(), Animation.ANIMATIONLOOPMODE_CONSTANT, ease)
|
|
Animation.CreateAndStartAnimation('camRadius', camera, 'radius', frameRate, totalFrames, camera.radius, targetRadius, Animation.ANIMATIONLOOPMODE_CONSTANT, ease)
|
|
|
|
const hl = highlightLayerRef.current
|
|
if (hl) {
|
|
hl.removeAllMeshes()
|
|
if (chosen instanceof Mesh) {
|
|
hl.addMesh(chosen, new Color3(1, 1, 0))
|
|
} else if (chosen instanceof InstancedMesh) {
|
|
hl.addMesh(chosen.sourceMesh, new Color3(1, 1, 0))
|
|
} else {
|
|
const children = typeof (chosen as any)?.getChildMeshes === 'function' ? (chosen as any).getChildMeshes() : []
|
|
for (const cm of children) {
|
|
if (cm instanceof Mesh) {
|
|
hl.addMesh(cm, new Color3(1, 1, 0))
|
|
}
|
|
}
|
|
}
|
|
}
|
|
chosenMeshRef.current = chosen
|
|
setOverlayData({ name: chosen.name, sensorId })
|
|
} else {
|
|
highlightLayerRef.current?.removeAllMeshes()
|
|
chosenMeshRef.current = null
|
|
setOverlayPos(null)
|
|
setOverlayData(null)
|
|
}
|
|
}, [focusSensorId, modelReady])
|
|
|
|
useEffect(() => {
|
|
const scene = sceneRef.current
|
|
if (!scene || isDisposedRef.current) return
|
|
const observer = scene.onAfterRenderObservable.add(() => {
|
|
const chosen = chosenMeshRef.current
|
|
if (!chosen) return
|
|
const engine = scene.getEngine()
|
|
const cam = scene.activeCamera
|
|
if (!cam) return
|
|
const center = chosen.getBoundingInfo().boundingBox.centerWorld
|
|
const world = Matrix.IdentityReadOnly
|
|
const transform = scene.getTransformMatrix()
|
|
const viewport = new Viewport(0, 0, engine.getRenderWidth(), engine.getRenderHeight())
|
|
const projected = Vector3.Project(center, world, transform, viewport)
|
|
setOverlayPos({ left: projected.x, top: projected.y })
|
|
})
|
|
return () => {
|
|
scene.onAfterRenderObservable.remove(observer)
|
|
}
|
|
}, [])
|
|
|
|
return (
|
|
<div className="w-full h-screen relative bg-gray-900 overflow-hidden">
|
|
<canvas
|
|
ref={canvasRef}
|
|
className={`w-full h-full outline-none block transition-opacity duration-500 ${
|
|
showModel ? 'opacity-100' : 'opacity-0'
|
|
}`}
|
|
/>
|
|
{isLoading && (
|
|
<div className="absolute inset-0 bg-gray-900 flex items-center justify-center z-50">
|
|
<LoadingSpinner
|
|
progress={loadingProgress}
|
|
size={120}
|
|
strokeWidth={8}
|
|
/>
|
|
</div>
|
|
)}
|
|
{renderOverlay
|
|
? renderOverlay({ anchor: overlayPos, info: overlayData })
|
|
: (overlayData && overlayPos && (
|
|
<div className="absolute z-40 pointer-events-none" style={{ left: overlayPos.left, top: overlayPos.top }}>
|
|
<div className="rounded bg-black/70 text-white text-xs px-3 py-2 shadow-lg">
|
|
<div className="font-semibold truncate max-w-[200px]">{overlayData.name || 'Sensor'}</div>
|
|
{overlayData.sensorId && <div className="opacity-80">ID: {overlayData.sensorId}</div>}
|
|
</div>
|
|
</div>
|
|
))}
|
|
</div>
|
|
)
|
|
}
|
|
|
|
export default ModelViewer |