The most important question when using DRACO compression is "when, and at what level, should we compress?"
The following code implements an optimization strategy based on file size.
import { exec as execCallback } from 'child_process';
import { promisify } from 'util';
import * as fs from 'fs/promises';

// Promisified exec so the gltf-transform CLI call below can be awaited.
const exec = promisify(execCallback);

interface CompressionConfig {
quantization: {
position: number;
normal: number;
texcoord: number;
color: number;
};
compressionLevel: number;
}
class ModelCompressionManager {
private static readonly COMPRESSION_THRESHOLDS = {
SMALL: 1 * 1024 * 1024, // 1MB
MEDIUM: 5 * 1024 * 1024, // 5MB
LARGE: 15 * 1024 * 1024 // 15MB
};
private static getCompressionConfig(fileSize: number): CompressionConfig {
if (fileSize > this.COMPRESSION_THRESHOLDS.LARGE) {
return {
quantization: {
position: 14,
normal: 10,
texcoord: 12,
color: 8
},
compressionLevel: 10
};
} else if (fileSize > this.COMPRESSION_THRESHOLDS.MEDIUM) {
return {
quantization: {
position: 12,
normal: 8,
texcoord: 10,
color: 8
},
compressionLevel: 7
};
}
return {
quantization: {
position: 11,
normal: 7,
texcoord: 9,
color: 8
},
compressionLevel: 5
};
}
static async compressModel(input: string, output: string): Promise<void> {
const stats = await fs.stat(input);
const config = this.getCompressionConfig(stats.size);
const command = `gltf-transform optimize ${input} ${output} \
--compress draco \
--draco-quantization-position ${config.quantization.position} \
--draco-quantization-normal ${config.quantization.normal} \
--draco-quantization-texcoord ${config.quantization.texcoord} \
--draco-quantization-color ${config.quantization.color} \
--draco-compression-level ${config.compressionLevel} \
--texture-compress webp`;
await exec(command);
}
}
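A minimal usage sketch, assuming the gltf-transform CLI is installed and that the paths below exist in your project:

// Hypothetical paths; adjust to your own asset layout.
await ModelCompressionManager.compressModel(
  'assets/raw/character.glb',
  'assets/optimized/character.glb'
);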
Next, let's implement a more advanced system for managing the DRACO decoder efficiently.
import { DRACOLoader } from 'three/examples/jsm/loaders/DRACOLoader.js';

class DracoDecoderManager {
private static instance: DracoDecoderManager;
private decoder: DRACOLoader | null = null;
private decoderStatus: 'uninitialized' | 'loading' | 'ready' = 'uninitialized';
private decoderPromise: Promise<void> | null = null;
private stats = {
decodingTime: 0,
totalModelsDecoded: 0,
memoryUsage: 0
};
private constructor() {}
static getInstance(): DracoDecoderManager {
if (!DracoDecoderManager.instance) {
DracoDecoderManager.instance = new DracoDecoderManager();
}
return DracoDecoderManager.instance;
}
private async initializeDecoder(): Promise<void> {
if (this.decoderStatus === 'loading') {
return this.decoderPromise!;
}
if (this.decoderStatus === 'ready') {
return Promise.resolve();
}
this.decoderStatus = 'loading';
this.decoder = new DRACOLoader();
const isLocalDevelopment = process.env.NODE_ENV === 'development';
if (isLocalDevelopment) {
this.decoder.setDecoderPath('/draco/');
} else {
this.decoder.setDecoderPath('https://www.gstatic.com/draco/v1/decoders/');
}
    // DRACOLoader.preload() kicks off decoder initialization and returns the
    // loader itself (not a Promise), so wrap the call in our own Promise.
    this.decoderPromise = Promise.resolve().then(() => {
      this.decoder!.preload();
      this.decoderStatus = 'ready';
    });
return this.decoderPromise;
}
async getDecoder(): Promise<DRACOLoader> {
await this.initializeDecoder();
return this.decoder!;
}
updateStats(decodingTime: number): void {
this.stats.decodingTime += decodingTime;
this.stats.totalModelsDecoded++;
    // performance.memory is a non-standard, Chrome-only API, so cast and guard.
    this.stats.memoryUsage = (performance as any).memory?.usedJSHeapSize || 0;
}
getStats(): typeof this.stats {
return { ...this.stats };
}
dispose(): void {
if (this.decoder) {
this.decoder.dispose();
this.decoder = null;
this.decoderStatus = 'uninitialized';
this.decoderPromise = null;
}
}
}
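A minimal usage sketch (assuming GLTFLoader is imported from the three.js examples), showing the shared singleton and its stats:

// Share one decoder instance across all GLTF loaders and inspect decoding stats.
const sharedLoader = new GLTFLoader();
sharedLoader.setDRACOLoader(await DracoDecoderManager.getInstance().getDecoder());
console.log(DracoDecoderManager.getInstance().getStats());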
Now let's implement a system that dynamically chooses a loading strategy based on a model's size and importance. (The PriorityQueue helper it relies on is sketched after the class.)
import * as THREE from 'three';
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js';

interface ModelLoadingConfig {
priority: 'high' | 'medium' | 'low';
compressionType: 'draco' | 'none';
preload: boolean;
}
class ModelLoadingStrategy {
private static readonly SIZE_THRESHOLDS = {
SMALL: 2 * 1024 * 1024, // 2MB
MEDIUM: 10 * 1024 * 1024 // 10MB
};
  private loadingQueue: PriorityQueue<string> = new PriorityQueue<string>();
private loadedModels: Map<string, THREE.Object3D> = new Map();
private modelConfigs: Map<string, ModelLoadingConfig> = new Map();
constructor(private gltfLoader: GLTFLoader) {
this.setupLoaders();
}
private setupLoaders(): void {
const dracoManager = DracoDecoderManager.getInstance();
dracoManager.getDecoder().then(decoder => {
this.gltfLoader.setDRACOLoader(decoder);
});
}
async loadModel(url: string, config: ModelLoadingConfig): Promise<THREE.Object3D> {
if (this.loadedModels.has(url)) {
return this.loadedModels.get(url)!;
}
this.modelConfigs.set(url, config);
if (config.preload) {
const model = await this.loadModelWithRetry(url);
this.loadedModels.set(url, model);
return model;
} else {
this.loadingQueue.enqueue(url, config.priority);
return this.loadModelWhenNeeded(url);
}
}
private async loadModelWithRetry(url: string, retries = 3): Promise<THREE.Object3D> {
let lastError;
for (let i = 0; i < retries; i++) {
try {
const startTime = performance.now();
const gltf = await this.gltfLoader.loadAsync(url);
const endTime = performance.now();
DracoDecoderManager.getInstance().updateStats(endTime - startTime);
return gltf.scene;
} catch (error) {
lastError = error;
await new Promise(resolve => setTimeout(resolve, 1000 * Math.pow(2, i)));
}
}
throw new Error(`Failed to load model after ${retries} attempts: ${lastError}`);
}
private async loadModelWhenNeeded(url: string): Promise<THREE.Object3D> {
return new Promise((resolve) => {
const checkQueue = async () => {
if (this.loadingQueue.peek() === url) {
const model = await this.loadModelWithRetry(url);
this.loadedModels.set(url, model);
this.loadingQueue.dequeue();
resolve(model);
} else {
setTimeout(checkQueue, 100);
}
};
checkQueue();
});
}
}
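ModelLoadingStrategy assumes a PriorityQueue helper that isn't part of any standard library. A minimal sketch, supporting only the three priority labels used above, could look like this:

// Minimal priority queue keyed by the three priority labels used above.
class PriorityQueue<T> {
  private buckets: Record<'high' | 'medium' | 'low', T[]> = {
    high: [],
    medium: [],
    low: []
  };
  enqueue(item: T, priority: 'high' | 'medium' | 'low'): void {
    this.buckets[priority].push(item);
  }
  peek(): T | undefined {
    return this.buckets.high[0] ?? this.buckets.medium[0] ?? this.buckets.low[0];
  }
  dequeue(): T | undefined {
    for (const level of ['high', 'medium', 'low'] as const) {
      if (this.buckets[level].length > 0) return this.buckets[level].shift();
    }
    return undefined;
  }
}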
The following is an automated model optimization script that can be integrated into a CI/CD pipeline.
import { exec as execCallback } from 'child_process';
import { promisify } from 'util';
import * as fs from 'fs/promises';
import * as path from 'path';

// Promisified exec so the CLI invocations below can be awaited.
const exec = promisify(execCallback);
class ModelOptimizationPipeline {
private static readonly OPTIMIZATION_CONFIGS = {
ANIMATION: {
commands: [
'--compress draco',
'--texture-compress webp',
'--no-instancing'
]
},
STATIC: {
commands: [
'--compress draco',
'--texture-compress webp',
'--instance-materials'
]
}
};
constructor(
private inputDir: string,
private outputDir: string
) {}
async optimizeModels(): Promise<void> {
const files = await this.getAllModels(this.inputDir);
for (const file of files) {
try {
await this.optimizeModel(file);
} catch (error) {
console.error(`Failed to optimize ${file}:`, error);
}
}
}
private async getAllModels(dir: string): Promise<string[]> {
const entries = await fs.readdir(dir, { withFileTypes: true });
const files = await Promise.all(entries.map(async (entry) => {
const res = path.resolve(dir, entry.name);
return entry.isDirectory() ? this.getAllModels(res) : res;
}));
return files
.flat()
.filter(file => file.endsWith('.glb') || file.endsWith('.gltf'));
}
private async optimizeModel(inputPath: string): Promise<void> {
const stats = await fs.stat(inputPath);
const fileName = path.basename(inputPath);
const outputPath = path.join(this.outputDir, fileName);
    // Check whether the model contains animations
    const hasAnimation = await this.checkForAnimations(inputPath);
    // OPTIMIZATION_CONFIGS is static, so reference it via the class
    const config = hasAnimation
      ? ModelOptimizationPipeline.OPTIMIZATION_CONFIGS.ANIMATION
      : ModelOptimizationPipeline.OPTIMIZATION_CONFIGS.STATIC;
const command = `gltf-transform optimize ${inputPath} ${outputPath} ${config.commands.join(' ')}`;
await exec(command);
    // Log the optimization results
const optimizedStats = await fs.stat(outputPath);
const compressionRatio = ((stats.size - optimizedStats.size) / stats.size * 100).toFixed(2);
console.log(`Optimized ${fileName}:`);
console.log(`- Original size: ${(stats.size / 1024 / 1024).toFixed(2)}MB`);
console.log(`- Optimized size: ${(optimizedStats.size / 1024 / 1024).toFixed(2)}MB`);
console.log(`- Compression ratio: ${compressionRatio}%`);
}
  private async checkForAnimations(filePath: string): Promise<boolean> {
    // Heuristic: look for the glTF "animations" key in the file's text content.
    // (A hex dump would never contain the literal string, so decode as UTF-8.)
    const content = await fs.readFile(filePath);
    return content.toString('utf8').includes('"animations"');
  }
}
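A sketch of how the pipeline might be invoked from a build step; the directory paths here are assumptions:

// Hypothetical build-time entry point (e.g. run via `node scripts/optimize-models.js`).
const pipeline = new ModelOptimizationPipeline('./assets/models', './public/models');
await pipeline.optimizeModels();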
Next is a system for monitoring loading performance and memory usage.
class ModelPerformanceMonitor {
  // Declared on the instance (not static) so the `this.PERFORMANCE_THRESHOLDS`
  // references below resolve correctly.
  private readonly PERFORMANCE_THRESHOLDS = {
    LOADING_TIME: 2000, // 2 seconds
    MEMORY_USAGE: 100 * 1024 * 1024 // 100MB
  };
private measurements: Map<string, {
loadingTime: number;
memoryUsage: number;
decodingTime: number;
size: number;
}> = new Map();
startMeasurement(modelId: string): void {
performance.mark(`${modelId}-start`);
}
endMeasurement(modelId: string, size: number): void {
performance.mark(`${modelId}-end`);
const measure = performance.measure(
modelId,
`${modelId}-start`,
`${modelId}-end`
);
this.measurements.set(modelId, {
loadingTime: measure.duration,
      memoryUsage: (performance as any).memory?.usedJSHeapSize || 0, // Chrome-only API
decodingTime: DracoDecoderManager.getInstance().getStats().decodingTime,
size
});
this.analyzePerformance(modelId);
}
private analyzePerformance(modelId: string): void {
const stats = this.measurements.get(modelId)!;
if (stats.loadingTime > this.PERFORMANCE_THRESHOLDS.LOADING_TIME) {
console.warn(`Loading time for ${modelId} exceeded threshold:`, {
actual: stats.loadingTime,
threshold: this.PERFORMANCE_THRESHOLDS.LOADING_TIME
});
this.suggestOptimizations(modelId);
}
if (stats.memoryUsage > this.PERFORMANCE_THRESHOLDS.MEMORY_USAGE) {
console.warn(`Memory usage for ${modelId} exceeded threshold:`, {
actual: stats.memoryUsage,
threshold: this.PERFORMANCE_THRESHOLDS.MEMORY_USAGE
});
}
}
private suggestOptimizations(modelId: string): void {
const stats = this.measurements.get(modelId)!;
const suggestions: string[] = [];
    // Suggestions based on loading time
    if (stats.loadingTime > this.PERFORMANCE_THRESHOLDS.LOADING_TIME) {
      if (stats.decodingTime / stats.loadingTime > 0.5) {
        suggestions.push('Consider lowering the DRACO compression level to shorten decoding time');
      }
      if (stats.size > 5 * 1024 * 1024) {
        suggestions.push('Consider reducing texture resolution or introducing an LOD system');
      }
    }
    // Suggestions based on memory usage
    if (stats.memoryUsage > this.PERFORMANCE_THRESHOLDS.MEMORY_USAGE) {
      suggestions.push('Simplify geometry and revisit the memory management strategy');
      suggestions.push('Release memory for unused models sooner');
    }
    if (suggestions.length > 0) {
      console.info(`Optimization suggestions for ${modelId}:`, suggestions.join('\n'));
    }
}
generateReport(): string {
    let report = '# Model Performance Report\n\n';
    this.measurements.forEach((stats, modelId) => {
      report += `## ${modelId}\n`;
      report += `- Loading time: ${stats.loadingTime.toFixed(2)}ms\n`;
      report += `- Memory usage: ${(stats.memoryUsage / 1024 / 1024).toFixed(2)}MB\n`;
      report += `- Decoding time: ${stats.decodingTime.toFixed(2)}ms\n`;
      report += `- File size: ${(stats.size / 1024 / 1024).toFixed(2)}MB\n\n`;
});
return report;
}
}
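A minimal sketch of wrapping a single load with the monitor; the model URL, id, and byte size are illustrative, and `gltfLoader` is assumed to be a DRACO-enabled GLTFLoader:

const monitor = new ModelPerformanceMonitor();
monitor.startMeasurement('character.glb');
const gltf = await gltfLoader.loadAsync('models/character.glb');
monitor.endMeasurement('character.glb', 4 * 1024 * 1024); // pass the known file size in bytes
console.log(monitor.generateReport());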
Finally, let's combine everything implemented above into a complete system that can be used in a real project.
import * as THREE from 'three';
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js';
// Node-only APIs: this manager assumes an environment where fs/path are available.
import * as fs from 'fs/promises';
import * as path from 'path';

class ModelManager {
private compressionManager: ModelCompressionManager;
private loadingStrategy: ModelLoadingStrategy;
private performanceMonitor: ModelPerformanceMonitor;
private modelCache: Map<string, THREE.Object3D> = new Map();
constructor(private scene: THREE.Scene) {
this.compressionManager = new ModelCompressionManager();
this.loadingStrategy = new ModelLoadingStrategy(new GLTFLoader());
this.performanceMonitor = new ModelPerformanceMonitor();
this.setupMemoryManagement();
}
private setupMemoryManagement(): void {
    // Periodic cleanup for memory management
    setInterval(() => {
      this.cleanupUnusedModels();
    }, 60000); // check every minute
}
async loadModel(url: string, options: {
priority?: 'high' | 'medium' | 'low';
compress?: boolean;
preload?: boolean;
} = {}): Promise<THREE.Object3D> {
const modelId = path.basename(url);
this.performanceMonitor.startMeasurement(modelId);
try {
      // Compress first if requested
if (options.compress) {
const compressedUrl = await this.handleCompression(url);
url = compressedUrl;
}
      // Loading configuration
const config: ModelLoadingConfig = {
priority: options.priority || 'medium',
compressionType: options.compress ? 'draco' : 'none',
preload: options.preload || false
};
      // Load the model and tag its root so cleanupUnusedModels can match it later
      const model = await this.loadingStrategy.loadModel(url, config);
      model.userData.modelId = modelId;
      this.modelCache.set(modelId, model);
      this.scene.add(model);
      // Note: fs.stat requires Node APIs, so this only works outside a plain browser bundle.
      const stats = await fs.stat(url);
this.performanceMonitor.endMeasurement(modelId, stats.size);
return model;
} catch (error) {
console.error(`Failed to load model ${url}:`, error);
throw error;
}
}
private async handleCompression(url: string): Promise<string> {
const stats = await fs.stat(url);
    // Only compress when the file size exceeds the threshold
    if (stats.size > 5 * 1024 * 1024) {
      const compressedUrl = url.replace(/\.(glb|gltf)$/, '_compressed.$1');
      if (!(await this.fileExists(compressedUrl))) {
        // compressModel is static, so call it on the class
        await ModelCompressionManager.compressModel(url, compressedUrl);
      }
return compressedUrl;
}
return url;
}
  private cleanupUnusedModels(): void {
const visibleModels = new Set<string>();
    // Collect models that are currently visible in the scene
this.scene.traverseVisible(object => {
if (object.userData.modelId) {
visibleModels.add(object.userData.modelId);
}
});
    // Remove models that are no longer in use
for (const [modelId, model] of this.modelCache.entries()) {
if (!visibleModels.has(modelId)) {
this.scene.remove(model);
this.modelCache.delete(modelId);
        // Release GPU resources: an Object3D has no geometry/material of its own,
        // so traverse its child meshes and dispose what they hold.
        model.traverse(child => {
          const mesh = child as THREE.Mesh;
          if (!mesh.isMesh) return;
          mesh.geometry.dispose();
          const materials = Array.isArray(mesh.material) ? mesh.material : [mesh.material];
          materials.forEach(material => {
            // Dispose textures and other disposable resources attached to the material
            Object.values(material).forEach(value => {
              if (value && typeof value.dispose === 'function') {
                value.dispose();
              }
            });
            material.dispose();
          });
        });
}
}
}
private async fileExists(path: string): Promise<boolean> {
try {
await fs.access(path);
return true;
} catch {
return false;
}
}
getPerformanceReport(): string {
return this.performanceMonitor.generateReport();
}
}
// Initialize the model management system (assumes an existing THREE.Scene named `scene`)
const modelManager = new ModelManager(scene);
// Rough check for a high-end device (WebGPU support as a proxy)
const isHighEndDevice = () => {
const gpu = (navigator as any).gpu;
return gpu && gpu.requestAdapter;
};
// Choose a loading strategy based on device capability
const loadingOptions = {
  priority: isHighEndDevice() ? 'high' : 'medium',
  compress: !isHighEndDevice(), // use compression only on low-end devices
  preload: isHighEndDevice() // preload only on high-end devices
} as const;
// Load the model
try {
const character = await modelManager.loadModel(
'models/character.glb',
loadingOptions
);
  // Print the performance report
console.log(modelManager.getPerformanceReport());
} catch (error) {
  console.error('Model loading failed:', error);
}
In this post, we implemented advanced model optimization strategies built around DRACO compression, along with a complete system ready for real-world use.
The key points are:
- Choose quantization settings and compression level based on file size.
- Manage the DRACO decoder as a shared singleton and reuse it across loaders.
- Queue model loading by priority, with retry and exponential backoff.
- Automate compression in the CI/CD pipeline using gltf-transform.
- Monitor loading time, decoding time, and memory usage, and surface optimization suggestions.
With these systems in place, even large-scale 3D web applications can manage models efficiently and deliver an optimized user experience.