
MSA 환경에서 성능 최적화를 위한 다층 캐싱 전략은 필수적입니다.
이 포스팅에서는 실제 프로젝트에서 경험한 다층 캐싱 전략과 그 구현 과정을 정리하고자 합니다.
# CloudFront cache behavior configuration.
# NOTE: the original snippet had lost all YAML indentation; structure restored below.
CacheBehaviors:
  - PathPattern: "/api/v1/products/*"
    TargetOriginId: API_GATEWAY
    ViewerProtocolPolicy: redirect-to-https
    # TTL window for cached responses (seconds).
    MinTTL: 60
    DefaultTTL: 300
    MaxTTL: 600
    # NOTE(review): ForwardedValues is the legacy configuration and CloudFront
    # rejects it when CachePolicyId/OriginRequestPolicyId are also set — keep
    # only one of the two mechanisms; the forwarded query string and the
    # Accept/Authorization headers should live inside the policies instead.
    ForwardedValues:
      QueryString: true
      Headers:
        - Accept
        - Authorization
    CachePolicyId: custom-cache-policy
    OriginRequestPolicyId: custom-origin-request-policy
  # Lambda@Edge function handling cache invalidation requests.
  - PathPattern: "/api/v1/products/invalidate/*"
    TargetOriginId: LAMBDA_EDGE
    LambdaFunctionAssociations:
      - EventType: viewer-request
        LambdaFunctionARN: arn:aws:lambda:us-east-1:123456789012:function:cache-invalidator:1
# OpenAPI definition for the product endpoint with API Gateway caching.
# NOTE: the original snippet had lost all YAML indentation; structure restored below.
openapi: 3.0.0
paths:
  /products/{productId}:
    get:
      parameters:
        - name: productId
          in: path
          required: true
          schema:
            type: string
      # AWS API Gateway integration extension.
      x-amazon-apigateway-integration:
        type: HTTP_PROXY
        uri: ${backend_url}
        httpMethod: GET
        requestParameters:
          integration.request.path.productId: method.request.path.productId
        # Cache key is the product id, so each product is cached independently.
        cacheNamespace: product-cache
        cacheKeyParameters:
          - method.request.path.productId
        # NOTE(review): cachingEnabled / cacheTtlInSeconds are normally stage-level
        # method settings rather than integration properties — confirm against the
        # deployed stage configuration.
        cachingEnabled: true
        cacheTtlInSeconds: 300
@Configuration
@EnableCaching
public class CacheConfig extends CachingConfigurerSupport {

    /**
     * Builds the Redis-backed {@link RedisCacheManager}.
     *
     * Defaults: 5-minute TTL, string keys, JSON-serialized values.
     * Per-cache overrides: "products" lives 10 minutes, "categories" 1 hour.
     *
     * @param connectionFactory Redis connection factory injected by Spring
     * @return configured cache manager
     */
    @Bean
    public RedisCacheManager cacheManager(RedisConnectionFactory connectionFactory) {
        RedisSerializationContext.SerializationPair<String> keySerializer =
                RedisSerializationContext.SerializationPair.fromSerializer(new StringRedisSerializer());
        RedisSerializationContext.SerializationPair<Object> valueSerializer =
                RedisSerializationContext.SerializationPair.fromSerializer(new GenericJackson2JsonRedisSerializer());

        RedisCacheConfiguration defaults = RedisCacheConfiguration.defaultCacheConfig()
                .entryTtl(Duration.ofMinutes(5))
                .serializeKeysWith(keySerializer)
                .serializeValuesWith(valueSerializer);

        RedisCacheConfiguration productsConfig =
                RedisCacheConfiguration.defaultCacheConfig().entryTtl(Duration.ofMinutes(10));
        RedisCacheConfiguration categoriesConfig =
                RedisCacheConfiguration.defaultCacheConfig().entryTtl(Duration.ofHours(1));

        return RedisCacheManager.builder(connectionFactory)
                .cacheDefaults(defaults)
                .withCacheConfiguration("products", productsConfig)
                .withCacheConfiguration("categories", categoriesConfig)
                .build();
    }
}
@Service
@Slf4j
public class ProductService {

    private final ProductRepository productRepository;
    // Value type widened to Object: the original RedisTemplate<String, Product>
    // could not store the List<Product> written by refreshHotProducts (type error).
    private final RedisTemplate<String, Object> redisTemplate;

    // The original declared these fields final but never assigned them, which
    // does not compile; explicit constructor injection fixes that.
    public ProductService(ProductRepository productRepository,
                          RedisTemplate<String, Object> redisTemplate) {
        this.productRepository = productRepository;
        this.redisTemplate = redisTemplate;
    }

    /**
     * Returns the product, served from the "products" cache on a hit.
     *
     * @param productId id to look up
     * @return the product
     * @throws ProductNotFoundException if no product exists for the id
     */
    @Cacheable(value = "products", key = "#productId", unless = "#result == null")
    public Product getProduct(String productId) {
        log.info("Cache miss for product: {}", productId);
        return productRepository.findById(productId)
                .orElseThrow(() -> new ProductNotFoundException(productId));
    }

    /**
     * Persists the product and evicts its cache entry so the next read
     * repopulates it. (Consider @CachePut if a write-through refresh is preferred.)
     */
    @CacheEvict(value = "products", key = "#product.id")
    public Product updateProduct(Product product) {
        log.info("Evicting cache for product: {}", product.getId());
        return productRepository.save(product);
    }

    /**
     * Refreshes the hot-products list every 5 minutes, cached for 10 minutes
     * so the entry survives one missed run.
     */
    @Scheduled(fixedRate = 300000) // every 5 minutes
    public void refreshHotProducts() {
        List<Product> hotProducts = productRepository.findTop100ByOrderByViewCountDesc();
        String cacheKey = "hot_products";
        redisTemplate.opsForValue().set(cacheKey, hotProducts, 10, TimeUnit.MINUTES);
    }
}
@Configuration
public class MetricsConfig {

    /**
     * In-memory Micrometer registry for collecting cache metrics.
     * Swap for a backend-specific registry (Prometheus, CloudWatch, ...) in production.
     */
    @Bean
    public MeterRegistry meterRegistry() {
        SimpleMeterRegistry registry = new SimpleMeterRegistry();
        return registry;
    }
}
@Component
@Slf4j
public class CacheMetrics {

    private final MeterRegistry meterRegistry;
    private final RedisTemplate<String, Object> redisTemplate;

    public CacheMetrics(MeterRegistry meterRegistry, RedisTemplate<String, Object> redisTemplate) {
        this.meterRegistry = meterRegistry;
        this.redisTemplate = redisTemplate;
        recordMetrics();
    }

    // Registers the cache gauges/counters with Micrometer.
    private void recordMetrics() {
        // BUG FIX: the original passed redisTemplate.keys("*") directly, which
        // snapshots the key set once at registration time so the gauge never
        // changed. Passing the template and sampling inside the function makes
        // the gauge live. keys(...) may return null on a broken connection.
        // NOTE(review): KEYS * is O(n) and blocks Redis — fine for small test
        // instances, but prefer DBSIZE/SCAN-based counting in production.
        Gauge.builder("cache.size", redisTemplate, template -> {
                    Set<String> keys = template.keys("*");
                    return keys == null ? 0 : keys.size();
                })
                .tag("cache", "redis")
                .description("Number of cache entries")
                .register(meterRegistry);
        Counter.builder("cache.hits")
                .tag("cache", "redis")
                .description("Number of cache hits")
                .register(meterRegistry);
        Counter.builder("cache.misses")
                .tag("cache", "redis")
                .description("Number of cache misses")
                .register(meterRegistry);
    }

    // Periodic visibility into cache behavior; counters are incremented elsewhere.
    @Scheduled(fixedRate = 60000) // every minute
    public void logCacheStatistics() {
        log.info("Current cache size: {}", meterRegistry.get("cache.size").gauge().value());
        log.info("Cache hits: {}", meterRegistry.get("cache.hits").counter().count());
        log.info("Cache misses: {}", meterRegistry.get("cache.misses").counter().count());
    }
}
이러한 구현을 통해 다음과 같은 캐시 워밍업 전략을 적용할 수 있으며, 실제 운영 시 발생할 수 있는 이슈들도 함께 고려해야 합니다:
애플리케이션 시작 시 자동 워밍업
인기 상품 우선 워밍업
점진적 워밍업으로 시스템 부하 분산
정기적인 워밍업 스케줄링
@Component
@Slf4j
public class CacheWarmupService {

    private final ProductRepository productRepository;
    private final CategoryRepository categoryRepository;
    private final RedisTemplate<String, Object> redisTemplate;
    private final ProductService productService;

    // The original declared these fields final but never assigned them, which
    // does not compile; constructor injection wires all four dependencies.
    public CacheWarmupService(ProductRepository productRepository,
                              CategoryRepository categoryRepository,
                              RedisTemplate<String, Object> redisTemplate,
                              ProductService productService) {
        this.productRepository = productRepository;
        this.categoryRepository = categoryRepository;
        this.redisTemplate = redisTemplate;
        this.productService = productService;
    }

    /**
     * Warms the caches once at application startup.
     *
     * NOTE(review): calling warmupFrequentlyAccessedProducts() through `this`
     * is a self-invocation that bypasses the Spring proxy, so @Async has no
     * effect and it runs synchronously — move it to a separate bean or trigger
     * it from an ApplicationReadyEvent listener for true async warmup.
     */
    @PostConstruct
    public void warmupOnStartup() {
        log.info("Starting cache warmup process...");
        warmupHotProducts();
        warmupCategories();
        warmupFrequentlyAccessedProducts();
    }

    /** Re-warms the hot-products cache daily during the low-traffic window. */
    @Scheduled(cron = "0 0 3 * * *") // every day at 03:00
    public void scheduledWarmup() {
        log.info("Starting scheduled cache warmup...");
        warmupHotProducts();
    }

    // Caches the top-1000 most-viewed products for 30 minutes each.
    // Warmup is best-effort: failures are logged, never propagated.
    private void warmupHotProducts() {
        try {
            log.info("Warming up hot products cache...");
            List<Product> hotProducts = productRepository
                    .findTop1000ByOrderByViewCountDesc();
            hotProducts.forEach(product -> {
                String cacheKey = "product:" + product.getId();
                redisTemplate.opsForValue().set(
                        cacheKey,
                        product,
                        30,
                        TimeUnit.MINUTES
                );
            });
            log.info("Completed warming up {} hot products", hotProducts.size());
        } catch (Exception e) {
            log.error("Error during hot products warmup", e);
        }
    }

    // Caches every category for 2 hours (category data changes rarely).
    private void warmupCategories() {
        try {
            log.info("Warming up categories cache...");
            List<Category> categories = categoryRepository.findAll();
            categories.forEach(category -> {
                String cacheKey = "category:" + category.getId();
                redisTemplate.opsForValue().set(
                        cacheKey,
                        category,
                        2,
                        TimeUnit.HOURS
                );
            });
            log.info("Completed warming up {} categories", categories.size());
        } catch (Exception e) {
            log.error("Error during categories warmup", e);
        }
    }

    /**
     * Gradual warmup in batches of 100, throttled to ~100 products/second so
     * the warmup does not overload the DB or Redis. Each getProduct call
     * populates the "products" cache via its @Cacheable annotation.
     */
    @Async
    public void warmupFrequentlyAccessedProducts() {
        // getFrequentlyAccessedProductIds is presumably defined elsewhere in this class/file.
        List<String> productIds = getFrequentlyAccessedProductIds();
        int batchSize = 100;
        List<List<String>> batches = Lists.partition(productIds, batchSize);
        batches.forEach(batch -> {
            try {
                Thread.sleep(1000); // throttle: one batch per second
                batch.forEach(productService::getProduct);
                log.info("Processed batch of {} products", batch.size());
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can observe cancellation.
                Thread.currentThread().interrupt();
                log.error("Warmup interrupted", e);
            }
        });
    }
}
## 2. 장애상황 대응 전략 구현
- **서킷 브레이커 패턴 적용**
- **다중 레벨 캐시 (Local -> Redis -> DB)**
- **헬스 체크 및 모니터링**
- **폴백 메커니즘 구현**
// NOTE: @EnableCircuitBreaker is the deprecated Hystrix-era annotation and is
// not needed for Spring Cloud CircuitBreaker with Resilience4J.
@Configuration
public class FaultToleranceConfig {

    /**
     * Circuit-breaker factory: opens at a 50% failure rate over a sliding
     * window of 5 calls, and stays open for 10 seconds before half-open probes.
     */
    @Bean
    public Resilience4JCircuitBreakerFactory circuitBreakerFactory() {
        CircuitBreakerConfig circuitBreakerConfig = CircuitBreakerConfig.custom()
                .failureRateThreshold(50)
                .waitDurationInOpenState(Duration.ofSeconds(10))
                .slidingWindowSize(5)
                .build();

        // BUG FIX: Resilience4JCircuitBreakerFactory has no constructor taking a
        // CircuitBreakerConfig; defaults are registered via configureDefault
        // (Spring Cloud CircuitBreaker API). Verify the factory constructor
        // arguments against the Spring Cloud version in use — newer releases
        // require registry arguments.
        Resilience4JCircuitBreakerFactory factory = new Resilience4JCircuitBreakerFactory();
        factory.configureDefault(id -> new Resilience4JConfigBuilder(id)
                .circuitBreakerConfig(circuitBreakerConfig)
                .build());
        return factory;
    }
}
@Service
@Slf4j
public class ResilientProductService {
private final ProductRepository productRepository;
private final RedisTemplate<String, Product> redisTemplate;
private final CircuitBreaker circuitBreaker;
private final LocalCache<String, Product> localCache;
public ResilientProductService(
CircuitBreakerFactory circuitBreakerFactory,
ProductRepository productRepository,
RedisTemplate<String, Product> redisTemplate) {
this.productRepository = productRepository;
this.redisTemplate = redisTemplate;
this.circuitBreaker = circuitBreakerFactory.create("productService");
this.localCache = Caffeine.newBuilder()
.maximumSize(1000)
.expireAfterWrite(5, TimeUnit.MINUTES)
.build();
}
public Product getProduct(String productId) {
// 1. 로컬 캐시 확인
Product localProduct = localCache.getIfPresent(productId);
if (localProduct != null) {
return localProduct;
}
try {
// 2. Redis 캐시 확인
return circuitBreaker.executeSupplier(() -> {
String cacheKey = "product:" + productId;
Product product = (Product) redisTemplate.opsForValue().get(cacheKey);
if (product != null) {
localCache.put(productId, product);
return product;
}
// 3. DB에서 조회
product = productRepository.findById(productId)
.orElseThrow(() -> new ProductNotFoundException(productId));
redisTemplate.opsForValue().set(cacheKey, product, 30, TimeUnit.MINUTES);
localCache.put(productId, product);
return product;
});
} catch (Exception e) {
log.error("Error fetching product {}", productId, e);
// 4. 폴백 전략: 로컬 캐시의 만료된 데이터 사용
return localCache.getIfPresent(productId);
}
}
@Scheduled(fixedRate = 60000) // 1분마다 실행
public void checkCacheHealth() {
try {
redisTemplate.opsForValue().get("health-check");
log.info("Redis cache is healthy");
} catch (Exception e) {
log.error("Redis cache health check failed", e);
// 알림 발송 로직
notifyTeam("Redis cache is unhealthy: " + e.getMessage());
}
}
}
## 3. 성능 테스트 시나리오
- **캐시 유무에 따른 성능 비교**
- **동시성 테스트**
- **장애 상황 시뮬레이션**
- **메트릭스 수집 및 분석**
@SpringBootTest
@Slf4j // BUG FIX: the original used `log` without declaring any logger — would not compile
public class CachePerformanceTest {

    @Autowired
    private ProductService productService;

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    /**
     * Compares a cold (cache-miss) pass against a warm (cache-hit) pass over
     * the same ids and logs the relative improvement.
     */
    @Test
    @DisplayName("캐시 성능 테스트")
    void performanceBenchmark() {
        int numberOfRequests = 10000;
        List<String> productIds = prepareTestData(numberOfRequests);

        // BUG FIX: clear Redis first — otherwise the "without cache" pass can
        // hit entries left over from earlier tests and measure nothing distinct.
        clearCache();

        long withoutCacheTime = timeRequests(productIds); // cold pass: misses go to the DB
        long withCacheTime = timeRequests(productIds);    // warm pass: entries cached by pass 1

        log.info("Performance Test Results:");
        log.info("Requests without cache: {}ms", withoutCacheTime);
        log.info("Requests with cache: {}ms", withCacheTime);
        log.info("Performance improvement: {}%",
                ((withoutCacheTime - withCacheTime) / (float) withoutCacheTime) * 100);
    }

    // Wall-clock time to fetch all ids in parallel; per-id errors are logged, not thrown.
    private long timeRequests(List<String> productIds) {
        long startTime = System.currentTimeMillis();
        productIds.parallelStream()
                .forEach(id -> {
                    try {
                        productService.getProduct(id);
                    } catch (Exception e) {
                        log.error("Error fetching product {}", id, e);
                    }
                });
        return System.currentTimeMillis() - startTime;
    }

    // Deletes every key. KEYS * is O(n) — acceptable against a test instance only.
    private void clearCache() {
        Set<String> keys = redisTemplate.keys("*");
        if (keys != null && !keys.isEmpty()) {
            redisTemplate.delete(keys);
        }
    }

    /**
     * Verifies the local-cache fallback path when Redis is unreachable.
     *
     * NOTE(review): closing a single connection does not actually take Redis
     * down — the pool simply hands out a new one. Stopping the Redis container
     * (e.g. via Testcontainers) gives a realistic outage simulation.
     */
    @Test
    @DisplayName("장애 상황 시뮬레이션")
    void faultToleranceTest() {
        String productId = "test-product-1";
        redisTemplate.getConnectionFactory().getConnection().close();

        Product product = productService.getProduct(productId);
        assertNotNull(product);
        assertEquals(productId, product.getId());
    }
}