architecture

Horizontal Application Scaling

Practices and patterns for scaling applications horizontally

#scaling #architecture #load-balancing #microservices

Horizontal Application Scaling

Horizontal scaling means adding more servers and spreading the load across them, in contrast to vertical scaling, which adds resources (CPU, RAM) to a single server.

Stateless applications

The state problem

// ❌ Bad: state kept in application memory
class StatefulCartService {
  private carts = new Map<string, Cart>();
  
  addItem(userId: string, item: CartItem): void {
    const cart = this.carts.get(userId) || { items: [] };
    cart.items.push(item);
    this.carts.set(userId, cart);
  }
  
  getCart(userId: string): Cart {
    return this.carts.get(userId) || { items: [] };
  }
}

// Problem: with horizontal scaling, different requests
// land on different servers, and those servers
// will not find the cart data

Solution: external storage

// ✅ Good: state kept in external storage
class StatelessCartService {
  constructor(private redis: Redis) {}
  
  async addItem(userId: string, item: CartItem): Promise<void> {
    const key = `cart:${userId}`;
    const cart = await this.getCart(userId);
    cart.items.push(item);
    await this.redis.set(key, JSON.stringify(cart), 'EX', 3600);
  }
  
  async getCart(userId: string): Promise<Cart> {
    const key = `cart:${userId}`;
    const data = await this.redis.get(key);
    return data ? JSON.parse(data) : { items: [] };
  }
}
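
Note that the read-modify-write in addItem above is still not atomic: two instances handling requests for the same user concurrently can overwrite each other's cart. A minimal sketch that sidesteps this by storing items in a Redis list (the key layout mirrors the example above and is an assumption, not part of the original code):

class AtomicCartService {
  constructor(private redis: Redis) {}

  async addItem(userId: string, item: CartItem): Promise<void> {
    const key = `cart:${userId}`;
    // RPUSH appends atomically on the Redis side, so concurrent instances
    // cannot lose each other's writes
    await this.redis.rpush(key, JSON.stringify(item));
    await this.redis.expire(key, 3600);
  }

  async getCart(userId: string): Promise<Cart> {
    const items = await this.redis.lrange(`cart:${userId}`, 0, -1);
    return { items: items.map((raw) => JSON.parse(raw)) };
  }
}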

Sessions

Sticky Sessions (avoid)

# Nginx sticky sessions
upstream backend {
    ip_hash;  # A given user always hits the same server
    server backend1:3000;
    server backend2:3000;
    server backend3:3000;
}

Problems:

  • Uneven load distribution
  • Traffic disruption when a server goes down
  • Harder to scale out

External session storage

import session from 'express-session';
import RedisStore from 'connect-redis';
import { createClient } from 'redis';

const redisClient = createClient({
  url: 'redis://redis.example.com:6379'
});
redisClient.connect().catch(console.error);

app.use(session({
  store: new RedisStore({ client: redisClient }),
  secret: process.env.SESSION_SECRET,
  resave: false,
  saveUninitialized: false,
  cookie: {
    secure: true,
    httpOnly: true,
    maxAge: 24 * 60 * 60 * 1000 // 24 hours
  }
}));

JWT tokens (stateless)

import jwt from 'jsonwebtoken';

class AuthService {
  generateToken(user: User): string {
    return jwt.sign(
      {
        userId: user.id,
        email: user.email,
        role: user.role
      },
      process.env.JWT_SECRET,
      { expiresIn: '24h' }
    );
  }
  
  verifyToken(token: string): TokenPayload {
    return jwt.verify(token, process.env.JWT_SECRET) as TokenPayload;
  }
}

// Middleware
function authenticate(req: Request, res: Response, next: NextFunction) {
  const token = req.headers.authorization?.split(' ')[1];
  
  if (!token) {
    return res.status(401).json({ error: 'No token provided' });
  }
  
  try {
    const payload = authService.verifyToken(token);
    req.user = payload;
    next();
  } catch (error) {
    return res.status(401).json({ error: 'Invalid token' });
  }
}

Load Balancing

Nginx Load Balancer

upstream backend {
    least_conn;  # Least connections algorithm
    
    server backend1:3000 weight=3;
    server backend2:3000 weight=2;
    server backend3:3000 weight=1;
    
    # Backup server, used only when the primary servers are unavailable
    server backend4:3000 backup;
}

server {
    listen 80;
    
    location / {
        proxy_pass http://backend;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        
        # Timeouts
        proxy_connect_timeout 5s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;
        
        # Retry
        proxy_next_upstream error timeout http_502 http_503;
        proxy_next_upstream_tries 3;
    }
}

Application-level Load Balancing

class LoadBalancer {
  private currentIndex = 0;
  
  constructor(private servers: string[]) {}
  
  // Round Robin
  roundRobin(): string {
    const server = this.servers[this.currentIndex];
    this.currentIndex = (this.currentIndex + 1) % this.servers.length;
    return server;
  }
  
  // Weighted random selection: probability proportional to server weight
  weightedRandom(weights: number[]): string {
    const totalWeight = weights.reduce((sum, w) => sum + w, 0);
    const random = Math.random() * totalWeight;
    
    let sum = 0;
    for (let i = 0; i < this.servers.length; i++) {
      sum += weights[i];
      if (random < sum) {
        return this.servers[i];
      }
    }
    
    return this.servers[0];
  }
  
  // Random
  random(): string {
    const index = Math.floor(Math.random() * this.servers.length);
    return this.servers[index];
  }
}
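
A hedged usage sketch: a thin proxy that picks an upstream per request with roundRobin() and forwards it using Node's built-in http module. The backend hostnames and the listening port are illustrative assumptions.

import http from 'http';

const balancer = new LoadBalancer([
  'http://backend1:3000',
  'http://backend2:3000',
  'http://backend3:3000'
]);

http.createServer((req, res) => {
  const target = new URL(balancer.roundRobin());

  // Forward the incoming request to the chosen upstream
  const upstream = http.request(
    {
      host: target.hostname,
      port: target.port,
      path: req.url,
      method: req.method,
      headers: req.headers
    },
    (upstreamRes) => {
      res.writeHead(upstreamRes.statusCode ?? 502, upstreamRes.headers);
      upstreamRes.pipe(res);
    }
  );

  upstream.on('error', () => {
    res.writeHead(502);
    res.end('Bad gateway');
  });

  req.pipe(upstream);
}).listen(8080);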

Health Checks

Health check endpoints

import express from 'express';

const app = express();

// Liveness probe: is the server alive
app.get('/health/live', (req, res) => {
  res.status(200).json({ status: 'ok' });
});

// Readiness probe: is the server ready to accept traffic
app.get('/health/ready', async (req, res) => {
  try {
    // Check the dependencies
    await Promise.all([
      checkDatabase(),
      checkRedis(),
      checkExternalAPI()
    ]);
    
    res.status(200).json({ status: 'ready' });
  } catch (error) {
    res.status(503).json({ 
      status: 'not ready',
      error: error.message 
    });
  }
});

async function checkDatabase(): Promise<void> {
  await db.query('SELECT 1');
}

async function checkRedis(): Promise<void> {
  await redis.ping();
}

async function checkExternalAPI(): Promise<void> {
  const response = await fetch('https://api.example.com/health');
  if (!response.ok) throw new Error('API unhealthy');
}

Kubernetes Health Checks

apiVersion: apps/v1
kind: Deployment
metadata:
  name: app
spec:
  replicas: 3
  template:
    spec:
      containers:
      - name: app
        image: app:latest
        ports:
        - containerPort: 3000
        
        # Liveness probe
        livenessProbe:
          httpGet:
            path: /health/live
            port: 3000
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
          failureThreshold: 3
        
        # Readiness probe
        readinessProbe:
          httpGet:
            path: /health/ready
            port: 3000
          initialDelaySeconds: 10
          periodSeconds: 5
          timeoutSeconds: 3
          failureThreshold: 2

Graceful Shutdown

import { Server } from 'http';

class GracefulShutdown {
  private server: Server;
  private isShuttingDown = false;
  
  constructor(server: Server) {
    this.server = server;
    this.setupSignalHandlers();
  }
  
  private setupSignalHandlers(): void {
    process.on('SIGTERM', () => this.shutdown('SIGTERM'));
    process.on('SIGINT', () => this.shutdown('SIGINT'));
  }
  
  private async shutdown(signal: string): Promise<void> {
    if (this.isShuttingDown) return;
    
    console.log(`Received ${signal}, starting graceful shutdown`);
    this.isShuttingDown = true;
    
    // 1. Stop accepting new connections
    this.server.close(() => {
      console.log('HTTP server closed');
    });
    
    // 2. Wait for in-flight requests to finish
    await this.waitForActiveRequests();
    
    // 3. Close database and other connections
    await this.closeConnections();
    
    // 4. Exit the process
    process.exit(0);
  }
  
  private async waitForActiveRequests(): Promise<void> {
    return new Promise((resolve) => {
      const checkInterval = setInterval(() => {
        const activeRequests = this.getActiveRequestCount();
        
        if (activeRequests === 0) {
          clearInterval(checkInterval);
          resolve();
        } else {
          console.log(`Waiting for ${activeRequests} active requests`);
        }
      }, 1000);
      
      // Wait at most 30 seconds
      setTimeout(() => {
        clearInterval(checkInterval);
        console.log('Timeout reached, forcing shutdown');
        resolve();
      }, 30000);
    });
  }
  
  private async closeConnections(): Promise<void> {
    await Promise.all([
      db.close(),
      redis.quit(),
      messageQueue.disconnect()
    ]);
  }
  
  private getActiveRequestCount(): number {
    // Implementation depends on the framework; see the sketch below
    return 0;
  }
}

// Usage
const server = app.listen(3000);
new GracefulShutdown(server);
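
getActiveRequestCount() is left as a stub above. One possible implementation is a simple Express middleware counter; the wiring below is an assumption, not part of the original class.

// Module-level counter: incremented per request, decremented when the
// response closes (finished or aborted)
let activeRequests = 0;

app.use((req, res, next) => {
  activeRequests++;
  res.on('close', () => {
    activeRequests--;
  });
  next();
});

function getActiveRequestCount(): number {
  return activeRequests;
}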

Auto-scaling

Kubernetes HPA

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: app-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: app
  minReplicas: 2
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 80
  behavior:
    scaleUp:
      stabilizationWindowSeconds: 60
      policies:
      - type: Percent
        value: 50
        periodSeconds: 60
    scaleDown:
      stabilizationWindowSeconds: 300
      policies:
      - type: Pods
        value: 1
        periodSeconds: 60

Custom Metrics

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: app-hpa-custom
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: app
  minReplicas: 2
  maxReplicas: 20
  metrics:
  # CPU
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70
  
  # Custom metric: requests per second
  - type: Pods
    pods:
      metric:
        name: http_requests_per_second
      target:
        type: AverageValue
        averageValue: "1000"
  
  # Custom metric: queue length
  - type: External
    external:
      metric:
        name: queue_length
        selector:
          matchLabels:
            queue: orders
      target:
        type: AverageValue
        averageValue: "100"

Distributed Caching

class DistributedCache {
  constructor(private redis: Redis) {}
  
  async get(key: string): Promise<any> {
    const data = await this.redis.get(key);
    return data ? JSON.parse(data) : null;
  }
  
  async set(key: string, value: any, ttl: number): Promise<void> {
    await this.redis.set(key, JSON.stringify(value), 'EX', ttl);
  }
  
  async invalidate(pattern: string): Promise<void> {
    // Note: KEYS blocks Redis while it scans the keyspace; prefer SCAN in production
    const keys = await this.redis.keys(pattern);
    if (keys.length > 0) {
      await this.redis.del(...keys);
    }
  }
  
  // Cache-aside pattern
  async getOrLoad<T>(
    key: string,
    loader: () => Promise<T>,
    ttl: number
  ): Promise<T> {
    const cached = await this.get(key);
    if (cached !== null) return cached;
    
    const data = await loader();
    await this.set(key, data, ttl);
    return data;
  }
}
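
A hypothetical use of the cache-aside helper; the repository call, key, and TTL below are illustrative assumptions.

const cache = new DistributedCache(redis);

// Load a product through the cache; on a miss the loader hits the database
// and the result is cached for 5 minutes
const product = await cache.getOrLoad(
  `product:${productId}`,
  () => productRepository.findById(productId),
  300
);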

Rate Limiting

import rateLimit from 'express-rate-limit';
import RedisStore from 'rate-limit-redis';

const limiter = rateLimit({
  store: new RedisStore({
    client: redis,
    prefix: 'rate-limit:'
  }),
  windowMs: 15 * 60 * 1000, // 15 minutes
  max: 100, // at most 100 requests per window
  message: 'Too many requests from this IP',
  standardHeaders: true,
  legacyHeaders: false
});

app.use('/api/', limiter);

// Per-user rate limiting
const userLimiter = rateLimit({
  store: new RedisStore({
    client: redis,
    prefix: 'rate-limit:user:'
  }),
  windowMs: 60 * 1000,
  max: async (req) => {
    const user = req.user;
    return user?.isPremium ? 1000 : 100;
  },
  keyGenerator: (req) => req.user?.id || req.ip
});

Distributed Tracing

import { trace, context, SpanStatusCode } from '@opentelemetry/api';

const tracer = trace.getTracer('app-service');

async function processOrder(orderId: string): Promise<void> {
  const span = tracer.startSpan('processOrder');
  
  try {
    span.setAttribute('order.id', orderId);
    
    // Run the nested operations with this span as the active parent
    await context.with(trace.setSpan(context.active(), span), async () => {
      await validateOrder(orderId);
      await chargePayment(orderId);
      await createShipment(orderId);
    });
    
    span.setStatus({ code: SpanStatusCode.OK });
  } catch (error) {
    span.setStatus({
      code: SpanStatusCode.ERROR,
      message: error.message
    });
    throw error;
  } finally {
    span.end();
  }
}

async function validateOrder(orderId: string): Promise<void> {
  const span = tracer.startSpan('validateOrder');
  try {
    // Validation logic
    span.setStatus({ code: SpanStatusCode.OK });
  } finally {
    span.end();
  }
}
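
For traces to stay connected across horizontally scaled services, the trace context has to travel with outbound requests. A minimal sketch using the OpenTelemetry propagation API, assuming a propagator is registered via the SDK setup; the downstream URL is illustrative.

import { context, propagation } from '@opentelemetry/api';

// Inject the current trace context into HTTP headers so the next service
// can continue the same trace
async function callInventoryService(orderId: string): Promise<void> {
  const headers: Record<string, string> = {};
  propagation.inject(context.active(), headers);

  await fetch(`https://inventory.example.com/reserve/${orderId}`, {
    method: 'POST',
    headers
  });
}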

Best Practices

1. 12-Factor App

// ✅ Configuration from environment variables
const config = {
  port: process.env.PORT || 3000,
  database: {
    host: process.env.DB_HOST,
    port: parseInt(process.env.DB_PORT || '5432'),
    database: process.env.DB_NAME,
    user: process.env.DB_USER,
    password: process.env.DB_PASSWORD
  },
  redis: {
    host: process.env.REDIS_HOST,
    port: parseInt(process.env.REDIS_PORT || '6379')
  }
};
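
A small companion sketch: failing fast at startup when a required variable is missing, instead of discovering it on the first request. The helper itself is an assumption, not part of the original example.

function requireEnv(name: string): string {
  const value = process.env[name];
  if (!value) {
    // Crash early so the orchestrator restarts the instance with correct config
    throw new Error(`Missing required environment variable: ${name}`);
  }
  return value;
}

const sessionSecret = requireEnv('SESSION_SECRET');
const dbPassword = requireEnv('DB_PASSWORD');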

2. Idempotency

class OrderService {
  async createOrder(idempotencyKey: string, orderData: OrderData): Promise<Order> {
    // Check whether this request has already been processed
    const existing = await this.redis.get(`idempotency:${idempotencyKey}`);
    if (existing) {
      return JSON.parse(existing);
    }
    
    // Create the order
    const order = await this.db.createOrder(orderData);
    
    // Store the result
    await this.redis.set(
      `idempotency:${idempotencyKey}`,
      JSON.stringify(order),
      'EX',
      86400 // 24 hours
    );
    
    return order;
  }
}

// Usage
app.post('/orders', async (req, res) => {
  const idempotencyKey = req.headers['idempotency-key'];
  
  if (!idempotencyKey) {
    return res.status(400).json({ error: 'Idempotency-Key header required' });
  }
  
  const order = await orderService.createOrder(idempotencyKey, req.body);
  res.json(order);
});
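
Note that the check-then-create sequence above is not atomic: two concurrent retries with the same key can both miss the cached result and create duplicate orders. A hedged sketch that reserves the key atomically first, using an ioredis-style SET with NX; the 'pending' placeholder value is an assumption.

async function reserveIdempotencyKey(redis: Redis, key: string): Promise<boolean> {
  // NX makes the SET succeed only if the key does not exist yet, so exactly
  // one of the concurrent requests wins the reservation
  const result = await redis.set(`idempotency:${key}`, 'pending', 'EX', 86400, 'NX');
  return result === 'OK';
}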

3. Circuit Breaker

class CircuitBreaker {
  private failures = 0;
  private lastFailTime = 0;
  private state: 'closed' | 'open' | 'half-open' = 'closed';
  
  constructor(
    private threshold: number = 5,
    private timeout: number = 60000
  ) {}
  
  async execute<T>(fn: () => Promise<T>): Promise<T> {
    if (this.state === 'open') {
      if (Date.now() - this.lastFailTime > this.timeout) {
        this.state = 'half-open';
      } else {
        throw new Error('Circuit breaker is open');
      }
    }
    
    try {
      const result = await fn();
      this.onSuccess();
      return result;
    } catch (error) {
      this.onFailure();
      throw error;
    }
  }
  
  private onSuccess(): void {
    this.failures = 0;
    this.state = 'closed';
  }
  
  private onFailure(): void {
    this.failures++;
    this.lastFailTime = Date.now();
    
    if (this.failures >= this.threshold) {
      this.state = 'open';
    }
  }
}
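
A hypothetical usage example: wrapping an outbound call to a flaky dependency. The endpoint and function name are illustrative assumptions.

const paymentBreaker = new CircuitBreaker(5, 60000);

async function callPaymentService(orderId: string): Promise<void> {
  await paymentBreaker.execute(async () => {
    const response = await fetch(`https://payments.example.com/charge/${orderId}`, {
      method: 'POST'
    });
    if (!response.ok) {
      // Treat non-2xx responses as failures so the breaker can open
      throw new Error(`Payment service responded with ${response.status}`);
    }
  });
}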

Conclusion

Horizontal scaling requires:

  1. Stateless architecture: keep state in external stores
  2. Load balancing: distribute traffic across instances
  3. Health checks: monitor instance health
  4. Graceful shutdown: terminate instances cleanly
  5. Auto-scaling: adjust capacity automatically
  6. Distributed caching: share cached data across instances
  7. Observability: monitoring and distributed tracing