Scaling Otoroshi

Using multiple instances with a front load balancer

Otoroshi has been designed to work with multiple instances. If your infrastructure already uses a front load balancer, you just have to declare the Otoroshi instances as the targets of all the domain names handled by Otoroshi.

Using master / workers mode of Otoroshi

You can read everything about it in the clustering section of the documentation.

Using IPVS

You can use IPVS (IP Virtual Server) to load balance layer 4 traffic directly from the Linux kernel to multiple instances of Otoroshi. You can find an example of configuration here.
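
A minimal round robin sketch with ipvsadm could look like the following (the virtual IP 192.168.1.100 is an assumption; the three instance addresses match the examples below):

# create a virtual service on the VIP, scheduled round robin
ipvsadm -A -t 192.168.1.100:80 -s rr
# register the Otoroshi instances as real servers (NAT / masquerading mode)
ipvsadm -a -t 192.168.1.100:80 -r 192.168.1.40:8080 -m
ipvsadm -a -t 192.168.1.100:80 -r 192.168.1.41:8080 -m
ipvsadm -a -t 192.168.1.100:80 -r 192.168.1.42:8080 -m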

Using DNS Round Robin

You can use the DNS round robin technique to declare multiple A records under the domain names handled by Otoroshi.
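
For example, with a BIND-style zone and the three instances used in the examples below (the 300 second TTL is an arbitrary choice; keep it low so a removed node stops receiving traffic quickly):

otoroshi.oto.tools.    300 IN A 192.168.1.40
otoroshi.oto.tools.    300 IN A 192.168.1.41
otoroshi.oto.tools.    300 IN A 192.168.1.42

The other domain names served by Otoroshi (otoroshi-api.oto.tools, privateapps.oto.tools, etc.) get the same set of records.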

Using software L4/L7 load balancers

You can use software load balancers like NGINX or HAProxy to load balance layer 4 (TCP) or layer 7 (HTTP) traffic to multiple instances of Otoroshi.

NGINX L7
# drive the Connection header from the client's Upgrade header:
# 'upgrade' for websockets, empty otherwise (required for SSE)
map $http_upgrade $connection_upgrade {
  default upgrade;
  ''      '';
}

upstream otoroshi {
  server 192.168.1.40:8080 max_fails=1;
  server 192.168.1.41:8080 max_fails=1;
  server 192.168.1.42:8080 max_fails=1;
}

server {
  listen 80;
  # http://nginx.org/en/docs/http/server_names.html
  server_name otoroshi.oto.tools otoroshi-api.oto.tools otoroshi-admin-internal-api.oto.tools privateapps.oto.tools *-api.oto.tools;
  location / {
    # SSE config
    proxy_buffering off;
    proxy_cache off;
    proxy_http_version 1.1;
    chunked_transfer_encoding off;

    # websockets config (the Connection header comes from the map above,
    # so it stays empty for non-websocket traffic such as SSE)
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection $connection_upgrade;
  
    # other config
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_pass http://otoroshi;
  }
}

server {
  listen 443 ssl;
  # http://nginx.org/en/docs/http/server_names.html
  server_name otoroshi.oto.tools otoroshi-api.oto.tools otoroshi-admin-internal-api.oto.tools privateapps.oto.tools *-api.oto.tools;
  ssl_certificate           /etc/letsencrypt/wildcard.oto.tools/fullchain.pem;
  ssl_certificate_key       /etc/letsencrypt/wildcard.oto.tools/privkey.pem;
  ssl_session_cache         shared:SSL:10m;
  ssl_session_timeout       5m;
  ssl_prefer_server_ciphers on;
  ssl_ciphers               ECDH+AESGCM:ECDH+AES256:ECDH+AES128:!ADH:!AECDH:!MD5:!3DES;
  ssl_protocols             TLSv1.2 TLSv1.3;
  location / {
    # SSE config
    proxy_buffering off;
    proxy_cache off;
    proxy_http_version 1.1;
    chunked_transfer_encoding off;

    # websockets config (the Connection header comes from the map above,
    # so it stays empty for non-websocket traffic such as SSE)
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection $connection_upgrade;
  
    # other config
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_pass http://otoroshi;
  }
}
NGINX L4
stream {

  upstream back_http_nodes {
    zone back_http_nodes 64k;
    server 192.168.1.40:8080 max_fails=1;
    server 192.168.1.41:8080 max_fails=1;
    server 192.168.1.42:8080 max_fails=1;
  }

  upstream back_https_nodes {
    zone back_https_nodes 64k;
    server 192.168.1.40:8443 max_fails=1;
    server 192.168.1.41:8443 max_fails=1;
    server 192.168.1.42:8443 max_fails=1;
  }

  server {
    listen     80;
    proxy_pass back_http_nodes;
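    # note: active health checks (health_check) require NGINX Plus;
    # open source NGINX only does passive checks via max_fails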
    health_check;
  }

  server {
    listen     443;
    proxy_pass back_https_nodes;
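    # health_check requires NGINX Plus (see note above)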
    health_check;
  }
  
}
HAProxy L7
frontend front_nodes_http
    bind *:80
    mode http
    default_backend back_http_nodes
    timeout client          1m

frontend front_nodes_https
    # L7 (http) mode requires TLS termination here; the certificate path is an
    # assumption and must point to a PEM containing both the cert and its key
    bind *:443 ssl crt /etc/haproxy/certs/wildcard.oto.tools.pem
    mode http
    default_backend back_https_nodes
    timeout client          1m

backend back_http_nodes
    mode http
    balance roundrobin
    option forwardfor
    http-request set-header X-Forwarded-Port %[dst_port]
    http-request add-header X-Forwarded-Proto https if { ssl_fc }
    http-request set-header X-Client-IP %[src]
    server node1 192.168.1.40:8080
    server node2 192.168.1.41:8080
    server node3 192.168.1.42:8080
    timeout connect        10s
    timeout server          1m

backend back_https_nodes
    mode http
    balance roundrobin
    option forwardfor
    http-request set-header X-Forwarded-Port %[dst_port]
    http-request add-header X-Forwarded-Proto https if { ssl_fc }
    http-request set-header X-Client-IP %[src]
    # re-encrypt to Otoroshi's HTTPS port; use 'verify required' with a CA in production
    server node1 192.168.1.40:8443 ssl verify none
    server node2 192.168.1.41:8443 ssl verify none
    server node3 192.168.1.42:8443 ssl verify none
    timeout connect        10s
    timeout server          1m
HAProxy L4
frontend front_nodes_http
    bind *:80
    mode tcp
    default_backend back_http_nodes
    timeout client          1m

frontend front_nodes_https
    bind *:443
    mode tcp
    default_backend back_https_nodes
    timeout client          1m

backend back_http_nodes
    mode tcp
    balance roundrobin
    server node1 192.168.1.40:8080
    server node2 192.168.1.41:8080
    server node3 192.168.1.42:8080
    timeout connect        10s
    timeout server          1m

backend back_https_nodes
    mode tcp
    balance roundrobin
    server node1 192.168.1.40:8443
    server node2 192.168.1.41:8443
    server node3 192.168.1.42:8443
    timeout connect        10s
    timeout server          1m
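
In L4 (tcp) mode, HAProxy does not terminate TLS: traffic on port 443 is passed through untouched and Otoroshi itself terminates TLS on port 8443.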

Using a custom TCP load balancer

You can also use any other TCP load balancer, from a hardware appliance to a small script like the ones below.

tcp-proxy.js
const proxy = require("node-tcp-proxy");

const hosts = ["192.168.1.40", "192.168.1.41", "192.168.1.42"];
const portsHttp = [8080, 8080, 8080];
const portsHttps = [8443, 8443, 8443];

// round robin plain HTTP connections to the Otoroshi instances
const proxyHttp = proxy.createProxy(80, hosts, portsHttp, {
  tls: false
});

// tls: false here too: TLS is passed through and terminated by Otoroshi on 8443
const proxyHttps = proxy.createProxy(443, hosts, portsHttps, {
  tls: false
});
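
Note that binding ports 80 and 443 requires root privileges (or the CAP_NET_BIND_SERVICE capability on Linux).
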
tcp-proxy.rs
// deps (assumed): tokio = { version = "1", features = ["full"] }, rand = "0.8"
use rand::Rng;
use std::net::SocketAddr;
use tokio::net::{TcpListener, TcpStream};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let urls: Vec<SocketAddr> = vec![
        "192.168.1.40:8080".parse()?,
        "192.168.1.41:8080".parse()?,
        "192.168.1.42:8080".parse()?,
    ];
    // binding port 80 requires root (or CAP_NET_BIND_SERVICE)
    let listener = TcpListener::bind("0.0.0.0:80").await?;
    println!("TCP load balancer listening on 0.0.0.0:80");
    loop {
        let (mut inbound, _) = listener.accept().await?;
        // pick a random Otoroshi instance for each incoming connection
        let target = urls[rand::thread_rng().gen_range(0..urls.len())];
        tokio::spawn(async move {
            match TcpStream::connect(target).await {
                Ok(mut outbound) => {
                    // shuttle bytes in both directions until either side closes
                    let _ = tokio::io::copy_bidirectional(&mut inbound, &mut outbound).await;
                }
                Err(e) => eprintln!("error connecting to {}: {:?}", target, e),
            }
        });
    }
}