Use a load balancer

After joining multiple manager nodes for high availability (HA), you can configure your own load balancer to balance user requests across all manager nodes.

A load balancer allows users to access MKE through a single, centralized domain name. The load balancer can detect when a manager node fails and stop forwarding requests to that node, so that users are unaffected by the failure.

Configure load balancing on MKE

  1. Because MKE uses TLS, do the following when configuring your load balancer:

    • Load-balance TCP traffic on ports 443 (the MKE web UI and API) and 6443 (the Kubernetes API).

    • Do not terminate HTTPS connections; pass TLS traffic through to the manager nodes unchanged.

    • On each manager node, use the /_ping endpoint to verify whether the node is healthy and should remain in the load-balancing pool, as shown in the example that follows.

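    For example, you can probe the health endpoint manually before adding a node to the pool. This is a quick sketch; <UCP_MANAGER_IP> is a placeholder for the address of any manager node, and --insecure is only needed if the node presents a self-signed certificate:

    # A healthy node responds with HTTP 200 and the body "OK"
    curl --insecure https://<UCP_MANAGER_IP>/_ping
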
  2. Use whichever of the following examples (NGINX, HAProxy, or AWS ELB) matches your load balancer to configure it for MKE.

    NGINX (nginx.conf):

    user  nginx;
    worker_processes  1;

    error_log  /var/log/nginx/error.log warn;
    pid        /var/run/nginx.pid;

    events {
        worker_connections  1024;
    }

    # TCP (stream) load balancing for the MKE web UI/API (443)
    # and the Kubernetes API (6443)
    stream {
        upstream ucp_443 {
            server <UCP_MANAGER_1_IP>:443 max_fails=2 fail_timeout=30s;
            server <UCP_MANAGER_2_IP>:443 max_fails=2 fail_timeout=30s;
            server <UCP_MANAGER_N_IP>:443 max_fails=2 fail_timeout=30s;
        }
        server {
            listen 443;
            proxy_pass ucp_443;
        }
        upstream ucp_6443 {
            server <UCP_MANAGER_1_IP>:6443 max_fails=2 fail_timeout=30s;
            server <UCP_MANAGER_2_IP>:6443 max_fails=2 fail_timeout=30s;
            server <UCP_MANAGER_N_IP>:6443 max_fails=2 fail_timeout=30s;
        }
        server {
            listen 6443;
            proxy_pass ucp_6443;
        }
    }
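
    You can optionally validate the configuration file before deploying it. A minimal sketch that uses the same image as the deployment step below; nginx -t tests the configuration and exits:

    docker run --rm \
      --volume ${PWD}/nginx.conf:/etc/nginx/nginx.conf:ro \
      nginx:stable-alpine nginx -t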

    HAProxy (haproxy.cfg):

    global
        log /dev/log    local0
        log /dev/log    local1 notice

    defaults
        mode    tcp
        option  dontlognull
        timeout connect     5s
        timeout client      50s
        timeout server      50s
        timeout tunnel      1h
        timeout client-fin  50s

    ### frontends
    # Optional HAProxy stats page, accessible at http://<host-ip>:8181/haproxy?stats
    frontend ucp_stats
        mode http
        bind 0.0.0.0:8181
        default_backend ucp_stats
    frontend ucp_443
        mode tcp
        bind 0.0.0.0:443
        default_backend ucp_upstream_servers_443
    frontend ucp_6443
        mode tcp
        bind 0.0.0.0:6443
        default_backend ucp_upstream_servers_6443

    ### backends
    backend ucp_stats
        mode http
        option httplog
        stats enable
        stats admin if TRUE
        stats refresh 5m
    backend ucp_upstream_servers_443
        mode tcp
        # Health-check each manager over HTTPS with the /_ping endpoint
        option httpchk GET /_ping HTTP/1.1\r\nHost:\ <UCP_FQDN>
        server node01 <UCP_MANAGER_1_IP>:443 weight 100 check check-ssl verify none
        server node02 <UCP_MANAGER_2_IP>:443 weight 100 check check-ssl verify none
        server node03 <UCP_MANAGER_N_IP>:443 weight 100 check check-ssl verify none
    backend ucp_upstream_servers_6443
        mode tcp
        option httpchk GET /_ping HTTP/1.1\r\nHost:\ <UCP_FQDN>
        server node01 <UCP_MANAGER_1_IP>:6443 weight 100 check check-ssl verify none
        server node02 <UCP_MANAGER_2_IP>:6443 weight 100 check check-ssl verify none
        server node03 <UCP_MANAGER_N_IP>:6443 weight 100 check check-ssl verify none
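
    Likewise, you can check the HAProxy configuration before deployment. A minimal sketch; haproxy -c validates the configuration file and exits:

    docker run --rm \
      --volume ${PWD}/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
      haproxy:1.7-alpine haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg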

    AWS LB (example classic ELB description, with TCP listeners on ports 443 and 6443 and a health check against HTTPS:443/_ping):

    {
        "Subnets": [
            "subnet-XXXXXXXX",
            "subnet-YYYYYYYY",
            "subnet-ZZZZZZZZ"
        ],
        "CanonicalHostedZoneNameID": "XXXXXXXXXXX",
        "CanonicalHostedZoneName": "XXXXXXXXX.us-west-XXX.elb.amazonaws.com",
        "ListenerDescriptions": [
            {
                "Listener": {
                    "InstancePort": 443,
                    "LoadBalancerPort": 443,
                    "Protocol": "TCP",
                    "InstanceProtocol": "TCP"
                },
                "PolicyNames": []
            },
            {
                "Listener": {
                    "InstancePort": 6443,
                    "LoadBalancerPort": 6443,
                    "Protocol": "TCP",
                    "InstanceProtocol": "TCP"
                },
                "PolicyNames": []
            }
        ],
        "HealthCheck": {
            "HealthyThreshold": 2,
            "Interval": 10,
            "Target": "HTTPS:443/_ping",
            "Timeout": 2,
            "UnhealthyThreshold": 4
        },
        "VPCId": "vpc-XXXXXX",
        "BackendServerDescriptions": [],
        "Instances": [
            {
                "InstanceId": "i-XXXXXXXXX"
            },
            {
                "InstanceId": "i-XXXXXXXXX"
            },
            {
                "InstanceId": "i-XXXXXXXXX"
            }
        ],
        "DNSName": "XXXXXXXXXXXX.us-west-2.elb.amazonaws.com",
        "SecurityGroups": [
            "sg-XXXXXXXXX"
        ],
        "Policies": {
            "LBCookieStickinessPolicies": [],
            "AppCookieStickinessPolicies": [],
            "OtherPolicies": []
        },
        "LoadBalancerName": "ELB-UCP",
        "CreatedTime": "2017-02-13T21:40:15.400Z",
        "AvailabilityZones": [
            "us-west-2c",
            "us-west-2a",
            "us-west-2b"
        ],
        "Scheme": "internet-facing",
        "SourceSecurityGroup": {
            "OwnerAlias": "XXXXXXXXXXXX",
            "GroupName": "XXXXXXXXXXXX"
        }
    }
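
    If you create the equivalent classic ELB from the command line, the following is a minimal sketch using the AWS CLI; the load balancer name and the subnet, security group, and instance IDs are placeholders that correspond to the description above:

    # Create TCP listeners for ports 443 and 6443
    aws elb create-load-balancer \
      --load-balancer-name ELB-UCP \
      --listeners "Protocol=TCP,LoadBalancerPort=443,InstanceProtocol=TCP,InstancePort=443" \
                  "Protocol=TCP,LoadBalancerPort=6443,InstanceProtocol=TCP,InstancePort=6443" \
      --subnets subnet-XXXXXXXX \
      --security-groups sg-XXXXXXXXX

    # Health-check each instance with the /_ping endpoint over HTTPS
    aws elb configure-health-check \
      --load-balancer-name ELB-UCP \
      --health-check Target=HTTPS:443/_ping,Interval=10,Timeout=2,UnhealthyThreshold=4,HealthyThreshold=2

    # Register the manager nodes
    aws elb register-instances-with-load-balancer \
      --load-balancer-name ELB-UCP \
      --instances i-XXXXXXXXX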
    
  3. Create the nginx.conf or haproxy.cfg file, as required, based on the corresponding example above.

    For instructions on deploying with an AWS load balancer, refer to Getting Started with Network Load Balancers in the AWS documentation.

  4. Deploy the load balancer, using whichever of the following commands matches your configuration file.

    NGINX:

    docker run --detach \
      --name ucp-lb \
      --restart=unless-stopped \
      --publish 443:443 \
      --publish 6443:6443 \
      --volume ${PWD}/nginx.conf:/etc/nginx/nginx.conf:ro \
      nginx:stable-alpine

    HAProxy:

    docker run --detach \
      --name ucp-lb \
      --publish 443:443 \
      --publish 6443:6443 \
      --publish 8181:8181 \
      --restart=unless-stopped \
      --volume ${PWD}/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
      haproxy:1.7-alpine haproxy -d -f /usr/local/etc/haproxy/haproxy.cfg
    
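    Once the container is running, you can confirm that the load balancer is reachable and forwarding traffic to the managers. A quick sketch; <LOAD_BALANCER_IP> is a placeholder for the address of the host running the container:

    # The health endpoint should respond with OK through the load balancer
    curl --insecure https://<LOAD_BALANCER_IP>/_ping

    # Review the container logs if the check fails
    docker logs ucp-lb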

Load balancing MKE and MSR together

By default, both MKE and Mirantis Secure Registry (MSR) use port 443. If you plan to deploy MKE and MSR together, your load balancer must be able to distinguish between the two types of traffic, either by IP address or by port number.

If you want both MKE and MSR to use port 443, you must either deploy a separate load balancer for each or use two virtual IPs. Otherwise, configure your load balancer to expose either MKE or MSR on a port other than 443, as in the sketch that follows.
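
For example, if you run a single NGINX load balancer and choose to expose MSR on an alternate port, you can extend the stream block from the earlier example. This is a minimal sketch rather than a standard configuration; port 4443 and the <MSR_REPLICA_*_IP> placeholders are illustrative assumptions:

    stream {
        # ... MKE upstreams and servers from the earlier example ...

        # Hypothetical: forward port 4443 on the load balancer to MSR replicas on 443
        upstream msr_443 {
            server <MSR_REPLICA_1_IP>:443 max_fails=2 fail_timeout=30s;
            server <MSR_REPLICA_2_IP>:443 max_fails=2 fail_timeout=30s;
        }
        server {
            listen 4443;
            proxy_pass msr_443;
        }
    }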