How to use a custom nginx.conf on GKE

If you want to use a custom nginx.conf on Google Kubernetes Engine, prepare one by extending this sample nginx.conf. The following snippet shows the configuration required by Cloud Endpoints:

http {
  include /etc/nginx/mime.types;
  server_tokens off;
  client_max_body_size 32m;

  upstream app_server {
    server localhost:8081;
    keepalive 128;
  }

  endpoints {
    metadata_server;
  }

  server {
    # Running port
    listen 8080;

    # Running ssl port
    listen 443 ssl;
    ssl_certificate /etc/nginx/ssl/nginx.crt;
    ssl_certificate_key /etc/nginx/ssl/nginx.key;

    # Logging to stdout enables better integration with Docker and GKE/Kubernetes.
    access_log /dev/stdout;

    location / {
      # Begin Endpoints v2 Support
      endpoints {
        on;
        # After ESP 1.7.0, "server_config" field is required.
        # It has to be /etc/nginx/server_config.pb.txt exactly.
        # If not present, some new features will not work.
        server_config /etc/nginx/server_config.pb.txt;

        # After ESP 1.7.0, "api" field is not required.
        # If added, it has to be /etc/nginx/endpoints/service.json exactly.
        # api /etc/nginx/endpoints/service.json;

        # Uncomment the line below if you are not using Google Kubernetes Engine (GKE).
        # The path should be set to the "-k" path specified in the ESP container's
        # args section in the Kubernetes yaml config.
        # google_authentication_secret /etc/nginx/creds/service-account-creds.json;
      }
      # End Endpoints v2 Support

      proxy_pass http://app_server;
      proxy_redirect off;
      proxy_set_header Host $host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Host $server_name;
      proxy_set_header X-Google-Real-IP $remote_addr;

      # 86400 seconds (24 hours) is the maximum a server is allowed.
      proxy_send_timeout 86400s;
      proxy_read_timeout 86400s;
    }

    include /var/lib/nginx/extra/*.conf;
  }

  server {
    # expose /nginx_status but on a different port to avoid
    # external visibility / conflicts with the app.
    listen 8090;
    location /nginx_status {
      stub_status on;
      access_log off;
    }
    location / {
      root /dev/null;
    }
  }
}
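
The ssl_certificate paths above expect a TLS key pair mounted at /etc/nginx/ssl, which the Kubernetes configuration further down supplies from a Secret named nginx-ssl. As a minimal sketch, assuming a self-signed certificate is acceptable for testing, you could create that Secret as follows (the file names nginx.crt and nginx.key must match the paths in nginx.conf):

# Generate a self-signed key pair for testing only; replace with a CA-signed
# certificate for production. The CN below is a placeholder.
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout ./nginx.key -out ./nginx.crt -subj "/CN=example.com"

# Store the key pair in the nginx-ssl Secret mounted by the esp container.
kubectl create secret generic nginx-ssl \
  --from-file=./nginx.crt --from-file=./nginx.key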

Now, create a Kubernetes ConfigMap with the custom nginx.conf using kubectl:

kubectl create configmap nginx-config --from-file=nginx.conf
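
If you want to confirm that the ConfigMap holds the expected file, you can optionally inspect it before deploying:

# Print the stored nginx.conf to verify the ConfigMap contents.
kubectl get configmap nginx-config -o yaml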

Edit the Kubernetes configuration file, such as esp_echo_custom_config_gke.yaml, and replace SERVICE_NAME with the name of your Endpoints service:

template:
  metadata:
    labels:
      app: esp-echo
  spec:
    volumes:
    - name: nginx-config
      configMap:
        name: nginx-config
    - name: nginx-ssl
      secret:
        secretName: nginx-ssl
    containers:
    - name: esp
      image: gcr.io/endpoints-release/endpoints-runtime:1
      args: [
        "-n", "/etc/nginx/custom/nginx.conf",
        "-s", "SERVICE_NAME",
        "--rollout_strategy", "managed",
      ]
      ports:
        - containerPort: 8080
        - containerPort: 443
      volumeMounts:
      - mountPath: /etc/nginx/ssl
        name: nginx-ssl
        readOnly: true
      - mountPath: /etc/nginx/custom
        name: nginx-config
        readOnly: true
    - name: echo
      image: gcr.io/endpoints-release/echo:latest
      ports:
        - containerPort: 8081
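
One way to do that replacement, assuming GNU sed and using a purely illustrative service name (yours will differ), is:

# Replace the SERVICE_NAME placeholder in place; the value below is only an
# example and must be changed to your own Endpoints service name.
sed -i 's/SERVICE_NAME/echo-api.endpoints.YOUR_PROJECT_ID.cloud.goog/g' \
  esp_echo_custom_config_gke.yaml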

Finally, start the service with the updated Kubernetes configuration file using kubectl:

kubectl create -f esp_echo_custom_config_gke.yaml
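
After the objects are created, you can check that both containers start and that ESP picked up the custom configuration. The label app=esp-echo comes from the Pod template above; the Deployment name esp-echo used in the port-forward command is an assumption, so adjust it to whatever your file defines:

# Check that the Pod with the esp and echo containers is running.
kubectl get pods -l app=esp-echo

# Look at the ESP container's logs for nginx startup errors.
kubectl logs -l app=esp-echo -c esp

# Optionally reach the /nginx_status page exposed on port 8090 in nginx.conf
# via a temporary port-forward (assumes the Deployment is named esp-echo).
kubectl port-forward deployment/esp-echo 8090:8090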