Example configurations #35

Merged 4 commits on Aug 23, 2016
1 change: 1 addition & 0 deletions .gitignore
@@ -1,2 +1,3 @@
/build
/docs/landing_source/.bundle
/generated
19 changes: 19 additions & 0 deletions configs/access_log_format_helper.template.json
@@ -0,0 +1,19 @@
{% macro ingress_sampled_log() %}
"format": "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %FAILURE_REASON% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"
{% endmacro %}

{% macro ingress_full() %}
"format": "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %FAILURE_REASON% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"
{% endmacro %}

{% macro ingress_error_log() %}
"format": "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %FAILURE_REASON% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"
{% endmacro %}

{% macro egress_error_log() %}
"format": "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %FAILURE_REASON% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n"
{% endmacro %}

{% macro egress_error_amazon_service() %}
"format": "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %FAILURE_REASON% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" \"%RESP(X-AMZN-RequestId)%\"\n"
{% endmacro %}
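
These macros are meant to be imported by other templates rather than rendered on their own. A minimal sketch of how a listener template might consume them, assuming the importing template sits alongside this file in configs/ (the actual call sites live in the larger proxy templates, not shown in full here):

{# Sketch only: pull the helper macros into a listener template. #}
{% import 'access_log_format_helper.template.json' as access_log_helper -%}

"access_log": [
  {
    "path": "/var/log/envoy/access_error.log",
    "filter": {"type": "status_code", "op": ">=", "value": 500},
    {{ access_log_helper.ingress_error_log() }}
  },
  {
    "path": "/var/log/envoy/access.log",
    {{ access_log_helper.ingress_full() }}
  }
]

Because each macro emits a complete "format": "..." key with no trailing comma, it should be the last entry in the enclosing access log object; configgen.py then validates the rendered output with json.loads before writing it.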
105 changes: 105 additions & 0 deletions configs/configgen.py
@@ -0,0 +1,105 @@
import jinja2
import json
from collections import OrderedDict
import sys

#
# About this script: Envoy configurations needed for a complete infrastructure are complicated.
# This script demonstrates how to programmatically build Envoy configurations using Jinja templates.
# This is roughly how we build our configurations at Lyft. The three configurations demonstrated
# here (front proxy, double proxy, and service to service) are also very close approximations to
# what we use at Lyft in production. They give a demonstration of how to configure most Envoy
# features. Along with the configuration guide, it should be possible to modify them for different
# use cases.
#

# This is the set of internal services that front Envoy will route to. Each cluster referenced
# in envoy_router.template.json must be specified here. It is a dictionary of dictionaries.
# Options can be specified for each cluster if needed. See make_route_internal() in
# routing_helper.template.json for the types of options supported.
front_envoy_clusters = {
'service1': {},
'service2': {},
}

# This is the set of internal services that local Envoys will route to. All services that will be
# accessed via the 9001 egress port need to be listed here. It is a dictionary of dictionaries.
# Options can be specified for each cluster if needed. See make_route_internal() in
# routing_helper.template.json for the types of options supported.
service_to_service_envoy_clusters = {
'ratelimit': {},
'service1': {},
'service3': {}
}

# This is a list of external hosts that can be accessed from local Envoys. Each external service has
# its own port. This is because some SDKs don't make it easy to use host based routing. Below
# we demonstrate setting up proxying for DynamoDB. In the config, this ends up using the HTTP
# DynamoDB statistics filter, as well as generating a special access log which includes the
# X-AMZN-RequestId response header.
external_virtual_hosts = [
{
'name': 'dynamodb_iad',
'port': 9204,
'hosts': [
{
'name': 'dynamodb_iad', 'domain': '*',
'remote_address': 'dynamodb.us-east-1.amazonaws.com:443',
'verify_subject_alt_name': 'dynamodb.us-east-1.amazonaws.com', 'ssl': True
}
],
'is_amzn_service': True,
'cluster_type': 'logical_dns'
}]

# This is the set of mongo clusters that local Envoys can talk to. Each database defines a set of
# mongos routers to talk to, and whether the global rate limit service should be called for new
# connections. Many organizations will not be interested in the mongo feature. Setting this to
# an empty dictionary will remove all mongo configuration. The configuration is a useful example
# as it demonstrates how to set up TCP proxy and the network rate limit filter.
mongos_servers = {
'somedb': {
'port': 27019,
'hosts': [
"router1.yourcompany.net:27817",
"router2.yourcompany.net:27817",
"router3.yourcompany.net:27817",
"router4.yourcompany.net:27817",
],
'ratelimit': True
}
}

def generate_config(template_path, template, output_file, **context):
""" Generate a final config file based on a template and some context. """
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path),
undefined=jinja2.StrictUndefined)
raw_output = env.get_template(template).render(**context)
# Verify valid JSON and then dump it nicely formatted to avoid jinja pain.
output = json.loads(raw_output, object_pairs_hook=OrderedDict)
with open(output_file, 'w') as fh:
json.dump(output, fh, indent=2)

# Generate a demo config for the main front proxy. This sets up both HTTP and HTTPS listeners,
# as well as a listener for the double proxy to connect to via SSL client authentication.
generate_config('configs', 'envoy_front_proxy.template.json',
'{}/envoy_front_proxy.json'.format(sys.argv[1]), clusters=front_envoy_clusters)

# Generate a demo config for the double proxy. This sets up both HTTP and HTTPS listeners,
# and backhauls the traffic to the main front proxy.
generate_config('configs', 'envoy_double_proxy.template.json',
'{}/envoy_double_proxy.json'.format(sys.argv[1]))

# Generate a demo config for the service to service (local) proxy. This sets up several different
# listeners:
# 9211: Main ingress listener for service to service traffic.
# 9001: Main egress listener for service to service traffic. Applications use this port to send
# requests to other services.
# optional external service ports: built from external_virtual_hosts above. Each external host
# that Envoy proxies to listens on its own port.
# optional mongo ports: built from mongos_servers above.
generate_config('configs', 'envoy_service_to_service.template.json',
'{}/envoy_service_to_service.json'.format(sys.argv[1]),
internal_virtual_hosts=service_to_service_envoy_clusters,
external_virtual_hosts=external_virtual_hosts,
mongos_servers=mongos_servers)
11 changes: 11 additions & 0 deletions configs/configgen.sh
@@ -0,0 +1,11 @@
#!/bin/bash

SCRIPT_DIR=$(dirname "$0")
BUILD_DIR=build/configgen
if [ ! -d $BUILD_DIR/venv ]; then
virtualenv $BUILD_DIR/venv
$BUILD_DIR/venv/bin/pip install -r $SCRIPT_DIR/requirements.txt
fi

mkdir -p $1
$BUILD_DIR/venv/bin/python $SCRIPT_DIR/configgen.py $1
151 changes: 151 additions & 0 deletions configs/envoy_double_proxy.template.json
@@ -0,0 +1,151 @@
{% macro listener(port,ssl,proxy_proto) %}
{
"port": {{ port }},
{% if ssl -%}
"ssl_context": {
"alpn_protocols": "h2,http/1.1",
"alt_alpn_protocols": "http/1.1",
"cert_chain_file": "/etc/envoy/cert.pem",
"private_key_file": "/etc/envoy/key.pem"
},
{% endif -%}
{% if proxy_proto -%}
"use_proxy_proto": true,
{% endif -%}
"filters": [
{
"type": "read",
"name": "http_connection_manager",
"config": {
"codec_type": "auto",
"tracing_enabled": true,
"idle_timeout_s": 840,
"access_log": [
{
"path": "/var/log/envoy/access_error.log",
"filter": {"type": "logical_or", "filters": [
{"type": "status_code", "op": ">=", "value": 500},
{"type": "duration", "op": ">=", "value": 1000},
{"type": "traceable_request"}
]
}
},
{
"path": "/var/log/envoy/access.log"
}],
"stat_prefix": "router",
{% if proxy_proto -%}
"use_remote_address": true,
{% endif -%}
"route_config":
{
"virtual_hosts": [
{
"name": "all",
"domains": ["*"],
"routes": [
{
"prefix": "/",
"cluster": "backhaul",
{# Generally allow front proxy to control timeout and use this as a backstop #}
"timeout_ms": 20000
}
]
}
]
},
"filters": [
{ "type": "both", "name": "health_check",
"config": {
"pass_through_mode": false, "endpoint": "/healthcheck"
}
},
{ "type": "decoder", "name": "buffer",
"config": {
"max_request_bytes": 5242880,
"max_request_time_s": 120
}
},
{ "type": "decoder", "name": "router", "config": {} }
]
}
}]
}
{% endmacro %}

{
"listeners": [
{# TCP listener for external port 443 (SSL). Assumes a TCP LB in front such as ELB which
supports proxy proto. #}
{{ listener(9300,True,True) }},

{# TCP listener for external port 80 (non-SSL). Assumes a TCP LB in front such as ELB which
supports proxy proto. #}
{{ listener(9301,False,True) }}
],

"admin": { "access_log_path": "/var/log/envoy/admin_access.log",
"port": 9901 },
"flags_path": "/etc/envoy/flags",
"statsd_tcp_cluster_name": "statsd",

"tracing": {
"http": {
"sinks": [
{
"type": "lightstep",
"access_token_file": "/etc/envoy/lightstep_access_token",
"config": {
"collector_cluster": "lightstep_saas"
}
}
]
}
},

"runtime": {
"symlink_root": "/srv/runtime_data/current",
"subdirectory": "envoy",
"override_subdirectory": "envoy_override"
},

"cluster_manager": {
"clusters": [
{
"name": "statsd",
"connect_timeout_ms": 250,
"type": "static",
"lb_type": "round_robin",
"hosts": [{"url": "tcp://127.0.0.1:8125"}]
},
{
"name": "backhaul",
"connect_timeout_ms": 1000,
"type": "strict_dns",
"lb_type": "round_robin",
"features": "http2",
"max_requests_per_connection": 25000, {# There are so few connections going back
that we can get some imbalance. Until we can come
up with a better solution just limit the requests
so we can cycle and get better spread. #}
"ssl_context": {
"cert_chain_file": "/etc/envoy/envoy-double-proxy.pem",
"private_key_file": "/etc/envoy/envoy-double-proxy.key",
"verify_subject_alt_name": "front-proxy.yourcompany.com"
},
"hosts": [{"url": "tcp://front-proxy.yourcompany.com:9400"}]
},
{
"name": "lightstep_saas",
"ssl_context": {
"ca_cert_file": "/etc/ssl/certs/ca-certificates.crt",
"verify_subject_alt_name": "collector.lightstep.com"
},
"connect_timeout_ms": 1000,
"type": "logical_dns",
"lb_type": "round_robin",
"hosts": [{"url": "tcp://collector.lightstep.com:443"}]
}
]
}
}