##: ----------------------------------------------------------------------------
##: Copyright (C) 2017 Verizon. All Rights Reserved.
##:
##: Licensed under the Apache License, Version 2.0 (the "License");
##: you may not use this file except in compliance with the License.
##: You may obtain a copy of the License at
##:
##: http://www.apache.org/licenses/LICENSE-2.0
##:
##: Unless required by applicable law or agreed to in writing, software
##: distributed under the License is distributed on an "AS IS" BASIS,
##: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##: See the License for the specific language governing permissions and
##: limitations under the License.
##:
##: ----------------------------------------------------------------------------
nelson {
# generic default timeout for all http requests that
# nelson is going to make to other systems (e.g. Github, Nomad)
timeout = 4 seconds
# how frequently should we run the readiness check to see if a
# deployment is ready to take active traffic (based on the consul
# health checks).
readiness-delay = 3 minutes
# how frequently should nelson write out the discovery information
# to consul. this should not be set too high, as it affects how
# frequently (and how smoothly) updates appear in the runtime.
discovery-delay = 2 minutes
# when nelson goes to fetch the user's manifest from their github
# repo, this is where nelson expects to find the manifest file. By
# default, this value is interpreted as a path from the
# root of the repository.
manifest-filename = ".nelson.yml"
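# e.g. (illustrative only) setting this to "deploy/.nelson.yml" would make
# nelson look for the manifest at <repo-root>/deploy/.nelson.yml instead of
# the repository root.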
# allowed list of ports that a loadbalancer can expose
proxy-port-whitelist = [ 80, 443, 8080 ]
# upon receiving a github release event, where should Nelson assume the
# application should get deployed to. This can either be a root namespace,
# or a subordinate namespace, e.g. `stage/unstable`... it's arbitrary, but the
# namespace must exist (otherwise Nelson will attempt to create it on boot)
default-namespace = "dev"
network {
bind-host = "0.0.0.0"
bind-port = 9000
# how long should nelson wait before closing requests to nelson
# itself (equates to idle-timeout on http4s' blaze builder)
idle-timeout = 60 seconds
# represents the domain that users will call nelson on.
# this is kept separate from bind-host because nelson is typically run
# behind a reverse proxy that handles SSL termination
external-host = "nelson.local"
# if external-port is not supplied, then we assume port 443 (https)
external-port = 443
# will we be using HTTPS to present the nelson API to the world
enable-tls = true
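# e.g. with the values above nelson would be addressed by users as
# https://nelson.local (external-port 443 with enable-tls = true); a
# plain-http setup might instead use enable-tls = false and
# external-port = 80 (a sketch, not a tested configuration).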
monitoring-port = 5775
}
security {
expire-login-after = 14 days
# needs to be a static long which is used to salt the key derivation,
# but as we do not need rotating keys for use within nelson, we just
# fix this value.
key-id = 468513861
# for development, sometimes we want to avoid actually logging in
# with github. when set to `true` this will force the system to "auto-auth"
# based on local environment variables. This really is for
# development use only!
use-environment-session = false
# Both of these keys can be generated by bin/generate-keys
#
# IMPORTANT: you must specify a value for this
# Required: The key used to encrypt session tokens, in base64.
# DO NOT SHARE
encryption-key = "unknown"
#
# IMPORTANT: you must specify a value for this
# The private key used to sign session tokens, in base64.
# DO NOT SHARE
signature-key = "unknown"
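# a sketch of how these might be populated (the values below are hypothetical
# placeholders, not working keys): run bin/generate-keys and paste the
# base64 strings it produces, e.g.
# encryption-key = "c29tZS1iYXNlNjQtZW5jcnlwdGlvbi1rZXk="
# signature-key = "c29tZS1iYXNlNjQtc2lnbmF0dXJlLWtleQ=="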
}
# How will Nelson talk to Github. Please refer to the Github documentation
# for more information: http://bit.ly/2r5pYVO
#
# In addition to the Github application, Nelson also requires its own user-level
# account on github (this will be fixed in future versions so that it's not required).
github {
client-id = "xxxxxxxxxxx"
client-secret = "yyyyy"
redirect-uri = "http://nelson.local/auth/exchange"
scope = "repo"
access-token = "replaceme"
system-username = "nelson"
}
# Nelson is expecting to be able to talk to the docker socket in
# order to push, pull and execute docker containers. Typically the
# `connection` field will take the form of either `tcp://127.0.0.1:1234`
# or a unix socket location like `unix:///var/run/docker.sock` etc.
docker {
connection = "tcp://0.0.0.0:2376"
verify-tls = true
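# a local-development sketch using the unix socket form mentioned above
# (assumes the docker daemon socket is at its default location):
# connection = "unix:///var/run/docker.sock"
# verify-tls = false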
}
database {
# at the time of writing, Nelson only supports H2 as its database.
driver = "org.h2.Driver"
connection = "jdbc:h2:file:/opt/application/db/nelson;DATABASE_TO_UPPER=FALSE;AUTO_SERVER=TRUE;"
# username = ""
# password = ""
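# for throwaway local experimentation an in-memory H2 database is an
# option (a sketch; all state is lost when the process stops):
# connection = "jdbc:h2:mem:nelson;DB_CLOSE_DELAY=-1"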
}
ui {
# should nelson serve a user interface, or not?
enabled = true
# where are the assets located on disk, that Nelson
# will serve as its UI contents; this is for local development purposes
# only and should not be used in production.
# file-path = "/path/to/nelson/ui"
}
nomad {
# These service tags will be automatically added (via set union)
# to all services deployed to Nomad.
required-service-tags = [
"svc",
# The following tag allows prometheus to differentiate between
# nelson- and manually-deployed services.
"nomad-scheduled"
]
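# e.g. (illustrative) a unit deployed with its own tag ["my-app"] would be
# registered in nomad with the tag set ["my-app", "svc", "nomad-scheduled"],
# since the required tags above are unioned with whatever the unit declares.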
}
cleanup {
# initial time to live for deployments
# this is basically the grace period
# given to all deployments
initial-deployment-time-to-live = 30 minutes
# extended time to live for deployments
# the amount a deployment expiration is bumped
extend-deployment-time-to-live = 30 minutes
# delay between cleanup process runs, i.e. how often the cleanup pipeline executes
cleanup-delay = 10 minutes
# delay between long-term (sweeper) cleanup process runs
sweeper-delay = 24 hours
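# a worked example with the defaults above (a sketch of the intent, not a
# precise description of the scheduler): a fresh deployment starts with a
# 30 minute grace period; each cleanup pass (every 10 minutes) that finds
# it still in use bumps its expiration by a further 30 minutes.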
}
pipeline {
# the maximum number of pipeline workflows nelson will handle
# at the same time. This limit is typically bound by the hardware
# that the service runs on. A general rule of thumb would be to
# set this value to the same number of processors that are available
# to nelson.
concurrency-limit = 4
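# e.g. on an 8-vCPU host the rule of thumb above would suggest
# concurrency-limit = 8.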
# the absolute limit for the number of deployments
# that we'll queue before we start to drop them, to preserve
# the well-being of the rest of the system.
inbound-buffer-limit = 50
}
audit {
concurrency-limit = 4
inbound-buffer-limit = 50
}
# these are the settings for the consul-template linter.
# with the exception of the templating image, it is expected
# that these defaults will not need to be changed and should
# suffice in most use cases.
template {
temp-dir = "/tmp"
memory-mb = 8
# specify the CPU CFS scheduler period, which is used alongside
# cpu-quota. defaults to 100000 microseconds (100 milliseconds).
cpu-period = 100000
# impose a CPU CFS quota on the container: the number of microseconds
# per cpu-period that the container is limited to before being throttled,
# effectively acting as a ceiling on its execution.
cpu-quota = 50000
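# worked example: with cpu-period = 100000 and cpu-quota = 50000 the linter
# container is capped at 50000/100000 = 0.5 of a single CPU core.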
# how long to give the container before it is actively killed.
timeout = 10 seconds
# what container shall we use? for information about the linting
# engine protocol please see here: https://github.com/getnelson/containers#linters
template-engine-image = "getnelson/linter-consul-template:2.0.13"
}
workflow-logger {
# Every workflow logs progress information to a namespaced file.
# The logging is run as a separate process, hence the buffer limit
# for the queue.
inbound-buffer-limit = 50
# The base directory where the workflow logging files are stored.
# Ensure when deploying Nelson as a container that this location is
# bind-mounted to a host volume, or uses a docker-volume container.
file-path = "/var/nelson/log"
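# e.g. (a sketch; image name and tag are placeholders) when running nelson
# itself as a container, bind-mount this directory to the host:
#   docker run -v /var/nelson/log:/var/nelson/log ... your-nelson-image:tag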
}
# configuration for the slack endpoints, where webhook-url defines the
# url that the slack admin view provides when building custom integrations.
# `username` denotes the bot name that will appear in the slack channel
# slack {
# webhook-url = "https://hooks.slack.com/services/....."
# username = "Nelson"
# }
# receive email updates from Nelson about various state changes, like failed
# deployments, successful deployments and so forth.
# email {
# host = "somehost"
# port = 9000
# from = "nelson@example.com"
# user = "someuser"
# password = "somepassword"
# }
######## datacenter configuration.
# no defaults are assumed here, as this must be provided by the implementor.
# this section is included here for documentation purposes only.
datacenters {
texas {
domain = "your.company.com"
docker-registry = "registry.service.texas.your.company.com/whatever"
infrastructure {
# values can be "kubernetes" | "nomad"
scheduler = "nomad"
# values can be "kubernetes" | "consul" | "noop"
discovery = "consul"
# values can be "kubernetes" | "consul" | "noop"
readiness = "consul"
kubernetes {
in-cluster = false
timeout = 10 seconds
kubeconfig = "/opt/application/conf/kubeconfig"
}
nomad {
endpoint = "http://nomad.service.texas.your.company.com:1234/"
timeout = 1 second
docker {
host = "registry.service.texas.your.company.com"
user = "someuser"
password = "dummypwd"
}
}
consul {
endpoint = "http://consul.service.texas.your.company.com"
timeout = 1 second
acl-token = "XXXXXXXXX"
username = "XXXXXXXXX"
password = "XXXXXXXXX"
}
vault {
endpoint = "https://vault.service.texas.your.company.com:1234"
auth-token = "XXXXXXXXX"
timeout = 5 seconds
# optional: if your backend auth provider (for kubernetes, as one example)
# has the same name as the datacenter, you can omit this field. As that is
# typically not the case (most teams have their own naming convention), you
# can specify a prefix that will be prepended to the datacenter name:
auth-role-prefix = "aws-"
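# e.g. with auth-role-prefix = "aws-" and this datacenter named "texas",
# the vault auth role referenced would be "aws-texas" (an illustration of
# the prefixing described above).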
}
loadbalancer {
aws {
# access-key-id and secret-access-key are OPTIONAL;
# one can instead use the IAM instance profile provider
access-key-id = "XXXXXXXXX"
secret-access-key = "XXXXXXXXX"
region = "texas"
launch-configuration-name = "nelson-lb-1-LaunchConfig-XXXXXXXXXXXXXXXXXX"
# image is optional
image = "registry.service.texas.your.company.com/whatever/infra-lb:1.2.3"
elb-security-group-names = [ "sg-AAAAAAAA", "sg-BBBBBBB" ]
availability-zones {
texas-a {
private-subnet = "subnet-AAAAAAAA"
public-subnet = "subnet-BBBBBBB"
}
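# additional zones follow the same shape, e.g. (subnet ids below are
# hypothetical placeholders):
# texas-b {
#   private-subnet = "subnet-CCCCCCCC"
#   public-subnet = "subnet-DDDDDDD"
# }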
}
}
}
}
policy {
# Path under which credentials are stored for
# resources. Units will get read capability on each resource.
# Supported variables: %env%, %resource%, %unit%
resource-creds-path = "nelson/%env%/%resource%/creds/%unit%"
# Path to your PKI backend. Optional. If specified, units will get
# create and update capabilities on ${pki-path}/issue
# Supported variables: %env%
# pki-path = "pki"
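# e.g. (names are hypothetical) for a unit "howdy-http" using a resource
# "s3" in the "dev" environment, the resource-creds-path above expands to
# "nelson/dev/s3/creds/howdy-http".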
}
}
}
}