Move CI vars out of gitlab and into var files (#1808)

pull/1839/head
Matthew Mosesohn 2017-10-18 17:28:54 +01:00 committed by GitHub
parent c9fe8fde59
commit 4efb0b78fa
21 changed files with 274 additions and 196 deletions
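The gist of the change, for orientation: each per-scenario block of GitLab CI variables is replaced by one vars file per job under tests/files/, selected through CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml" and passed to ansible-playbook as -e @${CI_TEST_VARS} (create-gce.yml also loads the same file with include_vars). Below is a minimal sketch of that wiring, assuming the vars file for the coreos-calico-aio job is simply named after the job; the real job definitions and vars files appear in the diff that follows.

# .gitlab-ci.yml (sketch)
variables:
  CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"   # e.g. ./tests/files/coreos-calico-aio.yml

coreos-calico-aio:
  # stage: deploy-gce-part1
  script:
    - >
      ansible-playbook -i inventory/inventory.ini -b -u $SSH_USER
      -e @${CI_TEST_VARS}
      cluster.yml

# tests/files/coreos-calico-aio.yml (sketch): carries what used to be CI variables
cloud_image: coreos-stable
kube_network_plugin: calico
mode: aio
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce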

View File

@@ -40,6 +40,7 @@ before_script:
GCE_USER: travis
SSH_USER: $GCE_USER
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
CONTAINER_ENGINE: docker
PRIVATE_KEY: $GCE_PRIVATE_KEY
GS_ACCESS_KEY_ID: $GS_KEY
@@ -48,20 +49,11 @@ before_script:
GCE_PREEMPTIBLE: "false"
ANSIBLE_KEEP_REMOTE_FILES: "1"
ANSIBLE_CONFIG: ./tests/ansible.cfg
BOOTSTRAP_OS: none
DOWNLOAD_LOCALHOST: "false"
DOWNLOAD_RUN_ONCE: "false"
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
UPGRADE_TEST: "false"
KUBEADM_ENABLED: "false"
RESOLVCONF_MODE: docker_dns
LOG_LEVEL: "-vv"
ETCD_DEPLOYMENT: "docker"
KUBELET_DEPLOYMENT: "host"
VAULT_DEPLOYMENT: "docker"
WEAVE_CPU_LIMIT: "100m"
EXTRA_SETTINGS: "{}"
MAGIC: "ci check this"
.gce: &gce
@@ -82,7 +74,9 @@ before_script:
- echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
- chmod 400 $HOME/.ssh/id_rsa
- ansible-playbook --version
- export PYPATH=$([ "$BOOTSTRAP_OS" != "coreos" ] && echo /usr/bin/python || echo /opt/bin/python)
- export PYPATH=$([[ ! "$CI_JOB_NAME" =~ "coreos" ]] && echo /usr/bin/python || echo /opt/bin/python)
- echo "CI_JOB_NAME is $CI_JOB_NAME"
- echo "PYPATH is $PYPATH"
script:
- pwd
- ls
@@ -91,18 +85,12 @@ before_script:
- >
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
${LOG_LEVEL}
-e cloud_image=${CLOUD_IMAGE}
-e cloud_region=${CLOUD_REGION}
-e gce_credentials_file=${HOME}/.ssh/gce.json
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e cloud_machine_type=${CLOUD_MACHINE_TYPE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e preemptible=$GCE_PREEMPTIBLE
-e startup_script="'${STARTUP_SCRIPT}'"
# Check out latest tag if testing upgrade
# Uncomment when gitlab kargo repo has tags
@@ -112,28 +100,17 @@ before_script:
# Create cluster
- >
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
ansible-playbook
-i inventory/inventory.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e cert_management=${CERT_MGMT:-script}
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e "${EXTRA_SETTINGS}"
--limit "all:!fake_hosts"
cluster.yml
@@ -143,27 +120,17 @@ before_script:
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
git checkout "${CI_BUILD_REF}";
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
ansible-playbook
-i inventory/inventory.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e "${EXTRA_SETTINGS}"
--limit "all:!fake_hosts"
$PLAYBOOK;
fi
@@ -183,25 +150,16 @@ before_script:
## Idempotency checks 1/5 (repeat deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
ansible-playbook
-i inventory/inventory.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "${EXTRA_SETTINGS}"
--limit "all:!fake_hosts"
cluster.yml;
fi
@@ -209,8 +167,14 @@ before_script:
## Idempotency checks 2/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
ansible-playbook
-i inventory/inventory.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
--limit "all:!fake_hosts"
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
@@ -218,11 +182,14 @@ before_script:
## Idempotency checks 3/5 (reset deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
ansible-playbook
-i inventory/inventory.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e reset_confirmation=yes
--limit "all:!fake_hosts"
@@ -232,25 +199,16 @@ before_script:
## Idempotency checks 4/5 (redeploy after reset)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
ansible-playbook
-i inventory/inventory.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "${EXTRA_SETTINGS}"
--limit "all:!fake_hosts"
cluster.yml;
fi
@@ -267,165 +225,73 @@ before_script:
after_script:
- >
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e @${CI_TEST_VARS}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_credentials_file=${HOME}/.ssh/gce.json
-e cloud_image=${CLOUD_IMAGE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e cloud_region=${CLOUD_REGION}
# Test matrix. Leave the comments for markup scripts.
.coreos_calico_aio_variables: &coreos_calico_aio_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: coreos-stable
CLOUD_REGION: us-west1-b
CLOUD_MACHINE_TYPE: "n1-standard-2"
CLUSTER_MODE: aio
BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
##User-data to simply turn off coreos upgrades
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
MOVED_TO_GROUP_VARS: "true"
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: ubuntu-1604-lts
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: ha
UPGRADE_TEST: "graceful"
STARTUP_SCRIPT: ""
.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: weave
CLOUD_IMAGE: centos-7
CLOUD_MACHINE_TYPE: "n1-standard-1"
CLOUD_REGION: us-central1-b
CLUSTER_MODE: ha
KUBEADM_ENABLED: "true"
UPGRADE_TEST: "graceful"
STARTUP_SCRIPT: ""
.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: ubuntu-1604-lts
CLOUD_MACHINE_TYPE: "n1-standard-1"
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: ha
KUBEADM_ENABLED: "true"
STARTUP_SCRIPT: ""
MOVED_TO_GROUP_VARS: "true"
.rhel7_weave_variables: &rhel7_weave_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: weave
CLOUD_IMAGE: rhel-7
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: default
STARTUP_SCRIPT: ""
MOVED_TO_GROUP_VARS: "true"
.centos7_flannel_addons_variables: &centos7_flannel_addons_variables
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: centos-7
CLOUD_REGION: us-west1-a
CLOUD_MACHINE_TYPE: "n1-standard-1"
CLUSTER_MODE: default
EXTRA_SETTINGS: >-
{ helm_enabled: true,
istio_enabled: true,
efk_enabled: true }
STARTUP_SCRIPT: ""
MOVED_TO_GROUP_VARS: "true"
.debian8_calico_variables: &debian8_calico_variables
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: debian-8-kubespray
CLOUD_REGION: us-central1-b
CLUSTER_MODE: default
BOOTSTRAP_OS: debian
STARTUP_SCRIPT: ""
MOVED_TO_GROUP_VARS: "true"
.coreos_canal_variables: &coreos_canal_variables
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: coreos-stable
CLOUD_REGION: us-east1-b
CLUSTER_MODE: default
BOOTSTRAP_OS: coreos
IDEMPOT_CHECK: "true"
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: rhel-7
CLOUD_REGION: us-east1-b
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""
MOVED_TO_GROUP_VARS: "true"
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: weave
CLOUD_IMAGE: ubuntu-1604-lts
CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate
IDEMPOT_CHECK: "false"
STARTUP_SCRIPT: ""
MOVED_TO_GROUP_VARS: "true"
.centos7_calico_ha_variables: &centos7_calico_ha_variables
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: calico
DOWNLOAD_LOCALHOST: "true"
DOWNLOAD_RUN_ONCE: "true"
CLOUD_IMAGE: centos-7
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: ha-scale
IDEMPOT_CHECK: "true"
STARTUP_SCRIPT: ""
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: weave
CLOUD_IMAGE: coreos-alpha
CLOUD_REGION: us-west1-a
CLUSTER_MODE: ha-scale
BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
MOVED_TO_GROUP_VARS: "true"
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: ubuntu-1604-lts
CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate
ETCD_DEPLOYMENT: rkt
KUBELET_DEPLOYMENT: rkt
STARTUP_SCRIPT: ""
MOVED_TO_GROUP_VARS: "true"
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
# stage: deploy-gce-part1
CLOUD_MACHINE_TYPE: "n1-standard-1"
KUBE_NETWORK_PLUGIN: canal
CERT_MGMT: vault
CLOUD_IMAGE: ubuntu-1604-lts
CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""
MOVED_TO_GROUP_VARS: "true"
.ubuntu_flannel_variables: &ubuntu_flannel_variables
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: ubuntu-1604-lts
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""
MOVED_TO_GROUP_VARS: "true"
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
coreos-calico-aio:

View File

@@ -8,13 +8,14 @@
with_items:
- python
- pip
- dbus-daemon
tags:
- facts
- name: Bootstrap | Install python 2.x and pip
raw:
apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal python-pip
DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal python-pip dbus
when:
"{{ need_bootstrap.results | map(attribute='rc') | sort | last | bool }}"

View File

@@ -11,6 +11,10 @@
- include: bootstrap-centos.yml
when: bootstrap_os == "centos"
- debug:
    msg: "bootstrap_os: {{ bootstrap_os|d('') }}"
- include: setup-pipelining.yml
- name: check if atomic host

View File

@@ -1,4 +1,8 @@
---
# Versions
kubedns_version: 1.14.2
kubednsautoscaler_version: 1.1.1
# Limits for dnsmasq/kubedns apps
dns_memory_limit: 170Mi
dns_cpu_requests: 100m
@@ -6,6 +10,16 @@ dns_memory_requests: 70Mi
kubedns_min_replicas: 2
kubedns_nodes_per_replica: 10
# Images
kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
kubedns_image_tag: "{{ kubedns_version }}"
dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
# Netchecker
deploy_netchecker: false
netchecker_port: 31081

View File

@@ -6,7 +6,11 @@
cloud_machine_type: g1-small
mode: default
preemptible: no
ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}"
tasks:
- name: include vars for test {{ ci_job_name }}
include_vars: "../files/{{ ci_job_name }}.yml"
- name: replace_test_id
set_fact:
test_name: "{{test_id |regex_replace('\\.', '-')}}"
@@ -32,7 +36,7 @@
credentials_file: "{{gce_credentials_file | default(omit)}}"
project_id: "{{ gce_project_id }}"
zone: "{{cloud_region}}"
metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}", "startup-script": "{{startup_script}}"}'
metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}", "startup-script": "{{startup_script|default("")}}"}'
tags: "build-{{test_name}},{{kube_network_plugin}}"
ip_forward: yes
service_account_permissions: ['compute-rw']
@@ -59,7 +63,6 @@
dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
when: mode in ['scale', 'separate-scale', 'ha-scale']
- name: Wait for instances
hosts: "waitfor_hosts"
gather_facts: false

View File

@@ -0,0 +1,15 @@
# Instance settings
cloud_image: centos-7
cloud_machine_type: "n1-standard-1"
cloud_region: us-central1-b
mode: ha
startup_script: ""
# Deployment settings
kube_network_plugin: weave
weave_cpu_limit: "100m"
weave_cpu_requests: "100m"
kubeadm_enabled: "true"
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,12 @@
# Instance settings
cloud_image: centos-7
cloud_region: europe-west1-b
mode: ha-scale
# Deployment settings
kube_network_plugin: calico
download_localhost: "true"
download_run_once: "true"
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,14 @@
# Instance settings
cloud_image: centos-7
cloud_region: us-west1-a
cloud_machine_type: "n1-standard-1"
mode: default
# Deployment settings
kube_network_plugin: flannel
helm_enabled: true
istio_enabled: true
efk_enabled: true
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,15 @@
# Instance settings
cloud_image: coreos-alpha
cloud_region: us-west1-a
mode: ha-scale
startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
# Deployment settings
kube_network_plugin: weave
weave_cpu_limit: "100m"
weave_cpu_requests: "100m"
bootstrap_os: coreos
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,15 @@
# Instance settings
cloud_image: coreos-stable
cloud_region: us-west1-b
cloud_machine_type: "n1-standard-2"
mode: aio
##user-data to simply turn off coreos upgrades
startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
# Deployment settings
bootstrap_os: coreos
kube_network_plugin: calico
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,13 @@
# Instance settings
cloud_image: coreos-stable
cloud_region: us-east1-b
mode: default
startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
# Deployment settings
kube_network_plugin: canal
bootstrap_os: coreos
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,11 @@
# Instance settings
cloud_image: debian-8-kubespray
cloud_region: us-central1-b
mode: default
# Deployment settings
kube_network_plugin: calico
bootstrap_os: debian
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,10 @@
# Instance settings
cloud_image: rhel-7
cloud_region: us-east1-b
mode: separate
# Deployment settings
kube_network_plugin: canal
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,12 @@
# Instance settings
cloud_image: rhel-7
cloud_region: europe-west1-b
mode: default
# Deployment settings
kube_network_plugin: weave
weave_cpu_limit: "100m"
weave_cpu_requests: "100m"
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,11 @@
# Instance settings
cloud_image: ubuntu-1604-lts
cloud_region: europe-west1-b
mode: ha
# Deployment settings
bootstrap_os: ubuntu
kube_network_plugin: canal
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,13 @@
# Instance settings
cloud_image: ubuntu-1604-lts
cloud_machine_type: "n1-standard-1"
cloud_region: europe-west1-b
mode: ha
# Deployment settings
bootstrap_os: ubuntu
kube_network_plugin: canal
kubeadm_enabled: "true"
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,11 @@
# Instance settings
cloud_image: ubuntu-1604-lts
cloud_region: europe-west1-b
mode: separate
# Deployment settings
bootstrap_os: ubuntu
kube_network_plugin: flannel
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,13 @@
# Instance settings
cloud_image: ubuntu-1604-lts
cloud_region: us-central1-b
mode: separate
# Deployment settings
bootstrap_os: ubuntu
kube_network_plugin: flannel
etcd_deployment: rkt
kubelet_deployment: rkt
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,13 @@
# Instance settings
cloud_machine_type: "n1-standard-1"
cloud_image: ubuntu-1604-lts
cloud_region: us-central1-b
mode: separate
# Deployment settings
bootstrap_os: ubuntu
cert_mgmt: vault
kube_network_plugin: canal
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -0,0 +1,13 @@
# Instance settings
cloud_image: ubuntu-1604-lts
cloud_region: us-central1-b
mode: separate
# Deployment settings
bootstrap_os: ubuntu
kube_network_plugin: weave
weave_cpu_limit: "100m"
weave_cpu_requests: "100m"
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View File

@@ -74,4 +74,3 @@ fake_scale_host[1:200]
[kube-node:children]
fake_hosts
{% endif %}