kubespray/.gitlab-ci.yml

---
stages:
- unit-tests
- moderator
- deploy-part1
- deploy-part2
- deploy-special
variables:
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
# DOCKER_HOST: tcp://localhost:2375
ANSIBLE_FORCE_COLOR: "true"
MAGIC: "ci check this"
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
GS_ACCESS_KEY_ID: $GS_KEY
GS_SECRET_ACCESS_KEY: $GS_SECRET
CONTAINER_ENGINE: docker
SSH_USER: root
GCE_PREEMPTIBLE: "false"
ANSIBLE_KEEP_REMOTE_FILES: "1"
ANSIBLE_CONFIG: ./tests/ansible.cfg
ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
UPGRADE_TEST: "false"
LOG_LEVEL: "-vv"
# asia-east1-a
# asia-northeast1-a
# europe-west1-b
# us-central1-a
# us-east1-b
# us-west1-a
before_script:
- /usr/bin/python -m pip install -r tests/requirements.txt
- mkdir -p /.ssh
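# The hidden keys below (names starting with ".") never run as jobs; they define
# YAML anchors ("&name") that the concrete jobs merge in via "<<: *name" to share
# tags, image, variables and scripts.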
.job: &job
tags:
- kubernetes
- docker
image: quay.io/kubespray/kubespray:v2.8
.docker_service: &docker_service
services:
- docker:dind
.create_cluster: &create_cluster
<<: *job
<<: *docker_service
.gce_variables: &gce_variables
GCE_USER: travis
SSH_USER: $GCE_USER
CLOUD_MACHINE_TYPE: "g1-small"
CI_PLATFORM: "gce"
PRIVATE_KEY: $GCE_PRIVATE_KEY
.do_variables: &do_variables
PRIVATE_KEY: $DO_PRIVATE_KEY
CI_PLATFORM: "do"
SSH_USER: root
.testcases: &testcases
<<: *job
<<: *docker_service
cache:
key: "$CI_BUILD_REF_NAME"
paths:
- downloads/
- $HOME/.cache
before_script:
- docker info
- /usr/bin/python -m pip install -r requirements.txt
- /usr/bin/python -m pip install -r tests/requirements.txt
- mkdir -p /.ssh
- mkdir -p $HOME/.ssh
- ansible-playbook --version
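# coreos images do not ship /usr/bin/python, so coreos jobs use the interpreter
# bootstrapped under /opt/bin instead (see the export below).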
- export PYPATH=$([[ ! "$CI_JOB_NAME" =~ "coreos" ]] && echo /usr/bin/python || echo /opt/bin/python)
- echo "CI_JOB_NAME is $CI_JOB_NAME"
- echo "PYPATH is $PYPATH"
script:
- pwd
- ls
- echo ${PWD}
- echo "${STARTUP_SCRIPT}"
- cd tests && make create-${CI_PLATFORM} -s ; cd -
# Check out latest tag if testing upgrade
- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
# Check out the CI vars file so it is available
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
# Workaround https://github.com/kubernetes-sigs/kubespray/issues/2021
- 'sh -c "echo ignore_assert_errors: true | tee -a tests/files/${CI_JOB_NAME}.yml"'
# Create cluster
- >
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_ssh_user=${SSH_USER}
-e local_release_dir=${PWD}/downloads
--limit "all:!fake_hosts"
cluster.yml
# Repeat deployment if testing upgrade
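## UPGRADE_TEST=basic re-applies cluster.yml from the current ref on top of the release deployed above; "graceful" runs upgrade-cluster.yml instead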
- >
if [ "${UPGRADE_TEST}" != "false" ]; then
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
git checkout "${CI_BUILD_REF}";
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_ssh_user=${SSH_USER}
-e local_release_dir=${PWD}/downloads
--limit "all:!fake_hosts"
$PLAYBOOK;
fi
# Test cases
## Test Master API
- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
## Ping between two pods
- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
## Advanced DNS checks
- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
## Idempotency checks 1/5 (repeat deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e local_release_dir=${PWD}/downloads
--limit "all:!fake_hosts"
cluster.yml;
fi
## Idempotency checks 2/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
--limit "all:!fake_hosts"
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
## Idempotency checks 3/5 (reset deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e reset_confirmation=yes
--limit "all:!fake_hosts"
reset.yml;
fi
## Idempotency checks 4/5 (redeploy after reset)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e local_release_dir=${PWD}/downloads
--limit "all:!fake_hosts"
cluster.yml;
fi
## Idempotency checks 5/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts"
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
after_script:
- cd tests && make delete-${CI_PLATFORM} -s ; cd -
.gce: &gce
<<: *testcases
variables:
<<: *gce_variables
.do: &do
variables:
<<: *do_variables
<<: *testcases
# Test matrix. Leave the comments for markup scripts.
.coreos_calico_aio_variables: &coreos_calico_aio_variables
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
.ubuntu18_flannel_aio_variables: &ubuntu18_flannel_aio_variables
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
# stage: deploy-part1
UPGRADE_TEST: "graceful"
.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.coreos_cilium_variables: &coreos_cilium_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.rhel7_weave_variables: &rhel7_weave_variables
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
.centos7_flannel_addons_variables: &centos7_flannel_addons_variables
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"
.debian9_calico_variables: &debian9_calico_variables
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"
.coreos_canal_variables: &coreos_canal_variables
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.centos7_calico_ha_variables: &centos7_calico_ha_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.centos7_kube_router_variables: &centos7_kube_router_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.centos7_multus_calico_variables: &centos7_multus_calico_variables
# stage: deploy-part2
UPGRADE_TEST: "graceful"
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.coreos_kube_router_variables: &coreos_kube_router_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
.ubuntu_flannel_variables: &ubuntu_flannel_variables
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"
.ubuntu_kube_router_variables: &ubuntu_kube_router_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.opensuse_canal_variables: &opensuse_canal_variables
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"
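# Adding a matrix entry follows the same pattern as the anchors above: a
# "<distro>_<plugin>_variables" anchor here plus a job further down that merges it
# with *job, *gce and *gce_variables, and a matching vars file under
# tests/files/<job-name>.yml. Illustrative sketch only (hypothetical
# "fedora_calico" case, kept commented out):
#
# .fedora_calico_variables: &fedora_calico_variables
#   # stage: deploy-part2
#   MOVED_TO_GROUP_VARS: "true"
#
# gce_fedora-calico:
#   stage: deploy-part2
#   <<: *job
#   <<: *gce
#   variables:
#     <<: *gce_variables
#     <<: *fedora_calico_variables
#   when: manual
#   except: ['triggers']
#   only: ['master', /^pr-.*$/]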
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
### PR JOBS PART1
gce_ubuntu18-flannel-aio:
stage: deploy-part1
<<: *job
<<: *gce
variables:
<<: *ubuntu18_flannel_aio_variables
<<: *gce_variables
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
### PR JOBS PART2
gce_coreos-calico-aio:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *coreos_calico_aio_variables
<<: *gce_variables
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
gce_centos7-flannel-addons:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos7_flannel_addons_variables
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
### MANUAL JOBS
gce_centos-weave-kubeadm-sep:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos_weave_kubeadm_variables
when: on_success
only: ['triggers']
gce_ubuntu-weave-sep:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_weave_sep_variables
when: manual
only: ['triggers']
gce_coreos-calico-sep-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_calico_aio_variables
when: on_success
only: ['triggers']
gce_ubuntu-canal-ha-triggers:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_ha_variables
when: on_success
only: ['triggers']
gce_centos7-flannel-addons-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos7_flannel_addons_variables
when: on_success
only: ['triggers']
gce_ubuntu-weave-sep-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_weave_sep_variables
when: on_success
only: ['triggers']
# More builds for PRs/merges (manual) and triggers (auto)
do_ubuntu-canal-ha:
stage: deploy-part2
<<: *job
<<: *do
variables:
<<: *do_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-canal-ha:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_ha_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-canal-kubeadm:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_kubeadm_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-canal-kubeadm-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_kubeadm_variables
when: on_success
only: ['triggers']
gce_ubuntu-flannel-ha:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_flannel_variables
when: manual
except: ['triggers']
gce_centos-weave-kubeadm-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos_weave_kubeadm_variables
when: on_success
only: ['triggers']
gce_ubuntu-contiv-sep:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_contiv_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-cilium:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_cilium_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-cilium-sep:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_cilium_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-weave:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *rhel7_weave_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-weave-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *rhel7_weave_variables
when: on_success
only: ['triggers']
gce_debian9-calico-upgrade:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *debian9_calico_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_debian9-calico-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *debian9_calico_variables
when: on_success
only: ['triggers']
gce_coreos-canal:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_canal_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-canal-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_canal_variables
when: on_success
only: ['triggers']
gce_rhel7-canal-sep:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *rhel7_canal_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-canal-sep-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *rhel7_canal_sep_variables
when: on_success
only: ['triggers']
gce_centos7-calico-ha:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos7_calico_ha_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_centos7-calico-ha-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos7_calico_ha_variables
when: on_success
only: ['triggers']
gce_centos7-kube-router:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos7_kube_router_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_centos7-multus-calico:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos7_multus_calico_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_opensuse-canal:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *opensuse_canal_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
gce_coreos-alpha-weave-ha:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_alpha_weave_ha_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-kube-router:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_kube_router_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-rkt-sep:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_rkt_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-kube-router-sep:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_kube_router_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
# Premoderated with manual actions
ci-authorized:
<<: *job
stage: moderator
before_script:
- apt-get -y install jq
script:
- /bin/sh scripts/premoderator.sh
except: ['triggers', 'master']
syntax-check:
<<: *job
stage: unit-tests
script:
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
except: ['triggers', 'master']
yamllint:
<<: *job
stage: unit-tests
script:
- yamllint .
except: ['triggers', 'master']
tox-inventory-builder:
stage: unit-tests
<<: *job
script:
- pip install tox
- cd contrib/inventory_builder && tox
when: manual
except: ['triggers', 'master']
# Tests for contrib/terraform/
.terraform_install: &terraform_install
<<: *job
before_script:
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
# Install Terraform
- apt-get install -y unzip
- curl https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip > /tmp/terraform.zip
- unzip /tmp/terraform.zip && mv ./terraform /usr/local/bin/ && terraform --version
# Prepare inventory
- cp -LRp contrib/terraform/$PROVIDER/sample-inventory inventory/$CLUSTER
- cd inventory/$CLUSTER
- ln -s ../../contrib/terraform/$PROVIDER/hosts
- terraform init ../../contrib/terraform/$PROVIDER
# Copy SSH keypair
- mkdir -p ~/.ssh
- echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
- chmod 400 ~/.ssh/id_rsa
- echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
- export TF_VAR_public_key_path=""
only: ['master', /^pr-.*$/]
.terraform_validate: &terraform_validate
<<: *terraform_install
stage: unit-tests
script:
- terraform validate -var-file=cluster.tf ../../contrib/terraform/$PROVIDER
.terraform_apply: &terraform_apply
<<: *terraform_install
stage: deploy-part2
when: manual
script:
- terraform apply -auto-approve ../../contrib/terraform/$PROVIDER
- ansible-playbook -i hosts ../../cluster.yml
after_script:
# Cleanup regardless of exit code
- cd inventory/$CLUSTER
- terraform destroy -auto-approve ../../contrib/terraform/$PROVIDER
tf-validate-openstack:
<<: *terraform_validate
variables:
TF_VERSION: 0.11.11
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-packet:
<<: *terraform_validate
variables:
TF_VERSION: 0.11.11
PROVIDER: packet
CLUSTER: $CI_COMMIT_REF_NAME
tf-apply-packet:
<<: *terraform_apply
variables:
TF_VERSION: 0.11.11
PROVIDER: packet
CLUSTER: $CI_COMMIT_REF_NAME
TF_VAR_cluster_name: $CI_COMMIT_REF_NAME
TF_VAR_number_of_k8s_masters: "1"
TF_VAR_number_of_k8s_nodes: "1"
TF_VAR_plan_k8s_masters: t1.small.x86
TF_VAR_plan_k8s_nodes: t1.small.x86
TF_VAR_facility: "ewr1"
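# Terraform picks up any TF_VAR_<name> environment variable as the input variable
# "<name>", so e.g. TF_VAR_number_of_k8s_nodes above feeds the number_of_k8s_nodes
# variable expected by contrib/terraform/packet.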