kubespray/.travis.yml

sudo: false
git:
  depth: 5
env:
  global:
    GCE_USER=travis
    SSH_USER=$GCE_USER
    TEST_ID=$TRAVIS_JOB_NUMBER
    CONTAINER_ENGINE=docker
    PRIVATE_KEY=$GCE_PRIVATE_KEY
    GS_ACCESS_KEY_ID=$GS_KEY
    GS_SECRET_ACCESS_KEY=$GS_SECRET
    ANSIBLE_KEEP_REMOTE_FILES=1
    CLUSTER_MODE=default
    BOOTSTRAP_OS=none
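  # Each matrix entry below tests one network plugin against one OS image in
  # one GCE region, either with the default node layout or with separated
  # roles (CLUSTER_MODE=separate).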
  matrix:
    # Debian Jessie
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=europe-west1-b
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-central1-c
      CLUSTER_MODE=default
    # CentOS 7
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=asia-east1-c
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=europe-west1-b
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=us-central1-c
      CLUSTER_MODE=default
    # RHEL 7
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=asia-east1-c
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=europe-west1-b
      CLUSTER_MODE=default
    # Ubuntu 16.04
    - >-
      KUBE_NETWORK_PLUGIN=canal
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-central1-c
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=asia-east1-c
      CLUSTER_MODE=default
    # CoreOS stable
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=coreos-stable
      CLOUD_REGION=europe-west1-b
      CLUSTER_MODE=default
      BOOTSTRAP_OS=coreos
    - >-
      KUBE_NETWORK_PLUGIN=canal
      CLOUD_IMAGE=coreos-stable
      CLOUD_REGION=us-east1-d
      CLUSTER_MODE=default
      BOOTSTRAP_OS=coreos
    # Extra cases for separated roles
    - >-
      KUBE_NETWORK_PLUGIN=canal
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=europe-west1-b
      CLUSTER_MODE=separate
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-central1-a
      CLUSTER_MODE=separate
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-east1-d
      CLUSTER_MODE=separate
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=coreos-stable
      CLOUD_REGION=asia-east1-c
      CLUSTER_MODE=separate
      BOOTSTRAP_OS=coreos
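# Ansible and the GCE/GCS client libraries are installed with `pip install --user`;
# $HOME/.local and the pip cache are kept between builds (see `cache` below).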
before_install:
  # Install Ansible.
  - pip install --user ansible
  - pip install --user netaddr
  # Workaround for https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
  - pip install --user apache-libcloud==0.20.1
  - pip install --user boto==2.9.0 -U
cache:
  - directories:
    - $HOME/.cache/pip
    - $HOME/.local
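# before_script decodes the SSH keys from the base64-encoded secret variables
# and selects the Python interpreter for the target OS: CoreOS entries
# (BOOTSTRAP_OS=coreos) use /opt/bin/python, all other images use /usr/bin/python.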
before_script:
  - echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
  - mkdir -p $HOME/.ssh
  - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
  - echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
  - chmod 400 $HOME/.ssh/id_rsa
  - chmod 755 $HOME/.local/bin/ansible-playbook
  - $HOME/.local/bin/ansible-playbook --version
  - cp tests/ansible.cfg .
  - export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
  # - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
script:
  - >
    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
    -e mode=${CLUSTER_MODE}
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gce_service_account_email=${GCE_ACCOUNT}
    -e gce_pem_file=${HOME}/.ssh/gce
    -e cloud_image=${CLOUD_IMAGE}
    -e inventory_path=${PWD}/inventory/inventory.ini
    -e cloud_region=${CLOUD_REGION}
  # Create cluster
  - >
    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
    -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e bootstrap_os=${BOOTSTRAP_OS}
    -e ansible_python_interpreter=${PYPATH}
    cluster.yml
  # Test cases
  ## Test the master API
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} tests/testcases/010_check-apiserver.yml $LOG_LEVEL
  ## Create a pod
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
  ## Ping between 2 pods
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
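# On failure, collect diagnostics from the nodes (scripts/collect-info.yaml)
# and upload the logs to Google Cloud Storage for later inspection.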
after_failure:
  - >
    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER
    -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e dir=$HOME
    -e ansible_python_interpreter=${PYPATH}
    scripts/collect-info.yaml
  - >
    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/upload-logs-gcs.yml -i "localhost," -c local
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gs_key=${GS_ACCESS_KEY_ID}
    -e gs_skey=${GS_SECRET_ACCESS_KEY}
    -e ostype=${CLOUD_IMAGE}
    -e commit=${TRAVIS_COMMIT}
    -e dir=${HOME}
    -e ansible_python_interpreter=${PYPATH}
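# after_script runs whether the build passed or failed, so the GCE test
# instances are always torn down.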
after_script:
  - >
    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gce_service_account_email=${GCE_ACCOUNT}
    -e gce_pem_file=${HOME}/.ssh/gce
    -e cloud_image=${CLOUD_IMAGE}
    -e inventory_path=${PWD}/inventory/inventory.ini
    -e cloud_region=${CLOUD_REGION}