Cleanup unused CI tooling (#11014)

pull/11015/head
Max Gautier 2024-03-15 10:57:27 +00:00 committed by GitHub
parent 7ddc175b70
commit ef95eb078a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 1 addition and 538 deletions

View File

@@ -1,18 +1,7 @@
INVENTORY=$(PWD)/../inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini

$(HOME)/.ssh/id_rsa:
	mkdir -p $(HOME)/.ssh
	echo $(PRIVATE_KEY) | base64 -d > $(HOME)/.ssh/id_rsa
	chmod 400 $(HOME)/.ssh/id_rsa

init-gce: $(HOME)/.ssh/id_rsa
# echo $(GCE_PEM_FILE) | base64 -d > $(HOME)/.ssh/gce
	echo "$(GCE_CREDENTIALS_B64)" | base64 -d > $(HOME)/.ssh/gce.json

init-do: $(HOME)/.ssh/id_rsa
	echo $(DO_PRIVATE_KEY) | base64 -d > $(HOME)/.ssh/id_rsa

init-packet:
	mkdir -p $(HOME)/.ssh
	echo $(PACKET_VM_SSH_PRIVATE_KEY) | base64 -d > $(HOME)/.ssh/id_rsa
	chmod 400 $(HOME)/.ssh/id_rsa
@@ -22,21 +11,6 @@ create-tf:

delete-tf:
	./scripts/delete-tf.sh

create-do: init-do
	ansible-playbook cloud_playbooks/create-do.yml -i local_inventory/hosts.cfg -c local \
		${ANSIBLE_LOG_LEVEL} \
		-e @"files/${CI_JOB_NAME}.yml" \
		-e inventory_path=$(INVENTORY) \
		-e test_id=${TEST_ID}

delete-do:
	ansible-playbook -i $(INVENTORY) cloud_playbooks/create-do.yml -c local \
		$(ANSIBLE_LOG_LEVEL) \
		-e @"files/${CI_JOB_NAME}.yml" \
		-e state=absent \
		-e test_id=${TEST_ID} \
		-e inventory_path=$(INVENTORY)

create-packet: init-packet
	ansible-playbook cloud_playbooks/create-packet.yml -c local \
		$(ANSIBLE_LOG_LEVEL) \

View File

@@ -1,40 +0,0 @@
# Kubespray cloud deployment tests
## Amazon Web Service
| | Calico | Flannel | Weave |
------------- | ------------- | ------------- | ------------- |
Debian Jessie | [![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-jessie/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-jessie) | [![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-jessie/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-jessie/) | [![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-jessie/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-jessie/) |
Ubuntu Trusty |[![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-trusty/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-trusty/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-trusty/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-trusty/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-trusty/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-trusty)|
RHEL 7.2 |[![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-rhel72/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-rhel72/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-rhel72/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-rhel72/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-rhel72/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-rhel72/)|
CentOS 7 |[![Build Status](https://ci.kubespray.io/job/kubespray-aws-calico-centos7/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-calico-centos7/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-flannel-centos7/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-flannel-centos7/)|[![Build Status](https://ci.kubespray.io/job/kubespray-aws-weave-centos7/badge/icon)](https://ci.kubespray.io/job/kubespray-aws-weave-centos7/)|
## Test environment variables
### Common
Variable | Description | Required | Default
--------------------- | -------------------------------------- | ---------- | --------
`TEST_ID` | A unique execution ID for this test | Yes |
`KUBE_NETWORK_PLUGIN` | The network plugin (calico or flannel) | Yes |
`PRIVATE_KEY_FILE` | The path to the SSH private key file | No |
### AWS Tests
Variable | Description | Required | Default
--------------------- | ----------------------------------------------- | ---------- | ---------
`AWS_ACCESS_KEY` | The Amazon Access Key ID | Yes |
`AWS_SECRET_KEY` | The Amazon Secret Access Key | Yes |
`AWS_AMI_ID` | The AMI ID to deploy | Yes |
`AWS_KEY_PAIR_NAME` | The name of the EC2 key pair to use | Yes |
`AWS_SECURITY_GROUP` | The EC2 Security Group to use | No | default
`AWS_REGION` | The EC2 region | No | eu-central-1
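
For example, a CI job might export these variables before a run; a minimal sketch with placeholder values (the key pair name is hypothetical, and the AMI mirrors the Debian Jessie default used elsewhere in these tests):

```bash
# Required for every run (placeholder values)
export TEST_ID="42"                   # unique execution ID
export KUBE_NETWORK_PLUGIN="calico"   # or "flannel"

# AWS credentials and machine image (placeholders)
export AWS_ACCESS_KEY="AKIA..."
export AWS_SECRET_KEY="..."
export AWS_AMI_ID="ami-02724d1f"      # Debian Jessie
export AWS_KEY_PAIR_NAME="my-keypair" # hypothetical EC2 key pair

# Optional; fall back to "default" and eu-central-1 when unset
export AWS_SECURITY_GROUP="default"
export AWS_REGION="eu-central-1"
```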
#### Use private ssh key
##### Key
```bash
openssl pkcs12 -in gce-secure.p12 -passin pass:notasecret -nodes -nocerts | openssl rsa -out gce-secure.pem
cat gce-secure.pem | base64 -w0 > GCE_PEM_FILE
```
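
On the CI side the encoded key is decoded back into a file before use; a minimal sketch of the reverse step, following the pattern of the Makefile `init-*` targets above (variable and path names assumed from that Makefile):

```bash
mkdir -p "$HOME/.ssh"
# Decode the base64-encoded key material exported as GCE_PEM_FILE above
echo "$GCE_PEM_FILE" | base64 -d > "$HOME/.ssh/gce"
chmod 400 "$HOME/.ssh/gce"
```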

View File

@@ -1,26 +0,0 @@
---
- name: Provision AWS VMs
  hosts: localhost
  become: False
  gather_facts: False

  tasks:
    - name: Provision a set of instances
      amazon.aws.ec2_instance:
        key_name: "{{ aws.key_name }}"
        aws_access_key: "{{ aws.access_key }}"
        aws_secret_key: "{{ aws.secret_key }}"
        region: "{{ aws.region }}"
        group_id: "{{ aws.group }}"
        instance_type: "{{ aws.instance_type }}"
        image: "{{ aws.ami_id }}"
        wait: true
        count: "{{ aws.count }}"
        instance_tags: "{{ aws.tags }}"
      register: ec2

    - name: Template the inventory
      template:
        src: ../templates/inventory-aws.j2  # noqa no-relative-paths - CI inventory templates are not in role_path
        dest: "{{ inventory_path }}"
        mode: 0644

View File

@@ -1,94 +0,0 @@
---
- name: Provision Digital Ocean VMs
  hosts: localhost
  become: false
  gather_facts: no
  vars:
    state: "present"
    ssh_key_id: "6536865"
    cloud_machine_type: 2gb
    regions:
      - nyc1
      - sfo1
      - nyc2
      - ams2
      - sgp1
      - lon1
      - nyc3
      - ams3
      - fra1
      - tor1
      - sfo2
      - blr1
    cloud_images:
      - fedora-24-x64
      - centos-5-x64
      - centos-5-x32
      - fedora-25-x64
      - debian-7-x64
      - debian-7-x32
      - debian-8-x64
      - debian-8-x32
      - centos-6-x32
      - centos-6-x64
      - ubuntu-16-10-x32
      - ubuntu-16-10-x64
      - freebsd-11-0-x64-zfs
      - freebsd-10-3-x64-zfs
      - ubuntu-12-04-x32
      - ubuntu-12-04-x64
      - ubuntu-16-04-x64
      - ubuntu-16-04-x32
      - ubuntu-14-04-x64
      - ubuntu-14-04-x32
      - centos-7-x64
      - freebsd-11-0-x64
      - freebsd-10-3-x64
      - centos-7-3-1611-x64
    mode: default

  tasks:
    - name: Replace_test_id
      set_fact:
        test_name: "{{ test_id | regex_replace('\\.', '-') }}"

    - name: Show vars
      debug:
        msg: "{{ cloud_region }}, {{ cloud_image }}"

    - name: Set instance names
      set_fact:
        # noqa: jinja[spacing]
        instance_names: >-
          {%- if mode in ['separate', 'ha'] -%}
          ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"]
          {%- else -%}
          ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2"]
          {%- endif -%}

    - name: Manage DO instances | {{ state }}
      community.digitalocean.digital_ocean:
        unique_name: yes
        api_token: "{{ lookup('env', 'DO_API_TOKEN') }}"
        command: "droplet"
        image_id: "{{ cloud_image }}"
        name: "{{ item }}"
        private_networking: no
        region_id: "{{ cloud_region }}"
        size_id: "{{ cloud_machine_type }}"
        ssh_key_ids: "{{ ssh_key_id }}"
        state: "{{ state }}"
        wait: yes
      register: droplets
      with_items: "{{ instance_names }}"

    - debug:  # noqa unnamed-task
        msg: "{{ droplets }}, {{ inventory_path }}"
      when: state == 'present'

    - name: Template the inventory
      template:
        src: ../templates/inventory-do.j2  # noqa no-relative-paths - CI templates are not in role_path
        dest: "{{ inventory_path }}"
        mode: 0644
      when: state == 'present'

View File

@@ -1,19 +0,0 @@
---
- name: Terminate AWS VMs
  hosts: kube_node
  become: False

  tasks:
    - name: Gather EC2 facts
      amazon.aws.ec2_metadata_facts:

    - name: Terminate EC2 instances
      amazon.aws.ec2_instance:
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
        state: absent
        instance_ids: "{{ ansible_ec2_instance_id }}"
        region: "{{ ansible_ec2_placement_region }}"
        wait: True
      delegate_to: localhost
      connection: local

View File

@@ -1,11 +0,0 @@
[Credentials]
gs_access_key_id = {{ gs_key }}
gs_secret_access_key = {{ gs_skey }}
[Boto]
https_validate_certificates = True
[GoogleCompute]
[GSUtil]
default_project_id = {{ gce_project_id }}
content_language = en
default_api_version = 2
[OAuth2]

View File

@@ -1,9 +0,0 @@
{
  "rule":
  [
    {
      "action": {"type": "Delete"},
      "condition": {"age": {{expire_days}}}
    }
  ]
}

View File

@@ -1,12 +0,0 @@
aws:
  key_name: "{{ key_name | default('ansibl8s') }}"
  access_key: "{{ aws_access_key }}"
  secret_key: "{{ aws_secret_key }}"
  region: "{{ aws_region | default('eu-west-1') }}"  # default to eu-west-1
  group: "{{ aws_security_group | default('default') }}"
  instance_type: t2.micro
  ami_id: "{{ aws_ami_id | default('ami-02724d1f') }}"  # default to Debian Jessie
  count: 3
  tags:
    test_id: "{{ test_id }}"
    network_plugin: "{{ kube_network_plugin }}"

View File

@@ -1 +0,0 @@
localhost ansible_connection=local

View File

@@ -1,4 +0,0 @@
---
collections:
  - name: amazon.aws
    version: 6.0.1

View File

@@ -1,52 +0,0 @@
#! /bin/bash

global_setup() {
    git clone https://github.com/ansibl8s/setup-kubernetes.git setup-kubernetes
    private_key=""
    if [ ! -z ${PRIVATE_KEY_FILE} ]
    then
        private_key="--private-key=${PRIVATE_KEY_FILE}"
    fi
    ansible-playbook create.yml -i hosts -u admin -s \
        -e test_id=${TEST_ID} \
        -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} \
        -e aws_access_key=${AWS_ACCESS_KEY} \
        -e aws_secret_key=${AWS_SECRET_KEY} \
        -e aws_ami_id=${AWS_AMI_ID} \
        -e aws_security_group=${AWS_SECURITY_GROUP} \
        -e key_name=${AWS_KEY_PAIR_NAME} \
        -e inventory_path=${PWD}/inventory.ini \
        -e aws_region=${AWS_REGION}
}

global_teardown() {
    if [ -f inventory.ini ];
    then
        ansible-playbook -i inventory.ini -u admin delete.yml
    fi
    rm -rf ${PWD}/setup-kubernetes
}

should_deploy_cluster() {
    ansible-playbook -i inventory.ini -s ${private_key} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml
    assertion__status_code_is_success $?
}

should_api_server_respond() {
    ansible-playbook -i inventory.ini ${private_key} testcases/010_check-apiserver.yml
    assertion__status_code_is_success $?
}

should_pod_be_in_expected_subnet() {
    ansible-playbook -i inventory.ini -s ${private_key} testcases/030_check-network.yml -vv
    assertion__status_code_is_success $?
}

should_resolve_cluster_dns() {
    ansible-playbook -i inventory.ini -s ${private_key} testcases/040_check-network-adv.yml -vv
    assertion__status_code_is_success $?
}

View File

@@ -1,94 +0,0 @@
def run(username, credentialsId, ami, network_plugin, aws_access, aws_secret) {
    def inventory_path = pwd() + "/inventory/sample/${env.CI_JOB_NAME}-${env.BUILD_NUMBER}.ini"
    dir('tests') {
        wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) {
            try {
                create_vm("${env.CI_JOB_NAME}-${env.BUILD_NUMBER}", inventory_path, ami, username, network_plugin, aws_access, aws_secret)
                install_cluster(inventory_path, credentialsId, network_plugin)
                test_apiserver(inventory_path, credentialsId)
                test_create_pod(inventory_path, credentialsId)
                test_network(inventory_path, credentialsId)
            } finally {
                delete_vm(inventory_path, credentialsId, aws_access, aws_secret)
            }
        }
    }
}

def create_vm(run_id, inventory_path, ami, username, network_plugin, aws_access, aws_secret) {
    ansiblePlaybook(
        inventory: 'local_inventory/hosts.cfg',
        playbook: 'cloud_playbooks/create-aws.yml',
        extraVars: [
            test_id: run_id,
            kube_network_plugin: network_plugin,
            aws_access_key: [value: aws_access, hidden: true],
            aws_secret_key: [value: aws_secret, hidden: true],
            aws_ami_id: ami,
            aws_security_group: [value: 'sg-cb0327a2', hidden: true],
            key_name: 'travis-ci',
            inventory_path: inventory_path,
            aws_region: 'eu-central-1',
            ssh_user: username
        ],
        colorized: true
    )
}

def delete_vm(inventory_path, credentialsId, aws_access, aws_secret) {
    ansiblePlaybook(
        inventory: inventory_path,
        playbook: 'cloud_playbooks/delete-aws.yml',
        credentialsId: credentialsId,
        extraVars: [
            aws_access_key: [value: aws_access, hidden: true],
            aws_secret_key: [value: aws_secret, hidden: true]
        ],
        colorized: true
    )
}

def install_cluster(inventory_path, credentialsId, network_plugin) {
    ansiblePlaybook(
        inventory: inventory_path,
        playbook: '../cluster.yml',
        sudo: true,
        credentialsId: credentialsId,
        extraVars: [
            kube_network_plugin: network_plugin
        ],
        extras: "-e cloud_provider=aws",
        colorized: true
    )
}

def test_apiserver(inventory_path, credentialsId) {
    ansiblePlaybook(
        inventory: inventory_path,
        playbook: 'testcases/010_check-apiserver.yml',
        credentialsId: credentialsId,
        colorized: true
    )
}

def test_create_pod(inventory_path, credentialsId) {
    ansiblePlaybook(
        inventory: inventory_path,
        playbook: 'testcases/020_check-create-pod.yml',
        sudo: true,
        credentialsId: credentialsId,
        colorized: true
    )
}

def test_network(inventory_path, credentialsId) {
    ansiblePlaybook(
        inventory: inventory_path,
        playbook: 'testcases/030_check-network.yml',
        sudo: true,
        credentialsId: credentialsId,
        colorized: true
    )
}

return this;

View File

@@ -1,29 +0,0 @@
node1 ansible_ssh_host={{ec2.instances[0].public_ip}} ansible_ssh_user={{ssh_user}}
node2 ansible_ssh_host={{ec2.instances[1].public_ip}} ansible_ssh_user={{ssh_user}}
node3 ansible_ssh_host={{ec2.instances[2].public_ip}} ansible_ssh_user={{ssh_user}}
[kube_control_plane]
node1
node2
[kube_node]
node1
node2
node3
[etcd]
node1
node2
[k8s_cluster:children]
kube_node
kube_control_plane
calico_rr
[calico_rr]
[broken_kube_control_plane]
node2
[broken_etcd]
node2

View File

@@ -1,47 +0,0 @@
{% for instance in droplets.results %}
{{instance.droplet.name}} ansible_ssh_host={{instance.droplet.ip_address}}
{% endfor %}
{% if mode is defined and mode == "separate" %}
[kube_control_plane]
{{droplets.results[0].droplet.name}}
[kube_node]
{{droplets.results[1].droplet.name}}
[etcd]
{{droplets.results[2].droplet.name}}
{% elif mode is defined and mode == "ha" %}
[kube_control_plane]
{{droplets.results[0].droplet.name}}
{{droplets.results[1].droplet.name}}
[kube_node]
{{droplets.results[2].droplet.name}}
[etcd]
{{droplets.results[1].droplet.name}}
{{droplets.results[2].droplet.name}}
[broken_kube_control_plane]
{{droplets.results[1].droplet.name}}
[broken_etcd]
{{droplets.results[2].droplet.name}}
{% else %}
[kube_control_plane]
{{droplets.results[0].droplet.name}}
[kube_node]
{{droplets.results[1].droplet.name}}
[etcd]
{{droplets.results[0].droplet.name}}
{% endif %}
[calico_rr]
[k8s_cluster:children]
kube_node
kube_control_plane
calico_rr

View File

@@ -1,73 +0,0 @@
{% set node1 = gce.instance_data[0].name %}
{{node1}} ansible_ssh_host={{gce.instance_data[0].public_ip}}
{% if mode != "all-in-one" %}
{% set node2 = gce.instance_data[1].name %}
{{node2}} ansible_ssh_host={{gce.instance_data[1].public_ip}}
{% endif %}
{% if mode is defined and mode in ["ha", "ha-scale", "separate", "separate-scale"] %}
{% set node3 = gce.instance_data[2].name %}
{{node3}} ansible_ssh_host={{gce.instance_data[2].public_ip}}
{% endif %}
{% if mode is defined and mode in ["separate", "separate-scale"] %}
[kube_control_plane]
{{node1}}
[kube_node]
{{node2}}
[etcd]
{{node3}}
{% elif mode is defined and mode in ["ha", "ha-scale"] %}
[kube_control_plane]
{{node1}}
{{node2}}
[kube_node]
{{node3}}
[etcd]
{{node1}}
{{node2}}
{{node3}}
[broken_kube_control_plane]
{{node2}}
[broken_etcd]
{{node2}}
{{node3}}
{% elif mode == "default" %}
[kube_control_plane]
{{node1}}
[kube_node]
{{node2}}
[etcd]
{{node1}}
{% elif mode == "all-in-one" %}
[kube_control_plane]
{{node1}}
[kube_node]
{{node1}}
[etcd]
{{node1}}
{% endif %}
[k8s_cluster:children]
kube_node
kube_control_plane
calico_rr
[calico_rr]
{% if mode is defined and mode in ["scale", "separate-scale", "ha-scale"] %}
[fake_hosts]
fake_scale_host[1:200]
[kube_node:children]
fake_hosts
{% endif %}