Support multiple inventory files under individual inventory directory

pull/2233/head
Wong Hoi Sing Edison 2018-02-01 14:42:34 +08:00
parent 384e5dd4c4
commit 1a1d154e14
21 changed files with 162 additions and 134 deletions


@@ -87,7 +87,7 @@ before_script:
-e gce_credentials_file=${HOME}/.ssh/gce.json
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
- -e inventory_path=${PWD}/inventory/inventory.ini
+ -e inventory_path=${PWD}/inventory/sample/hosts.ini
-e test_id=${TEST_ID}
-e preemptible=$GCE_PREEMPTIBLE
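Throughout the CI config the change is mechanical: every `inventory/inventory.ini` reference becomes `inventory/sample/hosts.ini`. A minimal sketch of sanity-checking the relocated layout after checkout (the exact file list is an assumption based on the paths in this diff):

```shell
# The sample inventory now lives in its own directory
ls inventory/sample
# expected (assumed): group_vars/  hosts.ini
ls inventory/sample/group_vars
# expected (assumed): all.yml  k8s-cluster.yml
```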
@@ -104,7 +104,7 @@ before_script:
# Create cluster
- >
ansible-playbook
- -i inventory/inventory.ini
+ -i inventory/sample/hosts.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -124,7 +124,7 @@ before_script:
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
git checkout "${CI_BUILD_REF}";
ansible-playbook
- -i inventory/inventory.ini
+ -i inventory/sample/hosts.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -141,20 +141,20 @@ before_script:
# Test Cases
## Test Master API
- >
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+ ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
## Ping between 2 pods
- - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
+ - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
## Advanced DNS checks
- - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
+ - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
## Idempotency checks 1/5 (repeat deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook
- -i inventory/inventory.ini
+ -i inventory/sample/hosts.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -171,7 +171,7 @@ before_script:
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook
- -i inventory/inventory.ini
+ -i inventory/sample/hosts.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -186,7 +186,7 @@ before_script:
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook
- -i inventory/inventory.ini
+ -i inventory/sample/hosts.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -203,7 +203,7 @@ before_script:
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook
- -i inventory/inventory.ini
+ -i inventory/sample/hosts.ini
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -219,7 +219,7 @@ before_script:
## Idempotency checks 5/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
+ ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts"
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
@@ -227,13 +227,13 @@ before_script:
after_script:
- >
- ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
+ ansible-playbook -i inventory/sample/hosts.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
-e @${CI_TEST_VARS}
-e test_id=${TEST_ID}
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_credentials_file=${HOME}/.ssh/gce.json
- -e inventory_path=${PWD}/inventory/inventory.ini
+ -e inventory_path=${PWD}/inventory/sample/hosts.ini
# Test matrix. Leave the comments for markup scripts.
.coreos_calico_aio_variables: &coreos_calico_aio_variables

README.md

@@ -1,67 +1,89 @@
![Kubernetes Logo](https://s28.postimg.org/lf3q4ocpp/k8s.png)
- ## Deploy a production ready kubernetes cluster
+ Deploy a Production Ready Kubernetes Cluster
+ ============================================
- If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **#kubespray**.
+ If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
- - Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
- - **High available** cluster
- - **Composable** (Choice of the network plugin for instance)
- - Support most popular **Linux distributions**
- - **Continuous integration tests**
+ - Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
+ - **Highly available** cluster
+ - **Composable** (choice of the network plugin, for instance)
+ - Supports most popular **Linux distributions**
+ - **Continuous integration tests**
+ Quick Start
+ -----------
To deploy the cluster you can use:
- **Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
- **vagrant** by simply running `vagrant up` (for tests purposes) <br>
+ ### Ansible
+ # Copy ``inventory/sample`` as ``inventory/mycluster``
+ cp -rfp inventory/sample inventory/mycluster
- * [Requirements](#requirements)
- * [Kubespray vs ...](docs/comparisons.md)
- * [Getting started](docs/getting-started.md)
- * [Ansible inventory and tags](docs/ansible.md)
- * [Integration with existing ansible repo](docs/integration.md)
- * [Deployment data variables](docs/vars.md)
- * [DNS stack](docs/dns-stack.md)
- * [HA mode](docs/ha-mode.md)
- * [Network plugins](#network-plugins)
- * [Vagrant install](docs/vagrant.md)
- * [CoreOS bootstrap](docs/coreos.md)
- * [Debian Jessie setup](docs/debian.md)
- * [Downloaded artifacts](docs/downloads.md)
- * [Cloud providers](docs/cloud.md)
- * [OpenStack](docs/openstack.md)
- * [AWS](docs/aws.md)
- * [Azure](docs/azure.md)
- * [vSphere](docs/vsphere.md)
- * [Large deployments](docs/large-deployments.md)
- * [Upgrades basics](docs/upgrades.md)
- * [Roadmap](docs/roadmap.md)
+ # Update Ansible inventory file with inventory builder
+ declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
+ CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
- Supported Linux distributions
- ===============
+ # Review and change parameters under ``inventory/mycluster/group_vars``
+ cat inventory/mycluster/group_vars/all.yml
+ cat inventory/mycluster/group_vars/k8s-cluster.yml
- * **Container Linux by CoreOS**
- * **Debian** Jessie
- * **Ubuntu** 16.04
- * **CentOS/RHEL** 7
+ # Deploy Kubespray with Ansible Playbook
+ ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
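As a reference, here is a sketch of the hosts.ini the inventory builder typically writes for the three IPs above (node names and the master/etcd split follow contrib/inventory_builder defaults and are illustrative):

```shell
cat inventory/mycluster/hosts.ini
# [all]
# node1    ansible_host=10.10.1.3 ip=10.10.1.3
# node2    ansible_host=10.10.1.4 ip=10.10.1.4
# node3    ansible_host=10.10.1.5 ip=10.10.1.5
#
# [kube-master]
# node1
# node2
#
# [kube-node]
# node1
# node2
# node3
#
# [etcd]
# node1
# node2
# node3
#
# [k8s-cluster:children]
# kube-node
# kube-master
```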
+ ### Vagrant
+ # Simply run `vagrant up` (for test purposes)
+ vagrant up
+ Documents
+ ---------
+ - [Requirements](#requirements)
+ - [Kubespray vs ...](docs/comparisons.md)
+ - [Getting started](docs/getting-started.md)
+ - [Ansible inventory and tags](docs/ansible.md)
+ - [Integration with existing ansible repo](docs/integration.md)
+ - [Deployment data variables](docs/vars.md)
+ - [DNS stack](docs/dns-stack.md)
+ - [HA mode](docs/ha-mode.md)
+ - [Network plugins](#network-plugins)
+ - [Vagrant install](docs/vagrant.md)
+ - [CoreOS bootstrap](docs/coreos.md)
+ - [Debian Jessie setup](docs/debian.md)
+ - [Downloaded artifacts](docs/downloads.md)
+ - [Cloud providers](docs/cloud.md)
+ - [OpenStack](docs/openstack.md)
+ - [AWS](docs/aws.md)
+ - [Azure](docs/azure.md)
+ - [vSphere](docs/vsphere.md)
+ - [Large deployments](docs/large-deployments.md)
+ - [Upgrades basics](docs/upgrades.md)
+ - [Roadmap](docs/roadmap.md)
+ Supported Linux Distributions
+ -----------------------------
+ - **Container Linux by CoreOS**
+ - **Debian** Jessie
+ - **Ubuntu** 16.04
+ - **CentOS/RHEL** 7
Note: Upstart/SysV init based OS types are not supported.
Versions of supported components
--------------------------------
- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2 <br>
- [etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
- [flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
- [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
- [contiv](https://github.com/contiv/install/releases) v1.0.3 <br>
- [weave](http://weave.works/) v2.0.1 <br>
- [docker](https://www.docker.com/) v1.13 (see note) <br>
- [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2) <br>
+ - [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2
+ - [etcd](https://github.com/coreos/etcd/releases) v3.2.4
+ - [flanneld](https://github.com/coreos/flannel/releases) v0.8.0
+ - [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0
+ - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
+ - [contiv](https://github.com/contiv/install/releases) v1.0.3
+ - [weave](http://weave.works/) v2.0.1
+ - [docker](https://www.docker.com/) v1.13 (see note)
+ - [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
Note: kubernetes doesn't support newer docker versions. Among other things, kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
@@ -71,54 +93,59 @@ plugins' related OS services. Also note, only one of the supported network
plugins can be deployed for a given single cluster.
Requirements
- --------------
+ ------------
- * **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
- that will run Ansible commands**
- * **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
- * The target servers must have **access to the Internet** in order to pull docker images.
- * The target servers are configured to allow **IPv4 forwarding**.
- * **Your ssh key must be copied** to all the servers part of your inventory.
- * The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
- in order to avoid any issue during deployment you should disable your firewall.
+ - **Ansible v2.4 (or newer) and python-netaddr installed on the machine
+ that will run Ansible commands**
+ - **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
+ - The target servers must have **access to the Internet** in order to pull docker images.
+ - The target servers are configured to allow **IPv4 forwarding**.
+ - **Your ssh key must be copied** to all the servers that are part of your inventory.
+ - The **firewalls are not managed**: you'll need to implement your own rules as you usually do.
+ To avoid any issues during deployment, you should disable your firewall.
- ## Network plugins
+ Network Plugins
+ ---------------
You can choose between 5 network plugins. (default: `calico`, except Vagrant uses `flannel`)
- * [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
+ - [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
- * [**calico**](docs/calico.md): bgp (layer 3) networking.
+ - [calico](docs/calico.md): bgp (layer 3) networking.
- * [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
+ - [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
- * [**contiv**](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
- apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
+ - [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
+ apply firewall policies, segregate containers in multiple networks and bridge pods onto physical networks.
- * [**weave**](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
- (Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
+ - [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
+ (Please refer to the `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html).)
The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).
- ## Community docs and resources
- - [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
- - [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
- - [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- - [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
+ Community docs and resources
+ ----------------------------
- ## Tools and projects on top of Kubespray
- - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
- - [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
- - [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
+ - [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
+ - [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
+ - [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
+ - [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
- ## CI Tests
+ Tools and projects on top of Kubespray
+ --------------------------------------
+ - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
+ - [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
+ - [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
+ CI Tests
+ --------
![Gitlab Logo](https://s27.postimg.org/wmtaig1wz/gitlabci.png)
- [![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) </br>
+ [![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
See the [test matrix](docs/test_cases.md) for details.


@@ -59,6 +59,6 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:
```shell
$ cd kubespray-root-dir
- $ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
+ $ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all.yml" cluster.yml
```


@@ -6,16 +6,16 @@ You can either deploy using Ansible on its own by supplying your own inventory f
In the same directory as this README you should find a file named `inventory.example` which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
- Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):
+ Change that file to reflect your local setup (adding more machines or removing them and setting the adequate IP numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (assuming the machines are all running Ubuntu):
```
- ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
+ ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
```
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
```
- ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
+ ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```
If your machines are not running Ubuntu, you need to change `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS machines a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:


@@ -47,10 +47,10 @@ export SKIP_PIP_INSTALL=1
%files
%doc %{_docdir}/%{name}/README.md
- %doc %{_docdir}/%{name}/inventory/inventory.example
+ %doc %{_docdir}/%{name}/inventory/sample/hosts.ini
%config %{_sysconfdir}/%{name}/ansible.cfg
- %config %{_sysconfdir}/%{name}/inventory/group_vars/all.yml
- %config %{_sysconfdir}/%{name}/inventory/group_vars/k8s-cluster.yml
+ %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
+ %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
%license %{_docdir}/%{name}/LICENSE
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
%{_datarootdir}/%{name}/roles/


@@ -200,7 +200,7 @@ if it fails try to connect manually via SSH ... it could be something as simple
## Configure Cluster variables
- Edit`inventory/group_vars/all.yml`:
+ Edit `inventory/sample/group_vars/all.yml`:
- Set variable **bootstrap_os** according to the selected image
```
# Valid bootstrap options (required): ubuntu, coreos, centos, none
@@ -218,7 +218,7 @@ bin_dir: /opt/bin
```
cloud_provider: openstack
```
- Edit`inventory/group_vars/k8s-cluster.yml`:
+ Edit `inventory/sample/group_vars/k8s-cluster.yml`:
- Set variable **kube_network_plugin** according to the selected networking
```
# Choose network plugin (calico, weave or flannel)


@@ -27,7 +27,7 @@ not _kube-node_.
There are also two special groups:
- * **calico-rr** : explained for [advanced Calico networking cases](calico.md)
+ * **calico-rr** : explained for [advanced Calico networking cases](calico.md)
* **bastion** : configure a bastion host if your nodes are not directly reachable
Below is a complete inventory example:
@@ -66,10 +66,10 @@ kube-master
Group vars and overriding variables precedence
----------------------------------------------
- The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
- Optional variables are located in the `inventory/group_vars/all.yml`.
+ The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
+ Optional variables are located in `inventory/sample/group_vars/all.yml`.
Mandatory variables that are common for at least one role (or a node group) can be found in
- `inventory/group_vars/k8s-cluster.yml`.
+ `inventory/sample/group_vars/k8s-cluster.yml`.
There are also role vars for docker, rkt, kubernetes preinstall and master roles.
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
those cannot be overridden from the group vars. In order to override, one should use
@@ -153,16 +153,16 @@ Example command to filter and apply only DNS configuration tasks and skip
everything else related to host OS configuration and downloading images of containers:
```
- ansible-playbook -i inventory/inventory.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
+ ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
```
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
```
- ansible-playbook -i inventory/inventory.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
+ ansible-playbook -i inventory/sample/hosts.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
```
And this prepares all container images locally (on the ansible runner node) without installing
or upgrading related stuff or trying to upload containers to K8s cluster nodes:
```
- ansible-playbook -i inventory/inventory.ini cluster.yml \
+ ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
-e download_run_once=true -e download_localhost=true \
--tags download --skip-tags upload,upgrade
```


@@ -6,7 +6,7 @@ Building your own inventory
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
an example inventory located
- [here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example).
+ [here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/sample/hosts.ini).
You can use an
[inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
@@ -19,9 +19,9 @@ certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` hel
Example inventory generator usage:
```
- cp -r inventory my_inventory
+ cp -r inventory/sample inventory/mycluster
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
- CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+ CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
```
Starting custom deployment
@@ -33,7 +33,7 @@ and start the deployment:
**IMPORTANT: Edit inventory/mycluster/group_vars/*.yaml to override data vars**
```
- ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
+ ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
--private-key=~/.ssh/private_key
```
@@ -47,7 +47,7 @@ You may want to add **worker** nodes to your existing cluster. This can be done
- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
```
- ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
+ ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
--private-key=~/.ssh/private_key
```


@@ -24,13 +24,13 @@ If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
deploy the following way:
```
- ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.3
+ ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.3
```
And then repeat with v1.4.6 as kube_version:
```
- ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
+ ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.6
```
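The two invocations can also be chained; a small sketch that steps `kube_version` through each hop in order (versions illustrative):

```shell
# Upgrade component-wise, one release hop at a time
for v in v1.4.3 v1.4.6; do
  ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=$v || break
done
```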
#### Graceful upgrade
@@ -44,7 +44,7 @@ deployed.
```
git fetch origin
git checkout origin/master
- ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg -e kube_version=v1.6.0
+ ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.6.0
```
After a successful upgrade, the Server Version should be updated:


@@ -16,7 +16,7 @@ After this step you should have:
## Kubespray configuration
- Fist you must define the cloud provider in `inventory/group_vars/all.yml` and set it to `vsphere`.
+ First you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`.
```yml
cloud_provider: vsphere
```
@@ -58,7 +58,7 @@ vsphere_resource_pool: "K8s-Pool"
Once the configuration is set, you can execute the playbook again to apply the new configuration:
```
cd kubespray
- ansible-playbook -i inventory/inventory.cfg -b -v cluster.yml
+ ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml
```
You'll find some useful examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.


@@ -12,7 +12,7 @@ Weave encryption is supported for all communication
* To use Weave encryption, specify a strong password (if no password, no encryption)
```
- # In file ./inventory/group_vars/k8s-cluster.yml
+ # In file ./inventory/sample/group_vars/k8s-cluster.yml
weave_password: EnterPasswordHere
```
@@ -77,14 +77,14 @@ The seed mode also allows multi-clouds and hybrid on-premise/cloud clusters depl
* Switch from consensus mode to seed mode
```
- # In file ./inventory/group_vars/k8s-cluster.yml
+ # In file ./inventory/sample/group_vars/k8s-cluster.yml
weave_mode_seed: true
```
These two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**)
```
- # In file ./inventory/group_vars/k8s-cluster.yml
+ # In file ./inventory/sample/group_vars/k8s-cluster.yml
weave_seed: uninitialized
weave_peers: uninitialized
```


@@ -0,0 +1 @@
+ ../sample/group_vars
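This one-line file is the target of a new symlink: the local inventory reuses the sample `group_vars` instead of carrying its own copy. A sketch of creating such a link by hand (the `inventory/local` location is an assumption; only the `../sample/group_vars` target appears in this diff):

```shell
# Relative symlink so the local inventory shares the sample group_vars
ln -s ../sample/group_vars inventory/local/group_vars
readlink inventory/local/group_vars
# ../sample/group_vars
```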


@@ -96,8 +96,8 @@ bin_dir: /usr/local/bin
## Uncomment to enable experimental kubeadm deployment mode
#kubeadm_enabled: false
- #kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
- #kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+ #kubeadm_token_first: "{{ lookup('password', inventory_dir + '/credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+ #kubeadm_token_second: "{{ lookup('password', inventory_dir + '/credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
#
## Set these proxy values in order to update package manager and docker daemon to use proxies
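Prefixing the lookup path with `inventory_dir` makes Ansible's password lookup persist the generated tokens under the active inventory rather than in a `credentials/` directory relative to the playbook. A sketch of where the files land after a run (inventory name illustrative):

```shell
# After: ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
ls inventory/mycluster/credentials
# expected (assumed): kubeadm_token_first  kubeadm_token_second
cat inventory/mycluster/credentials/kubeadm_token_first  # 6 chars, [a-z0-9]
```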


@@ -37,7 +37,7 @@ kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
- kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+ kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
kube_users:
kube:
pass: "{{kube_api_pwd}}"
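`inventory_dir` is one of Ansible's built-in magic variables and resolves to the directory containing the current inventory source, so each cluster keeps its own generated password. A quick way to confirm what it resolves to (ad-hoc call; paths illustrative):

```shell
ansible -i inventory/mycluster/hosts.ini localhost -m debug \
  -a "msg={{ inventory_dir }}"
# localhost | SUCCESS => "msg": ".../kubespray/inventory/mycluster"
```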


@@ -33,7 +33,7 @@
- name: Weave seed | Save seed
lineinfile:
- dest: "./inventory/group_vars/k8s-cluster.yml"
+ dest: "{{ inventory_dir }}/group_vars/k8s-cluster.yml"
state: present
regexp: '^weave_seed:'
line: 'weave_seed: {{ seed }}'
@@ -45,7 +45,7 @@
- name: Weave seed | Save peers
lineinfile:
- dest: "./inventory/group_vars/k8s-cluster.yml"
+ dest: "{{ inventory_dir }}/group_vars/k8s-cluster.yml"
state: present
regexp: '^weave_peers:'
line: 'weave_peers: {{ peers }}'
@@ -53,4 +53,4 @@
delegate_to: 127.0.0.1
run_once: true
tags:
- - confweave
+ - confweave
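With `inventory_dir`, the seed and peer values are written back into whichever inventory drove the play instead of the hard-coded `./inventory/group_vars` path. A quick check after a seed-mode run (inventory name illustrative):

```shell
grep -E '^weave_(seed|peers):' inventory/mycluster/group_vars/k8s-cluster.yml
# weave_seed: <generated seed>
# weave_peers: <generated peer list>
```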


@@ -115,7 +115,7 @@ vault_pki_mounts:
roles:
- name: vault
group: vault
- password: "{{ lookup('password', 'credentials/vault/vault length=15') }}"
+ password: "{{ lookup('password', inventory_dir + '/credentials/vault/vault length=15') }}"
policy_rules: default
role_options: default
etcd:
@@ -127,7 +127,7 @@ vault_pki_mounts:
roles:
- name: etcd
group: etcd
- password: "{{ lookup('password', 'credentials/vault/etcd length=15') }}"
+ password: "{{ lookup('password', inventory_dir + '/credentials/vault/etcd length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
@@ -142,7 +142,7 @@ vault_pki_mounts:
roles:
- name: kube-master
group: kube-master
- password: "{{ lookup('password', 'credentials/vault/kube-master length=15') }}"
+ password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-master length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
@@ -150,7 +150,7 @@ vault_pki_mounts:
organization: "system:masters"
- name: kube-node
group: k8s-cluster
- password: "{{ lookup('password', 'credentials/vault/kube-node length=15') }}"
+ password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-node length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
@@ -158,7 +158,7 @@ vault_pki_mounts:
organization: "system:nodes"
- name: kube-proxy
group: k8s-cluster
- password: "{{ lookup('password', 'credentials/vault/kube-proxy length=15') }}"
+ password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
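As with the kubeadm tokens above, each generated Vault role password is now persisted under the active inventory; a sketch of the resulting layout (assumed from the lookup strings in this hunk):

```shell
ls inventory/mycluster/credentials/vault
# expected (assumed): etcd  kube-master  kube-node  kube-proxy  vault
```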


@@ -32,12 +32,12 @@ data_files =
LICENSE
README.md
/usr/share/doc/kubespray/inventory/ =
- inventory/inventory.example
+ inventory/sample/hosts.ini
/etc/kubespray/ =
ansible.cfg
- /etc/kubespray/inventory/group_vars/ =
- inventory/group_vars/all.yml
- inventory/group_vars/k8s-cluster.yml
+ /etc/kubespray/inventory/sample/group_vars/ =
+ inventory/sample/group_vars/all.yml
+ inventory/sample/group_vars/k8s-cluster.yml
[wheel]
universal = 1


@@ -1,5 +1,5 @@
def run(username, credentialsId, ami, network_plugin, aws_access, aws_secret) {
- def inventory_path = pwd() + "/inventory/inventory-test.ini"
+ def inventory_path = pwd() + "/inventory/sample/hosts.ini"
dir('tests') {
wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) {
try {


@@ -6,7 +6,7 @@
uri:
url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}/api/v1"
user: kube
- password: "{{ lookup('password', '../../credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+ password: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
validate_certs: no
status_code: 200,401
when: not kubeadm_enabled|default(false)
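The test probes the same endpoint an operator could hit by hand; a rough curl equivalent (the 6443 port and the credentials path are assumptions based on kubespray defaults and the lookup above):

```shell
# Basic-auth probe of the apiserver; the test accepts 200 or 401
curl -k -u "kube:$(cat inventory/mycluster/credentials/kube_user)" \
  -o /dev/null -w '%{http_code}\n' "https://<master-ip>:6443/api/v1"
```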