commit a3f892c76c
@@ -2,3 +2,4 @@
 *.retry
 inventory/vagrant_ansible_inventory
 temp
+.idea
@@ -109,6 +109,8 @@ before_script:
 - $HOME/.local/bin/ansible-playbook --version
 - cp tests/ansible.cfg .
 # - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
+## Configure ansible deployment logs to be collected as an artifact. Enable when GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
+# - $HOME/.local/bin/ansible-playbook -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scripts/configure-logs.yaml

 script:
 - >
@@ -131,6 +133,8 @@ script:
 - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
 ## Ping between 2 pods
 - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
+## Collect env info, enable it once GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
+# - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scripts/collect-info.yaml

 after_script:
 - >
@@ -0,0 +1,10 @@
+# Contributing guidelines
+
+## How to become a contributor and submit your own code
+
+### Contributing A Patch
+
+1. Submit an issue describing your proposed change to the repo in question.
+2. The [repo owners](OWNERS) will respond to your issue promptly.
+3. Fork the desired repo, develop and test your code changes.
+4. Submit a pull request.
@@ -0,0 +1,6 @@
+# See the OWNERS file documentation:
+# https://github.com/kubernetes/kubernetes/blob/master/docs/devel/owners.md
+
+owners:
+  - Smana
+  - ant31
README.md
@@ -13,7 +13,7 @@ If you have questions, you can [invite yourself](https://slack.kubespray.io/) to

 To deploy the cluster you can use:

-[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
+[**kargo-cli**](https://github.com/kubespray/kargo-cli) (deprecated; a newer [Go](https://github.com/Smana/kargo-cli/tree/kargogo) version is coming soon)<br>
 usual **Ansible** commands <br>
 **vagrant** by simply running `vagrant up` (for testing purposes) <br>

@@ -24,7 +24,8 @@ To deploy the cluster you can use :
 * [CoreOS bootstrap](docs/coreos.md)
 * [Ansible variables](docs/ansible.md)
 * [Cloud providers](docs/cloud.md)
-* [Openstack](docs/openstack.md)
+* [OpenStack](docs/openstack.md)
+* [AWS](docs/aws.md)
 * [Network plugins](#network-plugins)
 * [Roadmap](docs/roadmap.md)

@@ -40,11 +41,11 @@ Supported Linux distributions
 Versions
 --------------

-[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.3.0 <br>
+[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.3 <br>
 [etcd](https://github.com/coreos/etcd/releases) v3.0.1 <br>
-[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.20.0 <br>
-[flanneld](https://github.com/coreos/flannel/releases) v0.5.5 <br>
-[weave](http://weave.works/) v1.5.0 <br>
+[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
+[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.22.0 <br>
+[weave](http://weave.works/) v1.6.1 <br>
 [docker](https://www.docker.com/) v1.10.3 <br>

@@ -0,0 +1,9 @@
+# Release Process
+
+The Kargo Project is released on an as-needed basis. The process is as follows:
+
+1. An issue proposes a new release with a changelog since the last release
+2. At least one of the [OWNERS](OWNERS) must LGTM this release
+3. An OWNER runs `git tag -s $VERSION`, inserts the changelog, and pushes the tag with `git push $VERSION`
+4. The release issue is closed
+5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
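For illustration, step 3 expands to roughly the following shell sketch (the version string is hypothetical, and the remote name `origin` is an assumption since `git push $VERSION` above leaves it implicit):

```sh
VERSION=v2.0.0                 # hypothetical release version
git tag -s "$VERSION"          # signed tag; paste the changelog into the tag message
git push origin "$VERSION"     # publish the tag
```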
@@ -76,7 +76,7 @@ Vagrant.configure("2") do |config|
       ip = "#{$subnet}.#{i+100}"
       host_vars[vm_name] = {
         "ip" => ip,
-        "access_ip" => ip,
+        #"access_ip" => ip,
         "flannel_interface" => ip,
         "flannel_backend_type" => "host-gw",
         "local_release_dir" => "/vagrant/temp",
@@ -100,11 +100,11 @@ Vagrant.configure("2") do |config|
         #ansible.tags = ['download']
         ansible.groups = {
           # The first three nodes should be etcd servers
-          "etcd" => ["k8s-0[1:3]"],
+          "etcd" => ["#{$instance_name_prefix}-0[1:3]"],
           # The first two nodes should be masters
-          "kube-master" => ["k8s-0[1:2]"],
+          "kube-master" => ["#{$instance_name_prefix}-0[1:2]"],
           # all nodes should be kube nodes
-          "kube-node" => ["k8s-0[1:#{$num_instances}]"],
+          "kube-node" => ["#{$instance_name_prefix}-0[1:#{$num_instances}]"],
           "k8s-cluster:children" => ["kube-master", "kube-node"],
         }
       end
@@ -2,3 +2,6 @@
 pipelining=True
 [defaults]
 host_key_checking=False
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = /tmp
cluster.yml
@@ -1,9 +1,26 @@
 ---
-- hosts: k8s-cluster
+- hosts: all
+  gather_facts: false
+  roles:
+    - bootstrap-os
+  tags:
+    - bootstrap-os
+
+
+- hosts: all
+  gather_facts: true
+
+- hosts: all
   roles:
     - { role: kubernetes/preinstall, tags: preinstall }
+
+- hosts: etcd:!k8s-cluster
+  roles:
+    - { role: etcd, tags: etcd }
+
+- hosts: k8s-cluster
+  roles:
     - { role: etcd, tags: etcd }
-    - { role: docker, tags: docker }
     - { role: kubernetes/node, tags: node }
     - { role: network_plugin, tags: network }

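As a usage sketch of the tags declared above (the inventory path is illustrative), a single stage can be run in isolation:

```sh
# run only the OS bootstrap plays against all hosts
ansible-playbook -i inventory/inventory.ini -b cluster.yml --tags bootstrap-os
```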
@@ -0,0 +1,59 @@
+## Kubernetes Community Code of Conduct
+
+### Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering
+an open and welcoming community, we pledge to respect all people who contribute
+through reporting issues, posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing other's private information, such as physical or electronic addresses,
+  without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are not
+aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
+commit themselves to fairly and consistently applying these principles to every aspect
+of managing this project. Project maintainers who do not follow or enforce the Code of
+Conduct may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+opening an issue or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the Contributor Covenant
+(http://contributor-covenant.org), version 1.2.0, available at
+http://contributor-covenant.org/version/1/2/0/
+
+### Kubernetes Events Code of Conduct
+
+Kubernetes events are working conferences intended for professional networking and collaboration in the
+Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
+with their employer's policies on appropriate workplace behavior.
+
+While at Kubernetes events or related social networking opportunities, attendees should not engage in
+discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
+be especially aware of these concerns.
+
+The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
+team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
+be engaging in discriminatory or offensive speech or actions.
+
+Please bring any concerns to the immediate attention of Kubernetes event staff
+
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/code-of-conduct.md?pixel)]()
@@ -0,0 +1,10 @@
+AWS
+===============
+
+To deploy kubespray on [AWS](https://aws.amazon.com/), uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
+
+Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for etcd do not need a role.
+
+The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
+
+You can now create your cluster!
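For illustration, such an inventory entry might look like the following (the external address and SSH user are hypothetical):

```ini
[kube-master]
ip-111-222-333-444.us-west-2.compute.internal ansible_ssh_host=203.0.113.10 ansible_ssh_user=admin
```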
@@ -0,0 +1,92 @@
+K8s DNS stack by Kargo
+======================
+
+Kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
+[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
+to serve as an authoritative DNS server for a given ``dns_domain`` and its
+``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
+
+Note that additional search (sub)domains may be defined in the ``searchdomains``
+and ``ndots`` vars, and additional recursive DNS resolvers in the ``upstream_dns_servers``
+and ``nameservers`` vars. Intranet DNS resolvers should be specified first,
+followed by external resolvers, for example:
+
+```
+skip_dnsmasq: true
+nameservers: [8.8.8.8]
+upstream_dns_servers: [172.18.32.6]
+```
+or
+```
+skip_dnsmasq: false
+upstream_dns_servers: [172.18.32.6, 172.18.32.7, 8.8.8.8, 8.8.8.4]
+```
+The vars are explained below as well.
+
+DNS configuration details
+-------------------------
+
+Here is an approximate picture of how DNS is working and
+being configured by the Kargo ansible playbooks:
+
+![Image](figures/dns.jpeg?raw=true)
+
+Note that an additional dnsmasq daemon set is installed by Kargo
+by default. Kubelet will configure the DNS base of all pods to use the
+given dnsmasq cluster IP, which is defined via the ``dns_server`` var.
+The dnsmasq forwards requests for a given cluster ``dns_domain`` to
+Kubedns's SkyDns service. The SkyDns server is configured to be an
+authoritative DNS server for the given cluster domain (and its subdomains
+up to ``ndots:5`` depth). Note: you should scale its replication controller
+up if SkyDns chokes. These two layered DNS forwarders provide HA for the
+DNS cluster IP endpoint, which is a critical moving part for Kubernetes apps.
+
+Nameservers are configured in the hosts' ``/etc/resolv.conf`` files as well,
+as the given DNS cluster IP merged with the ``nameservers`` values, while the
+DNS cluster IP merged with the ``upstream_dns_servers`` defines additional
+nameservers for the aforementioned dnsmasq daemon set running on all hosts.
+This mitigates the existing Linux limitation of max 3 nameservers in
+``/etc/resolv.conf`` and also brings an additional caching layer for the
+clustered DNS services.
+
+You can skip the dnsmasq daemon set install steps by setting
+``skip_dnsmasq: true``. This may be the case if you're fine with
+the nameservers limitation. Sadly, there is no way to work around the
+search domain limitations of 256 chars and 6 domains. Thus, you can
+use the ``searchdomains`` var to define no more than three custom domains.
+The remaining three slots are reserved for K8s cluster default subdomains.
+
+When dnsmasq is skipped, Kargo redefines the DNS cluster IP to point directly
+to the SkyDns cluster IP ``skydns_server`` and configures Kubelet's
+``--cluster-dns`` to use that IP as well. While this greatly simplifies
+things, it comes at the price of limited nameservers: as noted above,
+the DNS cluster IP takes a slot in ``/etc/resolv.conf``, thus you can
+specify no more than two nameservers for infra and/or external use.
+Those may be specified either in ``nameservers`` or ``upstream_dns_servers``
+and will be merged together with the ``skydns_server`` IP into the hosts'
+``/etc/resolv.conf``.
+
+Limitations
+-----------
+
+* Kargo has no way yet to configure the Kubedns add-on to forward requests SkyDns can
+  not answer with authority to arbitrary recursive resolvers. This task is left
+  for the future. See the [official SkyDns docs](https://github.com/skynetservices/skydns)
+  for details.
+
+* There is
+  [no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
+  for the SkyDNS ``ndots`` param via an
+  [option for KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
+  add-on, although SkyDNS supports it. Thus, DNS SRV records may not work
+  as expected as they require ``ndots:7``.
+
+* the ``searchdomains`` are limited to 6 names and 256 chars in
+  length. Due to the default ``svc, default.svc`` subdomains, the actual
+  limits are 4 names and 239 chars respectively.
+
+* the ``nameservers`` are limited to 3 servers, although there
+  is a way to mitigate that with the ``upstream_dns_servers``,
+  see above. Even so, ``nameservers`` can take no more than two
+  custom DNS servers, because one slot is reserved for the Kubernetes
+  cluster's needs.
Binary files added (not shown): two figures, 654 KiB and 57 KiB.
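As a concrete sketch of the vars described above (the domain and resolver addresses are illustrative), a deployment that keeps dnsmasq might set:

```yaml
skip_dnsmasq: false
searchdomains:            # at most three custom domains, see the limits above
  - corp.example.com
upstream_dns_servers:     # intranet resolver first, then external
  - 172.18.32.6
  - 8.8.8.8
```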
@@ -0,0 +1,109 @@
+HA endpoints for K8s
+====================
+
+The following components require highly available endpoints:
+* etcd cluster,
+* kube-apiserver service instances.
+
+The former provides the
+[etcd-proxy](https://coreos.com/etcd/docs/latest/proxy.html) service to access
+the cluster members in HA fashion.
+
+The latter relies on third-party reverse proxies, like Nginx or HAProxy, to
+achieve the same goal.
+
+Etcd
+----
+
+Etcd proxies are deployed on each node in the `k8s-cluster` group. A proxy is
+a separate etcd process. It has a `localhost:2379` frontend and all of the etcd
+cluster members as backends. Note that the `access_ip` is used as the backend
+IP, if specified. Frontend endpoints cannot be accessed externally as they are
+bound to localhost only.
+
+The `etcd_access_endpoint` fact provides an access pattern for clients, and the
+`etcd_multiaccess` group var (defaults to `false`) controls that behavior.
+When enabled, it makes deployed components access the etcd cluster members
+directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
+do the load balancing and handle HA for connections. Note, a pod definition of a
+flannel networking plugin always uses a single `--etcd-server` endpoint!
+
+
+Kube-apiserver
+--------------
+
+K8s components require a loadbalancer to access the apiservers via a reverse
+proxy. Kargo includes support for an nginx-based proxy that resides on each
+non-master Kubernetes node. This is referred to as localhost loadbalancing. It
+is less efficient than a dedicated load balancer because it creates extra
+health checks on the Kubernetes apiserver, but is more practical for scenarios
+where an external LB or virtual IP management is inconvenient.
+
+This option is configured by the variable `loadbalancer_apiserver_localhost`.
+Otherwise, you will need to configure your own loadbalancer to achieve HA. Note that
+deploying a loadbalancer is up to a user and is not covered by ansible roles
+in Kargo. By default, it only configures a non-HA endpoint, which points to
+the `access_ip` or IP address of the first server node in the `kube-master`
+group. It can also configure clients to use endpoints for a given loadbalancer
+type. The following diagram shows how traffic to the apiserver is directed.
+
+![Image](figures/loadbalancer_localhost.png?raw=true)
+
+Note: Kubernetes master nodes still use insecure localhost access because
+there are bugs in Kubernetes <1.5.0 in using TLS auth on master role
+services. This means backends receive unencrypted traffic, which may be a
+security issue when interconnecting different nodes, or maybe not, if those
+belong to an isolated management network without external access.
+
+A user may opt to use an external loadbalancer (LB) instead. An external LB
+provides access for external clients, while the internal LB accepts client
+connections only to the localhost, similarly to the etcd-proxy HA endpoints.
+Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
+an example configuration for a HAProxy service acting as an external LB:
+```
+listen kubernetes-apiserver-https
+  bind <VIP>:8383
+  option ssl-hello-chk
+  mode tcp
+  timeout client 3h
+  timeout server 3h
+  server master1 <IP1>:443
+  server master2 <IP2>:443
+  balance roundrobin
+```
+
+And the corresponding example global vars config:
+```
+apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
+loadbalancer_apiserver:
+  address: <VIP>
+  port: 8383
+```
+
+This domain name, or the default "lb-apiserver.kubernetes.local", will be inserted
+into the `/etc/hosts` file of all servers in the `k8s-cluster` group. Note that
+the HAProxy service should itself be HA as well and requires VIP management, which
+is out of scope of this doc. Specifying an external LB overrides any internal
+localhost LB configuration.
+
+Note: In order to achieve HA for HAProxy instances, those must be running on
+each node in the `k8s-cluster` group as well, but require no VIP, thus
+no VIP management.
+
+Access endpoints are evaluated automagically, as follows:
+
+| Endpoint type                | kube-master   | non-master          |
+|------------------------------|---------------|---------------------|
+| Local LB                     | http://lc:p   | https://lc:sp       |
+| External LB, no internal     | https://lb:lp | https://lb:lp       |
+| No ext/int LB (default)      | http://lc:p   | https://m[0].aip:sp |
+
+Where:
+* `m[0]` - the first node in the `kube-master` group;
+* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
+* `lc` - localhost;
+* `p` - insecure port, `kube_apiserver_insecure_port`;
+* `sp` - secure port, `kube_apiserver_port`;
+* `lp` - LB port, `loadbalancer_apiserver.port`, defaults to the secure port;
+* `ip` - the node IP, defaults to the ansible IP;
+* `aip` - `access_ip`, defaults to the ip.
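To make the endpoint table concrete, here is one worked case under the default deployment (the address is illustrative; 8080 and 443 are the default insecure/secure ports used elsewhere in this change):

```
# No ext/int LB (default), first kube-master access_ip = 10.0.0.10:
#   kube-master nodes:  http://localhost:8080      (lc:p)
#   non-master nodes:   https://10.0.0.10:443      (m[0].aip:sp)
```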
@@ -0,0 +1,19 @@
+Large deployments of K8s
+========================
+
+For large scale deployments, consider the following configuration changes:
+
+* Tune [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html)
+  for `forks` and `timeout` vars to fit large numbers of nodes being deployed.
+
+* Override containers' `foo_image_repo` vars to point to an intranet registry.
+
+* Set ``download_run_once: true`` to download binaries and container
+  images only once, then push them to nodes in batches.
+
+* Adjust the `retry_stagger` global var as appropriate. It should provide a sane
+  load on the delegate (the first K8s master node) when retrying failed
+  push or download operations.
+
+For example, when deploying 200 nodes, you may want to run ansible with
+``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
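Putting that closing example together as a command line (the inventory path is illustrative):

```sh
ansible-playbook -i inventory/inventory.ini -b --forks=50 --timeout=600 \
  -e retry_stagger=60 cluster.yml
```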
@@ -1,6 +1,10 @@
 Kargo's roadmap
 =================

+### Kubeadm
+- Propose kubeadm as an option in order to set up the kubernetes cluster.
+  That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553)
+
 ### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
 - the playbook would install and configure docker/rkt and the etcd cluster
 - the following data would be inserted into etcd: certs, tokens, users, inventory, group_vars.
|
@ -33,6 +37,7 @@ Kargo's roadmap
|
||||||
- test scale up cluster: +1 etcd, +1 master, +1 node
|
- test scale up cluster: +1 etcd, +1 master, +1 node
|
||||||
|
|
||||||
### Lifecycle
|
### Lifecycle
|
||||||
|
- Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
|
||||||
- Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
|
- Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
|
||||||
- Drain worker node when shutting down/deleting an instance
|
- Drain worker node when shutting down/deleting an instance
|
||||||
|
|
||||||
|
|
|
@@ -1,9 +1,14 @@
+# Valid bootstrap options (required): xenial, coreos, none
+bootstrap_os: none
+
 # Directory where the binaries will be installed
 bin_dir: /usr/local/bin

 # Where the binaries will be downloaded.
 # Note: ensure that you have enough disk space (about 1G)
 local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5

 # Uncomment this line for CoreOS only.
 # Directory where python binary is installed
@@ -28,6 +33,8 @@ kube_users:

 # Kubernetes cluster name, also will be used as DNS domain
 cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf
+ndots: 5

 # For some environments, each node has a publicly accessible
 # address and an address it should bind services to. These are
@@ -51,6 +58,16 @@ cluster_name: cluster.local
 # but don't know about that address themselves.
 # access_ip: 1.1.1.1

+# Etcd access modes:
+# Enable multiaccess to configure clients to access all of the etcd members directly
+# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+# This may be the case if clients support and loadbalance multiple etcd servers natively.
+etcd_multiaccess: false
+
+# Assume no internal loadbalancers for apiservers exist and listen on
+# kube_apiserver_port (default 443)
+loadbalancer_apiserver_localhost: true
+
 # Choose network plugin (calico, weave or flannel)
 kube_network_plugin: flannel

@@ -89,10 +106,12 @@ kube_apiserver_insecure_port: 8080 # (http)
 # You still must manually configure all your containers to use this DNS server,
 # Kubernetes won't do this for you (yet).

+# Do not install additional dnsmasq
+skip_dnsmasq: false
 # Upstream dns servers used by dnsmasq
-upstream_dns_servers:
-  - 8.8.8.8
-  - 8.8.4.4
+#upstream_dns_servers:
+#  - 8.8.8.8
+#  - 8.8.4.4
 #
 # # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
 dns_setup: true
@@ -109,21 +128,6 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address')
 # like you would do when using nova-client before starting the playbook.
 # cloud_provider:

-# For multi masters architecture:
-# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
-# This domain name will be inserted into the /etc/hosts file of all servers
-# configuration example with haproxy :
-# listen kubernetes-apiserver-https
-# bind 10.99.0.21:8383
-# option ssl-hello-chk
-# mode tcp
-# timeout client 3h
-# timeout server 3h
-# server master1 10.99.0.26:443
-# server master2 10.99.0.27:443
-# balance roundrobin
-# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
-
 ## Set these proxy values in order to update docker daemon to use proxies
 # http_proxy: ""
 # https_proxy: ""
@@ -136,9 +140,5 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address')
 docker_options: "--insecure-registry={{ kube_service_addresses }}"

 # default packages to install within the cluster
-kpm_packages:
-  - name: kube-system/kubedns
-    namespace: kube-system
-    variables:
-      cluster_ip: "{{skydns_server}}"
+kpm_packages: []
 # - name: kube-system/grafana
@@ -4,9 +4,10 @@
   register: need_bootstrap
   ignore_errors: True

+
 - name: Bootstrap | Run bootstrap.sh
   script: bootstrap.sh
-  when: need_bootstrap | failed
+  when: (need_bootstrap | failed)

 - set_fact:
     ansible_python_interpreter: "/opt/bin/python"
@@ -16,23 +17,23 @@
   register: need_pip
   ignore_errors: True
   changed_when: false
-  when: need_bootstrap | failed
+  when: (need_bootstrap | failed)

 - name: Bootstrap | Copy get-pip.py
   copy: src=get-pip.py dest=~/get-pip.py
-  when: need_pip | failed
+  when: (need_pip | failed)

 - name: Bootstrap | Install pip
   shell: "{{ansible_python_interpreter}} ~/get-pip.py"
-  when: need_pip | failed
+  when: (need_pip | failed)

 - name: Bootstrap | Remove get-pip.py
   file: path=~/get-pip.py state=absent
-  when: need_pip | failed
+  when: (need_pip | failed)

 - name: Bootstrap | Install pip launcher
   copy: src=runner dest=/opt/bin/pip mode=0755
-  when: need_pip | failed
+  when: (need_pip | failed)

 - name: Install required python modules
   pip:
@@ -45,4 +46,4 @@

 - name: Assign inventory name to unconfigured hostnames
   shell: sh -c "echo \"{{inventory_hostname}}\" > /etc/hostname; hostname \"{{inventory_hostname}}\""
-  when: configured_hostname.stdout == 'localhost'
+  when: (configured_hostname.stdout == 'localhost')
@@ -0,0 +1,14 @@
+---
+# raw: cat /etc/issue.net | grep '{{ bootstrap_versions }}'
+
+- name: Bootstrap | Check if bootstrap is needed
+  raw: which python
+  register: need_bootstrap
+  ignore_errors: True
+
+- name: Bootstrap | Install python 2.x
+  raw: DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal
+  when: need_bootstrap | failed
+
+- set_fact:
+    ansible_python_interpreter: "/usr/bin/python"
@@ -0,0 +1,6 @@
+---
+- include: bootstrap-ubuntu.yml
+  when: bootstrap_os == "ubuntu"
+
+- include: bootstrap-coreos.yml
+  when: bootstrap_os == "coreos"
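For instance, matching the includes above, CoreOS hosts would be bootstrapped by setting the variable accordingly (the inventory path is illustrative):

```sh
ansible-playbook -i inventory/inventory.ini -b cluster.yml -e bootstrap_os=coreos
```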
@@ -0,0 +1,19 @@
+---
+# Existing search/nameserver resolvconf entries will be purged and
+# ensured by this additional data:
+
+# Max of 4 names is allowed and no more than 256 - 17 chars total
+# (a 2 is reserved for the 'default.svc.' and 'svc.')
+#searchdomains:
+# - foo.bar.lc
+
+# Max of 2 is allowed here (a 1 is reserved for the dns_server)
+#nameservers:
+# - 127.0.0.1
+
+# Versions
+dnsmasq_version: 2.72
+
+# Images
+dnsmasq_image_repo: "andyshinn/dnsmasq"
+dnsmasq_image_tag: "{{ dnsmasq_version }}"
@@ -0,0 +1,34 @@
+- name: Dnsmasq | restart network
+  command: /bin/true
+  notify:
+    - Dnsmasq | reload network
+    - Dnsmasq | update resolvconf
+  when: ansible_os_family != "CoreOS"
+
+- name: Dnsmasq | reload network
+  service:
+    name: >-
+      {% if ansible_os_family == "RedHat" -%}
+      network
+      {%- elif ansible_os_family == "Debian" -%}
+      networking
+      {%- endif %}
+    state: restarted
+  when: ansible_os_family != "RedHat" and ansible_os_family != "CoreOS"
+
+- name: Dnsmasq | update resolvconf
+  command: /bin/true
+  notify:
+    - Dnsmasq | reload resolvconf
+    - Dnsmasq | reload kubelet
+
+- name: Dnsmasq | reload resolvconf
+  command: /sbin/resolvconf -u
+  ignore_errors: true
+
+- name: Dnsmasq | reload kubelet
+  service:
+    name: kubelet
+    state: restarted
+  when: "{{ inventory_hostname in groups['kube-master'] }}"
+  ignore_errors: true
@@ -44,12 +44,6 @@ options:
     default: null
     description:
       - The url for the API server that commands are executed against.
-  api_version:
-    required: false
-    choices: ['v1', 'v1beta3']
-    default: v1
-    description:
-      - The API version associated with cluster.
   force:
     required: false
     default: false
@@ -105,10 +99,6 @@ class KubeManager(object):
         if self.kubectl is None:
            self.kubectl = module.get_bin_path('kubectl', True)
        self.base_cmd = [self.kubectl]
-        self.api_version = module.params.get('api_version')
-
-        if self.api_version:
-            self.base_cmd.append('--api-version=' + self.api_version)

        if module.params.get('server'):
            self.base_cmd.append('--server=' + module.params.get('server'))
@@ -164,8 +154,6 @@ class KubeManager(object):
            return []

        cmd = ['replace']
-        if self.api_version != 'v1':
-            cmd = ['update']

        if self.force:
            cmd.append('--force')
@@ -271,7 +259,6 @@ def main():
            label=dict(),
            server=dict(),
            kubectl=dict(),
-            api_version=dict(default='v1', choices=['v1', 'v1beta3']),
            force=dict(default=False, type='bool'),
            all=dict(default=False, type='bool'),
            log_level=dict(default=0, type='int'),
@@ -0,0 +1,58 @@
+---
+- name: ensure dnsmasq.d directory exists
+  file:
+    path: /etc/dnsmasq.d
+    state: directory
+
+- name: ensure dnsmasq.d-available directory exists
+  file:
+    path: /etc/dnsmasq.d-available
+    state: directory
+
+- name: Write dnsmasq configuration
+  template:
+    src: 01-kube-dns.conf.j2
+    dest: /etc/dnsmasq.d-available/01-kube-dns.conf
+    mode: 0755
+    backup: yes
+
+- name: Stat dnsmasq configuration
+  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
+  register: sym
+
+- name: Move previous configuration
+  command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
+  changed_when: False
+  when: sym.stat.islnk is defined and sym.stat.islnk == False
+
+- name: Enable dnsmasq configuration
+  file:
+    src: /etc/dnsmasq.d-available/01-kube-dns.conf
+    dest: /etc/dnsmasq.d/01-kube-dns.conf
+    state: link
+
+- name: Create dnsmasq manifests
+  template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
+  with_items:
+    - {file: dnsmasq-ds.yml, type: ds}
+    - {file: dnsmasq-svc.yml, type: svc}
+  register: manifests
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Start Resources
+  kube:
+    name: dnsmasq
+    namespace: kube-system
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "{{item.item.type}}"
+    filename: /etc/kubernetes/{{item.item.file}}
+    state: "{{item.changed | ternary('latest','present') }}"
+  with_items: "{{ manifests.results }}"
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Check for dnsmasq port (pulling image and running container)
+  wait_for:
+    host: "{{dns_server}}"
+    port: 53
+    delay: 5
+  when: inventory_hostname == groups['kube-node'][0]
@@ -1,114 +1,5 @@
 ---
-- name: ensure dnsmasq.d directory exists
-  file:
-    path: /etc/dnsmasq.d
-    state: directory
-
-- name: ensure dnsmasq.d-available directory exists
-  file:
-    path: /etc/dnsmasq.d-available
-    state: directory
-
-- name: Write dnsmasq configuration
-  template:
-    src: 01-kube-dns.conf.j2
-    dest: /etc/dnsmasq.d-available/01-kube-dns.conf
-    mode: 0755
-    backup: yes
-
-- name: Stat dnsmasq configuration
-  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
-  register: sym
-
-- name: Move previous configuration
-  command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
-  changed_when: False
-  when: sym.stat.islnk is defined and sym.stat.islnk == False
-
-- name: Enable dnsmasq configuration
-  file:
-    src: /etc/dnsmasq.d-available/01-kube-dns.conf
-    dest: /etc/dnsmasq.d/01-kube-dns.conf
-    state: link
-
-- name: Create dnsmasq manifests
-  template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
-  with_items:
-    - {file: dnsmasq-ds.yml, type: ds}
-    - {file: dnsmasq-svc.yml, type: svc}
-  register: manifests
-  when: inventory_hostname == groups['kube-master'][0]
-
-- name: Start Resources
-  kube:
-    name: dnsmasq
-    namespace: kube-system
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: /etc/kubernetes/{{item.item.file}}
-    state: "{{item.changed | ternary('latest','present') }}"
-  with_items: "{{ manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0]
-
-- name: Check for dnsmasq port (pulling image and running container)
-  wait_for:
-    host: "{{dns_server}}"
-    port: 53
-    delay: 5
-  when: inventory_hostname == groups['kube-master'][0]
-
-
-- name: check resolvconf
-  stat: path=/etc/resolvconf/resolv.conf.d/head
-  register: resolvconf
-
-- name: target resolv.conf file
-  set_fact:
-    resolvconffile: >-
-      {%- if resolvconf.stat.exists == True -%}/etc/resolvconf/resolv.conf.d/head{%- else -%}/etc/resolv.conf{%- endif -%}
-
-- name: Add search resolv.conf
-  lineinfile:
-    line: "search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}"
-    dest: "{{resolvconffile}}"
-    state: present
-    insertbefore: BOF
-    backup: yes
-    follow: yes
-
-- name: Add local dnsmasq to resolv.conf
-  lineinfile:
-    line: "nameserver {{dns_server}}"
-    dest: "{{resolvconffile}}"
-    state: present
-    insertafter: "^search.*$"
-    backup: yes
-    follow: yes
-
-- name: Add options to resolv.conf
-  lineinfile:
-    line: options {{ item }}
-    dest: "{{resolvconffile}}"
-    state: present
-    regexp: "^options.*{{ item }}$"
-    insertafter: EOF
-    backup: yes
-    follow: yes
-  with_items:
-    - timeout:2
-    - attempts:2
-
-- name: disable resolv.conf modification by dhclient
-  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=0755 backup=yes
-  when: ansible_os_family == "Debian"
-
-- name: disable resolv.conf modification by dhclient
-  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient.d/nodnsupdate mode=u+x backup=yes
-  when: ansible_os_family == "RedHat"
-
-- name: update resolvconf
-  command: resolvconf -u
-  changed_when: False
-  when: resolvconf.stat.exists == True
-
-- meta: flush_handlers
+- include: dnsmasq.yml
+  when: "{{ not skip_dnsmasq|bool }}"
+
+- include: resolvconf.yml
@@ -0,0 +1,101 @@
+---
+- name: check resolvconf
+  shell: which resolvconf
+  register: resolvconf
+  ignore_errors: yes
+
+- name: target resolv.conf file
+  set_fact:
+    resolvconffile: >-
+      {%- if resolvconf.rc == 0 -%}/etc/resolvconf/resolv.conf.d/head{%- else -%}/etc/resolv.conf{%- endif -%}
+
+- name: generate search domains to resolvconf
+  set_fact:
+    searchentries:
+      "{{ ([ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([])) | join(' ') }}"
+
+- name: pick dnsmasq cluster IP
+  set_fact:
+    dnsmasq_server: >-
+      {%- if skip_dnsmasq|bool -%}{{ [ skydns_server ] + upstream_dns_servers|default([]) }}{%- else -%}{{ [ dns_server ] }}{%- endif -%}
+
+- name: generate nameservers to resolvconf
+  set_fact:
+    nameserverentries:
+      "{{ dnsmasq_server|default([]) + nameservers|default([]) }}"
+
+- name: Remove search and nameserver options from resolvconf head
+  lineinfile:
+    dest: /etc/resolvconf/resolv.conf.d/head
+    state: absent
+    regexp: "^{{ item }}.*$"
+    backup: yes
+    follow: yes
+  with_items:
+    - search
+    - nameserver
+  when: resolvconf.rc == 0
+  notify: Dnsmasq | update resolvconf
+
+- name: Add search domains to resolv.conf
+  lineinfile:
+    line: "search {{searchentries}}"
+    dest: "{{resolvconffile}}"
+    state: present
+    insertbefore: BOF
+    backup: yes
+    follow: yes
+  notify: Dnsmasq | update resolvconf
+
+- name: Add nameservers to resolv.conf
+  blockinfile:
+    dest: "{{resolvconffile}}"
+    block: |-
+      {% for item in nameserverentries -%}
+      nameserver {{ item }}
+      {% endfor %}
+    state: present
+    insertafter: "^search.*$"
+    create: yes
+    backup: yes
+    follow: yes
+    marker: "# Ansible nameservers {mark}"
+  notify: Dnsmasq | update resolvconf
+
+- name: Add options to resolv.conf
+  lineinfile:
+    line: options {{ item }}
+    dest: "{{resolvconffile}}"
+    state: present
+    regexp: "^options.*{{ item }}$"
+    insertafter: EOF
+    backup: yes
+    follow: yes
+  with_items:
+    - ndots:{{ ndots }}
+    - timeout:2
+    - attempts:2
+  notify: Dnsmasq | update resolvconf
+
+- name: Remove search and nameserver options from resolvconf base
+  lineinfile:
+    dest: /etc/resolvconf/resolv.conf.d/base
+    state: absent
+    regexp: "^{{ item }}.*$"
+    backup: yes
+    follow: yes
+  with_items:
+    - search
+    - nameserver
+  when: resolvconf.rc == 0
+  notify: Dnsmasq | update resolvconf
+
+- name: disable resolv.conf modification by dhclient
+  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/znodnsupdate mode=0755
+  notify: Dnsmasq | restart network
+  when: ansible_os_family == "Debian"
+
+- name: disable resolv.conf modification by dhclient
+  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient.d/nodnsupdate mode=u+x
+  notify: Dnsmasq | restart network
+  when: ansible_os_family == "RedHat"
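Taken together, these tasks produce a resolver configuration roughly like the sketch below (the dnsmasq cluster IP and domain are illustrative defaults, not values fixed by this change):

```
search default.svc.cluster.local svc.cluster.local
# Ansible nameservers BEGIN
nameserver 10.233.0.2
# Ansible nameservers END
options ndots:5
options timeout:2
options attempts:2
```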
@@ -4,17 +4,26 @@ listen-address=0.0.0.0

 addn-hosts=/etc/hosts

-bogus-priv
+strict-order
+# Forward k8s domain to kube-dns
+server=/{{ dns_domain }}/{{ skydns_server }}

 #Set upstream dns servers
 {% if upstream_dns_servers is defined %}
 {% for srv in upstream_dns_servers %}
 server={{ srv }}
 {% endfor %}
+{% elif cloud_provider is defined and cloud_provider == "gce" %}
+server=169.254.169.254
 {% else %}
 server=8.8.8.8
 server=8.8.4.4
 {% endif %}

-# Forward k8s domain to kube-dns
-server=/{{ dns_domain }}/{{ skydns_server }}
+bogus-priv
+no-resolv
+no-negcache
+cache-size=1000
+max-cache-ttl=10
+max-ttl=20
+log-facility=-
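With no `upstream_dns_servers` defined and no GCE cloud provider, the template renders roughly as follows (the `dns_domain` and `skydns_server` values are illustrative):

```
listen-address=0.0.0.0
addn-hosts=/etc/hosts
strict-order
# Forward k8s domain to kube-dns
server=/cluster.local/10.233.0.3
#Set upstream dns servers
server=8.8.8.8
server=8.8.4.4
bogus-priv
no-resolv
no-negcache
cache-size=1000
max-cache-ttl=10
max-ttl=20
log-facility=-
```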
@@ -14,7 +14,7 @@ spec:
     spec:
       containers:
       - name: dnsmasq
-        image: andyshinn/dnsmasq:2.72
+        image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"
        command:
          - dnsmasq
        args:
@@ -25,7 +25,7 @@ spec:
          capabilities:
            add:
              - NET_ADMIN
-        imagePullPolicy: Always
+        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 100m
@@ -50,3 +50,4 @@ spec:
        - name: etcdnsmasqdavailable
          hostPath:
            path: /etc/dnsmasq.d-available
+      dnsPolicy: Default  # Don't use cluster DNS.
@@ -2,14 +2,26 @@
 - name: restart docker
   command: /bin/true
   notify:
-    - reload systemd
-    - reload docker
+    - Docker | reload systemd
+    - Docker | reload docker
+    - Docker | pause while Docker restarts
+    - Docker | wait for docker

-- name : reload systemd
+- name : Docker | reload systemd
   shell: systemctl daemon-reload
   when: ansible_service_mgr == "systemd"

-- name: reload docker
+- name: Docker | reload docker
   service:
     name: docker
     state: restarted
+
+- name: Docker | pause while Docker restarts
+  pause: seconds=10 prompt="Waiting for docker restart"
+
+- name: Docker | wait for docker
+  command: /usr/bin/docker images
+  register: docker_ready
+  retries: 10
+  delay: 5
+  until: docker_ready.rc == 0
@@ -27,6 +27,10 @@
     id: "{{item}}"
     keyserver: "{{docker_repo_key_info.keyserver}}"
     state: present
+  register: keyserver_task_result
+  until: keyserver_task_result|success
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ docker_repo_key_info.repo_keys }}"
   when: ansible_os_family != "CoreOS"

@@ -51,6 +55,10 @@
     pkg: "{{item.name}}"
     force: "{{item.force|default(omit)}}"
     state: present
+  register: docker_task_result
+  until: docker_task_result|success
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ docker_package_info.pkgs }}"
   when: (ansible_os_family != "CoreOS") and (docker_package_info.pkgs|length > 0)

@@ -59,6 +67,14 @@
   when: ansible_service_mgr == "systemd" and
         (http_proxy is defined or https_proxy is defined or no_proxy is defined)

+- name: Write docker.service systemd file
+  template:
+    src: systemd-docker.service.j2
+    dest: /etc/systemd/system/docker.service
+  register: docker_service_file
+  notify: restart docker
+  when: ansible_service_mgr == "systemd" and ansible_os_family != "CoreOS"
+
 - meta: flush_handlers

 - name: ensure docker service is started and enabled
@@ -2,11 +2,11 @@
 Description=Docker Application Container Engine
 Documentation=http://docs.docker.com
 {% if ansible_os_family == "RedHat" %}
-After=network.target
+After=network.target docker-storage-setup.service
 Wants=docker-storage-setup.service
 {% elif ansible_os_family == "Debian" %}
 After=network.target docker.socket
-Requires=docker.socket
+Wants=docker.socket
 {% endif %}

 [Service]
@ -20,17 +20,21 @@ EnvironmentFile=-/etc/sysconfig/docker-storage
|
||||||
EnvironmentFile=-/etc/default/docker
|
EnvironmentFile=-/etc/default/docker
|
||||||
{% endif %}
|
{% endif %}
|
||||||
Environment=GOTRACEBACK=crash
|
Environment=GOTRACEBACK=crash
|
||||||
|
ExecReload=/bin/kill -s HUP $MAINPID
|
||||||
|
Delegate=yes
|
||||||
|
KillMode=process
|
||||||
ExecStart=/usr/bin/docker daemon \
|
ExecStart=/usr/bin/docker daemon \
|
||||||
$OPTIONS \
|
$OPTIONS \
|
||||||
$DOCKER_STORAGE_OPTIONS \
|
$DOCKER_STORAGE_OPTIONS \
|
||||||
$DOCKER_NETWORK_OPTIONS \
|
$DOCKER_NETWORK_OPTIONS \
|
||||||
$INSECURE_REGISTRY \
|
$INSECURE_REGISTRY \
|
||||||
$DOCKER_OPTS
|
$DOCKER_OPTS
|
||||||
|
TasksMax=infinity
|
||||||
LimitNOFILE=1048576
|
LimitNOFILE=1048576
|
||||||
LimitNPROC=1048576
|
LimitNPROC=1048576
|
||||||
LimitCORE=infinity
|
LimitCORE=infinity
|
||||||
MountFlags=slave
|
|
||||||
TimeoutStartSec=1min
|
TimeoutStartSec=1min
|
||||||
|
Restart=on-abnormal
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
@@ -5,6 +5,8 @@ docker_versioned_pkg:
   latest: docker-engine
   1.9: docker-engine=1.9.1-0~{{ ansible_distribution_release|lower }}
   1.10: docker-engine=1.10.3-0~{{ ansible_distribution_release|lower }}
+  1.11: docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
+  1.12: docker-engine=1.12.1-0~{{ ansible_distribution_release|lower }}

 docker_package_info:
   pkg_mgr: apt
@@ -4,6 +4,8 @@ docker_versioned_pkg:
   latest: docker
   1.9: docker-1:1.9.1
   1.10: docker-1:1.10.1
+  1.11: docker-1:1.11.2
+  1.12: docker-1:1.12.1

 docker_package_info:
   pkg_mgr: dnf
@@ -2,10 +2,11 @@
 docker_version: 1.11
 docker_kernel_min_version: '3.2'

-# https://apt.dockerproject.org/repo/dists/ubuntu-trusty/main/filelist
+# https://apt.dockerproject.org/repo/dists/ubuntu-xenial/main/filelist
 docker_versioned_pkg:
   latest: docker-engine
   1.11: docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
+  1.12: docker-engine=1.12.1-0~{{ ansible_distribution_release|lower }}

 docker_package_info:
   pkg_mgr: apt
@@ -6,6 +6,8 @@ docker_versioned_pkg:
   latest: docker-engine
   1.9: docker-engine=1.9.0-0~{{ ansible_distribution_release|lower }}
   1.10: docker-engine=1.10.3-0~{{ ansible_distribution_release|lower }}
+  1.11: docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
+  1.12: docker-engine=1.12.1-0~{{ ansible_distribution_release|lower }}

 docker_package_info:
   pkg_mgr: apt
@@ -5,30 +5,47 @@ local_release_dir: /tmp
 download_run_once: False

 # Versions
-include_vars: kube_versions.yml
+kube_version: v1.4.3

-etcd_version: v3.0.1
+etcd_version: v3.0.6
 #TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
-calico_version: v0.20.0
-calico_cni_version: v1.3.1
-weave_version: v1.5.0
+calico_version: v0.22.0
+calico_cni_version: v1.4.2
+weave_version: v1.6.1
+flannel_version: v0.6.2
+flannel_server_helper_version: 0.1
+pod_infra_version: 3.0

 # Download URL's
-kubelet_download_url: "https://storage.googleapis.com/kargo/{{kube_version}}_kubernetes-kubelet"
-apiserver_download_url: "https://storage.googleapis.com/kargo/{{kube_version}}_kubernetes-apiserver"
-kubectl_download_url: "https://storage.googleapis.com/kargo/{{kube_version}}_kubernetes-kubectl"

 etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
 calico_cni_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin"
 calico_cni_ipam_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin-ipam"
 weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"

 # Checksums
-calico_cni_checksum: "ac05cb9254b5aaa5822cf10325983431bd25489147f2edf9dec7e43d99c43e77"
-calico_cni_ipam_checksum: "3df6951a30749c279229e7e318e74ac4e41263996125be65257db7cd25097273"
-weave_checksum: "28d2c4e2b1ad8600da69882501eba697679aea10a5e61c769aa3a9ee72b0d89a"
-etcd_checksum: "7e5d8db2b8a7cec7a93e531c8ae0f3108c66c7d896a2fb6d8768c067923ce0aa"
+calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
+calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
+weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
+etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"

+# Containers
+# Possible values: host, docker
+etcd_deployment_type: "docker"
+etcd_image_repo: "quay.io/coreos/etcd"
+etcd_image_tag: "{{ etcd_version }}"
+flannel_server_helper_image_repo: "gcr.io/google_containers/flannel-server-helper"
+flannel_server_helper_image_tag: "{{ flannel_server_helper_version }}"
+flannel_image_repo: "quay.io/coreos/flannel"
+flannel_image_tag: "{{ flannel_version }}"
+calicoctl_image_repo: "calico/ctl"
+calicoctl_image_tag: "{{ calico_version }}"
+calico_node_image_repo: "calico/node"
+calico_node_image_tag: "{{ calico_version }}"
+hyperkube_image_repo: "quay.io/coreos/hyperkube"
+hyperkube_image_tag: "{{ kube_version }}_coreos.0"
+pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
+pod_infra_image_tag: "{{ pod_infra_version }}"

 downloads:
   calico_cni_plugin:

@@ -39,6 +56,7 @@ downloads:
     url: "{{ calico_cni_download_url }}"
     owner: "root"
     mode: "0755"
+    enabled: "{{ kube_network_plugin == 'calico' }}"
   calico_cni_plugin_ipam:
     dest: calico/bin/calico-ipam
     version: "{{calico_cni_version}}"

@@ -47,6 +65,7 @@ downloads:
     url: "{{ calico_cni_ipam_download_url }}"
     owner: "root"
     mode: "0755"
+    enabled: "{{ kube_network_plugin == 'calico' }}"
   weave:
     dest: weave/bin/weave
     version: "{{weave_version}}"

@@ -55,6 +74,7 @@ downloads:
     sha256: "{{ weave_checksum }}"
     owner: "root"
     mode: "0755"
+    enabled: "{{ kube_network_plugin == 'weave' }}"
   etcd:
     version: "{{etcd_version}}"
     dest: "etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"

@@ -64,32 +84,42 @@ downloads:
     unarchive: true
     owner: "etcd"
     mode: "0755"
-  kubernetes_kubelet:
-    version: "{{kube_version}}"
-    dest: kubernetes/bin/kubelet
-    sha256: "{{vars['kube_checksum'][kube_version]['kubelet']}}"
-    source_url: "{{ kubelet_download_url }}"
-    url: "{{ kubelet_download_url }}"
-    owner: "kube"
-    mode: "0755"
-  kubernetes_kubectl:
-    dest: kubernetes/bin/kubectl
-    version: "{{kube_version}}"
-    sha256: "{{vars['kube_checksum'][kube_version]['kubectl']}}"
-    source_url: "{{ kubectl_download_url }}"
-    url: "{{ kubectl_download_url }}"
-    owner: "kube"
-    mode: "0755"
-  kubernetes_apiserver:
-    dest: kubernetes/bin/kube-apiserver
-    version: "{{kube_version}}"
-    sha256: "{{vars['kube_checksum'][kube_version]['kube_apiserver']}}"
-    source_url: "{{ apiserver_download_url }}"
-    url: "{{ apiserver_download_url }}"
-    owner: "kube"
-    mode: "0755"
+    container: "{{ etcd_deployment_type == 'docker' }}"
+    repo: "{{ etcd_image_repo }}"
+    tag: "{{ etcd_image_tag }}"
+  hyperkube:
+    container: true
+    repo: "{{ hyperkube_image_repo }}"
+    tag: "{{ hyperkube_image_tag }}"
+  flannel:
+    container: true
+    repo: "{{ flannel_image_repo }}"
+    tag: "{{ flannel_image_tag }}"
+    enabled: "{{ kube_network_plugin == 'flannel' }}"
+  flannel_server_helper:
+    container: true
+    repo: "{{ flannel_server_helper_image_repo }}"
+    tag: "{{ flannel_server_helper_image_tag }}"
+    enabled: "{{ kube_network_plugin == 'flannel' }}"
+  calicoctl:
+    container: true
+    repo: "{{ calicoctl_image_repo }}"
+    tag: "{{ calicoctl_image_tag }}"
+    enabled: "{{ kube_network_plugin == 'calico' }}"
+  calico_node:
+    container: true
+    repo: "{{ calico_node_image_repo }}"
+    tag: "{{ calico_node_image_tag }}"
+    enabled: "{{ kube_network_plugin == 'calico' }}"
+  pod_infra:
+    container: true
+    repo: "{{ pod_infra_image_repo }}"
+    tag: "{{ pod_infra_image_tag }}"

 download:
+  container: "{{ file.container|default('false') }}"
+  repo: "{{ file.repo|default(None) }}"
+  tag: "{{ file.tag|default(None) }}"
   enabled: "{{ file.enabled|default('true') }}"
   dest: "{{ file.dest|default(None) }}"
   version: "{{ file.version|default(None) }}"
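Each `downloads` entry now carries an `enabled` flag, and container images add `container`/`repo`/`tag` fields, while the `download` dict supplies per-field defaults. A role pulls in exactly one artifact by handing one entry to the download role; this wiring sketch mirrors the pattern used later in this same commit for etcd:

    dependencies:
      - role: download
        file: "{{ downloads.etcd }}"   # one entry from the downloads dict above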
@@ -1,15 +1,12 @@
 ---
-- include_vars: kube_versions.yml
-
 - name: downloading...
   debug:
     msg: "{{ download.url }}"
-  when: "{{ download.enabled|bool }}"
+  when: "{{ download.enabled|bool and not download.container|bool }}"

 - name: Create dest directories
   file: path={{local_release_dir}}/{{download.dest|dirname}} state=directory recurse=yes
-  when: "{{ download.enabled|bool }}"
-  run_once: "{{ download_run_once|bool }}"
+  when: "{{ download.enabled|bool and not download.container|bool }}"

 - name: Download items
   get_url:

@@ -18,8 +15,11 @@
     sha256sum: "{{download.sha256 | default(omit)}}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
-  when: "{{ download.enabled|bool }}"
-  run_once: "{{ download_run_once|bool }}"
+  register: get_url_result
+  until: "'OK' in get_url_result.msg or 'file already exists' in get_url_result.msg"
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: "{{ download.enabled|bool and not download.container|bool }}"

 - name: Extract archives
   unarchive:

@@ -28,8 +28,7 @@
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     copy: no
-  when: "{{ download.enabled|bool }} and ({{download.unarchive is defined and download.unarchive == True}})"
-  run_once: "{{ download_run_once|bool }}"
+  when: "{{ download.enabled|bool and not download.container|bool and download.unarchive is defined and download.unarchive == True }}"

 - name: Fix permissions
   file:

@@ -37,5 +36,59 @@
     path: "{{local_release_dir}}/{{download.dest}}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
-  when: "{{ download.enabled|bool }} and ({{download.unarchive is not defined or download.unarchive == False}})"
+  when: "{{ download.enabled|bool and not download.container|bool and (download.unarchive is not defined or download.unarchive == False) }}"

+- name: pulling...
+  debug:
+    msg: "{{ download.repo }}:{{ download.tag }}"
+  when: "{{ download.enabled|bool and download.container|bool }}"
+
+- name: Create dest directory for saved/loaded container images
+  file: path="{{local_release_dir}}/containers" state=directory recurse=yes
+  when: "{{ download.enabled|bool and download.container|bool }}"
+
+#NOTE(bogdando) this brings no docker-py deps for nodes
+- name: Download containers
+  command: "/usr/bin/docker pull {{ download.repo }}:{{ download.tag }}"
+  register: pull_task_result
+  until: pull_task_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: "{{ download.enabled|bool and download.container|bool }}"
+  delegate_to: "{{ groups['kube-master'][0] if download_run_once|bool else inventory_hostname }}"
   run_once: "{{ download_run_once|bool }}"
+
+- set_fact:
+    fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|regex_replace('/|\0|:', '_')}}.tar"
+
+- name: "Set default value for 'container_changed' to false"
+  set_fact:
+    container_changed: false
+
+- name: "Update the 'container_changed' fact"
+  set_fact:
+    container_changed: "{{ not 'up to date' in pull_task_result.stdout }}"
+  when: "{{ download.enabled|bool and download.container|bool }}"
+  delegate_to: "{{ groups['kube-master'][0] if download_run_once|bool else inventory_hostname }}"
+  run_once: "{{ download_run_once|bool }}"
+
+- name: Download | save container images
+  shell: docker save "{{ download.repo }}:{{ download.tag }}" > "{{ fname }}"
+  delegate_to: "{{groups['kube-master'][0]}}"
+  run_once: true
+  when: ansible_os_family != "CoreOS" and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool
+
+- name: Download | get container images
+  synchronize:
+    src: "{{ fname }}"
+    dest: "{{local_release_dir}}/containers"
+    mode: push
+  register: get_task
+  until: get_task|success
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool
+
+- name: Download | load container images
+  shell: docker load < "{{ fname }}"
+  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool
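With `download_run_once` enabled, only the first master pulls from the registry; when the pull reports a new layer, the image is exported with `docker save`, pushed to the other nodes with `synchronize`, and re-imported there with `docker load`. A reduced sketch of that round-trip under illustrative paths and tag (the real tasks above drive it per `downloads` entry):

    # save -> sync -> load round-trip (illustrative, one image)
    - shell: docker save "quay.io/coreos/etcd:v3.0.6" > /tmp/releases/containers/etcd.tar
    - synchronize: src=/tmp/releases/containers/etcd.tar dest=/tmp/releases/containers
    - shell: docker load < /tmp/releases/containers/etcd.tar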
@@ -1,22 +0,0 @@
-kube_checksum:
-  v1.2.2:
-    kube_apiserver: eb1bfd8b877052cbd1991b8c429a1d06661f4cb019905e20e128174f724e16de
-    kubectl: 473e6924569fba30d4a50cecdc2cae5f31d97d1f662463e85b74a472105dcff4
-    kubelet: f16827dc7e7c82f0e215f0fc73eb01e2dfe91a2ec83f9cbcaf8d37c91b64fd3b
-  v1.2.3:
-    kube_apiserver_checksum: ebaeeeb72cb29b358337b330617a96355ff2d08a5a523fc1a81beba36cc9d6f9
-    kubectl_checksum: 394853edd409a721bcafe4f1360009ef9f845050719fe7d6fc7176f45cc92a8c
-    kubelet_checksum: 633bb41c51c5c0df0645dd60ba82b12eba39d009eb87bae9227de7d9a89c0797
-  v1.2.4:
-    kube_apiserver: 6ac99b36b02968459e026fcfc234207c66064b5e11816b69dd8fc234b2ffec1e
-    kubectl: dac61fbd506f7a17540feca691cd8a9d9d628d59661eebce788a50511f578897
-    kubelet: 4adaf40592248eef6fd4fa126464915ea41e624a70dc77178089760ed235e341
-  v1.2.5:
-    kube_apiserver: fbe8296ad4b194c06f6802a126d35cd2887dc1aded308d4da2b580f270412b33
-    kubectl: 5526a496a84701015485e32c86486e2f23599f7a865164f546e619c6a62f7f19
-    kubelet: cd15b929f0190876216f397c2c6e7aa8c08d3b047fd90b4980cd68c8f4896211
-  v1.3.0:
-    kube_apiserver: 431cd312984a29f45590138e990d5c4d537b069b71f2587a72414fabc4fcffdd
-    kubectl: f40b2d0ff33984e663a0dea4916f1cb9041abecc09b11f9372cdb8049ded95dc
-    kubelet: bd5f10ccb95fe6e95ddf7ad8a119195c27cb2bce4be6f80c1810ff1a2111496d
-kube_version: v1.3.0
@@ -1,10 +1,2 @@
 ---
-etcd_version: v3.0.1
 etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"
-
-# Possible values: host, docker
-etcd_deployment_type: "host"
-
-
-etcd_image_repo: "quay.io/coreos/etcd"
-etcd_image_tag: "{{ etcd_version }}"
@@ -2,21 +2,36 @@
 - name: restart etcd
   command: /bin/true
   notify:
-    - reload systemd
-    - start etcd
+    - etcd | reload systemd
     - reload etcd
+    - wait for etcd up

-- name: reload systemd
+- name: restart etcd-proxy
+  command: /bin/true
+  notify:
+    - etcd | reload systemd
+    - reload etcd-proxy
+    - wait for etcd up
+
+- name: etcd | reload systemd
   command: systemctl daemon-reload
   when: ansible_service_mgr == "systemd"

-- name: start etcd
-  service:
-    name: etcd
-    enabled: yes
-    state: started
+- name: wait for etcd up
+  uri: url="http://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
+  register: result
+  until: result.status == 200
+  retries: 10
+  delay: 5

 - name: reload etcd
   service:
     name: etcd
-    state: "{{ 'restarted' if etcd_deployment_type == 'host' else 'reloaded' }}"
+    state: restarted
+  when: is_etcd_master
+
+- name: reload etcd-proxy
+  service:
+    name: etcd-proxy
+    state: restarted
+  when: is_etcd_proxy
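The handler file now chains notifications: `restart etcd` is a no-op `command: /bin/true` whose only job is to notify the real steps (daemon-reload, service restart, then polling the health endpoint until it answers 200). A reduced sketch of the chaining idiom, with illustrative service names:

    - name: restart myservice           # entry point that tasks notify
      command: /bin/true
      notify:
        - myservice | reload systemd
        - reload myservice

    - name: myservice | reload systemd
      command: systemctl daemon-reload

    - name: reload myservice
      service: name=myservice state=restarted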
@@ -3,8 +3,7 @@ dependencies:
   - role: adduser
     user: "{{ addusers.etcd }}"
     when: ansible_os_family != 'CoreOS'
+  - role: docker
+    when: (ansible_os_family != "CoreOS" and etcd_deployment_type == "docker" or inventory_hostname in groups['k8s-cluster'])
   - role: download
     file: "{{ downloads.etcd }}"
-    when: etcd_deployment_type == "host"
-  - role: docker
-    when: (ansible_os_family != "CoreOS" and etcd_deployment_type == "docker")
@@ -1,6 +1,6 @@
 ---
 - name: Configure | Check if member is in cluster
-  shell: "etcdctl --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+  shell: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
   register: etcd_member_in_cluster
   ignore_errors: true
   changed_when: false

@@ -8,14 +8,14 @@

 - name: Configure | Add member to the cluster if it is not there
   when: is_etcd_master and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
-  shell: "etcdctl --peers={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
+  shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"

 - name: Configure | Copy etcd.service systemd file
   template:
     src: "etcd-{{ etcd_deployment_type }}.service.j2"
     dest: /etc/systemd/system/etcd.service
     backup: yes
-  when: ansible_service_mgr == "systemd"
+  when: ansible_service_mgr == "systemd" and is_etcd_master
   notify: restart etcd

 - name: Configure | Write etcd initd script

@@ -24,5 +24,21 @@
     dest: /etc/init.d/etcd
     owner: root
     mode: 0755
-  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian"
+  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian" and is_etcd_master
   notify: restart etcd
+
+- name: Configure | Copy etcd-proxy.service systemd file
+  template:
+    src: "etcd-proxy-{{ etcd_deployment_type }}.service.j2"
+    dest: /etc/systemd/system/etcd-proxy.service
+    backup: yes
+  when: ansible_service_mgr == "systemd" and is_etcd_proxy
+  notify: restart etcd-proxy
+- name: Configure | Write etcd-proxy initd script
+  template:
+    src: "deb-etcd-proxy-{{ etcd_deployment_type }}.initd.j2"
+    dest: /etc/init.d/etcd-proxy
+    owner: root
+    mode: 0755
+  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian" and is_etcd_proxy
+  notify: restart etcd-proxy
@@ -17,6 +17,10 @@
          /usr/bin/docker cp etcdctl-binarycopy:{{ etcd_container_bin_dir }}etcdctl {{ bin_dir }}/etcdctl &&
          /usr/bin/docker rm -f etcdctl-binarycopy"
   when: etcd_deployment_type == "docker"
+  register: etcd_task_result
+  until: etcd_task_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false

 #Plan B: looks nicer, but requires docker-py on all hosts:
@@ -1,23 +1,35 @@
 ---
-- include: set_facts.yml
 - include: install.yml
 - include: set_cluster_health.yml
 - include: configure.yml
 - include: refresh_config.yml

-- name: Restart etcd if binary changed
-  command: /bin/true
-  notify: restart etcd
-  when: etcd_deployment_type == "host" and etcd_copy.stdout_lines
-
-# Reload systemd before starting service
-- meta: flush_handlers
-
 - name: Ensure etcd is running
   service:
     name: etcd
     state: started
     enabled: yes
+  when: is_etcd_master
+
+- name: Ensure etcd-proxy is running
+  service:
+    name: etcd-proxy
+    state: started
+    enabled: yes
+  when: is_etcd_proxy
+
+- name: Restart etcd if binary changed
+  command: /bin/true
+  notify: restart etcd
+  when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_master
+
+- name: Restart etcd-proxy if binary changed
+  command: /bin/true
+  notify: restart etcd-proxy
+  when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_proxy
+
+# Reload systemd before starting service
+- meta: flush_handlers

 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
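Note the reordering here: the `Ensure ... is running` and `Restart ... if binary changed` tasks only queue notifications, and the `meta: flush_handlers` that follows runs the queued handlers once, so a changed binary produces exactly one restart per service. The idiom, reduced to a sketch:

    - name: Restart etcd if binary changed
      command: /bin/true
      notify: restart etcd     # queued, not executed yet

    - meta: flush_handlers     # run all queued handlers at this point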
@@ -4,3 +4,11 @@
     src: etcd.j2
     dest: /etc/etcd.env
   notify: restart etcd
+  when: is_etcd_master
+
+- name: Refresh config | Create etcd-proxy config file
+  template:
+    src: etcd-proxy.j2
+    dest: /etc/etcd-proxy.env
+  notify: restart etcd-proxy
+  when: is_etcd_proxy
@@ -1,6 +1,6 @@
 ---
 - name: Configure | Check if cluster is healthy
-  shell: "etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
+  shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
   register: etcd_cluster_is_healthy
   ignore_errors: true
   changed_when: false
@@ -1,20 +0,0 @@
----
-- set_fact: etcd_access_address="{{ access_ip | default(ip | default(ansible_default_ipv4['address'])) }}"
-- set_fact: etcd_peer_url="http://{{ etcd_access_address }}:2380"
-- set_fact: etcd_client_url="http://{{ etcd_access_address }}:2379"
-- set_fact:
-    etcd_access_addresses: |-
-      {% for item in groups['etcd'] -%}
-        http://{{ hostvars[item].etcd_access_address }}:2379{% if not loop.last %},{% endif %}
-      {%- endfor %}
-- set_fact:
-    etcd_member_name: |-
-      {% for host in groups['etcd'] %}
-      {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
-      {% endfor %}
-- set_fact:
-    is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
-- set_fact:
-    etcd_after_v3: etcd_version | version_compare("v3.0.0", ">=")
-- set_fact:
-    etcd_container_bin_dir: "{% if etcd_after_v3 %}/usr/local/bin/{% else %}/{% endif %}"
@@ -16,34 +16,25 @@ PATH=/sbin:/usr/sbin:/bin/:/usr/bin
 DESC="etcd k/v store"
 NAME=etcd
 DAEMON={{ docker_bin_dir | default("/usr/bin") }}/docker
-{% if is_etcd_master %}
-DAEMON_ARGS='--restart=always --env-file=/etc/etcd.env \
+DAEMON_EXEC=`basename $DAEMON`
+DAEMON_ARGS="run --restart=always --env-file=/etc/etcd.env \
 --net=host \
 -v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
 -v /var/lib/etcd:/var/lib/etcd:rw \
---name={{ etcd_member_name | default("etcd-proxy") }} \
+--name={{ etcd_member_name | default("etcd") }} \
 {{ etcd_image_repo }}:{{ etcd_image_tag }} \
 {% if etcd_after_v3 %}
-{{ etcd_container_bin_dir }}etcd \
-{% endif %}
-{% if is_etcd_master %}
---proxy off
-{% else %}
---proxy on
-{% endif %}'
-
+{{ etcd_container_bin_dir }}etcd
+{% endif %}"

 SCRIPTNAME=/etc/init.d/$NAME
-DAEMON_USER=etcd
+DAEMON_USER=root
 STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
 PID=/var/run/etcd.pid

 # Exit if the binary is not present
 [ -x "$DAEMON" ] || exit 0

-# Read configuration variable file if it is present
-[ -f /etc/etcd.env ] && . /etc/etcd.env
-
 # Define LSB log_* functions.
 # Depend on lsb-base (>= 3.2-14) to ensure that this file is present
 # and status_of_proc is working.

@@ -58,6 +49,8 @@ do_status()
 #
 do_start()
 {
+    {{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_member_name | default("etcd-proxy") }} &>/dev/null || true
+    sleep 1
     start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
         $DAEMON_ARGS \
         || return 2

@@ -68,7 +61,7 @@ do_start()
 #
 do_stop()
 {
-    start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $NAME
+    start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $DAEMON_EXEC
     RETVAL="$?"

     sleep 1
@@ -16,11 +16,6 @@ PATH=/sbin:/usr/sbin:/bin:/usr/bin
 DESC="etcd k/v store"
 NAME=etcd
 DAEMON={{ bin_dir }}/etcd
-{% if is_etcd_master %}
-DAEMON_ARGS=""
-{% else %}
-DAEMON_ARGS="--proxy on"
-{% endif %}
 SCRIPTNAME=/etc/init.d/$NAME
 DAEMON_USER=etcd
 STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
@@ -0,0 +1,120 @@
+#!/bin/sh
+set -a
+
+### BEGIN INIT INFO
+# Provides:          etcd-proxy
+# Required-Start:    $local_fs $network $syslog
+# Required-Stop:
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: etcd-proxy
+# Description:
+#    etcd-proxy is a proxy for etcd: distributed, consistent key-value store for shared configuration and service discovery
+### END INIT INFO
+
+PATH=/sbin:/usr/sbin:/bin/:/usr/bin
+DESC="etcd-proxy"
+NAME=etcd-proxy
+DAEMON={{ docker_bin_dir | default("/usr/bin") }}/docker
+DAEMON_EXEC=`basename $DAEMON`
+DAEMON_ARGS="run --restart=always --env-file=/etc/etcd-proxy.env \
+--net=host \
+--stop-signal=SIGKILL \
+-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
+--name={{ etcd_proxy_member_name | default("etcd-proxy") }} \
+{{ etcd_image_repo }}:{{ etcd_image_tag }} \
+{% if etcd_after_v3 %}
+{{ etcd_container_bin_dir }}etcd
+{% endif %}"
+
+
+SCRIPTNAME=/etc/init.d/$NAME
+DAEMON_USER=root
+STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
+PID=/var/run/etcd-proxy.pid
+
+# Exit if the binary is not present
+[ -x "$DAEMON" ] || exit 0
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+do_status()
+{
+    status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
+}
+
+# Function that starts the daemon/service
+#
+do_start()
+{
+    {{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_proxy_member_name | default("etcd-proxy") }} &>/dev/null || true
+    sleep 1
+    start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
+        $DAEMON_ARGS \
+        || return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+    start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $DAEMON_EXEC
+    RETVAL="$?"
+
+    sleep 1
+    return "$RETVAL"
+}
+
+
+case "$1" in
+    start)
+        log_daemon_msg "Starting $DESC" "$NAME"
+        do_start
+        case "$?" in
+            0|1) log_end_msg 0 || exit 0 ;;
+            2)   log_end_msg 1 || exit 1 ;;
+        esac
+        ;;
+    stop)
+        log_daemon_msg "Stopping $DESC" "$NAME"
+        if do_stop; then
+            log_end_msg 0
+        else
+            log_failure_msg "Can't stop etcd-proxy"
+            log_end_msg 1
+        fi
+        ;;
+    status)
+        if do_status; then
+            log_end_msg 0
+        else
+            log_failure_msg "etcd-proxy is not running"
+            log_end_msg 1
+        fi
+        ;;
+
+    restart|force-reload)
+        log_daemon_msg "Restarting $DESC" "$NAME"
+        if do_stop; then
+            if do_start; then
+                log_end_msg 0
+                exit 0
+            else
+                rc="$?"
+            fi
+        else
+            rc="$?"
+        fi
+        log_failure_msg "Can't restart etcd-proxy"
+        log_end_msg ${rc}
+        ;;
+    *)
+        echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+        exit 3
+        ;;
+esac
@@ -0,0 +1,110 @@
+#!/bin/sh
+set -a
+
+### BEGIN INIT INFO
+# Provides:          etcd-proxy
+# Required-Start:    $local_fs $network $syslog
+# Required-Stop:
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: etcd-proxy
+# Description:
+#    etcd-proxy is a proxy for etcd: distributed, consistent key-value store for shared configuration and service discovery
+### END INIT INFO
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="etcd-proxy"
+NAME=etcd-proxy
+DAEMON={{ bin_dir }}/etcd
+DAEMON_ARGS=""
+SCRIPTNAME=/etc/init.d/$NAME
+DAEMON_USER=etcd
+STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
+PID=/var/run/etcd-proxy.pid
+
+# Exit if the binary is not present
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -f /etc/etcd-proxy.env ] && . /etc/etcd-proxy.env
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+do_status()
+{
+    status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
+}
+
+# Function that starts the daemon/service
+#
+do_start()
+{
+    start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
+        $DAEMON_ARGS \
+        || return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+    start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $NAME
+    RETVAL="$?"
+
+    sleep 1
+    return "$RETVAL"
+}
+
+
+case "$1" in
+    start)
+        log_daemon_msg "Starting $DESC" "$NAME"
+        do_start
+        case "$?" in
+            0|1) log_end_msg 0 || exit 0 ;;
+            2)   log_end_msg 1 || exit 1 ;;
+        esac
+        ;;
+    stop)
+        log_daemon_msg "Stopping $DESC" "$NAME"
+        if do_stop; then
+            log_end_msg 0
+        else
+            log_failure_msg "Can't stop etcd-proxy"
+            log_end_msg 1
+        fi
+        ;;
+    status)
+        if do_status; then
+            log_end_msg 0
+        else
+            log_failure_msg "etcd-proxy is not running"
+            log_end_msg 1
+        fi
+        ;;
+
+    restart|force-reload)
+        log_daemon_msg "Restarting $DESC" "$NAME"
+        if do_stop; then
+            if do_start; then
+                log_end_msg 0
+                exit 0
+            else
+                rc="$?"
+            fi
+        else
+            rc="$?"
+        fi
+        log_failure_msg "Can't restart etcd-proxy"
+        log_end_msg ${rc}
+        ;;
+    *)
+        echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+        exit 3
+        ;;
+esac
@@ -1,6 +1,6 @@
 [Unit]
 Description=etcd docker wrapper
-Requires=docker.service
+Wants=docker.socket
 After=docker.service

 [Service]

@@ -13,21 +13,16 @@ ExecStart={{ docker_bin_dir | default("/usr/bin") }}/docker run --restart=always
 --net=host \
 -v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
 -v /var/lib/etcd:/var/lib/etcd:rw \
---name={{ etcd_member_name | default("etcd-proxy") }} \
+--name={{ etcd_member_name | default("etcd") }} \
 {{ etcd_image_repo }}:{{ etcd_image_tag }} \
 {% if etcd_after_v3 %}
-{{ etcd_container_bin_dir }}etcd \
-{% endif %}
-{% if is_etcd_master %}
---proxy off
-{% else %}
---proxy on
+{{ etcd_container_bin_dir }}etcd
 {% endif %}
 ExecStartPre=-{{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_member_name | default("etcd-proxy") }}
 ExecReload={{ docker_bin_dir | default("/usr/bin") }}/docker restart {{ etcd_member_name | default("etcd-proxy") }}
 ExecStop={{ docker_bin_dir | default("/usr/bin") }}/docker stop {{ etcd_member_name | default("etcd-proxy") }}
 Restart=always
-RestartSec=10s
+RestartSec=15s

 [Install]
 WantedBy=multi-user.target
@@ -1,15 +1,13 @@
 [Unit]
 Description=etcd
+After=network.target

 [Service]
+Type=notify
 User=etcd
 EnvironmentFile=/etc/etcd.env
-{% if inventory_hostname in groups['etcd'] %}
 ExecStart={{ bin_dir }}/etcd
-{% else %}
-ExecStart={{ bin_dir }}/etcd -proxy on
-{% endif %}
+NotifyAccess=all
 Restart=always
 RestartSec=10s
 LimitNOFILE=40000
@@ -0,0 +1,28 @@
+[Unit]
+Description=etcd-proxy docker wrapper
+Wants=docker.socket
+After=docker.service
+
+[Service]
+User=root
+PermissionsStartOnly=true
+ExecStart={{ docker_bin_dir | default("/usr/bin") }}/docker run --restart=always \
+--env-file=/etc/etcd-proxy.env \
+{# TODO(mattymo): Allow docker IP binding and disable in envfile
+   -p 2380:2380 -p 2379:2379 #}
+--net=host \
+--stop-signal=SIGKILL \
+-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
+--name={{ etcd_proxy_member_name | default("etcd-proxy") }} \
+{{ etcd_image_repo }}:{{ etcd_image_tag }} \
+{% if etcd_after_v3 %}
+{{ etcd_container_bin_dir }}etcd
+{% endif %}
+ExecStartPre=-{{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_proxy_member_name | default("etcd-proxy") }}
+ExecReload={{ docker_bin_dir | default("/usr/bin") }}/docker restart {{ etcd_proxy_member_name | default("etcd-proxy") }}
+ExecStop={{ docker_bin_dir | default("/usr/bin") }}/docker stop {{ etcd_proxy_member_name | default("etcd-proxy") }}
+Restart=always
+RestartSec=15s
+
+[Install]
+WantedBy=multi-user.target
@@ -0,0 +1,19 @@
+[Unit]
+Description=etcd-proxy
+After=network.target
+
+[Service]
+Type=notify
+User=etcd
+PermissionsStartOnly=true
+EnvironmentFile=/etc/etcd-proxy.env
+ExecStart={{ bin_dir }}/etcd
+ExecStartPre=/bin/mkdir -p /var/lib/etcd-proxy
+ExecStartPre=/bin/chown -R etcd: /var/lib/etcd-proxy
+NotifyAccess=all
+Restart=always
+RestartSec=10s
+LimitNOFILE=40000
+
+[Install]
+WantedBy=multi-user.target
@@ -0,0 +1,5 @@
+ETCD_DATA_DIR=/var/lib/etcd-proxy
+ETCD_PROXY=on
+ETCD_LISTEN_CLIENT_URLS={{ etcd_access_endpoint }}
+ETCD_NAME={{ etcd_proxy_member_name | default("etcd-proxy") }}
+ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
@@ -1,14 +1,16 @@
 ETCD_DATA_DIR=/var/lib/etcd
-{% if is_etcd_master %}
-ETCD_ADVERTISE_CLIENT_URLS=http://{{ hostvars[inventory_hostname]['access_ip'] | default(hostvars[inventory_hostname]['ip'] | default( hostvars[inventory_hostname]['ansible_default_ipv4']['address'])) }}:2379
-ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ hostvars[inventory_hostname]['access_ip'] | default(hostvars[inventory_hostname]['ip'] | default( hostvars[inventory_hostname]['ansible_default_ipv4']['address'])) }}:2380
+ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_url }}
+ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_url }}
 ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %}

+{% if not is_etcd_proxy %}
+ETCD_LISTEN_CLIENT_URLS=http://{{ etcd_address }}:2379,http://127.0.0.1:2379
+{% else %}
+ETCD_LISTEN_CLIENT_URLS=http://{{ etcd_address }}:2379
+{% endif %}
 ETCD_ELECTION_TIMEOUT=10000
 ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
-ETCD_LISTEN_PEER_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}:2380
+ETCD_LISTEN_PEER_URLS=http://{{ etcd_address }}:2380
 ETCD_NAME={{ etcd_member_name }}
-{% endif %}
-ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
-
-ETCD_LISTEN_CLIENT_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}:2379,http://127.0.0.1:2379
+ETCD_PROXY=off
+ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
@@ -0,0 +1,12 @@
+# Versions
+kubedns_version: 1.7
+kubednsmasq_version: 1.3
+exechealthz_version: 1.1
+
+# Images
+kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
+kubedns_image_tag: "{{ kubedns_version }}"
+kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
+kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
+exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
+exechealthz_image_tag: "{{ exechealthz_version }}"
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+DOCUMENTATION = """
+---
+module: kube
+short_description: Manage Kubernetes Cluster
+description:
+  - Create, replace, remove, and stop resources within a Kubernetes Cluster
+version_added: "2.0"
+options:
+  name:
+    required: false
+    default: null
+    description:
+      - The name associated with resource
+  filename:
+    required: false
+    default: null
+    description:
+      - The path and filename of the resource(s) definition file.
+  kubectl:
+    required: false
+    default: null
+    description:
+      - The path to the kubectl bin
+  namespace:
+    required: false
+    default: null
+    description:
+      - The namespace associated with the resource(s)
+  resource:
+    required: false
+    default: null
+    description:
+      - The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
+  label:
+    required: false
+    default: null
+    description:
+      - The labels used to filter specific resources.
+  server:
+    required: false
+    default: null
+    description:
+      - The url for the API server that commands are executed against.
+  force:
+    required: false
+    default: false
+    description:
+      - A flag to indicate to force delete, replace, or stop.
+  all:
+    required: false
+    default: false
+    description:
+      - A flag to indicate delete all, stop all, or all namespaces when checking exists.
+  log_level:
+    required: false
+    default: 0
+    description:
+      - Indicates the level of verbosity of logging by kubectl.
+  state:
+    required: false
+    choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
+    default: present
+    description:
+      - present handles checking existence or creating if definition file provided,
+        absent handles deleting resource(s) based on other options,
+        latest handles creating or updating based on existence,
+        reloaded handles updating resource(s) definition using definition file,
+        stopped handles stopping resource(s) based on other options.
+requirements:
+  - kubectl
+author: "Kenny Jones (@kenjones-cisco)"
+"""
+
+EXAMPLES = """
+- name: test nginx is present
+  kube: name=nginx resource=rc state=present
+
+- name: test nginx is stopped
+  kube: name=nginx resource=rc state=stopped
+
+- name: test nginx is absent
+  kube: name=nginx resource=rc state=absent
+
+- name: test nginx is present
+  kube: filename=/tmp/nginx.yml
+"""
+
+
+class KubeManager(object):
+
+    def __init__(self, module):
+
+        self.module = module
+
+        self.kubectl = module.params.get('kubectl')
+        if self.kubectl is None:
+            self.kubectl = module.get_bin_path('kubectl', True)
+        self.base_cmd = [self.kubectl]
+
+        if module.params.get('server'):
+            self.base_cmd.append('--server=' + module.params.get('server'))
+
+        if module.params.get('log_level'):
+            self.base_cmd.append('--v=' + str(module.params.get('log_level')))
+
+        if module.params.get('namespace'):
+            self.base_cmd.append('--namespace=' + module.params.get('namespace'))
+
+        self.all = module.params.get('all')
+        self.force = module.params.get('force')
+        self.name = module.params.get('name')
+        self.filename = module.params.get('filename')
+        self.resource = module.params.get('resource')
+        self.label = module.params.get('label')
+
+    def _execute(self, cmd):
+        args = self.base_cmd + cmd
+        try:
+            rc, out, err = self.module.run_command(args)
+            if rc != 0:
+                self.module.fail_json(
+                    msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
+        except Exception as exc:
+            self.module.fail_json(
+                msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
+        return out.splitlines()
+
+    def _execute_nofail(self, cmd):
+        args = self.base_cmd + cmd
+        rc, out, err = self.module.run_command(args)
+        if rc != 0:
+            return None
+        return out.splitlines()
+
+    def create(self, check=True):
+        if check and self.exists():
+            return []
+
+        cmd = ['create']
+
+        if not self.filename:
+            self.module.fail_json(msg='filename required to create')
+
+        cmd.append('--filename=' + self.filename)
+
+        return self._execute(cmd)
+
+    def replace(self):
+
+        if not self.force and not self.exists():
+            return []
+
+        cmd = ['replace']
+
+        if self.force:
+            cmd.append('--force')
+
+        if not self.filename:
+            self.module.fail_json(msg='filename required to reload')
+
+        cmd.append('--filename=' + self.filename)
+
+        return self._execute(cmd)
+
+    def delete(self):
+
+        if not self.force and not self.exists():
+            return []
+
+        cmd = ['delete']
+
+        if self.filename:
+            cmd.append('--filename=' + self.filename)
+        else:
+            if not self.resource:
+                self.module.fail_json(msg='resource required to delete without filename')
+
+            cmd.append(self.resource)
+
+            if self.name:
+                cmd.append(self.name)
+
+            if self.label:
+                cmd.append('--selector=' + self.label)
+
+            if self.all:
+                cmd.append('--all')
+
+            if self.force:
+                cmd.append('--ignore-not-found')
+
+        return self._execute(cmd)
+
+    def exists(self):
+        cmd = ['get']
+
+        if not self.resource:
+            return False
+
+        cmd.append(self.resource)
+
+        if self.name:
+            cmd.append(self.name)
+
+        cmd.append('--no-headers')
+
+        if self.label:
+            cmd.append('--selector=' + self.label)
+
+        if self.all:
+            cmd.append('--all-namespaces')
+
+        result = self._execute_nofail(cmd)
+        if not result:
+            return False
+        return True
+
+    def stop(self):
+
+        if not self.force and not self.exists():
+            return []
+
+        cmd = ['stop']
+
+        if self.filename:
+            cmd.append('--filename=' + self.filename)
+        else:
+            if not self.resource:
+                self.module.fail_json(msg='resource required to stop without filename')
+
+            cmd.append(self.resource)
+
+            if self.name:
+                cmd.append(self.name)
+
+            if self.label:
+                cmd.append('--selector=' + self.label)
+
+            if self.all:
+                cmd.append('--all')
+
+            if self.force:
+                cmd.append('--ignore-not-found')
+
+        return self._execute(cmd)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(),
+            filename=dict(),
+            namespace=dict(),
+            resource=dict(),
+            label=dict(),
+            server=dict(),
+            kubectl=dict(),
+            force=dict(default=False, type='bool'),
+            all=dict(default=False, type='bool'),
+            log_level=dict(default=0, type='int'),
+            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
+            )
+        )
+
+    changed = False
+
+    manager = KubeManager(module)
+    state = module.params.get('state')
+
+    if state == 'present':
+        result = manager.create()
+
+    elif state == 'absent':
+        result = manager.delete()
+
+    elif state == 'reloaded':
+        result = manager.replace()
+
+    elif state == 'stopped':
+        result = manager.stop()
+
+    elif state == 'latest':
+        if manager.exists():
+            manager.force = True
+            result = manager.replace()
+        else:
+            result = manager.create(check=False)
+
+    else:
+        module.fail_json(msg='Unrecognized state %s.' % state)
+
+    if result:
+        changed = True
+    module.exit_json(changed=changed,
+                     msg='success: %s' % (' '.join(result))
+                     )
+
+
+from ansible.module_utils.basic import *  # noqa
+if __name__ == '__main__':
+    main()
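The new `kube` module wraps `kubectl` subcommands behind a declarative task interface. A hedged usage example from a play, mirroring the module's own EXAMPLES section and the tasks added later in this commit (the manifest path and resource name are illustrative):

    - name: ensure the kubedns replication controller is up to date
      kube:
        name: kubedns
        namespace: kube-system
        resource: rc
        filename: /etc/kubernetes/kubedns-rc.yml
        state: latest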
@@ -0,0 +1,10 @@
+- name: Write calico-policy-controller yaml
+  template: src=calico-policy-controller.yml.j2 dest=/etc/kubernetes/calico-policy-controller.yml
+  when: inventory_hostname == groups['kube-master'][0]
+
+
+- name: Start of Calico policy controller
+  kube:
+    kubectl: "{{bin_dir}}/kubectl"
+    filename: /etc/kubernetes/calico-policy-controller.yml
+  when: inventory_hostname == groups['kube-master'][0]
|
@@ -0,0 +1,23 @@
---
- name: Kubernetes Apps | Lay Down KubeDNS Template
  template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
  with_items:
    - {file: kubedns-rc.yml, type: rc}
    - {file: kubedns-svc.yml, type: svc}
  register: manifests
  when: inventory_hostname == groups['kube-master'][0]

- name: Kubernetes Apps | Start Resources
  kube:
    name: kubedns
    namespace: kube-system
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: /etc/kubernetes/{{item.item.file}}
    state: "{{item.changed | ternary('latest','present') }}"
  with_items: "{{ manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0]


- include: tasks/calico-policy-controller.yml
  when: enable_network_policy is defined and enable_network_policy == True
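The `ternary` filter on `item.changed` is what keeps this idempotent: a template that changed triggers `latest` (a forced replace, or a create if the resource is missing), while an unchanged one falls through to `present` (create only if absent). The same idiom in isolation, with hypothetical file and task names:

    - name: Lay down a manifest
      template: src=app.yml.j2 dest=/etc/kubernetes/app.yml
      register: manifest

    - name: Apply it, replacing only when the template changed
      kube:
        filename: /etc/kubernetes/app.yml
        kubectl: "{{ bin_dir }}/kubectl"
        state: "{{ manifest.changed | ternary('latest', 'present') }}"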
@@ -0,0 +1,40 @@
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    matchLabels:
      kubernetes.io/cluster-service: "true"
      k8s-app: calico-policy
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        kubernetes.io/cluster-service: "true"
        k8s-app: calico-policy
    spec:
      hostNetwork: true
      containers:
        - name: calico-policy-controller
          image: calico/kube-policy-controller:latest
          env:
            - name: ETCD_ENDPOINTS
              value: "{{ etcd_endpoint }}"
            # Location of the Kubernetes API - this shouldn't need to be
            # changed so long as it is used in conjunction with
            # CONFIGURE_ETC_HOSTS="true".
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Configure /etc/hosts within the container to resolve
            # the kubernetes.default Service to the correct clusterIP
            # using the environment provided by the kubelet.
            # This removes the need for KubeDNS to resolve the Service.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"
@@ -0,0 +1,100 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubedns
  namespace: kube-system
  labels:
    k8s-app: kubedns
    version: v19
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kubedns
    version: v19
  template:
    metadata:
      labels:
        k8s-app: kubedns
        version: v19
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: kubedns
        image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            cpu: 100m
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 30
          timeoutSeconds: 5
        args:
        # command = "/kube-dns"
        - --domain={{ dns_domain }}.
        - --dns-port=10053
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
      - name: dnsmasq
        image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
        args:
        - --log-facility=-
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
      - name: healthz
        image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 10m
            memory: 50Mi
          requests:
            cpu: 10m
            # Note that this container shouldn't really need 50Mi of memory. The
            # limits are set higher than expected pending investigation on #29688.
            # The extra memory was stolen from the kubedns container to keep the
            # net memory requested by the pod constant.
            memory: 50Mi
        args:
        - -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null
        - -port=8080
        - -quiet
        ports:
        - containerPort: 8080
          protocol: TCP
      dnsPolicy: Default  # Don't use cluster DNS.
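The healthz sidecar above already exercises the whole resolution chain (kubedns on 10053 sitting behind dnsmasq on 53). To check the same thing from a workload's point of view, a throwaway pod is a common sketch; this assumes the default cluster.local value for dns_domain and a reachable busybox image, both of which are assumptions rather than anything this diff pins down:

    apiVersion: v1
    kind: Pod
    metadata:
      name: dns-test
    spec:
      restartPolicy: Never
      containers:
      - name: dns-test
        image: busybox
        # resolves via the kubedns Service IP the kubelet writes into /etc/resolv.conf
        command: ['nslookup', 'kubernetes.default.svc.cluster.local']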
@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
  name: kubedns
  namespace: kube-system
  labels:
    k8s-app: kubedns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "kubedns"
spec:
  selector:
    k8s-app: kubedns
  clusterIP: {{ skydns_server }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
@@ -1,4 +0,0 @@
---
kpm_registry: "https://api.kpm.sh"
kpm_namespace: "default"
kpm_packages: []
@@ -0,0 +1,2 @@
---
- debug: msg="No helm charts"
@@ -1,14 +1,9 @@
-- name: Install pip
-  action:
-    module: "{{ ansible_pkg_mgr }}"
-    name: "python-pip"
-    state: latest
-  when: ansible_os_family != "CoreOS" and kpm_packages | length > 0
-
+---
 - name: install kpm
   pip:
     name: "kpm"
-    state: "latest"
+    state: "present"
+    version: "0.16.1"
   when: kpm_packages | length > 0

 - name: manage kubernetes applications
@@ -0,0 +1,3 @@
dependencies:
  - {role: kubernetes-apps/ansible, tags: apps}
  - {role: kubernetes-apps/kpm, tags: [apps, kpm]}
@@ -0,0 +1,30 @@
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"

# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"

# An experimental dev/test only dynamic volumes provisioner,
# for PetSets. Works for kube>=v1.3 only.
kube_hostpath_dynamic_provisioner: "false"

# This is where you can drop yaml/json files and the kubelet will run those
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"

# This directory is where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location.
# Editing this value will almost surely break something. Don't
# change it. Things like the systemd scripts are hard coded to
# look in here. Don't do it.
kube_config_dir: /etc/kubernetes

# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
kube_apiserver_insecure_bind_address: 127.0.0.1

# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
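Since these are role defaults, any of them can be overridden at inventory level without touching the role itself. A minimal sketch, with an illustrative group_vars path (the file name is hypothetical, not part of this diff):

    # inventory/group_vars/k8s-cluster.yml (hypothetical location)
    kube_hostpath_dynamic_provisioner: "true"
    # only widen this if unauthenticated local API access from other hosts is acceptable
    kube_apiserver_insecure_bind_address: 0.0.0.0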
@@ -1,4 +1,44 @@
 ---
-- name: restart kube-apiserver
-  set_fact:
-    restart_apimaster: True
+- name: Master | restart kubelet
+  command: /bin/true
+  notify:
+    - Master | reload systemd
+    - Master | reload kubelet
+    - Master | wait for master static pods
+
+- name: Master | wait for master static pods
+  command: /bin/true
+  notify:
+    - Master | wait for the apiserver to be running
+    - Master | wait for kube-scheduler
+    - Master | wait for kube-controller-manager
+
+- name: Master | reload systemd
+  command: systemctl daemon-reload
+  when: ansible_service_mgr == "systemd"
+
+- name: Master | reload kubelet
+  service:
+    name: kubelet
+    state: restarted
+
+- name: Master | wait for kube-scheduler
+  uri: url=http://localhost:10251/healthz
+  register: scheduler_result
+  until: scheduler_result.status == 200
+  retries: 15
+  delay: 5
+
+- name: Master | wait for kube-controller-manager
+  uri: url=http://localhost:10252/healthz
+  register: controller_manager_result
+  until: controller_manager_result.status == 200
+  retries: 15
+  delay: 5
+
+- name: Master | wait for the apiserver to be running
+  uri: url=http://localhost:8080/healthz
+  register: result
+  until: result.status == 200
+  retries: 10
+  delay: 6
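The `command: /bin/true` handlers above are deliberate no-ops used only to fan out notifications: Ansible runs handlers in the order they are defined in the file, so a single `notify: Master | restart kubelet` cascades into a systemd reload, a kubelet restart, and then health polls against each static pod. The same idiom reduced to a sketch, with hypothetical service and handler names:

    - name: restart myservice          # no-op handler that only fans out
      command: /bin/true
      notify:
        - reload myservice
        - wait for myservice

    - name: reload myservice
      service: name=myservice state=restarted

    - name: wait for myservice
      uri: url=http://localhost:8080/healthz
      register: result
      until: result.status == 200
      retries: 10
      delay: 5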
@@ -1,8 +1,4 @@
 ---
 dependencies:
   - role: download
-    file: "{{ downloads.kubernetes_kubectl }}"
-  - role: download
-    file: "{{ downloads.kubernetes_apiserver }}"
-  - { role: etcd }
-  - { role: kubernetes/node }
+    file: "{{ downloads.hyperkube }}"
@@ -1,53 +1,27 @@
 ---
+- include: pre-upgrade.yml
+
 - name: Copy kubectl bash completion
   copy:
     src: kubectl_bash_completion.sh
     dest: /etc/bash_completion.d/kubectl.sh
   when: ansible_os_family in ["Debian","RedHat"]

-- name: Copy kube-apiserver binary
-  command: rsync -piu "{{ local_release_dir }}/kubernetes/bin/kube-apiserver" "{{ bin_dir }}/kube-apiserver"
-  register: kube_apiserver_copy
-  changed_when: false
-
-- name: Copy kubectl binary
-  command: rsync -piu "{{ local_release_dir }}/kubernetes/bin/kubectl" "{{ bin_dir }}/kubectl"
-  changed_when: false
-
-- name: install | Write kube-apiserver systemd init file
-  template:
-    src: "kube-apiserver.service.j2"
-    dest: "/etc/systemd/system/kube-apiserver.service"
-    backup: yes
-  when: ansible_service_mgr == "systemd"
-  notify: restart kube-apiserver
-
-- name: install | Write kube-apiserver initd script
-  template:
-    src: "deb-kube-apiserver.initd.j2"
-    dest: "/etc/init.d/kube-apiserver"
-    owner: root
-    mode: 0755
-    backup: yes
-  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian"
-
-- name: Write kube-apiserver config file
-  template:
-    src: "kube-apiserver.j2"
-    dest: "{{ kube_config_dir }}/kube-apiserver.env"
-    backup: yes
-  notify: restart kube-apiserver
-
-- name: Allow apiserver to bind on both secure and insecure ports
-  shell: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver
-  changed_when: false
+- name: Copy kubectl from hyperkube container
+  command: "/usr/bin/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp /hyperkube /systembindir/kubectl"
+  register: kube_task_result
+  until: kube_task_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  changed_when: false
+
+- name: Write kube-apiserver manifest
+  template:
+    src: manifests/kube-apiserver.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
+  notify: Master | wait for the apiserver to be running

 - meta: flush_handlers

-- include: start.yml
-  with_items: "{{ groups['kube-master'] }}"
-  when: "{{ hostvars[item].inventory_hostname == inventory_hostname }}"
-
 # Create kube-system namespace
 - name: copy 'kube-system' namespace manifest
   copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml
@@ -61,29 +35,20 @@
   failed_when: False
   run_once: yes

-- name: wait for the apiserver to be running
-  wait_for:
-    port: "{{kube_apiserver_insecure_port}}"
-    timeout: 60
-
 - name: Create 'kube-system' namespace
   command: "{{ bin_dir }}/kubectl create -f /etc/kubernetes/kube-system-ns.yml"
   changed_when: False
   when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]

-# Write manifests
+# Write other manifests
 - name: Write kube-controller-manager manifest
   template:
     src: manifests/kube-controller-manager.manifest.j2
     dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
+  notify: Master | wait for kube-controller-manager

 - name: Write kube-scheduler manifest
   template:
     src: manifests/kube-scheduler.manifest.j2
     dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
-
-- name: restart kubelet
-  service:
-    name: kubelet
-    state: restarted
-  changed_when: false
+  notify: Master | wait for kube-scheduler
@@ -0,0 +1,25 @@
---
- name: "Pre-upgrade | check for kube-apiserver unit file"
  stat:
    path: /etc/systemd/system/kube-apiserver.service
  register: kube_apiserver_service_file

- name: "Pre-upgrade | check for kube-apiserver init script"
  stat:
    path: /etc/init.d/kube-apiserver
  register: kube_apiserver_init_script

- name: "Pre-upgrade | stop kube-apiserver if service defined"
  service:
    name: kube-apiserver
    state: stopped
  when: (kube_apiserver_service_file.stat.exists|default(False) or kube_apiserver_init_script.stat.exists|default(False))

- name: "Pre-upgrade | remove kube-apiserver service definition"
  file:
    path: "{{ item }}"
    state: absent
  when: (kube_apiserver_service_file.stat.exists|default(False) or kube_apiserver_init_script.stat.exists|default(False))
  with_items:
    - /etc/systemd/system/kube-apiserver.service
    - /etc/init.d/kube-apiserver
@@ -1,22 +0,0 @@
---
- name: Pause
  pause: seconds=10

- name: reload systemd
  command: systemctl daemon-reload
  when: ansible_service_mgr == "systemd" and restart_apimaster is defined and restart_apimaster == True

- name: reload kube-apiserver
  service:
    name: kube-apiserver
    state: restarted
    enabled: yes
  when: ( restart_apimaster is defined and restart_apimaster == True) or
        secret_changed | default(false)

- name: Enable apiserver
  service:
    name: kube-apiserver
    enabled: yes
    state: started
  when: restart_apimaster is not defined or restart_apimaster == False
@@ -1,118 +0,0 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides:          kube-apiserver
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: The Kubernetes apiserver
# Description:
#   The Kubernetes apiserver.
### END INIT INFO


# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Kubernetes apiserver"
NAME=kube-apiserver
DAEMON={{ bin_dir }}/kube-apiserver
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/kubernetes/$NAME.env ] && . /etc/kubernetes/$NAME.env

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER -- \
        $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
        0|1) log_end_msg 0 || exit 0 ;;
        2)   log_end_msg 1 || exit 1 ;;
    esac
  ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
        0|1) log_end_msg 0 ;;
        2) exit 1 ;;
    esac
  ;;
  status)
    status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
  ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
            0) log_end_msg 0 ;;
            1) log_end_msg 1 ;; # Old process is still running
            *) log_end_msg 1 ;; # Failed to start
        esac
      ;;
      *)
        # Failed to stop
        log_end_msg 1
      ;;
    esac
  ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
  ;;
esac
@@ -1,58 +0,0 @@
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver

{% if ansible_service_mgr in ["sysvinit","upstart"] %}
# Logging directory
KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
{% else %}
# logging to stderr means we get it in the systemd journal
KUBE_LOGGING="--logtostderr=true"
{% endif %}

# Apiserver Log level, 0 is debug
KUBE_LOG_LEVEL="--v={{ kube_log_level | default('2') }}"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow_privileged=true"

# The port on the local server to listen on.
KUBE_API_PORT="--insecure-port={{kube_apiserver_insecure_port}} --secure-port={{ kube_apiserver_port }}"

# Insecure API address (default is localhost)
KUBE_API_INSECURE_BIND="--insecure-bind-address={{ kube_apiserver_insecure_bind_address | default('127.0.0.1') }}"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"

# Location of the etcd cluster
KUBE_ETCD_SERVERS="--etcd_servers={% for host in groups['etcd'] %}http://{{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}{% endfor %}"

# Bind address for secure endpoint
KUBE_API_ADDRESS="--bind-address={{ ip | default(ansible_default_ipv4.address) }}"

# default admission control policies
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota"

# RUNTIME API CONFIGURATION (e.g. enable extensions)
KUBE_RUNTIME_CONFIG="{% if kube_api_runtime_config is defined %}{% for conf in kube_api_runtime_config %}--runtime-config={{ conf }} {% endfor %}{% endif %}"

# TLS CONFIGURATION
KUBE_TLS_CONFIG="--tls_cert_file={{ kube_cert_dir }}/apiserver.pem --tls_private_key_file={{ kube_cert_dir }}/apiserver-key.pem --client_ca_file={{ kube_cert_dir }}/ca.pem"

# Add you own!
KUBE_API_ARGS="--token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/apiserver-key.pem --advertise-address={{ ip | default(ansible_default_ipv4.address) }}"

{% if cloud_provider is defined and cloud_provider == "openstack" %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
{% else %}
{# TODO: gce and aws don't need the cloud provider to be set? #}
KUBELET_CLOUDPROVIDER=""
{% endif %}

{% if ansible_service_mgr in ["sysvinit","upstart"] %}
DAEMON_ARGS="$KUBE_LOGGING $KUBE_LOG_LEVEL $KUBE_ALLOW_PRIV $KUBE_API_PORT $KUBE_API_INSECURE_BIND \
$KUBE_SERVICE_ADDRESSES $KUBE_ETCD_SERVERS $KUBE_ADMISSION_CONTROL $KUBE_RUNTIME_CONFIG \
$KUBE_TLS_CONFIG $KUBE_API_ARGS $KUBELET_CLOUDPROVIDER"
{% endif %}
@@ -1,30 +0,0 @@
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd.service
After=etcd.service

[Service]
EnvironmentFile=/etc/kubernetes/kube-apiserver.env
User=kube
ExecStart={{ bin_dir }}/kube-apiserver \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_ETCD_SERVERS \
        $KUBE_API_ADDRESS \
        $KUBE_API_PORT \
        $KUBE_API_INSECURE_BIND \
        $KUBELET_PORT \
        $KUBE_ALLOW_PRIV \
        $KUBE_SERVICE_ADDRESSES \
        $KUBE_ADMISSION_CONTROL \
        $KUBE_RUNTIME_CONFIG \
        $KUBE_TLS_CONFIG \
        $KUBE_API_ARGS \
        $KUBELET_CLOUDPROVIDER
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
@@ -5,7 +5,7 @@ preferences: {}
 clusters:
 - cluster:
     certificate-authority-data: {{ kube_node_cert|b64encode }}
-    server: https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }}
+    server: {{ kube_apiserver_endpoint }}
   name: {{ cluster_name }}
 contexts:
 - context:
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-apiserver
+  namespace: kube-system
 spec:
   hostNetwork: true
   containers:
@@ -11,13 +12,17 @@ spec:
     - /hyperkube
     - apiserver
     - --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
-    - --etcd-servers={% for srv in groups['etcd'] %}http://{{ hostvars[srv]['access_ip'] | default(hostvars[srv]['ip']|default(hostvars[srv]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}{% endfor %}
-    - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
+    - --etcd-servers={{ etcd_access_endpoint }}
+    - --etcd-quorum-read=true
+    - --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
+    - --apiserver-count={{ kube_apiserver_count }}
+    - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota
     - --service-cluster-ip-range={{ kube_service_addresses }}
     - --client-ca-file={{ kube_cert_dir }}/ca.pem
     - --basic-auth-file={{ kube_users_dir }}/known_users.csv
     - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
     - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
+    - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
     - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
     - --secure-port={{ kube_apiserver_port }}
     - --insecure-port={{ kube_apiserver_insecure_port }}
@@ -26,16 +31,18 @@ spec:
     - --runtime-config={{ conf }}
 {% endfor %}
 {% endif %}
-    - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
+{% if enable_network_policy is defined and enable_network_policy == True %}
+    - --runtime-config=extensions/v1beta1/networkpolicies=true
+{% endif %}
     - --v={{ kube_log_level | default('2') }}
     - --allow-privileged=true
-    ports:
-    - containerPort: {{ kube_apiserver_port }}
-      hostPort: {{ kube_apiserver_port }}
-      name: https
-    - containerPort: {{ kube_apiserver_insecure_port }}
-      hostPort: {{ kube_apiserver_insecure_port }}
-      name: local
+{% if cloud_provider is defined and cloud_provider == "openstack" %}
+    - --cloud-provider={{ cloud_provider }}
+    - --cloud-config={{ kube_config_dir }}/cloud_config
+{% elif cloud_provider is defined and cloud_provider == "aws" %}
+    - --cloud-provider={{ cloud_provider }}
+{% endif %}
+    - 2>&1 >> {{ kube_log_dir }}/kube-apiserver.log
     volumeMounts:
     - mountPath: {{ kube_config_dir }}
       name: kubernetes-config
@@ -43,6 +50,8 @@ spec:
     - mountPath: /etc/ssl/certs
       name: ssl-certs-host
       readOnly: true
+    - mountPath: /var/log/
+      name: logfile
   volumes:
   - hostPath:
       path: {{ kube_config_dir }}
@@ -50,3 +59,6 @@ spec:
   - hostPath:
       path: /etc/ssl/certs/
     name: ssl-certs-host
+  - hostPath:
+      path: /var/log/
+    name: logfile
@@ -11,15 +11,17 @@ spec:
     command:
     - /hyperkube
     - controller-manager
-    - --master=http://127.0.0.1:{{kube_apiserver_insecure_port}}
+    - --master={{ kube_apiserver_endpoint }}
     - --leader-elect=true
     - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
     - --root-ca-file={{ kube_cert_dir }}/ca.pem
     - --enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }}
     - --v={{ kube_log_level | default('2') }}
 {% if cloud_provider is defined and cloud_provider == "openstack" %}
-    - --cloud-provider=openstack
+    - --cloud-provider={{cloud_provider}}
     - --cloud-config={{ kube_config_dir }}/cloud_config
+{% elif cloud_provider is defined and cloud_provider == "aws" %}
+    - --cloud-provider={{cloud_provider}}
 {% endif %}
     livenessProbe:
       httpGet:
@@ -32,9 +34,6 @@ spec:
     - mountPath: {{ kube_cert_dir }}
       name: ssl-certs-kubernetes
       readOnly: true
-    - mountPath: /etc/ssl/certs
-      name: ssl-certs-host
-      readOnly: true
 {% if cloud_provider is defined and cloud_provider == "openstack" %}
     - mountPath: {{ kube_config_dir }}/cloud_config
       name: cloudconfig
@@ -44,9 +43,6 @@ spec:
   - hostPath:
       path: {{ kube_cert_dir }}
     name: ssl-certs-kubernetes
-  - hostPath:
-      path: /etc/ssl/certs/
-    name: ssl-certs-host
 {% if cloud_provider is defined and cloud_provider == "openstack" %}
   - hostPath:
       path: {{ kube_config_dir }}/cloud_config
@@ -12,7 +12,7 @@ spec:
     - /hyperkube
     - scheduler
     - --leader-elect=true
-    - --master=http://127.0.0.1:{{kube_apiserver_insecure_port}}
+    - --master={{ kube_apiserver_endpoint }}
     - --v={{ kube_log_level | default('2') }}
     livenessProbe:
       httpGet:
@@ -1,7 +1,29 @@
-# This directory is where all the additional scripts go
-# that Kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
+kube_apiserver_insecure_bind_address: 127.0.0.1
+
+# This is where you can drop yaml/json files and the kubelet will run those
+# pods on startup
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+
+dns_domain: "{{ cluster_name }}"
+
+# resolv.conf to base dns config
+kube_resolv_conf: "/etc/resolv.conf"
+
+kube_proxy_mode: iptables
+
+# If using the pure iptables proxy, SNAT everything
+kube_proxy_masquerade_all: true
+
+# kube_api_runtime_config:
+#   - extensions/v1beta1/daemonsets=true
+#   - extensions/v1beta1/deployments=true
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
 # This directory is where all the additional config stuff goes
 # that Kubernetes normally puts in /srv/kubernetes.
@@ -11,40 +33,5 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 # look in here. Don't do it.
 kube_config_dir: /etc/kubernetes

-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-# This is where you can drop yaml/json files and the kubelet will run those
-# pods on startup
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-dns_domain: "{{ cluster_name }}"
-
-kube_proxy_mode: iptables
-
-# An experimental dev/test only dynamic volumes provisioner,
-# for PetSets. Works for kube>=v1.3 only.
-kube_hostpath_dynamic_provisioner: "false"
-
-hyperkube_image_repo: "quay.io/coreos/hyperkube"
-hyperkube_image_tag: "{{ kube_version }}_coreos.0"
-
-# IP address of the DNS server.
-# Kubernetes will create a pod with several containers, serving as the DNS
-# server and expose it under this IP address. The IP address must be from
-# the range specified as kube_service_addresses. This magic will actually
-# pick the 10th ip address in the kube_service_addresses range and use that.
-dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
-
-# kube_api_runtime_config:
-#   - extensions/v1beta1/daemonsets=true
-#   - extensions/v1beta1/deployments=true
+nginx_image_repo: nginx
+nginx_image_tag: 1.11.4-alpine
@@ -2,14 +2,14 @@
 - name: restart kubelet
   command: /bin/true
   notify:
-    - reload systemd
-    - reload kubelet
+    - Kubelet | reload systemd
+    - Kubelet | reload kubelet

-- name: reload systemd
+- name: Kubelet | reload systemd
   command: systemctl daemon-reload
   when: ansible_service_mgr == "systemd"

-- name: reload kubelet
+- name: Kubelet | reload kubelet
   service:
     name: kubelet
     state: restarted
@@ -1,5 +1,7 @@
 ---
 dependencies:
   - role: download
-    file: "{{ downloads.kubernetes_kubelet }}"
+    file: "{{ downloads.hyperkube }}"
+  - role: download
+    file: "{{ downloads.pod_infra }}"
   - role: kubernetes/secrets
@@ -14,7 +14,6 @@
   when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "RedHat"
   notify: restart kubelet

-- name: install | Install kubelet binary
-  command: rsync -piu "{{ local_release_dir }}/kubernetes/bin/kubelet" "{{ bin_dir }}/kubelet"
-  register: kubelet_copy
-  changed_when: false
+- name: install | Install kubelet launch script
+  template: src=kubelet-container.j2 dest="{{ bin_dir }}/kubelet" owner=kube mode=0755 backup=yes
+  notify: restart kubelet
@@ -1,6 +1,9 @@
 ---
 - include: install.yml

+- include: nginx-proxy.yml
+  when: is_kube_master == false and loadbalancer_apiserver_localhost|default(false)
+
 - name: Write Calico cni config
   template:
     src: "cni-calico.conf.j2"
@@ -23,11 +26,6 @@
     src: manifests/kube-proxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"

-- name: Restart kubelet if binary changed
-  command: /bin/true
-  notify: restart kubelet
-  when: kubelet_copy.stdout_lines
-
 # reload-systemd
 - meta: flush_handlers
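The nginx-proxy include added above is opt-in: it only runs on non-master nodes, and only when `loadbalancer_apiserver_localhost` is truthy, giving each node a local endpoint in front of the apiservers. A minimal sketch of enabling it from inventory variables (the path is illustrative, not part of this diff):

    # inventory/group_vars/all.yml (hypothetical location)
    loadbalancer_apiserver_localhost: true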
@@ -0,0 +1,9 @@
---
- name: nginx-proxy | Write static pod
  template: src=manifests/nginx-proxy.manifest.j2 dest=/etc/kubernetes/manifests/nginx-proxy.yml

- name: nginx-proxy | Make nginx directory
  file: path=/etc/nginx state=directory mode=0700 owner=root

- name: nginx-proxy | Write nginx-proxy configuration
  template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes
@@ -1,9 +1,16 @@
 {
   "name": "calico-k8s-network",
   "type": "calico",
-  "etcd_authority": "127.0.0.1:2379",
   "log_level": "info",
   "ipam": {
     "type": "calico-ipam"
+  },
+{% if enable_network_policy is defined and enable_network_policy == True %}
+  "policy": {
+    "type": "k8s"
+  },
+{% endif %}
+  "kubernetes": {
+    "kubeconfig": "{{ kube_config_dir }}/node-kubeconfig.yaml"
   }
 }
@@ -39,6 +39,8 @@ DAEMON_USER=root
 #
 do_start()
 {
+    /usr/bin/docker rm -f kubelet &>/dev/null || true
+    sleep 1
     # Return
     #   0 if daemon has been started
     #   1 if daemon was already running
@@ -0,0 +1,15 @@
#!/bin/bash
/usr/bin/docker run --privileged --rm \
  --net=host --pid=host --name=kubelet \
  -v /etc/cni:/etc/cni:ro \
  -v /opt/cni:/opt/cni:ro \
  -v /etc/kubernetes:/etc/kubernetes \
  -v /sys:/sys \
  -v /dev:/dev \
  -v /var/lib/docker:/var/lib/docker \
  -v /var/run:/var/run \
  -v /var/lib/kubelet:/var/lib/kubelet \
  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
  nsenter --target=1 --mount --wd=. -- \
  ./hyperkube kubelet \
  $@
@@ -6,36 +6,38 @@ KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
 KUBE_LOGGING="--logtostderr=true"
 {% endif %}
 KUBE_LOG_LEVEL="--v={{ kube_log_level | default('2') }}"
-KUBE_ALLOW_PRIV="--allow_privileged=true"
 {% if inventory_hostname in groups['kube-node'] %}
-KUBELET_API_SERVER="--api_servers={% for host in groups['kube-master'] %}https://{{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address'])) }}:{{ kube_apiserver_port }}{% if not loop.last %},{% endif %}{% endfor %}"
+KUBELET_API_SERVER="--api_servers={{ kube_apiserver_endpoint }}"
 {% endif %}
 # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
 KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}"
 # The port for the info server to serve on
 # KUBELET_PORT="--port=10250"
 # You may leave this blank to use the actual hostname
-KUBELET_HOSTNAME="--hostname_override={{ inventory_hostname }}"
+KUBELET_HOSTNAME="--hostname-override={{ inventory_hostname }}"
 {% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
 KUBELET_REGISTER_NODE="--register-node=false"
 {% endif %}
 # location of the api-server
-{% if dns_setup %}
-KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }}"
+{% if dns_setup|bool and skip_dnsmasq|bool %}
+KUBELET_ARGS="--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
+{% elif dns_setup|bool %}
+KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
 {% else %}
-KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
+KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
 {% endif %}
 {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave"] %}
-KUBELET_NETWORK_PLUGIN="--network_plugin=cni --network-plugin-dir=/etc/cni/net.d"
+KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d"
 {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
 DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
 {% endif %}
 # Should this cluster be allowed to run privileged docker containers
-KUBE_ALLOW_PRIV="--allow_privileged=true"
+KUBE_ALLOW_PRIV="--allow-privileged=true"
 {% if cloud_provider is defined and cloud_provider == "openstack" %}
 KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
+{% elif cloud_provider is defined and cloud_provider == "aws" %}
+KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
 {% else %}
-{# TODO: gce and aws don't need the cloud provider to be set? #}
 KUBELET_CLOUDPROVIDER=""
 {% endif %}
 {% if ansible_service_mgr in ["sysvinit","upstart"] %}
@@ -2,9 +2,11 @@
 Description=Kubernetes Kubelet Server
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes
 {% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
-After=docker.service calico-node.service
+After=docker.service docker.socket calico-node.service
+Wants=docker.socket calico-node.service
 {% else %}
 After=docker.service
+Wants=docker.socket
 {% endif %}

 [Service]
@@ -22,7 +24,10 @@ ExecStart={{ bin_dir }}/kubelet \
         $KUBELET_REGISTER_NODE \
         $KUBELET_NETWORK_PLUGIN \
         $KUBELET_CLOUDPROVIDER
-Restart=on-failure
+ExecStartPre=-/usr/bin/docker rm -f kubelet
+ExecReload=/usr/bin/docker restart kubelet
+Restart=always
+RestartSec=10s

 [Install]
 WantedBy=multi-user.target
Some files were not shown because too many files have changed in this diff.