Merge branch 'master' into gpu2

commit 08179018d4
@@ -111,10 +111,10 @@ Supported Components
   - [cilium](https://github.com/cilium/cilium) v1.2.0
   - [contiv](https://github.com/contiv/install) v1.1.7
   - [flanneld](https://github.com/coreos/flannel) v0.10.0
-  - [weave](https://github.com/weaveworks/weave) v2.4.0
+  - [weave](https://github.com/weaveworks/weave) v2.4.1
 - Application
   - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
-  - [cert-manager](https://github.com/jetstack/cert-manager) v0.4.1
+  - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.0
   - [coredns](https://github.com/coredns/coredns) v1.2.2
   - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.19.0
@@ -9,8 +9,8 @@
 - name: "Kubernetes Apps | Install and configure MetalLB"
   kube:
     name: "MetalLB"
-    filename: "{{ kube_config_dir }}/metallb.yml"
     kubectl: "{{bin_dir}}/kubectl"
+    filename: "{{ kube_config_dir }}/{{ item.item }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ rendering.results }}"
   when:
@@ -22,8 +22,6 @@ export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx"
 export TF_VAR_AWS_SSH_KEY_NAME="yyy"
 export TF_VAR_AWS_DEFAULT_REGION="zzz"
 ```
-- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
-
 - Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
 - Create an AWS EC2 SSH Key
 - Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
@@ -1,4 +1,8 @@
 output "router_id" {
+  value = "${openstack_networking_router_v2.k8s.id}"
+}
+
+output "router_internal_port_id" {
   value = "${openstack_networking_router_interface_v2.k8s.id}"
 }
 
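With the fix above, `router_id` reports the router's own ID, and the interface ID moves to the new `router_internal_port_id` output. A quick way to read both back after an apply, assuming only the stock Terraform CLI:

```
terraform output router_id
terraform output router_internal_port_id
```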
@@ -21,7 +21,7 @@ The **calicoctl** command allows to check the status of the network workloads.
 calicoctl node status
 ```
 
-or for versions prior *v1.0.0*:
+or for versions prior to *v1.0.0*:
 
 ```
 calicoctl status

@@ -33,7 +33,7 @@ calicoctl status
 calicoctl get ippool -o wide
 ```
 
-or for versions prior *v1.0.0*:
+or for versions prior to *v1.0.0*:
 
 ```
 calicoctl pool show

@@ -73,7 +73,7 @@ In some cases you may want to route the pods subnet and so NAT is not needed on
 For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
 The following variables need to be set:
 `peer_with_router` to enable the peering with the datacenter's border router (default value: false).
-you'll need to edit the inventory and add a and a hostvar `local_as` by node.
+you'll need to edit the inventory and add a hostvar `local_as` by node.
 
 ```
 node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx

@@ -156,7 +156,7 @@ The inventory above will deploy the following topology assuming that calico's
 
 ##### Optional : Define default endpoint to host action
 
-By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) withing the same node are dropped.
+By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped.
 
 
 To re-define default action please set the following variable in your inventory:
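The variable that last sentence leads into is cut off in this extract; in the calico role defaults it is `calico_endpoint_to_host_action` (RETURN by default). A minimal inventory sketch, assuming that variable name matches your checkout:

```
calico_endpoint_to_host_action: "ACCEPT"
```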
@@ -54,16 +54,18 @@ The default configuration uses VXLAN to create an overlay. Two networks are crea
 
 You can change the default network configuration by overriding the `contiv_networks` variable.
 
-The default forward mode is set to routing:
+The default forward mode is set to routing and the default network mode is vxlan:
 
 ```yaml
 contiv_fwd_mode: routing
+contiv_net_mode: vxlan
 ```
 
 The following is an example of how you can use VLAN instead of VXLAN:
 
 ```yaml
 contiv_fwd_mode: bridge
+contiv_net_mode: vlan
 contiv_vlan_interface: eth0
 contiv_networks:
   - name: default-net
@@ -40,3 +40,14 @@ The full list of available vars may be found in the download's ansible role defa
 Those also allow to specify custom urls and local repositories for binaries and container
 images as well. See also the DNS stack docs for the related intranet configuration,
 so the hosts can resolve those urls and repos.
+
+## Offline environment
+
+In case your servers don't have access to internet (for example when deploying on premises with security constraints), you'll have, first, to setup the appropriate proxies/caches/mirrors and/or internal repositories and registries and, then, adapt the following variables to fit your environment before deploying:
+
+* At least `foo_image_repo` and `foo_download_url` as described before (i.e. in case of use of proxies to registries and binaries repositories, checksums and versions do not necessarily need to be changed).
+  NB: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
+* Depending on the `container_manager`
+  * When `container_manager=docker`, `docker_foo_repo_base_url`, `docker_foo_repo_gpgkey`, `dockerproject_bar_repo_base_url` and `dockerproject_bar_repo_gpgkey` (where `foo` is the distribution and `bar` is system package manager)
+  * When `container_manager=crio`, `crio_rhel_repo_base_url`
+* When using Helm, `helm_stable_repo_url`
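A minimal sketch of what those offline overrides can look like in group_vars, assuming a hypothetical internal registry `registry.local` and binary mirror `mirror.local`:

```
kube_image_repo: "registry.local/google_containers"
etcd_download_url: "https://mirror.local/etcd/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
docker_insecure_registries:
  - registry.local
```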
@@ -69,7 +69,7 @@ minute which may require large etcd containers or even dedicated nodes for etcd.
 
 > If we calculate the number of tries, the division will give 5, but in reality
 > it will be from 3 to 5 with `nodeStatusUpdateRetry` attempts of each try. The
-> total number of attemtps will vary from 15 to 25 due to latency of all
+> total number of attempts will vary from 15 to 25 due to latency of all
 > components.
 
 ## Medium Update and Average Reaction

@@ -92,7 +92,7 @@ etcd updates per minute.
 Let's set `-–node-status-update-frequency` to **1m**.
 `--node-monitor-grace-period` will set to **5m** and `--pod-eviction-timeout`
 to **1m**. In this scenario, every kubelet will try to update the status every
-minute. There will be 5 * 5 = 25 attempts before unhealty status. After 5m,
+minute. There will be 5 * 5 = 25 attempts before unhealthy status. After 5m,
 Kubernetes controller manager will set unhealthy status. This means that pods
 will be evicted after 1m after being marked unhealthy. (6m in total).
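In kubespray these flags are normally set through inventory variables rather than passed by hand; a sketch of the 1m/5m/1m scenario above, assuming the variable names used by the kubelet and controller-manager roles of this era:

```
kubelet_status_update_frequency: 1m
kube_controller_node_monitor_grace_period: 5m
kube_controller_pod_eviction_timeout: 1m
```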
@@ -39,7 +39,7 @@ Then you can use the instance ids to find the connected [neutron](https://wiki.o
 Given the port ids on the left, you can set the two `allowed_address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.)
 
 # allow kube_service_addresses and kube_pods_subnet network
-openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address ip_address=10.233.0.0/18,ip_address=10.233.64.0/18
-openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address ip_address=10.233.0.0/18,ip_address=10.233.64.0/18
+openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
+openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
 
 Now you can finally run the playbook.
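The corrected flags can be verified by reading the pairs back; a sketch with one of the port ids from the doc:

```
openstack port show 5662a4e0-e646-47f0-bf88-d80fbd2d99ef -c allowed_address_pairs
```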
@@ -35,7 +35,7 @@ ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.
 
 #### Graceful upgrade
 
 Kubespray also supports cordon, drain and uncordoning of nodes when performing
 a cluster upgrade. There is a separate playbook used for this purpose. It is
 important to note that upgrade-cluster.yml can only be used for upgrading an
 existing cluster. That means there must be at least 1 kube-master already

@@ -86,7 +86,7 @@ for impact to user deployed pods.
 
 A deployer may want to upgrade specific components in order to minimize risk
 or save time. This strategy is not covered by CI as of this writing, so it is
 not guaranteed to work.
 
 These commands are useful only for upgrading fully-deployed, healthy, existing
 hosts. This will definitely not work for undeployed or partially deployed
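The graceful path described in the first hunk runs through its own playbook; a usage sketch in the same style as the doc's other examples, with the target version pinned explicitly:

```
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.11.3
```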
diff --git a/docs/vars.md b/docs/vars.md
@@ -126,9 +126,20 @@ node_labels:
   label1_name: label1_value
   label2_name: label2_value
 ```
+* *podsecuritypolicy_enabled* - When set to `true`, enables the PodSecurityPolicy admission controller and defines two policies `privileged` (applying to all resources in `kube-system` namespace and kubelet) and `restricted` (applying all other namespaces).
+  Addons deployed in kube-system namespaces are handled.
+* *kubernetes_audit* - When set to `true`, enables Auditing.
+  The auditing parameters can be tuned via the following variables (which default values are shown below):
+  * `audit_log_path`: /var/log/audit/kube-apiserver-audit.log
+  * `audit_log_maxage`: 30
+  * `audit_log_maxbackups`: 1
+  * `audit_log_maxsize`: 100
+  * `audit_policy_file`: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"
+
+  By default, the `audit_policy_file` contains [default rules](https://github.com/kubernetes-incubator/kubespray/blob/master/roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2) that can be overriden with the `audit_policy_custom_rules` variable.
 
 ##### Custom flags for Kube Components
-For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
+For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. The `kubelet_node_custom_flags` apply kubelet settings only to nodes and not masters. Example:
 ```
 kubelet_custom_flags:
   - "--eviction-hard=memory.available<100Mi"

@@ -140,6 +151,7 @@ The possible vars are:
 * *controller_mgr_custom_flags*
 * *scheduler_custom_flags*
 * *kubelet_custom_flags*
+* *kubelet_node_custom_flags*
 
 #### User accounts
 
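A minimal sketch of enabling the new audit knobs in group_vars, using only variables named in the hunk above (values illustrative, including the custom rule):

```
kubernetes_audit: true
audit_log_maxsize: 200
audit_policy_custom_rules: |
  - level: None
    users: ["system:kube-proxy"]
```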
@@ -9,7 +9,7 @@ Weave uses [**consensus**](https://www.weave.works/docs/net/latest/ipam/##consen
 
 Weave encryption is supported for all communication
 
-* To use Weave encryption, specify a strong password (if no password, no encrytion)
+* To use Weave encryption, specify a strong password (if no password, no encryption)
 
 ```
 # In file ./inventory/sample/group_vars/k8s-cluster.yml
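The hunk cuts off just before the variable itself; per the weave vars file added later in this diff it is `weave_password`. A minimal sketch (the password value is a placeholder):

```
# In file ./inventory/sample/group_vars/k8s-cluster.yml
weave_password: EnterPasswordHere
```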
@@ -38,11 +38,6 @@ bin_dir: /usr/local/bin
 ## modules.
 #kubelet_load_modules: false
 
-## With calico it is possible to distributed routes with border routers of the datacenter.
-## Warning : enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
-
 ## Upstream dns servers used by dnsmasq
 #upstream_dns_servers:
 # - 8.8.8.8

@@ -70,22 +70,6 @@ kube_users:
 # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
 kube_network_plugin: calico
 
-# Weave deployment
-# weave_password: ~
-# weave_checkpoint_disable: false
-# weave_conn_limit: 100
-# weave_hairpin_mode: true
-# weave_ipalloc_range: {{ kube_pods_subnet }}
-# weave_expect_npc: {{ enable_network_policy }}
-# weave_kube_peers: ~
-# weave_ipalloc_init: ~
-# weave_expose_ip: ~
-# weave_metrics_addr: ~
-# weave_status_addr: ~
-# weave_mtu: 1376
-# weave_no_masq_local: true
-# weave_extra_args: ~
-
 # Kubernetes internal network for services, unused block of space.
 kube_service_addresses: 10.233.0.0/18
 

@@ -154,6 +138,14 @@ k8s_image_pull_policy: IfNotPresent
 # audit log for kubernetes
 kubernetes_audit: false
 
+# dynamic kubelet configuration
+dynamic_kubelet_configuration: false
+
+# define kubelet config dir for dynamic kubelet
+#kubelet_config_dir:
+default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
+dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
+
 # pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
 podsecuritypolicy_enabled: false
 
@@ -0,0 +1,20 @@
+# see roles/network_plugin/calico/defaults/main.yml
+
+## With calico it is possible to distributed routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each nodes will be distributed by the datacenter router
+#peer_with_router: false
+
+# Enables Internet connectivity from containers
+# nat_outgoing: true
+
+# add default ippool name
+# calico_pool_name: "default-pool"
+
+# Global as_num (/calico/bgp/v1/global/as_num)
+# global_as_num: "64512"
+
+# You can set MTU value here. If left undefined or empty, it will
+# not be specified in calico CNI config, so Calico will use built-in
+# defaults. The value should be a number, not a string.
+# calico_mtu: 1500
@@ -0,0 +1,11 @@
+# see roles/network_plugin/canal/defaults/main.yml
+
+# The interface used by canal for host <-> host communication.
+# If left blank, then the interface is chosing using the node's
+# default route.
+# canal_iface: ""
+
+# Whether or not to masquerade traffic to destinations not within
+# the pod network.
+# canal_masquerade: "true"
+
@@ -0,0 +1 @@
+# see roles/network_plugin/cilium/defaults/main.yml
@@ -0,0 +1,20 @@
+# see roles/network_plugin/contiv/defaults/main.yml
+
+# Forwarding mode: bridge or routing
+# contiv_fwd_mode: routing
+
+## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
+## In this case, you may need to peer with an uplink
+## NB: The hostvars must contain a key "contiv" of which value is a dict containing "router_ip", "as"(defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), "neighbor"
+#contiv_peer_with_uplink_leaf: false
+#contiv_global_as: "65002"
+#contiv_global_neighbor_as: "500"
+
+# Fabric mode: aci, aci-opflex or default
+# contiv_fabric_mode: default
+
+# Defaut netmode: vxlan or vlan
+# contiv_net_mode: vxlan
+
+# Dataplane interface
+# contiv_vlan_interface: ""
@@ -0,0 +1,16 @@
+# see roles/network_plugin/flannel/defaults/main.yml
+
+## interface that should be used for flannel operations
+## This is actually an inventory cluster-level item
+# flannel_interface:
+
+## Select interface that should be used for flannel operations by regexp on Name or IP
+## This is actually an inventory cluster-level item
+## example: select interface with ip from net 10.0.0.0/23
+## single quote and escape backslashes
+# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
+
+# You can choose what type of flannel backend to use: 'vxlan' or 'host-gw'
+# for experimental backend
+# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
+# flannel_backend_type: "vxlan"
@@ -0,0 +1,58 @@
+# see roles/network_plugin/weave/defaults/main.yml
+
+# Weave's network password for encryption, if null then no network encryption.
+# weave_password: ~
+
+# If set to 1, disable checking for new Weave Net versions (default is blank,
+# i.e. check is enabled)
+# weave_checkpoint_disable: false
+
+# Soft limit on the number of connections between peers. Defaults to 100.
+# weave_conn_limit: 100
+
+# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
+# for containers attached. If you need to disable hairpin, e.g. your kernel is
+# one of those that can panic if hairpin is enabled, then you can disable it by
+# setting `HAIRPIN_MODE=false`.
+# weave_hairpin_mode: true
+
+# The range of IP addresses used by Weave Net and the subnet they are placed in
+# (CIDR format; default 10.32.0.0/12)
+# weave_ipalloc_range: "{{ kube_pods_subnet }}"
+
+# Set to 0 to disable Network Policy Controller (default is on)
+# weave_expect_npc: "{{ enable_network_policy }}"
+
+# List of addresses of peers in the Kubernetes cluster (default is to fetch the
+# list from the api-server)
+# weave_kube_peers: ~
+
+# Set the initialization mode of the IP Address Manager (defaults to consensus
+# amongst the KUBE_PEERS)
+# weave_ipalloc_init: ~
+
+# Set the IP address used as a gateway from the Weave network to the host
+# network - this is useful if you are configuring the addon as a static pod.
+# weave_expose_ip: ~
+
+# Address and port that the Weave Net daemon will serve Prometheus-style
+# metrics on (defaults to 0.0.0.0:6782)
+# weave_metrics_addr: ~
+
+# Address and port that the Weave Net daemon will serve status requests on
+# (defaults to disabled)
+# weave_status_addr: ~
+
+# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
+# underlying network has a tighter limit, or set a larger size for better
+# performance if your network supports jumbo frames (e.g. 8916)
+# weave_mtu: 1376
+
+# Set to 1 to preserve the client source IP address when accessing Service
+# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
+# only with Weave IPAM (default).
+# weave_no_masq_local: true
+
+# Extra variables that passing to launch.sh, useful for enabling seed mode, see
+# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
+# weave_extra_args: ~
@@ -33,6 +33,7 @@ image_arch: "{{host_architecture | default('amd64')}}"
 kube_version: v1.11.3
 kubeadm_version: "{{ kube_version }}"
 etcd_version: v3.2.18
+
 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
 calico_version: "v3.1.3"

@@ -40,12 +41,14 @@ calico_ctl_version: "v3.1.3"
 calico_cni_version: "v3.1.3"
 calico_policy_version: "v3.1.3"
 calico_rr_version: "v0.6.1"
+
 flannel_version: "v0.10.0"
 flannel_cni_version: "v0.3.0"
+
 vault_version: 0.10.1
-weave_version: "2.4.0"
+weave_version: "2.4.1"
 pod_infra_version: 3.1
-contiv_version: 1.1.7
+contiv_version: 1.2.1
 cilium_version: "v1.2.0"
 
 # Download URLs
@@ -55,10 +58,39 @@ etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_ver
 hyperkube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/hyperkube"
 
 # Checksums
-etcd_checksum: b729db0732448064271ea6fdcb901773c4fe917763ca07776f22d0e5e0bd4097
-hyperkube_checksum: dac8da16dd6688e52b5dc510f5dd0a20b54350d52fb27ceba2f018ba2c8be692
-kubeadm_checksum: 422a7a32ed9a7b1eaa2a4f9d121674dfbe80eb41e206092c13017d097f75aaec
+hyperkube_checksums:
+  v1.11.3: dac8da16dd6688e52b5dc510f5dd0a20b54350d52fb27ceba2f018ba2c8be692
+  v1.11.2: d727f8cae3fc26b1add9b4ff0d4d9b99605544ff7fb3baeecdca394362adbfb8
+  v1.11.1: 019ce1ecf4c6a70c06a7f4ef107443351458b4d9e6b9ce4a436bfbfbef93feea
+  v1.11.0: 7e191c164dc2c942abd37e4b50846e0be31ca959afffeff6b034beacbc2a106a
+  v1.10.8: f8a68514a6c858089f44ec93b2ffb2d764ea67d3b02b19112348f73ffcfe4386
+  v1.10.7: 13e25eb39467014fd169f38b7cd6bec8ff55525b8001c7abba85957e6470b6cc
+  v1.10.6: 0daa34fa58470e5f20def10d3dd544922c28c558719d3338ad8c524154c91257
+  v1.10.5: 1a53456f9d33a7c07adb1636f20f1d0b92b8e7647063a70d0ce134a238e680fe
+  v1.10.4: 16e36693c15494036d930139a749ec1bc492b7fefa2c3adc1abbe8f38178ae7c
+  v1.10.3: e807753dc309635902a56069ee06fc390944ef034b72c53b2e1e51d0c9ead8a3
+  v1.10.2: 3843fb594a18c4a64d77736bab72000ec4b8c4ddf178e20ec3249f709e9ed9c1
+  v1.10.1: 6e0642ad6bae68dc81b8d1c9efa18e265e17e23da1895862823cafac08c0344c
+  v1.10.0: b5575b2fb4266754c1675b8cd5d9b6cac70f3fee7a05c4e80da3a9e83e58c57e
+kubeadm_checksums:
+  v1.11.3: 422a7a32ed9a7b1eaa2a4f9d121674dfbe80eb41e206092c13017d097f75aaec
+  v1.11.2: 6b17720a65b8ff46efe92a5544f149c39a221910d89939838d75581d4e6924c0
+  v1.11.1: 425ec24b95f7217ee06d1588aba22f206a5829f8c6a5352c2862368552361fe6
+  v1.11.0: 0000478fc59a24ec1727de744188d13c4d702a644954132efa9d9954371b3553
+  v1.10.8: 42660875dd94c93267bd2f567c67d692b362bd143d7502967a62c5474b2b25b8
+  v1.10.7: cdeb07fd3705e973800c4aa0b8a510d5dba1de8e1039428cfebdaf3d93e332b6
+  v1.10.6: e1d49a6b33b384f681468add2e9ee08552069ae0d6b0ad59e1c943ddbaeac3fa
+  v1.10.5: f231d4bcc9f2ed15597272e5359e380cc760c0b57a1f7cb97ce2bbab5df774e0
+  v1.10.4: 7e1169bbbeed973ab402941672dec957638dea5952a1e8bc89a37d5e709cc4b4
+  v1.10.3: b2a6f0764b89a4a13a3da4471af943ce98efeb29e2913c9e7880fe27f4f43a5f
+  v1.10.2: 394d7d340214c91d669186cf4f2110d8eb840ca965399b4d8b22d0545a60e377
+  v1.10.1: 012e48fb92b1c22543b12ab2db7d780777972043287404c98cca4d2c6ec964ec
+  v1.10.0: ebbac985834289037b544523c3e2f39bb44bea938aca9d9e88ef7e880fb8472f
+
+etcd_binary_checksum: b729db0732448064271ea6fdcb901773c4fe917763ca07776f22d0e5e0bd4097
 vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
+hyperkube_binary_checksum: "{{ hyperkube_checksums[kube_version] }}"
+kubeadm_binary_checksum: "{{ kubeadm_checksums[kubeadm_version] }}"
 
 # Containers
 # In some cases, we need a way to set --registry-mirror or --insecure-registry for docker,
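With checksums keyed by version, pinning a different release only requires the matching dict entries; `hyperkube_binary_checksum` and `kubeadm_binary_checksum` then resolve themselves. A sketch for a hypothetical version not yet listed (note that a plain group_vars override replaces the whole dict unless Ansible's hash_behaviour is set to merge, so entries are best added in the role defaults):

```
kube_version: v1.11.4
hyperkube_checksums:
  v1.11.4: <sha256 of the v1.11.4 hyperkube binary>
kubeadm_checksums:
  v1.11.4: <sha256 of the v1.11.4 kubeadm binary>
```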
@@ -95,16 +127,20 @@ netcheck_agent_img_repo: "mirantis/k8s-netchecker-agent"
 netcheck_agent_tag: "{{ netcheck_version }}"
 netcheck_server_img_repo: "mirantis/k8s-netchecker-server"
 netcheck_server_tag: "{{ netcheck_version }}"
-weave_kube_image_repo: "weaveworks/weave-kube"
+weave_kube_image_repo: "docker.io/weaveworks/weave-kube"
 weave_kube_image_tag: "{{ weave_version }}"
-weave_npc_image_repo: "weaveworks/weave-npc"
+weave_npc_image_repo: "docker.io/weaveworks/weave-npc"
 weave_npc_image_tag: "{{ weave_version }}"
 contiv_image_repo: "contiv/netplugin"
 contiv_image_tag: "{{ contiv_version }}"
+contiv_init_image_repo: "contiv/netplugin-init"
+contiv_init_image_tag: "latest"
 contiv_auth_proxy_image_repo: "contiv/auth_proxy"
 contiv_auth_proxy_image_tag: "{{ contiv_version }}"
 contiv_etcd_init_image_repo: "ferest/etcd-initer"
 contiv_etcd_init_image_tag: latest
+contiv_ovs_image_repo: "contiv/ovs"
+contiv_ovs_image_tag: "latest"
 cilium_image_repo: "docker.io/cilium/cilium"
 cilium_image_tag: "{{ cilium_version }}"
 nginx_image_repo: nginx

@@ -112,7 +148,7 @@ nginx_image_tag: 1.13
 dnsmasq_version: 2.78
 dnsmasq_image_repo: "andyshinn/dnsmasq"
 dnsmasq_image_tag: "{{ dnsmasq_version }}"
-kubedns_version: 1.14.10
+kubedns_version: 1.14.11
 kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-{{ image_arch }}"
 kubedns_image_tag: "{{ kubedns_version }}"
 
@@ -161,7 +197,7 @@ ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/ngin
 ingress_nginx_controller_image_tag: "0.19.0"
 ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
 ingress_nginx_default_backend_image_tag: "1.4"
-cert_manager_version: "v0.4.1"
+cert_manager_version: "v0.5.0"
 cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
 cert_manager_controller_image_tag: "{{ cert_manager_version }}"
 
@@ -174,6 +210,7 @@ downloads:
     sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   netcheck_agent:
     enabled: "{{ deploy_netchecker }}"
     container: true

@@ -182,20 +219,16 @@ downloads:
     sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   etcd:
+    container: "{{ etcd_deployment_type != 'host' }}"
+    file: "{{ etcd_deployment_type == 'host' }}"
     enabled: true
-    container: true
-    repo: "{{ etcd_image_repo }}"
-    tag: "{{ etcd_image_tag }}"
-    sha256: "{{ etcd_digest_checksum|default(None) }}"
-    groups:
-    - etcd
-  etcd_file:
-    enabled: true
-    file: true
     version: "{{ etcd_version }}"
     dest: "etcd-{{ etcd_version }}-linux-amd64.tar.gz"
-    sha256: "{{ etcd_checksum }}"
+    repo: "{{ etcd_image_repo }}"
+    tag: "{{ etcd_image_tag }}"
+    sha256: "{{ etcd_binary_checksum if etcd_deployment_type == 'host' else etcd_digest_checksum|d(None) }}"
     source_url: "{{ etcd_download_url }}"
     url: "{{ etcd_download_url }}"
     unarchive: true
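The old `etcd` (container) and `etcd_file` (tarball) entries collapse into one whose behaviour flips on `etcd_deployment_type`; a sketch of the inventory switch it reacts to:

```
# 'host' fetches the tarball and verifies etcd_binary_checksum;
# 'docker' pulls the etcd image and verifies the digest checksum instead
etcd_deployment_type: host
```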
@@ -203,12 +236,13 @@ downloads:
     mode: "0755"
     groups:
     - etcd
+
   kubeadm:
     enabled: "{{ kubeadm_enabled }}"
     file: true
     version: "{{ kubeadm_version }}"
     dest: "kubeadm"
-    sha256: "{{ kubeadm_checksum }}"
+    sha256: "{{ kubeadm_binary_checksum }}"
     source_url: "{{ kubeadm_download_url }}"
     url: "{{ kubeadm_download_url }}"
     unarchive: false
@@ -216,6 +250,7 @@ downloads:
     mode: "0755"
     groups:
     - k8s-cluster
+
   hyperkube:
     enabled: true
     container: true

@@ -224,12 +259,13 @@ downloads:
     sha256: "{{ hyperkube_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   hyperkube_file:
     enabled: true
     file: true
     version: "{{ kube_version }}"
     dest: "hyperkube"
-    sha256: "{{ hyperkube_checksum }}"
+    sha256: "{{ hyperkube_binary_checksum }}"
     source_url: "{{ hyperkube_download_url }}"
     url: "{{ hyperkube_download_url }}"
     unarchive: false

@@ -237,6 +273,7 @@ downloads:
     mode: "0755"
     groups:
     - k8s-cluster
+
   cilium:
     enabled: "{{ kube_network_plugin == 'cilium' }}"
     container: true

@@ -245,6 +282,7 @@ downloads:
     sha256: "{{ cilium_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   flannel:
     enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
     container: true

@@ -253,6 +291,7 @@ downloads:
     sha256: "{{ flannel_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   flannel_cni:
     enabled: "{{ kube_network_plugin == 'flannel' }}"
     container: true

@@ -261,6 +300,7 @@ downloads:
     sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   calicoctl:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true

@@ -269,6 +309,7 @@ downloads:
     sha256: "{{ calicoctl_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   calico_node:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true

@@ -277,6 +318,7 @@ downloads:
     sha256: "{{ calico_node_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   calico_cni:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true

@@ -285,6 +327,7 @@ downloads:
     sha256: "{{ calico_cni_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   calico_policy:
     enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}"
     container: true

@@ -293,6 +336,7 @@ downloads:
     sha256: "{{ calico_policy_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   calico_rr:
     enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}"
     container: true

@@ -301,6 +345,7 @@ downloads:
     sha256: "{{ calico_rr_digest_checksum|default(None) }}"
     groups:
     - calico-rr
+
   weave_kube:
     enabled: "{{ kube_network_plugin == 'weave' }}"
     container: true

@@ -309,6 +354,7 @@ downloads:
     sha256: "{{ weave_kube_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   weave_npc:
     enabled: "{{ kube_network_plugin == 'weave' }}"
     container: true

@@ -317,6 +363,7 @@ downloads:
     sha256: "{{ weave_npc_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   contiv:
     enabled: "{{ kube_network_plugin == 'contiv' }}"
     container: true

@@ -325,6 +372,7 @@ downloads:
     sha256: "{{ contiv_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   contiv_auth_proxy:
     enabled: "{{ kube_network_plugin == 'contiv' }}"
     container: true

@@ -333,6 +381,7 @@ downloads:
     sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   contiv_etcd_init:
     enabled: "{{ kube_network_plugin == 'contiv' }}"
     container: true

@@ -341,6 +390,7 @@ downloads:
     sha256: "{{ contiv_etcd_init_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   pod_infra:
     enabled: true
     container: true

@@ -349,6 +399,7 @@ downloads:
     sha256: "{{ pod_infra_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   install_socat:
     enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}"
     container: true

@@ -357,6 +408,7 @@ downloads:
     sha256: "{{ install_socat_digest_checksum|default(None) }}"
     groups:
     - k8s-cluster
+
   nginx:
     enabled: "{{ loadbalancer_apiserver_localhost }}"
     container: true

@@ -365,6 +417,7 @@ downloads:
     sha256: "{{ nginx_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   dnsmasq:
     enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}"
     container: true

@@ -373,6 +426,7 @@ downloads:
     sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   kubedns:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true

@@ -381,6 +435,7 @@ downloads:
     sha256: "{{ kubedns_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   coredns:
     enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
     container: true

@@ -389,6 +444,7 @@ downloads:
     sha256: "{{ coredns_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   dnsmasq_nanny:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true

@@ -397,6 +453,7 @@ downloads:
     sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   dnsmasq_sidecar:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true

@@ -405,6 +462,7 @@ downloads:
     sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   kubednsautoscaler:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true

@@ -413,12 +471,14 @@ downloads:
     sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   testbox:
     enabled: false
     container: true
     repo: "{{ test_image_repo }}"
     tag: "{{ test_image_tag }}"
     sha256: "{{ testbox_digest_checksum|default(None) }}"
+
   elasticsearch:
     enabled: "{{ efk_enabled }}"
     container: true

@@ -427,6 +487,7 @@ downloads:
     sha256: "{{ elasticsearch_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   fluentd:
     enabled: "{{ efk_enabled }}"
     container: true

@@ -435,6 +496,7 @@ downloads:
     sha256: "{{ fluentd_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   kibana:
     enabled: "{{ efk_enabled }}"
     container: true

@@ -443,6 +505,7 @@ downloads:
     sha256: "{{ kibana_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   helm:
     enabled: "{{ helm_enabled }}"
     container: true

@@ -451,6 +514,7 @@ downloads:
     sha256: "{{ helm_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   tiller:
     enabled: "{{ helm_enabled }}"
     container: true

@@ -459,6 +523,7 @@ downloads:
     sha256: "{{ tiller_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   vault:
     enabled: "{{ cert_management == 'vault' }}"
     container: "{{ vault_deployment_type != 'host' }}"

@@ -475,6 +540,7 @@ downloads:
     version: "{{ vault_version }}"
     groups:
     - vault
+
   registry:
     enabled: "{{ registry_enabled }}"
     container: true

@@ -483,6 +549,7 @@ downloads:
     sha256: "{{ registry_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   registry_proxy:
     enabled: "{{ registry_enabled }}"
     container: true

@@ -491,6 +558,7 @@ downloads:
     sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   local_volume_provisioner:
     enabled: "{{ local_volume_provisioner_enabled }}"
     container: true

@@ -499,6 +567,7 @@ downloads:
     sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   cephfs_provisioner:
     enabled: "{{ cephfs_provisioner_enabled }}"
     container: true

@@ -507,6 +576,7 @@ downloads:
     sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   ingress_nginx_controller:
     enabled: "{{ ingress_nginx_enabled }}"
     container: true

@@ -515,6 +585,7 @@ downloads:
     sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   ingress_nginx_default_backend:
     enabled: "{{ ingress_nginx_enabled }}"
     container: true

@@ -523,6 +594,7 @@ downloads:
     sha256: "{{ ingress_nginx_default_backend_digest_checksum|default(None) }}"
     groups:
     - kube-node
+
   cert_manager_controller:
     enabled: "{{ cert_manager_enabled }}"
     container: true
@@ -1,21 +1,25 @@
 ---
-- name: install | Copy etcd binary from download dir
-  shell: |
-    rsync -piu "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/etcd" "{{ bin_dir }}/etcd"
-    rsync -piu "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/etcdctl" "{{ bin_dir }}/etcdctl"
+- name: install | Copy etcd and etcdctl binary from download dir
+  synchronize:
+    src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/{{ item }}"
+    dest: "{{ bin_dir }}/{{ item }}"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
   changed_when: false
+  delegate_to: "{{ inventory_hostname }}"
+  with_items:
+  - "etcd"
+  - "etcdctl"
   when: etcd_cluster_setup
 
-- name: install | Set etcd binary permissions
+- name: install | Set etcd and etcdctl binary permissions
   file:
-    path: "{{ bin_dir }}/etcd"
+    path: "{{ bin_dir }}/{{ item }}"
     mode: "0755"
     state: file
+  with_items:
+  - "etcd"
+  - "etcdctl"
   when: etcd_cluster_setup
-
-- name: install | Set etcdctl binary permissions
-  file:
-    path: "{{ bin_dir }}/etcdctl"
-    mode: "0755"
-    state: file
-  when: etcd_cluster_setup
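Since both binaries now land through the same loop, a quick smoke test across the etcd group can confirm they arrived executable; a sketch assuming the default bin_dir of /usr/local/bin:

```
ansible etcd -i inventory/sample/hosts.ini -b -m command -a "/usr/local/bin/etcdctl --version"
```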
@@ -1,6 +1,6 @@
 ---
 # Versions
-kubedns_version: 1.14.10
+kubedns_version: 1.14.11
 kubednsautoscaler_version: 1.1.2
 
 # Limits for dnsmasq/kubedns apps
@@ -9,14 +9,21 @@ metadata:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
     kubernetes.io/name: "CoreDNS"
+  annotations:
+    prometheus.io/path: /metrics
+    prometheus.io/port: "9153"
+    prometheus.io/scrape: "true"
 spec:
   selector:
     k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
   clusterIP: {{ clusterIP }}
   ports:
     - name: dns
       port: 53
       protocol: UDP
     - name: dns-tcp
       port: 53
       protocol: TCP
+    - name: metrics
+      port: 9153
+      protocol: TCP
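With the annotations and the 9153 port on the service, Prometheus can discover CoreDNS on its own; a manual spot-check, assuming any in-cluster pod with curl available:

```
kubectl -n kube-system get svc coredns -o jsonpath='{.spec.clusterIP}'
curl http://<that-cluster-ip>:9153/metrics | head
```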
@@ -68,6 +68,7 @@
       {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
       {% if tiller_enable_tls %} --tiller-tls --tiller-tls-verify --tiller-tls-cert={{ tiller_tls_cert }} --tiller-tls-key={{ tiller_tls_key }} --tls-ca-cert={{ tiller_tls_ca_cert }} {% endif %}
       {% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %}
+      --debug --dry-run
       | kubectl apply -f -
   changed_when: false
   when:

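The added `--debug --dry-run` pair is what makes the surrounding pipeline work: with both flags, `helm init` renders Tiller's manifests to stdout instead of installing them, and the `| kubectl apply -f -` tail applies the result, so the overrides take effect without Helm owning the install. Reduced to a standalone task it looks roughly like this (options are illustrative):

```yaml
- name: Render Tiller manifests locally and apply them (sketch)
  shell: >
    {{ bin_dir }}/helm init --upgrade
    --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}'
    --debug --dry-run
    | {{ bin_dir }}/kubectl apply -f -
  changed_when: false
```
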
@@ -5,3 +5,4 @@ metadata:
   name: {{ cert_manager_namespace }}
   labels:
     name: {{ cert_manager_namespace }}
+    certmanager.k8s.io/disable-validation: "true"

@@ -5,7 +5,7 @@ metadata:
   name: cert-manager
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 rules:
@@ -13,12 +13,7 @@ rules:
     resources: ["certificates", "issuers", "clusterissuers"]
     verbs: ["*"]
   - apiGroups: [""]
-    # TODO: remove endpoints once 0.4 is released. We include it here in case
-    # users use the 'master' version of the Helm chart with a 0.2.x release of
-    # cert-manager that still performs leader election with Endpoint resources.
-    # We advise users don't do this, but some will anyway and this will reduce
-    # friction.
-    resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"]
+    resources: ["configmaps", "secrets", "events", "services", "pods"]
     verbs: ["*"]
   - apiGroups: ["extensions"]
     resources: ["ingresses"]

@@ -5,7 +5,7 @@ metadata:
   name: cert-manager
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 roleRef:

@@ -3,9 +3,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: certificates.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": crd-install
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:

@@ -3,9 +3,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: clusterissuers.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": crd-install
 labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:

@@ -3,9 +3,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: issuers.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": crd-install
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:

@@ -6,7 +6,7 @@ metadata:
   namespace: {{ cert_manager_namespace }}
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:

@@ -6,6 +6,6 @@ metadata:
   namespace: {{ cert_manager_namespace }}
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller

@@ -33,6 +33,46 @@
   when: "contiv_global_config.networkInfraType != contiv_fabric_mode"
   run_once: true

+- name: Contiv | Set peer hostname
+  set_fact:
+    contiv_peer_hostname: >-
+      {%- if override_system_hostname|default(true) -%}
+      {{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['inventory_hostname']}) }}
+      {%- else -%}
+      {{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['ansible_fqdn']}) }}
+      {%- endif -%}
+  with_items: "{{ groups['k8s-cluster'] }}"
+  run_once: true
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+
+- name: Contiv | Get BGP configuration
+  command: |
+    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
+      bgp ls --json
+  register: bgp_config
+  run_once: true
+  changed_when: false
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+
+- name: Contiv | Configure peering with router(s)
+  command: |
+    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
+      bgp create {{ item.value }} \
+      --router-ip="{{ hostvars[item.key]['contiv']['router_ip'] }}" \
+      --as="{{ hostvars[item.key]['contiv']['as'] | default(contiv_global_as) }}" \
+      --neighbor-as="{{ hostvars[item.key]['contiv']['neighbor_as'] | default(contiv_global_neighbor_as) }}" \
+      --neighbor="{{ hostvars[item.key]['contiv']['neighbor'] }}"
+  run_once: true
+  with_dict: "{{ contiv_peer_hostname }}"
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+    - bgp_config.stdout|from_json|length == 0 or not item.value in bgp_config.stdout|from_json|map(attribute='key')|list
+
 - name: Contiv | Get existing networks
   command: |
     {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \

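`netctl bgp create` is not idempotent on its own, which is why the task above first registers `bgp ls --json` and only creates a peering when the config is empty or this host is missing from the existing entries. The guard in isolation, with hypothetical peer arguments (the registered `bgp_config` variable is assumed to exist as above):

```yaml
- name: Create a BGP peering only when it is absent (sketch)
  command: >-
    {{ bin_dir }}/netctl bgp create {{ item.value }}
    --neighbor=192.0.2.1 --neighbor-as=500
  with_dict: "{{ contiv_peer_hostname }}"
  when:
    # empty BGP config, or this hostname not among the existing keys
    - bgp_config.stdout|from_json|length == 0 or not item.value in bgp_config.stdout|from_json|map(attribute='key')|list
```
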
@@ -9,7 +9,6 @@
     filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ contiv_manifests_results.results }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true

 - import_tasks: configure.yml

@@ -10,8 +10,15 @@
   when: kube_encrypt_secret_data

 - name: install | Copy kubectl binary from download dir
-  command: rsync -piu "{{ local_release_dir }}/hyperkube" "{{ bin_dir }}/kubectl"
+  synchronize:
+    src: "{{ local_release_dir }}/hyperkube"
+    dest: "{{ bin_dir }}/kubectl"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
   changed_when: false
+  delegate_to: "{{ inventory_hostname }}"
   tags:
     - hyperkube
     - kubectl

@@ -68,9 +68,18 @@ apiServerExtraArgs:
 {% endif %}
   service-node-port-range: {{ kube_apiserver_node_port_range }}
   kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
+  profiling: "{{ kube_profiling }}"
+  repair-malformed-updates: "false"
+  enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
+{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %}
+  anonymous-auth: "{{ kube_api_anonymous_auth }}"
+{% endif %}
 {% if kube_basic_auth|default(true) %}
   basic-auth-file: {{ kube_users_dir }}/known_users.csv
 {% endif %}
+{% if kube_token_auth|default(true) %}
+  token-auth-file: {{ kube_token_dir }}/known_tokens.csv
+{% endif %}
 {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
   oidc-issuer-url: {{ kube_oidc_url }}
   oidc-client-id: {{ kube_oidc_client_id }}
@@ -102,19 +111,21 @@ controllerManagerExtraArgs:
   node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
   node-monitor-period: {{ kube_controller_node_monitor_period }}
   pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }}
+  profiling: "{{ kube_profiling }}"
 {% if kube_feature_gates %}
   feature-gates: {{ kube_feature_gates|join(',') }}
 {% endif %}
+{% for key in kube_kubeadm_controller_extra_args %}
+  {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
+{% endfor %}
 {% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
 controllerManagerExtraVolumes:
 - name: openstackcacert
   hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
   mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
 {% endif %}
-{% for key in kube_kubeadm_controller_extra_args %}
-  {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
-{% endfor %}
 schedulerExtraArgs:
+  profiling: "{{ kube_profiling }}"
 {% if kube_feature_gates %}
   feature-gates: {{ kube_feature_gates|join(',') }}
 {% endif %}

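Taken together, the template additions above mean the kubeadm config now carries the profiling, aggregator-routing, anonymous-auth and token-auth knobs explicitly. Assuming the stock defaults introduced elsewhere in this diff (`kube_profiling: false`, aggregator routing off, token auth on), the rendered block comes out roughly like this; the values and file paths are illustrative and depend on `kube_users_dir`/`kube_token_dir`:

```yaml
apiServerExtraArgs:
  service-node-port-range: 30000-32767
  profiling: "false"
  repair-malformed-updates: "false"
  enable-aggregator-routing: "false"
  anonymous-auth: "true"
  basic-auth-file: /etc/kubernetes/users/known_users.csv
  token-auth-file: /etc/kubernetes/tokens/known_tokens.csv
```
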
@@ -60,9 +60,18 @@ apiServerExtraArgs:
 {% endif %}
   service-node-port-range: {{ kube_apiserver_node_port_range }}
   kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
+  profiling: "{{ kube_profiling }}"
+  repair-malformed-updates: "false"
+  enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
+{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %}
+  anonymous-auth: "{{ kube_api_anonymous_auth }}"
+{% endif %}
 {% if kube_basic_auth|default(true) %}
   basic-auth-file: {{ kube_users_dir }}/known_users.csv
 {% endif %}
+{% if kube_token_auth|default(true) %}
+  token-auth-file: {{ kube_token_dir }}/known_tokens.csv
+{% endif %}
 {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
   oidc-issuer-url: {{ kube_oidc_url }}
   oidc-client-id: {{ kube_oidc_client_id }}
@@ -101,9 +110,13 @@ controllerManagerExtraArgs:
   node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
   node-monitor-period: {{ kube_controller_node_monitor_period }}
   pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }}
+  profiling: "{{ kube_profiling }}"
 {% if kube_feature_gates %}
   feature-gates: {{ kube_feature_gates|join(',') }}
 {% endif %}
+{% for key in kube_kubeadm_controller_extra_args %}
+  {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
+{% endfor %}
 {% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
 controllerManagerExtraVolumes:
 - name: openstackcacert
@@ -122,10 +135,8 @@ apiServerExtraVolumes:
   writable: true
 {% endif %}
 {% endif %}
-{% for key in kube_kubeadm_controller_extra_args %}
-  {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
-{% endfor %}
 schedulerExtraArgs:
+  profiling: "{{ kube_profiling }}"
 {% if kube_feature_gates %}
   feature-gates: {{ kube_feature_gates|join(',') }}
 {% endif %}
@@ -150,3 +161,7 @@ nodeRegistration:
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
 {% endif %}
+{% if dynamic_kubelet_configuration %}
+featureGates:
+  DynamicKubeletConfig: true
+{% endif %}

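The `DynamicKubeletConfig` feature gate added above is one half of the new dynamic kubelet configuration support; the other half is the `--dynamic-config-dir` kubelet flag added further down in this diff. Everything is switched by a single inventory variable, e.g. in group_vars (a sketch; the override line is optional):

```yaml
# group_vars/k8s-cluster.yml (sketch)
dynamic_kubelet_configuration: true
# optional override; otherwise {{ kube_config_dir }}/dynamic_kubelet_dir is used
kubelet_config_dir: /etc/kubernetes/dynamic_kubelet_dir
```
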
@@ -33,7 +33,7 @@ spec:
     - --audit-log-maxage={{ audit_log_maxage }}
     - --audit-log-maxbackup={{ audit_log_maxbackups }}
     - --audit-log-maxsize={{ audit_log_maxsize }}
     - --audit-policy-file={{ audit_policy_file }}
 {% endif %}
     - --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
     - --etcd-servers={{ etcd_access_addresses }}
@@ -58,16 +58,16 @@ spec:
     - --admission-control={{ kube_apiserver_admission_control | join(',') }}
 {% else %}
 {% if kube_apiserver_enable_admission_plugins|length > 0 %}
     - --enable-admission-plugins={{ kube_apiserver_enable_admission_plugins | join(',') }}
 {% endif %}
 {% if kube_apiserver_disable_admission_plugins|length > 0 %}
     - --disable-admission-plugins={{ kube_apiserver_disable_admission_plugins | join(',') }}
 {% endif %}
 {% endif %}
     - --service-cluster-ip-range={{ kube_service_addresses }}
     - --service-node-port-range={{ kube_apiserver_node_port_range }}
     - --client-ca-file={{ kube_cert_dir }}/ca.pem
-    - --profiling=false
+    - --profiling={{ kube_profiling }}
     - --repair-malformed-updates=false
     - --kubelet-client-certificate={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem
     - --kubelet-client-key={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem

@@ -37,7 +37,7 @@ spec:
     - --node-monitor-grace-period={{ kube_controller_node_monitor_grace_period }}
     - --node-monitor-period={{ kube_controller_node_monitor_period }}
     - --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }}
-    - --profiling=false
+    - --profiling={{ kube_profiling }}
     - --terminated-pod-gc-threshold=12500
     - --v={{ kube_log_level }}
 {% if rbac_enabled %}

@@ -32,7 +32,7 @@ spec:
     - --use-legacy-policy-config
     - --policy-config-file={{ kube_config_dir }}/kube-scheduler-policy.yaml
 {% endif %}
-    - --profiling=false
+    - --profiling={{ kube_profiling }}
     - --v={{ kube_log_level }}
 {% if kube_feature_gates %}
     - --feature-gates={{ kube_feature_gates|join(',') }}

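All three static-pod manifests above now take `--profiling` from one variable instead of a hard-coded `false`, so the pprof endpoints of the whole control plane can be toggled in one place:

```yaml
# group_vars/k8s-cluster.yml (sketch)
kube_profiling: true   # enables /debug/pprof on apiserver, controller-manager and scheduler
```
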
@@ -86,6 +86,9 @@ kubelet_max_pods: 110
 ## Support custom flags to be passed to kubelet
 kubelet_custom_flags: []

+## Support custom flags to be passed to kubelet only on nodes, not masters
+kubelet_node_custom_flags: []
+
 # This setting is used for rkt based kubelet for deploying hyperkube
 # from a docker based registry ( controls --insecure and docker:// )
 ## Empty vaule for quay.io containers

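`kubelet_node_custom_flags` is appended to `KUBELET_ARGS` only on hosts in the `kube-node` group (see the env template changes below), so it tunes workers without touching master kubelets. A hypothetical inventory snippet:

```yaml
# Extra kubelet flags for worker nodes only (values illustrative)
kubelet_node_custom_flags:
  - "--max-pods=250"
  - "--image-gc-high-threshold=90"
```
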
@@ -7,8 +7,14 @@
     - kubeadm

 - name: install | Copy kubeadm binary from download dir
-  command: rsync -piu "{{ local_release_dir }}/kubeadm" "{{ bin_dir }}/kubeadm"
-  changed_when: false
+  synchronize:
+    src: "{{ local_release_dir }}/kubeadm"
+    dest: "{{ bin_dir }}/kubeadm"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
+  delegate_to: "{{ inventory_hostname }}"
   when: kubeadm_enabled
   tags:
     - kubeadm

@@ -1,11 +1,18 @@
 ---

 - name: install | Copy kubelet binary from download dir
-  command: rsync -piu "{{ local_release_dir }}/hyperkube" "{{ bin_dir }}/kubelet"
-  changed_when: false
+  synchronize:
+    src: "{{ local_release_dir }}/hyperkube"
+    dest: "{{ bin_dir }}/kubelet"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
+  delegate_to: "{{ inventory_hostname }}"
   tags:
     - hyperkube
     - upgrade
+  notify: restart kubelet

 - name: install | Set kubelet binary permissions
   file:
@@ -15,7 +22,6 @@
   tags:
     - hyperkube
     - upgrade
-  notify: restart kubelet

 - name: install | Copy socat wrapper for Container Linux
   command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}"

@@ -32,6 +32,13 @@
   tags:
     - kubelet

+- name: Make sure dynamic kubelet configuration directory is writeable
+  file:
+    path: "{{ dynamic_kubelet_configuration_dir }}"
+    mode: 0600
+    state: directory
+  when: dynamic_kubelet_configuration
+
 - name: Write kubelet config file (kubeadm)
   template:
     src: kubelet.kubeadm.env.j2

@@ -70,6 +77,8 @@

 - name: Verify if br_netfilter module exists
   shell: "modinfo br_netfilter"
+  environment:
+    PATH: "{{ ansible_env.PATH}}:/sbin"  # Make sure we can workaround RH's conservative path management
   register: modinfo_br_netfilter
   failed_when: modinfo_br_netfilter.rc not in [0, 1]
   changed_when: false

@@ -26,6 +26,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% if kubelet_authorization_mode_webhook %}
 --authorization-mode=Webhook \
 {% endif %}
+--enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} \
 --client-ca-file={{ kube_cert_dir }}/ca.crt \
 --pod-manifest-path={{ kube_manifest_dir }} \
 --cadvisor-port={{ kube_cadvisor_port }} \
@@ -48,6 +49,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% else %}
 --fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
 {% endif %}
+{% if dynamic_kubelet_configuration %}
+--dynamic-config-dir={{ dynamic_kubelet_configuration_dir }} \
+{% endif %}
 --runtime-cgroups={{ kubelet_runtime_cgroups }} --kubelet-cgroups={{ kubelet_kubelet_cgroups }} \
 {% endset %}

@@ -90,7 +94,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% endif %}
 {% set all_node_labels = role_node_labels + inventory_node_labels %}

-KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
+KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube-node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
 {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
 KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
 {% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}

@@ -120,7 +120,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% endif %}
 {% endif %}

-KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
+KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube-node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"

 {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
 KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"

@@ -127,3 +127,21 @@
   tags:
     - cloud-provider
     - facts
+
+- name: "Get current version of calico cluster version"
+  shell: "{{ bin_dir }}/calicoctl version | grep 'Cluster Version' | awk '{ print $3}'"
+  register: calico_version_on_server
+  run_once: yes
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+
+- name: "Check that calico version is enought for upgrade"
+  assert:
+    that:
+      - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
+    msg: "Your version of calico is not fresh enough for upgrade. Minimum version v2.6.5"
+  when:
+    - 'calico_version_on_server.stdout is defined'
+    - 'calico_version_on_server.stdout != ""'
+    - inventory_hostname == groups['kube-master'][0]
+  run_once: yes

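This version probe and assert are duplicated in the calico pre-upgrade checks changed below; both refuse to proceed when the running cluster reports a version older than v2.6.5, the minimum the calico v3 migration supports. The gate reduced to its essentials (a sketch; it assumes the registered probe above):

```yaml
- name: Refuse calico upgrade from a too-old cluster (sketch)
  assert:
    that:
      - calico_version_on_server.stdout | version_compare('v2.6.5', '>=')
    msg: "calico >= v2.6.5 is required before moving to v3.x"
  when: calico_version_on_server.stdout is defined and calico_version_on_server.stdout != ""
```
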
@@ -136,9 +136,20 @@ kube_apiserver_port: 6443
 kube_apiserver_insecure_bind_address: 127.0.0.1
 kube_apiserver_insecure_port: 8080

+# dynamic kubelet configuration
+dynamic_kubelet_configuration: false
+
+# define kubelet config dir for dynamic kubelet
+#kubelet_config_dir:
+default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
+dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
+
 # Aggregator
 kube_api_aggregator_routing: false

+# Profiling
+kube_profiling: false
+
 # Container for runtime
 container_manager: docker

@@ -303,6 +314,11 @@ weave_mode_seed: false
 weave_seed: uninitialized
 weave_peers: uninitialized

+# Contiv L3 BGP Mode
+contiv_peer_with_uplink_leaf: false
+contiv_global_as: "65002"
+contiv_global_neighbor_as: "500"
+
 ## Set no_proxy to all assigned cluster IPs and hostnames
 no_proxy: >-
 {%- if http_proxy is defined or https_proxy is defined %}

@@ -10,7 +10,7 @@
   run_once: yes

 - name: "Get current version of calico cluster version"
-  shell: "{{ bin_dir }}/calicoctl version | grep 'Cluster Version' | awk '{ print $3}'"
+  shell: "{{ bin_dir }}/calicoctl version | grep 'Cluster Version:' | awk '{ print $3}'"
   register: calico_version_on_server
   run_once: yes
   delegate_to: "{{ groups['kube-master'][0] }}"
@@ -22,6 +22,7 @@
     that:
       - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
     msg: "Your version of calico is not fresh enough for upgrade"
+  when: calico_upgrade_enabled

 - name: "Set upgrade flag when version needs to be updated"
   set_fact:

@@ -1,5 +1,4 @@
 ---
-
 - name: Calico | Write Calico cni config
   template:
     src: "cni-calico.conflist.j2"
@@ -103,6 +102,24 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   when:
     - 'calico_conf.stdout == "0"'
+    - calico_version | version_compare("v3.0.0", ">=")
+
+- name: Calico | Configure calico network pool (legacy)
+  shell: >
+    echo '
+    { "kind": "ipPool",
+      "spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}, "mode": "{{ ipip_mode|lower }}"},
+      "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}},
+      "apiVersion": "v1",
+      "metadata": {"cidr": "{{ kube_pods_subnet }}"}
+    }' | {{ bin_dir }}/calicoctl apply -f -
+  environment:
+    NO_DEFAULT_POOLS: true
+  run_once: true
+  delegate_to: "{{ groups['kube-master'][0] }}"
+  when:
+    - 'calico_conf.stdout == "0"'
+    - calico_version | version_compare("v3.0.0", "<")

 - name: "Determine nodeToNodeMesh needed state"
   set_fact:
@@ -112,7 +129,6 @@
     - inventory_hostname in groups['k8s-cluster']
   run_once: yes
-

 - name: Calico | Set global as_num
   shell: >
     echo '
@@ -127,19 +143,33 @@
     "asNumber": {{ global_as_num }} }} ' | {{ bin_dir }}/calicoctl --skip-exists create -f -
   run_once: true
   delegate_to: "{{ groups['kube-master'][0] }}"
+  when:
+    - calico_version | version_compare('v3.0.0', '>=')
+
+- name: Calico | Set global as_num (legacy)
+  command: "{{ bin_dir}}/calicoctl config set asNumber {{ global_as_num }}"
+  run_once: true
+  when:
+    - calico_version | version_compare('v3.0.0', '<')
+
+- name: Calico | Disable node mesh (legacy)
+  command: "{{ bin_dir }}/calicoctl config set nodeToNodeMesh off"
+  run_once: yes
+  when:
+    - calico_version | version_compare('v3.0.0', '<')
+    - nodeToMeshEnabled|default(True)

 - name: Calico | Configure peering with router(s)
   shell: >
     echo '{
       "apiVersion": "projectcalico.org/v3",
-      "kind": "bgpPeer",
+      "kind": "BGPPeer",
       "metadata": {
-        "name": "{{ inventory_hostname }}-bgp"
+        "name": "{{ inventory_hostname }}-{{ item.router_id }}"
       },
       "spec": {
         "asNumber": "{{ item.as }}",
         "node": "{{ inventory_hostname }}",
-        "scope": "node",
         "peerIP": "{{ item.router_id }}"
     }}' | {{ bin_dir }}/calicoctl create --skip-exists -f -
   retries: 4
@@ -147,7 +177,7 @@
   with_items:
     - "{{ peers|default([]) }}"
   when:
-    - calico_version_on_server.stdout|version_compare('v3.0.0', '>') or calico_upgrade_enabled
+    - calico_version | version_compare('v3.0.0', '>=')
    - peer_with_router|default(false)
    - inventory_hostname in groups['k8s-cluster']

@@ -164,8 +194,7 @@
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ peers|default([]) }}"
   when:
-    - calico_version_on_server.stdout|version_compare('v3.0.0', '<')
-    - not calico_upgrade_enabled
+    - calico_version | version_compare('v3.0.0', '<')
    - peer_with_router|default(false)
    - inventory_hostname in groups['k8s-cluster']

@@ -173,13 +202,12 @@
   shell: >
     echo '{
       "apiVersion": "projectcalico.org/v3",
-      "kind": "bgpPeer",
+      "kind": "BGPPeer",
       "metadata": {
-        "name": "{{ inventory_hostname }}"
+        "name": "{{ inventory_hostname }}-{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4"]["address"]) }}"
       },
       "spec": {
         "asNumber": "{{ local_as | default(global_as_num)}}",
-        "scope": "node",
         "node": "{{ inventory_hostname }}",
         "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4"]["address"]) }}"
     }}' | {{ bin_dir }}/calicoctl create --skip-exists -f -
@@ -188,7 +216,7 @@
   with_items:
     - "{{ groups['calico-rr'] | default([]) }}"
   when:
-    - calico_version_on_server.stdout|version_compare('v3.0.0', '>') or calico_upgrade_enabled
+    - calico_version | version_compare('v3.0.0', '>=')
    - peer_with_calico_rr|default(false)
    - inventory_hostname in groups['k8s-cluster']
    - hostvars[item]['cluster_id'] == cluster_id
@@ -208,7 +236,7 @@
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ groups['calico-rr'] | default([]) }}"
   when:
-    - calico_version_on_server.stdout|version_compare('v3.0.0', '<')
+    - calico_version | version_compare('v3.0.0', '<')
    - not calico_upgrade_enabled
    - peer_with_calico_rr|default(false)
    - hostvars[item]['cluster_id'] == cluster_id

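The common thread in the calico task changes above: branching now keys off the target `calico_version` rather than the probed server version plus `calico_upgrade_enabled`, resource kinds use the v3 spelling (`BGPPeer`, not `bgpPeer`), metadata names are made unique per peering by suffixing the peer address, and the deprecated `scope` field is dropped. What the v3 branch pipes into `calicoctl create --skip-exists -f -` is, roughly (addresses and AS number are placeholders):

```yaml
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: node1-192.0.2.254        # <inventory_hostname>-<router_id>
spec:
  asNumber: 64512
  node: node1
  peerIP: 192.0.2.254
```
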
@@ -159,9 +159,15 @@ spec:
           mountPath: /host/opt/cni/bin
         - name: etc-cni-netd
           mountPath: /host/etc/cni/net.d
+{% if container_manager == 'crio' %}
+        - name: crio-socket
+          mountPath: /var/run/crio.sock
+          readOnly: true
+{% else %}
         - name: docker-socket
           mountPath: /var/run/docker.sock
           readOnly: true
+{% endif %}
         - name: etcd-config-path
           mountPath: /var/lib/etcd-config
           readOnly: true
@@ -183,10 +189,17 @@ spec:
       - name: bpf-maps
         hostPath:
           path: /sys/fs/bpf
+{% if container_manager == 'crio' %}
+      # To read crio events from the node
+      - name: crio-socket
+        hostPath:
+          path: /var/run/crio/crio.sock
+{% else %}
       # To read docker events from the node
       - name: docker-socket
         hostPath:
           path: /var/run/docker.sock
+{% endif %}
       # To install cilium cni plugin in the host
       - name: cni-path
         hostPath:

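With the two template guards above, cilium mounts whichever runtime socket matches the cluster-wide `container_manager` setting, so enabling CRI-O is a one-variable change:

```yaml
# group_vars/k8s-cluster.yml (sketch)
container_manager: crio   # cilium then mounts /var/run/crio/crio.sock instead of docker.sock
```
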
@@ -6,8 +6,10 @@ contiv_etcd_data_dir: "/var/lib/etcd/contiv-data"
 contiv_netmaster_port: 9999
 contiv_cni_version: 0.1.0
+
+# No need to download it by default, but must be defined
 contiv_etcd_image_repo: "{{ etcd_image_repo }}"
 contiv_etcd_image_tag: "{{ etcd_image_tag }}"

 contiv_etcd_listen_port: 6666
 contiv_etcd_peer_port: 6667
 contiv_etcd_endpoints: |-
@@ -26,9 +28,21 @@ contiv_fwd_mode: routing
 # Fabric mode: aci, aci-opflex or default
 contiv_fabric_mode: default

+# Defaut netmode: vxlan or vlan
+contiv_net_mode: vxlan
+
 # Dataplane interface
 contiv_vlan_interface: ""

+# Default loglevels are INFO
+contiv_netmaster_loglevel: "WARN"
+contiv_netplugin_loglevel: "WARN"
+contiv_ovsdb_server_loglevel: "warn"
+contiv_ovs_vswitchd_loglevel: "warn"
+
+# VxLAN port
+contiv_vxlan_port: 4789
+
 # Default network configuration
 contiv_networks:
 - name: contivh1

@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+echo "Starting cleanup"
+ovs-vsctl list-br | grep contiv | xargs -I % ovs-vsctl del-br %
+for p in $(ifconfig | grep vport | awk '{print $1}');
+do
+  ip link delete $p type veth
+done
+touch /tmp/cleanup.done
+sleep 60

@@ -16,8 +16,25 @@
   with_items:
     - "{{ contiv_etcd_conf_dir }}"
     - "{{ contiv_etcd_data_dir }}"
+  when: inventory_hostname in groups['kube-master']

-- set_fact:
+- name: Contiv | Workaround https://github.com/contiv/netplugin/issues/1152
+  set_fact:
+    kube_apiserver_endpoint_for_contiv: |-
+      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
+      https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
+      {%- elif loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
+      https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}
+      {%- if loadbalancer_apiserver.port|string != "443" -%}
+      :{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
+      {%- endif -%}
+      {%- else -%}
+      https://{{ first_kube_master }}:{{ kube_apiserver_port }}
+      {%- endif %}
+  when: inventory_hostname in groups['kube-master']
+
+- name: Contiv | Set necessary facts
+  set_fact:
     contiv_config_dir: "{{ contiv_config_dir }}"
     contiv_enable_api_proxy: "{{ contiv_enable_api_proxy }}"
     contiv_fabric_mode: "{{ contiv_fabric_mode }}"
@@ -26,22 +43,26 @@
     contiv_networks: "{{ contiv_networks }}"
     contiv_manifests:
       - {name: contiv-config, file: contiv-config.yml, type: configmap}
+      - {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
+      - {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
+      - {name: contiv-ovs, file: contiv-ovs.yml, type: daemonset}
       - {name: contiv-netmaster, file: contiv-netmaster-clusterrolebinding.yml, type: clusterrolebinding}
       - {name: contiv-netmaster, file: contiv-netmaster-clusterrole.yml, type: clusterrole}
       - {name: contiv-netmaster, file: contiv-netmaster-serviceaccount.yml, type: serviceaccount}
+      - {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
       - {name: contiv-netplugin, file: contiv-netplugin-clusterrolebinding.yml, type: clusterrolebinding}
       - {name: contiv-netplugin, file: contiv-netplugin-clusterrole.yml, type: clusterrole}
       - {name: contiv-netplugin, file: contiv-netplugin-serviceaccount.yml, type: serviceaccount}
-      - {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
-      - {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
       - {name: contiv-netplugin, file: contiv-netplugin.yml, type: daemonset}
-      - {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
+  when: inventory_hostname in groups['kube-master']

 - set_fact:
     contiv_manifests: |-
       {% set _ = contiv_manifests.append({"name": "contiv-api-proxy", "file": "contiv-api-proxy.yml", "type": "daemonset"}) %}
       {{ contiv_manifests }}
-  when: contiv_enable_api_proxy
+  when:
+    - contiv_enable_api_proxy
+    - inventory_hostname in groups['kube-master']

 - name: Contiv | Create /var/contiv
   file:
@@ -55,21 +76,23 @@
     mode: 0755
     owner: root
     group: root
+  when: inventory_hostname in groups['kube-master']

 - name: Contiv | Install all Kubernetes resources
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ contiv_config_dir }}/{{ item.file }}"
   with_items: "{{ contiv_manifests }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
-  run_once: true
   register: contiv_manifests_results
+  when: inventory_hostname in groups['kube-master']

 - name: Contiv | Generate contiv-api-proxy certificates
   script: generate-certificate.sh
   args:
     creates: /var/contiv/auth_proxy_key.pem
-  when: "contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true

@@ -81,7 +104,9 @@
   with_items:
     - auth_proxy_key.pem
     - auth_proxy_cert.pem
-  when: "contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true

@@ -92,9 +117,11 @@
   with_items:
     - auth_proxy_key.pem
     - auth_proxy_cert.pem
-  when: "inventory_hostname != groups['kube-master'][0]
-        and inventory_hostname in groups['kube-master']
-        and contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - inventory_hostname != groups['kube-master'][0]
+    - inventory_hostname in groups['kube-master']
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate

 - name: Contiv | Copy cni plugins from hyperkube
   command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/bash -c '/bin/cp -fa /opt/cni/bin/* /cnibindir/'"

@@ -0,0 +1,66 @@
+---
+- name: reset | Check that kubectl is still here
+  stat:
+    path: "{{ bin_dir }}/kubectl"
+  register: contiv_kubectl
+
+- name: reset | Delete contiv netplugin and netmaster daemonsets
+  kube:
+    name: "{{ item }}"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "ds"
+    state: absent
+  with_items:
+    - contiv-netplugin
+    - contiv-netmaster
+  register: contiv_cleanup_deletion
+  tags:
+    - network
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: reset | Copy contiv temporary cleanup script
+  copy:
+    src: ../files/contiv-cleanup.sh  # Not in role_path so we must trick...
+    dest: /opt/cni/bin/cleanup
+    owner: root
+    group: root
+    mode: 0750
+  when:
+    - contiv_kubectl.stat.exists
+
+- name: reset | Lay down contiv cleanup template
+  template:
+    src: ../templates/contiv-cleanup.yml.j2  # Not in role_path so we must trick...
+    dest: "{{ kube_config_dir }}/contiv-cleanup.yml"  # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
+  register: contiv_cleanup_manifest
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: reset | Start contiv cleanup resources
+  kube:
+    name: "contiv-cleanup"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "ds"
+    state: latest
+    filename: "{{ kube_config_dir }}/contiv-cleanup.yml"
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+  ignore_errors: true
+
+- name: reset | Wait until contiv cleanup is done
+  command: "{{ bin_dir }}/kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'"
+  register: cleanup_done_all_nodes
+  until: cleanup_done_all_nodes.stdout|int == groups['k8s-cluster']|length
+  retries: 5
+  delay: 5
+  ignore_errors: true
+  changed_when: false
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]

@@ -0,0 +1,9 @@
+---
+- name: reset | check contiv vxlan_sys network device
+  stat:
+    path: "/sys/class/net/vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
+  register: contiv_vxlan_sys
+
+- name: reset | remove the vxlan_sys network device created by contiv
+  command: "ip link del vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
+  when: contiv_vxlan_sys.stat.exists

@@ -35,16 +35,19 @@ spec:
         - --listen-address=0.0.0.0:{{ contiv_api_proxy_port }}
         - --tls-key-file=/var/contiv/auth_proxy_key.pem
         - --tls-certificate=/var/contiv/auth_proxy_cert.pem
+        - --data-store-driver=$(STORE_DRIVER)
         - --data-store-address=$(CONTIV_ETCD)
         - --netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}
        env:
         - name: NO_NETMASTER_STARTUP_CHECK
           value: "0"
+        - name: STORE_DRIVER
+          value: etcd
         - name: CONTIV_ETCD
           valueFrom:
             configMapKeyRef:
               name: contiv-config
-              key: cluster_store
+              key: contiv_etcd
        securityContext:
          privileged: false
        volumeMounts:

@@ -0,0 +1,57 @@
+---
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: contiv-cleanup
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-cleanup
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-cleanup
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-cleanup
+    spec:
+      hostNetwork: true
+      hostPID: true
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      serviceAccountName: contiv-netplugin
+      containers:
+      - name: contiv-ovs-cleanup
+        image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+        command: ["/opt/cni/bin/cleanup"]
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/openvswitch
+          name: etc-openvswitch
+          readOnly: false
+        - mountPath: /var/run
+          name: var-run
+          readOnly: false
+        - mountPath: /opt/cni/bin
+          name: cni-bin-dir
+          readOnly: false
+        readinessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/cleanup.done
+          initialDelaySeconds: 3
+          periodSeconds: 3
+          successThreshold: 1
+      volumes:
+      - name: etc-openvswitch
+        hostPath:
+          path: /etc/openvswitch
+      - name: var-run
+        hostPath:
+          path: /var/run
+      - name: cni-bin-dir
+        hostPath:
+          path: /opt/cni/bin
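The readinessProbe on `/tmp/cleanup.done` is how this DaemonSet signals that host cleanup finished; the `until: cleanup_done_all_nodes...` loop earlier in the diff counts pods that turn ready. A sketch of that completion-marker pattern, with a placeholder image and command (both assumptions, not from this commit):

```yaml
# Illustrative container: do the work, then touch the marker file that
# the exec readinessProbe cats, flipping the pod to Ready.
- name: cleanup-marker-demo
  image: busybox  # placeholder, not the contiv-ovs image
  command: ["/bin/sh", "-c", "echo cleaning; touch /tmp/cleanup.done; sleep 3600"]
  readinessProbe:
    exec:
      command: ["cat", "/tmp/cleanup.done"]
    initialDelaySeconds: 3
    periodSeconds: 3
```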
@@ -7,20 +7,22 @@ metadata:
   name: contiv-config
   namespace: kube-system
 data:
-  # The location of your cluster store. This is set to the
-  # avdertise-client value below from the contiv-etcd service.
-  # Change it to an external etcd/consul instance if required.
-  cluster_store: "etcd://127.0.0.1:{{ contiv_etcd_listen_port }}"
-  # The CNI network configuration to install on each node.
-  cni_config: |-
+  contiv_netmaster_loglevel: {{ contiv_netmaster_loglevel }}
+  contiv_netplugin_loglevel: {{ contiv_netplugin_loglevel }}
+  contiv_ovsdb_server_extra_flags: "--verbose={{ contiv_ovsdb_server_loglevel }}"
+  contiv_ovs_vswitchd_extra_flags: "--verbose={{ contiv_ovs_vswitchd_loglevel }}"
+  contiv_fwdmode: {{ contiv_fwd_mode }}
+  contiv_netmode: {{ contiv_net_mode }}
+  contiv_etcd: "http://127.0.0.1:{{ contiv_etcd_listen_port }}"
+  contiv_cni_config: |-
     {
       "cniVersion": "{{ contiv_cni_version }}",
       "name": "contiv-net",
       "type": "contivk8s"
     }
-  config: |-
+  contiv_k8s_config: |-
     {
-      "K8S_API_SERVER": "{{ kube_apiserver_endpoint }}",
+      "K8S_API_SERVER": "{{ kube_apiserver_endpoint_for_contiv }}",
       "K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
       "K8S_KEY": "",
       "K8S_CERT": "",
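For illustration, with assumed values (an etcd listen port of 6666 and INFO log levels, none taken from this diff) the reworked data section could render roughly as:

```yaml
# Illustrative render of the new ConfigMap keys; all values are assumptions.
apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-config
  namespace: kube-system
data:
  contiv_netmaster_loglevel: INFO
  contiv_netplugin_loglevel: INFO
  contiv_ovsdb_server_extra_flags: "--verbose=info"
  contiv_ovs_vswitchd_extra_flags: "--verbose=info"
  contiv_fwdmode: routing
  contiv_netmode: vlan
  contiv_etcd: "http://127.0.0.1:6666"
```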
@@ -19,6 +19,8 @@ spec:
     spec:
       hostNetwork: true
      hostPID: true
+      nodeSelector:
+        node-role.kubernetes.io/node: "true"
       containers:
       - name: contiv-etcd-proxy
         image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
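The new nodeSelector means contiv-etcd-proxy only lands on nodes labeled `node-role.kubernetes.io/node=true`. Kubespray applies that label itself during node setup; a hypothetical ad-hoc equivalent, for reference only:

```yaml
# Hypothetical task: label a worker so the DaemonSet's nodeSelector matches.
- name: label worker node for contiv-etcd-proxy
  command: >-
    {{ bin_dir }}/kubectl label node {{ inventory_hostname }}
    node-role.kubernetes.io/node=true --overwrite
  delegate_to: "{{ groups['kube-master'][0] }}"
```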
@@ -13,6 +13,7 @@ rules:
       - namespaces
       - networkpolicies
     verbs:
+      - get
      - watch
      - list
      - update
@@ -1,4 +1,4 @@
-# This manifest deploys the Contiv API Server on Kubernetes.
+---
 kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
@@ -31,20 +31,31 @@ spec:
       containers:
       - name: contiv-netmaster
         image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
-        args:
-        - -m
-        - -pkubernetes
         env:
-        - name: CONTIV_ETCD
+        - name: CONTIV_ROLE
+          value: netmaster
+        - name: CONTIV_NETMASTER_MODE
+          value: kubernetes
+        - name: CONTIV_NETMASTER_ETCD_ENDPOINTS
           valueFrom:
             configMapKeyRef:
               name: contiv-config
-              key: cluster_store
-        - name: CONTIV_CONFIG
+              key: contiv_etcd
+        - name: CONTIV_NETMASTER_FORWARD_MODE
           valueFrom:
             configMapKeyRef:
               name: contiv-config
-              key: config
+              key: contiv_fwdmode
+        - name: CONTIV_NETMASTER_NET_MODE
+          valueFrom:
+            configMapKeyRef:
+              name: contiv-config
+              key: contiv_netmode
+        - name: CONTIV_NETMASTER_LOG_LEVEL
+          valueFrom:
+            configMapKeyRef:
+              name: contiv-config
+              key: contiv_netmaster_loglevel
         securityContext:
           privileged: true
         volumeMounts:
@@ -1,3 +1,4 @@
+---
 # This manifest installs contiv-netplugin container, as well
 # as the Contiv CNI plugins and network config on
 # each master and worker node in a Kubernetes cluster.
|
||||||
- key: node-role.kubernetes.io/master
|
- key: node-role.kubernetes.io/master
|
||||||
effect: NoSchedule
|
effect: NoSchedule
|
||||||
serviceAccountName: contiv-netplugin
|
serviceAccountName: contiv-netplugin
|
||||||
|
initContainers:
|
||||||
|
- name: contiv-netplugin-init
|
||||||
|
image: {{ contiv_init_image_repo }}:{{ contiv_init_image_tag }}
|
||||||
|
env:
|
||||||
|
- name: CONTIV_ROLE
|
||||||
|
value: netplugin
|
||||||
|
- name: CONTIV_MODE
|
||||||
|
value: kubernetes
|
||||||
|
- name: CONTIV_K8S_CONFIG
|
||||||
|
valueFrom:
|
||||||
|
configMapKeyRef:
|
||||||
|
name: contiv-config
|
||||||
|
key: contiv_k8s_config
|
||||||
|
- name: CONTIV_CNI_CONFIG
|
||||||
|
valueFrom:
|
||||||
|
configMapKeyRef:
|
||||||
|
name: contiv-config
|
||||||
|
key: contiv_cni_config
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /var/contiv
|
||||||
|
name: var-contiv
|
||||||
|
readOnly: false
|
||||||
|
- mountPath: /etc/cni/net.d/
|
||||||
|
name: etc-cni-dir
|
||||||
|
readOnly: false
|
||||||
|
- name: contiv-cni
|
||||||
|
image: {{ contiv_image_repo }}:{{ contiv_version }}
|
||||||
|
command: ["cp", "/contiv/bin/contivk8s", "/opt/cni/bin/contivk8s"]
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /opt/cni/bin
|
||||||
|
name: cni-bin-dir
|
||||||
|
readOnly: false
|
||||||
containers:
|
containers:
|
||||||
# Runs netplugin container on each Kubernetes node. This
|
# Runs netplugin container on each Kubernetes node. This
|
||||||
# container programs network policy and routes on each
|
# container programs network policy and routes on each
|
||||||
# host.
|
# host.
|
||||||
- name: contiv-netplugin
|
- name: contiv-netplugin
|
||||||
image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
|
image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
|
||||||
args:
|
|
||||||
- -pkubernetes
|
|
||||||
- -x
|
|
||||||
env:
|
env:
|
||||||
- name: VLAN_IF
|
- name: VLAN_IF
|
||||||
value: {{ contiv_vlan_interface }}
|
value: {{ contiv_vlan_interface }}
|
||||||
- name: VTEP_IP
|
- name: CONTIV_NETPLUGIN_VLAN_UPLINKS
|
||||||
|
value: {{ contiv_vlan_interface }}
|
||||||
|
- name: CONTIV_NETPLUGIN_VXLAN_PORT
|
||||||
|
value: "{{ contiv_vxlan_port }}"
|
||||||
|
- name: CONTIV_ROLE
|
||||||
|
value: netplugin
|
||||||
|
- name: CONTIV_NETPLUGIN_MODE
|
||||||
|
value: kubernetes
|
||||||
|
- name: CONTIV_NETPLUGIN_VTEP_IP
|
||||||
valueFrom:
|
valueFrom:
|
||||||
fieldRef:
|
fieldRef:
|
||||||
fieldPath: status.podIP
|
fieldPath: status.podIP
|
||||||
- name: CONTIV_ETCD
|
- name: CONTIV_NETPLUGIN_ETCD_ENDPOINTS
|
||||||
valueFrom:
|
valueFrom:
|
||||||
configMapKeyRef:
|
configMapKeyRef:
|
||||||
name: contiv-config
|
name: contiv-config
|
||||||
key: cluster_store
|
key: contiv_etcd
|
||||||
- name: CONTIV_CNI_CONFIG
|
- name: CONTIV_NETPLUGIN_FORWARD_MODE
|
||||||
valueFrom:
|
valueFrom:
|
||||||
configMapKeyRef:
|
configMapKeyRef:
|
||||||
name: contiv-config
|
name: contiv-config
|
||||||
key: cni_config
|
key: contiv_fwdmode
|
||||||
- name: CONTIV_CONFIG
|
- name: CONTIV_NETPLUGIN_NET_MODE
|
||||||
valueFrom:
|
valueFrom:
|
||||||
configMapKeyRef:
|
configMapKeyRef:
|
||||||
name: contiv-config
|
name: contiv-config
|
||||||
key: config
|
key: contiv_netmode
|
||||||
|
- name: CONTIV_NETPLUGIN_LOG_LEVEL
|
||||||
|
valueFrom:
|
||||||
|
configMapKeyRef:
|
||||||
|
name: contiv-config
|
||||||
|
key: contiv_netplugin_loglevel
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
cpu: 250m
|
||||||
securityContext:
|
securityContext:
|
||||||
privileged: true
|
privileged: true
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- mountPath: /etc/openvswitch
|
|
||||||
name: etc-openvswitch
|
|
||||||
readOnly: false
|
|
||||||
- mountPath: /lib/modules
|
|
||||||
name: lib-modules
|
|
||||||
readOnly: false
|
|
||||||
- mountPath: /var/run
|
- mountPath: /var/run
|
||||||
name: var-run
|
name: var-run
|
||||||
readOnly: false
|
readOnly: false
|
||||||
- mountPath: /var/contiv
|
- mountPath: /var/contiv
|
||||||
name: var-contiv
|
name: var-contiv
|
||||||
readOnly: false
|
readOnly: false
|
||||||
- mountPath: /opt/cni/bin
|
|
||||||
name: cni-bin-dir
|
|
||||||
readOnly: false
|
|
||||||
- mountPath: /etc/cni/net.d/
|
|
||||||
name: etc-cni-dir
|
|
||||||
readOnly: false
|
|
||||||
volumes:
|
volumes:
|
||||||
# Used by contiv-netplugin
|
# Used by contiv-netplugin
|
||||||
- name: etc-openvswitch
|
|
||||||
hostPath:
|
|
||||||
path: /etc/openvswitch
|
|
||||||
- name: lib-modules
|
|
||||||
hostPath:
|
|
||||||
path: /lib/modules
|
|
||||||
- name: var-run
|
- name: var-run
|
||||||
hostPath:
|
hostPath:
|
||||||
path: /var/run
|
path: /var/run
|
||||||
- name: var-contiv
|
- name: var-contiv
|
||||||
hostPath:
|
hostPath:
|
||||||
path: /var/contiv
|
path: /var/contiv
|
||||||
# Used to install CNI.
|
|
||||||
- name: cni-bin-dir
|
- name: cni-bin-dir
|
||||||
hostPath:
|
hostPath:
|
||||||
path: /opt/cni/bin
|
path: /opt/cni/bin
|
||||||
|
|
|
@@ -0,0 +1,80 @@
+---
+apiVersion: apps/v1
+# This manifest deploys the contiv-ovs pod.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: contiv-ovs
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-ovs
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-ovs
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-ovs
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      hostNetwork: true
+      hostPID: true
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      containers:
+      # Runs ovs containers on each Kubernetes node.
+      - name: contiv-ovsdb-server
+        image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+        command: ["/scripts/start-ovsdb-server.sh"]
+        securityContext:
+          privileged: false
+        # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and image is built again
+        env:
+        - name: OVSDBSERVER_EXTRA_FLAGS
+          valueFrom:
+            configMapKeyRef:
+              name: contiv-config
+              key: contiv_ovsdb_server_extra_flags
+        volumeMounts:
+        - mountPath: /etc/openvswitch
+          name: etc-openvswitch
+          readOnly: false
+        - mountPath: /var/run
+          name: var-run
+          readOnly: false
+      - name: contiv-ovs-vswitchd
+        image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+        command: ["/scripts/start-ovs-vswitchd.sh"]
+        securityContext:
+          privileged: true
+        # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and image is built again
+        env:
+        - name: OVSVSWITCHD_EXTRA_FLAGS
+          valueFrom:
+            configMapKeyRef:
+              name: contiv-config
+              key: contiv_ovs_vswitchd_extra_flags
+        volumeMounts:
+        - mountPath: /etc/openvswitch
+          name: etc-openvswitch
+          readOnly: false
+        - mountPath: /lib/modules
+          name: lib-modules
+          readOnly: true
+        - mountPath: /var/run
+          name: var-run
+          readOnly: false
+      volumes:
+      # Used by contiv-ovs
+      - name: etc-openvswitch
+        hostPath:
+          path: /etc/openvswitch
+      - name: lib-modules
+        hostPath:
+          path: /lib/modules
+      - name: var-run
+        hostPath:
+          path: /var/run
@@ -42,13 +42,13 @@ items:
       - patch
       - update
     - apiGroups:
       - policy
       resourceNames:
       - privileged
       resources:
       - podsecuritypolicies
       verbs:
       - use
 - apiVersion: rbac.authorization.k8s.io/v1beta1
   kind: ClusterRoleBinding
   metadata:
@@ -1,5 +1,12 @@
 ---
+
+- name: reset | include file with pre-reset tasks specific to the network_plugin if exists
+  include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/pre-reset.yml') | realpath }}"
+  when:
+    - kube_network_plugin in ['contiv']
+  tags:
+    - network
+
 - name: reset | stop services
   service:
     name: "{{ item }}"
|
@ -150,6 +157,11 @@
|
||||||
- "{{ bin_dir }}/weave"
|
- "{{ bin_dir }}/weave"
|
||||||
- /var/lib/rkt
|
- /var/lib/rkt
|
||||||
- /etc/vault
|
- /etc/vault
|
||||||
|
- /etc/contiv
|
||||||
|
- /var/contiv
|
||||||
|
- /run/contiv
|
||||||
|
- /etc/openvswitch
|
||||||
|
- /run/openvswitch
|
||||||
ignore_errors: yes
|
ignore_errors: yes
|
||||||
tags:
|
tags:
|
||||||
- files
|
- files
|
||||||
|
@ -181,7 +193,7 @@
|
||||||
- name: reset | include file with reset tasks specific to the network_plugin if exists
|
- name: reset | include file with reset tasks specific to the network_plugin if exists
|
||||||
include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath }}"
|
include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath }}"
|
||||||
when:
|
when:
|
||||||
- kube_network_plugin in ['flannel', 'cilium']
|
- kube_network_plugin in ['flannel', 'cilium', 'contiv']
|
||||||
tags:
|
tags:
|
||||||
- network
|
- network
|
||||||
|
|
||||||
|
|
|
@@ -1,5 +1,5 @@
 ---
 drain_grace_period: 300
 drain_timeout: 360s
-drain_label_selector: ""
+drain_pod_selector: ""
 drain_nodes: true
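With the rename, operators set `drain_pod_selector` to limit which pods an upgrade drain evicts. An illustrative group_vars override (the selector value is made up):

```yaml
# Example inventory override; a non-empty drain_pod_selector requires
# kubectl >= v1.10.0, which the pre-upgrade assert below enforces.
drain_grace_period: 120
drain_timeout: 180s
drain_pod_selector: "app!=critical"
```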
@@ -32,6 +32,24 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: needs_cordoning
 
+- name: Check kubectl version
+  command: kubectl version --client --short
+  register: kubectl_version
+  delegate_to: "{{ groups['kube-master'][0] }}"
+  run_once: yes
+  when:
+    - drain_nodes
+    - needs_cordoning
+    - 'drain_pod_selector != ""'
+
+- name: Ensure minimum version for drain label selector if necessary
+  assert:
+    that: "kubectl_version.stdout.split(' ')[-1] | version_compare('v1.10.0', '>=')"
+  when:
+    - drain_nodes
+    - needs_cordoning
+    - 'drain_pod_selector != ""'
+
 - name: Drain node
   command: >-
     {{ bin_dir }}/kubectl drain
@@ -40,7 +58,7 @@
     --grace-period {{ drain_grace_period }}
     --timeout {{ drain_timeout }}
     --delete-local-data {{ inventory_hostname }}
-    {% if drain_label_selector != "" %}--selector '{{ drain_label_selector }}'{% endif %}
+    {% if drain_pod_selector != "" %}--pod-selector '{{ drain_pod_selector }}'{% endif %}
   delegate_to: "{{ groups['kube-master'][0] }}"
   when:
     - drain_nodes
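Taken with the defaults above, the templated task expands to a plain `kubectl drain` invocation. An illustrative expansion, assuming `bin_dir=/usr/local/bin`, a node named `node1`, and the made-up selector `app!=critical` (only the lines visible in this hunk are shown):

```yaml
# Rendered sketch of the drain command for one host; values are assumptions.
- name: Drain node (rendered example)
  command: >-
    /usr/local/bin/kubectl drain
    --grace-period 300
    --timeout 360s
    --delete-local-data node1
    --pod-selector 'app!=critical'
```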
@@ -16,11 +16,11 @@
 
 # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
 - name: Check current nodeselector for kube-proxy daemonset
-  shell: kubectl get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta\.kubernetes\.io/os}'
+  shell: "{{bin_dir}}/kubectl get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta\.kubernetes\.io/os}'"
   register: current_kube_proxy_state
 
 - name: Apply nodeselector patch for kube-proxy daemonset
-  shell: kubectl patch ds kube-proxy --namespace=kube-system --type=strategic -p "$(cat nodeselector-os-linux-patch.json)"
+  shell: "{{bin_dir}}/kubectl patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\""
   args:
     chdir: "{{ kubernetes_user_manifests_path }}"
   register: patch_kube_proxy_state
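Because of kubernetes#58212 the tasks above register output rather than trusting the return code. A hypothetical follow-up, not part of this diff, showing how changed/failed state could be derived from the registered stdout instead:

```yaml
# Hypothetical: classify the patch result from stdout, since the rc of
# "kubectl patch" is unreliable (kubernetes/kubernetes#58212).
- name: Evaluate kube-proxy patch result
  debug:
    msg: "patch output: {{ patch_kube_proxy_state.stdout }}"
  changed_when: "'patched' in patch_kube_proxy_state.stdout"
  failed_when: "'error' in patch_kube_proxy_state.stderr | default('')"
```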
@@ -10,5 +10,6 @@ kube_network_plugin: weave
 kubeadm_enabled: true
 deploy_netchecker: true
 kubernetes_audit: true
+dynamic_kubelet_configuration: true
 kubedns_min_replicas: 1
 cloud_provider: gce
@@ -100,9 +100,8 @@
     - { role: kubespray-defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
-    - { role: upgrade/post-upgrade, tags: post-upgrade }
     - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
-    - { role: kubespray-defaults}
+    - { role: upgrade/post-upgrade, tags: post-upgrade }
   environment: "{{proxy_env}}"
 
 - hosts: kube-master[0]