remove copying of container images; use a local registry instead

pull/1166/head
jin.gjm 2022-06-10 06:53:28 +08:00
parent c248b34dc8
commit 8209eb36e8
24 changed files with 19 additions and 1303 deletions

ezdown
View File

@@ -374,7 +374,6 @@ function get_offline_image() {
if [[ ! -f "$imageDir/pause_$pauseVer.tar" ]];then
docker pull "easzlab/pause:$pauseVer" && \
docker save -o "$imageDir/pause_$pauseVer.tar" "easzlab/pause:$pauseVer"
/bin/cp -u "$imageDir/pause_$pauseVer.tar" "$imageDir/pause.tar"
else
docker load -i "$imageDir/pause_$pauseVer.tar"
fi
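
With this change the cached image tars are no longer copied to every node; they only need to reach the local registry once. A minimal sketch of that step, assuming the registry already listens on easzlab.io.local:5000 (the address comes from the manifests below, not from ezdown itself):

# Sketch: publish a cached image tar to the local registry (assumed workflow).
docker load -i "$imageDir/pause_$pauseVer.tar"
docker tag "easzlab/pause:$pauseVer" "easzlab.io.local:5000/easzlab/pause:$pauseVer"
docker push "easzlab.io.local:5000/easzlab/pause:$pauseVer"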

View File

@@ -14,23 +14,3 @@
- { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
- { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
- { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
tasks:
- name: push cluster-addon offline images
copy: src={{ item }} dest=/opt/kube/images/
with_fileglob:
- "{{ base_dir }}/down/coredns*.tar"
- "{{ base_dir }}/down/dashboard*.tar"
- "{{ base_dir }}/down/metrics-scraper*.tar"
- "{{ base_dir }}/down/metrics-server*.tar"
- "{{ base_dir }}/down/traefik*.tar"
ignore_errors: true
- name: import offline images (failures can be ignored)
shell: "for image in $(echo /opt/kube/images/*.tar); do {{ bin_dir }}/docker load -i $image; done;"
ignore_errors: true
when: "CONTAINER_RUNTIME == 'docker'"
- name: import offline images (failures can be ignored)
shell: "for image in $(echo /opt/kube/images/*.tar); do {{ bin_dir }}/ctr -n=k8s.io images import $image; done;"
ignore_errors: true
when: "CONTAINER_RUNTIME == 'containerd'"
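
The deleted tasks above copied tars to each node and imported them with docker or ctr; with a registry, the runtime pulls on demand instead. A hedged spot check that a node can pull through the registry (the coredns tag is a placeholder):

# Illustrative node-side check; the image tag is a placeholder.
if command -v ctr >/dev/null 2>&1; then
  ctr -n k8s.io images pull --plain-http easzlab.io.local:5000/coredns/coredns:1.8.6
else
  docker pull easzlab.io.local:5000/coredns/coredns:1.8.6
fi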

View File

@@ -38,72 +38,29 @@
when: '"calico-etcd-secrets" not in secrets_info.stdout'
run_once: true
# [optional] push offline docker images; execution errors can be ignored
- block:
- name: check whether offline calico images have been downloaded
command: "ls {{ base_dir }}/down"
register: download_info
connection: local
run_once: true
- name: try to push offline docker images (failures can be ignored)
copy: src={{ base_dir }}/down/{{ item }} dest=/opt/kube/images/{{ item }}
when: 'item in download_info.stdout'
with_items:
- "pause.tar"
- "{{ calico_offline }}"
ignore_errors: true
- name: get the push status of offline calico images
command: "ls /opt/kube/images"
register: image_info
# if offline images exist in the directory, import them onto the node
- name: import offline calico images (failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ item }}"
with_items:
- "pause.tar"
- "{{ calico_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'docker'"
- name: import offline calico images (failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ item }}"
with_items:
- "pause.tar"
- "{{ calico_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'containerd'"
- name: configure the calico DaemonSet yaml file
template: src=calico-{{ calico_ver_main }}.yaml.j2 dest={{ cluster_dir }}/yml/calico.yaml
run_once: true
connection: local
# only needs to run once, on a single node
- name: deploy the calico network
shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/calico.yaml"
run_once: true
connection: local
# remove the pre-existing cni config
- name: remove the default cni config
file: path=/etc/cni/net.d/10-default.conf state=absent
# [optional] the cni calico plugins are already installed automatically by calico.yaml
- name: download the calicoctl client
copy: src={{ base_dir }}/bin/{{ item }} dest={{ bin_dir }}/{{ item }} mode=0755
with_items:
#- calico
#- calico-ipam
#- loopback
- calicoctl
ignore_errors: true
- name: prepare the calicoctl config file
template: src=calicoctl.cfg.j2 dest=/etc/calico/calicoctl.cfg
# wait for the network plugin to deploy; duration depends on image download speed
- name: poll until calico-node is running (depends on image download speed)
shell: "{{ bin_dir }}/kubectl get pod -n kube-system -o wide|grep 'calico-node'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
register: pod_status
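
The retained poll scrapes kubectl output with grep and awk; an equivalent check (an assumed alternative, not part of this commit) can use kubectl's built-in wait:

# Assumed alternative to the awk-based poll above.
kubectl -n kube-system wait pod -l k8s-app=calico-node \
  --for=condition=Ready --timeout=300s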

View File

@@ -209,7 +209,7 @@ spec:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.15.3
image: easzlab.io.local:5000/calico/cni:v3.15.3
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
@@ -248,7 +248,7 @@ spec:
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.15.3
image: easzlab.io.local:5000/calico/pod2daemon-flexvol:v3.15.3
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
@@ -259,7 +259,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.15.3
image: easzlab.io.local:5000/calico/node:v3.15.3
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
@@ -485,7 +485,7 @@ spec:
hostNetwork: true
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:v3.15.3
image: easzlab.io.local:5000/calico/kube-controllers:v3.15.3
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
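
These image swaps only work if every node's runtime trusts the plain-HTTP local registry; kubeasz presumably configures this elsewhere, so the following is a sketch of the assumed settings:

# Assumed docker setting: trust the plain-HTTP local registry.
cat >/etc/docker/daemon.json <<'EOF'
{ "insecure-registries": ["easzlab.io.local:5000"] }
EOF
# Assumed containerd equivalent, in /etc/containerd/config.toml:
#   [plugins."io.containerd.grpc.v1.cri".registry.mirrors."easzlab.io.local:5000"]
#     endpoint = ["http://easzlab.io.local:5000"]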

View File

@@ -210,7 +210,7 @@ spec:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: docker.io/calico/cni:{{ calico_ver }}
image: easzlab.io.local:5000/calico/cni:{{ calico_ver }}
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
@@ -254,7 +254,7 @@ spec:
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: docker.io/calico/pod2daemon-flexvol:{{ calico_ver }}
image: easzlab.io.local:5000/calico/pod2daemon-flexvol:{{ calico_ver }}
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
@@ -265,7 +265,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: docker.io/calico/node:{{ calico_ver }}
image: easzlab.io.local:5000/calico/node:{{ calico_ver }}
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
@@ -514,7 +514,7 @@ spec:
hostNetwork: true
containers:
- name: calico-kube-controllers
image: docker.io/calico/kube-controllers:{{ calico_ver }}
image: easzlab.io.local:5000/calico/kube-controllers:{{ calico_ver }}
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
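
easzlab.io.local is not a publicly resolvable name, so every node has to map it to the host running the registry. A minimal sketch (the IP is a placeholder):

# Hypothetical: point the registry name at the deploy host on each node.
echo "192.168.1.10  easzlab.io.local" >> /etc/hosts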

View File

@@ -1,473 +0,0 @@
# Calico Version {{ calico_ver }}
# https://docs.projectcalico.org/v3.3/releases#{{ calico_ver }}
# This manifest includes the following component versions:
# calico/node:{{ calico_ver }}
# calico/cni:{{ calico_ver }}
# calico/kube-controllers:{{ calico_ver }}
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "{{ ETCD_ENDPOINTS }}"
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"
# Configure the Calico backend to use.
calico_backend: "{{ CALICO_NETWORKING_BACKEND }}"
# Configure the MTU to use
veth_mtu: "1440"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "warning",
"etcd_endpoints": "{{ ETCD_ENDPOINTS }}",
"etcd_key_file": "/etc/calico/ssl/calico-key.pem",
"etcd_cert_file": "/etc/calico/ssl/calico.pem",
"etcd_ca_cert_file": "/etc/kubernetes/ssl/ca.pem",
"mtu": 1500,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "/root/.kube/config"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
---
# The 'calico-etcd-secrets' secret is created from the command line (kubectl create);
# refer to 'roles/calico/tasks/main.yml' for details.
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
priorityClassName: system-cluster-critical
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:{{ calico_ver }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
- name: IP_AUTODETECTION_METHOD
value: "{{ IP_AUTODETECTION_METHOD }}"
# Enable IPIP
{% if CALICO_NETWORKING_BACKEND == "brid" %}
- name: CALICO_IPV4POOL_IPIP
value: "{{ CALICO_IPV4POOL_IPIP }}"
{% endif %}
# Enable or Disable VXLAN on the default IP pool.
{% if CALICO_NETWORKING_BACKEND == "vxlan" %}
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
{% endif %}
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "{{ CLUSTER_CIDR }}"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging
- name: FELIX_LOGSEVERITYSCREEN
value: "warning"
- name: FELIX_HEALTHENABLED
value: "true"
# Set Kubernetes NodePorts: If services do use NodePorts outside Calico's expected range,
# Calico will treat traffic to those ports as host traffic instead of pod traffic.
- name: FELIX_KUBENODEPORTRANGES
value: "{{ NODE_PORT_RANGE.split('-')[0] }}:{{ NODE_PORT_RANGE.split('-')[1] }}"
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
host: localhost
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
{% if CALICO_NETWORKING_BACKEND == "brid" %}
- -bird-ready
{% endif %}
- -felix-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- mountPath: /calico-secrets
name: etcd-certs
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:{{ calico_ver }}
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /calico-secrets
name: etcd-certs
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: {{ bin_dir }}
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0400
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
k8s-app: calico-kube-controllers
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
priorityClassName: system-cluster-critical
nodeSelector:
beta.kubernetes.io/os: linux
# The controllers must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:{{ calico_ver }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: policy,namespace,serviceaccount,workloadendpoint,node
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /calico-secrets
name: etcd-certs
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
volumes:
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0400
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- nodes
- serviceaccounts
verbs:
- watch
- list
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-node
rules:
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system

View File

@@ -1,491 +0,0 @@
# Calico Version {{ calico_ver }}
# https://docs.projectcalico.org/{{ calico_ver_main }}/releases#{{ calico_ver }}
# This manifest includes the following component versions:
# calico/node:{{ calico_ver }}
# calico/cni:{{ calico_ver }}
# calico/kube-controllers:{{ calico_ver }}
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "{{ ETCD_ENDPOINTS }}"
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"
# Configure the Calico backend to use.
calico_backend: "{{ CALICO_NETWORKING_BACKEND }}"
# Configure the MTU to use
veth_mtu: "1440"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "warning",
"etcd_endpoints": "{{ ETCD_ENDPOINTS }}",
"etcd_key_file": "/etc/calico/ssl/calico-key.pem",
"etcd_cert_file": "/etc/calico/ssl/calico.pem",
"etcd_ca_cert_file": "/etc/kubernetes/ssl/ca.pem",
"mtu": 1500,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "/root/.kube/config"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
---
# The 'calico-etcd-secrets' secret is created from the command line (kubectl create);
# refer to 'roles/calico/tasks/main.yml' for details.
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
priorityClassName: system-cluster-critical
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
initContainers:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:{{ calico_ver }}
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /calico-secrets
name: etcd-certs
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:{{ calico_ver }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
- name: IP_AUTODETECTION_METHOD
value: "{{ IP_AUTODETECTION_METHOD }}"
# Enable IPIP
{% if CALICO_NETWORKING_BACKEND == "brid" %}
- name: CALICO_IPV4POOL_IPIP
value: "{{ CALICO_IPV4POOL_IPIP }}"
{% endif %}
# Enable or Disable VXLAN on the default IP pool.
{% if CALICO_NETWORKING_BACKEND == "vxlan" %}
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
{% endif %}
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "{{ CLUSTER_CIDR }}"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging
- name: FELIX_LOGSEVERITYSCREEN
value: "warning"
- name: FELIX_HEALTHENABLED
value: "true"
# Set Kubernetes NodePorts: If services do use NodePorts outside Calico's expected range,
# Calico will treat traffic to those ports as host traffic instead of pod traffic.
- name: FELIX_KUBENODEPORTRANGES
value: "{{ NODE_PORT_RANGE.split('-')[0] }}:{{ NODE_PORT_RANGE.split('-')[1] }}"
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
host: localhost
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
{% if CALICO_NETWORKING_BACKEND == "brid" %}
- -bird-ready
{% endif %}
- -felix-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- mountPath: /calico-secrets
name: etcd-certs
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: {{ bin_dir }}
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0400
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
k8s-app: calico-kube-controllers
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
priorityClassName: system-cluster-critical
nodeSelector:
beta.kubernetes.io/os: linux
# The controllers must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:{{ calico_ver }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: policy,namespace,serviceaccount,workloadendpoint,node
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /calico-secrets
name: etcd-certs
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
volumes:
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0400
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
rules:
# Pods are monitored for changing labels.
# The node controller monitors Kubernetes nodes.
# Namespace and serviceaccount labels are used for policy.
- apiGroups:
- ""
resources:
- pods
- nodes
- namespaces
- serviceaccounts
verbs:
- watch
- list
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---

View File

@@ -198,7 +198,7 @@ spec:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.8.8-1
image: easzlab.io.local:5000/calico/cni:v3.8.8-1
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
@@ -237,7 +237,7 @@ spec:
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.8.8
image: easzlab.io.local:5000/calico/pod2daemon-flexvol:v3.8.8
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
@@ -248,7 +248,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.8.8-1
image: easzlab.io.local:5000/calico/node:v3.8.8-1
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
@@ -465,7 +465,7 @@ spec:
hostNetwork: true
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:v3.8.8
image: easzlab.io.local:5000/calico/kube-controllers:v3.8.8
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS

View File

@@ -20,43 +20,6 @@
path: "/sys/fs/bpf"
state: "mounted"
# [optional] push offline docker images; execution errors can be ignored
- block:
- name: check whether offline cilium images have been downloaded
command: "ls {{ base_dir }}/down"
register: download_info
connection: local
run_once: true
- name: try to push offline docker images (failures can be ignored)
copy: src={{ base_dir }}/down/{{ item }} dest=/opt/kube/images/{{ item }}
when: 'item in download_info.stdout'
with_items:
- "pause.tar"
- "{{ cilium_offline }}"
ignore_errors: true
- name: get the push status of offline cilium images
command: "ls /opt/kube/images"
register: image_info
# if offline images exist in the directory, import them onto the node
- name: import offline cilium images (failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ item }}"
with_items:
- "pause.tar"
- "{{ cilium_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'docker'"
- name: import offline cilium images (failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ item }}"
with_items:
- "pause.tar"
- "{{ cilium_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'containerd'"
- name: configure the cilium DaemonSet yaml file
template: src=cilium.yaml.j2 dest={{ cluster_dir }}/yml/cilium.yaml
tags: reconf

View File

@@ -1,19 +1,3 @@
- name: try to push the offline coredns image (failures can be ignored)
copy: src={{ base_dir }}/down/{{ coredns_offline }} dest=/opt/kube/images/{{ coredns_offline }}
when: 'coredns_offline in download_info.stdout'
- name: get the push status of the offline coredns image
command: "ls /opt/kube/images"
register: image_info
- name: import the offline coredns image (failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ coredns_offline }}"
when: 'coredns_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
- name: import the offline coredns image (failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ coredns_offline }}"
when: 'coredns_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
- name: prepare the DNS deployment file
template: src=dns/coredns.yaml.j2 dest={{ cluster_dir }}/yml/coredns.yaml
run_once: true

View File

@@ -1,28 +1,3 @@
- name: try to push offline dashboard images (failures can be ignored)
copy: src={{ base_dir }}/down/{{ item }} dest=/opt/kube/images/{{ item }}
when: 'item in download_info.stdout'
with_items:
- "{{ dashboard_offline }}"
- "{{ metricsscraper_offline }}"
- name: get the push status of offline dashboard images
command: "ls /opt/kube/images"
register: image_info
- name: import offline dashboard images (docker)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ item }}"
with_items:
- "{{ dashboard_offline }}"
- "{{ metricsscraper_offline }}"
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'docker'"
- name: import offline dashboard images (containerd)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ item }}"
with_items:
- "{{ dashboard_offline }}"
- "{{ metricsscraper_offline }}"
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'containerd'"
- name: prepare some dirs
file: name={{ cluster_dir }}/yml/dashboard state=directory
run_once: true

View File

@@ -4,12 +4,6 @@
connection: local
run_once: true
- name: get the list of downloaded offline images
command: "ls {{ base_dir }}/down"
register: download_info
connection: local
run_once: true
- name: register the DNS_SVC_IP variable
shell: echo {{ SERVICE_CIDR }}|cut -d/ -f1|awk -F. '{print $1"."$2"."$3"."$4+2}'
register: DNS_SVC_IP
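
The pipeline above derives the cluster DNS service IP by taking the network address of SERVICE_CIDR and adding 2 to the last octet. A worked example with an assumed CIDR:

# Worked example of the DNS_SVC_IP derivation (the CIDR value is an assumption).
SERVICE_CIDR="10.68.0.0/16"
echo "$SERVICE_CIDR" | cut -d/ -f1 | awk -F. '{print $1"."$2"."$3"."$4+2}'
# -> 10.68.0.2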

View File

@@ -1,19 +1,3 @@
- name: try to push the offline metrics-server image (failures can be ignored)
copy: src={{ base_dir }}/down/{{ metricsserver_offline }} dest=/opt/kube/images/{{ metricsserver_offline }}
when: 'metricsserver_offline in download_info.stdout'
- name: get the push status of the offline metrics-server image
command: "ls /opt/kube/images"
register: image_info
- name: import the offline metrics-server image (failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ metricsserver_offline }}"
when: 'metricsserver_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
- name: import the offline metrics-server image (failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ metricsserver_offline }}"
when: 'metricsserver_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
- name: prepare the metrics-server deployment file
template: src=metrics-server/components.yaml.j2 dest={{ cluster_dir }}/yml/metrics-server.yaml
run_once: true

View File

@@ -1,19 +1,3 @@
- name: try to push the offline nfs-provisioner image (failures can be ignored)
copy: src={{ base_dir }}/down/{{ nfsprovisioner_offline }} dest=/opt/kube/images/{{ nfsprovisioner_offline }}
when: 'nfsprovisioner_offline in download_info.stdout'
- name: get the push status of the offline nfs-provisioner image
command: "ls /opt/kube/images"
register: image_info
- name: import the offline nfs-provisioner image (failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ nfsprovisioner_offline }}"
when: 'nfsprovisioner_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
- name: import the offline nfs-provisioner image (failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ nfsprovisioner_offline }}"
when: 'nfsprovisioner_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
- name: prepare the nfs-provisioner config directory
file: name={{ cluster_dir }}/yml/nfs-provisioner state=directory
run_once: true

View File

@@ -187,7 +187,7 @@ spec:
type: RuntimeDefault
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:{{ dashboardVer }}
image: easzlab.io.local:5000/kubernetesui/dashboard:{{ dashboardVer }}
ports:
- containerPort: 8443
protocol: TCP
@@ -273,7 +273,7 @@ spec:
type: RuntimeDefault
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:{{ dashboardMetricsScraperVer }}
image: easzlab.io.local:5000/kubernetesui/metrics-scraper:{{ dashboardMetricsScraperVer }}
ports:
- containerPort: 8000
protocol: TCP

View File

@@ -133,7 +133,7 @@ spec:
kubernetes.io/os: linux
containers:
- name: coredns
image: coredns/coredns:{{ corednsVer }}
image: easzlab.io.local:5000/coredns/coredns:{{ corednsVer }}
imagePullPolicy: IfNotPresent
resources:
limits:

View File

@@ -139,7 +139,7 @@ spec:
containers:
- name: node-cache
#image: k8s.gcr.io/dns/k8s-dns-node-cache:1.16.0
image: easzlab/k8s-dns-node-cache:{{ dnsNodeCacheVer }}
image: easzlab.io.local:5000/easzlab/k8s-dns-node-cache:{{ dnsNodeCacheVer }}
resources:
requests:
cpu: 25m

View File

@@ -139,7 +139,7 @@ spec:
containers:
- name: node-cache
#image: k8s.gcr.io/dns/k8s-dns-node-cache:1.16.0
image: easzlab/k8s-dns-node-cache:{{ dnsNodeCacheVer }}
image: easzlab.io.local:5000/easzlab/k8s-dns-node-cache:{{ dnsNodeCacheVer }}
resources:
requests:
cpu: 25m

View File

@@ -136,7 +136,7 @@ spec:
- --kubelet-use-node-status-port
- --metric-resolution=15s
#image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0
image: easzlab/metrics-server:{{ metricsVer }}
image: easzlab.io.local:5000/easzlab/metrics-server:{{ metricsVer }}
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3

View File

@@ -93,7 +93,7 @@ spec:
containers:
- name: nfs-client-provisioner
#image: gcr.io/k8s-staging-sig-storage/nfs-subdir-external-provisioner:v4.0.1
image: easzlab/nfs-subdir-external-provisioner:{{ nfs_provisioner_ver }}
image: easzlab.io.local:5000/easzlab/nfs-subdir-external-provisioner:{{ nfs_provisioner_ver }}
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes

View File

@@ -13,59 +13,19 @@
- loopback
- portmap
# [optional] push offline docker images; execution errors can be ignored
- block:
- name: check whether offline flannel images have been downloaded
command: "ls {{ base_dir }}/down"
register: download_info
connection: local
run_once: true
- name: try to push offline docker images (failures can be ignored)
copy: src={{ base_dir }}/down/{{ item }} dest=/opt/kube/images/{{ item }}
when: 'item in download_info.stdout'
with_items:
- "pause.tar"
- "{{ flannel_offline }}"
ignore_errors: true
- name: get the push status of offline flannel images
command: "ls /opt/kube/images"
register: image_info
# if offline images exist in the directory, import them onto the node
- name: import offline flannel images (failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ item }}"
with_items:
- "pause.tar"
- "{{ flannel_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'docker'"
- name: import offline flannel images (failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ item }}"
with_items:
- "pause.tar"
- "{{ flannel_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'containerd'"
- name: configure the flannel DaemonSet yaml file
template: src=kube-flannel.yaml.j2 dest={{ cluster_dir }}/yml/flannel.yaml
run_once: true
connection: local
# only needs to run once, on a single node
- name: deploy the flannel network
shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/flannel.yaml"
run_once: true
connection: local
# remove the pre-existing cni config
- name: remove the default cni config
file: path=/etc/cni/net.d/10-default.conf state=absent
# wait for the network plugin to deploy; duration depends on image download speed
- name: poll until flannel is running (depends on image download speed)
shell: "{{ bin_dir }}/kubectl get pod -n kube-system -o wide|grep 'flannel'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
register: pod_status
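
pause.tar no longer ships with each network plugin, so the sandbox (pause) image must also come from the registry. A sketch of the containerd side (the image tag is an assumption):

# Assumed containerd CRI setting, in /etc/containerd/config.toml:
#   [plugins."io.containerd.grpc.v1.cri"]
#     sandbox_image = "easzlab.io.local:5000/easzlab/pause:3.7"
crictl pull easzlab.io.local:5000/easzlab/pause:3.7   # illustrative pre-pull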

View File

@@ -117,26 +117,3 @@
- name: set the node role label
shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=node --overwrite"
ignore_errors: true
- block:
- name: get the list of downloaded offline images
command: "ls {{ base_dir }}/down"
register: download_info
connection: local
- name: try to push the offline dnscache image (failures can be ignored)
copy: src={{ base_dir }}/down/{{ dnscache_offline }} dest=/opt/kube/images/{{ dnscache_offline }}
when: 'dnscache_offline in download_info.stdout'
- name: get the push status of the offline dnscache image
command: "ls /opt/kube/images"
register: image_info
- name: import the offline dnscache image (failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ dnscache_offline }}"
when: 'dnscache_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
- name: import the offline dnscache image (failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ dnscache_offline }}"
when: 'dnscache_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
when: "ENABLE_LOCAL_DNS_CACHE|bool"

View File

@@ -24,43 +24,6 @@
- name: configure the kubectl plugin
template: src=kubectl-ko.j2 dest=/usr/local/bin/kubectl-ko mode=0755
# [optional] push offline images; execution errors can be ignored
- block:
- name: check whether offline kube_ovn images have been downloaded
command: "ls {{ base_dir }}/down"
register: download_info
connection: local
run_once: true
- name: try to push offline docker images (failures can be ignored)
copy: src={{ base_dir }}/down/{{ item }} dest=/opt/kube/images/{{ item }}
when: 'item in download_info.stdout'
with_items:
- "pause.tar"
- "{{ kube_ovn_offline }}"
ignore_errors: true
- name: get the push status of offline kube_ovn images
command: "ls /opt/kube/images"
register: image_info
# if offline images exist in the directory, import them onto the node
- name: import offline kube_ovn images (failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ item }}"
with_items:
- "pause.tar"
- "{{ kube_ovn_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'docker'"
- name: import offline kube_ovn images (failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ item }}"
with_items:
- "pause.tar"
- "{{ kube_ovn_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'containerd'"
# only needs to run once, on a single node
- name: deploy the kube-ovn network
shell: "{{ bin_dir }}/kubectl label node {{ OVN_DB_NODE }} kube-ovn/role=master --overwrite && \

View File

@@ -12,46 +12,6 @@
- loopback
- portmap
# [optional] push offline docker images; execution errors can be ignored
- block:
- name: check whether offline kube-router images have been downloaded
command: "ls {{ base_dir }}/down"
register: download_info
connection: local
run_once: true
- name: try to push offline docker images (failures can be ignored)
copy: src={{ base_dir }}/down/{{ item }} dest=/opt/kube/images/{{ item }}
when: 'item in download_info.stdout'
with_items:
- "pause.tar"
- "{{ kuberouter_offline }}"
- "{{ busybox_offline }}"
ignore_errors: true
- name: get the push status of offline kube-router images
command: "ls /opt/kube/images"
register: image_info
# if offline images exist in the directory, import them onto the node
- name: import offline kube-router images (failures can be ignored)
shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ item }}"
with_items:
- "pause.tar"
- "{{ kuberouter_offline }}"
- "{{ busybox_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'docker'"
- name: import offline kube-router images (failures can be ignored)
shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ item }}"
with_items:
- "pause.tar"
- "{{ kuberouter_offline }}"
- "{{ busybox_offline }}"
ignore_errors: true
when: "item in image_info.stdout and CONTAINER_RUNTIME == 'containerd'"
- name: prepare the kube-router DaemonSet config (without IPVS)
template: src=kuberouter.yaml.j2 dest={{ cluster_dir }}/yml/kube-router.yaml
run_once: true
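
After a change this broad it is worth auditing that no template still references an upstream registry. A quick heuristic, run from the repo root:

# Heuristic audit: list image references that bypass the local registry.
grep -rn 'image: ' roles/ | grep -v 'easzlab.io.local:5000' | grep -v '#image'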