remove scripts for auto-installing ingress

pull/1166/head
jin.gjm 2022-05-30 20:42:23 +08:00
parent bd0a8e8487
commit e03ec7c6f6
15 changed files with 6 additions and 719 deletions

View File

@@ -1,5 +1,7 @@
# Configuring https ingress with traefik
This document is outdated; to install the latest version, please refer to the relevant official documentation.
This document configures https ingress rules with traefik; please read [Configuring a basic ingress](ingress.md) first. Compared with the basic ingress-controller setup, an https tls certificate must additionally be configured. The main steps are as follows:
## 1. Prepare the tls certificate
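As a minimal sketch (the certificate subject, file names, and secret name here are placeholders, not from the original document), a self-signed certificate can be generated and stored as a Kubernetes tls secret that the https ingress rule then references:
``` bash
# generate a self-signed cert (replace CN with your real domain) and store it
# as a tls secret; the https ingress rule can reference this secret
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout tls.key -out tls.crt -subj "/CN=demo.example.com"
kubectl -n default create secret tls traefik-cert --key tls.key --cert tls.crt
```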

View File

@@ -1,5 +1,7 @@
## Ingress overview
This document is outdated; to install the latest version, please refer to the relevant official documentation.
Ingress is the entry point for accessing the k8s cluster from outside: it forwards users' URL requests to different services. Ingress acts as an nginx-style reverse proxy; the rules it defines are the URL routing information, and using it requires deploying an `Ingress controller` (such as [traefik](https://github.com/containous/traefik), [ingress-nginx](https://github.com/kubernetes/ingress-nginx), etc.). The `Ingress controller` watches ingress and service changes through the apiserver, configures load balancing according to the rules, and provides the access entry point, thereby achieving service discovery. (A minimal Ingress manifest is sketched below.)
- Without ingress configured
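For illustration only (the names and host are hypothetical, and this sketch uses the current networking.k8s.io/v1 schema rather than the older API this document was written against), a minimal Ingress rule mapping a URL to a service looks like:
``` bash
# sketch: route requests for demo.example.com to a backend service demo-svc;
# the ingress controller watches this object via the apiserver and programs
# its load-balancing rules accordingly
cat > demo-ingress.yaml << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: demo
spec:
  rules:
  - host: demo.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: demo-svc
            port:
              number: 80
EOF
kubectl apply -f demo-ingress.yaml
```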

View File

@@ -1,5 +1,7 @@
# metallb network load balancer
This document is outdated; the content below is kept as an introduction only. For installation, please refer to the latest official documentation.
`Metallb` implements a `Kubernetes Load-balancer` on self-hosted hardware (i.e. outside public clouds). It was open-sourced by a `google` team and is well worth a recommendation! Project [github page](https://github.com/google/metallb).
## metallb overview
@@ -15,96 +17,3 @@
- Address allocation: request a range of ip addresses from your network administrator. In layer2 mode the range must be in the same subnet (the same layer-2 domain) as the node addresses; bgp mode has no such restriction.
- External announcement: layer2 mode uses the arp protocol, announcing an additional loadbalancer ip on a node's mac (multiple ips sharing one mac); in bgp mode, nodes use the bgp protocol to establish neighbor sessions with external network devices and advertise the loadbalancer address range to the external network.
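For example, the layer2 arp announcement can be observed from another host on the same segment once an address has been assigned (the address here is taken from the example pool used later in this document):
``` bash
# after MetalLB assigns 192.168.1.240 in layer2 mode, a host on the same
# layer-2 segment should resolve it to the MAC of the elected speaker node
# (the same MAC also answers for the node's own ip)
arping -c 3 192.168.1.240
ip neigh show | grep 192.168.1.240
```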
## kubeasz integrated installation of metallb
Because bgp mode requires support from an external router, layer2 mode is used here; to use bgp mode instead, adjust roles/cluster-addon/templates/metallb/bgp.yaml.j2 accordingly.
- 1. Edit the relevant settings in the roles/cluster-addon/defaults/main.yml configuration file:
``` bash
# metallb auto-install
metallb_install: "yes"
# mode selection: layer-2 "layer2" or layer-3 "bgp"
metallb_protocol: "layer2"
metallb_offline: "metallb_v0.7.3.tar"
metallb_vip_pool: "192.168.1.240/29"    # choose a range in the same subnet as the node addresses
```
- 2. Run the installation with `ansible-playbook 07.cluster-addon.yml`. The controller handles centralized loadbalancer address management and service monitoring, while the speaker announces each node's loadbalancer addresses to the outside (via the arp or bgp protocol). Note: **the speaker runs as a DaemonSet and is only scheduled onto nodes labeled node-role.kubernetes.io/metallb-speaker=true**, so you can choose which nodes act as speakers (pick nodes with good network performance); label them as sketched below.
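A sketch of labeling the speaker nodes and confirming the result (node addresses taken from this example cluster):
``` bash
# only labeled nodes receive a speaker pod
kubectl label nodes 192.168.1.43 node-role.kubernetes.io/metallb-speaker=true
kubectl label nodes 192.168.1.44 node-role.kubernetes.io/metallb-speaker=true
# confirm the labels landed
kubectl get nodes -l node-role.kubernetes.io/metallb-speaker=true
```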
- 3. Verify the metallb-related pods:
``` bash
$ kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.1.41 Ready,SchedulingDisabled master 4h v1.11.3
192.168.1.42 Ready node 4h v1.11.3
192.168.1.43 Ready metallb-speaker,node 4h v1.11.3
192.168.1.44 Ready metallb-speaker,node 4h v1.11.3
$ kubectl get pod -n metallb-system
NAME READY STATUS RESTARTS AGE
controller-9c57dbd4-798nb 1/1 Running 0 4h
speaker-9rjmk 1/1 Running 0 4h
speaker-n79l4 1/1 Running 0 4h
```
- 4. Create a test application to verify loadbalancer address allocation:
``` bash
# create the test application
$ cat > test-nginx.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx3
spec:
  selector:
    matchLabels:
      app: nginx3
  template:
    metadata:
      labels:
        app: nginx3
    spec:
      containers:
      - name: nginx3
        image: nginx:1
        ports:
        - name: http
          containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx3
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx3
  type: LoadBalancer
EOF
$ kubectl apply -f test-nginx.yaml
# check the assigned loadbalancer address; output like the following indicates success
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.68.0.1 <none> 443/TCP 5h
nginx3 LoadBalancer 10.68.82.227 192.168.1.240 80:38702/TCP 1m
```
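As a quick end-to-end check, the nginx welcome page should now be reachable from outside the cluster via the assigned address:
``` bash
# 192.168.1.240 is the EXTERNAL-IP MetalLB assigned to nginx3 above
curl -I http://192.168.1.240
```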
- 5. Verify using the loadbalancer to expose the ingress service address. In the [ingress document](ingress.md) we previously used the nodeport service type; now we can conveniently use the loadbalancer type instead and bind whatever domain names you want to the loadbalancer address (192.168.1.241).
``` bash
# switch traefik-ingress to the LoadBalancer service type
$ sed -i 's/NodePort$/LoadBalancer/g' /etc/ansible/manifests/ingress/traefik/traefik-ingress.yaml
# create traefik-ingress
$ kubectl apply -f /etc/ansible/manifests/ingress/traefik/traefik-ingress.yaml
# verify
$ kubectl get svc --all-namespaces |grep traefik
kube-system traefik-ingress-service LoadBalancer 10.68.163.243 192.168.1.241 80:23456/TCP,8080:37088/TCP 1m
```
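With traefik now exposed on 192.168.1.241, a domain pointed at that address (via DNS or /etc/hosts; the host name below is hypothetical) can be tested directly on port 80:
``` bash
# send a request whose Host header matches your ingress rule
curl -H 'Host: test.example.com' http://192.168.1.241
```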

View File

@@ -190,11 +190,6 @@ dashboard_install: "yes"
dashboardVer: "__dashboard__"
dashboardMetricsScraperVer: "__dash_metrics__"
# ingress auto-install
ingress_install: "no"
ingress_backend: "traefik"
traefik_chart_ver: "__traefik_chart__"
# prometheus auto-install
prom_install: "no"
prom_namespace: "monitor"
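With these switches removed, kubeasz no longer auto-installs an ingress controller; a sketch of installing traefik yourself from the upstream chart instead (repository URL as published by the traefik-helm-chart project):
``` bash
# install traefik from the official helm chart; customize with -f values.yaml as needed
helm repo add traefik https://traefik.github.io/charts
helm repo update
helm install traefik traefik/traefik -n kube-system
```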

ezctl
View File

@@ -155,7 +155,6 @@ function new() {
nfsProvisionerVer=$(grep 'nfsProvisionerVer=' ezdown|cut -d'=' -f2)
pauseVer=$(grep 'pauseVer=' ezdown|cut -d'=' -f2)
promChartVer=$(grep 'promChartVer=' ezdown|cut -d'=' -f2)
traefikChartVer=$(grep 'traefikChartVer=' ezdown|cut -d'=' -f2)
harborVer=$(grep 'HARBOR_VER=' ezdown|cut -d'=' -f2)
registryMirror=true
@@ -174,7 +173,6 @@
-e "s/__dash_metrics__/$dashboardMetricsScraperVer/g" \
-e "s/__nfs_provisioner__/$nfsProvisionerVer/g" \
-e "s/__prom_chart__/$promChartVer/g" \
-e "s/__traefik_chart__/$traefikChartVer/g" \
-e "s/__harbor__/$harborVer/g" \
-e "s/^ENABLE_MIRROR_REGISTRY.*$/ENABLE_MIRROR_REGISTRY: $registryMirror/g" \
-e "s/__metrics__/$metricsVer/g" "clusters/$1/config.yml"

ezdown
View File

@@ -35,7 +35,6 @@ export ciliumVer=v1.4.1
export kubeRouterVer=v0.3.1
export kubeOvnVer=v1.5.3
export promChartVer=12.10.6
export traefikChartVer=10.3.0
function usage() {
echo -e "\033[33mUsage:\033[0m ezdown [options] [args]"

View File

@@ -1,37 +0,0 @@
# https://github.com/traefik/traefik-helm-chart
- block:
  - name: Create customized settings for the traefik chart
    template: src=traefik/values.yaml.j2 dest={{ cluster_dir }}/yml/traefik-values.yaml

  - name: Install the traefik chart {{ traefik_chart_ver }} with helm
    shell: "{{ base_dir }}/bin/helm install -n kube-system traefik \
        -f {{ cluster_dir }}/yml/traefik-values.yaml \
        {{ base_dir }}/roles/cluster-addon/files/traefik-{{ traefik_chart_ver }}.tgz"
  run_once: true
  connection: local
  when: 'ingress_backend == "traefik"'
#- block:
#  - block:
#    - name: Try to push the offline nginx-ingress image (failures can be ignored)
#      copy: src={{ base_dir }}/down/{{ nginx_ingress_offline }} dest=/opt/kube/images/{{ nginx_ingress_offline }}
#      when: 'nginx_ingress_offline in download_info.stdout'
#
#    - name: Check whether the offline nginx_ingress image was pushed
#      command: "ls /opt/kube/images"
#      register: image_info
#
#    - name: Load the offline nginx_ingress image (failures can be ignored)
#      shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ nginx_ingress_offline }}"
#      when: 'nginx_ingress_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
#
#    - name: Load the offline nginx_ingress image (failures can be ignored)
#      shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ nginx_ingress_offline }}"
#      when: 'nginx_ingress_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
#
#  - name: Create the nginx_ingress deployment
#    shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/ingress/nginx-ingress/nginx-ingress.yaml"
#    connection: local
#    run_once: true
#  when: 'ingress_backend == "nginx-ingress"'

View File

@@ -31,45 +31,8 @@
- import_tasks: dashboard.yml
  when: '"kubernetes-dashboard" not in pod_info.stdout and dashboard_install == "yes"'

- import_tasks: ingress.yml
  when: '"traefik" not in pod_info.stdout and ingress_install == "yes"'

- import_tasks: prometheus.yml
  when: '"kube-prometheus-operator" not in pod_info.stdout and prom_install == "yes"'

- import_tasks: nfs-provisioner.yml
  when: '"nfs-client-provisioner" not in pod_info.stdout and nfs_provisioner_install == "yes"'
#- block:
#  - block:
#    - name: Try to push the offline metallb image (failures can be ignored)
#      copy: src={{ base_dir }}/down/{{ metallb_offline }} dest=/opt/kube/images/{{ metallb_offline }}
#      when: 'metallb_offline in download_info.stdout'
#
#    - name: Check whether the offline metallb image was pushed
#      command: "ls /opt/kube/images"
#      register: image_info
#
#    - name: Load the offline metallb image (failures can be ignored)
#      shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ metallb_offline }}"
#      when: 'metallb_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
#
#    - name: Load the offline metallb image (failures can be ignored)
#      shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ metallb_offline }}"
#      when: 'metallb_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
#
#  - name: Generate the metallb manifests
#    template: src=metallb/{{ item }}.j2 dest={{ cluster_dir }}/yml/{{ item }}
#    with_items:
#    - "metallb.yaml"
#    - "{{ metallb_protocol }}.yaml"
#    run_once: true
#    connection: local
#
#  - name: Create the metallb controller deployment
#    shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/metallb.yaml && \
#        {{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/{{ metallb_protocol }}.yaml"
#    run_once: true
#    connection: local
#  when: '"metallb" not in pod_info.stdout and metallb_install == "yes"'
#  ignore_errors: true

View File

@@ -1,113 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    # The peers section tells MetalLB what BGP routers to connect too. There
    # is one entry for each router you want to peer with.
    peers:
    - # The target IP address for the BGP session.
      peer-address: 10.0.0.1
      # The BGP AS number that MetalLB expects to see advertised by
      # the router.
      peer-asn: 64512
      # The BGP AS number that MetalLB should speak as.
      my-asn: 64512
      # (optional) the TCP port to talk to. Defaults to 179, you shouldn't
      # need to set this in production.
      peer-port: 179
      # (optional) The proposed value of the BGP Hold Time timer. Refer to
      # BGP reference material to understand what setting this implies.
      hold-time: 120
      # (optional) The router ID to use when connecting to this peer. Defaults
      # to the node IP address. Generally only useful when you need to peer with
      # another BGP router running on the same machine as MetalLB.
      router-id: 1.2.3.4
      # (optional) Password for TCPMD5 authenticated BGP sessions
      # offered by some peers.
      password: "yourPassword"
      # (optional) The nodes that should connect to this peer. A node
      # matches if at least one of the node selectors matches. Within
      # one selector, a node matches if all the matchers are
      # satisfied. The semantics of each selector are the same as the
      # label- and set-based selectors in Kubernetes, documented at
      # https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/.
      # By default, all nodes are selected.
      node-selectors:
      - # Match by label=value
        match-labels:
          kubernetes.io/hostname: prod-01
        # Match by 'key OP values' expressions
        match-expressions:
        - key: beta.kubernetes.io/arch
          operator: In
          values: [amd64, arm]

    # The address-pools section lists the IP addresses that MetalLB is
    # allowed to allocate, along with settings for how to advertise
    # those addresses over BGP once assigned. You can have as many
    # address pools as you want.
    address-pools:
    - # A name for the address pool. Services can request allocation
      # from a specific address pool using this name, by listing this
      # name under the 'metallb.universe.tf/address-pool' annotation.
      name: my-ip-space
      # Protocol can be used to select how the announcement is done.
      # Supported values are bgp and layer2.
      protocol: bgp
      # A list of IP address ranges over which MetalLB has
      # authority. You can list multiple ranges in a single pool, they
      # will all share the same settings. Each range can be either a
      # CIDR prefix, or an explicit start-end range of IPs.
      addresses:
      - 198.51.100.0/24
      - 192.168.0.150-192.168.0.200
      # (optional) If true, MetalLB will not allocate any address that
      # ends in .0 or .255. Some old, buggy consumer devices
      # mistakenly block traffic to such addresses under the guise of
      # smurf protection. Such devices have become fairly rare, but
      # the option is here if you encounter serving issues.
      avoid-buggy-ips: true
      # (optional, default true) If false, MetalLB will not automatically
      # allocate any address in this pool. Addresses can still explicitly
      # be requested via loadBalancerIP or the address-pool annotation.
      auto-assign: false
      # (optional) A list of BGP advertisements to make, when
      # protocol=bgp. Each address that gets assigned out of this pool
      # will turn into this many advertisements. For most simple
      # setups, you'll probably just want one.
      #
      # The default value for this field is a single advertisement with
      # all parameters set to their respective defaults.
      bgp-advertisements:
      - # (optional) How much you want to aggregate up the IP address
        # before advertising. For example, advertising 1.2.3.4 with
        # aggregation-length=24 would end up advertising 1.2.3.0/24.
        # For the majority of setups, you'll want to keep this at the
        # default of 32, which advertises the entire IP address
        # unmodified.
        aggregation-length: 32
        # (optional) The value of the BGP "local preference" attribute
        # for this advertisement. Only used with IBGP peers,
        # i.e. peers where peer-asn is the same as my-asn.
        localpref: 100
        # (optional) BGP communities to attach to this
        # advertisement. Communities are given in the standard
        # two-part form <asn>:<community number>. You can also use
        # alias names (see below).
        communities:
        - 64512:1
        - no-export

    # (optional) BGP community aliases. Instead of using hard to
    # read BGP community numbers in address pool advertisement
    # configurations, you can define alias names here and use those
    # elsewhere in the configuration. The "no-export" community used
    # above is defined below.
    bgp-communities:
      # no-export is a well-known BGP community that prevents
      # re-advertisement outside of the immediate autonomous system,
      # but people don't usually recognize its numerical value. :)
      no-export: 65535:65281

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: my-ip-space
      protocol: layer2
      addresses:
      - {{ metallb_vip_pool }}
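For reference, a sketch (not part of the original file) of applying the rendered result of this template by hand, using the example value `metallb_vip_pool: "192.168.1.240/29"` from the docs above:
``` bash
# the rendered ConfigMap with the jinja2 variable substituted
kubectl apply -f - << EOF
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: my-ip-space
      protocol: layer2
      addresses:
      - 192.168.1.240/29
EOF
```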

View File

@@ -1,227 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: metallb-system
  labels:
    app: metallb
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: metallb-system
  name: controller
  labels:
    app: metallb
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: metallb-system
  name: speaker
  labels:
    app: metallb
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metallb-system:controller
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["services"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
  resources: ["services/status"]
  verbs: ["update"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metallb-system:speaker
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["services", "endpoints", "nodes"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: metallb-system
  name: config-watcher
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create"]
---
## Role bindings
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metallb-system:controller
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: controller
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metallb-system:speaker
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: speaker
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: metallb-system
  name: config-watcher
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: controller
- kind: ServiceAccount
  name: speaker
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: config-watcher
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  namespace: metallb-system
  name: speaker
  labels:
    app: metallb
    component: speaker
spec:
  selector:
    matchLabels:
      app: metallb
      component: speaker
  template:
    metadata:
      labels:
        app: metallb
        component: speaker
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "7472"
    spec:
      serviceAccountName: speaker
      terminationGracePeriodSeconds: 0
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/metallb-speaker: "true"
      containers:
      - name: speaker
        image: metallb/speaker:v0.7.3
        imagePullPolicy: IfNotPresent
        args:
        - --port=7472
        - --config=config
        env:
        - name: METALLB_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        ports:
        - name: monitoring
          containerPort: 7472
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          capabilities:
            drop:
            - all
            add:
            - net_raw
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: metallb-system
  name: controller
  labels:
    app: metallb
    component: controller
spec:
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: metallb
      component: controller
  template:
    metadata:
      labels:
        app: metallb
        component: controller
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "7472"
    spec:
      serviceAccountName: controller
      terminationGracePeriodSeconds: 0
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534  # nobody
      containers:
      - name: controller
        image: metallb/controller:v0.7.3
        imagePullPolicy: IfNotPresent
        args:
        - --port=7472
        - --config=config
        ports:
        - name: monitoring
          containerPort: 7472
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - all
          readOnlyRootFilesystem: true
---

View File

@@ -1,184 +0,0 @@
# Configure the deployment
deployment:
  enabled: true
  # Can be either Deployment or DaemonSet
  kind: DaemonSet
  replicas: 1

# Activate Pilot integration
pilot:
  enabled: false
  token: ""

# Create an IngressRoute for the dashboard
ingressRoute:
  dashboard:
    enabled: true

# Configure providers
providers:
  kubernetesCRD:
    enabled: true
    namespaces: []
    # - "default"
  kubernetesIngress:
    enabled: true
    namespaces: []
    # - "default"
    # IP used for Kubernetes Ingress endpoints
    publishedService:
      enabled: false
      # Published Kubernetes Service to copy status from. Format: namespace/servicename
      # By default this Traefik service
      # pathOverride: ""

# Add volumes to the traefik pod. The volume name will be passed to tpl.
# This can be used to mount a cert pair or a configmap that holds a config.toml file.
# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg:
# additionalArguments:
# - "--entryPoints.web.address=:80"
# - "--entryPoints.websecure.address=:443"
volumes: []
# - name: public-cert
#   mountPath: "/certs"
#   type: secret
# - name: xxx
#   mountPath: "/config"
#   type: configMap

# Additional volumeMounts to add to the Traefik container
additionalVolumeMounts: []
# For instance when using a logshipper for access logs
# - name: traefik-logs
#   mountPath: /var/log/traefik

# https://docs.traefik.io/observability/logs/
logs:
  # Traefik logs concern everything that happens to Traefik itself (startup, configuration, events, shutdown, and so on).
  general:
    # By default, the logs use a text format (common), but you can
    # also ask for the json format in the format option
    # format: json
    # By default, the level is set to ERROR. Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO.
    level: ERROR
  access:
    # To enable access logs
    enabled: false
    # By default, logs are written using the Common Log Format (CLF).
    # To write logs in JSON, use json in the format option.
    # If the given format is unsupported, the default (CLF) is used instead.
    # format: json
    # To write the logs in an asynchronous fashion, specify a bufferingSize option.
    # This option represents the number of log lines Traefik will keep in memory before writing
    # them to the selected output. In some cases, this option can greatly help performances.
    # bufferingSize: 100
    # Filtering https://docs.traefik.io/observability/access-logs/#filtering
    filters: {}
    # statuscodes: "200,300-302"
    # retryattempts: true
    # minduration: 10ms
    # Fields
    # https://docs.traefik.io/observability/access-logs/#limiting-the-fieldsincluding-headers
    fields:
      general:
        defaultmode: keep
        names: {}
        # Examples:
        # ClientUsername: drop
      headers:
        defaultmode: drop
        names: {}
        # Examples:
        # User-Agent: redact
        # Authorization: drop
        # Content-Type: keep

globalArguments:
- "--global.checknewversion"

# Configure ports
ports:
  traefik:
    port: 9000
    expose: true
    exposedPort: 9000
  web:
    hostPort: 80
    # Port Redirections
    # Added in 2.2, you can make permanent redirects via entrypoints.
    # https://docs.traefik.io/routing/entrypoints/#redirection
    # redirectTo: websecure
  websecure:
    hostPort: 443
    # Set TLS at the entrypoint
    # https://doc.traefik.io/traefik/routing/entrypoints/#tls
    tls:
      enabled: false
      # this is the name of a TLSOption definition
      options: ""
      certResolver: ""
      domains: []
      # - main: example.com
      #   sans:
      #   - foo.example.com
      #   - bar.example.com

# Options for the main traefik service, where the entrypoints traffic comes from.
service:
  enabled: true
  type: ClusterIP

# If hostNetwork is true, runs traefik in the host network namespace
hostNetwork: false

rbac:
  enabled: true

resources: {}
# requests:
#   cpu: "100m"
#   memory: "50Mi"
# limits:
#   cpu: "300m"
#   memory: "150Mi"

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/role
          operator: In
          values:
          - node

nodeSelector: {}

# Set the container security context
# To run the container with ports below 1024 this will need to be adjust to run as root
securityContext:
  capabilities:
    drop: [ALL]
  readOnlyRootFilesystem: true
  runAsGroup: 65532
  runAsNonRoot: true
  runAsUser: 65532

podSecurityContext:
  fsGroup: 65532

readinessProbe:
  httpGet:
    path: /ping
    port: 9000
  failureThreshold: 1
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 2

livenessProbe:
  httpGet:
    path: /ping
    port: 9000
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 2
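With `ingressRoute.dashboard.enabled: true` and the `traefik` entrypoint exposed on port 9000 as above, one way to reach the dashboard is a port-forward to the chart's service (release `traefik` in `kube-system`, as installed by the removed ingress.yml task):
``` bash
kubectl -n kube-system port-forward svc/traefik 9000:9000
# then browse http://127.0.0.1:9000/dashboard/ (the trailing slash matters)
```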

View File

@@ -8,11 +8,3 @@ dashboard_offline: "dashboard_{{ dashboardVer }}.tar"
metricsscraper_offline: "metrics-scraper_{{ dashboardMetricsScraperVer }}.tar"
nfsprovisioner_offline: "nfs-provisioner_{{ nfs_provisioner_ver }}.tar"
# metallb auto-install
#metallb_install: "no"
#metallbVer: "v0.7.3"
# mode selection: layer-2 "layer2" or layer-3 "bgp"
#metallb_protocol: "layer2"
#metallb_offline: "metallb_{{ metallbVer }}.tar"
#metallb_vip_pool: "192.168.1.240/29"
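Since these options are commented out and the playbook integration above was removed, MetalLB is now expected to be installed per its official documentation; one possible sketch using the upstream helm chart (repository URL as published by the MetalLB project):
``` bash
helm repo add metallb https://metallb.github.io/metallb
helm repo update
helm install metallb metallb/metallb -n metallb-system --create-namespace
```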