commit fab3b241c1136665c7e825a47ec2f549f7c88a0d Author: roc Date: Fri Oct 13 13:37:29 2023 +0800 first commit diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b2d6de3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,20 @@ +# Dependencies +/node_modules + +# Production +/build + +# Generated files +.docusaurus +.cache-loader + +# Misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..0838bad --- /dev/null +++ b/Makefile @@ -0,0 +1,15 @@ +SHELL := /bin/bash + +start: + npm run start +install: + npm install +outdated: + npm outdated +init: install + git clone --depth=1 git@gitee.com:imroc/kubernetes-guide.git build +gen: + npx docusaurus build --out-dir=./build/out +push: + cd build && git add -a && git commit -m update && git push +update: gen push diff --git a/README.md b/README.md new file mode 100644 index 0000000..5db7b0f --- /dev/null +++ b/README.md @@ -0,0 +1,23 @@ +# Kubernetes 实践指南 + +本书将介绍 Kubernetes 相关实战经验与总结,助你成为一名云原生老司机 😎。 + +## 关于本书 + +本书为电子书形式,内容为本人多年的云原生与 Kubernetes 实战经验进行系统性整理的结果,不废话,纯干货。 + +## 在线阅读 + +地址:https://imroc.cc/kubernetes + +## 评论与互动 + +本书已集成 [giscus](https://giscus.app/zh-CN) 评论系统,欢迎对感兴趣的文章进行评论与交流。 + +## 贡献 + +本书使用 [docusaurus](https://docusaurus.io/) 构建,已集成自动构建和发布,欢迎 Fork 并 PR 来贡献干货内容 (点击左下角 `编辑此页` 按钮可快速修改文章)。 + +## 许可证 + +您可以使用 [署名 - 非商业性使用 - 相同方式共享 4.0 (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/deed.zh) 协议共享。 diff --git a/babel.config.js b/babel.config.js new file mode 100644 index 0000000..e00595d --- /dev/null +++ b/babel.config.js @@ -0,0 +1,3 @@ +module.exports = { + presets: [require.resolve('@docusaurus/core/lib/babel/preset')], +}; diff --git a/codeblock/hello.go b/codeblock/hello.go new file mode 100644 index 0000000..84cc991 --- /dev/null +++ b/codeblock/hello.go @@ -0,0 +1,11 @@ +package main + +import ( + "fmt" +) + +func main() { + for i := 0; i < 10; i++ { + fmt.Println("hello world", i) + } +} diff --git a/content/README.md b/content/README.md new file mode 100644 index 0000000..5db7b0f --- /dev/null +++ b/content/README.md @@ -0,0 +1,23 @@ +# Kubernetes 实践指南 + +本书将介绍 Kubernetes 相关实战经验与总结,助你成为一名云原生老司机 😎。 + +## 关于本书 + +本书为电子书形式,内容为本人多年的云原生与 Kubernetes 实战经验进行系统性整理的结果,不废话,纯干货。 + +## 在线阅读 + +地址:https://imroc.cc/kubernetes + +## 评论与互动 + +本书已集成 [giscus](https://giscus.app/zh-CN) 评论系统,欢迎对感兴趣的文章进行评论与交流。 + +## 贡献 + +本书使用 [docusaurus](https://docusaurus.io/) 构建,已集成自动构建和发布,欢迎 Fork 并 PR 来贡献干货内容 (点击左下角 `编辑此页` 按钮可快速修改文章)。 + +## 许可证 + +您可以使用 [署名 - 非商业性使用 - 相同方式共享 4.0 (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/deed.zh) 协议共享。 diff --git a/content/appendix/kubectl-cheat-sheet.md b/content/appendix/kubectl-cheat-sheet.md new file mode 100644 index 0000000..4fbbbc4 --- /dev/null +++ b/content/appendix/kubectl-cheat-sheet.md @@ -0,0 +1,139 @@ +# kubectl 速查手册 + +## 使用 kubectl get --raw + +### 获取节点 cadvisor 指标 + +```bash +kubectl get --raw=/api/v1/nodes/11.185.19.215/proxy/metrics/cadvisor + +# 查看有哪些指标名 +kubectl get --raw=/api/v1/nodes/11.185.19.215/proxy/metrics/cadvisor | grep -v "#" | awk -F '{' '{print $1}' | awk '{print $1}' | sort | uniq +``` + +### 获取节点 kubelet 指标 + +```bash +kubectl get --raw=/api/v1/nodes/11.185.19.215/proxy/metrics +``` + +### 获取 node-exporter pod 指标 + +```bash +kubectl get --raw=/api/v1/namespaces/monitoring/pods/node-exporter-n5rz2:9100/proxy/metrics +``` + +### 获取节点 summary 数据 + +```bash +kubectl get 
--raw=/api/v1/nodes/11.185.19.21/proxy/stats/summary
```

### 测试 Resource Metrics API

获取指定 namespace 下所有 pod 指标:

```bash
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/ns-prjzbsxs-1391012-production/pods/"
```

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925162846.png)

获取指定 pod 的指标:

```bash
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/ns-prjzbsxs-1391012-production/pods/mixer-engine-0"
```

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925162948.png)

## Node 相关

### 表格输出各节点占用的 podCIDR

``` bash
$ kubectl get no -o=custom-columns=INTERNAL-IP:.metadata.name,EXTERNAL-IP:.status.addresses[1].address,CIDR:.spec.podCIDR
INTERNAL-IP     EXTERNAL-IP      CIDR
10.100.12.194   152.136.146.157  10.101.64.64/27
10.100.16.11    10.100.16.11     10.101.66.224/27
```

### 表格输出各节点总可用资源 (Allocatable)

``` bash
$ kubectl get no -o=custom-columns="NODE:.metadata.name,ALLOCATABLE CPU:.status.allocatable.cpu,ALLOCATABLE MEMORY:.status.allocatable.memory"
NODE        ALLOCATABLE CPU   ALLOCATABLE MEMORY
10.0.0.2    3920m             7051692Ki
10.0.0.3    3920m             7051816Ki
```

### 输出各节点已分配资源的情况

所有种类的资源已分配情况概览:

``` bash
$ kubectl get nodes --no-headers | awk '{print $1}' | xargs -I {} sh -c "echo {} ; kubectl describe node {} | grep Allocated -A 5 | grep -ve Event -ve Allocated -ve percent -ve --;"
10.0.0.2
  Resource   Requests            Limits
  cpu        3040m (77%)         19800m (505%)
  memory     4843402752 (67%)    15054901888 (208%)
10.0.0.3
  Resource   Requests   Limits
  cpu        300m (7%)  1 (25%)
  memory     250M (3%)  2G (27%)
```

表格输出 cpu 已分配情况:

``` bash
$ kubectl get nodes --no-headers | awk '{print $1}' | xargs -I {} sh -c 'echo -ne "{}\t" ; kubectl describe node {} | grep Allocated -A 5 | grep -ve Event -ve Allocated -ve percent -ve -- | grep cpu | awk '\''{print $2$3}'\'';'
10.0.0.10   460m(48%)
10.0.0.12   235m(25%)
```

表格输出 memory 已分配情况:

``` bash
$ kubectl get nodes --no-headers | awk '{print $1}' | xargs -I {} sh -c 'echo -ne "{}\t" ; kubectl describe node {} | grep Allocated -A 5 | grep -ve Event -ve Allocated -ve percent -ve -- | grep memory | awk '\''{print $2$3}'\'';'
10.0.0.10   257460608(41%)
10.0.0.12   59242880(9%)
```

### 查看节点可用区分布情况

```bash
$ kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.failure-domain\.beta\.kubernetes\.io\/zone}{"\n"}{end}'
10.83.96.127    100004
10.83.96.132    100004
10.83.96.139    100004
10.83.96.8      100004
10.83.96.93     100004
```

## Pod 相关

### 清理 Evicted 的 pod

``` bash
kubectl get pod -o wide --all-namespaces | awk '{if($4=="Evicted"){cmd="kubectl -n "$1" delete pod "$2; system(cmd)}}'
```

### 清理非 Running 的 pod

``` bash
kubectl get pod -o wide --all-namespaces | awk '{if($4!="Running"){cmd="kubectl -n "$1" delete pod "$2; system(cmd)}}'
```

### 升级镜像

``` bash
export NAMESPACE="kube-system"
export WORKLOAD_TYPE="daemonset"
export WORKLOAD_NAME="ip-masq-agent"
export CONTAINER_NAME="ip-masq-agent"
export IMAGE="ccr.ccs.tencentyun.com/library/ip-masq-agent:v2.5.0"
```

``` bash
# 注意这里要用双引号并转义内层引号;若用单引号,$CONTAINER_NAME 与 $IMAGE 不会被展开
kubectl -n $NAMESPACE patch $WORKLOAD_TYPE $WORKLOAD_NAME --patch "{\"spec\": {\"template\": {\"spec\": {\"containers\": [{\"name\": \"$CONTAINER_NAME\",\"image\": \"$IMAGE\"}]}}}}"
```

diff --git a/content/appendix/terraform/tke-serverless.md b/content/appendix/terraform/tke-serverless.md new file mode 100644 index 0000000..2812acf --- /dev/null +++ b/content/appendix/terraform/tke-serverless.md @@ -0,0 +1,48 @@

# TKE Serverless 集群

```hcl title="main.tf"
terraform {
  required_providers {
    tencentcloud = {
      source  = "tencentcloudstack/tencentcloud"
      version = "1.80.4"
    }
  }
}

provider "tencentcloud" {
  secret_id  = "************************************" # 云 API 密钥 SecretId
  secret_key = "********************************" # 云 API 密钥 SecretKey
  region     = "ap-shanghai" # 地域,完整可用地域列表参考: https://cloud.tencent.com/document/product/213/6091
}

data "tencentcloud_vpc_instances" "myvpc" {
  name = "myvpc" # 指定 VPC 名称
}

data "tencentcloud_vpc_subnets" "mysubnet" {
  vpc_id = data.tencentcloud_vpc_instances.myvpc.instance_list.0.vpc_id
  name   = "mysubnet" # 指定子网名称
}

resource "tencentcloud_eks_cluster" "myserverless" {
  cluster_name = "roc-test-serverless" # 指定 serverless 集群名称
  k8s_version  = "1.24.4" # 指定 serverless 集群版本

  public_lb {
    enabled          = true # 打开公网访问 (kubectl 远程操作集群)
    allow_from_cidrs = ["0.0.0.0/0"]
  }

  # 注意这里引用的名称需与上面定义的 data 名称一致 (myvpc)
  vpc_id = data.tencentcloud_vpc_instances.myvpc.instance_list.0.vpc_id
  subnet_ids = [
    data.tencentcloud_vpc_subnets.mysubnet.instance_list.0.subnet_id
  ]
  cluster_desc        = "roc test cluster" # 集群描述
  service_subnet_id   = data.tencentcloud_vpc_subnets.mysubnet.instance_list.0.subnet_id
  enable_vpc_core_dns = true
  need_delete_cbs     = true
}
```

diff --git a/content/appendix/terraform/tke-vpc-cni.md b/content/appendix/terraform/tke-vpc-cni.md new file mode 100644 index 0000000..d021b29 --- /dev/null +++ b/content/appendix/terraform/tke-vpc-cni.md @@ -0,0 +1,154 @@

# TKE 集群(VPC-CNI)

```hcl title="main.tf"
terraform {
  required_providers {
    # highlight-next-line
    tencentcloud = {
      source  = "tencentcloudstack/tencentcloud"
      version = "1.81.24"
    }
  }
}

variable "secret_id" {
  default = "************************************" # 替换 secret id
}

variable "secret_key" {
  default = "********************************" # 替换 secret key
}

variable "region" {
  default = "ap-shanghai"
}

provider "tencentcloud" {
  secret_id  = var.secret_id # 云 API 密钥 SecretId
  secret_key = var.secret_key # 云 API 密钥 SecretKey
  region     = var.region # 地域,完整可用地域列表参考: https://cloud.tencent.com/document/product/213/6091
}

variable "availability_zone_first" {
  default = "ap-shanghai-4" # 替换首选可用区
}

variable "availability_zone_second" {
  default = "ap-shanghai-2" # 替换备选可用区
}

variable "default_instance_type" {
  default = "S5.MEDIUM4"
}

variable "vpc_name" {
  default = "roc-test" # 替换 VPC 名称
}

variable "cluster_name" {
  default = "roc-test-cluster" # 替换集群名称
}

variable "image_id" {
  default = "img-1tmhysjj" # TencentOS Server 3.2 with Driver
}

variable "security_group" {
  default = "sg-616bnwjw" # 替换安全组 ID
}

variable "skey_id" {
  default = "skey-3t01mlvf" # 替换 ssh 密钥 ID
}

variable "service_cidr" {
  default = "192.168.6.0/24" # 替换 service 网段
}

data "tencentcloud_vpc_instances" "vpc" {
  name = var.vpc_name
}

data "tencentcloud_vpc_subnets" "zone_first" {
  vpc_id            = data.tencentcloud_vpc_instances.vpc.instance_list.0.vpc_id
  availability_zone = var.availability_zone_first
}

data "tencentcloud_vpc_subnets" "zone_second" {
  vpc_id            = data.tencentcloud_vpc_instances.vpc.instance_list.0.vpc_id
  availability_zone = var.availability_zone_second
}

resource "tencentcloud_kubernetes_cluster" "managed_cluster" {
  vpc_id                  = data.tencentcloud_vpc_instances.vpc.instance_list.0.vpc_id
  cluster_max_pod_num     = 256
  cluster_name            = var.cluster_name
  cluster_desc            = "roc test cluster" # 替换集群描述
  cluster_version         = "1.26.1"
  cluster_max_service_num = 256
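  # 开启集群公网访问 (便于 kubectl 远程操作集群),并为公网访问指定安全组: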
cluster_internet = true + cluster_internet_security_group = var.security_group + cluster_deploy_type = "MANAGED_CLUSTER" + + container_runtime = "containerd" + kube_proxy_mode = "ipvs" + network_type = "VPC-CNI" # 集群网络模式,GR 或 VPC-CNI,推荐用 VPC-CNI。如果用 GR,还需要设置集群网段(cluster_cidr) + service_cidr = var.service_cidr + eni_subnet_ids = [ + data.tencentcloud_vpc_subnets.zone_first.instance_list.0.subnet_id, + data.tencentcloud_vpc_subnets.zone_second.instance_list.0.subnet_id + ] + worker_config { # 集群创建时自动创建的 cvm worker 节点(非节点池),如果不需要,可以删除此代码块。 + instance_name = "roc-test" # 替换节点cvm名称 + count = 1 # 替换初始节点数量 + availability_zone = var.availability_zone_first + instance_type = var.default_instance_type + + system_disk_type = "CLOUD_PREMIUM" + system_disk_size = 50 + internet_charge_type = "TRAFFIC_POSTPAID_BY_HOUR" + internet_max_bandwidth_out = 0 # 节点是否需要公网带宽,0 为不需要,1 为需要。 + public_ip_assigned = false + security_group_ids = [var.security_group] + subnet_id = data.tencentcloud_vpc_subnets.zone_first.instance_list.0.subnet_id + + enhanced_security_service = false + enhanced_monitor_service = false + key_ids = [var.skey_id] + img_id = var.image_id + } +} + +# 集群初始化时自动创建的节点池,如果不需要,可删除此代码块 +resource "tencentcloud_kubernetes_node_pool" "mynodepool" { + name = "roc-test-pool" # 替换节点池名称 + cluster_id = tencentcloud_kubernetes_cluster.managed_cluster.id + max_size = 6 # 最大节点数量 + min_size = 0 # 最小节点数量 + vpc_id = data.tencentcloud_vpc_instances.vpc.instance_list.0.vpc_id + subnet_ids = [data.tencentcloud_vpc_subnets.zone_first.instance_list.0.subnet_id] + retry_policy = "INCREMENTAL_INTERVALS" + desired_capacity = 2 # 节点池的期望节点数量 + enable_auto_scale = false + multi_zone_subnet_policy = "EQUALITY" + node_os = "tlinux3.1x86_64" + delete_keep_instance = false + + auto_scaling_config { + instance_type = var.default_instance_type + system_disk_type = "CLOUD_PREMIUM" + system_disk_size = "50" + orderly_security_group_ids = [var.security_group] + + instance_charge_type = "SPOTPAID" + spot_instance_type = "one-time" + spot_max_price = "1000" + public_ip_assigned = false + + key_ids = [var.skey_id] + enhanced_security_service = false + enhanced_monitor_service = false + } +} +``` + diff --git a/content/appendix/yaml.md b/content/appendix/yaml.md new file mode 100644 index 0000000..8f78fa7 --- /dev/null +++ b/content/appendix/yaml.md @@ -0,0 +1,267 @@ +# 实用 YAML + +## RBAC 相关 + +### 给 roc 授权 test 命名空间所有权限,istio-system 命名空间的只读权限 + +```yaml +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: admin + namespace: test +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] + +--- + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: admin-to-roc + namespace: test +subjects: + - kind: User + name: roc + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: Role + name: admin + apiGroup: rbac.authorization.k8s.io + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: readonly + namespace: istio-system +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["get", "watch", "list"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: readonly-to-roc + namespace: istio-system +subjects: + - kind: User + name: roc + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: Role + name: readonly + apiGroup: rbac.authorization.k8s.io +``` + +### 给 roc 授权整个集群的只读权限 + +```yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: readonly +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: 
["get", "watch", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: readonly-to-roc +subjects: + - kind: User + name: roc + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: readonly + apiGroup: rbac.authorization.k8s.io +``` + +### 给 manager 用户组里所有用户授权 secret 读权限 + +``` yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: secret-reader +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-secrets-global +subjects: +- kind: Group + name: manager + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: secret-reader + apiGroup: rbac.authorization.k8s.io +``` + +### 给 roc 授权集群只读权限 (secret读权限除外) + +secret 读权限比较敏感,不要轻易放开,k8s 的 Role/ClusterRole 没有提供类似 "某资源除外" 的能力,secret 在 core group 下,所以只排除 secret 读权限的话需要列举其它所有 core 下面的资源,另外加上其它所有可能的 group 所有资源(包括CRD): + +```yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: readonly +rules: +- apiGroups: [""] + resources: + - bindings + - componentstatuses + - configmaps + - endpoints + - events + - limitranges + - namespaces + - nodes + - persistentvolumeclaims + - persistentvolumes + - pods + - podtemplates + - replicationcontrollers + - resourcequotas + - serviceaccounts + - services + verbs: ["get", "list"] +- apiGroups: + - cert-manager.io + - admissionregistration.k8s.io + - apiextensions.k8s.io + - apiregistration.k8s.io + - apps + - authentication.k8s.io + - autoscaling + - batch + - certificaterequests.cert-manager.io + - certificates.cert-manager.io + - certificates.k8s.io + - cloud.tencent.com + - coordination.k8s.io + - discovery.k8s.io + - events.k8s.io + - extensions + - install.istio.io + - metrics.k8s.io + - monitoring.coreos.com + - networking.istio.io + - node.k8s.io + - policy + - rbac.authorization.k8s.io + - scheduling.k8s.io + - security.istio.io + - storage.k8s.io + resources: ["*"] + verbs: [ "get", "list" ] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: roc +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: readonly +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: roc +``` + +> 可以借助 `kubectl api-resources -o name` 来列举。 + +### 限制 ServiceAccount 权限 + +授权 `build-robot` 这个 ServiceAccount 读取 build 命名空间中 Pod 的信息和 log 的权限: + +``` yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: build-robot + namespace: build + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: build + name: pod-reader +rules: +- apiGroups: [""] + resources: ["pods", "pods/log"] + verbs: ["get", "list"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: read-pods + namespace: build +subjects: +- kind: ServiceAccount + name: build-robot + namespace: build +roleRef: + kind: Role + name: pod-reader + apiGroup: rbac.authorization.k8s.io +``` + +### ServiceAccount 最高权限 + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-admin + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-admin +rules: +- apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-admin +subjects: +- kind: ServiceAccount + name: cluster-admin + namespace: 
kube-system +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +``` \ No newline at end of file diff --git a/content/best-practices/autoscaling/hpa-velocity.md b/content/best-practices/autoscaling/hpa-velocity.md new file mode 100644 index 0000000..7541b19 --- /dev/null +++ b/content/best-practices/autoscaling/hpa-velocity.md @@ -0,0 +1,254 @@ +# 灵活调节 HPA 扩缩容速率 +## HPA v2beta2 版本开始支持调节扩缩容速率 + +在 K8S 1.18 之前,HPA 扩容是无法调整灵敏度的: + +1. 对于缩容,由 `kube-controller-manager` 的 `--horizontal-pod-autoscaler-downscale-stabilization-window` 参数控制缩容时间窗口,默认 5 分钟,即负载减小后至少需要等 5 分钟才会缩容。 +2. 对于扩容,由 hpa controller 固定的算法、硬编码的常量因子来控制扩容速度,无法自定义。 + +这样的设计逻辑导致用户无法自定义 HPA 的扩缩容速率,而不同的业务场景对于扩容容灵敏度要求可能是不一样的,比如: + +1. 对于有流量突发的关键业务,在需要的时候应该快速扩容 (即便可能不需要,以防万一),但缩容要慢 (防止另一个流量高峰)。 +2. 处理关键数据的应用,数据量飙升时它们应该尽快扩容以减少数据处理时间,数据量降低时应尽快缩小规模以降低成本,数据量的短暂抖动导致不必要的频繁扩缩是可以接受的。 +3. 处理常规数据/网络流量的业务,不是很重要,它们可能会以一般的方式扩大和缩小规模,以减少抖动。 + +HPA 在 K8S 1.18 迎来了一次更新,在之前 v2beta2 版本上新增了扩缩容灵敏度的控制,不过版本号依然保持 v2beta2 不变。 +## 原理与误区 + +HPA 在进行扩缩容时,先是由固定的算法计算出期望副本数: + +```txt +期望副本数 = ceil[当前副本数 * (当前指标 / 期望指标)] +``` + +其中 `当前指标 / 期望指标` 的比例如果接近 1 (在容忍度范围内,默认为 0.1,即比例在 0.9~1.1 之间),则不进行伸缩,避免抖动导致频繁扩缩容。 + +> 容忍度是由 `kube-controller-manager` 参数 `--horizontal-pod-autoscaler-tolerance` 决定,默认是 0.1,即 10%。 + +本文要介绍的扩缩容速率调节,不是指要调整期望副本数的算法,它并不会加大或缩小扩缩容比例或数量,仅仅是控制扩缩容的速率,实现的效果是: 控制 HPA 在 XX 时间内最大允许扩容/缩容 XX 比例/数量的 Pod。 + +## 如何使用 + +这次更新实际就是在 HPA Spec 下新增了一个 `behavior` 字段,下面有 `scaleUp` 和 `scaleDown` 两个字段分别控制扩容和缩容的行为,具体可参考 [官方 API 文档](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#hpascalingrules-v2beta2-autoscaling)。 + +使用示例: +```yaml +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: web +spec: + minReplicas: 1 + maxReplicas: 1000 + metrics: + - pods: + metric: + name: k8s_pod_rate_cpu_core_used_limit + target: + averageValue: "80" + type: AverageValue + type: Pods + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: web + behavior: # 这里是重点 + scaleDown: + stabilizationWindowSeconds: 300 # 需要缩容时,先观察 5 分钟,如果一直持续需要缩容才执行缩容 + policies: + - type: Percent + value: 100 # 允许全部缩掉 + periodSeconds: 15 + scaleUp: + stabilizationWindowSeconds: 0 # 需要扩容时,立即扩容 + policies: + - type: Percent + value: 100 + periodSeconds: 15 # 每 15s 最大允许扩容当前 1 倍数量的 Pod + - type: Pods + value: 4 + periodSeconds: 15 # 每 15s 最大允许扩容 4 个 Pod + selectPolicy: Max # 使用以上两种扩容策略中算出来扩容 Pod 数量最大的 +``` + +* 以上 `behavior` 配置是默认的,即如果不配置,会默认加上。 +* `scaleUp` 和 `scaleDown` 都可以配置1个或多个策略,最终扩缩时用哪个策略,取决于 `selectPolicy`。 +* `selectPolicy` 默认是 `Max`,即扩缩时,评估多个策略算出来的结果,最终选取扩缩 Pod 数量最多的那个策略的结果。 +* `stabilizationWindowSeconds` 是稳定窗口时长,即需要指标高于或低于阈值,并持续这个窗口的时长才会真正执行扩缩,以防止抖动导致频繁扩缩容。扩容时,稳定窗口默认为0,即立即扩容;缩容时,稳定窗口默认为5分钟。 +* `policies` 中定义扩容或缩容策略,`type` 的值可以是 `Pods` 或 `Percent`,表示每 `periodSeconds` 时间范围内,允许扩缩容的最大副本数或比例。 + + +## 场景与示例 + +下面给出一些使用场景的示例。 +### 快速扩容 + +当你的应用需要快速扩容时,可以使用类似如下的 HPA 配置: + +```yaml +behavior: + scaleUp: + policies: + - type: Percent + value: 900 + periodSeconds: 15 # 每 15s 最多允许扩容 9 倍于当前副本数 +``` + +上面的配置表示扩容时最大一次性新增当前 9 倍数量的副本数,当然也不能超过 `maxReplicas` 的限制。 + +假如一开始只有 1 个 Pod,如果遭遇流量突发,且指标持续超阈值 9 倍以上,它将以飞快的速度进行扩容,扩容时 Pod 数量变化趋势如下: + +```txt +1 -> 10 -> 100 -> 1000 +``` + +没有配置缩容策略,将等待全局默认的缩容时间窗口 (默认5分钟) 后开始缩容。 + +### 快速扩容,缓慢缩容 + +如果流量高峰过了,并发量骤降,如果用默认的缩容策略,等几分钟后 Pod 数量也会随之骤降,如果 Pod 缩容后突然又来一个流量高峰,虽然可以快速扩容,但扩容的过程毕竟还是需要一定时间的,如果流量高峰足够高,在这段时间内还是可能造成后端处理能力跟不上,导致部分请求失败。这时候我们可以为 HPA 加上缩容策略,HPA `behavior` 配置示例如下: + +```yaml +behavior: + scaleUp: + policies: + - type: Percent + value: 900 + periodSeconds: 15 # 每 15s 最多允许扩容 9 倍于当前副本数 + 
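  # 在快速扩容的基础上,新增缩容策略限制缩容速度: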
scaleDown: + policies: + - type: Pods + value: 1 + periodSeconds: 600 # 每 10 分钟最多只允许缩掉 1 个 Pod +``` + +上面示例中增加了 `scaleDown` 的配置,指定缩容时每 10 分钟才缩掉 1 个 Pod,大大降低了缩容速度,缩容时的 Pod 数量变化趋势如下: + +```txt +1000 -> … (10 min later) -> 999 +``` + +这个可以让关键业务在可能有流量突发的情况下保持处理能力,避免流量高峰导致部分请求失败。 + +### 缓慢扩容 + +如果想要你的应用不太关键,希望扩容时不要太敏感,可以让它扩容平稳缓慢一点,为 HPA 加入下面的 `behavior`: + +```yaml +behavior: + scaleUp: + policies: + - type: Pods + value: 1 + periodSeconds: 300 # 每 5 分钟最多只允许扩容 1 个 Pod +``` + +假如一开始只有 1 个 Pod,指标一直持续超阈值,扩容时它的 Pod 数量变化趋势如下: + +```txt +1 -> 2 -> 3 -> 4 +``` + +### 禁止自动缩容 + +如果应用非常关键,希望扩容后不自动缩容,需要人工干预或其它自己开发的 controller 来判断缩容条件,可以使用类型如下的 `behavior` 配置来禁止自动缩容: + +```yaml +behavior: + scaleDown: + selectPolicy: Disabled +``` + +### 延长缩容时间窗口 + +缩容默认时间窗口是 5 分钟,如果我们需要延长时间窗口以避免一些流量毛刺造成的异常,可以指定下缩容的时间窗口,`behavior` 配置示例如下: + +```yaml +behavior: + scaleDown: + stabilizationWindowSeconds: 600 # 等待 10 分钟再开始缩容 + policies: + - type: Pods + value: 5 + periodSeconds: 600 # 每 10 分钟最多只允许缩掉 5 个 Pod +``` + +上面的示例表示当负载降下来时,会等待 600s (10 分钟) 再缩容,每 10 分钟最多只允许缩掉 5 个 Pod。 + +### 延长扩容时间窗口 + +有些应用经常会有数据毛刺导致频繁扩容,而扩容出来的 Pod 其实没太大必要,反而浪费资源。比如数据处理管道的场景,需要的副本数取决于队列中的事件数量,当队列中堆积了大量事件时,我们希望可以快速扩容,但又不希望太灵敏,因为可能只是短时间内的事件堆积,即使不扩容也可以很快处理掉。 + +默认的扩容算法会在较短的时间内扩容,针对这种场景我们可以给扩容增加一个时间窗口以避免毛刺导致扩容带来的资源浪费,`behavior` 配置示例如下: + +```yaml +behavior: + scaleUp: + stabilizationWindowSeconds: 300 # 扩容前等待 5 分钟的时间窗口 + policies: + - type: Pods + value: 20 + periodSeconds: 60 # 每分钟最多只允许扩容 20 个 Pod +``` + +上面的示例表示扩容时,需要先等待 5 分钟的时间窗口,如果在这段时间内指标又降下来了就不再扩容,如果一直持续超过阈值才扩容,并且每分钟最多只允许扩容 20 个 Pod。 + +## FAQ + +### 为什么我用 v2beta2 创建的 HPA,创建后获取到的 yaml 版本是 v1 或 v2beta1? + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220728151816.png) + +这是因为 HPA 有多个 apiVersion 版本: + +```bash +kubectl api-versions | grep autoscaling +autoscaling/v1 +autoscaling/v2beta1 +autoscaling/v2beta2 +``` + +以任意一种版本创建,都可以以任意版本获取(自动转换)。 + +如果是用 kubectl 获取,kubectl 在进行 API discovery 时,会缓存 apiserver 返回的各种资源与版本信息,有些资源存在多个版本,在 get 时如果不指定版本,会使用默认版本获取,对于 HPA,默认是 v1。 + +如果是通过一些平台的界面获取,取决于平台的实现方式,比如腾讯云容器服务控制台,默认用 v2beta1 版本展示: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220728152913.png) + +如何使用 v2beta2 版本获取或编辑?指定包含版本信息的完整资源名即可: + +```bash +kubectl get horizontalpodautoscaler.v2beta2.autoscaling php-apache -o yaml +# kubectl edit horizontalpodautoscaler.v2beta2.autoscaling php-apache +``` + +### 配置快速扩容,为什么快不起来? 
+ +比如这个配置: + +```yaml +behavior: + scaleUp: + policies: + - type: Percent + value: 900 + periodSeconds: 10 +``` + +含义是允许每 10 秒最大允许扩出 9 倍于当前数量的 Pod,实测中可能发现压力已经很大了,但扩容却并不快。 + +通常原因是计算周期与指标延时: +* 期望副本数的计算有个计算周期,默认是 15 秒 (由 `kube-controller-manager` 的 `--horizontal-pod-autoscaler-sync-period` 参数决定)。 +* 每次计算时,都会通过相应的 metrics API 去获取当前监控指标的值,这个返回的值通常不是实时的,对于腾讯云容器服务而言,监控数据是每分钟上报一次;对于自建的 prometheus + prometheus-adapter 而言,监控数据的更新取决于监控数据抓取间隔,prometheus-adapter 的 `--metrics-relist-interval` 参数决定监控指标刷新周期(从 prometheus 中查询),这两部分时长之和为监控数据更新的最长时间。 + +通常都不需要 HPA 极度的灵敏,有一定的延时一般都是可以接受的。如果实在有对灵敏度特别敏感的场景,可以考虑使用 prometheus,缩小监控指标抓取间隔和 prometheus-adapter 的 `--metrics-relist-interval`。 + +## 小结 + +本文介绍了如何利用 HPA 的新特性来控制扩缩容的速率,以更好的满足各种不同场景对扩容速度的需求,也提供了常见的几种场景与配置示例,可自行根据自己需求对号入座。 + +## 参考资料 + +* [HPA 官方介绍文档](https://kubernetes.io/zh-cn/docs/tasks/run-application/horizontal-pod-autoscale/) +* [控制 HPA 扩容速度的提案](https://github.com/kubernetes/enhancements/tree/master/keps/sig-autoscaling/853-configurable-hpa-scale-velocity) \ No newline at end of file diff --git a/content/best-practices/autoscaling/hpa-with-custom-metrics.md b/content/best-practices/autoscaling/hpa-with-custom-metrics.md new file mode 100644 index 0000000..72b5617 --- /dev/null +++ b/content/best-practices/autoscaling/hpa-with-custom-metrics.md @@ -0,0 +1,312 @@ +# HPA 使用自定义指标进行伸缩 + +Kubernetes 默认提供 CPU 和内存作为 HPA 弹性伸缩的指标,如果有更复杂的场景需求,比如基于业务单副本 QPS 大小来进行自动扩缩容,可以考虑自行安装 [prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) 来实现基于自定义指标的 Pod 弹性伸缩。 + +## 实现原理 + +Kubernetes 提供了 [Custom Metrics API](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/custom-metrics-api.md) 与 [External Metrics API](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/external-metrics-api.md) 来对 HPA 的指标进行扩展,让用户能够根据实际需求进行自定义。 + +prometheus-adapter 对这两种 API 都有支持,通常使用 Custom Metrics API 就够了,本文也主要针对此 API 来实现使用自定义指标进行弹性伸缩。 + +## 前提条件 + +* 部署有 Prometheus 并做了相应的自定义指标采集。 +* 已安装 [helm](https://helm.sh/docs/intro/install/) 。 + +## 业务暴露监控指标 + +这里以一个简单的 golang 业务程序为例,暴露 HTTP 请求的监控指标: + +``` go +package main + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "net/http" + "strconv" +) + +var ( + HTTPRequests = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "httpserver_requests_total", + Help: "Number of the http requests received since the server started", + }, + []string{"status"}, + ) +) + +func init() { + prometheus.MustRegister(HTTPRequests) +} + +func main() { + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + code := 200 + switch path { + case "/test": + w.WriteHeader(200) + w.Write([]byte("OK")) + case "/metrics": + promhttp.Handler().ServeHTTP(w, r) + default: + w.WriteHeader(404) + w.Write([]byte("Not Found")) + } + HTTPRequests.WithLabelValues(strconv.Itoa(code)).Inc() + }) + http.ListenAndServe(":80", nil) +} +``` + +该示例程序暴露了 `httpserver_requests_total` 指标,记录 HTTP 的请求,通过这个指标可以计算出该业务程序的 QPS 值。 + +## 部署业务程序 + +将前面的程序打包成容器镜像,然后部署到集群,比如使用 Deployment 部署: + +``` yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: httpserver + namespace: httpserver +spec: + replicas: 1 + selector: + matchLabels: + app: httpserver + template: + metadata: + labels: + app: httpserver + spec: + containers: + - name: httpserver + image: registry.imroc.cc/test/httpserver:custom-metrics + imagePullPolicy: Always + +--- + +apiVersion: v1 +kind: Service +metadata: + 
name: httpserver
  namespace: httpserver
  labels:
    app: httpserver
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/path: "/metrics"
    prometheus.io/port: "http"
spec:
  type: ClusterIP
  ports:
    - port: 80
      protocol: TCP
      name: http
  selector:
    app: httpserver
```

## Prometheus 采集业务监控

业务部署好了,我们需要让我们的 Prometheus 去采集业务暴露的监控指标。

### 方式一: 配置 Prometheus 采集规则

在 Prometheus 的采集规则配置文件添加采集规则:

``` yaml
  - job_name: httpserver
    scrape_interval: 5s
    kubernetes_sd_configs:
      - role: endpoints
        namespaces:
          names:
            - httpserver
    relabel_configs:
      - action: keep
        source_labels:
          - __meta_kubernetes_service_label_app
        regex: httpserver
      - action: keep
        source_labels:
          - __meta_kubernetes_endpoint_port_name
        regex: http
```

### 方式二: 配置 ServiceMonitor

若已安装 prometheus-operator,则可通过创建 ServiceMonitor 的 CRD 对象配置 Prometheus。示例如下:

``` yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: httpserver
spec:
  endpoints:
    - port: http
      interval: 5s
  namespaceSelector:
    matchNames:
      - httpserver
  selector:
    matchLabels:
      app: httpserver
```

## 安装 prometheus-adapter

我们使用 helm 安装 [prometheus-adapter](https://artifacthub.io/packages/helm/prometheus-community/prometheus-adapter),安装前最重要的是确定并配置自定义指标,按照前面的示例,我们业务中使用 `httpserver_requests_total` 这个指标来记录 HTTP 请求,那么我们可以通过类似下面的 PromQL 计算出每个业务 Pod 的 QPS 监控:

```
sum(rate(httpserver_requests_total[2m])) by (pod)
```

我们需要将其转换为 prometheus-adapter 的配置,准备一个 `values.yaml`:

``` yaml
rules:
  default: false
  custom:
  - seriesQuery: 'httpserver_requests_total'
    resources:
      template: <<.Resource>>
    name:
      matches: "httpserver_requests_total"
      as: "httpserver_requests_qps" # PromQL 计算出来的 QPS 指标
    metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
prometheus:
  url: http://prometheus.monitoring.svc.cluster.local # 替换 Prometheus API 的地址 (不写端口)
  port: 9090
```

执行 helm 命令进行安装:

``` bash
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
# Helm 3
helm install prometheus-adapter prometheus-community/prometheus-adapter -f values.yaml
# Helm 2
# helm install --name prometheus-adapter prometheus-community/prometheus-adapter -f values.yaml
```

## 测试是否安装正确

如果安装正确,是可以看到 Custom Metrics API 返回了我们配置的 QPS 相关指标:

``` bash
$ kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1
{
  "kind": "APIResourceList",
  "apiVersion": "v1",
  "groupVersion": "custom.metrics.k8s.io/v1beta1",
  "resources": [
    {
      "name": "jobs.batch/httpserver_requests_qps",
      "singularName": "",
      "namespaced": true,
      "kind": "MetricValueList",
      "verbs": [
        "get"
      ]
    },
    {
      "name": "pods/httpserver_requests_qps",
      "singularName": "",
      "namespaced": true,
      "kind": "MetricValueList",
      "verbs": [
        "get"
      ]
    },
    {
      "name": "namespaces/httpserver_requests_qps",
      "singularName": "",
      "namespaced": false,
      "kind": "MetricValueList",
      "verbs": [
        "get"
      ]
    }
  ]
}
```

也能看到业务 Pod 的 QPS 值:

``` bash
$ kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1/namespaces/httpserver/pods/*/httpserver_requests_qps
{
  "kind": "MetricValueList",
  "apiVersion": "custom.metrics.k8s.io/v1beta1",
  "metadata": {
    "selfLink": "/apis/custom.metrics.k8s.io/v1beta1/namespaces/httpserver/pods/%2A/httpserver_requests_qps"
  },
  "items": [
    {
      "describedObject": {
        "kind": "Pod",
        "namespace": "httpserver",
        "name": "httpserver-6f94475d45-7rln9",
        "apiVersion": "/v1"
      },
      "metricName":
"httpserver_requests_qps", + "timestamp": "2020-11-17T09:14:36Z", + "value": "500m", + "selector": null + } + ] +} +``` + +> 上面示例 QPS 为 `500m`,表示 QPS 值为 0.5 + +## 测试 HPA + +假如我们设置每个业务 Pod 的平均 QPS 达到 50,就触发扩容,最小副本为 1 个,最大副本为1000,HPA 可以这么配置: + +``` yaml +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: httpserver + namespace: httpserver +spec: + minReplicas: 1 + maxReplicas: 1000 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: httpserver + metrics: + - type: Pods + pods: + metric: + name: httpserver_requests_qps + target: + averageValue: 50 + type: AverageValue +``` + +然后对业务进行压测,观察是否扩容: + +``` bash +$ kubectl get hpa +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +httpserver Deployment/httpserver 83933m/50 1 1000 2 18h + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +httpserver-6f94475d45-47d5w 1/1 Running 0 3m41s +httpserver-6f94475d45-7rln9 1/1 Running 0 37h +httpserver-6f94475d45-6c5xm 0/1 ContainerCreating 0 1s +httpserver-6f94475d45-wl78d 0/1 ContainerCreating 0 1s +``` + +扩容正常则说明已经实现 HPA 基于业务自定义指标进行弹性伸缩。 \ No newline at end of file diff --git a/content/best-practices/configure-healthcheck.md b/content/best-practices/configure-healthcheck.md new file mode 100644 index 0000000..0f530e8 --- /dev/null +++ b/content/best-practices/configure-healthcheck.md @@ -0,0 +1,109 @@ +# 健康检查配置 + +> 本文视频教程: [https://www.bilibili.com/video/BV16q4y1y7B9](https://www.bilibili.com/video/BV16q4y1y7B9) + +本文分享 K8S 健康检查配置的最佳实践,文末也分享配置不当的案例。 + +## Kubernetes 健康检查介绍 + +K8S 支持三种健康检查: +1. 就绪检查(`readinessProbe`): Pod启动后,如果配了就绪检查,要等就绪检查探测成功,Pod Ready 状态变为 True,允许放流量进来;在运行期间如果突然探测失败,Ready 状态变为 False,摘除流量。 +2. 存活检查(`livenessProbe`): Pod 在运行时,如果存活检查探测失败,会自动重启容器;值得注意的是,存活探测的结果不影响 Pod 的 Ready 状态,这也是许多同学可能误解的地方。 +3. 启动检查(`startupProbe`): 作用是让存活检查和就绪检查的开始探测时间延后,等启动检查成功后再开始探测,通常用于避免业务进程启动慢导致存活检查失败而被无限重启。 + +三种健康检查配置格式都是一样的,以 `readinessProbe` 为例: + +```yaml +readinessProbe: + successThreshold: 1 # 1 次探测成功就认为健康 + failureThreshold: 2 # 连续 2 次探测失败认为不健康 + periodSeconds: 3 # 3s 探测一次 + timeoutSeconds: 2 # 2s 超时还没返回成功就认为不健康 + httpGet: # 使用 http 接口方式探测,GET 请求 80 端口的 "/healthz" 这个 http 接口,响应状态码在200~399之间视为健康,否则不健康。 + port: 80 + path: "/healthz" + #exec: # 使用脚本探测,执行容器内 "/check-health.sh" 这个脚本文件,退出状态码等于0视为健康,否则不健康。 + # command: ["/check-health.sh"] + #tcp: # 使用 TCP 探测,看 9000 端口是否监听。 + # port: 9000 +``` + +## 探测结果一定要真实反应业务健康状态 + +### 首选 HTTP 探测 + +通常是推荐业务自身提供 http 探测接口,如果业务层面健康就返回 200 状态码;否则,就返回 500。 + +### 备选脚本探测 + +如果业务还不支持 http 探测接口,或者有探测接口但不是 http 协议,也可以将探测逻辑写到脚本文件里,然后配置脚本方式探测。 + +### 尽量避免 TCP 探测 + +另外,应尽量避免使用 TCP 探测,因为 TCP 探测实际就是 kubelet 向指定端口发送 TCP SYN 握手包,当端口被监听内核就会直接响应 ACK,探测就会成功: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F22%2F20230922115534.png) + +当程序死锁或 hang 死,这些并不影响端口监听,所以探测结果还是健康,流量打到表面健康但实际不健康的 Pod 上,就无法处理请求,从而引发业务故障。 + +## 所有提供服务的 container 都要加上 ReadinessProbe + +如果你的容器对外提供了服务,监听了端口,那么都应该配上 ReadinessProbe,ReadinessProbe 不通过就视为 Pod 不健康,然后会自动将不健康的 Pod 踢出去,避免将业务流量转发给异常 Pod。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F22%2F20230922115559.png) + +## 谨慎使用 LivenessProbe + +LivenessProbe 失败会重启 Pod,不要轻易使用,除非你了解后果并且明白为什么你需要它,参考 [Liveness Probes are Dangerous](https://srcco.de/posts/kubernetes-liveness-probes-are-dangerous.html) 。 + +### 探测条件要更宽松 + +如果使用 LivenessProbe,不要和 ReadinessProbe 设置成一样,需要更宽松一点,避免因抖动导致 Pod 频繁被重启。 + +通常是失败阈值 (`failureThreshold`) 设置得更大一点,避免因探测太敏感导致 Pod 很容易被重启。 + +另外如果有必要,超时时间 (`timeoutSeconds`) 和探测间隔 (`periodSeconds`) 也可以根据情况适当延长。 + +### 保护慢启动容器 + +有些应用本身可能启动慢(比如 
Java),或者用的富容器,需要起一大堆依赖,导致容器启动需要的较长,如果配置了存活检查,可能会造成启动过程中达到失败阈值被重启,如此循环,无限重启。 + +对于这类启动慢的容器,我们需要保护下,等待应用完全启动后才开始探测: + +1. 如果 K8S 版本低于 1.18,可以设置 LivenessProbe 的初始探测延时 (`initialDelaySeconds`)。 +2. 如果 K8S 版本在 1.18 及其以上,可以配置 [StartProbe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes),保证等应用完全启动后才开始探测。 + +### 避免依赖导致级联故障 + +LivenessProbe 探测逻辑里不要有外部依赖 (db, 其它 pod 等),避免抖动导致级联故障。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F22%2F20230922115617.png) + +如上图,Pod B 探测逻辑里查 DB,Pod A 探测逻辑里调用 Pod B,如果 DB 抖动,Pod B 变为不健康,Pod A 调用 Pod B 也失败,也变为不健康,从而级联故障。 + +## 反面教材 + +### 突然无限重启且流量异常 + +故障现象: Pod 突然不断重启,期间有流量进入,这部分流量异常。 + +原因: +1. Pod 之前所在节点异常,重建漂移到了其它节点去启动。 +2. Pod 重建后由于基础镜像中依赖的一个服务有问题导致启动较慢,因为同时配置了 ReadinessProbe 与 LivenessProbe,大概率是启动时所有健康检查都失败,达到 LivenessProbe 失败次数阈值,又被重启。 +3. Pod 配置了 preStop 实现优雅终止,被重启前会先执行 preStop,优雅终止的时长较长,preStop 期间 ReadinessProbe 还会继续探测。 +4. 探测方式使用的 TCP 探测,进程优雅终止过程中 TCP 探测仍然会成功(没完全退出前端口监听仍然存在),但实际此时进程已不会处理新请求了。 +5. LivenessProbe 结果不会影响 Pod Ready 状态,是否 Ready 主要取决于 ReadinessProbe 结果,由于 preStop 期间 ReadinessProbe 是成功的,Pod 就变 Ready 了。 +6. Pod Ready 但实际无法处理请求,业务就会异常。 + +总结: +1. Pod 慢启动 + 存活探测 导致被无限重启。需要延长 `initialDelaySeconds` 或 [StartProbe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes) 来保护慢启动容器。 +2. TCP 探测方式不能完全真实反应业务健康状态,导致在优雅终止过程中,ReadinessProbe 探测成功让流量放进来而业务却不会处理,导致流量异常。需要使用更好的探测方式,建议业务提供 HTTP 探活接口,使用 HTTP 探测业务真实健康状态。 + +### netstat 探测超时 + +故障现象: 探测脚本经常 2s 超时。 + +原因: 使用脚本探测,超时时间为 2s,脚本里使用了 netstat 检测端口是否存活来判断业务进程是否正常,当流量较大时,连接数多,netstat 运行所需时间就较长 (因为 netstat 会遍历 `/proc` 下每个 pid 内容来进行统计,执行时长受连接数波动所影响),所以在业务高峰时往往容易执行超时,从而探测失败。 + +总结: 这种探测方式比 TCP 探测方式更原始,强烈不推荐,参考最佳实践优化探测配置。 + diff --git a/content/best-practices/containerization/crontab-in-container.md b/content/best-practices/containerization/crontab-in-container.md new file mode 100644 index 0000000..99d3c4b --- /dev/null +++ b/content/best-practices/containerization/crontab-in-container.md @@ -0,0 +1,48 @@ +# 在容器中使用 crontab + +## 准备 crontab 配置文件 + +新建一个名为 `crontab` 的配置文件,写定时任务规则: + +```txt +* * * * * echo "Crontab is working" > /proc/1/fd/1 +``` + +> `/proc/1/fd/1` 表示输出到容器主进程的标准输出,这样我们可以利用 `kubectl logs` 来查看到执行日志。 + +## 准备 Dockerfile + +### CentOS 镜像 + +```dockerfile +FROM docker.io/centos:7 + +RUN yum -y install crontabs && rm -rf /etc/cron.*/* + +ADD crontab /etc/crontab +RUN chmod 0644 /etc/crontab +RUN crontab /etc/crontab + +CMD ["crond", "-n"] +``` + +### Ubuntu 镜像 + +```dockerfile +FROM docker.io/ubuntu:20.04 + +RUN apt-get update && apt-get install -y cron && rm -rf /etc/cron.*/* + +ADD crontab /etc/crontab +RUN chmod 0644 /etc/crontab +RUN crontab /etc/crontab + +CMD ["cron", "-f", "-l", "2"] +``` + +## 打包镜像 + +```bash +docker build -t docker.io/imroc/crontab:latest -f Dockerfile . +# podman build -t docker.io/imroc/crontab:latest -f Dockerfile . 
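# (可选) 构建完成后可运行容器验证定时任务是否生效,以下镜像名/容器名仅为示例:
# docker run -d --name crontab docker.io/imroc/crontab:latest
# docker logs -f crontab   # 正常情况下每分钟输出一次 "Crontab is working"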
```

diff --git a/content/best-practices/containerization/golang.md b/content/best-practices/containerization/golang.md new file mode 100644 index 0000000..8819368 --- /dev/null +++ b/content/best-practices/containerization/golang.md @@ -0,0 +1,47 @@

# Go 应用容器化

## 使用多阶段构建编译

可以使用 golang 的官方镜像进行编译,建议使用静态编译,因为 golang 官方镜像默认使用的基础镜像是 debian,如果使用默认的编译,会依赖一些动态链接库,当业务镜像使用了其它发行版基础镜像,且动态链接库不一样的话 (比如 alpine),就会导致程序启动时发现依赖的动态链接库找不到而无法启动:

```txt
standard_init_linux.go:211: exec user process caused "no such file or directory"
```

以下是多阶段构建静态编译 golang 程序的 Dockerfile 示例:

```Dockerfile
FROM golang:latest as builder

COPY . /build

WORKDIR /build

RUN CGO_ENABLED=0 go build -trimpath -ldflags='-s -w -extldflags=-static' -o /app

FROM ubuntu:22.10

COPY --from=builder /app /

CMD ["/app"]
```

如果希望最小化镜像,可以用空基础镜像,让镜像中只包含一个静态编译后的 go 二进制:

```Dockerfile
FROM golang:latest as builder

COPY . /build

WORKDIR /build

RUN CGO_ENABLED=0 go build -trimpath -ldflags='-s -w -extldflags=-static' -o /app

FROM scratch

COPY --from=builder /app /

CMD ["/app"]
```

> 建议 k8s 1.23 及其以上版本使用 scratch 基础镜像,即使镜像中不包含 bash 等调试工具,也可以 [使用临时容器来进行调试](https://kubernetes.io/zh-cn/docs/tasks/debug/debug-application/debug-running-pod/#ephemeral-container)。

diff --git a/content/best-practices/containerization/java.md b/content/best-practices/containerization/java.md new file mode 100644 index 0000000..be2aae9 --- /dev/null +++ b/content/best-practices/containerization/java.md @@ -0,0 +1,141 @@

# Java 应用容器化

本文介绍 Java 应用容器化相关注意事项。

## 避免低版本 JDK

JDK 低版本对容器不友好,感知不到自己在容器内:
1. 不知道被分配了多少内存,很容易造成消耗过多内存而触发 Cgroup OOM 被杀死。
2. 不知道被分配了多少 CPU,认为可用 CPU 数量就是宿主机的 CPU 数量,导致 JVM 创建过多线程,容易高负载被 Cgroup CPU 限流(throttle)。

在高版本的 JDK 中 (JDK10) 对容器进行了很好的支持,同时也 backport 到了低版本 (JDK8):
1. 如果使用的 `Oracle JDK`,确保版本大于等于 `8u191`。
2. 如果使用的 `OpenJDK`,确保版本大于等于 `8u212`。

## 常见问题

### 相同镜像在部分机器上跑有问题

* 现象: 经常会有人说,我的 java 容器镜像,在 A 机器上跑的好好的,在 B 机器上就有问题,都是用的同一个容器镜像啊。
* 根因:java 类加载的顺序问题,如果有不同版本的重复 jar 包,只会加载其中一个,并且不保证顺序。
* 解决方案:业务去掉重复的 jar 包。
* 类似 case 的分析文章:[关于Jar加载顺序的问题分析](https://www.jianshu.com/p/dcad5330b06f)

### java 默认线程池的线程数问题

* 现象:java 应用创建大量线程。
* 根因:低版本 jdk,无法正确识别 cgroup 的 limit,所以 cpu 的数量及内存的大小是直接从宿主机获取的,跟 cgroup 里的 limit 不一致。
* 解决方案:业务升级 jdk 版本。

## 使用 Maven 构建 Java 容器镜像

本文介绍如何在容器环境将 Maven 项目构建成 Java 容器镜像,完整示例源码请参考 Github [maven-docker-example](https://github.com/imroc/maven-docker-example)。

### pom.xml

以下是 maven `pom.xml` 示例:

```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>org.example</groupId>
  <artifactId>http</artifactId>
  <version>1.0-SNAPSHOT</version>

  <properties>
    <maven.compiler.source>11</maven.compiler.source>
    <maven.compiler.target>11</maven.compiler.target>
  </properties>

  <build>
    <finalName>app</finalName>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-jar-plugin</artifactId>
        <configuration>
          <archive>
            <manifest>
              <mainClass>org.example.http.HttpTest</mainClass>
              <addClasspath>true</addClasspath>
              <classpathPrefix>./lib/</classpathPrefix>
              <useUniqueVersions>false</useUniqueVersions>
            </manifest>
          </archive>
        </configuration>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-dependency-plugin</artifactId>
        <executions>
          <execution>
            <id>copy</id>
            <phase>package</phase>
            <goals>
              <goal>copy-dependencies</goal>
            </goals>
            <configuration>
              <outputDirectory>${project.build.directory}/lib</outputDirectory>
            </configuration>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>

  <dependencies>
    <dependency>
      <groupId>org.apache.httpcomponents.client5</groupId>
      <artifactId>httpclient5</artifactId>
      <version>5.1.3</version>
    </dependency>
  </dependencies>
</project>
```

关键点:
* 利用 `maven-dependency-plugin` 插件将所有依赖 jar 包拷贝到 `./lib` 下。
* 利用 `maven-jar-plugin` 插件在打包 jar 时指定 main 函数所在 Class,让 jar 可执行;将依赖包放到 jar 包相对路径的 `./lib` 下并自动加上 `CLASSPATH`。

### Dockerfile

以下是用于构建镜像的 `Dockerfile` 示例:

```dockerfile
FROM docker.io/library/maven:3.8-jdk-11 AS build

COPY src /app/src
COPY pom.xml /app

RUN mvn -f /app/pom.xml clean package

FROM openjdk:11-jre-slim
COPY --from=build /app/target/app.jar /app/app.jar
COPY --from=build /app/target/lib /app/lib
ENTRYPOINT ["java","-jar","/app/app.jar"]
```

关键点:
* 利用多阶段构建,只将生成的 jar 包及其依赖拷贝到最终镜像中,减小镜像体积。
* 镜像指定启动命令,给 `java` 指定要运行的 jar 包。

## 参考资料

* [JDK 8u191 Update Release Notes](https://www.oracle.com/java/technologies/javase/8u191-relnotes.html)
* [Docker support in Java 8 — finally!](https://blog.softwaremill.com/docker-support-in-new-java-8-finally-fd595df0ca54)
* [Better Containerized JVMs in JDK10](http://blog.gilliard.lol/2018/01/10/Java-in-containers-jdk10.html)
* [JVM in a Container](https://merikan.com/2019/04/jvm-in-a-container/#java-8u131-and-java-9)
* [14 best practices for containerising your Java applications](https://www.tutorialworks.com/docker-java-best-practices/)
* [Best Practices: Java Memory Arguments for Containers](https://dzone.com/articles/best-practices-java-memory-arguments-for-container)

diff --git a/content/best-practices/containerization/systemd-in-container.md b/content/best-practices/containerization/systemd-in-container.md new file mode 100644 index 0000000..744e4b9 --- /dev/null +++ b/content/best-practices/containerization/systemd-in-container.md @@ -0,0 +1,122 @@

# 在容器内使用 systemd

## 概述

某些情况下我们需要在容器内使用 systemd 去拉起进程,比如业务历史包袱重,有许多依赖组件,不能仅仅只启动 1 个业务进程,还有许多其它进程需要启动,短时间内难以改造完,过渡期间使用 systemd 作为主进程拉起所有依赖进程。

## 安装 systemd

如果你用的基础镜像是 centos,那么已经内置了 systemd,建议使用 `centos:8`,启动入口是 `/sbin/init`;如果是 ubuntu,那么需要安装一下 systemd,启动入口是 `/usr/bin/systemd` (与下文示例保持一致),Dockerfile 示例:

```dockerfile
FROM ubuntu:22.04
RUN apt update -y
RUN apt install -y systemd
```

## 示例

systemd 相比业务进程比较特殊,它运行起来需要以下条件:
1. 自己必须是 1 号进程,所以不能启用 `shareProcessNamespace`。
2. 
需要对 `/run` 和 `/sys/fs/cgroup` 等路径进行挂载,通常需要给到 systemd 容器一定特权。 + +最简单的方式是将运行 systemd 的 container 设为特权容器,示例: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: systemd +spec: + replicas: 1 + selector: + matchLabels: + app: systemd + template: + metadata: + labels: + app: systemd + spec: + containers: + - name: systemd + image: centos:8 + command: + - /sbin/init + securityContext: + privileged: true # 设置特权 +``` + +如果希望尽量减少特权,可以只读方式挂载 hostPath `/sys/fs/cgroup`,然后 capabilities 给个 `SYS_ADMIN`: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: systemd +spec: + replicas: 1 + selector: + matchLabels: + app: systemd + template: + metadata: + labels: + app: systemd + spec: + containers: + - name: systemd + image: centos:8 + command: + - /sbin/init + securityContext: + capabilities: + add: + - SYS_ADMIN # 设置容器权限 + privileged: false # 非特权 + volumeMounts: + - mountPath: /sys/fs/cgroup + name: cgroup + readOnly: true # 只读方式挂载 cgroup 目录 + volumes: + - hostPath: + path: /sys/fs/cgroup + type: "" + name: cgroup +``` + +如果用 ubuntu 安装了 systemd,用法类似的,只是启动入口变成了 `/usr/bin/systemd`: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: systemd +spec: + replicas: 1 + selector: + matchLabels: + app: systemd + template: + metadata: + labels: + app: systemd + spec: + containers: + - name: systemd + image: cr.imroc.cc/library/systemd:ubuntu + command: + - /usr/bin/systemd + securityContext: + capabilities: + add: + - SYS_ADMIN + privileged: false + volumeMounts: + - mountPath: /sys/fs/cgroup + name: cgroup + volumes: + - hostPath: + path: /sys/fs/cgroup + type: "" + name: cgroup +``` \ No newline at end of file diff --git a/content/best-practices/containerization/timezone.md b/content/best-practices/containerization/timezone.md new file mode 100644 index 0000000..88fb5cd --- /dev/null +++ b/content/best-practices/containerization/timezone.md @@ -0,0 +1,17 @@ +# 解决容器内时区不一致问题 + +## 背景 + +业务程序在使用时间的时候(比如打印日志),没有指定时区,使用的系统默认时区,而基础镜像一般默认使用 UTC 时间,程序输出时间戳的时候,就与国内的时间相差 8 小时,如何使用国内的时间呢?本文教你如何解决。 + +## 最佳实践:使用多阶段构建拷贝时区文件 + +centos 基础镜像内置了时区文件,可以将里面国内的时区文件拷贝到业务镜像中的 `/etc/localtime` 路径,表示系统默认时区是国内时区: + +```Dockerfile +FROM centos:latest + +FROM ubuntu:22.10 + +COPY --from=0 /usr/share/zoneinfo/Asia/Shanghai /etc/localtime +``` \ No newline at end of file diff --git a/content/best-practices/dns/customize-dns-resolution.md b/content/best-practices/dns/customize-dns-resolution.md new file mode 100644 index 0000000..155460b --- /dev/null +++ b/content/best-practices/dns/customize-dns-resolution.md @@ -0,0 +1,60 @@ +# 自定义域名解析 + +本文介绍在 kubernetes 上如何自定义集群 CoreDNS 的域名解析。 + +## 添加全局自定义域名解析 + +可以为 coredns 配置 hosts 来实现为 kubernetes 集群添加全局的自定义域名解析: + +编辑 coredns 配置: + +```bash +kubectl -n kube-system edit configmap coredns +``` + +加入 hosts: + +```txt + hosts { + 10.10.10.10 harbor.example.com + 10.10.10.11 grafana.example.com + fallthrough + } +``` + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111323.png) + +> 参考 [CoreDNS hosts 插件说明](https://coredns.io/plugins/hosts/) + +如果是想解析到集群内的 Service,也可以配置下 rewrite: + +```txt + rewrite name harbor.example.com harbor.harbor.svc.cluster.local +``` + + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111350.png) + +> 参考 [CoreDNS rewrite 插件说明](https://coredns.io/plugins/rewrite/) + +## 为部分 Pod 添加自定义域名解析 + +如果有部分 Pod 对特定的域名解析有依赖,在不希望配置 dns 解析的情况下,可以使用 K8S 提供的 `hostAliases` 来为部分工作负载添加 hosts: + +```yaml + spec: + hostAliases: + - hostnames: [ "harbor.example.com" ] + ip: 
"10.10.10.10" +``` + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111402.png) + +添加后在容器内可以看到 hosts 被添加到了 `/etc/hosts` 中: + +```bash +$ cat /etc/hosts +... +# Entries added by HostAliases. +10.10.10.10 harboar.example.com +``` diff --git a/content/best-practices/dns/optimize-coredns-performance.md b/content/best-practices/dns/optimize-coredns-performance.md new file mode 100644 index 0000000..9496524 --- /dev/null +++ b/content/best-practices/dns/optimize-coredns-performance.md @@ -0,0 +1,83 @@ +# CoreDNS 性能优化 + +CoreDNS 作为 Kubernetes 集群的域名解析组件,如果性能不够可能会影响业务,本文介绍几种 CoreDNS 的性能优化手段。 + +## 合理控制 CoreDNS 副本数 + +考虑以下几种方式: +1. 根据集群规模预估 coredns 需要的副本数,直接调整 coredns deployment 的副本数: +```bash +kubectl -n kube-system scale --replicas=10 deployment/coredns +``` +2. 为 coredns 定义 HPA 自动扩缩容。 +3. 安装 [cluster-proportional-autoscaler](https://github.com/kubernetes-sigs/cluster-proportional-autoscaler) 以实现更精确的扩缩容(推荐)。 + +## 禁用 ipv6 + +如果 K8S 节点没有禁用 IPV6 的话,容器内进程请求 coredns 时的默认行为是同时发起 IPV4 和 IPV6 解析,而通常我们只需要用到 IPV4,当容器请求某个域名时,coredns 解析不到 IPV6 记录,就会 forward 到 upstream 去解析,如果到 upstream 需要经过较长时间(比如跨公网,跨机房专线),就会拖慢整个解析流程的速度,业务层面就会感知 DNS 解析慢。 + +CoreDNS 有一个 [template](https://coredns.io/plugins/template/) 的插件,可以用它来禁用 IPV6 的解析,只需要给 CoreDNS 加上如下的配置: + +```txt +template ANY AAAA { + rcode NXDOMAIN +} +``` + +> 这个配置的含义是:给所有 IPV6 的解析请求都响应空记录,即无此域名的 IPV6 记录。 + +## 优化 ndots + +默认情况下,Kubernetes 集群中的域名解析往往需要经过多次请求才能解析到。查看 pod 内 的 `/etc/resolv.conf` 可以知道 `ndots` 选项默认为 5: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111437.png) + +意思是: 如果域名中 `.` 的数量小于 5,就依次遍历 `search` 中的后缀并拼接上进行 DNS 查询。 + +举个例子,在 debug 命名空间查询 `kubernetes.default.svc.cluster.local` 这个 service: +1. 域名中有 4 个 `.`,小于 5,尝试拼接上第一个 search 进行查询,即 `kubernetes.default.svc.cluster.local.debug.svc.cluster.local`,查不到该域名。 +2. 继续尝试 `kubernetes.default.svc.cluster.local.svc.cluster.local`,查不到该域名。 +3. 继续尝试 `kubernetes.default.svc.cluster.local.cluster.local`,仍然查不到该域名。 +4. 
尝试不加后缀,即 `kubernetes.default.svc.cluster.local`,查询成功,返回响应的 ClusterIP。 + +可以看到一个简单的 service 域名解析需要经过 4 轮解析才能成功,集群中充斥着大量无用的 DNS 请求。 + +怎么办呢?我们可以设置较小的 ndots,在 Pod 的 dnsConfig 中可以设置: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111448.png) + +然后业务发请求时尽量将 service 域名拼完整,这样就不会经过 search 拼接造成大量多余的 DNS 请求。 + +不过这样会比较麻烦,有没有更好的办法呢?有的!请看下面的 autopath 方式。 + +## 启用 autopath + +启用 CoreDNS 的 autopath 插件可以避免每次域名解析经过多次请求才能解析到,原理是 CoreDNS 智能识别拼接过 search 的 DNS 解析,直接响应 CNAME 并附上相应的 ClusterIP,一步到位,可以极大减少集群内 DNS 请求数量。 + +启用方法是修改 CoreDNS 配置: + +```bash +kubectl -n kube-system edit configmap coredns +``` + +修改红框中圈出来的配置: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111502.png) + +* 加上 `autopath @kubernetes`。 +* 默认的 `pods insecure` 改成 `pods verified`。 + +需要注意的是,启用 autopath 后,由于 coredns 需要 watch 所有的 pod,会增加 coredns 的内存消耗,根据情况适当调节 coredns 的 memory request 和 limit。 + +## 部署 NodeLocal DNSCache + +参考 k8s 官方文档 [Using NodeLocal DNSCache in Kubernetes clusters](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) + +如果是使用 TKE 并且 kube-proxy 转发模式为 iptables,可以直接在扩展组件中安装此扩展组件,扩展组件说明请参考 [TKE 官方文档](https://cloud.tencent.com/document/product/457/49423);如果使用的 ipvs 模式,可以参考 [TKE IPVS 模式安装 localdns](../../tencent/networking/install-localdns-with-ipvs.md)。 + +## 使用 DNSAutoscaler + +社区有开源的 [cluster-proportional-autoscaler](https://github.com/kubernetes-sigs/cluster-proportional-autoscaler) ,可以根据集群规模自动扩缩容,支持比较灵活的扩缩容算法。 + +如果使用的是 TKE,已经将其产品化成 `DNSAutoscaler 扩展组件`,在扩展组件中直接安装即可,组件说明请参考 [TKE 官方文档](https://cloud.tencent.com/document/product/457/49305)。 + diff --git a/content/best-practices/graceful-shutdown/code-example-of-handle-sigterm.md b/content/best-practices/graceful-shutdown/code-example-of-handle-sigterm.md new file mode 100644 index 0000000..88ef706 --- /dev/null +++ b/content/best-practices/graceful-shutdown/code-example-of-handle-sigterm.md @@ -0,0 +1,149 @@ +# 业务代码处理 SIGTERM 信号 + +要实现优雅终止,首先业务代码得支持下优雅终止的逻辑,在业务代码里面处理下 `SIGTERM` 信号,一般主要逻辑就是"排水",即等待存量的任务或连接完全结束,再退出进程。 + +本文给出各种语言的代码示例。 + +## shell + +```bash +#!/bin/sh + +## Redirecting Filehanders +ln -sf /proc/$$/fd/1 /log/stdout.log +ln -sf /proc/$$/fd/2 /log/stderr.log + +## Pre execution handler +pre_execution_handler() { + ## Pre Execution + # TODO: put your pre execution steps here + : # delete this nop +} + +## Post execution handler +post_execution_handler() { + ## Post Execution + # TODO: put your post execution steps here + : # delete this nop +} + +## Sigterm Handler +sigterm_handler() { + if [ $pid -ne 0 ]; then + # the above if statement is important because it ensures + # that the application has already started. without it you + # could attempt cleanup steps if the application failed to + # start, causing errors. + kill -15 "$pid" + wait "$pid" + post_execution_handler + fi + exit 143; # 128 + 15 -- SIGTERM +} + +## Setup signal trap +# on callback execute the specified handler +trap 'sigterm_handler' SIGTERM + +## Initialization +pre_execution_handler + +## Start Process +# run process in background and record PID +>/log/stdout.log 2>/log/stderr.log "$@" & +pid="$!" +# Application can log to stdout/stderr, /log/stdout.log or /log/stderr.log + +## Wait forever until app dies +wait "$pid" +return_code="$?" 
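# 上面记录下了业务进程的退出码,脚本最后会以同样的退出码退出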
## Cleanup
post_execution_handler
# echo the return code of the application
exit $return_code
```

## Go

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {

	sigs := make(chan os.Signal, 1)
	done := make(chan bool, 1)
	//registers the channel
	signal.Notify(sigs, syscall.SIGTERM)

	go func() {
		sig := <-sigs
		fmt.Printf("Caught %v, shutting down\n", sig)
		// Finish any outstanding requests, then...
		done <- true
	}()

	fmt.Println("Starting application")
	// Main logic goes here
	<-done
	fmt.Println("exiting")
}
```

## Python

```python
import signal, time, os

def shutdown(signum, frame):
    print('Caught SIGTERM, shutting down')
    # Finish any outstanding requests, then...
    exit(0)

if __name__ == '__main__':
    # Register handler
    signal.signal(signal.SIGTERM, shutdown)
    # Main logic goes here
```

## NodeJS

```js
process.on('SIGTERM', () => {
  console.log('The service is about to shut down!');

  // Finish any outstanding requests, then...
  process.exit(0);
});
```

## Java

```java
import sun.misc.Signal;
import sun.misc.SignalHandler;

public class ExampleSignalHandler {
    public static void main(String... args) throws InterruptedException {
        final long start = System.nanoTime();
        Signal.handle(new Signal("TERM"), new SignalHandler() {
            public void handle(Signal sig) {
                System.out.format("\nProgram execution took %f seconds\n", (System.nanoTime() - start) / 1e9f);
                System.exit(0);
            }
        });
        int counter = 0;
        while(true) {
            System.out.println(counter++);
            Thread.sleep(500);
        }
    }
}
```

diff --git a/content/best-practices/graceful-shutdown/intro.md b/content/best-practices/graceful-shutdown/intro.md new file mode 100644 index 0000000..f798a81 --- /dev/null +++ b/content/best-practices/graceful-shutdown/intro.md @@ -0,0 +1,7 @@

# 优雅终止介绍

> 本文视频教程: [https://www.bilibili.com/video/BV1fu411m73C](https://www.bilibili.com/video/BV1fu411m73C)

所谓优雅终止,就是在销毁 Pod 的时候保证对业务无损,比如在业务发版时,让工作负载能够平滑滚动更新。Pod 在销毁时,会停止容器内的进程,通常在停止的过程中我们需要执行一些善后逻辑,比如等待存量请求处理完以避免连接中断,或通知相关依赖进行清理等,从而实现优雅终止目的。

本节将介绍在 Kubernetes 场景下,实现 Pod 优雅终止的最佳实践。

diff --git a/content/best-practices/graceful-shutdown/lb-to-pod-directly.md b/content/best-practices/graceful-shutdown/lb-to-pod-directly.md new file mode 100644 index 0000000..50cc32b --- /dev/null +++ b/content/best-practices/graceful-shutdown/lb-to-pod-directly.md @@ -0,0 +1,50 @@

# LB 直通 Pod 场景

## 传统 NodePort 场景

K8S 服务对外暴露传统方案是 LB 绑定 Service 的 NodePort,流量从 LB 打到 NodePort 之后,再由 kube-proxy 生成的 ipvs 或 iptables 规则进行转发:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111001.png)

这样当滚动更新时,LB 绑定的 NodePort 一般无需变动,也就不需要担心 LB 解绑导致对业务有损。

## LB 直通 Pod 场景

现在很多云厂商也都支持了 LB 直通 Pod,即 LB 直接将流量转发给 Pod,不需要再经过集群内做一次转发:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111009.png)

当滚动更新时,LB 就需要解绑旧 Pod,绑定新 Pod,如果 LB 到旧 Pod 上存量连接的存量请求还没处理完,直接解绑的话就可能造成请求异常;我们期望的是,等待存量请求处理完,LB 才真正解绑旧 Pod。

## 解决方案

### TKE

腾讯云 TKE 官方针对四层 Service 和七层 Ingress 都提供了解决方案。

如果是四层 Service,在 Service 上加上这样的注解即可(前提是 Service 用了 CLB 直通 Pod 模式):

```yaml
service.cloud.tencent.com/enable-grace-shutdown: "true"
```

> 参考官方文档 [Service 优雅停机](https://cloud.tencent.com/document/product/457/60064)

如果是七层 CLB 类型 Ingress,在 Ingress 上加上这样的注解即可(前提是 Service 用了 CLB 直通 Pod 模式):

```yaml
ingress.cloud.tencent.com/enable-grace-shutdown: "true"
```

> 参考官方文档 [Ingress 
优雅停机](https://cloud.tencent.com/document/product/457/60065) + +### ACK + +阿里云 ACK 目前只针对四层 Service 提供了解决方案,通过注解开启优雅中断与设置中断超时时间: + +```yaml +service.beta.kubernetes.io/alibaba-cloud-loadbalancer-connection-drain: "on" +service.beta.kubernetes.io/alibaba-cloud-loadbalancer-connection-drain-timeout: "900" +``` + +> 参考官方文档 [通过Annotation配置负载均衡](https://help.aliyun.com/document_detail/86531.html) diff --git a/content/best-practices/graceful-shutdown/persistent-connection.md b/content/best-practices/graceful-shutdown/persistent-connection.md new file mode 100644 index 0000000..4d08286 --- /dev/null +++ b/content/best-practices/graceful-shutdown/persistent-connection.md @@ -0,0 +1,13 @@ +# 长连接场景 + +如果业务是长链接场景,比如游戏、会议、直播等,客户端与服务端会保持着长链接: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925110939.png) + +销毁 Pod 时需要的优雅终止的时间通常比较长 (preStop + 业务进程停止超过 30s),有的极端情况甚至可能长达数小时,这时候可以根据实际情况自定义 `terminationGracePeriodSeconds`,避免过早的被 `SIGKILL` 杀死,示例: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925110946.png) + +具体设置多大可以根据业务场景最坏的情况来预估,比如对战类游戏场景,同一房间玩家的客户端都连接的同一个服务端 Pod,一轮游戏最长半个小时,那么我们就设置 `terminationGracePeriodSeconds` 为 1800。 + +如果不好预估最坏的情况,最好在业务层面优化下,比如 Pod 销毁时的优雅终止逻辑里面主动通知下客户端,让客户端连到新的后端,然后客户端来保证这两个连接的平滑切换。等旧 Pod 上所有客户端连接都连切换到了新 Pod 上,才最终退出 diff --git a/content/best-practices/graceful-shutdown/pod-termination-proccess.md b/content/best-practices/graceful-shutdown/pod-termination-proccess.md new file mode 100644 index 0000000..988d04e --- /dev/null +++ b/content/best-practices/graceful-shutdown/pod-termination-proccess.md @@ -0,0 +1,19 @@ +# Pod 终止流程 + +我们先了解下容器在 Kubernetes 环境中的终止流程: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925110746.png) + +1. Pod 被删除,状态变为 `Terminating`。从 API 层面看就是 Pod metadata 中的 deletionTimestamp 字段会被标记上删除时间。 +2. kube-proxy watch 到了就开始更新转发规则,将 Pod 从 service 的 endpoint 列表中摘除掉,新的流量不再转发到该 Pod。 +3. kubelet watch 到了就开始销毁 Pod。 + + 3.1. 如果 Pod 中有 container 配置了 [preStop Hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/) ,将会执行。 + + 3.2. 发送 `SIGTERM` 信号给容器内主进程以通知容器进程开始优雅停止。 + + 3.3. 等待 container 中的主进程完全停止,如果在 `terminationGracePeriodSeconds` 内 (默认 30s) 还未完全停止,就发送 `SIGKILL` 信号将其强制杀死。 + + 3.4. 所有容器进程终止,清理 Pod 资源。 + + 3.5. 通知 APIServer Pod 销毁完成,完成 Pod 删除。 diff --git a/content/best-practices/graceful-shutdown/propagating-signals-in-shell.md b/content/best-practices/graceful-shutdown/propagating-signals-in-shell.md new file mode 100644 index 0000000..00b4724 --- /dev/null +++ b/content/best-practices/graceful-shutdown/propagating-signals-in-shell.md @@ -0,0 +1,90 @@ +# 在 SHELL 中传递信号 + +在 Kubernetes 中,Pod 停止时 kubelet 会先给容器中的主进程发 `SIGTERM` 信号来通知进程进行 shutdown 以实现优雅停止,如果超时进程还未完全停止则会使用 `SIGKILL` 来强行终止。 + +但有时我们会遇到一种情况: 业务逻辑处理了 `SIGTERM` 信号,但 Pod 停止时好像没收到信号导致优雅停止逻辑不生效。 + +通常是因为我们的业务进程是在脚本中启动的,容器的启动入口使用了脚本,所以容器中的主进程并不是我们所希望的业务进程而是 shell 进程,导致业务进程收不到 `SIGTERM` 信号,更详细的原因在上一节我们已经介绍了,下面将介绍几种解决方案。 + +## 使用 exec 启动 + +在 shell 中启动二进制的命令前加一个 [exec](https://stackoverflow.com/questions/18351198/what-are-the-uses-of-the-exec-command-in-shell-scripts) 即可让该二进制启动的进程代替当前 shell 进程,即让新启动的进程成为主进程: + +```bash +#! /bin/bash +... 
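# 注意: exec 会用业务进程替换当前 shell 进程使其成为容器主进程,exec 之后的脚本语句不会再被执行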
exec /bin/yourapp # 脚本中执行二进制
```

然后业务进程就可以正常接收所有信号了,实现优雅退出也不在话下。

## 多进程场景: 使用 trap 传递信号

通常我们一个容器只会有一个进程,这也是 Kubernetes 的推荐做法。但有些时候我们不得不启动多个进程,比如从传统部署迁移到 Kubernetes 的过渡期间,使用了富容器,即单个容器中需要启动多个业务进程,这时也只能通过 shell 启动,但无法使用上面的 `exec` 方式来传递信号,因为 `exec` 只能让一个进程替代当前 shell 成为主进程。

这个时候我们可以在 shell 中使用 `trap` 来捕获信号,当收到信号后触发回调函数来将信号通过 `kill` 传递给业务进程,脚本示例:

```bash
#! /bin/bash

/bin/app1 & pid1="$!" # 启动第一个业务进程并记录 pid
echo "app1 started with pid $pid1"

/bin/app2 & pid2="$!" # 启动第二个业务进程并记录 pid
echo "app2 started with pid $pid2"

handle_sigterm() {
  echo "[INFO] Received SIGTERM"
  kill -SIGTERM $pid1 $pid2 # 传递 SIGTERM 给业务进程
  wait $pid1 $pid2 # 等待所有业务进程完全终止
}
trap handle_sigterm SIGTERM # 捕获 SIGTERM 信号并回调 handle_sigterm 函数

wait # 等待回调执行完,主进程再退出
```

## 完美方案: 使用 init 系统

前面一种方案实际是用脚本实现了一个极简的 init 系统 (或 supervisor) 来管理所有子进程,只不过它的逻辑很简陋,仅仅简单地透传指定信号给子进程。其实社区有更完善的方案,[dumb-init](https://github.com/Yelp/dumb-init) 和 [tini](https://github.com/krallin/tini) 都可以作为 init 进程,作为主进程 (PID 1) 在容器中启动,然后它再运行 shell 来执行我们指定的脚本 (shell 作为子进程),shell 中启动的业务进程也成为它的子进程,当它收到信号时会将其传递给所有的子进程,从而也能完美解决 SHELL 无法传递信号的问题,并且还有回收僵尸进程的能力。

这是以 `dumb-init` 为例制作镜像的 `Dockerfile` 示例:

```dockerfile
FROM ubuntu:22.04
RUN apt-get update && apt-get install -y dumb-init
ADD start.sh /
ADD app1 /bin/app1
ADD app2 /bin/app2
ENTRYPOINT ["dumb-init", "--"]
CMD ["/start.sh"]
```

这是以 `tini` 为例制作镜像的 `Dockerfile` 示例:

```dockerfile
FROM ubuntu:22.04
ENV TINI_VERSION v0.19.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
COPY start.sh /start.sh
RUN chmod +x /tini /start.sh
ENTRYPOINT ["/tini", "--"]
CMD [ "/start.sh" ]
```

`start.sh` 脚本示例:

```bash
#! /bin/bash
/bin/app1 &
/bin/app2 &
wait
```

## 参考资料

* [Trapping signals in Docker containers](https://medium.com/@gchudnov/trapping-signals-in-docker-containers-7a57fdda7d86)
* [Gracefully Stopping Docker Containers](https://www.ctl.io/developers/blog/post/gracefully-stopping-docker-containers/)
* [Why Your Dockerized Application Isn't Receiving Signals](https://hynek.me/articles/docker-signals/)
* [Best practices for propagating signals on Docker](https://www.kaggle.com/residentmario/best-practices-for-propagating-signals-on-docker)
* [Graceful shutdowns with ECS](https://aws.amazon.com/cn/blogs/containers/graceful-shutdowns-with-ecs/)
\ No newline at end of file

diff --git a/content/best-practices/graceful-shutdown/use-prestop.md b/content/best-practices/graceful-shutdown/use-prestop.md
new file mode 100644
index 0000000..38a57d4
--- /dev/null
+++ b/content/best-practices/graceful-shutdown/use-prestop.md
@@ -0,0 +1,26 @@
+# 合理使用 preStop

若你的业务代码中没有处理 `SIGTERM` 信号,或者你无法控制使用的第三方库或系统来增加优雅终止的逻辑,也可以尝试为 Pod 配置下 preStop,在这里面实现优雅终止的逻辑,示例:

```yaml
  lifecycle:
    preStop:
      exec:
        command:
        - /clean.sh
```

> 参考 [Kubernetes API 文档](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#lifecycle-1)

在某些极端情况下,Pod 被删除的一小段时间内,仍然可能有新连接被转发过来,因为 kubelet 与 kube-proxy 同时 watch 到 pod 被删除,kubelet 有可能在 kube-proxy 同步完规则前就已经停止容器了,这时可能导致一些新的连接被转发到正在删除的 Pod,而通常情况下,当应用收到 `SIGTERM` 后都不再接受新连接,只保持存量连接继续处理,所以就可能导致 Pod 删除的瞬间部分请求失败。

这种情况下,我们也可以利用 preStop 先 sleep 一小会儿,等待 kube-proxy 完成规则同步再开始停止容器内进程:

```yaml
  lifecycle:
    preStop:
      exec:
        command:
        - sleep
        - 5s
```
\ No newline at end of file

diff --git a/content/best-practices/graceful-shutdown/why-cannot-receive-sigterm.md b/content/best-practices/graceful-shutdown/why-cannot-receive-sigterm.md
new file mode 100644
index 0000000..203a182
--- /dev/null
+++ b/content/best-practices/graceful-shutdown/why-cannot-receive-sigterm.md
@@ -0,0 +1,25 @@
+# 为什么收不到 SIGTERM 信号?

我们的业务代码通常会捕捉 `SIGTERM` 信号,然后执行停止逻辑以实现优雅终止。在 Kubernetes 环境中,业务发版时经常会对 workload 进行滚动更新,当旧版本 Pod 被删除时,K8S 会对 Pod 中各个容器中的主进程发送 `SIGTERM` 信号,当达到超时时间进程还未完全停止的话,K8S 就会发送 `SIGKILL` 信号将其强制杀死。

业务在 Kubernetes 环境中实际运行时,有时候可能会发现在滚动更新时,我们业务的优雅终止逻辑并没有被执行,现象是在等了较长时间后,业务进程直接被 `SIGKILL` 强制杀死了。

## 什么原因?

通常都是因为容器启动入口使用了 shell,比如使用了类似 `/bin/sh -c my-app` 这样的启动入口,或者使用 `/entrypoint.sh` 这样的脚本文件作为入口,在脚本中再启动业务进程:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925110850.png)

这就可能会导致容器内的业务进程收不到 `SIGTERM` 信号,原因是:

1. 容器主进程是 shell,业务进程是在 shell 中启动的,成为了 shell 进程的子进程。

   ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925110858.png)
2. shell 进程默认不会处理 `SIGTERM` 信号,自己不会退出,也不会将信号传递给子进程,导致业务进程不会触发停止逻辑。
3. 当等到 K8S 优雅停止超时时间 (`terminationGracePeriodSeconds`,默认 30s),发送 `SIGKILL` 强制杀死 shell 及其子进程。


## 如何解决?

1. 如果可以的话,尽量不使用 shell 启动业务进程。
2. 如果一定要通过 shell 启动,比如在启动前需要用 shell 进行一些判断和处理,或者需要启动多个进程,那么就需要在 shell 中传递下 SIGTERM 信号了,解决方案请参考 [在 SHELL 中传递信号](propagating-signals-in-shell.md) 。

diff --git a/content/best-practices/ha/pod-split-up-scheduling.md b/content/best-practices/ha/pod-split-up-scheduling.md
new file mode 100644
index 0000000..7d89b7c
--- /dev/null
+++ b/content/best-practices/ha/pod-split-up-scheduling.md
@@ -0,0 +1,168 @@
+# Pod 打散调度

将 Pod 打散调度到不同地方,可避免因软硬件故障、光纤故障、断电或自然灾害等因素导致服务不可用,以实现服务的高可用部署。

Kubernetes 支持两种方式将 Pod 打散调度:
* Pod 反亲和 (Pod Anti-Affinity)
* Pod 拓扑分布约束 (Pod Topology Spread Constraints)

本文介绍两种方式的用法示例与对比总结。

## 使用 podAntiAffinity

**将 Pod 强制打散调度到不同节点上(强反亲和),以避免单点故障**:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                app: nginx
      containers:
      - name: nginx
        image: nginx
```

* `labelSelector.matchLabels` 替换成选中 Pod 实际使用的 label。
* `topologyKey`: 节点的某个 label 的 key,能代表节点所处拓扑域,可以用 [Well-Known Labels](https://kubernetes.io/docs/reference/labels-annotations-taints/#failure-domainbetakubernetesioregion),常用的是 `kubernetes.io/hostname` (节点维度)、`topology.kubernetes.io/zone` (可用区/机房 维度)。也可以自行手动为节点打上自定义的 label 来定义拓扑域,比如 `rack` (机架维度)、`machine` (物理机维度)、`switch` (交换机维度)。
* 若不希望用强制,可以使用弱反亲和,让 Pod 尽量调度到不同节点:
  ```yaml
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - podAffinityTerm:
        topologyKey: kubernetes.io/hostname
        labelSelector:
          matchLabels:
            app: nginx
      weight: 100
  ```

**将 Pod 强制打散调度到不同可用区(机房),以实现跨机房容灾**:

将 `kubernetes.io/hostname` 换成 `topology.kubernetes.io/zone`,其余同上。

## 使用 topologySpreadConstraints

**将 Pod 最大程度上均匀的打散调度到各个节点上**:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: kubernetes.io/hostname
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: nginx
      containers:
      - name: nginx
        image: nginx
```

* `topologyKey`: 与 podAntiAffinity 中配置类似。
* `labelSelector`: 与 podAntiAffinity 中配置类似,只是这里可以支持选中多组 pod 的 label。
* `maxSkew`: 必须是大于零的整数,表示能容忍不同拓扑域中 Pod 数量差异的最大值。这里的 1 意味着只允许相差 1 个 Pod。
* `whenUnsatisfiable`:
指示不满足条件时如何处理。`DoNotSchedule` 不调度 (保持 Pending),类似强反亲和;`ScheduleAnyway` 表示仍然调度,类似弱反亲和。

以上配置连起来解释: 将所有 nginx 的 Pod 严格均匀打散调度到不同节点上,不同节点上 nginx 的副本数量最多只能相差 1 个,如果有节点因其它因素无法调度更多的 Pod (比如资源不足),那么就让剩余的 nginx 副本 Pending。

所以,如果要在所有节点中严格打散,通常不太可取,可以加下 nodeAffinity,只在部分资源充足的节点严格打散:

```yaml
  spec:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: io
              operator: In
              values:
              - high
    topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          app: nginx
```

或者类似弱反亲和, **将 Pod 尽量均匀的打散调度到各个节点上,不强制** (DoNotSchedule 改为 ScheduleAnyway):

```yaml
  spec:
    topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app: nginx
```

如果集群节点支持跨可用区,也可以 **将 Pod 尽量均匀的打散调度到各个可用区** 以实现更高级别的高可用 (topologyKey 改为 `topology.kubernetes.io/zone`):

```yaml
  spec:
    topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app: nginx
```

更进一步地,可以 **将 Pod 尽量均匀的打散调度到各个可用区的同时,在可用区内部各节点也尽量打散**:

```yaml
  spec:
    topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app: nginx
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app: nginx
```

## 小结

从示例能明显看出,`topologySpreadConstraints` 比 `podAntiAffinity` 功能更强,提供了更精细的调度控制,我们可以理解成 `topologySpreadConstraints` 是 `podAntiAffinity` 的升级版。`topologySpreadConstraints` 特性在 K8S v1.18 默认启用,所以建议 v1.18 及其以上的集群使用 `topologySpreadConstraints` 来打散 Pod 的分布以提高服务可用性。

## 参考资料

* [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
\ No newline at end of file

diff --git a/content/best-practices/ha/smooth-upgrade.md b/content/best-practices/ha/smooth-upgrade.md
new file mode 100644
index 0000000..91cc852
--- /dev/null
+++ b/content/best-practices/ha/smooth-upgrade.md
@@ -0,0 +1,43 @@
+# 工作负载平滑升级

解决了服务单点故障和驱逐节点时导致的可用性降低问题后,我们还需要考虑一种可能导致可用性降低的场景,那就是滚动更新。为什么服务正常滚动更新也可能影响服务的可用性呢?别急,下面我来解释下原因。

## 业务有损滚动更新

假如集群内存在服务间调用:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925112252.png)

当 server 端发生滚动更新时:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925112258.png)

可能发生两种尴尬的情况:
1. 旧的副本很快销毁,而 client 所在节点 kube-proxy 还没更新完转发规则,仍然将新连接调度给旧副本,造成连接异常,可能会报 "connection refused" (进程停止过程中,不再接受新请求) 或 "no route to host" (容器已经完全销毁,网卡和 IP 已不存在)。
2.
新副本启动,client 所在节点 kube-proxy 很快 watch 到了新副本,更新了转发规则,并将新连接调度给新副本,但容器内的进程启动很慢 (比如 Tomcat 这种 java 进程),还在启动过程中,端口还未监听,无法处理连接,也造成连接异常,通常会报 "connection refused" 的错误。

## 最佳实践

针对第一种情况,可以给 container 加 preStop,让 Pod 真正销毁前先 sleep 等待一段时间,等待 client 所在节点 kube-proxy 更新转发规则,然后再真正去销毁容器。这样能保证在 Pod Terminating 后还能继续正常运行一段时间,这段时间如果因为 client 侧的转发规则更新不及时导致还有新请求转发过来,Pod 还是可以正常处理请求,避免了连接异常的发生。听起来感觉有点不优雅,但实际效果还是比较好的,分布式的世界没有银弹,我们只能尽量在当前设计现状下找到并实践能够解决问题的最优解。

针对第二种情况,可以给 container 加 ReadinessProbe (就绪检查),让容器内进程真正启动完成后才更新 Service 的 Endpoint,然后 client 所在节点 kube-proxy 再更新转发规则,让流量进来。这样能够保证等 Pod 完全就绪了才会被转发流量,也就避免了连接异常的发生。

最佳实践 yaml 示例:

``` yaml
  readinessProbe:
    httpGet:
      path: /healthz
      port: 80
      httpHeaders:
      - name: X-Custom-Header
        value: Awesome
    initialDelaySeconds: 10
    timeoutSeconds: 1
  lifecycle:
    preStop:
      exec:
        command: ["/bin/bash", "-c", "sleep 10"]
```

最后,业务本身也需要实现优雅终止,避免被销毁时中断业务,参考 [优雅终止最佳实践](../graceful-shutdown/index.html)

diff --git a/content/best-practices/logging.md b/content/best-practices/logging.md
new file mode 100644
index 0000000..31a4796
--- /dev/null
+++ b/content/best-practices/logging.md
@@ -0,0 +1,38 @@
+# 日志采集

本文介绍 Kubernetes 中,日志采集的最佳实践。

## 落盘文件还是标准输出?

在上 K8S 的过程中,往往会遇到一个问题:业务日志是输出到日志文件,还是输出到标准输出?哪种方式更好?

如果输出到日志文件,日志轮转就需要自己去完成,要么业务日志框架支持,要么用其它工具去轮转(比如 sidecar 与业务容器共享日志目录,然后 sidecar 中 crontab + logrotate 之类的工具去轮转)。

如果输出到标准输出(前提是容器主进程是业务进程),日志轮转则是由 K8S 自动完成,业务不需要关心。对于非 docker 的运行时(比如 containerd),日志轮转由 kubelet 完成,每个容器标准输出的日志轮转规则由 kubelet 以下两个参数决定:

```txt
--container-log-max-files int32 Set the maximum number of container log files that can be present for a container. The number must be >= 2. This flag can only be used with --container-runtime=remote. (default 5)
--container-log-max-size string Set the maximum size (e.g. 10Mi) of container log file before it is rotated. This flag can only be used with --container-runtime=remote. (default "10Mi")
```

> 日志默认最多存储 5 个文件,每个最大 10Mi。

docker 运行时没有实现 CRI 接口,日志轮转由 docker 自身完成,在配置文件 `/etc/docker/daemon.json` 中配置:

``` json
{
  "log-driver": "json-file",
  "log-opts": {"max-size": "500m", "max-file": "3"}
}
```

输出到标准输出还有一些其它好处:

1. 日志内容可以通过标准 K8S API 获取到,比如使用 `kubectl logs`(示例见下)或一些 K8S 管理平台的可视化界面查看(比如 Kubernetes Dashboard,KubeSphere, Rancher 以及云厂商的容器服务控制台等)。
2. 运维无需关注业务日志文件路径,可以更方便地使用统一的采集规则进行采集,减少运维复杂度。
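例如,标准输出的日志可以直接用 kubectl 查看(命令中的 namespace 与资源名仅为示意,请替换为实际值):

```bash
# 查看 Pod 的标准输出日志
kubectl -n prod logs mypod
# 多容器 Pod 需用 -c 指定容器
kubectl -n prod logs mypod -c mycontainer
# 实时跟踪最近 100 行日志
kubectl -n prod logs deploy/myapp --tail=100 -f
# 查看容器上次重启前的日志
kubectl -n prod logs mypod --previous
```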
**最佳实践**

如果你的应用已经足够云原生了,符合"单进程模型",不再是富容器,那么应尽量将日志输出到标准输出,业务不需要关心日志轮转,使用日志采集工具采集容器标准输出。有一种例外的情况是,对于非 docker 运行时,如果你有单个容器的日志输出过快,速率持续超过 `30MB/s` 的话,kubelet 在轮转压缩的时候,可能会 "追不上",迟迟读不到 EOF,轮转失败,最终可能导致磁盘爆满,这种情况还是建议输出到日志文件,自行轮转。

其它情况,可以先将日志落盘到文件,并自行轮转下。
\ No newline at end of file

diff --git a/content/best-practices/long-connection.md b/content/best-practices/long-connection.md
new file mode 100644
index 0000000..ffe8f5c
--- /dev/null
+++ b/content/best-practices/long-connection.md
@@ -0,0 +1,34 @@
+# 长连接服务

## 负载不均问题

对于长连接的服务,可能会存在负载不均的问题,下面介绍两种场景。

### 滚动更新负载不均

在连接数比较固定或波动不大的情况下,滚动更新时,旧 Pod 上的连接逐渐断掉,重连到新启动的 Pod 上,越先启动的 Pod 所接收到的连接数越多,造成负载不均:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925110349.png)

### rr 策略负载不均

假如长连接服务的不同连接的保持时长差异很大,而 ipvs 转发时默认是 rr 策略转发,如果某些后端 Pod "运气较差",它们上面的连接保持时间比较长,而由于是 rr 转发,它们身上累计的连接数就可能较多,节点上通过 `ipvsadm -Ln -t CLUSTER-IP:PORT` 查看某个 service 的转发情况:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925110404.png)

部分 Pod 连接数高,意味着相比连接数低的 Pod 要同时处理更多的连接,所消耗的资源也就相对更多,从而造成负载不均。

将 kube-proxy 的 ipvs 转发模式设置为 lc (Least-Connection) ,即倾向转发给连接数少的 Pod,可能会有所缓解,但也不一定,因为 ipvs 的负载均衡状态是分散在各个节点的,并没有收敛到一个地方,也就无法在全局层面感知哪个 Pod 上的连接数少,并不能真正做到 lc。可以尝试设置为 sh (Source Hashing),这样可以保证即便负载均衡状态没有收敛到同一个地方,也能在全局尽量保持负载均衡。

## 扩容失效问题

在连接数比较固定或波动不大的情况下,工作负载在 HPA 自动扩容时,由于是长连接,连接数又比较固定,所有连接都 "固化" 在之前的 Pod 上,新扩出的 Pod 几乎没有连接,造成之前的 Pod 高负载,而扩出来的 Pod 又无法分担压力,导致扩容失效:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925110418.png)

## 最佳实践

1. 业务层面自动重连,避免连接 "固化" 到某个后端 Pod 上。比如周期性定时重连,或者一个连接中处理的请求数达到阈值后自动重连。
2. 不直接请求后端,通过七层代理访问。比如 gRPC 协议,可以 [使用 nginx ingress 转发 gRPC](https://kubernetes.github.io/ingress-nginx/examples/grpc/),也可以 [使用 istio 转发 gRPC](https://istiobyexample.dev/grpc/),这样对于 gRPC 这样多个请求复用同一个长连接的场景,经过七层代理后,可以自动拆分请求,在请求级别负载均衡。
3. kube-proxy 的 ipvs 转发策略设置为 sh (`--ipvs-scheduler=sh`)。如果用的腾讯云 EKS 弹性集群,没有节点,看不到 kube-proxy,可以通过 `eks.tke.cloud.tencent.com/ipvs-scheduler: 'sh'` 这样的注解来设置,另外还支持将端口号也加入到 hash 的 key,更利于负载均衡,需再设置下 `eks.tke.cloud.tencent.com/ipvs-sh-port: "true"`,参考 [EKS 注解](../tencent/appendix/eks-annotations.md#%E8%AE%BE%E7%BD%AE-ipvs-%E5%8F%82%E6%95%B0)。

diff --git a/content/best-practices/ops/batch-operate-node-with-ansible.md b/content/best-practices/ops/batch-operate-node-with-ansible.md
new file mode 100644
index 0000000..b8abebf
--- /dev/null
+++ b/content/best-practices/ops/batch-operate-node-with-ansible.md
@@ -0,0 +1,90 @@
+# 使用 Ansible 批量操作节点

## 原理介绍

Ansible 是一款流行的开源运维工具,可以直接通过 SSH 协议批量操作机器,无需事先进行手动安装依赖等操作,十分便捷。我们可以针对需要批量操作的节点,使用 ansible 批量对节点执行指定的脚本。

## 准备 Ansible 控制节点

1. 选取实例作为 Ansible 的控制节点,通过此节点批量发起对存量 TKE 节点的操作。可选择集群所在私有网络 VPC 中的任意实例作为控制节点(包括 TKE 节点)。
2. 选定控制节点后,选择对应方式安装 Ansible:

- Ubuntu 操作系统安装方式:
  ```bash
  sudo apt update && sudo apt install software-properties-common -y && sudo apt-add-repository --yes --update ppa:ansible/ansible && sudo apt install ansible -y
  ```

- CentOS 操作系统安装方式:
  ```bash
  sudo yum install ansible -y
  ```

## 准备配置文件

将所有需要进行配置操作的节点内网 IP 配置到 `hosts.ini` 文件中,每行一个 IP。示例如下:

```txt
10.0.3.33
10.0.2.4
```

如需操作所有节点,可通过以下命令一键生成 `hosts.ini` 文件。

```bash
kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}' | tr ' ' '\n' > hosts.ini
```
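生成好 `hosts.ini` 后,可以先用 ansible 内置的 ping 模块验证到各节点的连通性(这里假设使用 ubuntu 用户和密钥文件 `tke.key`,请按实际情况替换):

```bash
ansible all -i hosts.ini --private-key=tke.key --user ubuntu -m ping
```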
## 准备批量执行脚本

将需批量执行的操作写入脚本,并保存为脚本文件,下面举个例子。

自建镜像仓库后没有权威机构颁发证书,直接使用 HTTP 或 HTTPS 自签发的证书,默认情况下 dockerd 拉取镜像时会报错。此时可通过批量修改节点的 dockerd 配置,将自建仓库地址添加到 dockerd 配置的 `insecure-registries` 中使 dockerd 忽略证书校验。脚本文件 `modify-dockerd.sh` 内容如下:

```bash
# yum install -y jq # centos
apt install -y jq # ubuntu
cat /etc/docker/daemon.json | jq '."insecure-registries" += ["myharbor.com"]' > /tmp/daemon.json
cp /tmp/daemon.json /etc/docker/daemon.json
systemctl restart docker
```

## 使用 Ansible 批量执行脚本

通常 TKE 节点在新增时均指向一个 SSH 登录密钥或密码。请按照实际情况执行以下操作:

### 使用密钥

1. 准备密钥文件,例如 `tke.key`。
2. 执行以下命令,授权密钥文件:
   ```bash
   chmod 0600 tke.key
   ```

3. 批量执行脚本:
- Ubuntu 操作系统节点批量执行示例如下:
  ```bash
  ansible all -i hosts.ini --ssh-common-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --user ubuntu --become --become-user=root --private-key=tke.key -m script -a "modify-dockerd.sh"
  ```
- 其他操作系统节点批量执行示例如下:
  ```bash
  ansible all -i hosts.ini --ssh-common-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --user root -m script -a "modify-dockerd.sh"
  ```


### 使用密码

1. 执行以下命令,将密码输入至 PASS 变量:
   ```bash
   read -s PASS
   ```

2. 批量执行脚本:
- Ubuntu 操作系统节点的 SSH 用户名默认为 ubuntu,批量执行示例如下:
  ```bash
  ansible all -i hosts.ini --ssh-common-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --user ubuntu --become --become-user=root -e "ansible_password=$PASS" -m script -a "modify-dockerd.sh"
  ```

- 其他系统节点的 SSH 用户名默认为 root,批量执行示例如下:
  ```bash
  ansible all -i hosts.ini --ssh-common-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --user root -e "ansible_password=$PASS" -m script -a "modify-dockerd.sh"
  ```

diff --git a/content/best-practices/ops/etcd-optimization.md b/content/best-practices/ops/etcd-optimization.md
new file mode 100644
index 0000000..78011a7
--- /dev/null
+++ b/content/best-practices/ops/etcd-optimization.md
@@ -0,0 +1,52 @@
+# ETCD 优化

## 高可用部署

部署一个高可用 ETCD 集群可以参考官方文档 [Clustering Guide](https://etcd.io/docs/v3.5/op-guide/clustering/)。

> 如果是 self-host 方式部署的集群,可以用 etcd-operator 部署 etcd 集群;也可以使用另一个小集群专门部署 etcd (使用 etcd-operator)。

## 提高磁盘 IO 性能

ETCD 对磁盘写入延迟非常敏感,对于负载较重的集群建议磁盘使用 SSD 固态硬盘。可以使用 diskbench 或 fio 测量磁盘实际顺序 IOPS。

## 提高 ETCD 的磁盘 IO 优先级

由于 ETCD 必须将数据持久保存到磁盘日志文件中,来自其他进程的磁盘活动可能会增加写入延迟,导致 ETCD 请求超时和临时 leader 丢失。当给定高磁盘优先级时,ETCD 服务可以稳定地与这些进程一起运行:

``` bash
sudo ionice -c2 -n0 -p $(pgrep etcd)
```

## 提高存储配额

默认 ETCD 空间配额大小为 2G,超过 2G 将不再写入数据。通过给 ETCD 配置 `--quota-backend-bytes` 参数增大空间配额,最大支持 8G。

## 分离 events 存储

集群规模大的情况下,集群中包含大量节点和服务,会产生大量的 event,这些 event 将会对 etcd 造成巨大压力并占用大量 etcd 存储空间,为了在大规模集群下提高性能,可以将 events 存储在单独的 ETCD 集群中。

配置 kube-apiserver:

``` bash
--etcd-servers="http://etcd1:2379,http://etcd2:2379,http://etcd3:2379" --etcd-servers-overrides="/events#http://etcd4:2379,http://etcd5:2379,http://etcd6:2379"
```

## 减小网络延迟

如果有大量并发客户端请求 ETCD leader 服务,则可能由于网络拥塞而延迟处理 follower 对等请求。在 follower
节点上的发送缓冲区错误消息: + +``` bash +dropped MsgProp to 247ae21ff9436b2d since streamMsg's sending buffer is full +dropped MsgAppResp to 247ae21ff9436b2d since streamMsg's sending buffer is full +``` + +可以通过在客户端提高 ETCD 对等网络流量优先级来解决这些错误。在 Linux 上,可以使用 tc 对对等流量进行优先级排序: + +``` bash +$ tc qdisc add dev eth0 root handle 1: prio bands 3 +$ tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip sport 2380 0xffff flowid 1:1 +$ tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip dport 2380 0xffff flowid 1:1 +$ tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip sport 2379 0xffff flowid 1:1 +$ tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip dport 2379 0xffff flowid 1:1 +``` diff --git a/content/best-practices/ops/large-scale-cluster-optimization.md b/content/best-practices/ops/large-scale-cluster-optimization.md new file mode 100644 index 0000000..cd787fe --- /dev/null +++ b/content/best-practices/ops/large-scale-cluster-optimization.md @@ -0,0 +1,125 @@ +# 大规模集群优化 + +Kubernetes 自 v1.6 以来,官方就宣称单集群最大支持 5000 个节点。不过这只是理论上,在具体实践中从 0 到 5000,还是有很长的路要走,需要见招拆招。 + +官方标准如下: + +* 不超过 5000 个节点 +* 不超过 150000 个 pod +* 不超过 300000 个容器 +* 每个节点不超过 100 个 pod + +## Master 节点配置优化 + +GCE 推荐配置: + +* 1-5 节点: n1-standard-1 +* 6-10 节点: n1-standard-2 +* 11-100 节点: n1-standard-4 +* 101-250 节点: n1-standard-8 +* 251-500 节点: n1-standard-16 +* 超过 500 节点: n1-standard-32 + +AWS 推荐配置: + +* 1-5 节点: m3.medium +* 6-10 节点: m3.large +* 11-100 节点: m3.xlarge +* 101-250 节点: m3.2xlarge +* 251-500 节点: c4.4xlarge +* 超过 500 节点: c4.8xlarge + +对应 CPU 和内存为: + +* 1-5 节点: 1vCPU 3.75G内存 +* 6-10 节点: 2vCPU 7.5G内存 +* 11-100 节点: 4vCPU 15G内存 +* 101-250 节点: 8vCPU 30G内存 +* 251-500 节点: 16vCPU 60G内存 +* 超过 500 节点: 32vCPU 120G内存 + +## kube-apiserver 优化 + +### 高可用 + +* 方式一: 启动多个 kube-apiserver 实例通过外部 LB 做负载均衡。 +* 方式二: 设置 `--apiserver-count` 和 `--endpoint-reconciler-type`,可使得多个 kube-apiserver 实例加入到 Kubernetes Service 的 endpoints 中,从而实现高可用。 + +不过由于 TLS 会复用连接,所以上述两种方式都无法做到真正的负载均衡。为了解决这个问题,可以在服务端实现限流器,在请求达到阀值时告知客户端退避或拒绝连接,客户端则配合实现相应负载切换机制。 + +### 控制连接数 + +kube-apiserver 以下两个参数可以控制连接数: + +``` bash +--max-mutating-requests-inflight int The maximum number of mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit. (default 200) +--max-requests-inflight int The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit. 
(default 400)
```

节点数量在 1000 - 3000 之间时,推荐:

``` bash
--max-requests-inflight=1500
--max-mutating-requests-inflight=500
```

节点数量大于 3000 时,推荐:

``` bash
--max-requests-inflight=3000
--max-mutating-requests-inflight=1000
```

## kube-scheduler 与 kube-controller-manager 优化

### 高可用

kube-controller-manager 和 kube-scheduler 是通过 leader election 实现高可用的,启用时需要添加以下参数:

``` bash
--leader-elect=true
--leader-elect-lease-duration=15s
--leader-elect-renew-deadline=10s
--leader-elect-resource-lock=endpoints
--leader-elect-retry-period=2s
```

### 控制 QPS

与 kube-apiserver 通信的 qps 限制,推荐为:

``` bash
--kube-api-qps=100
```

## Kubelet 优化

* 设置 `--image-pull-progress-deadline=30m`
* 设置 `--serialize-image-pulls=false`(需要 Docker 使用 overlay2)
* Kubelet 单节点允许运行的最大 Pod 数:`--max-pods=110`(默认是 110,可以根据实际需要设置)

## 集群 DNS 高可用

设置反亲和,让集群 DNS (kube-dns 或 coredns) 分散在不同节点,避免单点故障:

``` yaml
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchExpressions:
        - key: k8s-app
          operator: In
          values:
          - kube-dns
      topologyKey: kubernetes.io/hostname
```

## ETCD 优化

参考 [ETCD 优化](etcd-optimization.md)

## 参考资料

* [Considerations for large clusters](https://kubernetes.io/docs/setup/best-practices/cluster-large/)
\ No newline at end of file

diff --git a/content/best-practices/ops/securely-maintain-or-offline-node.md b/content/best-practices/ops/securely-maintain-or-offline-node.md
new file mode 100644
index 0000000..26a5b23
--- /dev/null
+++ b/content/best-practices/ops/securely-maintain-or-offline-node.md
@@ -0,0 +1,53 @@
+# 安全维护或下线节点

有时候我们需要对节点进行维护或进行版本升级等操作,操作之前需要对节点执行驱逐 (kubectl drain),驱逐时会将节点上的 Pod 进行删除,以便它们漂移到其它节点上,当驱逐完毕之后,节点上的 Pod 都漂移到其它节点了,这时我们就可以放心地对节点进行操作了。
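驱逐与恢复节点的常用命令如下(以较新版本 kubectl 为例,NODE 请替换为实际节点名;老版本中 `--delete-emptydir-data` 对应的参数是 `--delete-local-data`):

```bash
# 驱逐节点: 先封锁节点,然后删除节点上的 Pod (DaemonSet 管理的 Pod 除外)
kubectl drain NODE --ignore-daemonsets --delete-emptydir-data
# 维护完成后,恢复节点为可调度状态
kubectl uncordon NODE
```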
## 驱逐存在的问题

有一个问题就是,驱逐节点是一种有损操作,驱逐的原理:

1. 封锁节点 (设为不可调度,避免新的 Pod 调度上来)。
2. 将该节点上的 Pod 删除。
3. ReplicaSet 控制器检测到 Pod 减少,会重新创建一个 Pod,调度到新的节点上。

这个过程是先删除,再创建,并非是滚动更新,因此更新过程中,如果一个服务的所有副本都在被驱逐的节点上,则可能导致该服务不可用。

我们再来看下什么情况下驱逐会导致服务不可用:

1. 服务存在单点故障,所有副本都在同一个节点,驱逐该节点时,就可能造成服务不可用。
2. 服务没有单点故障,但刚好这个服务涉及的 Pod 全部都部署在这一批被驱逐的节点上,所以这个服务的所有 Pod 同时被删,也会造成服务不可用。
3. 服务没有单点故障,也没有全部部署到这一批被驱逐的节点上,但驱逐时造成这个服务的一部分 Pod 被删,短时间内服务的处理能力下降导致服务过载,部分请求无法处理,也就降低了服务可用性。

## 解决方案

针对第一点,我们可以使用前面讲的 [Pod 打散调度](../ha/pod-split-up-scheduling.md) 避免单点故障。

针对第二和第三点,我们可以通过配置 PDB (PodDisruptionBudget) 来避免所有副本同时被删除,驱逐时 K8S 会 "观察" 对应服务当前可用与期望的副本数,根据定义的 PDB 来控制 Pod 删除速率,达到阈值时会等待 Pod 在其它节点上启动并就绪后再继续删除,以避免同时删除太多的 Pod 导致服务不可用或可用性降低,下面给出两个示例。

示例一 (保证驱逐时 nginx 至少有 90% 的副本可用):

``` yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: nginx-pdb
spec:
  minAvailable: 90%
  selector:
    matchLabels:
      app: nginx
```

示例二 (保证驱逐时 zookeeper 最多有一个副本不可用,相当于逐个删除并等待在其它节点完成重建):

``` yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app: zookeeper
```
\ No newline at end of file

diff --git a/content/best-practices/ops/securely-modify-container-root-dir.md b/content/best-practices/ops/securely-modify-container-root-dir.md
new file mode 100644
index 0000000..4d419b2
--- /dev/null
+++ b/content/best-practices/ops/securely-modify-container-root-dir.md
@@ -0,0 +1,53 @@
+# 安全变更容器数据盘路径

本文介绍如何安全地对容器的数据盘路径进行变更。

## Docker 运行时

### 注意事项

如果节点上容器运行时是 Docker,想要变更 Docker Root Dir,需要谨慎一点。如果操作不慎,可能造成采集不到容器监控数据,因为容器监控数据由 kubelet 的 cadvisor 模块提供,而由于 docker 没有实现 CRI 接口,cadvisor 会对 Docker 有一些特殊处理: 在刚启动时,通过 `docker info` 获取 `Docker Root Dir` 路径,后续逻辑会依赖这个路径。

如果在 kubelet 运行过程中,改了 `Docker Root Dir`,cadvisor 并不会更新路径,仍然认为路径是之前的,就会造成 kubelet 不能正常返回监控指标并且报类似如下的错:

```txt
Mar 21 02:59:26 VM-67-101-centos kubelet[714]: E0321 02:59:26.320938 714 manager.go:1086] Failed to create existing container: /kubepods/burstable/podb267f18b-a641-4004-a660-4c6a43b6e520/03164d8f0d1f55a285b50b2117d6fdb2c33d2fa87f46dba0f43b806017607d03: failed to identify the read-write layer ID for container "03164d8f0d1f55a285b50b2117d6fdb2c33d2fa87f46dba0f43b806017607d03". - open /var/lib/docker/image/overlay2/layerdb/mounts/03164d8f0d1f55a285b50b2117d6fdb2c33d2fa87f46dba0f43b806017607d03/mount-id: no such file or directory
```

> 参考 [排障案例: cAdvisor 无数据](../../troubleshooting/node/cadvisor-no-data.md)。

### 变更步骤

1. 驱逐节点(`kubectl drain NODE`),让存量 Pod 漂移到其它节点上,参考 [安全维护或下线节点](securely-maintain-or-offline-node.md)。
2. 修改 dockerd 配置文件 `/etc/docker/daemon.json`:
   ```json
   {
     "graph": "/data/docker"
   }
   ```
3. 重启 dockerd:
   ```bash
   systemctl restart docker
   # systemctl restart dockerd
   ```
4. 重启 kubelet
   ```bash
   systemctl restart kubelet
   ```
5. 节点恢复为可调度状态: `kubectl uncordon NODE`。

## 其它运行时

其它运行时都实现了 CRI 接口,变更容器 Root Dir 就不需要那么严谨,不过安全起见,还是建议先安全地将节点上存量 Pod 驱逐走(参考 [安全维护或下线节点](securely-maintain-or-offline-node.md)),然后再修改运行时配置并重启容器运行时。

配置修改方式参考对应运行时的官方文档,这里以常用的 `containerd` 为例:

1. 修改 `/etc/containerd/config.toml`:
   ```toml
   root = "/data/containerd"
   ```
2. 重启 containerd:
   ```bash
   systemctl restart containerd
   ```
3. 节点恢复为可调度状态: `kubectl uncordon NODE`。
\ No newline at end of file

diff --git a/content/best-practices/performance-optimization/cpu.md b/content/best-practices/performance-optimization/cpu.md
new file mode 100644
index 0000000..a606a07
--- /dev/null
+++ b/content/best-practices/performance-optimization/cpu.md
@@ -0,0 +1,97 @@
+# CPU 绑核

## 背景

对于一些计算密集型,或对 CPU 比较敏感的业务,可以开启 CPU 亲和性,即绑核,避免跟其它 Pod 争抢 CPU 降低性能。

## 操作步骤

1. 驱逐节点:
   ```bash
   kubectl drain NODE
   ```
2. 停止 kubelet:
   ```bash
   systemctl stop kubelet
   ```
3. 修改 kubelet 参数:
   ```txt
   --cpu-manager-policy="static"
   ```
4. 删除旧的 CPU 管理器状态文件:
   ```bash
   rm /var/lib/kubelet/cpu_manager_state
   ```
5. 启动 kubelet:
   ```bash
   systemctl start kubelet
   ```
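重启 kubelet 后,可以查看新生成的状态文件确认 static 策略已生效(以下输出仅为示意,cpuset 与 checksum 取决于具体节点环境):

```bash
$ cat /var/lib/kubelet/cpu_manager_state
{"policyName":"static","defaultCpuSet":"0-95","checksum":1337762326}
```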
## 绑定 NUMA 亲和性

CPU 规格较大的节点,可能会跨 NUMA,如果 Pod 中业务进程运行的时候,在不同 NUMA 的 CPU 之间切换,会有一定的性能损耗,这种情况可以进一步开启 NUMA 的亲和性,让 Pod 中进程都跑在同一 NUMA 的 CPU 上,减少性能损耗。

### 前提条件

* 内核启用 NUMA: 确保 `/etc/default/grub` 中没有 `numa=off`,若有就改为 `numa=on`。
* k8s 1.18 版本以上 (依赖特性 TopologyManager 在 1.18 进入 beta 默认开启)。

### 启用方法

增加 kubelet 参数:

* `--cpu-manager-policy=static`
* `--topology-manager-policy=single-numa-node`

### 验证 NUMA 亲和性

1. 确认节点 CPU 分布情况:

```txt
NUMA node0 CPU(s): 0-23,48-71
NUMA node1 CPU(s): 24-47,72-95
```

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111834.png)

2. 先后创建三个 static 类型(request 和 limit 严格一致)的 Pod:

```txt
debug1: CPU request==limit==40C
debug2: CPU request==limit==40C
debug3: CPU request==limit==10C
```

实验预期:
* debug1 与 debug2 分布在不同的 NUMA 上,各自占用 40C CPU 资源,numa0 与 numa1 各自剩余 8C。
* debug3 预期需要 10C 并且都在一个 NUMA 上,在 debug1 和 debug2 各自占用 40C 的情况下,总共剩余 16C CPU,但每个 NUMA 剩余 8C < 10C,debug3 必定调度失败。

3. 验证
   在 debug1 上创建 40 个 100% 使用 CPU 的进程,查看进程分布情况:debug1 的进程全部分布在 numa0 上:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111846.png)

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111855.png)

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111907.png)

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111918.png)

同样,debug2 全部分布在 numa1 上。

debug3 由于没有 NUMA 满足 >= 10C,调度失败。

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111926.png)

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925111934.png)

### 确保 Pod 内的进程在本 NUMA 分配内存

本质上是通过系统调用 (set_mempolicy) 设置进程属性,在内核给进程分配内存时,内核只在进程所属 NUMA 分配内存。如果业务进程本身没有显式调用 set_mempolicy 设置内存分配策略,可以通过 `numactl --localalloc cmd` 来启动进程,内核分配内存时会严格保证内存分布在本 NUMA。

## 参考资料

* [https://docs.qq.com/doc/DSkNYQWt4bHhva0F6](https://docs.qq.com/doc/DSkNYQWt4bHhva0F6)
* [https://blog.csdn.net/nicekwell/article/details/9368307](https://blog.csdn.net/nicekwell/article/details/9368307)
* [为什么 NUMA 会影响程序的延迟](https://draveness.me/whys-the-design-numa-performance/)
* [控制节点上的 CPU 管理策略](https://kubernetes.io/zh-cn/docs/tasks/administer-cluster/cpu-management-policies/)

diff --git a/content/best-practices/performance-optimization/network.md b/content/best-practices/performance-optimization/network.md
new file mode 100644
index 0000000..4dfc219
--- /dev/null
+++ b/content/best-practices/performance-optimization/network.md
@@ -0,0 +1,207 @@
+# 网络性能调优

本文整理在 K8S 环境中的网络性能调优实践。一些涉及到内核参数的调整,关于如何调整 Pod 内核参数的方法请参考 [为 Pod 设置内核参数](../../trick/deploy/set-sysctl.md)。

## 高并发场景

### TIME_WAIT 连接复用

如果短连接并发量较高,它所在 netns 中 TIME_WAIT 状态的连接就比较多,而 TIME_WAIT 连接默认要等 2MSL 时长才释放,长时间占用源端口,当这种状态连接数量累积到超过一定量之后可能会导致无法新建连接。

所以建议开启 TIME_WAIT 复用,即允许将 TIME_WAIT 连接重新用于新的 TCP 连接:

```bash
net.ipv4.tcp_tw_reuse=1
```

> 在高版本内核中,`net.ipv4.tcp_tw_reuse` 默认值为 2,表示仅为回环地址开启复用,基本可以粗略地认为没开启复用。

### 扩大源端口范围

高并发场景,对于 client 来说会使用大量源端口,源端口范围从 `net.ipv4.ip_local_port_range` 这个内核参数中定义的区间随机选取,在高并发环境下,端口范围小容易导致源端口耗尽,使得部分连接异常。通常 Pod 源端口范围默认是 32768-60999,建议将其扩大,调整为 1024-65535: `sysctl -w net.ipv4.ip_local_port_range="1024 65535"`。

### 调大最大文件句柄数

在 linux 中,每个连接都会占用一个文件句柄,所以句柄数量限制同样也会限制最大连接数。对于像 Nginx 这样的反向代理,对于每个请求,它会与 client 和 upstream server 分别建立一个连接,即占据两个文件句柄,所以理论上来说 Nginx 能同时处理的连接数最多是系统最大文件句柄数限制的一半。

系统最大文件句柄数由
`fs.file-max` 这个内核参数来控制,一些环境默认值可能为 838860,建议调大: + +```bash +fs.file-max=1048576 +``` + +### 调大全连接连接队列的大小 + +TCP 全连接队列的长度如果过小,在高并发环境可能导致队列溢出,使得部分连接无法建立。 + +如果因全连接队列溢出导致了丢包,从统计的计数上是可以看出来的: + +```bash +# 用 netstat 查看统计 +$ netstat -s | grep -E 'overflow|drop' + 12178939 times the listen queue of a socket overflowed + 12247395 SYNs to LISTEN sockets dropped + +# 也可以用 nstat 查看计数器 +$ nstat -az | grep -E 'TcpExtListenOverflows|TcpExtListenDrops' +TcpExtListenOverflows 12178939 0.0 +TcpExtListenDrops 12247395 0.0 +``` + +全连接队列的大小取决于 `net.core.somaxconn` 内核参数以及业务进程调用 listen 时传入的 backlog 参数,取两者中的较小值(`min(backlog,somaxconn)`),一些编程语言通常是默认取 `net.core.somaxconn` 参数的值作为 backlog 参数传入 listen 系统调用(比如Go语言)。 + +高并发环境可以考虑将其改到 `65535`: + +```bash +sysctl -w net.core.somaxconn=65535 +``` + +如何查看队列大小来验证是否成功调整队列大小?可以执行 `ss -lntp` 看 `Send-Q` 的值。 + +```bash +$ ss -lntp +State Recv-Q Send-Q Local Address:Port Peer Address:Port Process +LISTEN 0 65535 0.0.0.0:80 0.0.0.0:* users:(("nginx",pid=347916,fd=6),("nginx",pid=347915,fd=6),("nginx",pid=347887,fd=6)) +``` + +> ss 用 -l 查看 LISTEN 状态连接时,`Recv-Q` 表示的当前已建连但还未被服务端调用 `accept()` 取走的连接数量,即全连接队列中的连接数;`Send-Q` 表示的则是最大的 listen backlog 数值,即全连接队列大小。如果 `Recv-Q` 大小接近 `Send-Q` 的大小时,说明连接队列可能溢出。 + +需要注意的是,Nginx 在 listen 时并没有读取 somaxconn 作为 backlog 参数传入,而是在 nginx 配置文件中有自己单独的参数配置: + +```nginx.conf +server { + listen 80 backlog=1024; + ... +``` + +如果不设置,backlog 在 linux 上默认为 511: + +```txt +backlog=number + sets the backlog parameter in the listen() call that limits the maximum length for the queue of pending connections. By default, backlog is set to -1 on FreeBSD, DragonFly BSD, and macOS, and to 511 on other platforms. +``` + +也就是说,即便你的 `somaxconn` 配的很高,nginx 所监听端口的连接队列最大却也只有 511,高并发场景下还是可能导致连接队列溢出,所以建议配置下 nginx 的 backlog 参数。 + +不过如果用的是 Nginx Ingress ,情况又不太一样,因为 Nginx Ingress Controller 会自动读取 somaxconn 的值作为 backlog 参数写到生成的 `nginx.conf` 中,参考 [源码](https://github.com/kubernetes/ingress-nginx/blob/controller-v0.34.1/internal/ingress/controller/nginx.go#L592)。 + +## 高吞吐场景 + +### 调大 UDP 缓冲区 + +UDP socket 的发送和接收缓冲区是有上限的,如果缓冲区较小,高并发环境可能导致缓冲区满而丢包,从网络计数可以看出来: + +```bash +# 使用 netstat 查看统计 +$ netstat -s | grep "buffer errors" + 429469 receive buffer errors + 23568 send buffer errors + +# 也可以用 nstat 查看计数器 +$ nstat -az | grep -E 'UdpRcvbufErrors|UdpSndbufErrors' +UdpRcvbufErrors 429469 0.0 +UdpSndbufErrors 23568 0.0 +``` + +还可以使用 `ss -nump` 查看当前缓冲区的情况: + +```bash +$ ss -nump +Recv-Q Send-Q Local Address:Port Peer Address:Port Process +0 0 10.10.4.26%eth0:68 10.10.4.1:67 users:(("NetworkManager",pid=960,fd=22)) + skmem:(r0,rb212992,t0,tb212992,f0,w0,o640,bl0,d0) +``` + +> 1. `rb212992` 表示 UDP 接收缓冲区大小是 `212992` 字节,`tb212992` 表示 UDP 发送缓存区大小是 `212992` 字节。 +> 2. `Recv-Q` 和 `Send-Q` 分别表示当前接收和发送缓冲区中的数据包字节数。 + +UDP 发送缓冲区大小取决于: +1. `net.core.wmem_default` 和 `net.core.wmem_max` 这两个内核参数,分别表示缓冲区的默认大小和最大上限。 +2. 如果程序自己调用 `setsockopt`设置`SO_SNDBUF`来自定义缓冲区大小,最终取值不会超过 `net.core.wmem_max`;如果程序没设置,则会使用 `net.core.wmem_default` 作为缓冲区的大小。 + +同理,UDP 接收缓冲区大小取决于: +1. `net.core.rmem_default` 和 `net.core.rmem_max` 这两个内核参数,分别表示缓冲区的默认大小和最大上限。 +2. 
如果程序自己调用 `setsockopt`设置`SO_RCVBUF`来自定义缓冲区大小,最终取值不会超过 `net.core.rmem_max`;如果程序没设置,则会使用 `net.core.rmem_default` 作为缓冲区的大小。 + +需要注意的是,这些内核参数在容器网络命名空间中是无法设置的,是 Node 级别的参数,需要在节点上修改,建议修改值: + +```bash +net.core.rmem_default=26214400 # socket receive buffer 默认值 (25M),如果程序没用 setsockopt 更改 buffer 长度的话,默认用这个值。 +net.core.wmem_default=26214400 # socket send buffer 默认值 (25M),如果程序没用 setsockopt 更改 buffer 长度的话,默认用这个值。 +net.core.rmem_max=26214400 # socket receive buffer 上限 (25M),如果程序使用 setsockopt 更改 buffer 长度,最大不能超过此限制。 +net.core.wmem_max=26214400 # socket send buffer 上限 (25M),如果程序使用 setsockopt 更改 buffer 长度,最大不能超过此限制。 +``` + +如果程序自己有调用 `setsockopt` 去设置 `SO_SNDBUF` 或 `SO_RCVBUF`,建议设置到跟前面内核参数对应的最大上限值。 + +### 调大 TCP 缓冲区 + +TCP socket 的发送和接收缓冲区也是有上限的,不过对于发送缓冲区,即便满了也是不会丢包的,只是会让程序发送数据包时卡住,等待缓冲区有足够空间释放出来,所以一般不需要优化发送缓冲区。 + +对于接收缓冲区,在高并发环境如果较小,可能导致缓冲区满而丢包,从网络计数可以看出来: + +```bash +$ nstat -az | grep TcpExtTCPRcvQDrop +TcpExtTCPRcvQDrop 264324 0.0 +``` + +还可以使用 `ss -ntmp` 查看当前缓冲区情况: + +```bash +$ ss -ntmp +ESTAB 0 0 [::ffff:109.244.190.163]:9988 [::ffff:10.10.4.26]:54440 users:(("xray",pid=3603,fd=20)) + skmem:(r0,rb12582912,t0,tb12582912,f0,w0,o0,bl0,d0) +``` + +> 1. `rb12582912` 表示 TCP 接收缓冲区大小是 `12582912` 字节,`tb12582912` 表示 UDP 发送缓存区大小是 `12582912` 字节。 +> 2. `Recv-Q` 和 `Send-Q` 分别表示当前接收和发送缓冲区中的数据包字节数。 + +如果存在 `net.ipv4.tcp_rmem` 这个参数,对于 TCP 而言,会覆盖 `net.core.rmem_default` 和 `net.core.rmem_max` 的值。这个参数网络命名空间隔离的,而在容器网络命名空间中,一般默认是有配置的,所以如果要调整 TCP 接收缓冲区,需要显式在 Pod 级别配置下内核参数: + +```bash +net.ipv4.tcp_rmem="4096 26214400 26214400" +``` + +> 1. 单位是字节,分别是 min, default, max。 +> 2. 如果程序没用 setsockopt 更改 buffer 长度,就会使用 default 作为初始 buffer 长度(覆盖 `net.core.rmem_default`),然后根据内存压力在 min 和 max 之间自动调整。 +> 3. 如果程序使用了 setsockopt 更改 buffer 长度,则使用传入的长度 (仍然受限于 `net.core.rmem_max`)。 + +## 内核参数调优配置示例 + +调整 Pod 内核参数: + +```yaml + initContainers: + - name: setsysctl + image: busybox + securityContext: + privileged: true + command: + - sh + - -c + - | + sysctl -w net.core.somaxconn=65535 + sysctl -w net.ipv4.ip_local_port_range="1024 65535" + sysctl -w net.ipv4.tcp_tw_reuse=1 + sysctl -w fs.file-max=1048576 + sysctl -w net.ipv4.tcp_rmem="4096 26214400 26214400" +``` + +调整节点内核参数(修改 `/etc/sysctl.conf` 并执行 `sysctl -p`): + +```bash +net.core.rmem_default=26214400 +net.core.wmem_default=26214400 +net.core.rmem_max=26214400 +net.core.wmem_max=26214400 +``` + +如果使用的是 [腾讯云弹性集群 EKS](https://console.cloud.tencent.com/tke2/ecluster) 这种没有节点的 Serverless 类型 K8S(每个 Pod 都是独占虚拟机),可以在 Pod 级别加如下注解来修改 Pod 对应虚拟机中的内核参数: + +```yaml +eks.tke.cloud.tencent.com/host-sysctls: '[{"name": "net.core.rmem_max","value": "26214400"},{"name": "net.core.wmem_max","value": "26214400"},{"name": "net.core.rmem_default","value": "26214400"},{"name": "net.core.wmem_default","value": "26214400"}]' +``` + +## 参考资料 + +* [云服务器网络访问丢包](https://cloud.tencent.com/document/product/213/57336) \ No newline at end of file diff --git a/content/best-practices/request-limit.md b/content/best-practices/request-limit.md new file mode 100644 index 0000000..81b9e0c --- /dev/null +++ b/content/best-practices/request-limit.md @@ -0,0 +1,88 @@ +# 合理设置 Request 与 Limit + +如何为容器配置 Request 与 Limit? 
这是一个既常见又棘手的问题,答案会根据服务类型、需求与场景的不同而不同,没有固定的答案,这里结合生产经验总结了一些最佳实践,可以作为参考。

## 所有容器都应该设置 request

request 的值并不是指给容器实际分配的资源大小,它仅仅是给调度器看的,调度器会 "观察" 每个节点可以用于分配的资源有多少,也知道每个节点已经被分配了多少资源。被分配资源的大小就是节点上所有 Pod 中定义的容器 request 之和,它可以计算出节点剩余多少资源可以被分配(可分配资源减去已分配的 request 之和)。如果发现节点剩余可分配资源大小比当前要被调度的 Pod 的 request 还小,那么就不会考虑调度到这个节点,反之,才可能调度。所以,如果不配置 request,那么调度器就不能知道节点大概被分配了多少资源出去,调度器得不到准确信息,也就无法做出合理的调度决策,很容易造成调度不合理,有些节点可能很闲,而有些节点可能很忙,甚至 NotReady。

所以,建议是给所有容器都设置 request,让调度器感知节点有多少资源被分配了,以便做出合理的调度决策,让集群节点的资源能够被合理地分配使用,避免陷入资源分配不均导致一些意外发生。

## CPU request 与 limit 的一般性建议

* 如果不确定应用最佳的 CPU 限制,可以不设置 CPU limit,参考: [Understanding resource limits in kubernetes: cpu time](https://medium.com/@betz.mark/understanding-resource-limits-in-kubernetes-cpu-time-9eff74d3161b)。
* 如果要设置 CPU request,大多可以设置到不大于 1 核,除非是 CPU 密集型应用。

## 老是忘记设置怎么办?

有时候我们会忘记给部分容器设置 request 与 limit,其实我们可以使用 LimitRange 来设置 namespace 的默认 request 与 limit 值,同时它也可以用来限制最小和最大的 request 与 limit。
示例:

``` yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: mem-limit-range
  namespace: test
spec:
  limits:
  - default:
      memory: 512Mi
      cpu: 500m
    defaultRequest:
      memory: 256Mi
      cpu: 100m
    type: Container
```

## 重要的线上应用该如何设置

节点资源不足时,会触发自动驱逐,将一些低优先级的 Pod 删除掉以释放资源让节点自愈。没有设置 request 与 limit 的 Pod 优先级最低,容易被驱逐;request 不等于 limit 的其次;request 等于 limit 的 Pod 优先级较高,不容易被驱逐。所以如果是重要的线上应用,不希望在节点故障时被驱逐导致线上业务受影响,就建议将 request 和 limit 设成一致。

## 怎样设置才能提高资源利用率?

如果给你的应用设置了较高的 request 值,而实际占用资源长期远小于它的 request 值,就会导致节点整体的资源利用率较低。当然,时延非常敏感的业务除外,因为敏感的业务本身不期望节点利用率过高,影响网络包收发速度。所以对一些非核心,并且资源不长期占用的应用,可以适当减少 request 以提高资源利用率。

如果你的服务支持水平扩容,单副本的 request 值一般可以设置到不大于 1 核,CPU 密集型应用除外。比如 coredns,设置到 0.1 核就可以,即 100m。

## 尽量避免使用过大的 request 与 limit

如果你的服务使用单副本或者少量副本,并给出很大的 request 与 limit,让它分配到足够多的资源来支撑业务,那么某个副本故障对业务带来的影响可能就比较大。并且由于 request 较大,当集群内资源分配比较碎片化,如果这个 Pod 所在节点挂了,其它节点又没有一个有足够的剩余可分配资源能够满足这个 Pod 的 request 时,这个 Pod 就无法实现漂移,也就不能自愈,加重对业务的影响。

相反,建议尽量减小 request 与 limit,通过增加副本的方式来对你的服务支撑能力进行水平扩容,让你的系统更加灵活可靠。

## 避免测试 namespace 消耗过多资源影响生产业务

若生产集群中有用于测试的 namespace,不加以限制的话,可能导致集群负载过高,从而影响生产业务。可以使用 ResourceQuota 来限制测试 namespace 的 request 与 limit 的总大小。
示例:

``` yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: quota-test
  namespace: test
spec:
  hard:
    requests.cpu: "1"
    requests.memory: 1Gi
    limits.cpu: "2"
    limits.memory: 2Gi
```

## FAQ

### 为什么 CPU 利用率远不到 limit 还会被 throttle ?
+ +CPU 限流是因为内核使用 CFS 调度算法,对于微突发场景,在一个 CPU 调度周期内 (100ms) 所占用的时间超过了 limit 还没执行完,就会强制 "抢走" CPU 使用权(throttle),等待下一个周期再执行,但是时间拉长一点,进程使用 CPU 所占用的时间比例却很低,监控上就看不出来 CPU 有突增,但实际上又被 throttle 了。 + +更多详细解释参考 [k8s CPU limit和throttling的迷思](https://zhuanlan.zhihu.com/p/433065108)。 + +## 参考资料 + +* [Understanding Kubernetes limits and requests by example](https://sysdig.com/blog/kubernetes-limits-requests/) +* [Understanding resource limits in kubernetes: cpu time](https://medium.com/@betz.mark/understanding-resource-limits-in-kubernetes-cpu-time-9eff74d3161b) +* [Understanding resource limits in kubernetes: memory](https://medium.com/@betz.mark/understanding-resource-limits-in-kubernetes-memory-6b41e9a955f9) +* [Kubernetes best practices: Resource requests and limits](https://cloud.google.com/blog/products/gcp/kubernetes-best-practices-resource-requests-and-limits) +* [Kubernetes 资源分配之 Request 和 Limit 解析](https://cloud.tencent.com/developer/article/1004976) + diff --git a/content/deploy/k3s/install-cases.md b/content/deploy/k3s/install-cases.md new file mode 100644 index 0000000..44b3e53 --- /dev/null +++ b/content/deploy/k3s/install-cases.md @@ -0,0 +1,36 @@ +# k3s 安装实践案例 + +## 概述 + +本文主要给出一些具体的安装实践案例供大家参考。 + +## 安装精简版 k3s + +有时候个人开发者只想用 k3s 来替代容器来部署一些应用,不需要 k8s 很多复杂的功能,此时在安装的时候可以禁用很多不需要的组件,节约服务器资源: + +```bash +$ curl -sfL https://get.k3s.io | sh -s - server \ + --disable-cloud-controller \ + --disable-network-policy \ + --disable-helm-controller \ + --disable=traefik,local-storage,metrics-server,servicelb +``` + +### 路由器上安装极简 k3s + +将 k3s 安装在自家路由器上,统一用声明式的 yaml 管理路由器的应用和功能,方便刷机后也能重新一键安装回来: + +```bash +INSTALL_K3S_MIRROR=cn curl -sfL https://rancher-mirror.rancher.cn/k3s/k3s-install.sh | sh -s - server \ + --kubelet-arg="--hostname-override=10.10.10.2" \ + --disable-kube-proxy \ + --disable-cloud-controller \ + --disable-network-policy \ + --disable-helm-controller \ + --disable=traefik,local-storage,metrics-server,servicelb,coredns +``` + +* 国内家庭网络使用 k3s 默认安装脚本网络不通,使用 mirror 脚本替代。 +* 如果是主路由,公网 ip 每次拨号会变,而 k3s 启动时会获取到外网 ip 作为 hostname,用导出的 kubeconfig 去访问 apiserver 时,会报证书问题(签发时不包含重新拨号之后的外网 ip),可以用 `--kubelet-arg` 强制指定一下路由器使用的静态内网 IP。 +* 在路由器部署的应用通常只用 HostNetwork,不需要访问 service,可以禁用 kube-proxy 和 coredns。 + diff --git a/content/deploy/k3s/offline-installation.md b/content/deploy/k3s/offline-installation.md new file mode 100644 index 0000000..3d35a49 --- /dev/null +++ b/content/deploy/k3s/offline-installation.md @@ -0,0 +1,104 @@ +# k3s 国内离线安装方法 + +## 步骤 + +### 下载离线文件 + +进入 [k3s release](https://github.com/k3s-io/k3s/releases) 页面,下载 k3s 二进制和依赖镜像的压缩包: + +* `k3s`: 二进制。 +* `k3s-airgap-images-amd64.tar`: 镜像压缩包。 + +下载安装脚本: + +```bash +curl -o install.sh https://get.k3s.io +``` + +下载完将所有文件放入需要安装 k3s 的机器上。 + +### 安装依赖镜像 + +```bash +sudo mkdir -p /var/lib/rancher/k3s/agent/images/ +sudo cp ./k3s-airgap-images-amd64.tar /var/lib/rancher/k3s/agent/images/ +``` + +### 安装 k3s 二进制 + +```bash +chmod +x k3s +cp k3s /usr/local/bin/ +``` + +### 执行安装脚本 + +```bash +chmod +x install.sh +INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh +``` + +### 验证 + +查看 k3s 运行状态: + +```bash +systemctl status k3s +``` + +查看 k3s 日志: + +```bash +journalctl -u k3s -f +``` + +查看 k3s 集群状态: + +```bash +$ k3s kubectl get node +NAME STATUS ROLES AGE VERSION +vm-55-160-centos Ready control-plane,master 3m22s v1.25.2+k3s1 +$ k3s kubectl get pod -A +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system local-path-provisioner-5b5579c644-6h99x 1/1 Running 0 3m22s +kube-system coredns-75fc8f8fff-sjjzs 1/1 Running 0 3m22s +kube-system helm-install-traefik-crd-mgffn 
0/1 Completed 0 3m22s +kube-system metrics-server-74474969b-6bj6r 1/1 Running 0 3m22s +kube-system svclb-traefik-0ab06643-6vj96 2/2 Running 0 3m1s +kube-system helm-install-traefik-m7wdm 0/1 Completed 2 3m22s +kube-system traefik-7d647b7597-dw6b4 1/1 Running 0 3m1s +``` + +### 获取 kubeconfig + +若希望在本机之外用 kubectl 操作集群,可以将 kubeconfig 导出来: + +```bash +k3s kubectl config view --raw > k3s +``` + +修改其中 server 地址的 IP 为本机 IP,将 kubeconfig 文件放到 kubectl 所在机器上,然后用 [kubecm](https://github.com/sunny0826/kubecm) 合并到本地 kubeconfig: + +```bash +kubecm add --context-name=k3s -cf k3s +``` + +使用 [kubectx](https://github.com/ahmetb/kubectx) 切换 context: + +```bash +$ kubectl ctx k3s +Switched to context "k3s". +``` + +使用 kubectl 操作 k3s 集群: + +```bash +$ kubectl get node +NAME STATUS ROLES AGE VERSION +vm-55-160-centos Ready control-plane,master 14m v1.25.2+k3s1 +``` + +## 参考资料 + +* [k3s 离线安装官方文档](https://docs.k3s.io/zh/installation/airgap) + diff --git a/content/deploy/kubespray/install.md b/content/deploy/kubespray/install.md new file mode 100644 index 0000000..9556b19 --- /dev/null +++ b/content/deploy/kubespray/install.md @@ -0,0 +1,197 @@ +# 使用 kubespray 搭建集群 + +## 原理 + +[kubespray](https://github.com/kubernetes-sigs/kubespray) 是利用 [ansible](https://docs.ansible.com/ansible/latest/index.html) 这个工具,通过 SSH 协议批量让指定远程机器执行一系列脚本,安装各种组件,完成 K8S 集群搭建。 + +## 准备工作 + +下载 kubespray 并拷贝一份配置: + +```bash +# 下载 kubespray +$ git clone --depth=1 https://github.com/kubernetes-sigs/kubespray.git +$ cd kubespray +# 安装依赖,包括 ansible +$ sudo pip3 install -r requirements.txt + +# 复制一份配置文件 +cp -rfp inventory/sample inventory/mycluster +``` + +## 修改配置 + +需要修改的配置文件列表: + +* `inventory/mycluster/group_vars/all/*.yml` +* `inventory/mycluster/group_vars/k8s-cluster/*.yml` + +下面介绍一些需要重点关注的配置,根据自己需求进行修改。 + +### 集群网络 + +修改配置文件 `inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml`: + +```yaml +# 选择网络插件,支持 cilium, calico, weave 和 flannel +kube_network_plugin: cilium + +# 设置 Service 网段 +kube_service_addresses: 10.233.0.0/18 + +# 设置 Pod 网段 +kube_pods_subnet: 10.233.64.0/18 +``` + +其它相关配置文件: `inventory/mycluster/group_vars/k8s_cluster/k8s-net-*.yml`。 + +### 运行时 + +修改配置文件 `inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml`: + +```yaml +# 支持 docker, crio 和 containerd,推荐 containerd. 
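# 注: Kubernetes 1.24 起已移除 dockershim,新建集群建议直接使用 containerd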
+container_manager: containerd + +# 是否开启 kata containers +kata_containers_enabled: false +``` + +其它相关配置文件: + +```txt +inventory/mycluster/group_vars/all/containerd.yml +inventory/mycluster/group_vars/all/cri-o.yml +inventory/mycluster/group_vars/all/docker.yml +``` + +### 集群证书 + +修改配置文件 `inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml`: + +```yaml +# 是否开启自动更新证书,推荐开启。 +auto_renew_certificates: true +``` + +## 准备机器列表 + +拿到集群部署的初始机器内网 ip 列表,修改 `inventory/mycluster/inventory.ini`: + +```ini +[all] +master1 ansible_host=10.10.10.1 +master2 ansible_host=10.10.10.2 +master3 ansible_host=10.10.10.3 +node1 ansible_host=10.10.10.4 +node2 ansible_host=10.10.10.5 +node3 ansible_host=10.10.10.6 +node4 ansible_host=10.10.10.7 +node5 ansible_host=10.10.10.8 +node6 ansible_host=10.10.10.9 +node7 ansible_host=10.10.10.10 + +[kube_control_plane] +master1 +master2 +master3 + +[etcd] +master1 +master2 +master3 + +[kube_node] +master1 +master2 +master3 +node1 +node2 +node3 +node4 +node5 +node6 +node7 + +[calico_rr] + +[k8s_cluster:children] +kube_control_plane +kube_node +calico_rr +``` + +> **注:** 务必使用 `ansible_host` 标识节点内网 IP,否则可能导致出现类似 [这个issue](https://github.com/kubernetes-sigs/kubespray/issues/5949) 的问题。 + +附上 vim 编辑 inventory,批量加机器的技巧: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023/09/25/vim-inventory.gif) + +## 国内环境安装 + +在国内进行安装时,会因 GFW 影响而安装失败,参考 [kubespray 离线安装配置](offline.md)。 + +## 部署集群 + +```bash +ansible-playbook \ + -i inventory/mycluster/inventory.ini \ + --private-key=id_rsa \ + --user=ubuntu -b \ + cluster.yml +``` + +## 获取 kubeconfig + +部署完成后,从 master 节点上的 `/root/.kube/config` 路径获取到 kubeconfig,这里以 ansible 的 fetch 功能为例,将 kubeconfig 拷贝下来: + +```bash +$ ansible -i '10.10.6.9,' -b -m fetch --private-key id_rsa --user=ubuntu -a 'src=/root/.kube/config dest=kubeconfig flat=yes' all +[WARNING]: Skipping callback plugin 'ara_default', unable to load +10.10.6.9 | CHANGED => { + "changed": true, + "checksum": "190eafeead70a8677b736eaa66d84d77c4a7f8be", + "dest": "/root/kubespray/kubeconfig", + "md5sum": "ded532f68930c48a53b3b2144b30f7f5", + "remote_checksum": "190eafeead70a8677b736eaa66d84d77c4a7f8be", + "remote_md5sum": null +} +``` + +> `-i` 中的逗号是故意的,意思是不让 ansible 误以为是个 inventory 文件,而是解析为单个 host。 + +获取到 kubeconfig 后,可以修改其中的 server 地址,将 `https://127.0.0.1:6443` 改为非 master 节点可以访问的地址,最简单就直接替换 `127.0.0.1` 成其中一台 master 节点的 IP 地址,也可以在 Master 前面挂个负载均衡器,然后替换成负载均衡器的地址。 + +## 扩容节点 + +如果要扩容节点,可以准备好节点的内网 IP 列表,并追加到之前的 inventory 文件里,然后再次使用 `ansible-playbook` 运行一次,有点不同的是: `cluster.yml` 换成 `scale.yml`: + +```bash +ansible-playbook \ + -i inventory/mycluster/inventory.ini \ + --private-key=id_rsa \ + --user=ubuntu -b \ + scale.yml +``` + +## 缩容节点 + +如果有节点不再需要了,我们可以将其移除集群,通常步骤是: +1. `kubectl cordon NODE` 驱逐节点,确保节点上的服务飘到其它节点上去,参考 [安全维护或下线节点](../../best-practices/ops/securely-maintain-or-offline-node.md)。 +2. 停止节点上的一些 k8s 组件 (kubelet, kube-proxy) 等。 +3. `kubectl delete NODE` 将节点移出集群。 +4. 
如果节点是虚拟机,并且不需要了,可以直接销毁掉。 + +前 3 个步骤,也可以用 kubespray 提供的 `remove-node.yml` 这个 playbook 来一步到位实现: + +```bash +ansible-playbook \ + -i inventory/mycluster/inventory.ini \ + --private-key=id_rsa \ + --user=ubuntu -b \ + --extra-vars "node=node1,node2" \ + remove-node.yml +``` + +> `--extra-vars` 里写要移出的节点名列表,如果节点已经卡死,无法通过 SSH 登录,可以在 `--extra-vars` 加个 `reset_nodes=false` 的选项,跳过第二个步骤。 + diff --git a/content/deploy/kubespray/offline.md b/content/deploy/kubespray/offline.md new file mode 100644 index 0000000..25ca0f5 --- /dev/null +++ b/content/deploy/kubespray/offline.md @@ -0,0 +1,113 @@ +# kubespray 离线安装配置 + +## 背景 + +在国内使用 kubespray 安装 Kubernetes 集群,下载依赖的文件和镜像时,往往会遇到下载失败,这时我们可以利用 kubespray 离线安装配置的能力来部署集群。 + +## 准备工作 + +要想离线安装,首先做下以下准备: +1. 一台不受 GFW 限制的服务器或 PC,用于下载安装 Kubernetes 所依赖的海外文件和镜像。 +2. 一个用于离线安装的静态服务器,存储安装集群所需的二进制静态文件。通常使用 nginx 搭建静态服务器即可。 +3. 一个用于离线安装的镜像仓库,存储安装集群所需的依赖镜像。比如自己搭建的 Harbor,只要网络可以通,能够正常拉取到镜像即可。 + +## 生成依赖文件和镜像的列表 + +```bash +$ cd contrib/offline +$ bash generate_list.sh +$ tree temp/ +temp/ +├── files.list +├── files.list.template +├── images.list +└── images.list.template +``` + +* `flies.list` 是依赖文件的列表。 +* `images.list` 是依赖镜像的列表。 + +## 搬运文件 + +执行以下命令将依赖的静态文件全部下载到 `temp/files` 目录下: + +```bash +wget -x -P temp/files -i temp/files.list +``` + +将静态文件通过静态服务器暴露出来,比如使用 nginx,根据情况修改 nginx 配置,比如: + +```nginx.conf +user root; +server { + listen 80 default_server; + listen [::]:80 default_server; + location /k8s/ { + alias /root/kubespray/contrib/offline/temp/files/; + } +} +``` + +## 搬运镜像 + +我们可以使用 [skopeo](https://github.com/containers/skopeo) 将依赖的镜像同步到我们自己的镜像仓库,安装方法参考 [官方安装文档](https://github.com/containers/skopeo/blob/main/install.md)。 + +安装好后,登录下自己的镜像仓库: + +```bash +$ skopeo login cr.imroc.cc +Username: admin +Password: +Login Succeeded! +``` + +然后将所有依赖镜像同步到我们自己的镜像仓库: + +```bash +for image in $(cat temp/images.list); do skopeo copy docker://${image} docker://cr.imroc.cc/k8s/${image#*/}; done +``` + +注意事项: +1. 替换成自己的仓库地址。 +2. 提前创建好仓库,比如用 harbor,提前创建好名为 "k8s" 的项目,以便将所有镜像都同步到 "k8s" 这个项目路径下。 +3. 
如果直接二进制安装 skopeo,需提前创建好配置文件 `/etc/containers/policy.json`,内容可以用默认的,参考 [default-policy.json](https://github.com/containers/skopeo/blob/main/default-policy.json)。 + +## 修改 offline.yml + +搬运好了文件和镜像,我们来修改下 kubespray 的地址,让依赖的文件和镜像下载地址使用我们自己的地址,修改 `/root/kubespray/inventory/mycluster/group_vars/all/offline.yml`: + +```yaml +# 替换镜像地址 +registry_host: "cr.imroc.cc/k8s" +kube_image_repo: "{{ registry_host }}" +gcr_image_repo: "{{ registry_host }}" +github_image_repo: "{{ registry_host }}" +docker_image_repo: "{{ registry_host }}" +quay_image_repo: "{{ registry_host }}" + +# 替换静态文件地址 +files_repo: "http://10.10.10.14/k8s" +kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubeadm" +kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" +kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" +cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" +crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" +etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" +calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" +flannel_cni_download_url: "{{ files_repo }}/github.com/flannel-io/cni-plugin/releases/download/{{ flannel_cni_version }}/flannel-{{ image_arch }}" +helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" +crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" +kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz" +runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" +containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" +krew_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/krew/releases/download/{{ krew_version }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz" +cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}-linux-{{ image_arch }}.tar.gz" +gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +gvisor_containerd_shim_runsc_download_url: "{{ files_repo 
}}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
youki_download_url: "{{ files_repo }}/github.com/containers/youki/releases/download/v{{ youki_version }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux.tar.gz"
```

> `xxx_download_url` 不是直接 uncomment 得到的,是通过 `files.list.template` 里的内容加上 `{{ files_repo }}` 拼接而来。

diff --git a/content/deploy/terraform.md b/content/deploy/terraform.md
new file mode 100644
index 0000000..04cfb93
--- /dev/null
+++ b/content/deploy/terraform.md
@@ -0,0 +1,64 @@
+# 使用 Terraform 创建集群

利用 Terraform 可以创建各种云上产品化的 Kubernetes 集群。

## 准备配置文件

创建 `main.tf`,可参考 [附录](../appendix/terraform) 中的示例,根据自己的需求按照注释提示替换内容。

## 创建集群

在 `main.tf` 所在目录执行 `terraform init`,然后再执行 `terraform apply`,输入 `yes` 确认执行。

等待大约 1 分多钟,会自动打印创建出来的集群 id:

```txt
tencentcloud_eks_cluster.roc-test: Still creating... [1m10s elapsed]
tencentcloud_eks_cluster.roc-test: Still creating... [1m20s elapsed]
tencentcloud_eks_cluster.roc-test: Creation complete after 1m21s [id=cls-4d2qxcs5]

Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
```

## 获取 kubeconfig

集群刚创建好的时候,APIServer 外网访问的 CLB 还没创建好,不知道外网 IP 地址,terraform 本地记录的状态里,kubeconfig 的 server 地址就为空。所以我们先 refresh 一下,将创建好的 server 地址同步到本地:

```bash
terraform refresh
```

然后导出 kubeconfig 文件:

```bash
terraform show -json | jq -r '.values.root_module.resources[] | select(.address | test("tencentcloud_eks_cluster.roc-test")) | .values.kube_config' > eks
```

> 注意替换 `roc-test` 为自己在 `main.tf` 文件中定义的名字。

使用 [kubecm](../trick/kubectl/merge-kubeconfig-with-kubecm.md) 可以一键导入合并 kubeconfig:

```bash
kubecm add -f eks
```

使用 [kubectx](../trick/kubectl/quick-switch-with-kubectx.md) 可以切换 context:

```bash
kubectl ctx eks
```

然后就可以使用 kubectl 操作集群了。

## 销毁集群

在 `main.tf` 所在目录执行:

```bash
terraform destroy
```

## 参考资料

* [Terraform TencentCloud Provider Documentation](https://registry.terraform.io/providers/tencentcloudstack/tencentcloud/latest/docs)

diff --git a/content/monitoring/grafana/ha-setup.md b/content/monitoring/grafana/ha-setup.md
new file mode 100644
index 0000000..17489a6
--- /dev/null
+++ b/content/monitoring/grafana/ha-setup.md
@@ -0,0 +1,121 @@
+# Grafana 高可用部署

## 概述

Grafana 默认安装是单副本,非高可用部署,而 Grafana 自身是支持多副本高可用部署的,本文介绍其配置方法以及已经安装的 Grafana 如何迁移到高可用架构。

## 修改配置

要让 Grafana 支持高可用,需要对 Grafana 配置文件 (`grafana.ini`) 进行一些关键的修改:

1. Grafana 默认使用 sqlite3 文件存储数据,多副本共享可能会有数据冲突,可以配置一下 `database` 让多副本共享同一个 mysql 或 postgres 数据库,这样多副本就可以无状态横向伸缩。
2. Grafana 多副本运行,如果配置了告警规则,每个副本都会重复告警,配置一下 `ha_peers` 让 Grafana 自行选主只让其中一个副本执行告警。

```ini
[database]
url = mysql://root:123456@mysql.db.svc.cluster.local:3306/grafana
[unified_alerting]
enabled = true
ha_peers = monitoring-grafana-headless.monitoring.svc.cluster.local:9094
[alerting]
enabled = false
```

* `database` 下配置数据库连接信息,包含数据库类型、用户名、密码、数据库地址、端口以及具体要用哪个库。
* `alerting` 的 `enabled` 置为 false,表示禁用默认的告警方式(每个 Grafana 实例都单独告警)。
* `unified_alerting` 的 `enabled` 置为 true,表示开启高可用告警。
* `unified_alerting` 的 `ha_peers` 填入 Grafana 所有实例的地址,在 k8s 环境可用 headless service,dns 会自动解析到所有 pod ip 来实现自动发现 Grafana 所有 IP,端口默认是 9094,用于 gossip 协议实现高可用。
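另外,`database` 指向的库需要提前创建好(以 MySQL 为例,库名与配置中保持一致):

```sql
CREATE DATABASE grafana;
```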
## helm chart 配置示例

如果 grafana 安装到 Kubernetes,通常使用 helm chart 来安装,一般是 [grafana 官方 chart](https://github.com/grafana/helm-charts/tree/main/charts/grafana),`values.yaml` 配置示例:

```yaml
replicas: 2
defaultDashboardsTimezone: browser
grafana.ini:
  unified_alerting:
    enabled: true
    ha_peers: 'monitoring-grafana-headless.monitoring.svc.cluster.local:9094'
  alerting:
    enabled: false
  database:
    url: 'mysql://root:123456@mysql.db.svc.cluster.local:3306/grafana'
  server:
    root_url: "https://grafana.imroc.cc"
  paths:
    data: /var/lib/grafana/
    logs: /var/log/grafana
    plugins: /var/lib/grafana/plugins
    provisioning: /etc/grafana/provisioning
  analytics:
    check_for_updates: true
  log:
    mode: console
  grafana_net:
    url: https://grafana.net
```

* `grafana.ini` 字段用于修改 grafana 配置文件内容,使用 `yaml` 格式定义,会自动转成 `ini`。
* `ha_peers` 指向的 headless service 需自行提前创建(当前 chart 内置的 headless 没暴露 9094 端口)。

headless service 示例:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: monitoring-grafana-headless
  namespace: monitoring
spec:
  clusterIP: None
  ports:
  - name: http-web
    port: 3000
    protocol: TCP
    targetPort: 3000
  - name: alert
    port: 9094
    protocol: TCP
    targetPort: 9094
  selector:
    app.kubernetes.io/instance: monitoring
    app.kubernetes.io/name: grafana
  type: ClusterIP
```

如果你使用的是 [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) 安装,实际也是用的 Grafana 官方的 chart,只不过作为了一个子 chart,写 `values.yaml` 时将上面准备的配置放到 `grafana` 字段下面即可:

```yaml
grafana:
  replicas: 2
  defaultDashboardsTimezone: browser
  grafana.ini:
    ...
```

## 已安装的 Grafana 如何迁移到高可用架构?

如果你用的默认安装,使用 sqlite3 文件存储数据,可以先按照如下步骤迁移数据:

1. 拿到 `grafana.db` 文件,使用 Grafana 官方提供的迁移脚本 [sqlitedump.sh](https://github.com/grafana/database-migrator) 将 sqlite3 的数据转换成 sql 文件:
   ```bash
   sqlitedump.sh grafana.db > grafana.sql
   ```
   > 确保环境中安装了 sqlite3 命令。
2. 停止 Grafana (如果是 K8S 部署,可以修改副本数为 0)。
3. 准备好数据库,提前创建好 grafana database:
   ```sql
   CREATE DATABASE grafana;
   ```
4. 替换 Grafana 配置文件,参考前面的配置示例。
5. 启动 Grafana,让 Grafana 自动初始化数据库。
6. 将 sql 文件导入数据库执行:
   ```bash
   mysql -h172.16.181.186 -P3306 -uroot -p123456 grafana < grafana.sql
   ```
7.
+
+## References
+
+- [Set up Grafana for high availability](https://grafana.com/docs/grafana/latest/setup-grafana/set-up-for-high-availability/)
\ No newline at end of file
diff --git a/content/monitoring/victoriametrics/install-with-operator.md b/content/monitoring/victoriametrics/install-with-operator.md
new file mode 100644
index 0000000..3381403
--- /dev/null
+++ b/content/monitoring/victoriametrics/install-with-operator.md
+# Deploying VictoriaMetrics with the operator
+
+## VictoriaMetrics architecture overview
+
+Below is the architecture of VictoriaMetrics' core components:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220904161934.png)
+
+* `vmstorage` stores the data and is the stateful component.
+* `vmselect` serves queries. When adding a Prometheus data source in Grafana, use the `vmselect` address; on a query, `vmselect` calls each `vmstorage` instance's API to complete it.
+* `vminsert` handles writes. Collectors push the metrics they scrape to `vminsert`, which calls each `vmstorage` instance's API to complete the write.
+* Every component scales horizontally, but not automatically, since scaling requires changing startup flags.
+
+## Install the operator
+
+Install with helm:
+
+```bash
+helm repo add vm https://victoriametrics.github.io/helm-charts
+helm repo update
+helm install victoria-operator vm/victoria-metrics-operator
+```
+
+Check that the operator started successfully:
+
+```bash
+$ kubectl -n monitoring get pod
+NAME                                                           READY   STATUS    RESTARTS   AGE
+victoria-operator-victoria-metrics-operator-7b886f85bb-jf6ng   1/1     Running   0          20s
+```
+
+## Install VMStorage, VMSelect and VMInsert
+
+Prepare `vmcluster.yaml`:
+
+```yaml
+apiVersion: operator.victoriametrics.com/v1beta1
+kind: VMCluster
+metadata:
+  name: vmcluster
+  namespace: monitoring
+spec:
+  retentionPeriod: "1" # the default unit is months, see https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#retention
+  vmstorage:
+    replicaCount: 2
+    storage:
+      volumeClaimTemplate:
+        metadata:
+          name: data
+        spec:
+          accessModes: [ "ReadWriteOnce" ]
+          storageClassName: cbs
+          resources:
+            requests:
+              storage: 100Gi
+  vmselect:
+    replicaCount: 2
+  vminsert:
+    replicaCount: 2
+```
+
+Install:
+
+```bash
+$ kubectl apply -f vmcluster.yaml
+vmcluster.operator.victoriametrics.com/vmcluster created
+```
+
+Check that the components started successfully:
+
+```bash
+$ kubectl -n monitoring get pod | grep vmcluster
+vminsert-vmcluster-77886b8dcb-jqpfw    1/1   Running   0     20s
+vminsert-vmcluster-77886b8dcb-l5wrg    1/1   Running   0     20s
+vmselect-vmcluster-0                   1/1   Running   0     20s
+vmselect-vmcluster-1                   1/1   Running   0     20s
+vmstorage-vmcluster-0                  1/1   Running   0     20s
+vmstorage-vmcluster-1                  1/1   Running   0     20s
+```
+
+## Install VMAlertmanager and VMAlert
+
+Prepare `vmalertmanager.yaml`:
+
+```yaml
+apiVersion: operator.victoriametrics.com/v1beta1
+kind: VMAlertmanager
+metadata:
+  name: vmalertmanager
+  namespace: monitoring
+spec:
+  replicaCount: 1
+  selectAllByDefault: true
+```
+
+Install `VMAlertmanager`:
+
+```bash
+$ kubectl apply -f vmalertmanager.yaml
+vmalertmanager.operator.victoriametrics.com/vmalertmanager created
+```
+
+Prepare `vmalert.yaml`:
+
+```yaml
+apiVersion: operator.victoriametrics.com/v1beta1
+kind: VMAlert
+metadata:
+  name: vmalert
+  namespace: monitoring
+spec:
+  replicaCount: 1
+  selectAllByDefault: true
+  notifier:
+    url: http://vmalertmanager-vmalertmanager:9093
+  resources:
+    requests:
+      cpu: 10m
+      memory: 10Mi
+  remoteWrite:
+    url: http://vminsert-vmcluster:8480/insert/0/prometheus/
+  remoteRead:
+    url: http://vmselect-vmcluster:8481/select/0/prometheus/
+  datasource:
+    url: http://vmselect-vmcluster:8481/select/0/prometheus/
+```
+
+Install `VMAlert`:
+
+```bash
+$ kubectl apply -f vmalert.yaml
+vmalert.operator.victoriametrics.com/vmalert created
+```
+
+Check that the components started successfully:
+
+```bash
+$ kubectl -n monitoring get pod | grep vmalert
+vmalert-vmalert-5987fb9d5f-9wt6l       2/2   Running   0     20s
+vmalertmanager-vmalertmanager-0        2/2   Running   0     40s
+```
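+
+At this point the query path can already be smoke-tested by port-forwarding `vmselect` and issuing a PromQL query against the same path Grafana will use later (a sketch; the service name matches the VMCluster created above):
+
+```bash
+# Forward vmselect locally and run an instant query; an empty result is expected before vmagent is installed
+kubectl -n monitoring port-forward svc/vmselect-vmcluster 8481:8481 &
+curl -s 'http://127.0.0.1:8481/select/0/prometheus/api/v1/query?query=up'
+```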
+
+## Install VMAgent
+
+vmagent scrapes metrics and sends them to VictoriaMetrics for storage. To scrape container metrics on Tencent Kubernetes Engine, custom `additionalScrapeConfigs` are needed. Prepare the custom scrape rules in `scrape-config.yaml`:
+
+```yaml
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  name: additional-scrape-configs
+  namespace: monitoring
+stringData:
+  additional-scrape-configs.yaml: |-
+    - job_name: "tke-cadvisor"
+      scheme: https
+      metrics_path: /metrics/cadvisor
+      tls_config:
+        insecure_skip_verify: true
+      authorization:
+        credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+      kubernetes_sd_configs:
+      - role: node
+      relabel_configs:
+      - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_instance_type]
+        regex: eklet
+        action: drop
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+    - job_name: "tke-kubelet"
+      scheme: https
+      metrics_path: /metrics
+      tls_config:
+        insecure_skip_verify: true
+      authorization:
+        credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+      kubernetes_sd_configs:
+      - role: node
+      relabel_configs:
+      - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_instance_type]
+        regex: eklet
+        action: drop
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+    - job_name: "tke-probes"
+      scheme: https
+      metrics_path: /metrics/probes
+      tls_config:
+        insecure_skip_verify: true
+      authorization:
+        credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+      kubernetes_sd_configs:
+      - role: node
+      relabel_configs:
+      - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_instance_type]
+        regex: eklet
+        action: drop
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+    - job_name: eks
+      honor_timestamps: true
+      metrics_path: '/metrics'
+      params:
+        collect[]: ['ipvs']
+        # - 'cpu'
+        # - 'meminfo'
+        # - 'diskstats'
+        # - 'filesystem'
+        # - 'loadavg'
+        # - 'netdev'
+        # - 'filefd'
+        # - 'pressure'
+        # - 'vmstat'
+      scheme: http
+      kubernetes_sd_configs:
+      - role: pod
+      relabel_configs:
+      - source_labels: [__meta_kubernetes_pod_annotation_tke_cloud_tencent_com_pod_type]
+        regex: eklet
+        action: keep
+      - source_labels: [__meta_kubernetes_pod_phase]
+        regex: Running
+        action: keep
+      - source_labels: [__meta_kubernetes_pod_ip]
+        separator: ;
+        regex: (.*)
+        target_label: __address__
+        replacement: ${1}:9100
+        action: replace
+      - source_labels: [__meta_kubernetes_pod_name]
+        separator: ;
+        regex: (.*)
+        target_label: pod
+        replacement: ${1}
+        action: replace
+      - source_labels: [__meta_kubernetes_namespace]
+        separator: ;
+        regex: (.*)
+        target_label: namespace
+        replacement: ${1}
+        action: replace
+      metric_relabel_configs:
+      - source_labels: [__name__]
+        separator: ;
+        regex: (container_.*|pod_.*|kubelet_.*)
+        replacement: $1
+        action: keep
+```
+
+Then prepare `vmagent.yaml`:
+
+```yaml
+apiVersion: operator.victoriametrics.com/v1beta1
+kind: VMAgent
+metadata:
+  name: vmagent
+  namespace: monitoring
+spec:
+  selectAllByDefault: true
+  additionalScrapeConfigs:
+    key: additional-scrape-configs.yaml
+    name: additional-scrape-configs
+  resources:
+    requests:
+      cpu: 10m
+      memory: 10Mi
+  replicaCount: 1
+  remoteWrite:
+    - url: "http://vminsert-vmcluster:8480/insert/0/prometheus/api/v1/write"
+```
+
+Install:
+
+```bash
+$ kubectl apply -f scrape-config.yaml
+secret/additional-scrape-configs created
+$ kubectl apply -f vmagent.yaml
+vmagent.operator.victoriametrics.com/vmagent created
+```
+
+Check that the components started successfully:
+
+```bash
+$ kubectl -n monitoring get pod | grep vmagent
+vmagent-vmagent-cf9bbdbb4-tm4w9        2/2   Running   0     20s
+vmagent-vmagent-cf9bbdbb4-ija8r        2/2   Running   0     20s
+```
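+
+To confirm the custom scrape jobs were picked up, vmagent exposes its scrape state over HTTP (a sketch, assuming the operator's default deployment name, matching the pod names above, and vmagent's default port 8429):
+
+```bash
+# List active scrape targets; the tke-cadvisor/tke-kubelet/tke-probes/eks jobs should appear
+kubectl -n monitoring port-forward deploy/vmagent-vmagent 8429:8429 &
+curl -s http://127.0.0.1:8429/targets
+```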
+
+## Configure Grafana
+
+### Add the data source
+
+VictoriaMetrics is Prometheus-compatible. When adding the data source in Grafana, use the Prometheus type. If Grafana runs in the same cluster as VictoriaMetrics, you can use the service address, e.g.:
+
+```txt
+http://vmselect-vmcluster:8481/select/0/prometheus/
+```
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220904160422.png)
+
+### Add dashboards
+
+VictoriaMetrics officially provides several Grafana dashboards, with the ids:
+1. 11176
+2. 12683
+3. 14205
+
+Import them into Grafana:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220904160727.png)
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220904161558.png)
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220904161641.png)
\ No newline at end of file
diff --git a/content/networking/faq/ipvs-conn-reuse-mode.md b/content/networking/faq/ipvs-conn-reuse-mode.md
new file mode 100644
index 0000000..ba8bace
--- /dev/null
+++ b/content/networking/faq/ipvs-conn-reuse-mode.md
+# Problems caused by ipvs connection reuse
+
+There is a long-discussed bug in the Kubernetes community ([#81775](https://github.com/kubernetes/kubernetes/issues/81775)): when a client opens a large number of new TCP connections to a service, new connections get forwarded to Terminating or already destroyed old pods, causing persistent packet loss (with `no route to host` errors). The root cause is the kernel's ipvs connection reuse; this article digs into the details.
+
+## A primer on conn_reuse_mode
+
+Before explaining the cause, let's introduce the `conn_reuse_mode` kernel parameter, introduced by the following two patches:
+
+1. year 2015 d752c364571743d696c2a54a449ce77550c35ac5
+2. year 2016 f719e3754ee2f7275437e61a6afd520181fdd43b
+
+Their purpose:
+1. When a `client ip:client port` pair is reused, reschedule the ip_vs_conn that is in `TIME_WAIT` state, so connections are distributed more evenly across the rs, improving performance.
+2. If the mode is 0, the rs of the old ip_vs_conn is reused, making connections less balanced.
+
+So `conn_reuse_mode` set to 0 means ipvs connection reuse is enabled, and 1 means it is disabled. A bit counter-intuitive, right? This has indeed been controversial.
+
+## The conn_reuse_mode=1 bug
+
+Turning this kernel parameter on (`conn_reuse_mode=1`) was meant to improve new-connection performance, but in practice it drastically reduced it: in real tests, cps dropped from 30k to 1.5k, which also shows that some kernel patches are not rigorously performance-tested.
+
+Turning the parameter on means ipvs does not reuse connections when forwarding: every new connection is rescheduled to an rs and gets a new ip_vs_conn. But the implementation has a flaw: on a new connection (SYN packet), if the `client ip:client port` matches an old ipvs connection (in `TIME_WAIT` state) and conntrack is in use, the first SYN packet is dropped, and the connection is only established after a retransmission (1s). New-connection performance therefore plummets.
+
+The Kubernetes community also found this bug, so when kube-proxy uses the ipvs forwarding mode it sets `conn_reuse_mode` to 0 by default to work around the problem; see PR [#71114](https://github.com/kubernetes/kubernetes/pull/71114) and issue [#70747](https://github.com/kubernetes/kubernetes/issues/70747).
+
+## Problems caused by conn_reuse_mode=0
+
+To avoid the performance issue brought by `conn_reuse_mode=1`, Kubernetes has kube-proxy set `conn_reuse_mode` to 0 at startup in ipvs mode, i.e. it relies on ipvs connection reuse. But ipvs connection reuse has two problems:
+
+1. As long as a `client ip:client port` matches an ip_vs_conn (reuse happens), the packet is forwarded directly to the corresponding rs no matter what state the rs is in, even if its weight is 0 (typically `TIME_WAIT` state). A `TIME_WAIT` rs is usually a Terminating, already destroyed pod, so a connection forwarded there inevitably fails.
+2. Under high concurrency there is heavy reuse: new connections are not scheduled to an rs but forwarded straight to the rs of the reused connection, so many new connections get "pinned" to a subset of rs.
+
+In real workloads the symptoms vary:
+
+1. **Connection errors during rolling updates.** While the accessed service rolls out, pods are created and destroyed; when ipvs reuses a connection it forwards to an already destroyed pod, causing connection errors (`no route to host`).
+2. **Load imbalance after rolling updates.** Since reuse skips rescheduling, new connections also get "pinned" to certain pods.
+3. **Newly scaled-out pods receive little traffic.** Again because reuse skips rescheduling, many new connections stay "pinned" to the pods that existed before the scale-out.
+
+## Workarounds
+
+Knowing the cause, how do we work around it in ipvs forwarding mode? Let's consider north-south and east-west traffic separately.
+
+### North-south traffic
+
+1. Let the LB reach pods directly. North-south traffic is usually exposed through NodePorts: the load balancer in front forwards traffic to a NodePort, and ipvs then forwards it to backend pods. Many cloud providers now support LB-to-pod direct connection, where the load balancer forwards requests straight to pods without NodePort and hence without ipvs forwarding, avoiding the problem at the traffic entry point.
+2. Forward via ingress. Deploy an ingress controller in the cluster (e.g. nginx ingress). When traffic reaches the ingress and is forwarded onward (to pods in the cluster), it does not go through service forwarding but straight to the service's corresponding `Pod IP:Port`, bypassing ipvs. Deploying the ingress controller itself with the LB-to-pod direct connection described above works even better.
+
+### East-west traffic
+
+Service-to-service calls inside the cluster (east-west traffic) still go through ipvs forwarding by default. For workloads with such high-concurrency scenarios, consider a Service Mesh (such as istio) to manage traffic: inter-service forwarding is handled by the sidecar proxy and does not go through ipvs.
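+
+Whether a particular node is affected is easy to check directly (a sketch; kube-proxy in ipvs mode sets the parameter at startup, so the value reflects what is actually in effect):
+
+```bash
+# 0 = connection reuse enabled (the Kubernetes workaround), 1 = reuse disabled
+sysctl net.ipv4.vs.conn_reuse_mode
+# kernels >= 5.9 carry the upstream fixes discussed below
+uname -r
+```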
+
+## The ultimate fix: patch the kernel
+
+The bug where `conn_reuse_mode=1` causes a sharp performance drop has been fixed in the open-source [TencentOS-kernel](https://github.com/Tencent/TencentOS-kernel) provided by Tencent Cloud, see PR [#17](https://github.com/Tencent/TencentOS-kernel/pull/17). The solution on [TKE](https://cloud.tencent.com/product/tke) is to use this kernel patch together with disabling ipvs connection reuse (`conn_reuse_mode=1`), which also resolves the series of problems caused by ipvs connection reuse, and it has been validated in large-scale production.
+
+That fix was not merged into the upstream linux community directly, but two related patches have since been merged into the mainline linux kernel (as of v5.9), fixing the bugs described above for `conn_reuse_mode` 0 and 1 respectively; one of them borrows the idea of Tencent Cloud's fix. See k8s issue [#93297](https://github.com/kubernetes/kubernetes/issues/93297).
+
+If you run a kernel above v5.9, in theory none of the problems in this article apply. And since kernels above v5.9 fix these bugs, kube-proxy no longer needs to set the `conn_reuse_mode` kernel parameter explicitly, which is what PR [#102122](https://github.com/kubernetes/kubernetes/pull/102122) does. Note, however, that the upstream patches have not yet seen large-scale production validation, so trying them carries risk.
\ No newline at end of file
diff --git a/content/networking/faq/why-enable-bridge-nf-call-iptables.md b/content/networking/faq/why-enable-bridge-nf-call-iptables.md
new file mode 100644
index 0000000..eddcc1b
--- /dev/null
+++ b/content/networking/faq/why-enable-bridge-nf-call-iptables.md
+# Why enable bridge-nf-call-iptables?
+
+In Kubernetes environments, nodes are often required to enable the `bridge-nf-call-iptables` kernel parameter:
+
+```bash
+sysctl -w net.bridge.bridge-nf-call-iptables=1
+```
+
+> See the official documentation [Network Plugin Requirements](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#network-plugin-requirements)
+
+If it is not enabled, or some operation turns it off along the way, you can hit all sorts of odd network problems that are painful to troubleshoot.
+
+Why does it need to be on? This article explains in detail.
+
+## Bridge-based container networking
+
+Kubernetes cluster networking has many implementations, and a large share of them use a Linux bridge:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925114751.png)
+
+* Each pod's NIC is a veth device; the other end of the veth pair is attached to a bridge on the host.
+* Since the bridge is a virtual layer-2 device, pod-to-pod traffic on the same node is plain layer-2 forwarding; only cross-node traffic goes through the host's eth0.
+
+## The same-node Service communication problem
+
+Whether in iptables or ipvs forwarding mode, accessing a Service in Kubernetes goes through DNAT: a packet originally destined for ClusterIP:Port is DNATed to one of the Service's endpoints (PodIP:Port). The kernel then inserts the connection into the conntrack table to track it; when the destination replies, the kernel matches the connection in conntrack and reverse-NATs it, so replies return along the original path and form a complete connection:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925114800.png)
+
+But a Linux bridge is a virtual layer-2 forwarding device, while iptables conntrack operates at layer 3, so traffic between addresses on the same bridge goes straight through layer-2 forwarding, bypassing conntrack:
+1. A pod accesses a Service. The destination IP is the Cluster IP, not an address on the bridge, so the packet takes layer-3 forwarding and is DNATed to PodIP:Port.
+2. If the DNAT target is a pod on the same node, the destination pod's reply sees a destination IP on the same bridge and is forwarded directly at layer 2, without invoking conntrack, so the reply does not return along the original path (see the figure below).
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925114808.png)
+
+Because the reply does not return along the original path, client and server are not on the same "channel"; they do not consider themselves part of the same connection and cannot communicate normally.
+
+A common symptom is occasional DNS resolution failures: when a pod on the node hosting coredns resolves DNS and the request happens to land on the coredns pod on the same node, this problem can occur.
+
+## Enable bridge-nf-call-iptables
+
+If the network path in a Kubernetes environment goes through a bridge, it can hit the same-node Service communication problem above, and many Kubernetes network implementations do use a bridge.
+
+The `bridge-nf-call-iptables` kernel parameter (set to 1) makes the bridge device also invoke the layer-3 rules configured by iptables (including conntrack) during layer-2 forwarding. Enabling it therefore solves the same-node Service communication problem, which is why most Kubernetes environments require `bridge-nf-call-iptables` to be enabled.
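+
+A minimal way to check and persistently enable the parameter on a node (a sketch; it assumes the `br_netfilter` module, which provides this sysctl, may not be loaded yet):
+
+```bash
+modprobe br_netfilter                          # the sysctl only exists once this module is loaded
+sysctl net.bridge.bridge-nf-call-iptables      # check the current value
+cat <<'SYSCTL' >/etc/sysctl.d/99-k8s-bridge.conf
+net.bridge.bridge-nf-call-iptables = 1
+SYSCTL
+sysctl --system                                # apply and persist across reboots
+```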
diff --git a/content/sidebars.js b/content/sidebars.js
new file mode 100644
index 0000000..64a165e
--- /dev/null
+++ b/content/sidebars.js
+/**
+ * Creating a sidebar enables you to:
+ - create an ordered group of docs
+ - render a sidebar for each doc of that group
+ - provide next/previous navigation
+
+ The sidebars can be generated from the filesystem, or explicitly defined here.
+
+ Create as many sidebars as you want.
+ */
+
+// @ts-check
+
+/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
+const sidebars = {
+  kubernetesSidebar: [
+    {
+      type: "doc",
+      id: "README",
+      customProps: {
+        slug: "/"
+      }
+    },
+    {
+      type: 'category',
+      label: '集群搭建',
+      collapsed: true,
+      link: {
+        type: 'generated-index',
+        slug: '/deploy'
+      },
+      items: [
+        {
+          type: 'category',
+          label: '使用 kubespray 搭建集群',
+          collapsed: true,
+          link: {
+            type: 'generated-index',
+            slug: '/deploy/kubespray'
+          },
+          items: [
+            'deploy/kubespray/install',
+            'deploy/kubespray/offline',
+          ],
+        },
+        {
+          type: 'category',
+          label: '安装 k3s 轻量集群',
+          collapsed: true,
+          link: {
+            type: 'generated-index',
+            slug: '/deploy/k3s'
+          },
+          items: [
+            'deploy/k3s/install-cases',
+            'deploy/k3s/offline-installation',
+          ],
+        },
+        'deploy/terraform',
+      ],
+    },
+    {
+      type: 'category',
+      label: '最佳实践',
+      collapsed: true,
+      link: {
+        type: 'generated-index',
+        slug: '/best-practices'
+      },
+      items: [
+        {
+          type: 'category',
+          label: '优雅终止',
+          collapsed: true,
+          link: {
+            type: 'generated-index',
+            slug: '/best-practices/graceful-shutdown'
+          },
+          items: [
+            'best-practices/graceful-shutdown/intro',
+            'best-practices/graceful-shutdown/pod-termination-proccess',
+            'best-practices/graceful-shutdown/code-example-of-handle-sigterm',
+            'best-practices/graceful-shutdown/why-cannot-receive-sigterm',
+            'best-practices/graceful-shutdown/propagating-signals-in-shell',
+            'best-practices/graceful-shutdown/use-prestop',
+            'best-practices/graceful-shutdown/persistent-connection',
+            'best-practices/graceful-shutdown/lb-to-pod-directly',
+          ],
+        },
+        {
+          type: 'category',
+          label: 'DNS',
+          collapsed: true,
+          link: {
+            type: 'generated-index',
+            slug: '/best-practices/dns'
+          },
+          items: [
+            'best-practices/dns/customize-dns-resolution',
+            'best-practices/dns/optimize-coredns-performance',
+          ],
+        },
+        {
+          type: 'category',
+          label: '性能优化',
+          collapsed: true,
+          link: {
+            type: 'generated-index',
+            slug: '/best-practices/performance-optimization'
+          },
+          items: [
+            'best-practices/performance-optimization/network',
+            'best-practices/performance-optimization/cpu',
+          ],
+        },
+        {
+          type: 'category',
+          label: '高可用',
+          collapsed: true,
+          link: {
+            type: 'generated-index',
+            slug: '/best-practices/ha'
+          },
+          items: [
+            'best-practices/ha/pod-split-up-scheduling',
+            'best-practices/ha/smooth-upgrade',
+          ],
+        },
+        {
+          type: 'category',
+          label: '弹性伸缩',
+          collapsed: true,
+          link: {
+            type: 
'generated-index', + slug: '/best-practices/autoscaling' + }, + items: [ + 'best-practices/autoscaling/hpa-velocity', + 'best-practices/autoscaling/hpa-with-custom-metrics', + ], + }, + { + type: 'category', + label: '容器化', + collapsed: true, + link: { + type: 'generated-index', + slug: '/best-practices/containerization' + }, + items: [ + 'best-practices/containerization/systemd-in-container', + 'best-practices/containerization/java', + 'best-practices/containerization/golang', + 'best-practices/containerization/crontab-in-container', + 'best-practices/containerization/timezone', + ], + }, + { + type: 'category', + label: '集群运维', + collapsed: true, + link: { + type: 'generated-index', + slug: '/best-practices/ops' + }, + items: [ + 'best-practices/ops/securely-maintain-or-offline-node', + 'best-practices/ops/securely-modify-container-root-dir', + 'best-practices/ops/large-scale-cluster-optimization', + 'best-practices/ops/etcd-optimization', + 'best-practices/ops/batch-operate-node-with-ansible', + ], + }, + 'best-practices/configure-healthcheck', + 'best-practices/request-limit', + 'best-practices/logging', + 'best-practices/long-connection', + ], + }, + { + type: 'category', + label: '监控告警', + collapsed: true, + link: { + type: 'generated-index', + slug: '/monitoring' + }, + items: [ + { + type: 'category', + label: 'grafana', + collapsed: true, + link: { + type: 'generated-index', + slug: '/monitoring/grafana' + }, + items: [ + 'monitoring/grafana/ha-setup', + ], + }, + { + type: 'category', + label: 'Victoria Metrics', + collapsed: true, + link: { + type: 'generated-index', + slug: '/monitoring/victoriametrics' + }, + items: [ + 'monitoring/victoriametrics/install-with-operator', + ], + } + ], + }, + { + type: 'category', + label: '集群网络', + collapsed: true, + link: { + type: 'generated-index', + slug: '/networking' + }, + items: [ + { + type: 'category', + label: '常见问题', + collapsed: true, + link: { + type: 'generated-index', + slug: '/networking/faq' + }, + items: [ + 'networking/faq/why-enable-bridge-nf-call-iptables', + 'networking/faq/ipvs-conn-reuse-mode', + ], + }, + ], + }, + { + type: 'category', + label: '实用技巧', + collapsed: true, + link: { + type: 'generated-index', + slug: '/trick' + }, + items: [ + { + type: 'category', + label: '高效使用 kubectl', + collapsed: true, + link: { + type: 'generated-index', + slug: '/trick/kubectl' + }, + items: [ + 'trick/kubectl/kubectl-aliases', + 'trick/kubectl/quick-switch-with-kubectx', + 'trick/kubectl/merge-kubeconfig-with-kubecm', + ], + }, + { + type: 'category', + label: '镜像相关', + collapsed: true, + link: { + type: 'generated-index', + slug: '/trick/images' + }, + items: [ + 'trick/images/podman', + 'trick/images/sync-images-with-skopeo', + ], + }, + { + type: 'category', + label: '部署与配置', + collapsed: true, + link: { + type: 'generated-index', + slug: '/trick/deploy' + }, + items: [ + 'trick/deploy/set-sysctl', + ], + }, + { + type: 'category', + label: '证书签发', + collapsed: true, + link: { + type: 'generated-index', + slug: '/trick/certs' + }, + items: [ + 'trick/certs/sign-certs-with-cfssl', + 'trick/certs/sign-free-certs-with-cert-manager', + 'trick/certs/sign-free-certs-for-dnspod', + ], + }, + { + type: 'category', + label: '用户与权限', + collapsed: true, + link: { + type: 'generated-index', + slug: '/trick/user-and-permissions' + }, + items: [ + 'trick/user-and-permissions/create-user-using-csr-api', + ], + }, + ], + }, + { + type: 'category', + label: '故障排查', + collapsed: true, + link: { + type: 'generated-index', + slug: 
'/troubleshooting' + }, + items: [ + { + type: 'category', + label: '排障技能', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/skill' + }, + items: [ + 'troubleshooting/skill/linux', + 'troubleshooting/skill/enter-netns-with-nsenter', + 'troubleshooting/skill/remote-capture-with-ksniff', + 'troubleshooting/skill/use-systemtap-to-locate-problems', + 'troubleshooting/skill/tcpdump', + 'troubleshooting/skill/wireshark', + ], + }, + { + type: 'category', + label: 'Pod 排障', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/pod' + }, + items: [ + 'troubleshooting/pod/healthcheck-failed', + 'troubleshooting/pod/device-or-resource-busy', + { + type: 'category', + label: 'Pod 状态异常', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/pod/status' + }, + items: [ + 'troubleshooting/pod/status/intro', + 'troubleshooting/pod/status/pod-terminating', + 'troubleshooting/pod/status/pod-pending', + 'troubleshooting/pod/status/pod-containercreating-or-waiting', + 'troubleshooting/pod/status/pod-crash', + 'troubleshooting/pod/status/pod-imagepullbackoff', + ], + } + ], + }, + { + type: 'category', + label: '节点排障', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/node' + }, + items: [ + 'troubleshooting/node/node-crash-and-vmcore', + 'troubleshooting/node/node-high-load', + 'troubleshooting/node/io-high-load', + 'troubleshooting/node/memory-fragmentation', + 'troubleshooting/node/disk-full', + 'troubleshooting/node/pid-full', + 'troubleshooting/node/arp-cache-overflow', + 'troubleshooting/node/runnig-out-of-inotify-watches', + 'troubleshooting/node/kernel-solft-lockup', + 'troubleshooting/node/no-space-left-on-device', + 'troubleshooting/node/ipvs-no-destination-available', + 'troubleshooting/node/cadvisor-no-data', + ], + }, + { + type: 'category', + label: '网络排障', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/network' + }, + items: [ + 'troubleshooting/network/timeout', + 'troubleshooting/network/packet-loss', + 'troubleshooting/network/network-unreachable', + 'troubleshooting/network/slow-network-traffic', + 'troubleshooting/network/dns-exception', + 'troubleshooting/network/close-wait-stacking', + 'troubleshooting/network/traffic-surge', + ], + }, + { + type: 'category', + label: '存储排障', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/storage' + }, + items: [ + 'troubleshooting/storage/unable-to-mount-volumes', + 'troubleshooting/storage/setup-failed-for-volume', + ], + }, + { + type: 'category', + label: '集群排障', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/cluster' + }, + items: [ + 'troubleshooting/cluster/namespace-terminating', + ], + }, + "troubleshooting/sdk", + { + type: 'category', + label: '排障案例', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/cases' + }, + items: [ + { + type: 'category', + label: '运行时排障', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/cases/runtime' + }, + items: [ + 'troubleshooting/cases/runtime/io-high-load-causing-pod-creation-timeout', + 'troubleshooting/cases/runtime/pull-image-fail-in-high-version-containerd', + 'troubleshooting/cases/runtime/mount-root-causing-device-or-resource-busy', + 'troubleshooting/cases/runtime/broken-system-time-causing-sandbox-conflicts', + ], + }, + { + type: 'category', + label: '网络排障', + collapsed: true, + link: { + type: 'generated-index', + 
slug: '/troubleshooting/cases/network' + }, + items: [ + 'troubleshooting/cases/network/dns-lookup-5s-delay', + 'troubleshooting/cases/network/arp-cache-overflow-causing-healthcheck-failed', + 'troubleshooting/cases/network/cross-vpc-connect-nodeport-timeout', + 'troubleshooting/cases/network/musl-libc-dns-id-conflict-causing-dns-abnormal', + ], + }, + { + type: 'category', + label: '高负载', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/cases/high-load' + }, + items: [ + 'troubleshooting/cases/high-load/disk-full-causing-high-cpu', + ], + }, + { + type: 'category', + label: '集群故障', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/cases/cluster' + }, + items: [ + 'troubleshooting/cases/cluster/delete-rancher-ns-causing-node-disappear', + 'troubleshooting/cases/cluster/scheduler-snapshot-missing-causing-pod-pending', + 'troubleshooting/cases/cluster/kubectl-exec-or-logs-failed', + ], + }, + { + type: 'category', + label: '节点排障', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/cases/node' + }, + items: [ + 'troubleshooting/cases/node/cgroup-leaking', + ], + }, + { + type: 'category', + label: '其它排障', + collapsed: true, + link: { + type: 'generated-index', + slug: '/troubleshooting/cases/others' + }, + items: [ + 'troubleshooting/cases/others/failed-to-modify-hosts-in-multiple-container', + 'troubleshooting/cases/others/job-cannot-delete', + 'troubleshooting/cases/others/dotnet-configuration-cannot-auto-reload', + ], + }, + ], + } + ], + }, + { + type: 'category', + label: '腾讯云容器服务', + collapsed: true, + link: { + type: 'generated-index', + slug: '/tencent' + }, + items: [ + { + type: 'category', + label: 'Serverless 集群与超级节点', + collapsed: true, + link: { + type: 'generated-index', + slug: '/tencent/serverless' + }, + items: [ + 'tencent/serverless/precautions', + 'tencent/serverless/why-tke-supernode-rocks', + 'tencent/serverless/supernode-case-online', + 'tencent/serverless/supernode-case-offline', + 'tencent/serverless/large-image-solution', + ], + }, + { + type: 'category', + label: '网络指南', + collapsed: true, + link: { + type: 'generated-index', + slug: '/tencent/networking' + }, + items: [ + 'tencent/networking/clb-to-pod-directly', + 'tencent/networking/how-to-use-eip', + 'tencent/networking/install-localdns-with-ipvs', + 'tencent/networking/expose-grpc-with-tcm', + ], + }, + { + type: 'category', + label: '存储指南', + collapsed: true, + link: { + type: 'generated-index', + slug: '/tencent/storage' + }, + items: [ + 'tencent/storage/cbs-pvc-expansion', + 'tencent/storage/readonlymany-pv', + 'tencent/storage/mount-cfs-with-v3', + ], + }, + { + type: 'category', + label: '监控告警', + collapsed: true, + link: { + type: 'generated-index', + slug: '/tencent/monitoring' + }, + items: [ + 'tencent/monitoring/prometheus-scrape-config', + 'tencent/monitoring/grafana-dashboard-for-supernode-pod', + ], + }, + { + type: 'category', + label: '镜像与仓库', + collapsed: true, + link: { + type: 'generated-index', + slug: '/tencent/images' + }, + items: [ + 'tencent/images/use-mirror-in-container', + 'tencent/images/use-foreign-container-image', + ], + }, + { + type: 'category', + label: '故障排查', + collapsed: true, + link: { + type: 'generated-index', + slug: '/tencent/troubleshooting' + }, + items: [ + 'tencent/troubleshooting/public-service-or-ingress-connect-failed', + ], + }, + { + type: 'category', + label: '常见应用安装与部署', + collapsed: true, + link: { + type: 'generated-index', + slug: '/tencent/install-apps' + }, 
+          items: [
+            'tencent/install-apps/install-harbor-on-tke',
+            'tencent/install-apps/install-gitlab-on-tke',
+            'tencent/install-apps/install-kubesphere-on-tke',
+          ],
+        },
+        {
+          type: 'category',
+          label: '常见问题',
+          collapsed: true,
+          link: {
+            type: 'generated-index',
+            slug: '/tencent/faq'
+          },
+          items: [
+            'tencent/faq/modify-rp-filter-causing-exception',
+            'tencent/faq/clb-loopback',
+            'tencent/faq/controller-manager-and-scheduler-unhealthy',
+          ],
+        },
+        {
+          type: 'category',
+          label: '解决方案',
+          collapsed: true,
+          link: {
+            type: 'generated-index',
+            slug: '/tencent/solution'
+          },
+          items: [
+            'tencent/solution/multi-account',
+            'tencent/solution/upgrade-inplace',
+          ],
+        },
+        {
+          type: 'category',
+          label: '附录',
+          collapsed: true,
+          link: {
+            type: 'generated-index',
+            slug: '/tencent/appendix'
+          },
+          items: [
+            'tencent/appendix/useful-kubectl-for-tencent-cloud',
+            'tencent/appendix/eks-annotations',
+            'tencent/appendix/ingress-error-code',
+          ],
+        },
+      ],
+    },
+    {
+      type: 'category',
+      label: '附录',
+      collapsed: true,
+      link: {
+        type: 'generated-index',
+        slug: '/appendix'
+      },
+      items: [
+        'appendix/kubectl-cheat-sheet',
+        'appendix/yaml',
+        {
+          type: 'category',
+          label: 'Terraform 配置',
+          collapsed: true,
+          link: {
+            type: 'generated-index',
+            slug: '/appendix/terraform'
+          },
+          items: [
+            'appendix/terraform/tke-vpc-cni',
+            'appendix/terraform/tke-serverless',
+          ]
+        },
+      ]
+    }
+  ],
+};
+
+module.exports = sidebars;
diff --git a/content/tencent/appendix/eks-annotations.md b/content/tencent/appendix/eks-annotations.md
new file mode 100644
index 0000000..bcfc337
--- /dev/null
+++ b/content/tencent/appendix/eks-annotations.md
+# EKS annotations
+
+This content has been merged into the official documentation; see:
+
+* [Official EKS Annotation documentation](https://cloud.tencent.com/document/product/457/44173)
+* [EKS global configuration](https://cloud.tencent.com/document/product/457/71915)
+* [EKS image cache](https://cloud.tencent.com/document/product/457/65908)
diff --git a/content/tencent/appendix/ingress-error-code.md b/content/tencent/appendix/ingress-error-code.md
new file mode 100644
index 0000000..d5993e1
--- /dev/null
+++ b/content/tencent/appendix/ingress-error-code.md
+# Ingress error codes
+
+## E4000 CreateLoadBalancer RequestLimitExceeded
+
+The API was called too frequently within a short period; the calls are retried. Occasional occurrences do not affect the service.
+
+## E4003 CreateLoadBalancer LimitExceeded
+
+Cause: the number of load balancer resources is capped.
+
+Fix: submit a ticket to raise the quota for load balancer resources.
+
+## E4004 CreateListener LimitExceeded
+
+Cause: the number of listeners under a load balancer is capped.
+
+Fix: submit a ticket to raise the quota for listeners under a load balancer.
+
+## E4005 CreateRule LimitExceeded
+
+Cause: the number of rules under a load balancer is capped.
+
+Fix: submit a ticket to raise the quota for rules under a load balancer.
+
+## E4006 DeleteListener Redirection config on the listener
+
+Cause: a redirect rule was configured on a listener managed by the Ingress, so deleting the listener failed.
+
+Fix: handle the redirect rule yourself; the Ingress will delete the listener on a subsequent retry.
+
+## E4007 Norm AssumeTkeCredential -8017 | -8032 Record Not Exist
+
+Cause: in the vast majority of cases, `ip-masq-agent-config` was modified so requests to Norm are no longer IP-masqueraded, and Norm's authentication fails.
+
+**Troubleshooting steps**
+
+1. Check the current configuration:
+
+```bash
+kubectl get configmap -n kube-system ip-masq-agent-config
+```
+
+```txt
+nonMasqueradeCIDRs: // traffic leaving all pods is not IP-masqueraded; Norm authenticates by source IP (node)
+  - 0.0.0.0/0
+
+nonMasqueradeCIDRs: // normal case: the cluster network and VPC network CIDRs are configured here
+  - 10.0.0.0/14
+  - 172.16.0.0/16
+```
+
+2. Check when `ip-masq-agent` last restarted, i.e. whether it was updated recently:
+
+```bash
+$ kubectl get pod -n kube-system -l name=ip-masq-agent
+NAME                  READY   STATUS    RESTARTS   AGE
+ip-masq-agent-n5rz2   1/1     Running   0          4h
+ip-masq-agent-qj6rk   1/1     Running   0          4h
+```
+
+Fix:
+* Change `nonMasqueradeCIDRs` in `ip-masq-agent-config` to a sensible configuration.
+* Once the masquerade configuration is confirmed correct, restart the Ingress Controller component.
+
+## E4008 Norm AssumeTkeCredential -8002 Data is nil
+
+Cause: the authorization for Tencent Kubernetes Engine was revoked, so the service cannot run.
+
+Fix:
+* Log in to the CAM console and find the role `TKE_QCSRole` (create it if absent).
+* Create the service preset role and grant it the Tencent Kubernetes Engine permissions.
+
+## E4009 Ingress: xxx secret name is empty
+
+Cause: malformed Ingress manifest; `spec.tls.secretName` is missing or empty.
+
+Fix:
+* Documentation: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+* Check and fix the Ingress manifest.
+
+## E4010 Secret xxx not found
+
+Cause: bad Ingress manifest; the Secret referenced in `spec.tls.secretName` does not exist.
+
+Fix:
+* Documentation: https://kubernetes.io/docs/concepts/configuration/secret/
+* Check and fix the Ingress manifest.
+
+## E4011 Secret xxx has no qcloud cert id
+
+Cause: the Secret referenced by the Ingress is missing content; the referenced Secret must contain the `qcloud_cert_id` field.
+
+Fix:
+
+* See the K8S documentation: https://kubernetes.io/docs/concepts/configuration/secret/
+* Check the certificate configuration:
+  ```bash
+  $ kubectl get ingress -n <namespace> <ingress-name> -o yaml
+  apiVersion: extensions/v1beta1
+  kind: Ingress
+  metadata:
+    annotations:
+      qcloud_cert_id: YCOLTUdr    <-- check that this is the certificate ID
+  spec:
+    tls:
+    - secretName: secret-name     <-- check the configured Secret name
+  ```
+* Check the Secret configuration:
+  ```bash
+  $ kubectl get secret -n <namespace> <secret-name> -o yaml
+  apiVersion: v1
+  data:
+    qcloud_cert_id: WUNPTFRVZHI=    <-- check that this is the base64-encoded certificate ID
+  kind: Secret
+  metadata:
+    name: nginx-service-2
+    namespace: default
+  type: Opaque
+
+  $ echo -n "WUNPTFRVZHI=" | base64 -d
+  YCOLTUdr    <-- the certificate ID matches
+  ```
+
+* How to create the Secret:
+  ```bash
+  kubectl create secret generic <secret-name> -n <namespace> --from-literal=qcloud_cert_id=YCOLTUdr    <-- certificate ID
+  ```
+
+## E4012 CreateListener InvalidParameterValue
+
+Cause: most likely a bad Ingress manifest; the `qcloud_cert_id` described in the Secret referenced by `spec.tls.secretName` does not exist.
+
+Troubleshooting: find the error message; if it is `Query certificate 'xxxxxxx' failed.`, the certificate ID is definitely wrong.
+
+Fix:
+* Log in to the SSL Certificates console and check whether the certificate ID is correct.
+* Then update the certificate ID in the Secret.
+
+## E4013 Ingress rules invalid. 'spec.rules.http' is empty.
+
+Cause: bad Ingress manifest; `spec.rules.http` has no actual content.
+
+Fix: correct your Ingress manifest.
+
+## E4017 The load balancer's tags were tampered with
+
+Cause: the load balancer's tags were modified, so the load balancer resource can no longer be located by tag.
+
+Fix:
+* Since the tags or the load balancer resource were deleted or tampered with, the data may be inconsistent. It is recommended to delete the load balancer, or delete all its tags, and then recreate the Ingress resource.
+
+## E4018 The LB specified in kubernetes.io/ingress.existLbId does not exist
+
+Cause: bad Ingress manifest; the LoadBalancer specified in the annotation `kubernetes.io/ingress.existLbId` does not exist.
+
+Troubleshooting: take the LB ID from the log and check whether the account has that LB resource in the region.
+
+Fix:
+* If the backend confirms the LB resource does exist, forward the ticket to CLB to investigate why the lookup failed.
+* If the backend confirms the LB resource does not exist, check whether the LB ID in the manifest is correct.
+
+## E4019 Can not use lb: created by TKE for ingress: xxx
+
+Cause: the LB ID specified in `kubernetes.io/ingress.existLbId` is already used by an Ingress or Service (its lifecycle is managed by the TKE cluster) and cannot be reused.
+
+See also: lifecycle management of Ingress.
+
+Fix:
+* Use another LB, or
+* Release the LB from the Ingress or Service using it (follow these steps):
+  * Remove the `tke-createdBy-flag` tag from the LB resource.
+  * Delete the Ingress or Service that uses this LB. (Without the first step, the LB resource would be destroyed automatically.)
+  * Point the new Ingress at this LB.
+  * Add the tag `tke-createdBy-flag=yes` back onto the LB resource. (Without this step, the resource's lifecycle will no longer be managed by the Ingress and it will not be destroyed automatically later.)
+
+## E4020 Error lb: used by ingress: xxx
+
+Cause: the LB ID specified in `kubernetes.io/ingress.existLbId` is already used by an Ingress and cannot be reused.
+
+See also: lifecycle management of Ingress.
+
+Fix:
+* Use another LB, or
+* Release the LB from the Ingress using it (follow these steps):
+  * Remove the `tke-createdBy-flag` tag from the LB resource.
+  * Delete the Ingress or Service that uses this LB. (Without the first step, the LB resource would be destroyed automatically.)
+  * Point the new Ingress at this LB.
+  * Add the tag `tke-createdBy-flag=yes` back onto the LB resource. (Without this step, the resource's lifecycle will no longer be managed by the Ingress and it will not be destroyed automatically later.)
+
+## E4021 exist lb: xxx listener not empty
+
+Cause: the LB specified in `kubernetes.io/ingress.existLbId` still has listeners that were not deleted.
+
+Details: when reusing an existing LB, listeners already present on it could be operated on by mistake, so existing LBs that still have listeners are rejected.
+
+Fix:
+* Use another LB, or
+* Delete all listeners under this LB.
+
+## E4022 Ingress rules invalid.
+
+Cause: the `kubernetes.io/ingress.http-rules` annotation failed to parse.
+
+Details: the content of `kubernetes.io/ingress.http-rules` must be a JSON string; incorrect content raises this error.
+
+Fix: check that the `http-rules` defined in the manifest are correct.
+
+Example:
+
+```yaml
+kubernetes.io/ingress.http-rules: '[{"path":"/abc","backend":{"serviceName":"nginx-service-2","servicePort":"8080"}}]'
+```
+
+## E4023 Ingress rules invalid.
+
+Cause: the `kubernetes.io/ingress.https-rules` annotation failed to parse.
+
+Details: the content of `kubernetes.io/ingress.https-rules` must be a JSON string; incorrect content raises this error.
+
+Fix: check that the `https-rules` defined in the manifest are correct.
+
+Example:
+
+```yaml
+kubernetes.io/ingress.https-rules: '[{"path":"/abc","backend":{"serviceName":"nginx-service-2","servicePort":"8080"}}]'
+```
+
+## E4024 create lb error: InvalidParameter or InvalidParameterValue
+
+Cause: when creating the Ingress LB, the parameters configured via annotations are wrong.
+
+Details: the parameters configured in the annotations are invalid.
+
+Fix: check the annotation parameters.
+
+## E4025 create lb error: ResourceInsufficient
+
+Cause: insufficient resources when creating the Ingress LB.
+
+Details: usually the subnet of an internal LB has run out of IPs.
+
+Fix: check whether the subnet's IPs are exhausted.
+
+## E4026 Ingress extensive parameters invalid.
+
+Cause: when creating the Ingress LB, the `kubernetes.io/ingress.extensiveParameters` annotation failed to parse.
+
+Details: the annotation content provided is not a valid JSON string.
+
+Fix:
+* Fix the annotation content; an example for reference: `kubernetes.io/ingress.extensiveParameters: '{"AddressIPVersion":"IPv4","ZoneId":"ap-guangzhou-1"}'`
+* Parameter reference: https://cloud.tencent.com/document/product/214/30692
+
+## E4027 EnsureCreateLoadBalancer Insufficient Account Balance
+
+Cause: the account balance is insufficient.
+
+Fix: top up the account.
+
+## E4030 This interface only support HTTP/HTTPS listener
+
+Cause: when reusing an existing LB, layer-7 rules cannot be created on a classic CLB.
+
+Fix: specify a different CLB, or remove the annotation and let the Ingress create a CLB itself.
+
+## E4031 Ingress rule invalid. Invalid path.
+
+Cause: the path of a layer-7 rule in the manifest is malformed.
+
+Fix: check that the path matches the following format.
+
+* Defaults to `/`; must start with `/`; length 1-120.
+* Non-regex URL paths start with `/` and support the character set `a-z A-Z 0-9 . - / = ?`.
+
+## E4032 LoadBalancer AddressIPVersion Error
+
+Cause: a wrong `AddressIPVersion` parameter was used.
+
+Details: clusters on IPv4 networks currently only support IPv4 and NAT IPv6 load balancers, not pure IPv6 load balancers.
+
+Fix:
+* If creating a load balancer, adjust the `kubernetes.io/ingress.extensiveParameters` parameter.
+* If reusing an existing load balancer, that load balancer cannot be used; pick another one.
+
+## E4033 LoadBalancer AddressIPVersion do not support
+
+Cause: the region does not support IPv6 load balancers.
+
+Details: not every region supports IPv6 load balancers yet; if you have a strong business need, contact the CLB team.
+
+## E4034 Ingress RuleHostEmpty
+
+Cause: an Ingress rule has no Host configured.
+
+Details: for IPv4 load balancers, the IPv4 address is used as the Host when none is configured. With a pure IPv6 load balancer there is no such default-Host logic, so a domain must be specified.
+
+Fix: edit the Ingress and fill in the Host field.
+
+## E4035 LoadBalancer CertificateId Invalid
+
+Cause: the certificate ID format is invalid (wrong CertId length).
+
+Fix:
+* Reference: https://cloud.tencent.com/document/product/457/45738
+* Log in to the CLB console, confirm the certificate ID, and update the certificate ID described in the Secret used by the Ingress.
+
+## E4036 LoadBalancer CertificateId NotFound
+
+Cause: the certificate ID does not exist.
+
+Fix:
+* Reference: https://cloud.tencent.com/document/product/457/45738
+* Log in to the CLB console, confirm the certificate ID, and update the certificate ID described in the Secret used by the Ingress.
+
+## E4037 Annotation 'ingress.cloud.tencent.com/direct-access' Invalid
+
+Cause: the valid values of `ingress.cloud.tencent.com/direct-access` are true or false.
+
+Fix: check that the content of the `ingress.cloud.tencent.com/direct-access` annotation is a valid bool.
+
+## E4038 Certificate Type Error
+
+Cause: the configured certificate must be a server certificate; a client certificate cannot be used to configure one-way TLS.
+
+Fix:
+* Log in to the CLB console and check the type of the certificate in use, confirming it is a server certificate.
+* If it turns out to be a client certificate, replace it.
+* If it is confirmed to be a server certificate, contact CLB to troubleshoot the certificate issue.
+
+## E4039 Certificate Out of Date
+
+Cause: the configured certificate has expired; check its expiry time.
+
+Fix:
+* Reference: https://cloud.tencent.com/document/product/457/45738
+* Log in to the CLB console and check the expiry time of the certificate in use.
+* Replace it with a new certificate and update the Secret used by the Ingress to sync the certificate.
+
+## E4040 Certificate Not Found for SNI
+
+Cause: one or more of the domains described in the Ingress are not covered by the TLS domain-certificate rules.
+
+Fix:
+* Reference: https://cloud.tencent.com/document/product/457/45738
+* Check whether any domain lacks a corresponding certificate Secret.
+
+## E4041 Service Not Found
+
+Cause: a Service referenced by the Ingress does not exist.
+
+Fix: check that every Service the Ingress declares exists; note the Service and Ingress must be in the same namespace.
+
+## E4042 Service Port Not Found
+
+Cause: a Service port referenced by the Ingress does not exist.
+
+Fix: check that every Service the Ingress declares, and the ports it uses, exist.
+
+## E4043 TkeServiceConfig Not Found
+
+Cause: the TkeServiceConfig referenced by the Ingress via the "ingress.cloud.tencent.com/tke-service-config" annotation does not exist.
+
+Fix:
+* Reference: https://cloud.tencent.com/document/product/457/45700
+* Check that the TkeServiceConfig declared in the Ingress annotation exists, in the same namespace. Query with: `kubectl get tkeserviceconfigs.cloud.tencent.com -n <namespace>`
+
+## E4044 Mixed Rule Invalid
+
+Cause: the Ingress annotation "kubernetes.io/ingress.rule-mix" is not a valid JSON string.
+
+Fix:
+* https://cloud.tencent.com/document/product/457/45693
+* Write the annotation correctly per the documentation, or use the Ingress mixed-protocol feature from the console.
+
+## E4045 InternetChargeType Invalid
+
+Cause: the content of the Ingress annotation "kubernetes.io/ingress.internetChargeType" is invalid.
+
+Fix: see the valid values of the InternetChargeType parameter: https://cloud.tencent.com/document/api/214/30694#InternetAccessible
+
+## E4046 InternetMaxBandwidthOut Invalid
+
+Cause: the content of the Ingress annotation "kubernetes.io/ingress.internetMaxBandwidthOut" is invalid.
+
+Fix: see the valid values of the InternetMaxBandwidthOut parameter: https://cloud.tencent.com/document/api/214/30694#InternetAccessible
+
+## E4047 Service Type Invalid
+
+Cause: a Service used as an Ingress backend must be of type NodePort or LoadBalancer.
+
+Fix: check the Service type; use a NodePort or LoadBalancer Service as the Ingress backend.
+
+## E4048 Default Secret conflict.
+
+Cause: the Ingress TLS declares multiple default certificates, which conflict.
+
+Fix:
+* https://cloud.tencent.com/document/product/457/45738
+* Check the TLS configuration and configure at most one default certificate. Changes sync automatically after updating.
+
+## E4049 SNI Secret conflict.
+
+Cause: the Ingress TLS declares multiple certificates for the same domain, which conflict.
+
+Fix:
+* https://cloud.tencent.com/document/product/457/45738
+* Check the TLS configuration and configure at most one certificate per domain. Changes sync automatically after updating.
+
+## E4050 Annotation 'ingress.cloud.tencent.com/tke-service-config-auto' Invalid
+
+Cause: the valid values of `ingress.cloud.tencent.com/tke-service-config-auto` are true or false.
+
+Fix: check that the content of the `ingress.cloud.tencent.com/tke-service-config-auto` annotation is a valid bool.
+
+## E4051 Annotation 'ingress.cloud.tencent.com/tke-service-config' Name Invalid
+
+Cause: the name in `ingress.cloud.tencent.com/tke-service-config` must not end with '-auto-ingress-config' or '-auto-service-config', which would collide with the names of auto-synced configurations.
+
+Fix: change the `ingress.cloud.tencent.com/tke-service-config` annotation to reference a TkeServiceConfig with a different name.
+
+## E4052 Ingress Host Invalid
+
+Cause: per the K8S restriction, the Ingress Host must match the regular expression "(\*|[a-z0-9]([-a-z0-9]*[a-z0-9])?)(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)+".
+
+Fix: normal domains all satisfy this; rule out cases such as a domain without a "." or one containing special characters.
+
+## E4053 LoadBalancer Subnet IP Insufficient
+
+Cause: the subnet where the load balancer resides has run out of IPs, so a load balancer cannot be created in the configured subnet.
+
+Fix:
+* Check the annotation used to select the subnet: "kubernetes.io/ingress.subnetId".
+* Use a different subnet, or free up some IPs in this one.
+
+## E4091 CreateLoadBalancer Invoke vpc failed: subnet not exists
+
+Cause: the subnet specified when creating the internal LB is incorrect.
+
+Fix: check whether the subnet ID in the `kubernetes.io/ingress.subnetId` field of the Ingress manifest is correct.
+
+## E5003 CLB InternalError
+
+Cause: CLB internal error.
+
+Fix: hand over to CLB to investigate.
+
+## E5004 CVM InternalError
+
+Cause: CVM internal error.
+
+Fix: forward the ticket to CVM immediately for follow-up.
+
+## E5005 TAG InternalError
+
+Cause: Tag service internal error.
+
+Fix: forward the ticket to the Tag service immediately for follow-up.
+
+## E5007 Norm InternalError
+
+Cause: Norm internal error.
+
+Fix: forward the ticket to the Norm service immediately for follow-up.
+
+## E5008 TKE InternalError
+
+Cause: TKE internal error.
+
+Fix: forward the ticket to TKE immediately for follow-up.
+
+## E5009 CLB BatchTarget Faild
+
+Cause: CLB internal error; batch binding/unbinding of backends partially failed.
+
+Fix: forward the ticket to CLB immediately for follow-up.
+
+## E6001 Failed to get zone from env: TKE_REGION / E6002 Failed to get vpcId from env: TKE_VPC_ID
+
+Cause: the cluster configmap `tke-config` is missing, so the container fails to start.
+
+Fix:
+* `kubectl get configmap -n kube-system tke-config` to check whether the configmap exists.
+* `kubectl create configmap tke-config -n kube-system --from-literal=TKE_REGION=<region> --from-literal=TKE_VPC_ID=<vpc-id>` to create the configmap; replace region and vpc_id with your cluster's actual values.
+* `kubectl edit deployment -n kube-system l7-lb-controller -o yaml` and make sure the `env` section in the template is correct:
+  ```yaml
+  spec:
+    containers:
+    - args:
+      - --cluster-name=<cluster-id>
+      env:
+      - name: TKE_REGION
+        valueFrom:
+          configMapKeyRef:
+            key: TKE_REGION
+            name: tke-config
+      - name: TKE_VPC_ID
+        valueFrom:
+          configMapKeyRef:
+            key: TKE_VPC_ID
+            name: tke-config
+  ```
+
+## E6006 Error during sync: Post https://clb.internal.tencentcloudapi.com/: dial tcp: i/o timeout
+
+Cause A: CoreDNS fails to resolve the domains of the relevant API services.
+
+Domains possibly affected by the same problem:
+
+```txt
+lb.api.qcloud.com
+tag.api.qcloud.com
+cbs.api.qcloud.com
+cvm.api.qcloud.com
+snapshot.api.qcloud.com
+monitor.api.qcloud.com
+scaling.api.qcloud.com
+ccs.api.qcloud.com
+tke.internal.tencentcloudapi.com
+clb.internal.tencentcloudapi.com
+cvm.internal.tencentcloudapi.com
+```
+
+Fix: append the following host entries to l7-lb-controller:
+
+```bash
+kubectl patch deployment l7-lb-controller -n kube-system --patch '{"spec":{"template":{"spec":{"hostAliases":[{"hostnames":["lb.api.qcloud.com","tag.api.qcloud.com","cbs.api.qcloud.com","cvm.api.qcloud.com","snapshot.api.qcloud.com","monitor.api.qcloud.com","scaling.api.qcloud.com","ccs.api.qcloud.com"],"ip":"169.254.0.28"},{"hostnames":["tke.internal.tencentcloudapi.com","clb.internal.tencentcloudapi.com","cvm.internal.tencentcloudapi.com"],"ip":"169.254.0.95"}]}}}}'
+```
+
+Cause B: cluster network problems.
+
+Fix: none for now; open a ticket and attach the exception stack from the logs.
+
+## E6007 | E6009 Ingress InternalError
+
+Cause: Ingress internal error.
+
+Fix: forward the ticket to misakazhou immediately and attach the exception stack from the logs.
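+
+These `E`-prefixed codes come from the Ingress controller's own logs, so when an error is not surfaced elsewhere it is worth pulling the logs directly (a sketch, using the `l7-lb-controller` deployment name referenced under E6001 above):
+
+```bash
+# Grep recent controller logs for error codes such as E4xxx/E5xxx/E6xxx
+kubectl -n kube-system logs deploy/l7-lb-controller --tail=1000 | grep -E 'E[456][0-9]{3}'
+```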
+
+## W1000 Service xxx not found in store
+
+Warning cause: the specified Service does not exist, so the Ingress rule cannot find the backend it binds to.
+
+Fix: check whether the resource described by `backend.serviceName` exists among the cluster's Services.
+
+## W1001 clean not creatted by TKE loadbalancer: xxx for ingress:
+
+Warning cause: when the Ingress was deleted, the load balancer it used was not deleted.
+
+Details: the load balancer used by the Ingress does not carry the `tke-createdBy-flag=yes` tag, so its lifecycle is not managed by the Ingress; delete it manually yourself if needed.
+
+Fix: if needed, delete the load balancer resource manually.
+
+## W1002 do not clean listener.
+
+Warning cause: when the Ingress was deleted, the listeners under the load balancer it used were not deleted.
+
+Details: a listener under the load balancer is not named TKE-DEDICATED-LISTENER; it was not created by the Ingress or has been modified, so its lifecycle is not managed by the Ingress; delete it manually yourself if needed.
+
+Fix: if needed, delete the listener under the load balancer manually.
diff --git a/content/tencent/appendix/useful-kubectl-for-tencent-cloud.md b/content/tencent/appendix/useful-kubectl-for-tencent-cloud.md
new file mode 100644
index 0000000..62abffa
--- /dev/null
+++ b/content/tencent/appendix/useful-kubectl-for-tencent-cloud.md
+# Useful kubectl scripts
+
+This article shares handy kubectl scripts commonly used with Tencent Kubernetes Engine.
+
+## ENI related
+
+Query each node's eni-ip Allocatable:
+
+```bash
+kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.allocatable.tke\.cloud\.tencent\.com\/eni-ip}{"\n"}{end}'
+```
+
+The eni-ip Allocatable of nodes in a given availability zone:
+
+```bash
+kubectl get nodes -o=jsonpath='{range .items[?(@.metadata.labels.failure-domain\.beta\.kubernetes\.io\/zone=="100003")]}{.metadata.name}{"\t"}{.status.allocatable.tke\.cloud\.tencent\.com\/eni-ip}{"\n"}{end}'
+```
+
+Show the subnet CIDRs of each node's ENIs:
+
+```bash
+kubectl get nec -o json | jq -r '.items[] | select(.status.eniInfos!=null)| { name: .metadata.name, subnetCIDR: [.status.eniInfos[].subnetCIDR]|join(",") }| "\(.name)\t\(.subnetCIDR)"'
+```
+
+Find which availability zones host the nodes that can bind ENIs of a given subnet:
+
+```bash
+# the target subnet
+subnetCIDR="11.185.48.0/20"
+# find the nodes that can bind ENIs of this subnet
+kubectl get nec -o json | jq -r '.items[] | select(.status.eniInfos!=null)| { name: .metadata.name, subnetCIDR: [.status.eniInfos[].subnetCIDR]|join(",") }| "\(.name)\t\(.subnetCIDR)"' | grep $subnetCIDR | awk '{print $1}' > node-cidr.txt
+# list the availability zone of every node
+kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.failure-domain\.beta\.kubernetes\.io\/zone}{"\n"}{end}' > node-zone.txt
+# filter: the zones of the nodes that can bind this subnet
+awk 'BEGIN{while(getline<"node-cidr.txt") a[$1]=1;} {if(a[$1]==1) print $0;}' node-zone.txt
+
+
+# all merged into one line:
+subnetCIDR="11.185.48.0/20"
+kubectl get nec -o json | jq -r '.items[] | select(.status.eniInfos!=null)| { name: .metadata.name, subnetCIDR: [.status.eniInfos[].subnetCIDR]|join(",") }| "\(.name)\t\(.subnetCIDR)"' | grep $subnetCIDR | awk '{print $1}' > node-cidr.txt && kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.failure-domain\.beta\.kubernetes\.io\/zone}{"\n"}{end}' > node-zone.txt && awk 'BEGIN{while(getline<"node-cidr.txt") a[$1]=1;} {if(a[$1]==1) print $0;}' node-zone.txt
+```
+
+## EKS related
+
+Check the remaining IP count of an eks cluster's subnets:
+
+```bash
+kubectl get node -o json | jq -r '.items[] | {subnet: .metadata.annotations."eks.tke.cloud.tencent.com/subnet-id", ip: .metadata.labels."eks.tke.cloud.tencent.com/available-ip-count"} | "\(.subnet)\t\(.ip)"'
+```
+
+Check the remaining IP count of a specific subnet:
+
+```bash
+# query with the subnet id inline
+kubectl get node -o json | jq -r '.items[] | select(.metadata.annotations."eks.tke.cloud.tencent.com/subnet-id"=="subnet-1p9zhi9g") | {ip: .metadata.labels."eks.tke.cloud.tencent.com/available-ip-count"} | "\(.ip)"'
+
+# query with a variable
+subnet="subnet-1p9zhi9g"
+kubectl get node -o json | jq -r '.items[] | {subnet: .metadata.annotations."eks.tke.cloud.tencent.com/subnet-id", ip: .metadata.labels."eks.tke.cloud.tencent.com/available-ip-count"} | "\(.subnet)\t\(.ip)"' | grep $subnet | awk '{print $2}'
+```
+
+Check the remaining IP count of the subnet hosting a pod with a static IP:
+
+```bash
+pod="wedata-lineage-service-test-env-48872523-0"
+kubectl get cm static-addresses -o json | jq -r ".data.\"${pod}\"" | xargs kubectl get node -o json | jq -r '{ip: .metadata.labels."eks.tke.cloud.tencent.com/available-ip-count"} | "\(.ip)"'
+```
\ No newline at end of file
diff --git a/content/tencent/cluster/eks-note.md b/content/tencent/cluster/eks-note.md
new file mode 100644
index 0000000..e350341
--- /dev/null
+++ b/content/tencent/cluster/eks-note.md
+# Notes on Serverless elastic clusters
+
+## Public network access
+
+Unlike TKE clusters, EKS has no nodes, so pods cannot ride a node's own public bandwidth to reach the internet as they can on TKE.
+
+With no nodes, there are two ways to give pods public network access:
+
+1. [Via a NAT gateway](https://cloud.tencent.com/document/product/457/48710)
+2. [Via an elastic public IP](https://cloud.tencent.com/document/product/457/60354)
+
+In most cases consider the first: create a NAT gateway and configure routes in the VPC route tables. If you want the whole VPC to use this NAT gateway for internet egress by default, modify the default route table:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722111352.png)
+
+If only the pods on super nodes should use this NAT gateway, create a new route table.
+
+To configure it, create a routing policy in the route table: for the `0.0.0.0/0` CIDR, set the next-hop type to `NAT Gateway` and select the NAT gateway instance created earlier:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722111650.png)
+
+After creating it, if it is not the default route table, associate it with the super nodes' subnets:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722111842.png)
+
+## Port 9100
+
+EKS listens on port 9100 of every pod by default to expose pod monitoring metrics. If your workload itself listens on 9100, it will fail; see [the port 9100 problem](https://imroc.cc/kubernetes/tencent/appendix/eks-annotations.html#9100-%E7%AB%AF%E5%8F%A3%E9%97%AE%E9%A2%98).
+
+## Mind the quota limits
+
+Watch the quota limits when using an EKS cluster; if they are insufficient, open a ticket to raise them:
+1. Max pods per cluster (200 by default).
+2. Max instances bound per security group (if you don't assign a security group to pods, the default security group of the current project and region is used; each security group can bind at most 2000 instances).
+
+## ipvs timeout issues
+
+### DNS timeouts in istio scenarios
+
+istio's sidecar (istio-proxy) relies on conntrack for connection tracking when intercepting traffic. When traffic it does not intercept (such as UDP) goes through a service, it is forwarded by ipvs. Both ipvs and conntrack have their own connection timeout settings; if they disagree, a connection may still exist in conntrack but have been cleaned up in ipvs, so outgoing packets get scheduled by ipvs to a new rs, and when that rs replies, the packet matches no conntrack entry, reverse SNAT is skipped, and the process never receives the reply.
+
+In EKS the ipvs timeout currently defaults to 5s while the conntrack timeout defaults to 120s, so with TCM or a self-installed istio on EKS, DNS lookups can time out a while after coredns scales out.
+
+Until this is productized, add the following pod annotation to set the ipvs timeout to 120s as well, aligned with the conntrack timeout:
+
+```yaml
+eks.tke.cloud.tencent.com/ipvs-udp-timeout: "120s"
+```
+
+### Connection reset by peer in gRPC scenarios
+
+gRPC uses long-lived connections. The Java gRPC default idle timeout is 30 minutes and no TCP keepalive heartbeat is configured, while the ipvs default tcp timeout is 15 minutes.
+
+This causes a problem: after the workload idles for 15 minutes, ipvs drops the connection, but the application above still considers it alive and reuses it to send packets. Since the connection no longer exists in ipvs, ipvs responds with an RST and tears the connection down; in the workload's logs this shows up as `Connection reset by peer`.
+
+If you'd rather not change code to enable keepalive, adjust eks's ipvs tcp timeout to match the workload's idle timeout:
+
+```yaml
+eks.tke.cloud.tencent.com/ipvs-tcp-timeout: "1800s"
+```
\ No newline at end of file
diff --git a/content/tencent/faq/clb-loopback.md b/content/tencent/faq/clb-loopback.md
new file mode 100644
index 0000000..b86e37b
--- /dev/null
+++ b/content/tencent/faq/clb-loopback.md
+# The CLB loopback problem
+
+## Problem description
+
+Some TKE users may find services unreachable, or see multi-second delays when accessing Ingress, because of the CLB loopback problem. This article covers the background, the cause, and some thoughts and recommendations.
+
+## What are the symptoms?
+
+Possible symptoms of CLB loopback:
+
+1. In both iptables and ipvs mode, accessing an internal Ingress of the same cluster shows a 4-second delay or fails.
+2. In ipvs mode, accessing an internal LoadBalancer Service of the same cluster from within the cluster fails entirely, or works only intermittently.
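+
+A quick way to reproduce the symptom is to curl the CLB address repeatedly from a pod and watch for stalls (a sketch; `<CLB-IP>` is a placeholder for your internal Ingress or Service CLB address):
+
+```bash
+# Requests that land back on the client's own node stall (~4s retry for L7) or fail (L4)
+kubectl run curl-test --rm -it --restart=Never --image=curlimages/curl -- \
+  sh -c 'for i in $(seq 1 20); do time curl -s -o /dev/null --max-time 10 http://<CLB-IP>/; done'
+```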
+
+## Why does loopback happen?
+
+The root cause: when the CLB forwards a request to an rs, both the source and destination IPs of the packet end up inside the same node, so the packet loops inside the instance and never gets out:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161745.png)
+
+Let's analyze the concrete scenarios.
+
+### Analyzing Ingress loopback
+
+Start with Ingress. TKE's built-in default Ingress creates one CLB per Ingress resource, with layer-7 listener rules on 80 and 443 (HTTP/HTTPS), and binds, for each Ingress location, the same NodePort on every TKE node as the rs (each location maps to a Service, and each Service exposes traffic via the same NodePort on every node). The CLB matches a request to a location and forwards it to the corresponding rs (the NodePort); from the NodePort, K8S iptables or ipvs forwards the traffic on to the backend pod. When a pod in the cluster accesses the cluster's own internal Ingress, the CLB forwards the request to the corresponding NodePort on one of the nodes:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161806.png)
+
+As the figure shows, when the chosen node happens to be the node where the client pod lives:
+
+1. A pod in the cluster accesses the CLB, and the CLB forwards the request to the corresponding NodePort on an arbitrary node.
+2. When the packet arrives at the NodePort, the destination IP is the node IP and the source IP is the client pod's real IP, because the CLB does not SNAT and passes the real source IP through.
+3. Since the source and destination IPs are both inside this machine, the packet loops and the CLB never gets a response from the rs.
+
+Why then is the usual symptom for in-cluster Ingress access a delay of a few seconds? Because a layer-7 CLB retries the next rs if a backend times out (about 4s). If the client's own timeout is longer, the loopback manifests as slow responses with a few seconds of delay. Of course, with a single-node cluster the CLB has no other rs to retry, and access fails outright.
+
+### Analyzing LoadBalancer Service loopback
+
+That was the layer-7 CLB case; now for layer 4. When exposing a service with an internal LoadBalancer Service, an internal CLB is created with corresponding layer-4 listeners (TCP/UDP). When a pod in the cluster accesses the `EXTERNAL-IP` of the LoadBalancer Service (the CLB IP), vanilla K8S does not actually go to the LB: it forwards directly via iptables or ipvs to the backend pod (bypassing the CLB):
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161817.png)
+
+So vanilla K8S logic never hits this problem. But in TKE's ipvs mode, a client packet for the CLB IP really does reach the CLB, so in ipvs mode a pod accessing the CLB IP of the cluster's own LoadBalancer Service runs into the loopback problem, much like the internal Ingress case above:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161827.png)
+
+One difference: a layer-4 CLB does not retry the next rs, so on loopback the symptom is usually intermittent connectivity; and of course with a single-node cluster, nothing works at all.
+
+Why doesn't TKE's ipvs mode use the vanilla K8S forwarding logic (skip the LB, forward straight to backend pods)? That starts with an issue I found in July 2019 that the community still has not resolved: https://github.com/kubernetes/kubernetes/issues/79783
+
+Some background: previously, when an ipvs-mode TKE cluster exposed a service via an internal LoadBalancer Service, the internal CLB's health probes against the backend NodePorts all failed, because:
+
+1. ipvs works mainly on the INPUT chain, and the VIPs to forward (the Service's Cluster IP and `EXTERNAL-IP`) must be treated as local IPs so packets enter the INPUT chain and get handed to ipvs.
+2. kube-proxy does this by binding the Cluster IP and `EXTERNAL-IP` to a dummy NIC called `kube-ipvs0`, which exists purely to hold the VIPs (the kernel auto-generates local routes for them) and not to receive traffic.
+3. The source IP of the internal CLB's probes to a NodePort is the CLB's own VIP, and the destination is the node IP. When a probe packet reaches the node, the node sees the source IP as a local IP (it is bound to `kube-ipvs0`) and drops it. The CLB's probes therefore never get responses and all fail. The CLB has an "all-dead-is-all-alive" rule (all probes failing means all rs are treated as forwardable), but the probes effectively serve no purpose, which causes anomalies in some situations.
+
+To fix this, TKE's approach was: in ipvs mode, do not bind the `EXTERNAL-IP` to `kube-ipvs0`. That means packets from in-cluster pods to the CLB IP do not enter the INPUT chain; they leave through the node NIC and genuinely reach the CLB. Probe packets entering the node are then no longer treated as local IPs and dropped, and probe responses are no longer trapped in the INPUT chain.
+
+This fixed the CLB health probe failures, but also means in-cluster packets to the CLB really reach the CLB; since the accessed service is inside the cluster, the packet is forwarded back to one of the nodes, and the possibility of loopback appears.
+
+## Why don't public CLBs have this problem?
+
+Public Ingress and public LoadBalancer Services do not loop back. My understanding is mainly that the source IP a public CLB sees is the instance's public egress IP, and the instance is unaware of its own public IP; when the packet is forwarded back to the instance, the public source IP is not considered local, so there is no loopback.
+
+## Does CLB have a loopback-avoidance mechanism?
+
+Yes. CLB inspects the source IP, and if a backend rs has the same IP it will not forward to that rs but picks another. However, the source pod IP differs from the backend rs IP, and the CLB cannot know the two IPs are on the same node, so it may still forward there, and loopback can still occur.
+
+## Does anti-affinity between client and server avoid it?
+
+If I deploy the client and server with anti-affinity so they never share a node, does that avoid the problem? By default the LB binds rs via node NodePorts and may forward to any node's NodePort, in which case loopback can occur whether or not client and server share a node. But with `externalTrafficPolicy: Local` on the Service, the LB only forwards to nodes that have server pods; if client and server are scheduled to different nodes through anti-affinity, no loopback occurs. So anti-affinity plus `externalTrafficPolicy: Local` can avoid the problem (for both internal Ingress and internal LoadBalancer Services), just somewhat awkwardly.
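+
+For reference, the `externalTrafficPolicy` half of that workaround is a one-line change on the Service (a sketch with a hypothetical Service name):
+
+```bash
+# After this, the CLB only keeps nodes that actually run a backend pod as rs
+kubectl patch service my-svc -p '{"spec":{"externalTrafficPolicy":"Local"}}'
+```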
+
+## Does VPC-CNI's LB-to-pod direct mode have this problem too?
+
+TKE usually uses the Global Router network mode (the bridge solution); the other is VPC-CNI (the ENI solution). Currently LB-to-pod direct connection only supports VPC-CNI pods: the LB does not bind NodePorts as rs but binds the backend pods directly:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161841.png)
+
+This bypasses NodePorts, so requests are no longer forwarded to arbitrary nodes as before. But if client and server are on the same node, loopback can still happen; anti-affinity avoids it.
+
+## Any recommendations?
+
+The anti-affinity plus `externalTrafficPolicy: Local` workaround is not very elegant. Generally speaking, avoid going through the cluster's own CLB to access services inside the cluster: the service itself is in the cluster, and the detour through the CLB not only lengthens the network path but also invites the loopback problem.
+
+Prefer the Service name for in-cluster access, e.g. `server.prod.svc.cluster.local`; that path does not go through the CLB and has no loopback problem.
+
+If the business is coupled to a domain name and cannot use the Service name, use coredns's rewrite plugin to point the domain at the in-cluster Service. Example coredns configuration:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns
+  namespace: kube-system
+data:
+  Corefile: |2-
+    .:53 {
+        rewrite name roc.oa.com server.prod.svc.cluster.local
+        ...
+
+```
+
+If multiple Services share one domain, deploy your own Ingress Controller (e.g. nginx-ingress), use the rewrite technique above to point the domain at the self-built Ingress Controller, and let that Ingress match the request's location (domain + path) to a Service and forward to the backend pods. That whole path also avoids the CLB, and with it the loopback problem.
+
+## Summary
+
+This article walked through TKE's CLB loopback problem in detail, covering its causes and consequences and some recommendations for avoiding it.
+
diff --git a/content/tencent/faq/controller-manager-and-scheduler-unhealthy.md b/content/tencent/faq/controller-manager-and-scheduler-unhealthy.md
new file mode 100644
index 0000000..fbd66f9
--- /dev/null
+++ b/content/tencent/faq/controller-manager-and-scheduler-unhealthy.md
+# controller-manager and scheduler showing Unhealthy
+
+## Background
+
+In some places the controller-manager and scheduler components of a TKE cluster show as Unhealthy, for example with `kubectl get cs`:
+
+```bash
+$ kubectl get cs
+NAME                 STATUS      MESSAGE                                                                                       ERROR
+scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
+controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
+etcd-0               Healthy     {"health":"true"}
+```
+
+Or viewed from rancher:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161905.png)
+
+## Cause
+
+The master components of a TKE managed cluster are deployed separately; the apiserver is not on the same machine as controller-manager and scheduler. Yet the status of controller-manager and scheduler is probed by the apiserver, and the probing code is hard-coded to connect to the local machine:
+
+```go
+func (s componentStatusStorage) serversToValidate() map[string]*componentstatus.Server {
+	serversToValidate := map[string]*componentstatus.Server{
+		"controller-manager": {Addr: "127.0.0.1", Port: ports.InsecureKubeControllerManagerPort, Path: "/healthz"},
+		"scheduler":          {Addr: "127.0.0.1", Port: ports.InsecureSchedulerPort, Path: "/healthz"},
+	}
+```
+
+This is purely a display issue and does not affect usage.
+
+## Related links
+
+* Source of the hard-coded local probe: https://github.com/kubernetes/kubernetes/blob/v1.14.3/pkg/registry/core/rest/storage_core.go#L256
+* k8s issue: https://github.com/kubernetes/kubernetes/issues/19570
+* rancher issue: https://github.com/rancher/rancher/issues/11496
diff --git a/content/tencent/faq/modify-rp-filter-causing-exception.md b/content/tencent/faq/modify-rp-filter-causing-exception.md
new file mode 100644
index 0000000..8bd367a
--- /dev/null
+++ b/content/tencent/faq/modify-rp-filter-causing-exception.md
+# Changing rp_filter breaks the network
+
+## Background
+
+If you use the VPC-CNI network mode in TKE, rp_filter is turned off on the nodes:
+
+```bash
+net.ipv4.conf.all.rp_filter=0
+net.ipv4.conf.eth0.rp_filter=0
+```
+
+If for some reason rp_filter gets turned on (set to 1), all sorts of strange symptoms follow; when traced down, the network is unreachable, and the reason it is unreachable is that rp_filter was enabled.
+
+## When might it get turned on?
+
+Usually one of two causes:
+1. A custom node initialization script changed the default kernel parameters and turned rp_filter on.
+2. A [custom image](https://cloud.tencent.com/document/product/457/39563) customized the kernel parameters and turned rp_filter on.
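+
+To confirm whether a node is affected, check the effective values directly (a sketch; VPC-CNI expects both to be 0):
+
+```bash
+# Any value other than 0 here can break the VPC-CNI traffic paths described below
+sysctl net.ipv4.conf.all.rp_filter net.ipv4.conf.eth0.rp_filter
+```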
+
+## Why does enabling rp_filter break connectivity?
+
+rp_filter is the kernel switch for validating the source address of packets. When it is on, packets whose send and receive paths differ are dropped, mainly to guard against DDoS and IP spoofing. In TKE's VPC-CNI implementation, when a pod talks directly to an IP outside the VPC CIDR, outgoing packets leave through a dedicated ENI while incoming packets arrive on the primary NIC (eth0); with rp_filter on, connectivity breaks.
+
+A few common scenarios, summarized:
+1. Pods accessing the public internet (the public destination IP is outside the VPC CIDR).
+2. Using a public CLB with [CLB-to-pod direct connection](../networking/clb-to-pod-directly.md) (the public source IP is outside the VPC CIDR).
+3. Pods accessing the apiserver (the 169.254.x.x IP is outside the VPC CIDR).
diff --git a/content/tencent/images/use-foreign-container-image.md b/content/tencent/images/use-foreign-container-image.md
new file mode 100644
index 0000000..2502407
--- /dev/null
+++ b/content/tencent/images/use-foreign-container-image.md
+# Using overseas container images
+
+## Background
+
+When deploying open-source applications on TKE, images from overseas public registries such as gcr and quay.io often fail to pull or pull very slowly. TKE actually already provides acceleration for overseas images; this article shows how to use it when deploying open-source applications.
+
+## Image address mapping
+
+Supported registries and their mapped addresses:
+
+| Overseas registry | Tencent Cloud mapped address |
+|:----|:----|
+| quay.io | quay.tencentcloudcr.com |
+| nvcr.io | nvcr.tencentcloudcr.com |
+
+## Change the image address
+
+When deploying, change the image address by replacing the registry domain with the Tencent Cloud mapped address (see the table above), e.g. change `quay.io/prometheus/node-exporter:v0.18.1` to `quay.tencentcloudcr.com/prometheus/node-exporter:v0.18.1`, so pulls go through the accelerated address.
+
+## Don't want to change image addresses?
+
+If there are too many images and changing every address is a pain (e.g. a helm deployment referencing many images), you can use containerd's mirror configuration and avoid changing addresses at all (provided the container runtime is containerd).
+
+> docker only supports mirror configuration for docker hub, so with docker as the runtime you must change the image addresses.
+
+Concretely, edit the containerd configuration (`/etc/containerd/config.toml`) and add the Tencent Cloud mapped addresses to mirrors:
+
+```toml
+    [plugins.cri.registry]
+      [plugins.cri.registry.mirrors]
+        [plugins.cri.registry.mirrors."quay.io"]
+          endpoint = ["https://quay.tencentcloudcr.com"]
+        [plugins.cri.registry.mirrors."nvcr.io"]
+          endpoint = ["https://nvcr.tencentcloudcr.com"]
+        [plugins.cri.registry.mirrors."docker.io"]
+          endpoint = ["https://mirror.ccs.tencentyun.com"]
+```
+
+Editing every node by hand is too tedious; instead, specify custom data (the custom script run when a node is initialized) when adding nodes or creating node pools, to patch the containerd configuration automatically:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161649.png)
+
+Paste in the following script:
+
+```bash
+sed -i '/\[plugins\.cri\.registry\.mirrors\]/ a\\ \ \ \ \ \ \ \ [plugins.cri.registry.mirrors."quay.io"]\n\ \ \ \ \ \ \ \ \ \ endpoint = ["https://quay.tencentcloudcr.com"]' /etc/containerd/config.toml
+sed -i '/\[plugins\.cri\.registry\.mirrors\]/ a\\ \ \ \ \ \ \ \ [plugins.cri.registry.mirrors."nvcr.io"]\n\ \ \ \ \ \ \ \ \ \ endpoint = ["https://nvcr.tencentcloudcr.com"]' /etc/containerd/config.toml
+systemctl restart containerd
+```
+
+> Node pools are recommended: the script runs automatically whenever the pool scales out, so you don't have to configure custom data every time you add a node.
+
+## References
+
+* [TKE docs: accelerating overseas image pulls](https://cloud.tencent.com/document/product/457/51237)
diff --git a/content/tencent/images/use-mirror-in-container.md b/content/tencent/images/use-mirror-in-container.md
new file mode 100644
index 0000000..b551394
--- /dev/null
+++ b/content/tencent/images/use-mirror-in-container.md
+# Speeding up package installation with mirror repositories
+
+## Overview
+
+On TKE, when installing packages inside a running container or while building an image, the package manager bundled in the base image is typically used, and the base image's default repositories are often slow from within China, making installation painfully slow. Tencent Cloud itself hosts repositories for the various linux distributions; switching the container's repositories to the Tencent Cloud ones provides the speedup.
+
+## Determine the linux distribution
+
+A container image is normally built from some base image, and the Dockerfile usually tells you which linux distribution the base image uses.
+
+You can also exec into a running container and run `cat /etc/os-release` to check the base image's distribution and version.
+
+## Ubuntu
+
+First replace the repositories according to the Ubuntu release, then run `apt update -y` to refresh them, and finally install the packages you need with `apt install -y xxx`.
+
+**Repository replacement for each release follows**
+
+### Ubuntu 20
+
+```bash
+cat > /etc/apt/sources.list <<'EOF'
+deb http://mirrors.tencentyun.com/ubuntu/ focal main restricted universe multiverse
+deb http://mirrors.tencentyun.com/ubuntu/ focal-security main restricted universe multiverse
+deb http://mirrors.tencentyun.com/ubuntu/ focal-updates main restricted universe multiverse
+#deb http://mirrors.tencentyun.com/ubuntu/ focal-proposed main restricted universe multiverse
+#deb http://mirrors.tencentyun.com/ubuntu/ focal-backports main restricted universe multiverse
+deb-src http://mirrors.tencentyun.com/ubuntu/ focal main restricted universe multiverse
+deb-src http://mirrors.tencentyun.com/ubuntu/ focal-security main restricted universe multiverse
+deb-src http://mirrors.tencentyun.com/ubuntu/ focal-updates main restricted universe multiverse
+#deb-src http://mirrors.tencentyun.com/ubuntu/ focal-proposed main restricted universe multiverse
+#deb-src http://mirrors.tencentyun.com/ubuntu/ focal-backports main restricted universe multiverse
+EOF
+```
## Ubuntu

先根据 Ubuntu 发行版替换软件源,然后执行 `apt update -y` 更新软件源,最后再使用 `apt install -y xxx` 来安装需要的软件包。

**下面是各发行版的软件源替换方法**

### Ubuntu 20

```bash
cat > /etc/apt/sources.list <<'EOF'
deb http://mirrors.tencentyun.com/ubuntu/ focal main restricted universe multiverse
deb http://mirrors.tencentyun.com/ubuntu/ focal-security main restricted universe multiverse
deb http://mirrors.tencentyun.com/ubuntu/ focal-updates main restricted universe multiverse
#deb http://mirrors.tencentyun.com/ubuntu/ focal-proposed main restricted universe multiverse
#deb http://mirrors.tencentyun.com/ubuntu/ focal-backports main restricted universe multiverse
deb-src http://mirrors.tencentyun.com/ubuntu/ focal main restricted universe multiverse
deb-src http://mirrors.tencentyun.com/ubuntu/ focal-security main restricted universe multiverse
deb-src http://mirrors.tencentyun.com/ubuntu/ focal-updates main restricted universe multiverse
#deb-src http://mirrors.tencentyun.com/ubuntu/ focal-proposed main restricted universe multiverse
#deb-src http://mirrors.tencentyun.com/ubuntu/ focal-backports main restricted universe multiverse
EOF
```

### Ubuntu 18

```bash
cat > /etc/apt/sources.list <<'EOF'
deb http://mirrors.tencentyun.com/ubuntu/ bionic main restricted universe multiverse
deb http://mirrors.tencentyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb http://mirrors.tencentyun.com/ubuntu/ bionic-updates main restricted universe multiverse
#deb http://mirrors.tencentyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
#deb http://mirrors.tencentyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb-src http://mirrors.tencentyun.com/ubuntu/ bionic main restricted universe multiverse
deb-src http://mirrors.tencentyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb-src http://mirrors.tencentyun.com/ubuntu/ bionic-updates main restricted universe multiverse
#deb-src http://mirrors.tencentyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
#deb-src http://mirrors.tencentyun.com/ubuntu/ bionic-backports main restricted universe multiverse
EOF
```

### Ubuntu 16

```bash
cat > /etc/apt/sources.list <<'EOF'
deb http://mirrors.tencentyun.com/ubuntu/ xenial main restricted universe multiverse
deb http://mirrors.tencentyun.com/ubuntu/ xenial-security main restricted universe multiverse
deb http://mirrors.tencentyun.com/ubuntu/ xenial-updates main restricted universe multiverse
deb-src http://mirrors.tencentyun.com/ubuntu/ xenial main restricted universe multiverse
deb-src http://mirrors.tencentyun.com/ubuntu/ xenial-security main restricted universe multiverse
deb-src http://mirrors.tencentyun.com/ubuntu/ xenial-updates main restricted universe multiverse
EOF
```
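
补充一个小技巧:如果不想整体覆盖 sources.list,也可以只把默认源的域名替换成腾讯云软件源,下面是一个示意(假设基础镜像默认使用 archive.ubuntu.com 和 security.ubuntu.com):

```bash
# 将 Ubuntu 官方源域名替换为腾讯云内网软件源,不依赖具体版本号(focal/bionic/xenial)
sed -i 's|http://archive.ubuntu.com|http://mirrors.tencentyun.com|g; s|http://security.ubuntu.com|http://mirrors.tencentyun.com|g' /etc/apt/sources.list
apt update -y
```

这种方式在构建多版本基础镜像时更省事。
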
## Debian

先根据 Debian 发行版替换软件源,然后执行 `apt update -y` 更新软件源,最后再使用 `apt install -y xxx` 安装需要的软件包。

**下面是各发行版的软件源替换方法**

### Debian 10

```bash
cat > /etc/apt/sources.list <<'EOF'
deb http://mirrors.tencentyun.com/debian buster main contrib non-free
# deb-src http://mirrors.tencentyun.com/debian buster main contrib non-free
deb http://mirrors.tencentyun.com/debian buster-updates main contrib non-free
# deb-src http://mirrors.tencentyun.com/debian buster-updates main contrib non-free
deb http://mirrors.tencentyun.com/debian-security buster/updates main contrib non-free

# deb-src http://mirrors.tencentyun.com/debian-security buster/updates main contrib non-free
# deb http://mirrors.tencentyun.com/debian buster-backports main contrib non-free
# deb-src http://mirrors.tencentyun.com/debian buster-backports main contrib non-free
# deb http://mirrors.tencentyun.com/debian buster-proposed-updates main contrib non-free
# deb-src http://mirrors.tencentyun.com/debian buster-proposed-updates main contrib non-free
EOF
```

### Debian 9

```bash
cat > /etc/apt/sources.list <<'EOF'
deb http://mirrors.tencentyun.com/debian stretch main contrib non-free
deb http://mirrors.tencentyun.com/debian stretch-updates main contrib non-free
deb http://mirrors.tencentyun.com/debian-security stretch/updates main
#deb http://mirrors.tencentyun.com/debian stretch-backports main contrib non-free
#deb http://mirrors.tencentyun.com/debian stretch-proposed-updates main contrib non-free

deb-src http://mirrors.tencentyun.com/debian stretch main contrib non-free
deb-src http://mirrors.tencentyun.com/debian stretch-updates main contrib non-free
deb-src http://mirrors.tencentyun.com/debian-security stretch/updates main
#deb-src http://mirrors.tencentyun.com/debian stretch-backports main contrib non-free
#deb-src http://mirrors.tencentyun.com/debian stretch-proposed-updates main contrib non-free
EOF
```
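
同样地,Debian 也可以只替换默认源域名来加速,示意如下(假设基础镜像默认使用 deb.debian.org 和 security.debian.org,安装 curl 仅为示例):

```bash
# 将 Debian 官方源替换为腾讯云内网软件源后安装软件包
sed -i 's|http://deb.debian.org|http://mirrors.tencentyun.com|g; s|http://security.debian.org|http://mirrors.tencentyun.com|g' /etc/apt/sources.list
apt update -y && apt install -y curl
```
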
## CentOS

先删除 CentOS 镜像中所有自带软件源:
```bash
rm -f /etc/yum.repos.d/*
```

再根据 CentOS 发行版替换软件源,然后执行下面命令更新缓存:

```bash
yum clean all
yum makecache
```

最后再使用 `yum install -y xxx` 来安装需要的软件包。

**下面是各发行版的软件源替换方法**

### CentOS 8

```bash
cat > /etc/yum.repos.d/CentOS-Base.repo <<'EOF'
# Qcloud-Base.repo

[BaseOS]
name=Qcloud-$releasever - BaseOS
baseurl=http://mirrors.tencentyun.com/centos/$releasever/BaseOS/$basearch/os/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Qcloud-8
EOF

cat > /etc/yum.repos.d/CentOS-Epel.repo <<'EOF'
[epel]
name=EPEL for redhat/centos $releasever - $basearch
baseurl=http://mirrors.tencentyun.com/epel/$releasever/Everything/$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8
EOF

cat > /etc/yum.repos.d/CentOS-centosplus.repo <<'EOF'
# Qcloud-centosplus.repo

[centosplus]
name=Qcloud-$releasever - Plus
baseurl=http://mirrors.tencentyun.com/centos/$releasever/centosplus/$basearch/os/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Qcloud-8
EOF

cat > /etc/yum.repos.d/Qcloud-Extras.repo <<'EOF'
# Qcloud-Extras.repo

[extras]
name=Qcloud-$releasever - Extras
baseurl=http://mirrors.tencentyun.com/centos/$releasever/extras/$basearch/os/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Qcloud-8
EOF

cat > /etc/yum.repos.d/Qcloud-Devel.repo <<'EOF'
# Qcloud-Devel.repo

[Devel]
name=Qcloud-$releasever - Devel WARNING! FOR BUILDROOT USE ONLY!
baseurl=http://mirrors.tencentyun.com/$contentdir/$releasever/Devel/$basearch/os/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Qcloud-8
EOF

cat > /etc/yum.repos.d/Qcloud-AppStream.repo <<'EOF'
# Qcloud-AppStream.repo

[AppStream]
name=Qcloud-$releasever - AppStream
baseurl=http://mirrors.tencentyun.com/centos/$releasever/AppStream/$basearch/os/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Qcloud-8
EOF

cat > /etc/yum.repos.d/Qcloud-PowerTools.repo <<'EOF'
# Qcloud-PowerTools.repo

[PowerTools]
name=Qcloud-$releasever - PowerTools
baseurl=http://mirrors.tencentyun.com/centos/$releasever/PowerTools/$basearch/os/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Qcloud-8
EOF

cat > /etc/yum.repos.d/Qcloud-HA.repo <<'EOF'
# Qcloud-HA.repo

[HighAvailability]
name=Qcloud-$releasever - HA
baseurl=http://mirrors.tencentyun.com/$contentdir/$releasever/HighAvailability/$basearch/os/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Qcloud-8
EOF
```

### CentOS 7

```bash
cat > /etc/yum.repos.d/CentOS-Base.repo <<'EOF'
[extras]
gpgcheck=1
gpgkey=http://mirrors.tencentyun.com/centos/RPM-GPG-KEY-CentOS-7
enabled=1
baseurl=http://mirrors.tencentyun.com/centos/$releasever/extras/$basearch/
name=Qcloud centos extras - $basearch
[os]
gpgcheck=1
gpgkey=http://mirrors.tencentyun.com/centos/RPM-GPG-KEY-CentOS-7
enabled=1
baseurl=http://mirrors.tencentyun.com/centos/$releasever/os/$basearch/
name=Qcloud centos os - $basearch
[updates]
gpgcheck=1
gpgkey=http://mirrors.tencentyun.com/centos/RPM-GPG-KEY-CentOS-7
enabled=1
baseurl=http://mirrors.tencentyun.com/centos/$releasever/updates/$basearch/
name=Qcloud centos updates - $basearch
EOF

cat > /etc/yum.repos.d/CentOS-Epel.repo <<'EOF'
[epel]
name=EPEL for redhat/centos $releasever - $basearch
failovermethod=priority
gpgcheck=1
gpgkey=http://mirrors.tencentyun.com/epel/RPM-GPG-KEY-EPEL-7
enabled=1
baseurl=http://mirrors.tencentyun.com/epel/$releasever/$basearch/
EOF
```

### CentOS 6

```bash
cat > /etc/yum.repos.d/CentOS-Base.repo <<'EOF'
[extras]
gpgcheck=1
gpgkey=http://mirrors.tencentyun.com/centos/RPM-GPG-KEY-CentOS-6
enabled=1
baseurl=http://mirrors.tencentyun.com/centos/$releasever/extras/$basearch/
name=Qcloud centos extras - $basearch
[os]
gpgcheck=1
gpgkey=http://mirrors.tencentyun.com/centos/RPM-GPG-KEY-CentOS-6
enabled=1
baseurl=http://mirrors.tencentyun.com/centos/$releasever/os/$basearch/
name=Qcloud centos os - $basearch
[updates]
gpgcheck=1
gpgkey=http://mirrors.tencentyun.com/centos/RPM-GPG-KEY-CentOS-6
enabled=1
baseurl=http://mirrors.tencentyun.com/centos/$releasever/updates/$basearch/
name=Qcloud centos updates - $basearch
EOF

cat > /etc/yum.repos.d/CentOS-Epel.repo <<'EOF'
[epel]
name=epel for redhat/centos $releasever - $basearch
failovermethod=priority
gpgcheck=1
gpgkey=http://mirrors.tencentyun.com/epel/RPM-GPG-KEY-EPEL-6
enabled=1
baseurl=http://mirrors.tencentyun.com/epel/$releasever/$basearch/
EOF
```
\ No newline at end of file diff --git a/content/tencent/install-apps/install-gitlab-on-tke.md b/content/tencent/install-apps/install-gitlab-on-tke.md new file mode 100644 index 0000000..a809e9c --- /dev/null +++ b/content/tencent/install-apps/install-gitlab-on-tke.md @@ -0,0 +1,453 @@ +# 自建 Gitlab 代码仓库

本文介绍如何在腾讯云容器服务上部署 Gitlab 代码仓库。

## 前提条件

* 已安装 [Helm](https://helm.sh)。
* 已开启集群访问并配置好 kubeconfig,可以通过 kubectl 
操作集群(参考[官方文档:连接集群](https://cloud.tencent.com/document/product/457/32191))。 + +## 准备 chart + +Gitlab 官方提供了 helm chart,可以下载下来: + +```bash +helm repo add gitlab https://charts.gitlab.io/ +helm fetch gitlab/gitlab --untar +helm fetch gitlab/gitlab-runner --untar +``` + +> 参考 [Gitlab 官方文档: Deployment Guide](https://docs.gitlab.com/charts/installation/deployment.html) + +不过要愉快的部署到腾讯云容器服务,要修改的配置项较多: +* 如果存储使用默认的云硬盘(cbs),容量必须是 10Gi 的倍数,官方 chart 有一些 8Gi 的定义,会导致 pvc 一直 pending,pod 也一致 pending,需要修改一下配置。 +* gitlab 相关组件的容器镜像地址使用的是 gitlab 官方的镜像仓库,在国内拉取可能会失败,需要同步到国内并修改镜像地址。 +* 很多组件和功能可能用不到,建议是最小化安装,不需要的通通禁用,如 nginx-ingress, cert-manager, prometheus 等。 +* 服务暴露方式和 TLS 证书管理,不同平台差异比较大,建议是单独管理,helm 安装时只安装应用本身,ingress 和 tls 相关配置禁用掉。 + +修改这些配置比较繁琐,我已经维护了一份 Gitlab 适配腾讯云容器服务的 chart 包,相关 gitlab 镜像也做了同步,可以实现一键安装。可以通过 git 拉下来: + +```bash +git clone https://github.com/tke-apps/gitlab.git +cd gitlab +``` + +## StorageClass 注意事项 + +像 gitaly, minio 这些组件,是需要挂载持久化存储的,在腾讯云容器服务,默认使用的是云硬盘(CBS),块存储,通常也建议使用这种,不过在使用之前,建议确保默认 StorageClass 支持磁盘容量在线扩容,这个特性需要确保集群版本在 1.18 以上,且安装了 CBS CSI 插件(Out-of-Tree),新版本集群默认会安装。 + +然后找到默认 StorageClass,通常名为 "cbs": + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220721150831.png) + +编辑 yaml: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220721151305.png) + +先确保以下两点,如果不满足,可以删除重建: +* 默认 StorageClass 的 `is-default-class` 注解为 true。 +* provisioner 是 `com.tencent.cloud.csi.cbs`。 + +如果满足,添加字段 `allowVolumeExpansion: true` 并保存。 + +另外,也可以通过 kubectl 修改,先查询 default StorageClass: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220721151628.png) + +然后使用 `kubectl edit sc ` 进行修改。 + +## 部署 Gitlab + +### 准备配置 + +创建 `gitlab.yaml` 配置,分享一下我的配置: + +```yaml +redis: + install: true + master: + nodeSelector: + node.kubernetes.io/instance-type: eklet + persistence: + enabled: false +postgresql: + install: false +minio: + persistence: + enabled: true + volumeName: gitlab-minio + accessMode: ReadWriteMany + size: '100Gi' +gitlab: + gitaly: + persistence: + enabled: true + volumeName: 'gitlab-gitaly' + accessMode: ReadWriteMany + size: 100Gi +global: + hosts: + domain: imroc.cc + https: true + gitlab: + name: gitlab.imroc.cc + https: true + nodeSelector: + node.kubernetes.io/instance-type: eklet + psql: + password: + useSecret: true + secret: gitlab-psql-password-secret + key: password + host: 'pgsql-postgresql.db' + port: 5432 + username: gitlab + database: gitlab +``` + +* redis 作为缓存,不想持久化数据,降低成本。 +* postgresql 使用现有的数据库,不安装,配置上数据库连接信息(数据库密码通过secret存储,提前创建好)。 +* minio 和 gitaly 挂载的存储,使用了 NFS,提前创建好 pv,在 `persistence` 配置里指定 `volumeName` 来绑定 pv。 +* 我的集群是标准集群,有普通节点和超级节点,我希望 gitlab 所有组件都调度到超级节点,global 和 redis 与 minio 里指定 nodeSelector,强制调度到超级节点。 +* 服务暴露方式我用的 istio-ingressgateway,证书也配到 gateway 上的,对外访问方式是 https,在 `global.hosts` 下配置对外访问域名,`https` 置为 true(用于页面内的连接跳转,避免https页面跳到http链接)。 + + +`gitlab-psql-password-secret.yaml`(存 postgresql 密码的 secret): + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: gitlab-psql-password-secret + namespace: gitlab +type: Opaque +stringData: + password: '123456' +``` + +gitaly 和 minio 挂载的存储我使用 NFS,提前创建好 CFS 实例和相应的文件夹路径,并 `chmod 0777 ` 修改目录权限,避免因权限问题导致 pod 启动失败。以下分别是它们的 pv yaml 定义: + +`minio-nfs-pv.yaml`: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: gitlab-minio +spec: + accessModes: + - ReadWriteMany + capacity: + storage: 100Gi + nfs: + path: /gitlab/minio + server: 10.10.0.15 + persistentVolumeReclaimPolicy: Retain + volumeMode: Filesystem + storageClassName: 'cbs' +``` + +`gitaly-nfs-pv.yaml`: + +```yaml 
+apiVersion: v1 +kind: PersistentVolume +metadata: + name: gitlab-gitaly +spec: + accessModes: + - ReadWriteMany + capacity: + storage: 100Gi + nfs: + path: /gitlab/gitaly + server: 10.10.0.15 + persistentVolumeReclaimPolicy: Retain + volumeMode: Filesystem + storageClassName: 'cbs' +``` + +* `storageClassName` 我使用默认的 StorageClass 名称,因为部署配置里没指定 storageClass 会自动给 pvc 加上默认的,如果 pv 跟 pvc 的 `storageClassName` 不匹配,会导致调度失败。 + +上述 pv 和 secret 是 gitlab 应用依赖的,需要在部署 gitlab 之前先 apply 到集群: + +```bash +kubectl apply -f gitlab-psql-password-secret.yaml +kubectl apply -f minio-nfs-pv.yaml +kubectl apply -f gitaly-nfs-pv.yaml +``` + +### 安装 gitlab + +使用 helm 安装: + +```bash +helm upgrade -n gitlab --install gitlab -f gitlab.yaml ./gitlab +``` + +检查 gitlab 组件是否正常运行: + +```bash +$ kubectl -n gitlab get pod +NAME READY STATUS RESTARTS AGE +gitlab-gitaly-0 1/1 Running 0 8m +gitlab-gitlab-exporter-7bc89d678-d4c7h 1/1 Running 0 8m +gitlab-gitlab-shell-77d99c8b45-kbfmd 1/1 Running 0 8m +gitlab-kas-549b4cf77c-thjrv 1/1 Running 0 8m +gitlab-migrations-1-2pnx7 0/1 Completed 0 8m +gitlab-minio-7b57f77ccb-g9mqb 1/1 Running 0 8m +gitlab-minio-create-buckets-1-hvz9g 0/1 Completed 0 6m +gitlab-redis-master-0 2/2 Running 0 6m +gitlab-sidekiq-all-in-1-v2-5f8c64987f-jhtv9 1/1 Running 0 8m +gitlab-toolbox-66bbb6d4dc-qff92 1/1 Running 0 8m +gitlab-webservice-default-868fbf9fbc-9cb8g 2/2 Running 0 8m +``` + +> 后续想卸载可使用这个命令: `helm -n gitlab uninstall gitlab` + +### 暴露 Gitlab 服务 + +查看 service: + +```bash +$ kubectl -n gitlab get service +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +gitlab-gitaly ClusterIP None 8075/TCP,9236/TCP 8m +gitlab-gitlab-exporter ClusterIP 172.16.189.22 9168/TCP 8m +gitlab-gitlab-shell ClusterIP 172.16.251.106 22/TCP 8m +gitlab-kas ClusterIP 172.16.245.70 8150/TCP,8153/TCP,8154/TCP,8151/TCP 8m +gitlab-minio-svc ClusterIP 172.16.187.127 9000/TCP 8m +gitlab-redis-headless ClusterIP None 6379/TCP 8m +gitlab-redis-master ClusterIP 172.16.156.40 6379/TCP 8m +gitlab-redis-metrics ClusterIP 172.16.196.188 9121/TCP 8m +gitlab-webservice-default ClusterIP 172.16.143.4 8080/TCP,8181/TCP,8083/TCP 8m +``` + +其中带 `webservice` 的 service 是 Gitlab 访问总入口,需要特别注意的是,端口是 8181,不是 8080 那个。 + +我使用 istio-ingressgateway,Gateway 本身已提前监听好 443 并挂好证书: + +```bash +kubectl -n external get gw imroc -o yaml +``` + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: imroc + namespace: external +spec: + selector: + app: istio-ingressgateway + istio: ingressgateway + servers: + - port: + number: 443 + name: HTTPS-443-pp0c + protocol: HTTPS + hosts: + - imroc.cc + - "*.imroc.cc" + tls: + mode: SIMPLE + credentialName: imroc-cc-crt-secret +``` + +只需创建一个 VirtualService,将 gitlab 服务与 Gateway 绑定,暴露出去。 + +`gitlab-vs.yaml`: + +```yaml +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: gitlab-imroc-cc + namespace: gitlab +spec: + gateways: + - external/imroc + hosts: + - 'gitlab.imroc.cc' + http: + - route: + - destination: + host: gitlab-webservice-default + port: + number: 8181 # 注意这里端口是 8181,不是 8080 +``` + +执行创建: + +```bash +kubectl apply -f gitlab-vs.yaml +``` + +除了暴露 https,如果需要通过 ssh 协议来 push 或 pull 代码,需要暴露 22 端口,使用单独的 Gateway 对象来暴露(绑定同一个 ingressgateway),`shell-gw.yaml`: + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: shell + namespace: external +spec: + selector: + app: istio-ingressgateway + istio: ingressgateway + servers: + - port: + number: 22 + name: shell + protocol: TCP + hosts: + - "*" +``` + +创建 Gateway: + +```bash +kubectl apply -f 
shell-gw.yaml +``` + +为 22 端口创建 VirtualService 并绑定 Gateway,`gitlab-shell-vs.yaml`: + +```yaml +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: gitlab-shell + namespace: gitlab +spec: + gateways: + - external/shell + hosts: + - '*' + tcp: + - match: + - port: 22 + route: + - destination: + host: gitlab-gitlab-shell + port: + number: 22 +``` + +创建 VirutalService: + +```bash +kubectl apply -f gitlab-shell-vs.yaml +``` + +### 获取 root 初始密码并登录 + +服务暴露出来之后,确保 DNS 也正确配置,解析到网关的 IP,我这里则是 istio-ingressgateway 对应的 CLB 的外网 IP。 + +在浏览器中打开 gitlab 外部地址: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220721115146.png) + +自动跳转到登录页面,管理员用户名为 root,密码可通过自动生成的 secret 获取: + +```bash +$ kubectl -n gitlab get secret | grep initial-root-password +gitlab-gitlab-initial-root-password Opaque 1 38m +$ kubectl -n gitlab get secret gitlab-gitlab-initial-root-password -o jsonpath='{.data.password}' | base64 -d +kxe***********************************************************k5 +``` + +拿到密码后输入然后登录即可。 + +## 部署并注册 gitlab-runner + +Gitlab 有很强大的 CI 功能,我们可以在集群中也部署一下 gitlab-runner,如果为代码仓库设置了 CI 流程,可以自动将任务分发给 gitlab-runner 去执行 CI 任务,每个任务再创建单独的 Pod 去运行: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/gitlab-runner-arch.png) + +下面介绍 gitlab-runner 的部署与注册方法。 + +### 获取注册 token + +在【Admin】-【Overview】-【Runners】 复制注册 token: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220721115450.png) + +也可以通过 kubectl 获取 secret 得到 token: + +```bash +$ kubectl -n gitlab get secret gitlab-gitlab-runner-secret -o jsonpath='{.data.runner-registration-token}' | base64 -d +AF************************************************************kF +``` + +### 准备配置 + +`gitlab-runner.yaml`: + +```yaml +runnerRegistrationToken: AF************************************************************kF +gitlabUrl: 'https://gitlab.imroc.cc' +runners: + locked: false + config: | + [[runners]] + environment = ["FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=1"] + [runners.kubernetes] + image = "ubuntu:20.04" +``` + +注意: +* `runnerRegistrationToken` 替换为上一步获取到的 token。 +* `gitlabUrl` 替换为 gitlab 访问地址。 +* 超级节点(EKS)的 Pod,不支持 attach,如果 runner 调度到超级节点(EKS) 就会有问题,打开 runer [FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY](https://docs.gitlab.com/runner/configuration/feature-flags.html#available-feature-flags) 的 feature flag 来换成 exec 方式。 + +### 安装 gitlab-runner + +使用 helm 安装: + +```bash +helm upgrade -n gitlab --install gitlab-runner -f gitlab-runner.yaml ./gitlab-runner +``` + +检查 runner 是否正常运行: + +```bash +$ kubectl -n gitlab get pod | grep runner +gitlab-runner-6fb794bb6b-s6n5h 1/1 Running 0 2m17s +``` + +> 后续想卸载可使用这个命令: `helm -n gitlab uninstall gitlab-runner` + +### 检查是否注册成功 + +进入 Gitlab 【Admin】-【Overview】-【Runners】页面检查 runner 是否注册成功: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220721130051.png) + +## 附录 +### 测试场景 + +如果只是测试下 Gitlab,不长期使用,在不需要的时候可以把所有副本缩为 0 以节约成本: + +```bash +kubectl get deployments.v1.apps | grep -v NAME | awk '{print $1}' | xargs -I {} kubectl scale deployments.v1.apps/{} --replicas=0 +kubectl get sts | grep -v NAME | awk '{print $1}' | xargs -I {} kubectl scale sts/{} --replicas=0 +``` + +在需要用的时候置为 1: + +```bash +kubectl get deployments.v1.apps | grep -v NAME | awk '{print $1}' | xargs -I {} kubectl scale deployments.v1.apps/{} --replicas=1 +kubectl get sts | grep -v NAME | awk '{print $1}' | xargs -I {} kubectl scale sts/{} --replicas=1 +``` + +如果使用了 `https://github.com/tke-apps/gitlab` 这个仓库,可以直接用以下命令缩0: + +```bash +make scale0 +``` + +扩到1: + +```bash +make scale1 +``` 
\ No newline at end of file diff --git a/content/tencent/install-apps/install-harbor-on-tke.md b/content/tencent/install-apps/install-harbor-on-tke.md new file mode 100644 index 0000000..684b98e --- /dev/null +++ b/content/tencent/install-apps/install-harbor-on-tke.md @@ -0,0 +1,288 @@
# 自建 Harbor 镜像仓库

## 概述

腾讯云有 [容器镜像服务 TCR](https://cloud.tencent.com/product/tcr),是企业级容器镜像仓库,能满足绝大多数镜像仓库的需求,如果需要使用镜像仓库,可以首选 TCR。如果考虑到成本,或想使用 Harbor 的高级功能 (如 [Proxy Cache](https://goharbor.io/docs/2.1.0/administration/configure-proxy-cache/)),也可以自建 Harbor 镜像仓库,本文介绍如何在腾讯云容器服务中部署 Harbor 作为自建的容器镜像仓库。

## 前提条件

* 已安装 [Helm](https://helm.sh)。
* 已开启集群访问并配置好 kubeconfig,可以通过 kubectl 操作集群(参考[官方文档:连接集群](https://cloud.tencent.com/document/product/457/32191))。

## 操作步骤

### 准备 COS 对象存储

镜像的存储建议放对象存储,因为容量大,可扩展,成本低,速度还快。腾讯云上的对象存储是 [COS](https://cloud.tencent.com/product/cos),而 harbor 的存储驱动暂不支持 COS,不过 COS 自身兼容 S3,所以可以配置 harbor 使用 S3 存储驱动。

下面我们登录腾讯云账号,在 [COS 控制台](https://console.cloud.tencent.com/cos/bucket) 创建一个存储桶:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220718202301.png)

记录一下如下信息,后面会用到:
* `region`: 存储桶所在地域,如 `ap-chengdu`,参考 [地域和可用区](https://cloud.tencent.com/document/product/213/6091)。
* `bucket`: 存储桶名称,如 `registry-12*******6` (有 appid 后缀)。
* `regionendpoint`: 类似 `https://cos.<region>.myqcloud.com` 这种格式的 url,如 `https://cos.ap-chengdu.myqcloud.com`。

### 创建云 API 密钥

在 [访问密钥](https://console.cloud.tencent.com/cam/capi) 这里新建密钥:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220718203635.png)

> 如果之前已经新建过,可跳过此步骤。

记录一下生成的 `SecretId` 和 `SecretKey`,后面需要用。

### 准备 chart

```bash
helm repo add harbor https://helm.goharbor.io
helm fetch harbor/harbor --untar
```

* 参考 [Harbor 官方文档: Deploying Harbor with High Availability via Helm](https://goharbor.io/docs/edge/install-config/harbor-ha-helm/)
* 查看 `./harbor/values.yaml` 可以看到配置项。

### 准备配置

`harbor-values.yaml`:

```yaml
expose:
  type: clusterIP
  tls:
    enabled: false # 建议关闭 tls,如果对外需要 https 访问,可以将 TLS 放到前面的 7 层代理进行配置。
externalURL: https://registry.imroc.cc # 镜像仓库的对外访问地址
persistence:
  imageChartStorage:
    type: s3
    s3: # 务必修改! COS 相关配置
      region: ap-chengdu
      bucket: harbor-12*******6
      accesskey: AKI*******************************zv # SecretId
      secretkey: g5****************************FR # SecretKey
      regionendpoint: https://cos.ap-chengdu.myqcloud.com
      rootdirectory: / # 存储桶中存储镜像数据的路径
  persistentVolumeClaim:
    registry:
      existingClaim: 'registry-registry'
    jobservice:
      existingClaim: "registry-jobservice"
harborAdminPassword: '123456' # 务必修改! 
harbor 管理员登录密码 +chartmuseum: + enabled: false +trivy: + enabled: false +notary: + enabled: false +database: + type: external + external: + host: 'pgsql-postgresql.db' + username: 'postgres' + password: '123456' + coreDatabase: 'registry' +redis: + type: external + external: + addr: 'redis.db:6379' + coreDatabaseIndex: "10" + jobserviceDatabaseIndex: "11" + registryDatabaseIndex: "12" + chartmuseumDatabaseIndex: "13" + trivyAdapterIndex: "14" +``` + +注意事项: +* `expose` 配置暴露服务,我这里打算用其它方式暴露(istio-ingress-gateway),不使用 Ingress, LoadBalancer 之类的方式,所以 type 置为 clusterIP (表示仅集群内访问);另外,tls 也不需要,都是在 gateway 上配置就行。 +* `s3` 配置实为 COS 相关配置,将前面步骤记录的信息填上去。 +* chartmuseum, trivy, notary 我都不需要,所以 `enabled` 都设为 `false`。 +* `harborAdminPassword` 是 harbor 管理员登录密码,设置一下。 +* `database` 是配置 postgresql 数据库,我使用现成的数据库,配置 type 为 external 并写上相关连接配置。 +* `redis` 是配置 redis 缓存,我使用现成的 redis,配置 type 为 external 并写上相关连接配置。 +* `persistentVolumeClaim` 配置持久化存储,我这里只有 `registry` 和 `jobservice` 模块需要挂载存储,存储我挂载的 CFS (腾讯云 NFS 服务),指定 `existingClaim` 为提前创建好的 pvc,参考附录【挂载 CFS】。 + +### 安装 + +```bash +helm upgrade --install -n registry -f harbor-values.yaml registry ./harbor +``` + +> 后续如需卸载可以执行: helm uninstall registry + +检查 pod 是否正常启动: + +```bash +$ kubectl -n registry get pod +NAME READY STATUS RESTARTS AGE +registry-harbor-core-55d577c7-l9k5j 1/1 Running 0 1m +registry-harbor-jobservice-66846c575-dbvdz 1/1 Running 0 1m +registry-harbor-nginx-7d94c9446c-z6rkn 1/1 Running 0 1m +registry-harbor-portal-d87bc7554-psp2r 1/1 Running 0 1m +registry-harbor-registry-66d899c9c9-v2w7r 2/2 Running 0 1m +``` + +检查自动创建的 service: + +```bash +$ kubectl -n registry get svc +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +harbor ClusterIP 172.16.195.61 80/TCP 1m +registry-harbor-core ClusterIP 172.16.244.174 80/TCP 1m +registry-harbor-jobservice ClusterIP 172.16.219.62 80/TCP 1m +registry-harbor-portal ClusterIP 172.16.216.247 80/TCP 1m +registry-harbor-registry ClusterIP 172.16.146.201 5000/TCP,8080/TCP 1m +``` + +### 暴露服务 + +我这里使用 istio-ingressgateway 进行暴露,创建 VirtualService 与 Gateway 绑定: + +```yaml +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: registry-imroc-cc + namespace: registry +spec: + gateways: + - external/imroc + hosts: + - 'registry.imroc.cc' + http: + - route: + - destination: + host: harbor + port: + number: 80 +``` + +而 Gateway 则是提前创建好的,监听 443,并配置了证书: + +```yaml +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: imroc + namespace: external +spec: + selector: + app: istio-ingressgateway + istio: ingressgateway + servers: + - hosts: + - imroc.cc + - '*.imroc.cc' + port: + name: HTTPS-443 + number: 443 + protocol: HTTPS + tls: + credentialName: imroc-cc-crt-secret + mode: SIMPLE +``` + +### 验证服务与 COS 最终一致性问题 + +最后,可以登录一下 registry 并 push 下镜像试试: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220718212040.png) + +以上直接 push 成功是比较幸运的情况,通常往往会报 500 错误: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220718212302.png) + +什么原因? 
是因为 COS 是保证最终一致性,当镜像数据 put 成功后,并不能保证马上能 list 到,导致 harbor 以为没 put 成功,从而报错,参考 [这篇文章](https://cloud.tencent.com/developer/article/1855894)。 + +如何解决?可以提工单将指定存储桶改为强一致性。但是由于 COS 底层架构升级的原因,暂时无法后台改配置,预计今年年底后才可以申请,相关工单截图: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220718212820.png) + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220718212838.png) + +临时规避的方法可以是:上传失败时重试下,直至上传成功。 + +## 附录 + +### 挂载 CFS + +使用如下 yaml 将 CFS 作为 jobservice 和 registry 模块的持久化存储进行挂载: + +`registry-nfs-pv.yaml`: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: registry-registry +spec: + accessModes: + - ReadWriteMany + capacity: + storage: 10Gi + nfs: + path: /registry/registry + server: 10.10.0.15 + persistentVolumeReclaimPolicy: Retain + storageClassName: "" + volumeMode: Filesystem + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: registry-registry + namespace: registry +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + storageClassName: "" + volumeMode: Filesystem + volumeName: registry-registry + +``` + +`jobservice-nfs-pv.yaml`: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: registry-jobservice +spec: + accessModes: + - ReadWriteMany + capacity: + storage: 10Gi + nfs: + path: /registry/jobservice + server: 10.10.0.15 + persistentVolumeReclaimPolicy: Retain + storageClassName: "" + volumeMode: Filesystem + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: registry-jobservice + namespace: registry +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + storageClassName: "" + volumeMode: Filesystem + volumeName: registry-jobservice +``` + +注意: +* 确保创建的 CFS 与 TKE/EKS 集群在同一个 VPC。 +* nfs 的 server ip 在 [CFS 控制台](https://console.cloud.tencent.com/cfs/fs) 可以查看,替换 yaml 中的 ip 地址。 +* yaml 中如果指定 path ,确保提前创建好,且 `chmod 0777 ` 一下,避免因权限问题导致无法启动。 \ No newline at end of file diff --git a/content/tencent/install-apps/install-kubesphere-on-tke.md b/content/tencent/install-apps/install-kubesphere-on-tke.md new file mode 100644 index 0000000..3253cf3 --- /dev/null +++ b/content/tencent/install-apps/install-kubesphere-on-tke.md @@ -0,0 +1,174 @@ +# 安装 KubeSphere + +## 概述 + +本文介绍在腾讯云容器服务上如何安装 KubeSphere 及其踩坑与注意事项。 + +## 安装步骤 + +具体安装步骤参考 KubeSphere 官方文档:[在腾讯云 TKE 安装 KubeSphere](https://kubesphere.io/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-ks-on-tencent-tke/)。 + +## 踩坑与注意事项 + +### cbs 磁盘容量以 10Gi 为倍数 + +腾讯云容器服务默认使用 CBS 云硬盘作为存储,容量只支持 10Gi 的倍数,如果定义 pvc 时指定的容量不是 10Gi 的倍数,就会挂盘失败。 + +安装 KubeSphere 时,修改下 `ClusterConfiguration` 中各个组件的 `volumeSize` 配置,确保是 10Gi 的倍数。 + +### 卸载卡住与卸载不干净导致重装失败 + +有时安装出问题,希望卸载重装,使用 KubeSphere 官方文档 [从 Kubernetes 上卸载 KubeSphere](https://kubesphere.io/zh/docs/installing-on-kubernetes/uninstall-kubesphere-from-k8s/) 中的 `kubesphere-delete.sh` 脚本进行清理,可能会出现卡住的情况。 + +通常是有 finalizer 的原因: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/企业微信截图_06c82094-d4da-4199-9380-78cc76c05810.png) + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/企业微信截图_cc7a9842-618d-4d77-9f6e-43a5ffb078e3.png) + +编辑资源删除相应 finalizer 即可。 + +如果清理不干净,重装还会报错: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/dirty-clusterrole.png) + +通常是关联的一些 MutatingWebhookConfiguration,ValidatingWebhookConfiguration, ClusterRole, ClusterRoleBinding 等资源没清理,可以根据 ks-installer 日志定位并清理。 + +### 监控不兼容导致看不到超级节点中 Pod 的监控 + +KubeSphere 部署完后看工作负载的 Pod 列表,没有超级节点上 Pod 的监控数据: + 
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220901152659.png) + +是因为 KubeSphere 启用的监控,采集 cadvisor 监控数据的采集规则是,访问所有节点的 10250 端口去拉监控数据,而超级节点的 IP 是个无法路由的 “假” IP,所以拉不到数据。 + +解决方案:按照以下步骤增加自定义采集规则。 + +1. 准备 secret yaml `scrape-config.yaml`: + +```yaml +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: additional-scrape-configs + namespace: kubesphere-monitoring-system +stringData: + additional-scrape-configs.yaml: |- + - job_name: kubelet # eks cadvisor 监控,为兼容 ks 查询,固定 job 名为 kubelet + honor_timestamps: true + metrics_path: '/metrics' + params: + collect[]: + - 'ipvs' + scheme: http + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_tke_cloud_tencent_com_pod_type] + regex: eklet + action: keep + - source_labels: [__meta_kubernetes_pod_phase] + regex: Running + action: keep + - source_labels: [__meta_kubernetes_pod_ip] + separator: ; + regex: (.*) + target_label: __address__ + replacement: ${1}:9100 + action: replace + - source_labels: [__meta_kubernetes_pod_name] + separator: ; + regex: (.*) + target_label: pod + replacement: ${1} + action: replace + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: namespace + replacement: ${1} + action: replace + metric_relabel_configs: + - source_labels: [__name__] + separator: ; + regex: container_.* + replacement: $1 + action: keep + - target_label: metrics_path + replacement: /metrics/cadvisor + action: replace + - job_name: eks # eks cadvisor 之外的其它监控 + honor_timestamps: true + metrics_path: '/metrics' + params: + collect[]: + - 'ipvs' + scheme: http + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_tke_cloud_tencent_com_pod_type] + regex: eklet + action: keep + - source_labels: [__meta_kubernetes_pod_phase] + regex: Running + action: keep + - source_labels: [__meta_kubernetes_pod_ip] + separator: ; + regex: (.*) + target_label: __address__ + replacement: ${1}:9100 + action: replace + - source_labels: [__meta_kubernetes_pod_name] + separator: ; + regex: (.*) + target_label: pod + replacement: ${1} + action: replace + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: namespace + replacement: ${1} + action: replace + metric_relabel_configs: + - source_labels: [__name__] + separator: ; + regex: (container_.*|pod_.*|kubelet_.*) + replacement: $1 + action: keep +``` + +2. 创建 secret: + +```bash +kubectl apply -f scrape-config.yaml +``` + +3. 
修改 Prometheus CR: + +```bash +kubectl -n kubesphere-monitoring-system edit prometheuses.monitoring.coreos.com k8s +``` + +加入 `additionalScrapeConfigs`: + +```yaml +spec: + additionalScrapeConfigs: + key: additional-scrape-configs.yaml + name: additional-scrape-configs +``` + +### ks-apiserver 出现 crash + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/ks-apiserver-crash.png) + +一般是 kubesphere 的 chart 包不完善,crd 没装完整,可以手动装一下: + +```bash +kubectl apply -f https://raw.githubusercontent.com/kubesphere/notification-manager/master/config/bundle.yaml +``` + +> 参考: https://kubesphere.com.cn/forum/d/7610-ks-330-ks-apiserver-crash/3 \ No newline at end of file diff --git a/content/tencent/logging/ingressgateway.md b/content/tencent/logging/ingressgateway.md new file mode 100644 index 0000000..f774ba1 --- /dev/null +++ b/content/tencent/logging/ingressgateway.md @@ -0,0 +1,14 @@ +# ingressgateway 和 egressgateway 日志采集与检索 + +## + +2022-08-25T09:28:36.316+0800 +2022-08-25T01:45:16.897Z +%Y-%m-%dT%H:%M:%S.%f%z + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220825095215.png) + + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220825095615.png) + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220825095845.png) diff --git a/content/tencent/monitoring/grafana-dashboard-for-supernode-pod.md b/content/tencent/monitoring/grafana-dashboard-for-supernode-pod.md new file mode 100644 index 0000000..8a74407 --- /dev/null +++ b/content/tencent/monitoring/grafana-dashboard-for-supernode-pod.md @@ -0,0 +1,2282 @@ +# 超级节点 Pod 监控仪表盘 + +## 概述 + +超级节点的 Pod 9100 端口提供了 Pod 所在虚拟机,机器维度的监控指标,实际上就是 `node_exporter` 的指标,只不过指标名由 `node_` 前缀改为了 `pod_` 前缀,基于这些指标可以捏出超级节点 Pod 监控仪表盘。 + +本文提供现成的超级节点 Pod 虚拟机维度的 Grafana 监控仪表盘。 + +> 需要 Grafana 9 以上版本。 + +## 导入仪表盘 + +在 Grafana 中点击 Import: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220815182825.png) + +将下面的 json 复制粘贴进去: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220815182909.png) + +```json +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "panel", + "id": "bargauge", + "name": "Bar gauge", + "version": "" + }, + { + "type": "panel", + "id": "gauge", + "name": "Gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "9.0.6" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 53, + "panels": [], + "title": "概览", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + 
"defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0.8 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 36, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "(\n 1 - \n avg(irate(pod_cpu_seconds_total{cluster=\"$cluster\", mode=\"idle\", namespace=\"$namespace\", pod=\"$pod\"}[2m]))\n)", + "refId": "A" + } + ], + "title": "CPU 使用率", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0.8 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 3, + "y": 1 + }, + "id": 43, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "1 - (\n pod_memory_MemAvailable_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"} / pod_memory_MemTotal_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}\n)", + "refId": "A" + } + ], + "title": "内存使用率", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 10737418240 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 6, + "y": 1 + }, + "id": 38, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(irate(pod_network_receive_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", device=\"eth0\"}[2m]))", + "refId": "A" + } + ], + "title": "入流量", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 10737418240 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 9, + "y": 1 + }, + "id": 39, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": 
false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(irate(pod_network_transmit_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", device=\"eth0\"}[2m]))", + "refId": "A" + } + ], + "title": "出流量", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 12, + "y": 1 + }, + "id": 44, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(pod_load5{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) / count(count(pod_cpu_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (cpu))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "平均负载(5m)", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 15, + "y": 1 + }, + "id": 51, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(pod_load5{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) / count(count(pod_cpu_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (cpu))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "平均负载(5m)", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 0.8 + }, + { + "color": "red", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 18, + "y": 1 + }, + "id": 48, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "1 - (\n 
pod_filesystem_avail_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", mountpoint=\"/\"} /\n pod_filesystem_size_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", mountpoint=\"/\"}\n)", + "refId": "A" + } + ], + "title": "系统盘使用率", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 1073741824 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 21, + "y": 1 + }, + "id": 49, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "pod_memory_MemAvailable_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}", + "refId": "A" + } + ], + "title": "剩余内存", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false, + "minWidth": 50 + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 10737418240 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "磁盘使用率" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "custom.displayMode", + "value": "lcd-gauge" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "剩余空间" + }, + "properties": [ + { + "id": "custom.displayMode", + "value": "color-text" + } + ] + } + ] + }, + "gridPos": { + "h": 4, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 29, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Value #A" + } + ] + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pod_filesystem_avail_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}", + "format": "table", + "instant": true, + "range": false, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pod_filesystem_avail_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}", + "format": "table", + "hide": false, + "instant": true, + "range": false, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "1 - \n(\n pod_filesystem_avail_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}\n /\n pod_filesystem_size_bytes{cluster=\"$cluster\", namespace=\"$namespace\", 
pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}\n)", + "format": "table", + "hide": false, + "instant": true, + "range": false, + "refId": "C" + } + ], + "title": "当前空间使用情况", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "mountpoint", + "Value #A", + "Value #B", + "Value #C", + "device", + "fstype" + ] + } + } + }, + { + "id": "seriesToColumns", + "options": { + "byField": "device" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "fstype 2": true, + "fstype 3": true, + "mountpoint 1": false, + "mountpoint 2": true, + "mountpoint 3": true + }, + "indexByName": {}, + "renameByName": { + "Value #A": "剩余空间", + "Value #B": "总容量", + "Value #C": "磁盘使用率", + "device": "设备", + "fstype": "文件系统", + "fstype 1": "文件系统", + "fstype 2": "", + "mountpoint": "挂载点", + "mountpoint 1": "挂载点" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 34, + "options": { + "displayMode": "lcd", + "minVizHeight": 10, + "minVizWidth": 0, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(1 - avg(irate(pod_cpu_seconds_total{cluster=\"$cluster\", mode=\"idle\", namespace=\"$namespace\", pod=\"$pod\"}[2m])) by (cpu))", + "instant": false, + "legendFormat": "cpu{{cpu}}", + "range": true, + "refId": "A" + } + ], + "title": "CPU 使用率", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false, + "minWidth": 50 + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 1000 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "使用率" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "custom.displayMode", + "value": "lcd-gauge" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "剩余数量" + }, + "properties": [ + { + "id": "custom.displayMode", + "value": "color-text" + } + ] + } + ] + }, + "gridPos": { + "h": 4, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 42, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Value #A" + } + ] + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pod_filesystem_files_free{cluster=\"$cluster\", namespace=\"$namespace\", 
pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}", + "format": "table", + "instant": true, + "range": false, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pod_filesystem_files{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}", + "format": "table", + "hide": false, + "instant": true, + "range": false, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "1 - \n(\n pod_filesystem_files_free{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}\n /\n pod_filesystem_files{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}\n)", + "format": "table", + "hide": false, + "instant": true, + "range": false, + "refId": "C" + } + ], + "title": "当前 inode 使用情况", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "mountpoint", + "Value #A", + "Value #B", + "Value #C", + "device", + "fstype" + ] + } + } + }, + { + "id": "seriesToColumns", + "options": { + "byField": "device" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "fstype 2": true, + "fstype 3": true, + "mountpoint 1": false, + "mountpoint 2": true, + "mountpoint 3": true + }, + "indexByName": {}, + "renameByName": { + "Value #A": "剩余数量", + "Value #B": "总量", + "Value #C": "使用率", + "device": "设备", + "fstype": "文件系统", + "fstype 1": "文件系统", + "fstype 2": "", + "mountpoint": "挂载点", + "mountpoint 1": "挂载点" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false, + "minWidth": 50 + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 1000 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 50, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Value #A" + } + ] + }, + "pluginVersion": "9.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pod_memory_MemAvailable_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}", + "format": "table", + "instant": true, + "range": false, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pod_memory_MemTotal_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}", + "format": "table", + "hide": false, + "instant": true, + "range": false, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pod_memory_Buffers_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"} + pod_memory_Cached_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"} + pod_memory_SReclaimable_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}", + "format": "table", + "hide": false, + 
"instant": true, + "range": false, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pod_memory_MemTotal_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"} - pod_memory_Buffers_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"} - pod_memory_Cached_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"} - pod_memory_MemFree_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"} - pod_memory_SReclaimable_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}", + "format": "table", + "hide": false, + "instant": true, + "range": false, + "refId": "D" + } + ], + "title": "内存信息", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "instance", + "Value #A", + "Value #B", + "Value #C", + "Value #D" + ] + } + } + }, + { + "id": "seriesToColumns", + "options": { + "byField": "instance" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "instance": true + }, + "indexByName": { + "Value #A": 2, + "Value #B": 4, + "Value #C": 3, + "Value #D": 1, + "instance": 0 + }, + "renameByName": { + "Value #A": "可用内存", + "Value #B": "总内存", + "Value #C": "buff/cache 内存", + "Value #D": "已使用内存" + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 31, + "panels": [], + "title": "存储", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 10 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 40, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "9.0.4", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pod_filesystem_size_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"} - pod_filesystem_free_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}", + "format": "time_series", + "instant": false, + "legendFormat": "{{mountpoint}} 使用量", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "(pod_filesystem_size_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"} - 
pod_filesystem_free_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}) /\npod_filesystem_size_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}", + "hide": false, + "legendFormat": "{{mountpoint}} 使用率", + "range": true, + "refId": "B" + } + ], + "title": "磁盘空间使用情况", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 10 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 41, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "9.0.4", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pod_filesystem_files{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"} - pod_filesystem_files_free{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}", + "format": "time_series", + "instant": false, + "legendFormat": "{{mountpoint}} 使用量", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "(pod_filesystem_files{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"} - pod_filesystem_files_free{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}) /\npod_filesystem_files{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", fstype!~\"nfs.*|tmpfs\"}", + "hide": false, + "legendFormat": "{{mountpoint}} 使用率", + "range": true, + "refId": "B" + } + ], + "title": "磁盘 inode 使用情况", + "transformations": [], + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 16, + "panels": [], + "title": "计算", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "left", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + 
}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "内存使用率" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 27 + }, + "id": 25, + "options": { + "legend": { + "calcs": [ + "mean", + "max", + "last" + ], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "(\n1 - \n (\n pod_memory_MemAvailable_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"} / \n pod_memory_MemTotal_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}\n )\n)", + "hide": false, + "legendFormat": "内存使用率", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "(1 - avg(irate(pod_cpu_seconds_total{cluster=\"$cluster\", mode=\"idle\", namespace=\"$namespace\", pod=\"$pod\"}[2m])))", + "hide": false, + "legendFormat": "CPU 使用率", + "range": true, + "refId": "A" + } + ], + "title": "CPU/内存 使用率", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 27 + }, + "id": 27, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "(1 - avg(irate(pod_cpu_seconds_total{cluster=\"$cluster\", mode=\"idle\", namespace=\"$namespace\", pod=\"$pod\"}[2m])) by (cpu))", + "legendFormat": "{{cpu}}", + "range": true, + "refId": "A" + } + ], + "title": "CPU 使用率(单核)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": 
"none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 36 + }, + "id": 19, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "pod_load1{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}", + "hide": false, + "legendFormat": "1min", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "pod_load5{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}", + "hide": false, + "legendFormat": "5min", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "pod_load15{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}", + "hide": false, + "legendFormat": "15min", + "range": true, + "refId": "C" + } + ], + "title": "平均负载", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 36 + }, + "id": 22, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(irate(pod_cpu_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", mode!=\"idle\"}[2m])) by (instance, mode)", + "hide": false, + "legendFormat": "{{mode}}", + "range": true, + "refId": "A" + } + ], + "title": "CPU 使用率明细", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 45 + }, + "id": 57, + "panels": [], + "title": "网络", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": 
"auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 46 + }, + "id": 55, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(irate(pod_network_receive_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", device=\"eth0\"}[2m]))", + "legendFormat": "入流量", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "-sum(irate(pod_network_transmit_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", device=\"eth0\"}[2m]))", + "hide": false, + "legendFormat": "出流量", + "range": true, + "refId": "B" + } + ], + "title": "网络流量", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 46 + }, + "id": 58, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(irate(pod_network_receive_drop_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", device=\"eth0\"}[2m]))", + "legendFormat": "接收丢包速率", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "-sum(irate(pod_network_transmit_drop_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", device=\"eth0\"}[2m]))", + "hide": false, + "legendFormat": "发送丢包速率", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(irate(pod_network_receive_errs_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", device=\"eth0\"}[2m]))", + "hide": false, + "legendFormat": "接收错误速率", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "-sum(irate(pod_network_transmit_errs_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", 
device=\"eth0\"}[2m]))", + "hide": false, + "legendFormat": "发送错误速率", + "range": true, + "refId": "D" + } + ], + "title": "异常数据包", + "type": "timeseries" + } + ], + "refresh": false, + "schemaVersion": 36, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(up{}, cluster)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(up{}, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(pod_load1{}, namespace)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "namespace", + "options": [], + "query": { + "query": "label_values(pod_load1{}, namespace)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(pod_load1{namespace=\"$namespace\"}, pod)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "pod", + "options": [], + "query": { + "query": "label_values(pod_load1{namespace=\"$namespace\"}, pod)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "超级节点Pod监控", + "uid": "6FmNqii4k", + "version": 18, + "weekStart": "" +} +``` + +## 效果展示 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220815183733.png) +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220815183756.png) +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220815183812.png) \ No newline at end of file diff --git a/content/tencent/monitoring/prometheus-scrape-config.md b/content/tencent/monitoring/prometheus-scrape-config.md new file mode 100644 index 0000000..330fea5 --- /dev/null +++ b/content/tencent/monitoring/prometheus-scrape-config.md @@ -0,0 +1,240 @@ +# Prometheus 采集配置最佳实践 + +使用 Prometheus 采集腾讯云容器服务的监控数据时如何配置采集规则?主要需要注意的是 kubelet 与 cadvisor 的监控指标采集,本文分享为 Prometheus 配置 `scrape_config` 来采集腾讯云容器服务集群的监控数据的方法。 + +## 普通节点采集规则 + +```yaml + - job_name: "tke-cadvisor" + scheme: https + metrics_path: /metrics/cadvisor # 采集容器 cadvisor 监控数据 + tls_config: + insecure_skip_verify: true # tke 的 kubelet 使用自签证书,忽略证书校验 + authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_instance_type] + regex: eklet # 排除超级节点 + action: drop + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - job_name: "tke-kubelet" + scheme: https + metrics_path: /metrics # 采集 kubelet 自身的监控数据 + tls_config: + insecure_skip_verify: true + authorization: + credentials_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token
+    kubernetes_sd_configs:
+    - role: node
+    relabel_configs:
+    - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_instance_type]
+      regex: eklet
+      action: drop
+    - action: labelmap
+      regex: __meta_kubernetes_node_label_(.+)
+  - job_name: "tke-probes" # 采集容器健康检查数据
+    scheme: https
+    metrics_path: /metrics/probes
+    tls_config:
+      insecure_skip_verify: true
+    authorization:
+      credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+    kubernetes_sd_configs:
+    - role: node
+    relabel_configs:
+    - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_instance_type]
+      regex: eklet
+      action: drop
+    - action: labelmap
+      regex: __meta_kubernetes_node_label_(.+)
+```
+
+* 使用节点服务发现 (`kubernetes_sd_configs` 的 role 为 `node`),抓取所有节点 `kubelet:10250` 暴露的几种监控数据。
+* 如果集群是普通节点与超级节点混用,排除超级节点 (`relabel_configs` 中将带 `node.kubernetes.io/instance-type: eklet` 这种 label 的 node 排除)。
+* TKE 节点上的 kubelet 证书是自签的,需要忽略证书校验,所以 `insecure_skip_verify` 要置为 true。
+* kubelet 通过 `/metrics/cadvisor`、`/metrics` 与 `/metrics/probes` 路径分别暴露了容器 cadvisor 监控数据、kubelet 自身监控数据以及容器健康检查数据,为这三个不同路径分别配置采集 job 进行采集。
+
+## 超级节点采集规则
+
+```yaml
+  - job_name: eks # 采集超级节点监控数据
+    honor_timestamps: true
+    metrics_path: '/metrics' # 所有监控数据都在这个路径
+    params: # 通常需要加参数过滤掉 ipvs 相关的指标,因为可能数据量较大,拉高 Pod 负载。
+      collect[]:
+      - 'ipvs'
+      # - 'cpu'
+      # - 'meminfo'
+      # - 'diskstats'
+      # - 'filesystem'
+      # - 'loadavg'
+      # - 'netdev'
+      # - 'filefd'
+      # - 'pressure'
+      # - 'vmstat'
+    scheme: http
+    kubernetes_sd_configs:
+    - role: pod # 超级节点 Pod 的监控数据暴露在 Pod 自身 IP 的 9100 端口,所以使用 Pod 服务发现
+    relabel_configs:
+    - source_labels: [__meta_kubernetes_pod_annotation_tke_cloud_tencent_com_pod_type]
+      regex: eklet # 只采集超级节点的 Pod
+      action: keep
+    - source_labels: [__meta_kubernetes_pod_phase]
+      regex: Running # 非 Running 状态的 Pod 机器资源已释放,不需要采集
+      action: keep
+    - source_labels: [__meta_kubernetes_pod_ip]
+      separator: ;
+      regex: (.*)
+      target_label: __address__
+      replacement: ${1}:9100 # 监控指标暴露在 Pod 的 9100 端口
+      action: replace
+    - source_labels: [__meta_kubernetes_pod_name]
+      separator: ;
+      regex: (.*)
+      target_label: pod # 将 Pod 名字写到 "pod" label
+      replacement: ${1}
+      action: replace
+    - source_labels: [__meta_kubernetes_namespace]
+      separator: ;
+      regex: (.*)
+      target_label: namespace # 将 Pod 所在 namespace 写到 "namespace" label
+      replacement: ${1}
+      action: replace
+    metric_relabel_configs:
+    - source_labels: [__name__]
+      separator: ;
+      regex: (container_.*|pod_.*|kubelet_.*)
+      replacement: $1
+      action: keep
+```
+
+* 超级节点的监控数据暴露在每个 Pod 的 9100 端口的 `/metrics` 这个 HTTP API 路径(非 HTTPS),使用 Pod 服务发现(`kubernetes_sd_configs` 的 role 为 `pod`),用一个 job 就可以采集完。
+* 超级节点的 Pod 支持通过 `collect[]` 这个查询参数来过滤掉不希望采集的指标,这样可以避免指标数据量过大,导致 Pod 负载升高,通常要过滤掉 `ipvs` 的指标。
+* 如果集群是普通节点与超级节点混用,确保只采集超级节点的 Pod (`relabel_configs` 中只保留有 `tke.cloud.tencent.com/pod-type:eklet` 这个注解的 Pod)。
+* 如果 Pod 的 phase 不是 Running 也无法采集,可以排除。
+* `container_` 开头的指标是 cadvisor 监控数据,`pod_` 前缀指标是超级节点 Pod 所在子机的监控数据(相当于将 `node_exporter` 的 `node_` 前缀指标替换成了 `pod_`),`kubelet_` 前缀指标是超级节点 Pod 子机内兼容 kubelet 的指标(主要是 pvc 存储监控)。
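+
+在正式对接 Prometheus 之前,可以先手动验证下超级节点 Pod 的指标暴露与 `collect[]` 过滤效果(下面的命名空间与 Pod IP 仅为示例,请替换为实际值):
+
+```bash
+# 查出某个超级节点 Pod 的 IP (示例命名空间为 test)
+kubectl -n test get pod -o wide
+
+# 在集群内网络可达的容器或机器上请求该 Pod 的 9100 端口
+# 注意 URL 要加引号,避免 shell 解析 & 与 [] 字符
+curl "http://10.0.0.8:9100/metrics?collect[]=ipvs" | head -n 20
+```
+
+能看到 `pod_`、`container_` 等前缀的指标返回,说明指标暴露正常,再将上面的采集规则下发给 Prometheus 即可。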
"tke-cadvisor" + scheme: https + metrics_path: /metrics/cadvisor + tls_config: + insecure_skip_verify: true + authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_instance_type] + regex: eklet + action: drop + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - job_name: "tke-kubelet" + scheme: https + metrics_path: /metrics + tls_config: + insecure_skip_verify: true + authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_instance_type] + regex: eklet + action: drop + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - job_name: "tke-probes" + scheme: https + metrics_path: /metrics/probes + tls_config: + insecure_skip_verify: true + authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_instance_type] + regex: eklet + action: drop + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - job_name: eks + honor_timestamps: true + metrics_path: '/metrics' + params: + collect[]: ['ipvs'] + # - 'cpu' + # - 'meminfo' + # - 'diskstats' + # - 'filesystem' + # - 'load0vg' + # - 'netdev' + # - 'filefd' + # - 'pressure' + # - 'vmstat' + scheme: http + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_tke_cloud_tencent_com_pod_type] + regex: eklet + action: keep + - source_labels: [__meta_kubernetes_pod_phase] + regex: Running + action: keep + - source_labels: [__meta_kubernetes_pod_ip] + separator: ; + regex: (.*) + target_label: __address__ + replacement: ${1}:9100 + action: replace + - source_labels: [__meta_kubernetes_pod_name] + separator: ; + regex: (.*) + target_label: pod + replacement: ${1} + action: replace + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: namespace + replacement: ${1} + action: replace + metric_relabel_configs: + - source_labels: [__name__] + separator: ; + regex: (container_.*|pod_.*|kubelet_.*) + replacement: $1 + action: keep + storageSpec: + volumeClaimTemplate: + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 100Gi +``` + +## FAQ + +### 为什么使用 collect[] 这种奇怪的参数过滤指标? + +超级节点的 Pod 监控指标使用 `collect[]` 查询参数来过滤不需要的监控指标: + +```bash +curl ${IP}:9100/metrics?collect[]=ipvs&collect[]=vmstat +``` + +为什么要使用这么奇怪的参数名?这是因为 `node_exporter` 就是用的这个参数,超级节点的 Pod 内部引用了 `node_exporter` 的逻辑,[这里](https://github.com/prometheus/node_exporter#filtering-enabled-collectors) 是 `node_exporter` 的 `collect[]` 参数用法说明。 \ No newline at end of file diff --git a/content/tencent/networking/clb-to-pod-directly.md b/content/tencent/networking/clb-to-pod-directly.md new file mode 100644 index 0000000..af4137f --- /dev/null +++ b/content/tencent/networking/clb-to-pod-directly.md @@ -0,0 +1,124 @@ +# 启用 CLB 直通 Pod + +## 概述 + +TKE 提供了 CLB 直通 Pod 的能力,不经过 NodePort,网络链路上少了一跳,带来了一系列好处: + +1. 链路更短,性能会有所提高。 +2. 没有 SNAT,避免了流量集中可能导致的源端口耗尽、conntrack 插入冲突等问题。 +3. 不经过 NodePort,也就不会再经过 k8s 的 iptables/ipvs 转发,从而负载均衡状态就都收敛到了 CLB 这一个地方,可避免负载均衡状态分散导致的全局负载不均问题。 +4. 由于没有 SNAT,天然可以获取真实源 IP,不再需要 `externalTrafficPolicy: Local` 。 +5. 
实现会话保持更简单,只需要让 CLB 开启会话保持即可,不需要设置 Service 的 `sessionAffinity`。 + +虽然 CLB 直通 Pod 提供了这么多好处,但默认不会启用,本文介绍如何在 TKE 上启用 CLB 直通 Pod。 + +## 前提条件 + +1. `Kubernetes`集群版本需要高于 1.12,因为 CLB 直绑 Pod,检查 Pod 是否 Ready,除了看 Pod 是否 Running、是否通过 readinessProbe 外, 还需要看 LB 对 Pod 的健康探测是否通过,这依赖于 `ReadinessGate` 特性,该特性在 Kubernetes 1.12 才开始支持。 +2. 集群网络模式必须开启 `VPC-CNI` 弹性网卡模式,因为目前 LB 直通 Pod 的实现是基于弹性网卡的,普通的网络模式暂时不支持,这个在未来将会支持。 + +## CLB 直通 Pod 启用方法 + +启用方法是在创建 Service 或 Ingress 时,声明一下要使用 CLB 直通 Pod。 + +### Service 声明 CLB 直通 Pod + +当你用 LoadBalancer 的 Service 暴露服务时,需要声明使用直连模式: + +* 如果通过控制台创建 Service,可以勾选 `采用负载均衡直连Pod模式`: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161405.png) + +* 如果通过 yaml 创建 Service,需要为 Service 加上 `service.cloud.tencent.com/direct-access: "true"` 的 annotation: + + ```yaml + apiVersion: v1 + kind: Service + metadata: + annotations: + service.cloud.tencent.com/direct-access: "true" # 关键 + labels: + app: nginx + name: nginx-service-eni + spec: + externalTrafficPolicy: Cluster + ports: + - name: 80-80-no + port: 80 + protocol: TCP + targetPort: 80 + selector: + app: nginx + sessionAffinity: None + type: LoadBalancer + ``` + +### CLB Ingress 声明 CLB 直通 Pod + +当使用 CLB Ingress 暴露服务时,同样也需要声明使用直连模式: + +* 如果通过控制台创建 CLB Ingress,可以勾选 `采用负载均衡直连Pod模式`: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161417.png) + +* 如果通过 yaml 创建 CLB Ingress,需要为 Ingress 加上 `ingress.cloud.tencent.com/direct-access: "true"` 的 annotation: + + ```yaml + apiVersion: networking.k8s.io/v1beta1 + kind: Ingress + metadata: + annotations: + ingress.cloud.tencent.com/direct-access: "true" + kubernetes.io/ingress.class: qcloud + name: test-ingress + namespace: default + spec: + rules: + - http: + paths: + - backend: + serviceName: nginx + servicePort: 80 + path: / + ``` + +启用方法根据集群网络模式有细微差别,见下文分解。 + +### GlobalRouter + VPC-CNI 网络模式混用注意事项 + +如果 TKE 集群创建时,网络模式选择的 [GlobalRouter](https://cloud.tencent.com/document/product/457/50354) ,后面再开启的 [VPC-CNI](https://cloud.tencent.com/document/product/457/50355) ,这样集群的网络模式就是 GlobalRouter + VPC-CNI 两种网络模式混用。 + +这种集群创建的 Pod 默认没有使用弹性网卡,如果要启用 CLB 直通 Pod,首先在部署工作负载的时候,声明一下 Pod 要使用 VPC-CNI 模式 (弹性网卡),具体操作方法是使用 yaml 创建工作负载 (不通过 TKE 控制台),为 Pod 指定 `tke.cloud.tencent.com/networks: tke-route-eni` 这个 annotation 来声明使用弹性网卡,并且为其中一个容器加上 `tke.cloud.tencent.com/eni-ip: "1"` 这样的 requests 与 limits,示例: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx + name: nginx-deployment-eni +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + annotations: + tke.cloud.tencent.com/networks: tke-route-eni + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx + resources: + requests: + tke.cloud.tencent.com/eni-ip: "1" + limits: + tke.cloud.tencent.com/eni-ip: "1" +``` + +## 参考资料 + +* [在 TKE 上使用负载均衡直通 Pod](https://cloud.tencent.com/document/product/457/48793) diff --git a/content/tencent/networking/expose-grpc-with-tcm.md b/content/tencent/networking/expose-grpc-with-tcm.md new file mode 100644 index 0000000..fe8d019 --- /dev/null +++ b/content/tencent/networking/expose-grpc-with-tcm.md @@ -0,0 +1,130 @@ +# 使用 TCM 对外暴露 gRPC 服务 + +## 背景 + +gRPC 是长连接服务,而长连接服务负载不均是通病,因为使用四层负载均衡的话,只能在连接调度层面负载均衡,但不能在请求级别负载均衡。不同连接上的请求数量、网络流量、请求耗时、存活时长等可能都不一样,就容易造成不同 Pod 的负载不一样。而 istio 天然支持 gRPC 负载均衡,即在七层进行负载均衡,可以将不同请求转发到不同后端,从而避免负载不均问题,腾讯云容器服务也对 istio 进行了产品化托管,产品叫 [TCM](https://cloud.tencent.com/product/tcm),本文介绍如何使用 TCM 来暴露 gRPC 服务。 + +## 创建网格 + +进入 
[TCM控制台](https://console.cloud.tencent.com/tke2/mesh),新建一个网格,每个网格可以管理多个 TKE/EKS 集群,创建网格的时候就可以关联集群(创建完之后关联也可以): + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722100428.png) + +边缘代理网关通常会启用 Ingress Gateway,即将内部服务通过 CLB 暴露出来: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722100440.png) + +## 启用 sidecar 自动注入 + +网格创建好后,点进去,在 【服务】-【sidecar自动注入】中勾选要启用自动注入的 namespace: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722100456.png) + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722100510.png) + +gRPC 服务端部署在哪个 namespace 就勾选哪个。 + +## 部署 gRPC 服务端 + +将 gRPC 服务部署到网格中的一个集群,确保部署的 namespace 开启了sidecar自动注入: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: server + namespace: test +spec: + replicas: 1 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - name: server + image: docker.io/imroc/grpc_server:latest + imagePullPolicy: Always +``` + +如果服务端在开启自动注入之前已经部署了,可以重建下服务端 Pod,重建后会触发自动注入。 + +## 创建 Service + +给工作负载关联一个 Service,使用 yaml 创建: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: server + namespace: test + labels: + app: server +spec: + type: ClusterIP + ports: + - port: 8000 + protocol: TCP + targetPort: 50051 + name: grpc + selector: + app: server +``` + +注意: + +- 重点是端口的 name 要以 grpc 开头,也可以直接写 grpc,istio 通过 port name 识别协议类型。 +- 不通过控制台创建的原因主要是因为控制台创建 Service 不支持为端口指定 name。 + +## 创建 Gateway + +如果希望 gRPC 对集群外暴露,istio 需要确保有 Gateway 对象,如果没有创建,可以先创建一个,在 TCM 中这样操作,【Gateway】-【新建】: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722100526.png) + +【网关列表】引用最开始创建的 Ingress Gateway,【协议端口】使用GRPC,指定的端口号为 CLB 要监听的端口号,【Hosts】为服务从外部被访问的IP或域名,通配符 `*` 表示匹配所有: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722100539.png) + +## 创建 VirtualService + +VirtualService 是 istio 描述服务的基本对象,我们使用 VirtualService 将 gRPC 服务关联到 Gateway 上,就可以将服务暴露出去了,在 TCM 上这样操作,【Virtual Service】-【新建】: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722100605.png) + +【名称】随意,【命名空间】为服务端所在命名空间,【关联Hosts】这里可以跟 Gateway 那里的设置保持一致,【挂载Gateway】选择前面创建的 Gateway,【类型】选HTTP(istio中http既可以路由http,也可以用于路由grpc),【匹配条件】删除默认,不写条件,【目的端】选择服务端的 service + port: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722100618.png) + +保存后即可,然后就可以通过 CLB 暴露出来的地址访问 grpc 服务了,并且会自动在请求级别进行负载均衡,CLB 的地址取决于创建出来的 Ingress Gateway 所使用的 CLB,测试一下效果: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722100628.png) + +Virtual Service 如果通过 yaml 创建,可以参考下面示例: + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: server + namespace: test +spec: + gateways: + - test/grpc + hosts: + - '*' + http: + - route: + - destination: + host: server +``` + +## demo仓库 + +包含服务端代码示例、Dockerfile、部署 yaml 等。 + +仓库地址:[https://github.com/imroc/grpc-demo](https://github.com/imroc/grpc-demo) diff --git a/content/tencent/networking/how-to-use-eip.md b/content/tencent/networking/how-to-use-eip.md new file mode 100644 index 0000000..b09adfd --- /dev/null +++ b/content/tencent/networking/how-to-use-eip.md @@ -0,0 +1,110 @@ +# Pod 绑 EIP + +腾讯云容器服务的 TKE 暂不支持 Pod 绑 EIP,但 EKS 集群(弹性集群) 是支持的,且需要配置 yaml,加上相应的注解,本文给出实例。 + +## yaml 示例 + +EKS 的 EIP 核心注解是 `eks.tke.cloud.tencent.com/eip-attributes`,内容可以填写创建 EIP 接口的相关的参数,详细参数列表参考 [这里](https://cloud.tencent.com/document/api/215/16699#2.-.E8.BE.93.E5.85.A5.E5.8F.82.E6.95.B0) 。 + +下面给出一个简单示例,为每个 Pod 副本都绑定带宽上限 50Mbps,按流量计费的 EIP: + +```yaml +apiVersion: apps/v1 +kind: Deployment 
+metadata: + name: eip +spec: + replicas: 1 + selector: + matchLabels: + app: eip + template: + metadata: + labels: + app: eip + annotations: + 'eks.tke.cloud.tencent.com/eip-attributes': '{"InternetMaxBandwidthOut":50, "InternetChargeType":"TRAFFIC_POSTPAID_BY_HOUR"}' + spec: + containers: + - name: eip + image: cr.imroc.cc/library/net-tools:latest + command: + - sleep + - infinity +``` + +## 如何在容器内获取自身公网 IP ? + +可以利用 K8S 的 [Downward API](https://kubernetes.io/zh/docs/tasks/inject-data-application/environment-variable-expose-pod-information/) ,将 Pod 上的一些字段注入到环境变量或挂载到文件,Pod 的 EIP 信息最终会写到 Pod 的 `tke.cloud.tencent.com/eip-public-ip` 这个 annotation 上,但不会 Pod 创建时就写上,是在启动过程写上去的,所以如果注入到环境变量最终会为空,挂载到文件就没问题,以下是使用方法: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: eip +spec: + replicas: 1 + selector: + matchLabels: + app: eip + template: + metadata: + labels: + app: eip + spec: + containers: + - name: eip + image: cr.imroc.cc/library/net-tools:latest + command: + - sleep + - infinity + volumeMounts: + - mountPath: /etc/podinfo + name: podinfo + volumes: + - name: podinfo + downwardAPI: + items: + - path: "labels" + fieldRef: + fieldPath: metadata.labels + - path: "annotations" # 关键 + fieldRef: + fieldPath: metadata.annotations +``` + +容器内进程启动时可以读取 `/etc/podinfo/annotations` 中的内容来获取 EIP。 + + +## 如何保留 EIP + +需要使用 StatefulSet 部署,且加上 `eks.tke.cloud.tencent.com/eip-claim-delete-policy: "Never"` 这个 annotation: + +```yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: eip + name: eip +spec: + serviceName: "" + replicas: 1 + selector: + matchLabels: + app: eip + template: + metadata: + annotations: + eks.tke.cloud.tencent.com/eip-attributes: "{}" + eks.tke.cloud.tencent.com/eip-claim-delete-policy: "Never" # 关键 + labels: + app: eip + spec: + containers: + - name: eip + image: cr.imroc.cc/library/net-tools:latest + command: + - sleep + - infinity +``` \ No newline at end of file diff --git a/content/tencent/networking/install-localdns-with-ipvs.md b/content/tencent/networking/install-localdns-with-ipvs.md new file mode 100644 index 0000000..144ccdc --- /dev/null +++ b/content/tencent/networking/install-localdns-with-ipvs.md @@ -0,0 +1,313 @@ +# IPVS 模式安装 localdns + +## 背景 + +TKE 对 NodeLocal DNS Cache 进行了产品化支持,直接在扩展组件里面就可以一键安装到集群,参考 [NodeLocalDNSCache 扩展组件说明](https://cloud.tencent.com/document/product/457/49423) ,可是仅仅支持 iptables 转发模式的集群,而目前大多集群都会使用 IPVS 转发模式,无法安装这个扩展组件。 + +本文将介绍如何在 TKE IPVS 模式集群中自行安装 NodeLocal DNS Cache。 + + +## 准备 yaml + +复制以下 yaml 到文件 `nodelocaldns.yaml`: + +```yaml +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-local-dns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-dns-upstream + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNSUpstream" +spec: + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 53 + protocol: TCP + targetPort: 53 + selector: + k8s-app: kube-dns +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: node-local-dns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile +data: + Corefile: | + cluster.local:53 { + errors + cache { + success 9984 30 + denial 9984 5 + } + reload + loop + bind 169.254.20.10 + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + health 169.254.20.10:8080 + } + in-addr.arpa:53 { + errors + cache 30 + reload + loop + bind 169.254.20.10 + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + } + ip6.arpa:53 { + errors + cache 30 + reload + loop + bind 169.254.20.10 + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + } + .:53 { + errors + cache 30 + reload + loop + bind 169.254.20.10 + forward . __PILLAR__UPSTREAM__SERVERS__ + prometheus :9253 + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-local-dns + namespace: kube-system + labels: + k8s-app: node-local-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + selector: + matchLabels: + k8s-app: node-local-dns + template: + metadata: + labels: + k8s-app: node-local-dns + annotations: + prometheus.io/port: "9253" + prometheus.io/scrape: "true" + spec: + priorityClassName: system-node-critical + serviceAccountName: node-local-dns + hostNetwork: true + dnsPolicy: Default # Don't use cluster DNS. + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + containers: + - name: node-cache + image: cr.imroc.cc/k8s/k8s-dns-node-cache:1.17.0 + resources: + requests: + cpu: 25m + memory: 5Mi + args: [ "-localip", "169.254.20.10", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ] + securityContext: + privileged: true + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9253 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + host: 169.254.20.10 + path: /health + port: 8080 + initialDelaySeconds: 60 + timeoutSeconds: 5 + volumeMounts: + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - name: config-volume + mountPath: /etc/coredns + - name: kube-dns-config + mountPath: /etc/kube-dns + volumes: + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + - name: config-volume + configMap: + name: node-local-dns + items: + - key: Corefile + path: Corefile.base +--- +# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods. +# We use this to expose metrics to Prometheus. 
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9253"
+    prometheus.io/scrape: "true"
+  labels:
+    k8s-app: node-local-dns
+  name: node-local-dns
+  namespace: kube-system
+spec:
+  clusterIP: None
+  ports:
+  - name: metrics
+    port: 9253
+    targetPort: 9253
+  selector:
+    k8s-app: node-local-dns
+```
+
+## 替换集群 DNS 地址
+
+获取集群 DNS 的地址并替换 yaml 文件中的 `__PILLAR__CLUSTER__DNS__` 变量:
+
+```bash
+kubedns=`kubectl get svc kube-dns -n kube-system -o jsonpath={.spec.clusterIP}`
+
+sed -i "s/__PILLAR__CLUSTER__DNS__/$kubedns/g" nodelocaldns.yaml
+```
+
+> `__PILLAR__UPSTREAM__SERVERS__` 这个变量我们不管,localdns pod 会自行填充。
+
+## 一键安装
+
+通过以下命令一键安装到集群:
+
+```bash
+kubectl apply -f nodelocaldns.yaml
+```
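+
+安装完成后,可以先确认 DaemonSet 的 Pod 都正常运行(下面命令中的 label 与上面 yaml 中定义的一致):
+
+```bash
+# 每个节点上都应有一个 node-local-dns 的 Pod 处于 Running 状态
+kubectl -n kube-system get pods -l k8s-app=node-local-dns -o wide
+```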
+
+## 修改 kubelet 参数
+
+IPVS 模式集群由于需要为所有 Service 在 `kube-ipvs0` 这个 dummy 网卡上绑对应的 Cluster IP,以实现 IPVS 转发,所以 localdns 就无法再监听集群 DNS 的 Cluster IP。而 kubelet 的 `--cluster-dns` 默认指向的是集群 DNS 的 Cluster IP 而不是 localdns 监听的地址,安装 localdns 之后集群中的 Pod 默认还是使用的集群 DNS 解析。
+
+如何让 Pod 默认使用 localdns 进行 DNS 解析呢?需要改每个节点上 kubelet 的 `--cluster-dns` 启动参数:
+
+```txt
+--cluster-dns=169.254.20.10
+```
+
+可以通过以下脚本进行修改并重启 kubelet 来生效:
+
+```bash
+sed -i 's/CLUSTER_DNS.*/CLUSTER_DNS="--cluster-dns=169.254.20.10"/' /etc/kubernetes/kubelet
+systemctl restart kubelet
+```
+
+### 存量节点修改
+
+如何修改集群中已有节点的 kubelet 参数呢?目前没有产品化解决方案,可以自行通过第三方工具来修改,通常使用 ansible,安装方式参考 [官方文档: Installing Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) 。
+
+安装好 ansible 之后,按照以下步骤操作:
+
+1. 导出所有节点 IP 到 `hosts.ini`:
+
+```bash
+kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}' | tr ' ' '\n' > hosts.ini
+```
+
+2. 准备脚本 `modify-kubelet.sh`:
+
+```bash
+sed -i 's/CLUSTER_DNS.*/CLUSTER_DNS="--cluster-dns=169.254.20.10"/' /etc/kubernetes/kubelet
+systemctl restart kubelet
+```
+
+3. 准备可以用于节点登录的 ssh 密钥或密码 (密钥改名为 key,并执行 `chmod 0600 key`)
+4. 使用 ansible 在所有节点上运行脚本 `modify-kubelet.sh`:
+   * 使用密钥的示例:
+   ```bash
+   ansible all -i hosts.ini --ssh-common-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --user root --private-key=key -m script -a "modify-kubelet.sh"
+   ```
+   * 使用密码的示例:
+   ```bash
+   ansible all -i hosts.ini --ssh-common-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" -m script --extra-vars "ansible_user=root ansible_password=yourpassword" -a "modify-kubelet.sh"
+   ```
+   > **注:** 如果节点使用的 ubuntu 系统,默认 user 是 ubuntu,可以自行替换下,另外 ansible 参数再加上 `--become --become-user=root` 以便让 ansible 执行脚本时拥有 root 权限,避免操作失败。
+
+### 增量节点修改
+
+如何让新增的节点都默认修改 kubelet 参数呢?可以在加节点时设置【自定义数据】(即自定义初始化脚本),会在节点组件初始化好后执行:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161511.png)
+
+每个节点都贴一下脚本过于麻烦,一般建议使用节点池,在创建节点池时指定节点的【自定义数据】,这样就可以让节点池里扩容出来的节点都执行下这个脚本,而无需每个节点都单独设置:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925161519.png)
+
+## 关于存量 Pod
+
+集群中正在运行的存量 Pod 还是会使用旧的集群 DNS,等重建后会自动切换到 localdns,新创建的 Pod 也都会默认使用 localdns。
+
+一般没特别需要的情况下,可以不管存量 Pod,等下次更新,Pod 重建后就会自动切换到 localdns;如果想要立即切换,可以将工作负载滚动更新触发 Pod 重建来实现手动切换。
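+
+滚动更新可以直接用 kubectl 触发,下面是一个示意(Deployment 名称与命名空间为假设,请替换为实际值):
+
+```bash
+# 滚动重建工作负载,让新 Pod 切换到 localdns
+kubectl -n default rollout restart deployment my-app
+
+# 验证新 Pod 的 DNS 配置已指向 localdns (nameserver 应为 169.254.20.10)
+kubectl -n default exec deploy/my-app -- cat /etc/resolv.conf
+```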
+
+## 参考资料
+
+* [Using NodeLocal DNSCache in Kubernetes clusters](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/)
diff --git a/content/tencent/networking/offload-cert-to-clb.md b/content/tencent/networking/offload-cert-to-clb.md
new file mode 100644
index 0000000..3a37339
--- /dev/null
+++ b/content/tencent/networking/offload-cert-to-clb.md
@@ -0,0 +1,10 @@
+# 卸载 SSL 证书到 CLB
+
+## 概述
+
+本文介绍如何将腾讯云容器服务中部署的服务,通过 CLB 暴露并且将 SSL 卸载到 CLB。
+
+## 操作步骤
+
+### 准备证书
+
diff --git a/content/tencent/serverless/large-image-solution.md b/content/tencent/serverless/large-image-solution.md
new file mode 100644
index 0000000..a43274c
--- /dev/null
+++ b/content/tencent/serverless/large-image-solution.md
@@ -0,0 +1,125 @@
+# 大镜像解决方案
+
+## 背景
+
+超级节点(Serverless) 的 Pod,默认分配的系统盘大小是 20GB,当容器镜像非常大的时候(比如镜像中包含大的 AI 模型),拉取镜像会因空间不足而失败:
+
+```txt
+ Warning Failed 50s eklet Failed to pull image "registry.imroc.cc/test/large:latest": rpc error: code = Unknown desc = failed to pull and unpack image "registry.imroc.cc/test/large:latest": failed to copy: write /var/lib/containerd/io.containerd.content.v1.content/ingest/002e585a6f26fd1a69a59a72588300b909c745455c03e6d99e894d03664d47ce/data: no space left on device
+```
+
+针对这种问题,有两种解决方案。
+
+## 方案一: 使用镜像缓存
+
+在 [镜像缓存页面](https://console.cloud.tencent.com/tke2/image-cache/list) 新建实例(确保地域与集群所在地域相同):
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220725202430.png)
+
+填入大镜像的镜像地址,以及系统盘大小:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220725202725.png)
+
+> 如果是私有镜像,也添加下镜像凭证。
+
+等待实例创建完成:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220725205919.png)
+
+最后创建工作负载时,使用 `eks.tke.cloud.tencent.com/use-image-cache: auto` 为 Pod 开启镜像缓存,自动匹配同名镜像的镜像缓存实例,根据快照创建新的磁盘作为 Pod 系统盘,yaml 示例:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: large
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: large
+  template:
+    metadata:
+      labels:
+        app: large
+      annotations:
+        eks.tke.cloud.tencent.com/use-image-cache: auto
+    spec:
+      nodeSelector:
+        node.kubernetes.io/instance-type: eklet
+      containers:
+      - name: large
+        image: registry.imroc.cc/test/large:latest
+        command:
+        - "sleep"
+        - "infinity"
+        resources:
+          requests:
+            cpu: '1'
+            memory: '2Gi'
+          limits:
+            cpu: '1'
+            memory: '2Gi'
+```
+
+如果是通过控制台 UI 创建工作负载,可以直接勾选下镜像缓存:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220725211252.png)
+
+> 通常使用自动匹配即可,更多详情说明参考官方文档 [镜像缓存](https://cloud.tencent.com/document/product/457/65908)。
+
+工作负载创建好后,从 Pod 事件可以看到类似 `Image cache imc-al38vsrl used. Disk disk-e8crnrhp attached` 的信息:
+
+```txt
+Events:
+  Type    Reason          Age   From               Message
+  ----    ------          ----  ----               -------
+  Normal  Scheduled       79s   default-scheduler  Successfully assigned test/large-77fb4b647f-rpbm9 to eklet-subnet-ahugkjhr-517773
+  Normal  Starting        78s   eklet              Starting pod sandbox eks-5epp4l7h
+  Normal  Starting        42s   eklet              Sync endpoints
+  Normal  ImageCacheUsed  42s   eklet              Image cache imc-al38vsrl used. Disk disk-e8crnrhp attached
+  Normal  Pulling         41s   eklet              Pulling image "registry.imroc.cc/test/large:latest"
+  Normal  Pulled          40s   eklet              Successfully pulled image "registry.imroc.cc/test/large:latest" in 1.126771639s
+  Normal  Created         40s   eklet              Created container large
+  Normal  Started         40s   eklet              Started container large
+```
+
+进容器内部也可以看到根路径容量不止 20GB 了:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220725211450.png)
+
+如果有很多工作负载都使用大镜像,不想每个都配,也可以将注解配置到全局,参考 [EKS 全局配置说明](https://cloud.tencent.com/document/product/457/71915)。
+
+## 方案二: 修改系统盘大小
+
+Pod 系统盘默认大小为 20GB,如有需要,可以改大,超过 20GB 的部分将会进行计费。
+
+修改的方式是在 Pod 上加 `eks.tke.cloud.tencent.com/root-cbs-size: "50"` 这样的注解,示例:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+      annotations:
+        eks.tke.cloud.tencent.com/root-cbs-size: "50"
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+```
+
+## 总结
+
+针对大镜像的场景,可以使用本文介绍的两种解决方案:镜像缓存和自定义系统盘大小。
+
+使用镜像缓存的优势在于,可以加速大镜像 Pod 的启动;自定义系统盘大小的优势在于,不需要创建镜像缓存实例,比较简单方便。可以根据自身需求选取合适的方案。
\ No newline at end of file
diff --git a/content/tencent/serverless/precautions.md b/content/tencent/serverless/precautions.md
new file mode 100644
index 0000000..e350341
--- /dev/null
+++ b/content/tencent/serverless/precautions.md
@@ -0,0 +1,60 @@
+# Serverless 弹性集群注意事项
+
+## 访问公网
+
+与 TKE 集群不同,EKS 没有节点,Pod 无法像在 TKE 中那样利用节点自身的公网带宽访问公网。
+
+要让 EKS 中的 Pod 访问公网,有两种方式:
+
+1. [通过 NAT 网关访问外网](https://cloud.tencent.com/document/product/457/48710)
+2. [通过弹性公网 IP 访问外网](https://cloud.tencent.com/document/product/457/60354)
+
+大多情况下可以考虑方式一,创建 NAT 网关,在 VPC 路由表里配置路由,如果希望整个 VPC 都默认走这个 NAT 网关出公网,可以修改 default 路由表:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722111352.png)
+
+如果只想让超级节点的 Pod 走这个 NAT 网关,可以新建路由表。
+
+配置方法是在路由表新建一条路由策略,`0.0.0.0/0` 网段的下一跳类型为 `NAT 网关`,且选择前面创建的 NAT 网关实例:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722111650.png)
+
+创建好后,如果不是 default 路由表,需要关联一下超级节点的子网:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220722111842.png)
+
+## 9100 端口
+
+EKS 默认会在每个 Pod 的 9100 端口进行监听,暴露 Pod 相关监控指标,如果业务本身也监听 9100,会失败,参考 [9100 端口问题](https://imroc.cc/kubernetes/tencent/appendix/eks-annotations.html#9100-%E7%AB%AF%E5%8F%A3%E9%97%AE%E9%A2%98)。
+
+## 注意配额限制
+
+使用 EKS 集群时注意一下配额限制,如果不够,可以提工单调高上限:
+1. 单集群 Pod 数量上限 (默认200)。
+2. 
安全组绑定实例数量上限 (如果不给 Pod 指定安全组,会使用当前项目当前地域的默认安全组,每个安全组绑定实例数量上限为 2000)。 + +## ipvs 超时时间问题 + +### istio 场景 dns 超时 + +istio 的 sidecar (istio-proxy) 拦截流量借助了 conntrack 来实现连接跟踪,当部分没有拦截的流量 (比如 UDP) 通过 service 访问时,会经过 ipvs 转发,而 ipvs 和 conntrack 对连接都有一个超时时间设置,如果在 ipvs 和 conntrack 中的超时时间不一致,就可能出现 conntrack 中连接还在,但在 ipvs 中已被清理而导致出去的包被 ipvs 调度到新的 rs,而 rs 回包的时候匹配不到 conntrack,不会做反向 SNAT,从而导致进程收不到回包。 + +在 EKS 中,ipvs 超时时间当前默认是 5s,而 conntrack 超时时间默认是 120s,如果在 EKS 中使用 TCM 或自行安装 istio,当 coredns 扩容后一段时间,业务解析域名时就可能出现 DNS 超时。 + +在产品化解决之前,我们可以给 Pod 加如下注解,将 ipvs 超时时间也设成 120s,与 conntrack 超时时间对齐: + +```yaml +eks.tke.cloud.tencent.com/ipvs-udp-timeout: "120s" +``` + +### gRPC 场景 Connection reset by peer + +gRPC 是长连接,Java 版的 gRPC 默认 idle timeout 是 30 分钟,并且没配置 TCP 连接的 keepalive 心跳,而 ipvs 默认的 tcp timeout 是 15 分钟。 + +这就会导致一个问题: 业务闲置 15 分钟后,ipvs 断开连接,但是上层应用还认为连接在,还会复用连接发包,而 ipvs 中对应连接已不存在,会直接响应 RST 来将连接断掉,从业务日志来看就是 `Connection reset by peer`。 + +这种情况,如果不想改代码来启用 keepalive,可以直接调整下 eks 的 ipvs 的 tcp timeout 时间,与业务 idle timeout 时长保持一致: + +```yaml +eks.tke.cloud.tencent.com/ipvs-tcp-timeout: "1800s" +``` \ No newline at end of file diff --git a/content/tencent/serverless/supernode-case-offline.md b/content/tencent/serverless/supernode-case-offline.md new file mode 100644 index 0000000..ff7fc59 --- /dev/null +++ b/content/tencent/serverless/supernode-case-offline.md @@ -0,0 +1,57 @@ +# 超级节点案例分享: 便捷管理离线任务与大规模压测 + +## 概述 + +腾讯云容器服务的超级节点有着隔离性强,扩容快,成本低等特点,天然适合离线任务与大规模压测。 + +本文分享这种场景的几个真实实践案例。 + +## 案例一: CI 系统(某出行客户) + +gitlab-runner 启动 Pod 运行 CI 任务,任务结束即销毁 Pod,使用常驻节点会造成资源利用率低。任务量大时扩容节点时间长,造成部分 CI 任务过慢。 + +方案改进: 使用 Serverless 集群(超级节点),无需常驻节点资源,Pod 按量计费,且支持竞价实例,任务结束即停止计费,降低成本。任务量大时也可以快速扩容,提高 CI 效率。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/超级节点CI案例.png) + +## 案例二: 游戏 AI 训练(某游戏客户) + +使用 GPU Pod 训练游戏 NPC AI 模型,训练完成后,再启动大量 CPU Pod 对模型进行验证。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/超级节点游戏AI训练案例.png) + +使用 TKE 普通节点,持续跑大量任务,Pod 数量规模巨大且扩缩容频繁,导致普通节点经常需要扩容。普通节点扩容慢,导致部分任务过慢。扩容过程可能出错,比如售罄,初始化失败等。 + +方案改进: 切换到 Serverless 集群(超级节点),扩缩容速度得到极大提升(超10倍),不再有任务过慢的情况。由于使用超级节点,购买的资源规格取决于 Pod 规格,没有大规格,不容易出现售罄;没有初始化节点过程,也不会发生初始化失败的问题。超级节点支持 Pod 的竞价实例,且任务跑完即释放,极大降低成本。 + +## 案例三: 大规模 CronJob 优化 (某教育客户) + +因业务需要,需要启动大规模的 CronJob 跑离线任务,使用 TKE 普通节点,在线业务与离线 CronJob 混部,频繁启停场景下,cgroup 弱隔离带来普通节点稳定性问题。为避免售罄、节点扩容慢问题,购买了大量包年包月常驻节点,低峰期资源利用率低很低。 + +方案改进: 添加超级节点,将 CronJob 调度到超级节点,普通节点稳定性大幅提升。无需预留资源,pod 按量计费,定时任务资源成本降低 70% 左右。Job 实现秒级启动(EKS 镜像缓存,pod 启动加速)。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/超级节点cronjob案例.png) + +## 案例四: 边缘集群直播案例 (某视频客户) + +问题与困境: 在中心地域部署业务,边缘主播推流延迟大影响体验。每个地域都单独部署一套 K8S 集群,运维压力大。 + +Serverless 集群方案: 统一 K8S 接口运维多地域集群,无节点,免运维。弹性转码服务,成本低,扩容灵活。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220719190209.png) + +## 案例五: 日志处理 (某社交平台客户) + +使用 logstash 进行日志清洗,集群规模大,业务高峰期产生日志量特别大,普通节点扩容慢,导致有丢日志的情况发生。高峰期过后,普通节点资源利用率较低。 + +方案改进: 高峰期极速扩容,不存在丢日志问题。高峰期过后,平均负载降低,自动缩容,缩掉的 Pod 停止计费,提高资源利用率,降低成本。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/超级节点日志清洗案例.png) + +## 案例六: 大规模压测 (某社交平台客户) + +TKE 普通节点隔离性弱,压测时需要控制调度策略,避免与在线业务混部,造成干扰。压测时带宽消耗非常大,单节点调度过多压测 Pod 容易达到节点带宽瓶颈而丢包。 + +方案改进: 使用 Serverless 集群(超级节点), Pod 之间强隔离,压测 Pod 不会对在线业务造成干扰,无需关心调度策略,解放运维。每个 Pod 独占虚拟机,基本不会因达到带宽瓶颈而丢包。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/超级节点大规模压测案例.png) \ No newline at end of file diff --git a/content/tencent/serverless/supernode-case-online.md b/content/tencent/serverless/supernode-case-online.md new file mode 100644 index 0000000..7dfcd4c --- /dev/null +++ 
b/content/tencent/serverless/supernode-case-online.md @@ -0,0 +1,41 @@ +# 超级节点案例分享: 轻松应对流量洪峰 + +## 概述 + +腾讯云容器服务的超级节点可以轻松应对流量洪峰。 + +本文分享这种场景的几个真实实践案例。 + +## 案例一: 信息流系统(某新闻媒体客户) + +在线业务,购买了包年包月普通节点,在业务高峰期算力不足,扩容节点慢导致部分请求失败。业务高峰时间有时无法预测(可能某个新闻突然就爆火了),扩容慢问题的影响进一步被放大。 + +方案改进: 普通节点作为常驻资源池进行兜底,优先调度 Pod 到普通节点,当普通节点资源不足再调度到超级节点。高峰期 HPA 自动扩容 Pod,过程中不会触发扩容节点,不存在扩容慢问题。超级节点上 Pod 缩容后停止计费,降低成本。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/tke弹eks.png) + +## 案例二: 元宇宙案例(某元宇宙客户) + +元宇宙业务,类似在线游戏,全球同服,带宽需求量极大,需要每个 Pod 绑 EIP,且使用的游戏框架依赖读 eth0 公网 IP。做活动时,流量相比平时大很多,大概100倍,需要能够快速扩容。 + +Serverless 集群方案: 超级节点的 Pod 支持绑 EIP 和开启 EIP 直通(将公网 IP 地址绑到 eth0 网卡)。使用超级节点+HPC 定时扩容(活动时间可预知),轻松应对活动高峰,活动结束 Pod 销毁释放资源,降低成本。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/超级节点元宇宙案例.png) + +## 案例三: 医疗场景案例(某医疗客户) + +医生提交任务到系统,利用 GPU 推理来自动生成报告,辅助医生判断病情。 + +上午医生上班时间是高峰期,其余时间的量则非常低,常驻的普通节点在低峰期闲置造成浪费。普通节点在高峰期扩容速度太慢,导致一些任务需要等待很久,影响医生工作效率。 + +方案改进: 使用 Serverless 集群,GPU Pod 直接按需创建,无需常驻节点,也无需扩容节点,提高资源利用率,降低成本。POD 启动速度快,高峰期任务也能得到及时运行,提高医生工作效率。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/超级节点医疗案例.png) + +## 案例四: 录制与转码(某教育客户) + +在线授课生成视频回看的业务场景,服务端录制有一定时效性,在业务高峰期需快速扩容。转码会消耗大量计算资源,低峰期需求量则非常少,使用普通节点时,一般要保留一些常驻节点兜底,避免售罄时不可用,低峰期造成资源闲置和浪费。 + +方案改进: 由于上课时间比较集中,可使用 [HPC 插件](https://cloud.tencent.com/document/product/457/56753) 定时提前扩容录制 Pod,轻松应对高峰期,结合 HPA 快速扩容还可应对预期之外的流量洪峰。Pod 按需创建,销毁立即停止计费,无需预留资源,节约成本。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/录制与转码.png) \ No newline at end of file diff --git a/content/tencent/serverless/why-tke-supernode-rocks.md b/content/tencent/serverless/why-tke-supernode-rocks.md new file mode 100644 index 0000000..f1426d8 --- /dev/null +++ b/content/tencent/serverless/why-tke-supernode-rocks.md @@ -0,0 +1,88 @@ +# 为什么超级节点这么牛! 
+
+## 概述
+
+腾讯云容器服务中集群节点有普通节点和超级节点之分,具体怎么选呢?本文告诉你答案。
+
+## 集群与节点类型
+
+腾讯云容器服务产品化的 Kubernetes 集群最主要是以下两种:
+
+- 标准集群
+- Serverless 集群
+
+不管哪种集群,都需要添加节点才能运行服务(Pod)。对于标准集群,同时支持添加普通节点与超级节点:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/tke标准集群.png)
+
+而对于 Serverless 集群,只支持添加超级节点:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/serverless集群.png)
+
+## 普通节点与超级节点的区别
+
+普通节点都很好理解,就是将虚拟机(CVM)添加到集群中作为 K8S 的一个节点,每台虚拟机(节点)上可以调度多个 Pod 运行。
+
+那超级节点又是什么呢?可以理解是一种虚拟的节点,每个超级节点代表一个 VPC 的子网,调度到超级节点的 Pod 分配出的 IP 也会在这个子网中,每个 Pod 都独占一台轻量虚拟机,Pod 之间都是强隔离的,跟在哪个超级节点上无关。
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/普通节点与超级节点.png)
+
+> 更多详细解释请参考 [官方文档: 超级节点概述](https://cloud.tencent.com/document/product/457/74014)。
+
+所以,调度到超级节点的 Pod,你可以认为它没有节点,自身就是一个独立的虚拟机,超级节点仅仅是一个虚拟的节点概念,并不是指某台机器,一个超级节点能调度的 Pod 数量主要取决于这个超级节点关联的子网的 IP 数量。
+
+虽然超级节点里的 Pod 独占一台虚拟机,但它很轻量,可以快速启动,也无需运维节点,这些特性带来了一些相对普通节点非常明显的优势,下面对这些优势详细讲解下。
+
+## 超级节点的优势
+
+### 隔离性更强
+
+Pod 之间是虚拟机级别的强隔离,不存在 Pod 之间干扰问题(如某个 Pod 磁盘 IO 过高影响其它 Pod),也不会因底层故障导致大范围受影响。
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/超级节点隔离性.png)
+
+### 免运维
+
+无需运维节点:
+* Pod 重建即可自动升级基础组件或内核到最新版。
+* 如果 Pod 因高负载或其它原因导致长时间无心跳上报,底层虚拟机也可以自动重建,迁移到新机器并开机运行实现自愈。
+* 检测到硬件故障自动热迁移实现自愈。
+* 检测到 GPU 坏卡可自动迁移到正常机器。
+
+### 弹性更高效
+
+对于普通节点,扩容比较慢,因为需要各种安装与初始化流程,且固定机型+大规格的节点,有时可能有售罄的风险。
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/普通节点池扩容.png)
+
+而超级节点只需扩容 Pod,超级节点本身没有安装与初始化流程,可快速扩容应对业务高峰。且 Pod 规格相对较小,机型可根据资源情况自动调整,售罄概率很低。
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/超级节点扩容pod.png)
+
+### 成本更省
+
+为避免扩容慢,或者因某机型+规格的机器资源不足导致扩容失败,普通节点往往会预留一些 buffer,在低峰期资源利用率很低,造成资源的闲置和浪费。
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/普通节点预留buffer.png)
+
+而超级节点可按需使用,Pod 销毁立即停止计费,由于 Pod 规格一般不大,且机型可根据资源大盘情况自动灵活调整,不容易出现售罄的情况,无需预留 buffer,极大提升资源利用率,降低成本。
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/超级节点无需预留buffer.png)
+
+## 如何选择?
+
+### 一般建议
+
+超级节点在很多场景中优势都比较明显,大多情况下使用超级节点都可以满足需求。
+
+如果超级节点没有明显无法满足自身需求的地方,可以考虑优先使用 Serverless 集群,只用超级节点。
+
+如果存在超级节点无法满足需求的情况,可以使用标准集群,添加普通节点,同时也可以添加超级节点来混用,将超级节点无法满足需求的服务只调度到普通节点。
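+
+混用普通节点与超级节点时,可以用节点亲和性控制服务的调度范围。下面是一个最小示意(工作负载名与镜像为假设),利用超级节点自带的 `node.kubernetes.io/instance-type: eklet` 标签将 Pod 排除出超级节点;把 `NotIn` 改为 `In` 则相反,只调度到超级节点:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: only-normal-node # 假设的名称
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: only-normal-node
+  template:
+    metadata:
+      labels:
+        app: only-normal-node
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: node.kubernetes.io/instance-type
+                operator: NotIn # 排除超级节点;改为 In 则只调度到超级节点
+                values:
+                - eklet
+      containers:
+      - name: app
+        image: nginx
+```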
+
+那哪些情况超级节点无法满足需求呢?参考下面 **适合普通节点的场景**。
+
+### 适合普通节点的场景
+
+- 需要定制操作系统,[自定义系统镜像](https://cloud.tencent.com/document/product/457/39563)。
+- 需要很多小规格的 Pod 来节约成本,比如 0.01 核,或者甚至没有 request 与 limit (通常用于测试环境,需要创建大量 Pod,但资源占用很低)。
+- 需要对集群配置进行高度自定义,比如修改运行时的一些配置(如 registry mirror)。
\ No newline at end of file
diff --git a/content/tencent/solution/multi-account.md b/content/tencent/solution/multi-account.md
new file mode 100644
index 0000000..38d0cef
--- /dev/null
+++ b/content/tencent/solution/multi-account.md
@@ -0,0 +1,546 @@
+# 腾讯云跨账号流量统一接入与治理方案
+
+## 需求场景
+
+服务部署在不同腾讯云账号下,想统一在一个腾讯云账号下接入流量,部分流量可能会转发到其它腾讯云账号下的服务。
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812105933.png)
+
+## 需求分析
+
+多集群跨 VPC 流量管理,可以通过 [腾讯云服务网格](https://cloud.tencent.com/product/tcm)(TCM) + [云联网](https://cloud.tencent.com/product/ccn)(CCN) 来实现,自动对多个容器集群进行服务发现(Pod IP),利用 istio ingressgateway 统一接入流量,然后直接转发到后端服务的 Pod IP:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812114344.png)
+
+但这里需求关键点是跨账号,虽然跨账号网络也可以用云联网打通,但是 TCM 是无法直接管理其它账号下的集群的,原因很明显,关联集群时只能选择本账号下的集群,没有权限关联其它账号下的集群:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812112012.png)
+
+幸运的是,我们可以利用 [云原生分布式云中心](https://cloud.tencent.com/product/tdcc)(TDCC) 来管理其它账号的集群 (TDCC 目前还在内测中,需提交 [内测申请](https://cloud.tencent.com/apply/p/897g10ltlv6) 进行开通),将其它账号的集群注册到 TDCC 中,然后在 TCM 里添加 TDCC 中注册的集群,TCM 通过关联 TDCC 注册集群来间接对其它账号的集群进行服务发现,以实现多账号下的集群流量统一纳管:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812114733.png)
+
+## 注意事项: 其它账号尽量使用独立集群
+
+istio 注入 sidecar 时需要集群 apiserver 调用 TCM 控制面 webhook:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812123716.png)
+
+如果使用托管集群(TKE托管集群或EKS集群),apiserver 是用户不可见的,使用 169 开头的 IP,这个 IP 只在 VPC 内可用。
+
+所以如果将账号B的托管集群注册到账号A的 TDCC 中,账号B的托管集群 apiserver 也无法调用到账号A的TCM控制面,就会导致无法注入 sidecar,而独立集群没这个问题,因为 apiserver 是部署在用户 CVM 上,使用 CVM 的 IP,打通云联网后网络就可以互通,所以推荐其它账号下的集群使用 TKE 独立集群。
+
+当然如果能保证完全没有 sidecar 自动注入的需求,不需要账号 B 的服务通过网格的服务发现主动调用账号 A 的服务,这种情况使用托管集群也可以。
+
+## 操作步骤
+
+### 准备集群
+
+在账号A下(用于接入流量的账号),准备好一个或多个 TKE/EKS 集群,在其它账号准备好 TKE 独立集群。
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812141030.png)
+
+注意,一定保证所有集群使用的网段互不冲突。
+
+### 使用云联网打通网络
+
+登录账号A,进入[云联网控制台](https://console.cloud.tencent.com/vpc/ccn)里,新建一个云联网,然后点击【新增实例】,将账号A下需要打通网络的VPC全部关联进来:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812141458.png)
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812141636.png)
+
+登录其它账号,进入[VPC控制台](https://console.cloud.tencent.com/vpc/vpc),点击进入需要与账号A打通网络的VPC,点【立即关联】:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812141906.png)
+
+选择【其它账号】,输入账号A的ID以及前面创建的云联网的ID以申请加入账号A创建的云联网:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812142033.png)
+
+然后再登录账号A,点进前面创建的云联网,同意其它账号VPC加入云联网的申请:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812142351.png)
+
+不出意外,不同账号不同 VPC 成功通过云联网打通网络:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812142710.png)
+
+如果你使用了 TKE 集群的 Global Router 网络模式,在集群基本信息页面,将容器网络注册到云联网的开关打开,以便让 Global Router 网络模式的容器 IP 通过云联网下发给所有其它 VPC:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812143110.png)
+
+### 开通 TDCC
+
+登录账号A,进入 [TDCC 控制台](https://console.cloud.tencent.com/tdcc),首次进入需要按流程进行开通操作。
+
+首先会提示为 TDCC 进行授权:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812143957.png)
+
+点击【同意授权】:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812143719.png)
+
+选择要开通的 TDCC 所在地域以及 VPC 与子网:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812144338.png)
+
+需要注意的是:
+* TDCC 是多集群的控制面,可以同时管理多个地域的集群,尽量将 TDCC 所在地域选在服务部署的地域,如果服务分散在多个地域,或者 TDCC 还不支持服务所在地域,可以尽量选择离服务近一点的地域,尽量降低 TDCC 控制面到集群之间的时延。
+* TDCC 与集群如果跨地域,仅仅增加一点控制面之间的时延,不影响数据面。数据面之间的转发时延只取决于集群之间的距离,与 TDCC 无关,比如,集群都在成都地域,但 TDCC 不支持成都,可以将 TDCC 选择广州。
+* 可以将 TDCC 所在 VPC 也加入到云联网,这样其它账号注册集群到 TDCC 时就可以使用内网方式,网络稳定性更有保障。
+
+等待 TDCC 的 Hub 集群创建完成:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812150235.png)
+
+完成后,在 [TDCC 集群列表页面](https://console.cloud.tencent.com/tdcc/cluster),点击【注册已有集群】:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812150408.png)
+
+虽然其它账号使用的是 TKE 独立集群,但这里一定要选择【非TKE集群】:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812150500.png)
+
+> 因为如果选【TKE集群】,只能选到本账号的,其它账号的选不了。
+
+选择其它账号集群实际所在地域,然后点【完成】,回到集群列表页面,点击【查看注册命令】:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812151006.png)
+
+可以看到自动生成的 yaml,将其下载下来,保存成 `agent.yaml`:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812151205.png)
+
+然后将 kubectl 的 context 切换到其它账号中要注册到 TDCC 的集群,使用 kubectl 将 yaml apply 进去:
+
+```bash
+kubectl apply -f agent.yaml
+```
+
+不出意外,TDCC 集群列表页面可以看到注册集群状态变为了`运行中`,即将其它账号下的集群成功注册到 TDCC:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812151528.png)
+
+### 创建服务网格
+
+登录账号A,进入 [TCM 控制台](https://console.cloud.tencent.com/tke2/mesh),点【新建】来创建一个服务网格:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812151827.png)
+
+推荐选择最高版本 istio,托管网格:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812152008.png)
+
+> 服务发现就是关联集群,可以在创建网格时就关联,也可以等创建完再关联。
+
+如何将 TDCC 中的注册集群关联进 TCM?在关联集群时,选择 TDCC 所在地域和注册集群类型,然后就可以下拉选择其它账号下注册进来的集群了:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812152410.png)
+
+不出意外,账号A和其它账号的集群都关联到同一个服务网格了:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220811204947.png)
+
+### 创建 Ingress Gateway
+
+进入账号A创建的网格,在基本信息页面里创建 Ingress Gateway:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812160625.png)
+
+配置一下 Ingress Gateway,`接入集群` 选要统一接入流量的集群:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812161504.png)
+
+创建好后,点进去:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812161751.png)
+
+可以看到创建出来的 CLB IP 地址以及对应的 CLB ID:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812161959.png)
+
+> 如有需要,创建 Ingress Gateway 时也可以选择已有 CLB。
+
+Ingress Gateway 组件创建好了,再创建一个 Gateway 对象与之关联:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812163550.png)
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812163740.png)
+
+也可以直接用 yaml 创建:
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: Gateway
+metadata:
+  name: cluster
+  namespace: istio-system
+spec:
+  selector:
+    app: istio-ingressgateway
+    istio: ingressgateway
+  servers:
+  - port:
+      number: 80
+      name: HTTP-80
+      protocol: HTTP
+    hosts:
+    - "*.imroc.cc"
+```
+
+### 配置 DNS 解析
+
+将三个不同的域名都解析到前面创建的 Ingress Gateway 的 CLB IP:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812162136.png)
+
+验证一下是否都正确解析到了同一个 IP:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812162402.png)
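+
+也可以直接在命令行用 dig 验证(下面的域名仅为示例,请替换为你实际配置解析的域名):
+
+```bash
+# 三个域名应解析出同一个 CLB IP
+dig +short cluster1.imroc.cc
+dig +short cluster2.imroc.cc
+dig +short cluster3.imroc.cc
+```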
+分别在几个集群部署服务,这里给出一个示例,将 3 个不同服务分别部署在不同集群中,其中一个集群在其它账号下: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812153613.png) + +* 3 个服务使用不同域名,但 DNS 都指向同一个 ingressgateway,统一接入流量。 +* 根据不同域名转发给不同的服务。 + +服务部署使用 [prism](https://stoplight.io/open-source/prism),模拟不同服务的返回不同,访问根路径分别返回字符串`cluster1`、`cluster2`与`cluster3`。 + +第一个服务的 yaml (`cluster1.yaml`): + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster1-conf + namespace: test +data: + mock.yaml: | + openapi: 3.0.3 + info: + title: MockServer + description: MockServer + version: 1.0.0 + paths: + '/': + get: + responses: + '200': + content: + 'text/plain': + schema: + type: string + example: cluster1 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster1 + namespace: test + labels: + app: cluster1 +spec: + type: ClusterIP + ports: + - port: 80 + name: http + protocol: TCP + targetPort: 80 + selector: + app: cluster1 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster1 + namespace: test +spec: + replicas: 1 + selector: + matchLabels: + app: cluster1 + version: v1 + template: + metadata: + labels: + app: cluster1 + version: v1 + spec: + containers: + - name: cluster1 + image: stoplight/prism:4 + args: + - mock + - -h + - 0.0.0.0 + - -p + - "80" + - /etc/prism/mock.yaml + volumeMounts: + - mountPath: /etc/prism + name: config + volumes: + - name: config + configMap: + name: cluster1-conf +``` + +将其 apply 到账号 A 的集群1: + +```bash +kubectl create ns test +kubectl apply -f cluster1.yaml +``` + +等待部署成功: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812155724.png) + +第二个服务的 yaml (`cluster2.yaml`): + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster2-conf + namespace: test +data: + mock.yaml: | + openapi: 3.0.3 + info: + title: MockServer + description: MockServer + version: 1.0.0 + paths: + '/': + get: + responses: + '200': + content: + 'text/plain': + schema: + type: string + example: cluster2 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster2 + namespace: test + labels: + app: cluster2 +spec: + type: ClusterIP + ports: + - port: 80 + name: http + protocol: TCP + targetPort: 80 + selector: + app: cluster2 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster2 + namespace: test +spec: + replicas: 1 + selector: + matchLabels: + app: cluster2 + version: v1 + template: + metadata: + labels: + app: cluster2 + version: v1 + spec: + containers: + - name: cluster2 + image: stoplight/prism:4 + args: + - mock + - -h + - 0.0.0.0 + - -p + - "80" + - /etc/prism/mock.yaml + volumeMounts: + - mountPath: /etc/prism + name: config + volumes: + - name: config + configMap: + name: cluster2-conf +``` + +将其 apply 到账号 A 的集群2: + +```bash +kubectl create ns test +kubectl apply -f cluster2.yaml +``` + +类似的,第三个服务的 yaml (`cluster3.yaml`): + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster3-conf + namespace: test +data: + mock.yaml: | + openapi: 3.0.3 + info: + title: MockServer + description: MockServer + version: 1.0.0 + paths: + '/': + get: + responses: + '200': + content: + 'text/plain': + schema: + type: string + example: cluster3 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster3 + namespace: test + labels: + app: cluster3 +spec: + type: ClusterIP + ports: + - port: 80 + name: http + protocol: TCP + targetPort: 80 + selector: + app: cluster3 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster3 + namespace: test +spec: + replicas: 1 + selector: + matchLabels: + app: cluster3 + version: v1 
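  # 注意:下方 template 中的 labels 必须与此处 selector.matchLabels 完全一致,否则 Deployment 会创建失败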
+ template: + metadata: + labels: + app: cluster3 + version: v1 + spec: + containers: + - name: cluster3 + image: stoplight/prism:4 + args: + - mock + - -h + - 0.0.0.0 + - -p + - "80" + - /etc/prism/mock.yaml + volumeMounts: + - mountPath: /etc/prism + name: config + volumes: + - name: config + configMap: + name: cluster3-conf +``` + +将其 apply 到另一个账号的集群: + +```bash +kubectl create ns test +kubectl apply -f cluster3.yaml +``` + +### 配置 VirtualService 规则 + +可以在 TCM 控制台可视化操作,也可以用 apply yaml,这里示例使用 yaml。 + +首先,为三个不同服务创建对应的 VirtualService 并与 Gateway 关联: + +```yaml +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: cluster1-imroc-cc + namespace: test +spec: + gateways: + - istio-system/cluster + hosts: + - 'cluster1.imroc.cc' + http: + - route: + - destination: + host: cluster1.test.svc.cluster.local + port: + number: 80 +--- +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: cluster2-imroc-cc + namespace: test +spec: + gateways: + - istio-system/cluster + hosts: + - 'cluster2.imroc.cc' + http: + - route: + - destination: + host: cluster2.test.svc.cluster.local + port: + number: 80 +--- +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: cluster3-imroc-cc + namespace: test +spec: + gateways: + - istio-system/cluster + hosts: + - cluster3.imroc.cc + http: + - route: + - destination: + host: cluster3.test.svc.cluster.local + port: + number: 80 +``` + +### 测试效果 + +使用 curl 请求不同服务的域名,可以看到将请求均正确转发到了对应的集群,并响应了对应不同的结果: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/20220812164255.png) + +## 总结 + +本文给出了在腾讯云上利用 TCM+CCN+TDCC 实现跨账号多集群流量统一接入和治理的方案,示例中的功能相对简单,如有需要,还可以自行配置 istio 规则实现更细粒度的流量治理,比如根据不同 url 路径转发到不同集群的服务,甚至相同 url 同时转发到不同集群,配置流量比例等。 \ No newline at end of file diff --git a/content/tencent/solution/upgrade-inplace.md b/content/tencent/solution/upgrade-inplace.md new file mode 100644 index 0000000..83d7e20 --- /dev/null +++ b/content/tencent/solution/upgrade-inplace.md @@ -0,0 +1,109 @@ +# 原地升级 + +## 需求与背景 + +Kubernetes 默认不支持原地升级,使用腾讯云容器服务也一样,也没有集成相关插件来支持,可以安装开源的 openkruise 来实现,本文介绍如何在腾讯云容器服务上利用 openkruise 让工作负载进行原地升级。 + +## 原地升级的好处 + +原地升级的主要好处是,更新更快,并且可以避免更新后底层资源不足导致一直 Pending: + +* 不需要重建 Pod,对于 EKS 来说,都不需要重建虚拟机。 +* 原地升级实际就是替换容器镜像,重启下容器,对于 EKS 来说,可以避免 Pod 重建后底层没资源调度的情况。 +* 不需要重新拉取整个镜像,只需要拉取有变化的 layer 即可。 + +## 操作步骤 + +### 安装 openkruise + + +```bash +helm repo add openkruise https://openkruise.github.io/charts/ +helm repo update +helm install kruise openkruise/kruise +``` + +> 参考 [官方安装文档](https://openkruise.io/zh/docs/installation) + +### 创建支持原地升级的工作负载 + +OpenKruise 中有以下几种工作负载支持原地升级: + +* CloneSet +* Advanced StatefulSet +* Advanced DaemonSet +* SidecarSet + +> 更多原地升级详细文档参考 [官方文档](https://openkruise.io/zh/docs/core-concepts/inplace-update/) + +以下用 `Advanced StatefulSet` 进行演示,准备 `sts.yaml` + +```yaml +apiVersion: apps.kruise.io/v1beta1 +kind: StatefulSet +metadata: + name: sample +spec: + replicas: 3 + serviceName: fake-service + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + readinessGates: + # A new condition that ensures the pod remains at NotReady state while the in-place update is happening + - conditionType: InPlaceUpdateReady + containers: + - name: main + image: nginx:alpine + podManagementPolicy: Parallel # allow parallel updates, works together with maxUnavailable + updateStrategy: + type: RollingUpdate + rollingUpdate: + # Do in-place update if possible, currently only image update is supported for in-place update + podUpdatePolicy: 
InPlaceIfPossible
      # Allow parallel updates with max number of unavailable instances equals to 2
      maxUnavailable: 2
```

部署到集群:

```bash
$ kubectl apply -f sts.yaml
statefulset.apps.kruise.io/sample created
```

检查 pod 是否正常拉起:

```bash
$ kubectl get pod
NAME       READY   STATUS    RESTARTS   AGE
sample-0   1/1     Running   0          16s
sample-1   1/1     Running   0          16s
sample-2   1/1     Running   0          16s
```

### 更新镜像

修改 yaml 中的 image 为 `nginx:latest`,然后再 apply:

```bash
$ kubectl apply -f sts.yaml
statefulset.apps.kruise.io/sample configured
```

观察 pod:

```bash
$ kubectl get pod
NAME       READY   STATUS    RESTARTS   AGE
sample-0   1/1     Running   1          2m47s
sample-1   1/1     Running   1          2m47s
sample-2   1/1     Running   1          2m47s
```

可以看到,pod 中的容器只是重启了下,并没重建 pod,至此,原地升级验证成功。
\ No newline at end of file
diff --git a/content/tencent/storage/cbs-pvc-expansion.md b/content/tencent/storage/cbs-pvc-expansion.md
new file mode 100644
index 0000000..786bbb2
--- /dev/null
+++ b/content/tencent/storage/cbs-pvc-expansion.md
@@ -0,0 +1,106 @@
+# 扩容 CBS 类型的 PVC

## 概述

TKE 中一般使用 PVC 来声明存储容量和类型,自动绑定 PV 并挂载到 Pod,通常都使用 CBS (云硬盘) 存储。当 CBS 的磁盘容量不够用了,如何进行扩容呢?分两种情况,本文会详细介绍。

## 存储插件类型

CBS 存储插件在 TKE 中存在两种形式:
1. In-Tree: Kubernetes 早期只支持以 In-Tree 的方式扩展存储插件,也就是将插件的逻辑编译进 Kubernetes 的组件中,也是 TKE 集群 1.20 版本之前默认自带的存储插件。
2. CSI: Kubernetes 社区发展过程中,引入存储扩展卷的 API,将存储插件实现逻辑从 Kubernetes 代码库中剥离出去,各个存储插件的实现单独维护和部署,无需侵入 Kubernetes 自身组件,也是社区现在推荐的存储扩展方式。TKE 在 1.20 版本之前,如果要使用 CSI 插件,可以在扩展组件中安装 CBS CSI 插件;自 1.20 版本开始,默认安装 CBS CSI 插件,将 In-Tree 插件完全下掉。

可以检查 PVC 对应 StorageClass 的 yaml,如果 provisioner 是 `cloud.tencent.com/qcloud-cbs`,说明是 In-Tree,如果是 `com.tencent.cloud.csi.cbs` 就是 CSI。

## In-Tree 插件扩容 PVC

如果符合以下两种情况之一,说明你的 CBS PVC 用的 In-Tree 插件:
1. 如果你的集群版本低于 1.20,并且没有安装 CSI 插件 (默认没有安装),那么你使用的 CBS 类型 PVC 一定用的 In-Tree 插件;
2. 如果安装了 CSI 插件,但创建的 PVC 引用的 StorageClass 并没有使用 CSI (如下图)。

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925162004.png)

对 In-Tree 插件的 PVC 进行扩容需要手动操作,比较麻烦,操作步骤如下:

1. 获取 pvc 所绑定的 pv:
```bash
$ kubectl -n monitoring get pvc grafana -o jsonpath='{.spec.volumeName}'
grafana
```

2. 获取 pv 对应的 cbs id:
```bash
$ kubectl get pv -o jsonpath="{.spec.qcloudCbs.cbsDiskId}" grafana
disk-780nl2of
```

3. 在[云硬盘控制台](https://console.cloud.tencent.com/cvm/cbs/index) 找到对应云盘,进行扩容操作:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925162014.png)

4. 登录 CBS 挂载的节点 (pod 所在节点),找到这块 cbs 盘对应的设备路径:
```bash
$ ls -l /dev/disk/by-id/*disk-780nl2of*
lrwxrwxrwx 1 root root 9 Jul 18 23:26 /dev/disk/by-id/virtio-disk-780nl2of -> ../../vdc
```

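在扩容文件系统前,可以先确认该设备实际使用的文件系统类型,以便下一步选择正确的扩容命令 (下面以上一步查到的 `/dev/vdc` 为例):

```bash
# 查看文件系统类型 (blkid 输出中的 TYPE 字段,常见为 ext4 或 xfs)
blkid /dev/vdc
# 或者用 lsblk 查看 (FSTYPE 字段)
lsblk -f /dev/vdc
```
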
5. 执行命令扩容文件系统(替换 cbs 设备路径):

```bash
# 对于 ext4 文件系统(通常是这种)
resize2fs /dev/vdc
# 对于 xfs 文件系统
xfs_growfs /dev/vdc
```

### FAQ

**不需要改 PVC 或 PV 吗?**

不需要,PVC 和 PV 显示的容量还是会显示扩容之前的大小,但实际大小是扩容后的。

## CSI 插件扩容 PVC

如果 TKE 集群版本在 1.20 及以上,一定用的是 CSI 插件;如果低于 1.20,安装了 CBS CSI 扩展组件,且 PVC 引用的 StorageClass 是 CBS CSI 类型的,开启了在线扩容能力,那么就可以直接修改 PVC 容量实现自动扩容 PV 的容量。

CBS CSI 插件扩容 PVC 非常简单,只有修改 PVC 容量一个步骤,所以这里先讲下如何确保 PVC 能够在线扩容。

如果用控制台创建 StorageClass ,确保勾选 【启用在线扩容】(默认就会勾选):

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925162024.png)

如果使用 YAML 创建,确保将 `allowVolumeExpansion` 设为 true:

```yaml
allowVolumeExpansion: true # 这里是关键
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: cbs-csi-expand
parameters:
  diskType: CLOUD_PREMIUM
provisioner: com.tencent.cloud.csi.cbs
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
```

创建 PVC 时记得选择 CBS CSI 类型且开启了在线扩容的 StorageClass:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925162035.png)

然后当需要扩容 PVC 的时候,直接修改 PVC 的容量即可:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925162045.png)

> 修改完后对应的 CBS 磁盘容量会自动扩容到指定大小 (注意必须是 10Gi 的倍数),可以自行到云硬盘控制台确认。

### FAQ

**需要重启 Pod 吗?**

可以不重启 pod 直接扩容,但这种情况下被扩容的云盘的文件系统被 mount 在节点上,如果有频繁 I/O 的话,有可能会出现文件系统扩容错误。为了确保文件系统的稳定性,还是推荐先让云盘文件系统处于未 mount 情况下进行扩容,可以将 Pod 副本调为 0 或修改 PV 打上非法的 zone (`kubectl label pv pvc-xxx failure-domain.beta.kubernetes.io/zone=nozone`) 让 Pod 重建后 Pending,然后再修改 PVC 容量进行在线扩容,最后再恢复 Pod Running 以挂载扩容后的磁盘。

**担心扩容导致数据出问题,如何兜底?**

可以在扩容前使用快照来备份数据,避免扩容失败导致数据丢失。

diff --git a/content/tencent/storage/mount-cfs-with-v3.md b/content/tencent/storage/mount-cfs-with-v3.md
new file mode 100644
index 0000000..e918fed
--- /dev/null
+++ b/content/tencent/storage/mount-cfs-with-v3.md
@@ -0,0 +1,171 @@
+# 使用 V3 协议挂载 CFS

## 背景

腾讯云 CFS 文件存储,同时支持 NFS V3 和 V4 协议,mount 的时候,如果不指定协议,默认是客户端与服务端协商得到版本号,大多情况下会使用 NFS V4 协议,但 CFS 文件存储使用 NFS V4 挂载的话目前存在不稳定的问题,建议是显式指定使用 NFS V3 协议挂载。

本文分别介绍在腾讯云容器服务 TKE 和 EKS 两种集群中,显式指定使用 NFS V3 协议挂载的方法。

## 使用 CFS 插件 (仅限 TKE 集群)

### StorageClass 自动创建 CFS

如果 TKE 集群安装了 CFS 扩展组件,可以自动创建并挂载 CFS 存储,创建 StorageClass 时协议版本选择 V3:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925162117.png)

yaml 示例:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: cfs
parameters:
  vers: "3" # 关键点:指定协议版本。
  pgroupid: pgroup-mni3ng8n # 指定自动创建出来的 CFS 的权限组 ID。
  storagetype: SD # 指定自动创建出来的 CFS 的存储类型。SD 为标准存储,HP 为性能存储。
  subdir-share: "true" # 是否每个 PVC 都共享同一个 CFS 实例。
  vpcid: vpc-e8wtynjo # 指定 VPC ID,确保与当前集群 VPC 相同。
  subnetid: subnet-e7uo51yj # 指定自动创建出来的 CFS 的子网 ID。
provisioner: com.tencent.cloud.csi.tcfs.cfs
reclaimPolicy: Delete
volumeBindingMode: Immediate
```

后续使用 PVC 直接指定前面创建的 StorageClass 即可。

### 静态创建复用已有 CFS 实例

如果已经有 CFS 实例了,希望不自动创建而直接复用已有 CFS 实例,可以使用静态创建。

yaml 示例:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cfs-pv
spec:
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 10Gi
  csi:
    driver: com.tencent.cloud.csi.cfs
    volumeAttributes:
      fsid: yemafcez # 指定 fsid,在 CFS 实例控制台页面的挂载点信息里看 NFS 3.0 挂载命令,里面有 fsid。
      host: 10.10.9.6 # CFS 实例 IP。
      path: / # 指定要挂载的 CFS 实例的目录。
      vers: "3" # 关键点:指定协议版本。
    volumeHandle: cfs-pv
  persistentVolumeReclaimPolicy: Retain
  storageClassName: "" # 指定 StorageClass 为空
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cfs-pvc
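  # 下面 spec 中的容量与 accessModes 需与上方 PV 匹配,volumeName 显式指定要绑定的 PV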
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  storageClassName: "" # 指定 StorageClass 为空
  volumeMode: Filesystem
  volumeName: cfs-pv # PVC 引用 PV 的名称,手动绑定关系。
```

### CSI Inline 方式

如果不想用 PV,也可以在定义 Volumes 时使用 CSI Inline 的方式,yaml 示例:

```yaml
---
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
  name: com.tencent.cloud.csi.cfs
spec:
  attachRequired: false
  podInfoOnMount: false
  volumeLifecycleModes:
    - Ephemeral # 告知 CFS 插件启用 inline 的功能,以便让 CSI Inline 定义方式可以正常工作

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:latest
          volumeMounts:
            - mountPath: /test
              name: cfs
      volumes:
        - csi: # 这里定义 CSI Inline
            driver: com.tencent.cloud.csi.cfs
            volumeAttributes:
              fsid: yemafcez
              host: 10.10.9.6
              path: /
              vers: "3"
              proto: tcp
          name: cfs
```

## PV 指定 mountOptions (TKE 集群与 EKS 弹性集群通用)

K8S 原生支持挂载 NFS 存储,而 CFS 本质就是 NFS 存储,可以直接用 K8S 原生的用法,只是需要在 PV 指定下挂载选项 (mountOptions),具体加哪些,可以在 CFS 实例控制台页面的挂载点信息里看 NFS 3.0 挂载命令。

这种方式需要自行提前创建好 CFS 实例,然后手动创建 PV/PVC 与 CFS 实例关联,yaml 示例:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cfs-pv
spec:
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 10Gi
  nfs:
    path: /yemafcez # v3 协议这里 path 一定要以 fsid 开头,在 CFS 实例控制台页面的挂载点信息里看 NFS 3.0 挂载命令,里面有 fsid。
    server: 10.10.9.6 # CFS 实例 IP。
  mountOptions: # 指定挂载选项,从 CFS 实例控制台挂载点信息里面获取。
  - vers=3 # 使用 v3 协议
  - proto=tcp
  - nolock,noresvport
  persistentVolumeReclaimPolicy: Retain
  storageClassName: "" # 指定 StorageClass 为空
  volumeMode: Filesystem

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  storageClassName: "" # 指定 StorageClass 为空
  volumeMode: Filesystem
  volumeName: cfs-pv # PVC 引用 PV 的名称,手动绑定关系。
```
diff --git a/content/tencent/storage/readonlymany-pv.md b/content/tencent/storage/readonlymany-pv.md
new file mode 100644
index 0000000..96b482c
--- /dev/null
+++ b/content/tencent/storage/readonlymany-pv.md
@@ -0,0 +1,66 @@
+# 定义 ReadOnlyMany 存储的方法

## 概述

要实现 `ReadOnlyMany` (多机只读) 的前提条件是后端存储是共享存储,在腾讯云上有 `COS` (对象存储) 和 `CFS` (文件存储) 两种。本文介绍这两种共享存储在腾讯云容器服务环境里定义成 PV 的使用方法。

## COS

1. `accessModes` 指定 `ReadOnlyMany`。
2. `csi.volumeAttributes.additional_args` 指定 `-oro`。

yaml 示例:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: registry
spec:
  accessModes:
    - ReadOnlyMany
  capacity:
    storage: 1Gi
  csi:
    readOnly: true
    driver: com.tencent.cloud.csi.cosfs
    volumeHandle: registry
    volumeAttributes:
      additional_args: "-oro"
      url: "http://cos.ap-chengdu.myqcloud.com"
      bucket: "roc-**********"
      path: /test
    nodePublishSecretRef:
      name: cos-secret
      namespace: kube-system
```

## CFS

1. `accessModes` 指定 `ReadOnlyMany`。
2. 
`mountOptions` 指定 `ro`。

yaml 示例:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: test
spec:
  accessModes:
    - ReadOnlyMany
  capacity:
    storage: 10Gi
  storageClassName: cfs
  persistentVolumeReclaimPolicy: Retain
  volumeMode: Filesystem
  mountOptions:
  - ro
  csi:
    driver: com.tencent.cloud.csi.cfs
    volumeAttributes:
      host: 10.10.99.99
      path: /test
    volumeHandle: cfs-********
```
diff --git a/content/tencent/troubleshooting/public-service-or-ingress-connect-failed.md b/content/tencent/troubleshooting/public-service-or-ingress-connect-failed.md
new file mode 100644
index 0000000..dbc628b
--- /dev/null
+++ b/content/tencent/troubleshooting/public-service-or-ingress-connect-failed.md
@@ -0,0 +1,31 @@
+# 排查公网服务不通

## 问题描述

部署在 TKE 集群内的服务使用公网对外暴露 (LoadBalancer 类型 Service 或 Ingress),但访问不通。

## 常见原因

### 节点安全组没放通 NodePort

如果服务使用 TKE 默认的公网 Service 或 Ingress 暴露,CLB 会转发流量到 NodePort,流量转发链路是: client -> CLB -> NodePort -> ...

CLB 转发的数据包不会做 SNAT,所以报文到达节点时源 IP 就是 client 的公网 IP,如果节点安全组入站规则没有放通 client -> NodePort 链路的话,是访问不通的。

**解决方案1:** 节点安全组入站规则对公网放通 NodePort 区间端口(30000-32768):

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925162137.png)

**解决方案2:** 若担心直接放开整个 NodePort 区间所有端口有安全风险,可以只暴露 service 所用到的 NodePort (比较麻烦)。

**解决方案3:** 若只允许固定 IP 段的 client 访问 ingressgateway,可以只对这个 IP 段放开整个 NodePort 区间所有端口。

**解决方案4:** 启用 CLB 直通 Pod,这样流量就不经过 NodePort,所以就没有此安全组问题。启用 CLB 直通 Pod 需要集群网络支持 VPC-CNI,详细请参考 [如何启用 CLB 直通 Pod](https://imroc.cc/k8s/tke/faq/loadblancer-to-pod-directly/) 。

### 使用了 ClusterIP 类型 Service

如果使用 TKE 默认的 CLB Ingress 暴露服务,依赖后端 Service 要有 NodePort,如果 Service 是 ClusterIP 类型,将无法转发,也就不通。

**解决方案1**: Ingress 涉及的后端 Service 改为 NodePort 类型。

**解决方案2:** 不使用 TKE 默认的 CLB Ingress,改用其它类型 Ingress,比如 [Nginx Ingress](https://cloud.tencent.com/document/product/457/50502) 。
diff --git a/content/trick/certs/sign-certs-with-cfssl.md b/content/trick/certs/sign-certs-with-cfssl.md
new file mode 100644
index 0000000..5ee2744
--- /dev/null
+++ b/content/trick/certs/sign-certs-with-cfssl.md
@@ -0,0 +1,120 @@
+# 使用 cfssl 生成证书

搭建各种云原生环境的过程中,经常需要生成证书,比如最常见的 etcd,本文记录使用 cfssl 快速生成证书的方法。

## 安装 cfssl

**方法1**: 去 [release](https://github.com/cloudflare/cfssl/releases) 页面下载,然后解压安装。

**方法2**: 使用 go install 安装:

```bash
go install github.com/cloudflare/cfssl/cmd/cfssl@latest
go install github.com/cloudflare/cfssl/cmd/cfssljson@latest
```

## 创建 CA 证书

由于各个组件都需要配置证书,并且依赖 CA 证书来签发证书,所以我们首先要生成好 CA 证书以及后续的签发配置文件:

``` bash
# 创建 CA 证书签名请求配置 (以下为常见的示例配置,字段可按需调整)
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF

# 创建签发配置文件
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF

# 生成 CA 证书 (ca.pem) 与密钥 (ca-key.pem)
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
```

> 由于这里是 CA 证书,是签发其它证书的根证书,这个证书密钥不会分发出去作为 client 证书,所有组件使用的 client 证书都是由 CA 证书签发而来,所以 CA 证书的 CN 和 O 的名称并不重要,后续其它签发出来的证书的 CN 和 O 的名称才是有用的。

## 为 ETCD 签发证书

这里证书可以只创建一次,所有 etcd 实例都共用这里创建的证书:

``` bash
# 创建 etcd 证书签名请求配置 (hosts 为示例,替换为实际访问 etcd 用到的地址)
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "localhost",
    "*.etcd.example.com"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF

# 用 CA 证书为 etcd 签发证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
```

> hosts 需要包含 etcd 被访问时用到的地址,可以用 IP、域名或泛域名。

会生成下面两个重要的文件:

* `etcd-key.pem`: etcd 密钥。
* `etcd.pem`: etcd 证书。
diff --git a/content/trick/certs/sign-free-certs-for-dnspod.md b/content/trick/certs/sign-free-certs-for-dnspod.md
new file mode 100644
index 0000000..b87529f
--- /dev/null
+++ b/content/trick/certs/sign-free-certs-for-dnspod.md
@@ -0,0 +1,156 @@
+# 为 dnspod 的域名签发免费证书

如果你的域名使用 [DNSPod](https://docs.dnspod.cn/) 管理,想在 Kubernetes 上为域名自动签发免费证书,可以使用 cert-manager 来实现。

cert-manager 支持许多 dns provider,但不支持国内的 dnspod,不过 cert-manager 提供了 [Webhook](https://cert-manager.io/docs/concepts/webhook/) 机制来扩展 provider,社区也有 dnspod 的 provider 实现,但没怎么维护了。

本文将介绍如何结合 cert-manager 与本人开发的 
[cert-manager-webhook-dnspod](https://github.com/imroc/cert-manager-webhook-dnspod) 来实现为 dnspod 上的域名自动签发免费证书,支持最新 cert-manager,接入腾讯云API密钥(dnspod 官方推荐方式,不用 `apiID` 和 `apiToken`)。 + +## 基础知识 + +推荐先阅读 [使用 cert-manager 签发免费证书](sign-free-certs-with-cert-manager.md) 。 + +## 创建腾讯云 API 密钥 + +登录腾讯云控制台,在 [API密钥管理](https://console.cloud.tencent.com/cam/capi) 中新建密钥,然后复制自动生成的 `SecretId` 和 `SecretKey` 并保存下来,以备后面的步骤使用。 + +## 安装 cert-manager-webhook-dnspod + +阅读了前面推荐的文章,假设集群中已经安装了 cert-manager,下面使用 helm 来安装下 cert-manager-webhook-dnspod 。 + +首先准备下 helm 配置文件 (`dnspod-webhook-values.yaml`): + +```yaml +clusterIssuer: + enabled: true + name: dnspod # 自动创建的 ClusterIssuer 名称 + ttl: 600 + staging: false + secretId: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' # 替换成你的 SecretId + secretKey: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' # 替换成你的 SecretKey + email: roc@imroc.cc # 用于接收证书过期的邮件告警。如果cert-manager和webhook都正常工作,证书会自动续期不会过期 + +``` + +> 完整配置见 [values.yaml](https://github.com/imroc/cert-manager-webhook-dnspod/blob/master/charts/values.yaml) + +然后使用 helm 进行安装: + +```bash +helm repo add roc https://charts.imroc.cc +helm upgrade --install -f dnspod-webhook-values.yaml cert-manager-webhook-dnspod roc/cert-manager-webhook-dnspod -n cert-manager +``` + +## 创建证书 + +创建 `Certificate` 对象来签发免费证书: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: example-crt + namespace: istio-system +spec: + secretName: example-crt-secret # 证书保存在这个 secret 中 + issuerRef: + name: dnspod # 这里使用自动生成出来的 ClusterIssuer + kind: ClusterIssuer + group: cert-manager.io + dnsNames: # 填入需要签发证书的域名列表,支持泛域名,确保域名是使用 dnspod 管理的 + - "example.com" + - "*.example.com" +``` + +等待状态变成 Ready 表示签发成功: + +```bash +$ kubectl -n istio-system get certificates.cert-manager.io +NAME READY SECRET AGE +example-crt True example-crt-secret 25d +``` + +若签发失败可 describe 一下看下原因: + +```bash +kubectl -n istio-system describe certificates.cert-manager.io example-crt +``` + +## 使用证书 + +证书签发成功后会保存到我们指定的 secret 中,下面给出一些使用示例。 + +在 ingress 中使用: + +```yaml +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: test-ingress + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: test.example.com + http: + paths: + - path: / + backend: + serviceName: web + servicePort: 80 + tls: + hosts: + - test.example.com + secretName: example-crt-secret # 引用证书 secret +``` + +在 istio 的 ingressgateway 中使用: + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: example-gw + namespace: istio-system +spec: + selector: + app: istio-ingressgateway + istio: ingressgateway + servers: + - port: + number: 80 + name: HTTP-80 + protocol: HTTP + hosts: + - example.com + - "*.example.com" + tls: + httpsRedirect: true # http 重定向 https (强制 https) + - port: + number: 443 + name: HTTPS-443 + protocol: HTTPS + hosts: + - example.com + - "*.example.com" + tls: + mode: SIMPLE + credentialName: example-crt-secret # 引用证书 secret +--- +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: example-vs + namespace: test +spec: + gateways: + - istio-system/example-gw # 转发规则绑定到 ingressgateway,将服务暴露出去 + hosts: + - 'test.example.com' + http: + - route: + - destination: + host: example + port: + number: 80 +``` \ No newline at end of file diff --git a/content/trick/certs/sign-free-certs-with-cert-manager.md b/content/trick/certs/sign-free-certs-with-cert-manager.md new file mode 100644 index 0000000..98fc263 --- /dev/null +++ b/content/trick/certs/sign-free-certs-with-cert-manager.md @@ -0,0 +1,243 @@ +# 使用 cert-manager 
签发免费证书

随着 HTTPS 不断普及,越来越多的网站都在从 HTTP 升级到 HTTPS,使用 HTTPS 就需要向权威机构申请证书,需要付出一定的成本,如果需求数量多,也是一笔不小的开支。cert-manager 是 Kubernetes 上的全能证书管理工具,如果对安全级别和证书功能要求不高,可以利用 cert-manager 基于 [ACME](https://tools.ietf.org/html/rfc8555) 协议与 [Let's Encrypt](https://letsencrypt.org/) 来签发免费证书并自动续期,实现永久免费使用证书。

## cert-manager 工作原理

cert-manager 部署到 Kubernetes 集群后,它会 watch 它所支持的 CRD 资源,我们通过创建 CRD 资源来指示 cert-manager 为我们签发证书并自动续期:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023/09/25/cert-manager.svg)

解释下几个关键的资源:

* Issuer/ClusterIssuer: 用于指示 cert-manager 用什么方式签发证书,本文主要讲解签发免费证书的 ACME 方式。ClusterIssuer 与 Issuer 的唯一区别就是 Issuer 只能用来签发自己所在 namespace 下的证书,ClusterIssuer 可以签发任意 namespace 下的证书。
* Certificate: 用于告诉 cert-manager 我们想要什么域名的证书以及签发证书所需要的一些配置,包括对 Issuer/ClusterIssuer 的引用。

## 免费证书签发原理

Let’s Encrypt 利用 ACME 协议来校验域名是否真的属于你,校验成功后就可以自动颁发免费证书,证书有效期只有 90 天,在到期前需要再校验一次来实现续期,幸运的是 cert-manager 可以自动续期,这样就可以使用永久免费的证书了。如何校验这个域名是否属于你呢?主流的两种校验方式是 HTTP-01 和 DNS-01,详细校验原理可参考 [Let's Encrypt 的运作方式](https://letsencrypt.org/zh-cn/how-it-works/),下面将简单描述下。

### HTTP-01 校验原理

HTTP-01 的校验原理是给你域名指向的 HTTP 服务增加一个临时 location,Let’s Encrypt 会发送 http 请求到 `http://<YOUR_DOMAIN>/.well-known/acme-challenge/<TOKEN>`,`YOUR_DOMAIN` 就是被校验的域名,`TOKEN` 是 ACME 协议的客户端负责放置的文件,在这里 ACME 客户端就是 cert-manager,它通过修改或创建 Ingress 规则来增加这个临时校验路径并指向提供 `TOKEN` 的服务。Let’s Encrypt 会对比 `TOKEN` 是否符合预期,校验成功后就会颁发证书。此方法仅适用于给使用 Ingress 暴露流量的服务颁发证书,并且不支持泛域名证书。

### DNS-01 校验原理

DNS-01 的校验原理是利用 DNS 提供商的 API Key 拿到你的 DNS 控制权限, 在 Let’s Encrypt 为 ACME 客户端提供令牌后,ACME 客户端 (cert-manager) 将创建从该令牌和您的帐户密钥派生的 TXT 记录,并将该记录放在 `_acme-challenge.<YOUR_DOMAIN>`。 然后 Let’s Encrypt 将向 DNS 系统查询该记录,如果找到匹配项,就可以颁发证书。此方法不需要你的服务使用 Ingress,并且支持泛域名证书。

## 校验方式对比

HTTP-01 的校验方式的优点是: 配置简单通用,不管使用哪个 DNS 提供商都可以使用相同的配置方法;缺点是: 需要依赖 Ingress,如果你的服务不是用 Ingress 暴露流量的就不适用,而且不支持泛域名证书。

DNS-01 的校验方式的优点是没有 HTTP-01 校验方式的缺点,不依赖 Ingress,也支持泛域名;缺点就是不同 DNS 提供商的配置方式不一样,而且 DNS 提供商有很多,cert-manager 的 Issuer 不可能每个都去支持,不过有一些可以通过部署实现了 cert-manager 的 [Webhook](https://cert-manager.io/docs/concepts/webhook/) 的服务来扩展 Issuer 进行支持,比如 DNSPod 和阿里 DNS,详细 Webhook 列表请参考: https://cert-manager.io/docs/configuration/acme/dns01/#webhook

选择哪种方式呢?条件允许的话,建议是尽量用 `DNS-01` 的方式,限制更少,功能更全。

## 操作步骤

### 安装 cert-manager

通常直接使用 yaml 方式一键安装 cert-manager 到集群,参考官网文档 [Installing with regular manifests](https://cert-manager.io/docs/installation/kubernetes/#installing-with-regular-manifests) 。

### 配置 DNS

登录你的 DNS 提供商后台,配置域名的 DNS A 记录,指向你需要证书的后端服务对外暴露的 IP 地址,以 cloudflare 为例:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925141024.png)

### HTTP-01 校验方式签发证书

如果使用 HTTP-01 的校验方式,需要用到 Ingress 来配合校验。cert-manager 会通过自动修改 Ingress 规则或自动新增 Ingress 两种方式之一来实现对外暴露校验所需的临时 HTTP 路径,这也就是给 Issuer 配置 http01 校验时,指定 Ingress 的 `name` 或 `class` 的区别 (见下面的示例)。

```yaml
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: letsencrypt-http01
  namespace: prod
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: letsencrypt-http01-account-key
    solvers:
    - http01:
        ingress:
          name: web # 指定被自动修改的 Ingress 名称
```

使用上面的 Issuer 签发证书,cert-manager 会自动修改 `prod/web` 这个 Ingress 资源,以暴露校验所需的临时路径,这是自动修改 Ingress 的方式,你也可以使用自动新增 Ingress 的方式,示例:

```yaml
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: letsencrypt-http01
  namespace: prod
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: letsencrypt-http01-account-key
    solvers:
    - http01:
        ingress:
          class: nginx # 指定自动创建的 Ingress 的 
ingress class +``` + +使用上面的 Issuer 签发证书,cert-manager 会自动创建 Ingress 资源,以暴露校验所需的临时路径。 + +有了 Issuer,接下来就可以创建 Certificate 并引用 Issuer 进行签发了,示例: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: test-mydomain-com + namespace: prod +spec: + dnsNames: + - test.mydomain.com # 要签发证书的域名 + issuerRef: + kind: Issuer + name: letsencrypt-http01 # 引用 Issuer,指示采用 http01 方式进行校验 + secretName: test-mydomain-com-tls # 最终签发出来的证书会保存在这个 Secret 里面 +``` + +### DNS-01 校验方式签发证书 + +如果使用 DNS-01 的校验方式,就需要看你使用的哪个 DNS 提供商了,cert-manager 内置了一些 DNS 提供商的支持,详细列表和用法请参考 [Supported DNS01 providers](https://cert-manager.io/docs/configuration/acme/dns01/#supported-dns01-providers),不过 cert-manager 不可能去支持所有的 DNS 提供商,如果没有你所使用的 DNS 提供商怎么办呢?有两种方案: + +* 方案一:设置 Custom Nameserver。在你的 DNS 提供商后台设置 custom nameserver,指向像 cloudflare 这种可以管理其它 DNS 提供商域名的 nameserver 地址,具体地址可登录 cloudflare 后台查看: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925141043.png) + + 下面是 namecheap 设置 custom nameserver 的示例: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925141059.png) + + 最后配置 Issuer 指定 DNS-01 验证时,加上 cloudflare 的一些信息即可(见下文示例)。 + +* 方案二:使用 Webhook。使用 cert-manager 的 Webhook 来扩展 cert-manager 的 DNS-01 验证所支持的 DNS 提供商,已经有许多第三方实现,包括国内常用的 DNSPod 与阿里 DNS,详细列表参考: [Webhook](https://cert-manager.io/docs/configuration/acme/dns01/#webhook)。 + +下面以 cloudflare 为例来签发证书: + +1. 登录 cloudflare,点到 `My Profile > API Tokens > Create Token` 来创建 Token: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925141115.png) + + 复制 Token 并妥善保管: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925141130.png) + + 将 Token 保存到 Secret 中: + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: cloudflare-api-token-secret + namespace: cert-manager + type: Opaque + stringData: + api-token: # 粘贴 Token 到这里,不需要 base64 加密。 + ``` + + > 如果是要创建 ClusterIssuer,Secret 需要创建在 cert-manager 所在命名空间中,如果是 Issuer,那就创建在 Issuer 所在命名空间中。 + + 创建 ClusterIssuer: + + ```yaml + apiVersion: cert-manager.io/v1 + kind: ClusterIssuer + metadata: + name: letsencrypt-dns01 + spec: + acme: + privateKeySecretRef: + name: letsencrypt-dns01 + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + - dns01: + cloudflare: + email: my-cloudflare-acc@example.com # 替换成你的 cloudflare 邮箱账号,API Token 方式认证非必需,API Keys 认证是必需 + apiTokenSecretRef: + key: api-token + name: cloudflare-api-token-secret # 引用保存 cloudflare 认证信息的 Secret + ``` + + 创建 Certificate: + + ```yaml + apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + name: test-mydomain-com + namespace: default + spec: + dnsNames: + - test.mydomain.com # 要签发证书的域名 + issuerRef: + kind: ClusterIssuer + name: letsencrypt-dns01 # 引用 ClusterIssuer,指示采用 dns01 方式进行校验 + secretName: test-mydomain-com-tls # 最终签发出来的证书会保存在这个 Secret 里面 + ``` + +### 获取和使用证书 + +创建好 Certificate 后,等一小会儿,我们可以 kubectl 查看是否签发成功: + +```bash +$ kubectl get certificate -n prod +NAME READY SECRET AGE +test-mydomain-com True test-mydomain-com-tls 1m +``` + +如果 `READY` 为 `False` 表示失败,可以通过 describe 查看 event 来排查失败原因: + +```bash +$ kubectl describe certificate test-mydomain-com -n prod +``` + +如果为 `True` 表示签发成功,证书就保存在我们所指定的 Secret 中 (上面的例子是 `default/test-mydomain-com-tls`),可以通过 kubectl 查看: + +```bash +$ kubectl get secret test-mydomain-com-tls -n default +... 
data:
  tls.crt: <base64 编码的证书内容>
  tls.key: <base64 编码的密钥内容>
```

其中 `tls.crt` 就是证书,`tls.key` 是密钥。

你可以将它们挂载到你需要证书的应用中,或者使用 Ingress,可以直接在 Ingress 中引用 secret,示例:

```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: test-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
  - host: test.mydomain.com
    http:
      paths:
      - path: /web
        backend:
          serviceName: web
          servicePort: 80
  tls:
    hosts:
    - test.mydomain.com
    secretName: test-mydomain-com-tls
```

## 参考资料

* [cert-manager 官网](https://cert-manager.io/)
* [Let's Encrypt 的运作方式](https://letsencrypt.org/zh-cn/how-it-works/)
* [Issuer API 文档](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.Issuer)
* [Certificate API 文档](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.Certificate)
diff --git a/content/trick/deploy/set-sysctl.md b/content/trick/deploy/set-sysctl.md
new file mode 100644
index 0000000..ee9c0ca
--- /dev/null
+++ b/content/trick/deploy/set-sysctl.md
@@ -0,0 +1,74 @@
+# 为 Pod 设置内核参数

本文介绍为 Pod 设置内核参数的几种方式。

## 在 securityContext 中指定 sysctls

自 k8s 1.12 起,[sysctls](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/) 特性进入 beta 并默认开启,允许用户在 pod 的 `securityContext` 中设置内核参数,用法示例:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sysctl-example
spec:
  securityContext:
    sysctls:
    - name: net.core.somaxconn
      value: "1024"
  ...
```

不过使用该方法,默认情况下有些认为是 unsafe 的参数是不能改的,需要将其配到 kubelet 的 `--allowed-unsafe-sysctls` 中才可以用。

## 使用 initContainers

如果希望设置内核参数更简单通用,可以在 initContainer 中设置,不过这个要求给 initContainer 打开 `privileged` 权限。示例:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sysctl-example-init
spec:
  initContainers:
  - image: busybox
    command:
    - sh
    - -c
    - |
      sysctl -w net.core.somaxconn=65535
      sysctl -w net.ipv4.ip_local_port_range="1024 65535"
      sysctl -w net.ipv4.tcp_tw_reuse=1
      sysctl -w fs.file-max=1048576
    imagePullPolicy: Always
    name: setsysctl
    securityContext:
      privileged: true
  containers:
  ...
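  # 此处省略业务容器定义;上面的 initContainer 会在业务容器启动前设置好内核参数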
```

> 这里用了 privileged 容器,只是为了让这个 container 有权限修改当前容器网络命名空间中的内核参数,只要 Pod 没使用 hostNetwork,内核参数的修改是不会影响 Node 上的内核参数的,两者是隔离的,所以不需要担心会影响 Node 上其它 Pod 的内核参数 (hostNetwork 的 Pod 就不要在 Pod 上修改内核参数了)。

## 使用 tuning CNI 插件统一设置 sysctl

如果想要为所有 Pod 统一配置某些内核参数,可以使用 [tuning](https://github.com/containernetworking/plugins/tree/master/plugins/meta/tuning) 这个 CNI 插件来做:

```json
{
  "name": "mytuning",
  "type": "tuning",
  "sysctl": {
    "net.core.somaxconn": "500",
    "net.ipv4.tcp_tw_reuse": "1"
  }
}
```

## 参考资料

* [Using sysctls in a Kubernetes Cluster](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)
* [tuning 插件文档](https://www.cni.dev/plugins/current/meta/tuning/)
diff --git a/content/trick/images/podman.md b/content/trick/images/podman.md
new file mode 100644
index 0000000..0b2da22
--- /dev/null
+++ b/content/trick/images/podman.md
@@ -0,0 +1,62 @@
+# 使用 Podman 构建镜像

## 概述

[Podman](https://podman.io/) 是一个类似 docker 的工具,可以运行容器,也可以构建镜像,甚至可以像 docker 一样支持构建多平台镜像。如今 Docker Desktop 已经宣布收费,可以考虑使用 Podman 来替代。

## 安装

参考 [官方安装文档](https://podman.io/getting-started/installation),我使用的是 Mac,安装很简单:

```bash
brew install podman
```

由于 podman 是基于 Linux 的,在 Mac 上安装后需要先启动它的虚拟机:

```bash
podman machine init
podman machine start
```

最后检查下是否 ok:

```bash
podman info
```

## Podman 构建镜像的背后

Podman 构建镜像在背后实际是利用了 [Buildah](https://buildah.io/) 这个工具去构建,只是封装了一层,更容易使用了。

## Podman 构建镜像的方法

`podman build` 基本兼容 `docker build`,所以你可以像使用 docker 一样去使用 podman 构建镜像。

## FAQ

### 未启动虚拟机导致报错

执行 podman 命令时,遇到 `connect: no such file or directory` 的报错:

```bash
$ podman build --platform=linux/amd64 . -t imroc/crontab:centos -f centos.Dockerfile
Cannot connect to Podman. Please verify your connection to the Linux system using `podman system connection list`, or try `podman machine init` and `podman machine start` to manage a new Linux VM
Error: unable to connect to Podman socket: Get "http://d/v4.0.2/libpod/_ping": dial unix ///var/folders/91/dsfxsd7j28z2mxl7vm91mjg40000gn/T/podman-run--1/podman/podman.sock: connect: no such file or directory
```

通常是因为在非 Linux 的系统上,没有启动 podman linux 虚拟机导致的,启动下就可以了。

### 代理导致拉取镜像失败

使用 podman 构建镜像或直接拉取镜像的过程中,遇到这种报错:

```txt
Error: error creating build container: initializing source docker://centos:8: pinging container registry registry-1.docker.io: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp 127.0.0.1:12639: connect: connection refused
```

通常是因为启动 podman 虚拟机时,终端上有 HTTP 代理的环境变量,可以销毁虚拟机,重新启动,启动前确保当前终端没有 HTTP 代理的环境变量。

## 参考资料

* [Migrating from Docker to Podman](https://marcusnoble.co.uk/2021-09-01-migrating-from-docker-to-podman/)
\ No newline at end of file
diff --git a/content/trick/images/sync-images-with-skopeo.md b/content/trick/images/sync-images-with-skopeo.md
new file mode 100644
index 0000000..0f964be
--- /dev/null
+++ b/content/trick/images/sync-images-with-skopeo.md
@@ -0,0 +1,81 @@
+# 使用 skopeo 批量同步 helm chart 依赖镜像

## skopeo 是什么?

[skopeo](https://github.com/containers/skopeo) 是一个开源的容器镜像搬运工具,比较通用,各种镜像仓库都支持。

## 安装 skopeo

参考官方的 [安装指引](https://github.com/containers/skopeo/blob/main/install.md)。

## 导出当前 helm 配置依赖哪些镜像

```bash
$ helm template -n monitoring -f kube-prometheus-stack.yaml ./kube-prometheus-stack | grep "image:" | awk -F 'image:' '{print $2}' | awk '{$1=$1;print}' | sed -e 's/^"//' -e 's/"$//' > images.txt
$ cat images.txt
quay.io/prometheus/node-exporter:v1.3.1
quay.io/kiwigrid/k8s-sidecar:1.19.2
quay.io/kiwigrid/k8s-sidecar:1.19.2
grafana/grafana:9.0.2
registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.5.0
quay.io/prometheus-operator/prometheus-operator:v0.57.0
quay.io/prometheus/alertmanager:v0.24.0
quay.io/prometheus/prometheus:v2.36.1
bats/bats:v1.4.1
k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1
k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1
```

* 使用 helm template 渲染 yaml,利用脚本导出所有依赖的容器镜像并记录到 `images.txt`。
* 可以检查下 `images.txt` 中哪些不需要同步,删除掉。

## 准备同步脚本

准备同步脚本(`sync.sh`):

```bash
#! /bin/bash

DST_IMAGE_REPO="registry.imroc.cc/prometheus"

cat images.txt | while read line
do
    while :
    do
        skopeo sync --src=docker --dest=docker $line $DST_IMAGE_REPO
        if [ "$?" == "0" ]; then
            break
        fi
    done
done
```

* 修改 `DST_IMAGE_REPO` 为你要同步的目标仓库地址与路径,`images.txt` 中的镜像都会被同步到这个仓库路径下面。

赋予脚本执行权限:

```bash
chmod +x sync.sh
```

## 登录仓库

同步镜像时,不管是源还是目的,涉及到私有镜像,都需要先登录,不然同步会报错。

登录方法很简单,跟 `docker login` 一样,指定要登录的镜像仓库地址:

```bash
skopeo login registry.imroc.cc
```

然后输入用户名密码即可。

## 执行同步

最后执行 `./sync.sh` 即可将所有镜像一键同步到目标仓库中,中途如果失败会一直重试直到成功。

## FAQ

### 为什么不用 skopeo 配置文件方式批量同步?

因为配置相对复杂和麻烦,不如直接用一个列表文本,每行代表一个镜像,通过脚本读取每一行分别进行同步,这样更简单。
\ No newline at end of file
diff --git a/content/trick/kubectl/kubectl-aliases.md b/content/trick/kubectl/kubectl-aliases.md
new file mode 100644
index 0000000..95056bc
--- /dev/null
+++ b/content/trick/kubectl/kubectl-aliases.md
@@ -0,0 +1,35 @@
+# 使用 kubectl-aliases 缩短命令

日常使用 kubectl 进行各种操作,每次输入完整命令会比较浪费时间,推荐使用 [kubectl-aliases](https://github.com/ahmetb/kubectl-aliases) 来提升 kubectl 日常操作效率,敲更少的字符完成更多的事。

## 安装 kubectl-aliases

参考 [官方安装文档](https://github.com/ahmetb/kubectl-aliases#installation)。

## 查看完整列表

```bash
cat ~/.kubectl_aliases
```

## 高频使用的别名

```bash
ka // kubectl apply --recursive -f
kg // kubectl get
kgpo // kubectl get pods
ksys // kubectl -n kube-system
ksysgpo // kubectl -n kube-system get pods
kd // kubectl describe
kdpo // kubectl describe pod
```

## 自定义

建议针对自己常用的操作设置下别名,比如经常操作 istio 的话,可以用 `ki` 来代替 `kubectl -n istio-system`。

编辑 `~/.kubectl_aliases`:

```bash
alias ki='kubectl -n istio-system'
```
diff --git a/content/trick/kubectl/merge-kubeconfig-with-kubecm.md b/content/trick/kubectl/merge-kubeconfig-with-kubecm.md
new file mode 100644
index 0000000..abf7958
--- /dev/null
+++ b/content/trick/kubectl/merge-kubeconfig-with-kubecm.md
@@ -0,0 +1,42 @@
+# 使用 kubecm 合并 kubeconfig

Kubernetes 提供了 kubectl 命令行工具来操作集群,使用 kubeconfig 作为配置文件,默认路径是 `~/.kube/config`,如果想使用 kubectl 对多个集群进行管理和操作,就在 kubeconfig 中配置多个集群的信息即可,通常可以通过编辑 kubeconfig 文件或执行一堆 `kubectl config` 的命令来实现。

一般情况下,Kubernetes 集群在安装或创建好之后,都会生成 kubeconfig 文件,如何简单高效地将这些 kubeconfig 合并,以便让我们通过一个 kubeconfig 就能方便地管理多集群呢?我们可以借助 [kubecm](https://github.com/sunny0826/kubecm) 这个工具,本文将介绍如何利用 `kubecm` 来实现多集群的 kubeconfig 高效管理。

## 安装 kubecm

首先需要在管理多集群的机器上安装 `kubecm`,安装方法参考 [官方文档](https://kubecm.cloud/#/zh-cn/install) 。

## 使用 kubecm 添加访问凭证到 kubeconfig

首先拿到你集群的 kubeconfig 文件,将其重命名为你想指定的 
context 名称,然后通过下面的命令将 kubeconfig 信息合并到 `~/.kube/config`:

``` bash
kubecm add --context-name=dev -cf config.yaml
```

* `dev` 替换为希望导入后的 context 名称。
* `config.yaml` 替换为 kubeconfig 文件名。

## 查看集群列表

通过 `kubecm` 添加了要管理和操作的集群后,通过 `kubecm ls` 可查看 kubeconfig 中的集群列表 (星号标识的是当前操作的集群):


![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925135859.png)

## 切换集群

当想要切换到其它集群操作时,可使用 `kubecm switch` 进行交互式切换:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925135910.png)


不过还是推荐使用 kubectx 进行切换。

## 移除集群

如果想要移除某个集群,可以用 `kubecm delete <context 名称>`:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925135920.png)
diff --git a/content/trick/kubectl/quick-switch-with-kubectx.md b/content/trick/kubectl/quick-switch-with-kubectx.md
new file mode 100644
index 0000000..2884425
--- /dev/null
+++ b/content/trick/kubectl/quick-switch-with-kubectx.md
@@ -0,0 +1,41 @@
+# 使用 kubectx 和 kubens 快速切换

推荐使用 `kubectx` 和 `kubens` 来在多个集群和命名空间之间快速切换。

## 项目地址

这两个工具都在同一个项目中: [https://github.com/ahmetb/kubectx](https://github.com/ahmetb/kubectx)

## 安装

参考 [官方安装文档](https://github.com/ahmetb/kubectx#installation)。

推荐使用 kubectl 插件的方式安装:

```bash
kubectl krew install ctx
kubectl krew install ns
```

> 如果没安装 [krew](https://krew.sigs.k8s.io/),需提前安装下,参考 [krew 安装文档](https://krew.sigs.k8s.io/docs/user-guide/setup/install/)。

## 使用

插件方式安装后,使用如下命令切换集群:

```bash
kubectl ctx [CLUSTER]
```

切换命名空间:

```bash
kubectl ns [NAMESPACE]
```

推荐结合 [使用 kubectl 别名快速执行命令](./kubectl-aliases.md) 来缩短命令:

```bash
k ctx [CLUSTER]
k ns [NAMESPACE]
```
\ No newline at end of file
diff --git a/content/trick/user-and-permissions/create-user-using-csr-api.md b/content/trick/user-and-permissions/create-user-using-csr-api.md
new file mode 100644
index 0000000..253adaa
--- /dev/null
+++ b/content/trick/user-and-permissions/create-user-using-csr-api.md
@@ -0,0 +1,114 @@
+# 使用 CSR API 创建用户

k8s 支持 CSR API,通过创建 `CertificateSigningRequest` 资源就可以发起 CSR 请求,管理员审批通过之后 `kube-controller-manager` 就会为我们签发证书,确保 `kube-controller-manager` 配了根证书密钥对:

``` bash
--cluster-signing-cert-file=/var/lib/kubernetes/ca.pem
--cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem
```

## 安装 cfssl

我们用 cfssl 来创建 key 和 csr 文件,所以需要先安装 cfssl:

``` bash
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o cfssl-certinfo

chmod +x cfssl cfssljson cfssl-certinfo
sudo mv cfssl cfssljson cfssl-certinfo /usr/local/bin/
```

> 更多 cfssl 详情参考: [使用 cfssl 生成证书](../certs/sign-certs-with-cfssl.md)。

## 创建步骤

指定要创建的用户名:

``` bash
USERNAME="roc"
```

再创建 key 和 csr 文件:

``` bash
# 创建证书签名请求配置 (CN 为用户名;此处为按标准流程补全的示例配置)
cat > ${USERNAME}-csr.json <<EOF
{
  "CN": "${USERNAME}",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF

# 生成密钥 (${USERNAME}-key.pem) 与 csr 文件 (${USERNAME}.csr)
cfssl genkey ${USERNAME}-csr.json | cfssljson -bare ${USERNAME}
```

然后通过 CSR API 发起签发请求,管理员审批通过后导出签发的证书:

``` bash
# 创建 CertificateSigningRequest 资源发起 CSR 请求
cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: ${USERNAME}
spec:
  request: $(base64 < ${USERNAME}.csr | tr -d '\n')
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
EOF

# 管理员审批通过
kubectl certificate approve ${USERNAME}

# 导出签发出来的证书
kubectl get csr ${USERNAME} -o jsonpath='{.status.certificate}' | base64 -d > ${USERNAME}.pem
```

得到证书文件:

```
roc.pem
```

至此,我们已经创建好了用户,用户的证书密钥对文件:

```
roc.pem
roc-key.pem
```

## 配置 kubeconfig

``` bash
# 增加 user
kubectl config set-credentials ${USERNAME} --embed-certs=true --client-certificate=${USERNAME}.pem --client-key=${USERNAME}-key.pem

# 如果还没配 cluster,可以通过下面命令配一下
kubectl config set-cluster <cluster-name> --server=<apiserver 地址> --certificate-authority=<ca 证书文件>

# 增加 context,绑定 cluster 和 user
kubectl config set-context <context-name> --cluster=<cluster-name> --user=${USERNAME}

# 使用刚增加的 context
kubectl config use-context <context-name>
```
\ No newline at end of file
diff --git a/content/troubleshooting/cases/cluster/delete-rancher-ns-causing-node-disappear.md 
b/content/troubleshooting/cases/cluster/delete-rancher-ns-causing-node-disappear.md new file mode 100644 index 0000000..b001fb4 --- /dev/null +++ b/content/troubleshooting/cases/cluster/delete-rancher-ns-causing-node-disappear.md @@ -0,0 +1,76 @@ +# 误删 rancher 的 namespace 导致 node 被清空 + +## 问题描述 + +集群的节点突然全都不见了 (`kubectl get node` 为空),导致集群瘫痪,但实际上节点对应的虚拟机都还在。因为集群没开审计,所以也不太好查 node 是被什么删除的。 + +## 快速恢复 + +由于只是 k8s node 资源被删除,实际的机器都还在,我们可以批量重启节点,自动拉起 kubelet 重新注册 node,即可恢复。 + +## 可疑操作 + +发现在节点消失前,有个可疑的操作: 有同学发现在另外一个集群里有许多乱七八糟的 namespace (比如 `c-dxkxf`),查看这些 namespace 中没有运行任何工作负载,可能是其它人之前创建的测试 namespace,就将其删除掉了。 + +## 分析 + +删除 namespace 的集群中安装了 rancher,怀疑被删除的 namespace 是 rancher 自动创建的。 + +rancher 管理了其它 k8s 集群,架构图: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152844.png) + +猜想: 删除的 namespace 是 rancher 创建的,删除时清理了 rancher 的资源,也触发了 rancher 清理 node 的逻辑。 + +## 模拟复现 + +尝试模拟复现,验证猜想: +1. 创建一个 k8s 集群,作为 rancher 的 root cluster,并将 rancher 安装进去。 +2. 进入 rancher web 界面,创建一个 cluster,使用 import 方式: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152855.png) + +3. 输入 cluster name: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152905.png) + +4. 弹出提示,让在另一个集群执行下面的 kubectl 命令将其导入到 rancher: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152916.png) + +5. 创建另一个 k8s 集群作为被 rancher 管理的集群,并将 kubeconfig 导入本地以便后续使用 kubectl 操作。 +6. 导入 kubeconfig 并切换 context 后,执行 rancher 提供的 kubectl 命令将集群导入 rancher: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152928.png) + + 可以看到在被管理的 TKE 集群中自动创建了 cattle-system 命名空间,并运行一些 rancher 的 agent: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152939.png) + +7. 将 context 切换到安装 rancher 的集群 (root cluster),可以发现添加集群后,自动创建了一些 namespace: 1 个 `c-` 开头的,2 个 `p-` 开头的: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152949.png) + + 猜想是 `c-` 开头的 namespace 被 rancher 用来存储所添加的 `cluster` 的相关信息;`p-` 用于存储 `project` 相关的信息,官方也说了会自动为每个 cluster 创建 2 个 project: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152959.png) + +8. 查看有哪些 rancher 的 crd,有个 `nodes.management.cattle.io` 比较显眼,明显用于存储 cluster 的 node 信息: + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153008.png) + +9. 看下 node 存储在哪个 namespace (果然在 `c-` 开头的 namespace 中): + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153022.png) + +10. 
尝试删除 `c-` 开头的 namespace,并切换 context 到被添加的集群,执行 `kubectl get node`:

   ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153038.png)

   节点被清空,问题复现。

## 结论

实验证明,rancher 的 `c-` 开头的 namespace 保存了所添加集群的 node 信息,如果删除了这种 namespace,也就删除了其中所存储的 node 信息,rancher watch 到了就会自动删除所关联集群的 k8s node 资源。

所以,千万不要轻易去清理 rancher 创建的 namespace,rancher 将一些有状态信息直接存储到了 root cluster 中 (通过 CRD 资源),删除 namespace 可能造成很严重的后果。
diff --git a/content/troubleshooting/cases/cluster/kubectl-exec-or-logs-failed.md b/content/troubleshooting/cases/cluster/kubectl-exec-or-logs-failed.md
new file mode 100644
index 0000000..35f9caf
--- /dev/null
+++ b/content/troubleshooting/cases/cluster/kubectl-exec-or-logs-failed.md
@@ -0,0 +1,11 @@
+# kubectl 执行 exec 或 logs 失败

## 原因

通常是 `kube-apiserver` 到 `kubelet:10250` 之间的网络不通,10250 是 kubelet 提供接口的端口,`kubectl exec` 和 `kubectl logs` 的原理就是 apiserver 调 kubelet,kubelet 再调运行时 (比如 dockerd) 来实现的。

## 解决方案

保证 kubelet 10250 端口对 apiserver 放通。

检查防火墙、iptables 规则是否对 10250 端口或某些 IP 进行了拦截。
diff --git a/content/troubleshooting/cases/cluster/scheduler-snapshot-missing-causing-pod-pending.md b/content/troubleshooting/cases/cluster/scheduler-snapshot-missing-causing-pod-pending.md
new file mode 100644
index 0000000..9427cfb
--- /dev/null
+++ b/content/troubleshooting/cases/cluster/scheduler-snapshot-missing-causing-pod-pending.md
@@ -0,0 +1,173 @@
+# 调度器 cache 快照遗漏部分信息导致 pod pending

## 问题背景

新建一个如下的 k8s 集群,有3个master node和1个worker node(worker 和 master在不同的可用区),node信息如下:

| node | label信息 |
|:----|:----|
| master-01 | failure-domain.beta.kubernetes.io/region=sh,failure-domain.beta.kubernetes.io/zone=200002 |
| master-02 | failure-domain.beta.kubernetes.io/region=sh,failure-domain.beta.kubernetes.io/zone=200002 |
| master-03 | failure-domain.beta.kubernetes.io/region=sh,failure-domain.beta.kubernetes.io/zone=200002 |
| worker-node-01 | failure-domain.beta.kubernetes.io/region=sh,failure-domain.beta.kubernetes.io/zone=200004 |

待集群创建好之后,创建了一个daemonset对象,就出现了daemonset的某个pod一直卡住pending状态的现象。

现象如下:

```bash
$ kubectl get pod -o wide
NAME          READY   STATUS    RESTARTS   AGE   NODE
debug-4m8lc   1/1     Running   1          89m   master-01
debug-dn47c   0/1     Pending   0          89m   
debug-lkmfs   1/1     Running   1          89m   master-02
debug-qwdbc   1/1     Running   1          89m   worker-node-01
```

## 结论先行

k8s的调度器在调度某个pod时,会从调度器的内部cache中同步一份快照(snapshot),其中保存了pod可以调度的node信息。

上面问题(daemonset的某个pod实例卡在pending状态)发生的原因就是同步的过程发生了部分node信息丢失,导致了daemonset的部分pod实例无法调度到指定的节点上,出现了pending状态。

接下来是详细的排查过程。

## 日志排查

截图中出现的节点信息(来自用户线上集群):
* k8s master节点:ss-stg-ma-01、ss-stg-ma-02、ss-stg-ma-03
* k8s worker节点:ss-stg-test-01

1. 获取调度器的日志

这里首先是通过动态调大调度器的日志级别,比如,直接调大到`V(10)`,尝试获取一些相关日志。

当日志级别调大之后,有抓取到一些关键信息,信息如下:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153153.png)

* 解释一下,当调度某个pod时,有可能会进入到调度器的抢占`preempt`环节,而上面的日志就是出自于抢占环节。 集群中有4个节点(3个master node和1个worker node),但是日志中只显示了3个节点,缺少了一个master节点。所以,这里暂时怀疑下是调度器内部缓存cache中少了`node info`。

2. 
获取调度器内部cache信息 + +k8s v1.18已经支持打印调度器内部的缓存cache信息。打印出来的调度器内部缓存cache信息如下: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153214.png) + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153224.png) + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153238.png) + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153305.png) + +可以看出,调度器的内部缓存cache中的`node info`是完整的(3个master node和1个worker node)。 + +通过分析日志,可以得到一个初步结论:调度器内部缓存cache中的`node info`是完整的,但是当调度pod时,缓存cache中又会缺少部分node信息。 + +## 问题根因 + +在进一步分析之前,我们先一起再熟悉下调度器调度pod的流程(部分展示)和nodeTree数据结构。 + +### **pod调度流程(部分展示)** + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153317.png) + +结合上图,一次pod的调度过程就是一次`Scheduler Cycle`。在这个`Cycle`开始时,第一步就是`update snapshot`。snapshot我们可以理解为cycle内的cache,其中保存了pod调度时所需的`node info`,而`update snapshot`,就是一次nodeTree(调度器内部cache中保存的node信息)到`snapshot`的同步过程。 + +而同步过程主要是通过`nodeTree.next()`函数来实现,函数逻辑如下: + +```go +// next returns the name of the next node. NodeTree iterates over zones and in each zone iterates +// over nodes in a round robin fashion. +func (nt *nodeTree) next() string { + if len(nt.zones) == 0 { + return "" + } + numExhaustedZones := 0 + for { + if nt.zoneIndex >= len(nt.zones) { + nt.zoneIndex = 0 + } + zone := nt.zones[nt.zoneIndex] + nt.zoneIndex++ + // We do not check the exhausted zones before calling next() on the zone. This ensures + // that if more nodes are added to a zone after it is exhausted, we iterate over the new nodes. + nodeName, exhausted := nt.tree[zone].next() + if exhausted { + numExhaustedZones++ + if numExhaustedZones >= len(nt.zones) { // all zones are exhausted. we should reset. + nt.resetExhausted() + } + } else { + return nodeName + } + } +} +``` + +再结合上面排查过程得出的结论,我们可以再进一步缩小问题范围:nodeTree(调度器内部cache)到`snapshot.nodeInfoList`的同步过程丢失了某个节点信息。 + +### nodeTree数据结构 + +(方便理解,本文使用了链表来展示) + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153338.png) + +### 重现问题,定位根因 + +创建k8s集群时,会先加入master node,然后再加入worker node(意思是worker node时间上会晚于master node加入集群的时间)。 + +第一轮同步:3台master node创建好,然后发生pod调度(比如,cni 插件,以daemonset的方式部署在集群中),会触发一次nodeTree(调度器内部cache)到`snapshot.nodeInfoList`的同步。同步之后,nodeTree的两个游标就变成了如下结果: + +`nodeTree.zoneIndex = 1, nodeTree.nodeArray[sh:200002].lastIndex = 3,` + +第二轮同步:当worker node加入集群中后,然后新建一个daemonset,就会触发第二轮的同步(nodeTree(调度器内部cache)到`snapshot.nodeInfoList`的同步)。 + +同步过程如下: + +1. zoneIndex=1, nodeArray[sh:200004].lastIndex=0, we get worker-node-01. + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153351.png) + +2. zoneIndex=2 >= len(zones); zoneIndex=0, nodeArray[sh:200002].lastIndex=3, return. + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153400.png) + +3. zoneIndex=1, nodeArray[sh:200004].lastIndex=1, return. + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153410.png) + +4. zoneIndex=0, nodeArray[sh:200002].lastIndex=0, we get master-01. + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153419.png) + +5. zoneIndex=1, nodeArray[sh:200004].lastIndex=0, we get worker-node-01. + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153429.png) + +6. zoneIndex=2 >= len(zones); zoneIndex=0, nodeArray[sh:200002].lastIndex=1, we get master-02. 
+ +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153440.png) + +同步完成之后,调度器的`snapshot.nodeInfoList`得到如下的结果: + +```json +[ + worker-node-01, + master-01, + worker-node-01, + master-02, +] +``` + +master-03去哪了?在第二轮同步的过程中丢了。 + +## 解决方案 + +从`问题根因`的分析中,可以看出,导致问题发生的原因,在于nodeTree数据结构中的游标zoneIndex 和 lastIndex(zone级别)值被保留了,所以,解决的方案就是在每次同步SYNC时,强制重置游标(归0)。 + +## 参考资料 + +* [相关 issue](https://github.com/kubernetes/kubernetes/issues/97120) +* [相关pr (k8s v1.18)](https://github.com/kubernetes/kubernetes/pull/93387) +* [TKE 修复版本 v1.18.4-tke.5](https://cloud.tencent.com/document/product/457/9315#tke-kubernetes-1.18.4-revisions) diff --git a/content/troubleshooting/cases/high-load/disk-full-causing-high-cpu.md b/content/troubleshooting/cases/high-load/disk-full-causing-high-cpu.md new file mode 100644 index 0000000..a3f3ac6 --- /dev/null +++ b/content/troubleshooting/cases/high-load/disk-full-causing-high-cpu.md @@ -0,0 +1,40 @@ +# 容器磁盘满导致 CPU 飙高 + +## 问题描述 + +某服务的其中两个副本异常,CPU 飙高。 + +## 排查 + +1. 查看 `container_cpu_usage_seconds_total` 监控,CPU 飙升,逼近 limit。 +2. 查看 `container_cpu_cfs_throttled_periods_total` 监控,CPU 飙升伴随 CPU Throttle 飙升,所以服务异常应该是 CPU 被限流导致。 +3. 查看 `container_cpu_system_seconds_total` 监控,发现 CPU 飙升主要是 CPU system 占用导致,容器内 `pidstat -u -t 5 1` 可以看到进程 `%system` 占用分布情况。 +4. `perf top` 看 system 占用高主要是 `vfs_write` 写数据导致。 + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152714.png) + +5. `iostat -xhd 2` 看 IO 并不高,磁盘利用率也不高,io wait 也不高。 +6. `sync_inodes_sb` 看起来是写数据时触发了磁盘同步的耗时逻辑 +7. 深入看内核代码,当磁盘满的时候会调用 flush 刷磁盘所有数据,这个会一直在内核态运行很久,相当于对这个文件系统做 sync。 + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152724.png) + +8. 节点上 `df -h` 看并没有磁盘满。 +9. 容器内 `df -h` 看根目录空间满了. + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152735.png) + +10. 看到 docker `daemon.json` 配置,限制了容器内 rootfs 最大只能占用 200G + + ![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152744.png) + +11. 
在容器内逐级执行 `du -sh *` 排查,发现主要是一个 `nohup.log` 文件占满了磁盘。


## 结论

容器内空间满了还继续写数据,会触发内核不断对文件系统做同步刷盘,导致内核态 CPU 占用升高;设置了 cpu limit 的话通常会被 throttle,导致服务处理变慢,影响业务。

## 建议

对日志进行轮转,或直接打到标准输出,避免写满容器磁盘。
diff --git a/content/troubleshooting/cases/network/arp-cache-overflow-causing-healthcheck-failed.md b/content/troubleshooting/cases/network/arp-cache-overflow-causing-healthcheck-failed.md
new file mode 100644
index 0000000..89d5e29
--- /dev/null
+++ b/content/troubleshooting/cases/network/arp-cache-overflow-causing-healthcheck-failed.md
@@ -0,0 +1,17 @@
+# ARP 爆满导致健康检查失败

## 案例

一用户某集群节点数 1200+,监控方案是用 daemonset 部署 node-exporter 暴露节点监控指标 (使用 hostNetwork 方式),再用 statefulset 部署 prometheus 且仅有一个实例,落在了某一个节点上。prometheus 会请求所有节点的 node-exporter 获取节点监控指标,也就是会扫描所有节点,导致该节点的 arp cache 需要保存所有 node 的记录。而节点数 1200+,大于了 `net.ipv4.neigh.default.gc_thresh3` 的默认值 1024,这个值是个硬限制,arp cache 记录数大于它就会强制触发 gc。当有数据包要发送时会先查本地 arp,如果本地没找到 arp 记录,就会判断当前 arp cache 记录数 +1 是否大于 gc_thresh3:如果不大于,就广播 arp 查询 mac 地址;如果大于了,就直接报 `arp_cache: neighbor table overflow!` 并放弃 arp 请求。无法获取 mac 地址,也就无法知道探测报文该往哪儿发 (即便目标就在本机某个 veth pair)。kubelet 对本机 pod 做存活检查时发 arp 查 mac 地址,在 arp cache 中找不到,而此时 arp cache 已经满了,刚要 gc 但还没做,所以只能报错丢包,导致存活检查失败重启 pod。

## 解决方案

调整部分节点内核参数,将 arp cache 的 gc 阈值调高 (`/etc/sysctl.conf`):

``` bash
net.ipv4.neigh.default.gc_thresh1 = 80000
net.ipv4.neigh.default.gc_thresh2 = 90000
net.ipv4.neigh.default.gc_thresh3 = 100000
```

并给 node 打下 label,修改 pod spec,加下 nodeSelector 或者 nodeAffinity,让 pod 只调度到这部分改过内核参数的节点,更多请参考本书 [节点排障: ARP 表爆满](../../node/arp-cache-overflow.md)
diff --git a/content/troubleshooting/cases/network/cross-vpc-connect-nodeport-timeout.md b/content/troubleshooting/cases/network/cross-vpc-connect-nodeport-timeout.md
new file mode 100644
index 0000000..f991252
--- /dev/null
+++ b/content/troubleshooting/cases/network/cross-vpc-connect-nodeport-timeout.md
@@ -0,0 +1,58 @@
+# tcp_tw_recycle 导致跨 VPC 访问 NodePort 超时

## 现象

从 VPC a 访问 VPC b 的 TKE 集群的某个节点的 NodePort,有时候正常,有时候会卡住直到超时。

## 排查

原因怎么查?

当然是先抓包看看啦,抓 server 端 NodePort 的包,发现异常时 server 能收到 SYN,但没响应 ACK:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153553.png)

反复执行 `netstat -s | grep LISTEN` 发现 SYN 被丢弃数量不断增加:

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153601.png)

分析:

- 两个VPC之间使用对等连接打通的,CVM 之间通信应该就跟在一个内网一样可以互通。
- 为什么同一 VPC 下访问没问题,跨 VPC 有问题? 两者访问的区别是什么?

再仔细看下 client 所在环境,发现 client 是 VPC a 的 TKE 集群节点,捋一下:

- client 在 VPC a 的 TKE 集群的节点
- server 在 VPC b 的 TKE 集群的节点

因为 TKE 集群中有个叫 `ip-masq-agent` 的 daemonset,它会给 node 写 iptables 规则,默认对目的 IP 在 VPC 之外的报文做 SNAT,所以 client 访问 server 会做 SNAT,也就是这里跨 VPC 相比同 VPC 访问 NodePort 多了一次 SNAT。如果是因为多了一次 SNAT 导致的这个问题,直觉告诉我这个应该跟内核参数有关,因为是 server 收到包没回包,所以应该是 server 所在 node 的内核参数问题。对比这个 node 和普通 TKE node 的默认内核参数,发现这个 node `net.ipv4.tcp_tw_recycle = 1`,这个参数默认是关闭的,跟用户沟通后发现这个内核参数确实在做压测的时候调整过。

## tcp_tw_recycle 的坑

解释一下,TCP 主动关闭连接的一方在发送最后一个 ACK 会进入 `TIME_WAIT` 状态,再等待 2 个 MSL 时间后才会关闭(因为如果 server 没收到 client 第四次挥手确认报文,server 会重发第三次挥手 FIN 报文,所以 client 需要停留 2 MSL 的时长来处理可能会重复收到的报文段;同时等待 2 MSL 也可以让由于网络不通畅产生的滞留报文失效,避免新建立的连接收到之前旧连接的报文),了解更详细的过程请参考 TCP 四次挥手。

参数 `tcp_tw_recycle` 用于快速回收 `TIME_WAIT` 连接,通常在增加连接并发能力的场景会开启,比如发起大量短连接,快速回收可避免 `tw_buckets` 资源耗尽导致无法建立新连接 (`time wait bucket table overflow`)。

查得 `tcp_tw_recycle` 有个坑,在 RFC1323 有段描述:

> An additional mechanism could be added to the TCP, a per-host cache of the last timestamp received from any connection. 
查得 `tcp_tw_recycle` 有个坑,在 RFC1323 中有段描述:

```txt
An additional mechanism could be added to the TCP, a per-host cache of
the last timestamp received from any connection. This value could then
be used in the PAWS mechanism to reject old duplicate segments from
earlier incarnations of the connection, if the timestamp clock can be
guaranteed to have ticked at least once since the old connection was
open. This would require that the TIME-WAIT delay plus the RTT together
must be at least one tick of the sender’s timestamp clock. Such an
extension is not part of the proposal of this RFC.
```

大概意思是说 TCP 有一种行为,可以缓存每个连接最新的时间戳,后续请求中如果时间戳小于缓存的时间戳,即视为无效,相应的数据包会被丢弃。

Linux 是否启用这种行为取决于 `tcp_timestamps` 和 `tcp_tw_recycle`,因为 `tcp_timestamps` 缺省开启,所以当 `tcp_tw_recycle` 被开启后,实际上这种行为就被激活了,当客户端或服务端以 `NAT` 方式构建的时候就可能出现问题。

当多个客户端通过 NAT 方式联网并与服务端交互时,服务端看到的是同一个 IP。也就是说,对服务端而言这些客户端实际上等同于一个,可惜由于这些客户端的时间戳可能存在差异,于是乎从服务端的视角看,便可能出现时间戳错乱的现象,进而直接导致时间戳小的数据包被丢弃。如果发生了此类问题,具体的表现通常是客户端明明发送了 SYN,但服务端就是不响应 ACK。

## 真相大白

回到我们的问题上:client 所在节点上可能也会有其它 pod 访问到 server 所在节点,而它们都被 SNAT 成了 client 所在节点的 NODE IP,但时间戳存在差异,server 就会看到时间戳错乱。由于开启了 `tcp_tw_recycle` 和 `tcp_timestamps`,激活了上述行为,比缓存时间戳小的报文就被丢掉了,导致部分 SYN 被丢弃。这也解释了为什么之前我们抓包发现异常时 server 收到了 SYN 但没有响应 ACK,进而说明了为什么 client 的请求会部分卡住直到超时。

由于 `tcp_tw_recycle` 坑太多,在内核 4.12 之后已移除: [remove tcp_tw_recycle](https://github.com/torvalds/linux/commit/4396e46187ca5070219b81773c4e65088dac50cc)

## 解决方案

1. 关闭 tcp_tw_recycle。
2. 升级内核,启用 `net.ipv4.tcp_tw_reuse`。

diff --git a/content/troubleshooting/cases/network/dns-lookup-5s-delay.md b/content/troubleshooting/cases/network/dns-lookup-5s-delay.md
new file mode 100644
index 0000000..76ddd53
--- /dev/null
+++ b/content/troubleshooting/cases/network/dns-lookup-5s-delay.md
@@ -0,0 +1,191 @@
+# DNS 5 秒延时

## 现象

用户反馈从 pod 中访问服务时,总是有些请求的响应时延会达到 5 秒,而正常的响应只需要毫秒级别的时延。

## 抓包

* [使用 nsenter 进入 netns](../../skill/enter-netns-with-nsenter.md),然后使用节点上的 tcpdump 抓 pod 中的包,发现是有的 DNS 请求没有收到响应,超时 5 秒后,再次发送 DNS 请求才成功收到响应。
* 在 kube-dns pod 抓包,发现是有 DNS 请求没有到达 kube-dns pod,在中途被丢弃了。

为什么是 5 秒?`man resolv.conf` 可以看到 glibc 的 resolver 的缺省超时时间是 5s:

```txt
timeout:n
       Sets the amount of time the resolver will wait for a response from a remote
       name server before retrying the query via a different name server. Measured in
       seconds, the default is RES_TIMEOUT (currently 5, see <resolv.h>). The value
       for this option is silently capped to 30.
```

## 丢包原因

经过搜索发现这是一个普遍问题。

根本原因是内核 conntrack 模块的 bug,netfilter 做 NAT 时可能发生资源竞争导致部分报文丢弃。

Weave Works 的工程师 [Martynas Pumputis](mailto:martynas@weave.works) 对这个问题做了很详细的分析:[Racy conntrack and DNS lookup timeouts](https://www.weave.works/blog/racy-conntrack-and-dns-lookup-timeouts)

相关结论:

* 只有多个线程或进程并发从同一个 socket 发送相同五元组的 UDP 报文时,才有一定概率会发生。
* glibc 和 musl\(alpine linux 的 libc 库\) 都使用 "parallel query",就是并发发出多个查询请求,因此很容易碰到这样的冲突,造成查询请求被丢弃。
* 由于 ipvs 也使用了 conntrack,使用 kube-proxy 的 ipvs 模式并不能避免这个问题。

## 问题的根本解决

Martynas 向内核提交了两个 patch 来 fix 这个问题,不过他说如果集群中有多个 DNS server 的情况下,问题并没有完全解决。

其中一个 patch 已经在 2018-7-18 被合并到 linux 内核主线中: [netfilter: nf\_conntrack: resolve clash for matching conntracks](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ed07d9a021df6da53456663a76999189badc432a)

目前只有 4.19.rc 版本包含这个 patch。

## 规避办法

### 规避方案一:使用 TCP 发送 DNS 请求

由于 TCP 没有这个问题,有人提出可以在容器的 resolv.conf 中增加 `options use-vc`,强制 glibc 使用 TCP 协议发送 DNS query。下面是 man resolv.conf 中关于这个选项的说明:

```text
use-vc (since glibc 2.14)
       Sets RES_USEVC in _res.options. This option forces the
       use of TCP for DNS resolutions.
+``` + +笔者使用镜像"busybox:1.29.3-glibc" \(libc 2.24\) 做了试验,并没有见到这样的效果,容器仍然是通过UDP发送DNS请求。 + +### 规避方案二:避免相同五元组DNS请求的并发 + +resolv.conf还有另外两个相关的参数: + +* single-request-reopen \(since glibc 2.9\) +* single-request \(since glibc 2.10\) + +man resolv.conf中解释如下: + +```text +single-request-reopen (since glibc 2.9) + Sets RES_SNGLKUPREOP in _res.options. The resolver + uses the same socket for the A and AAAA requests. Some + hardware mistakenly sends back only one reply. When + that happens the client system will sit and wait for + the second reply. Turning this option on changes this + behavior so that if two requests from the same port are + not handled correctly it will close the socket and open + a new one before sending the second request. + +single-request (since glibc 2.10) + Sets RES_SNGLKUP in _res.options. By default, glibc + performs IPv4 and IPv6 lookups in parallel since + version 2.9. Some appliance DNS servers cannot handle + these queries properly and make the requests time out. + This option disables the behavior and makes glibc + perform the IPv6 and IPv4 requests sequentially (at the + cost of some slowdown of the resolving process). +``` + +用自己的话解释下: + +* `single-request-reopen`: 发送 A 类型请求和 AAAA 类型请求使用不同的源端口,这样两个请求在 conntrack 表中不占用同一个表项,从而避免冲突 +* `single-request`: 避免并发,改为串行发送 A 类型和 AAAA 类型请求,没有了并发,从而也避免了冲突 + +要给容器的 `resolv.conf` 加上 options 参数,有几个办法: + +1. 在容器的 "ENTRYPOINT" 或者 "CMD" 脚本中,执行 /bin/echo 'options single-request-reopen' >> /etc/resolv.conf** + +2. 在 pod 的 postStart hook 中: + +```yaml + lifecycle: + postStart: + exec: + command: + - /bin/sh + - -c + - "/bin/echo 'options single-request-reopen' >> /etc/resolv.conf" +``` + +3. 使用 template.spec.dnsConfig (k8s v1.9 及以上才支持): + +```yaml + template: + spec: + dnsConfig: + options: + - name: single-request-reopen +``` + +4. 使用 ConfigMap 覆盖 pod 里面的 /etc/resolv.conf: + +configmap: + +```yaml +apiVersion: v1 +data: + resolv.conf: | + nameserver 1.2.3.4 + search default.svc.cluster.local svc.cluster.local cluster.local ec2.internal + options ndots:5 single-request-reopen timeout:1 +kind: ConfigMap +metadata: + name: resolvconf +``` + +pod spec: + +```yaml + volumeMounts: + - name: resolv-conf + mountPath: /etc/resolv.conf + subPath: resolv.conf +... + + volumes: + - name: resolv-conf + configMap: + name: resolvconf + items: + - key: resolv.conf + path: resolv.conf +``` + +5. 使用 MutatingAdmissionWebhook + +[MutatingAdmissionWebhook](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook-beta-in-1-9) 是 1.9 引入的 Controller,用于对一个指定的 Resource 的操作之前,对这个 resource 进行变更。 istio 的自动 sidecar注入就是用这个功能来实现的。 我们也可以通过 MutatingAdmissionWebhook,来自动给所有POD,注入以上3\)或者4\)所需要的相关内容。 + +以上方法中, 1 和 2 都需要修改镜像, 3 和 4 则只需要修改 pod 的 spec, 能适用于所有镜像。不过还是有不方便的地方: + +* 每个工作负载的yaml都要做修改,比较麻烦 +* 对于通过helm创建的工作负载,需要修改helm charts + +方法5\)对集群使用者最省事,照常提交工作负载即可。不过初期需要一定的开发工作量。 + +### 最佳实践:使用 LocalDNS + +容器的DNS请求都发往本地的DNS缓存服务 (dnsmasq, nscd 等),不需要走DNAT,也不会发生conntrack冲突。另外还有个好处,就是避免DNS服务成为性能瓶颈。 + +使用 LocalDNS 缓存有两种方式: + +* 每个容器自带一个DNS缓存服务 +* 每个节点运行一个DNS缓存服务,所有容器都把本节点的DNS缓存作为自己的 nameserver + +从资源效率的角度来考虑的话,推荐后一种方式。官方也意识到了这个问题比较常见,给出了 coredns 以 cache 模式作为 daemonset 部署的解决方案: [https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) + +### 实施办法 + +条条大路通罗马,不管怎么做,最终到达上面描述的效果即可。 + +POD中要访问节点上的DNS缓存服务,可以使用节点的IP。 如果节点上的容器都连在一个虚拟bridge上, 也可以使用这个bridge的三层接口的IP(在TKE中,这个三层接口叫cbr0)。 要确保DNS缓存服务监听这个地址。 + +如何把 POD 的 /etc/resolv.conf 中的 nameserver 设置为节点IP呢? 
+ +一个办法,是设置 POD.spec.dnsPolicy 为 "Default", 意思是POD里面的 /etc/resolv.conf, 使用节点上的文件。缺省使用节点上的 /etc/resolv.conf (如果kubelet通过参数--resolv-conf指定了其他文件,则使用--resolv-conf所指定的文件)。 + +另一个办法,是给每个节点的kubelet指定不同的--cluster-dns参数,设置为节点的IP,POD.spec.dnsPolicy仍然使用缺省值"ClusterFirst"。 kops项目甚至有个issue在讨论如何在部署集群时设置好--cluster-dns指向节点IP: [https://github.com/kubernetes/kops/issues/5584](https://github.com/kubernetes/kops/issues/5584) + +## 参考资料 + +* [Racy conntrack and DNS lookup timeouts](https://www.weave.works/blog/racy-conntrack-and-dns-lookup-timeouts) +* [5 – 15s DNS lookups on Kubernetes?](https://blog.quentin-machu.fr/2018/06/24/5-15s-dns-lookups-on-kubernetes/) +* [DNS intermittent delays of 5s](https://github.com/kubernetes/kubernetes/issues/56903) +* [记一次Docker/Kubernetes上无法解释的连接超时原因探寻之旅](https://mp.weixin.qq.com/s/VYBs8iqf0HsNg9WAxktzYQ) + diff --git a/content/troubleshooting/cases/network/musl-libc-dns-id-conflict-causing-dns-abnormal.md b/content/troubleshooting/cases/network/musl-libc-dns-id-conflict-causing-dns-abnormal.md new file mode 100644 index 0000000..dbe578b --- /dev/null +++ b/content/troubleshooting/cases/network/musl-libc-dns-id-conflict-causing-dns-abnormal.md @@ -0,0 +1,95 @@ +# dns id 冲突导致解析异常 + +## 现象 + +有个用户反馈域名解析有时有问题,看报错是解析超时。 + +## 排查 + +第一反应当然是看 coredns 的 log: + +``` bash +[ERROR] 2 loginspub.xxxxmobile-inc.net. +A: unreachable backend: read udp 172.16.0.230:43742->10.225.30.181:53: i/o timeout +``` + +这是上游 DNS 解析异常了,因为解析外部域名 coredns 默认会请求上游 DNS 来查询,这里的上游 DNS 默认是 coredns pod 所在宿主机的 `resolv.conf` 里面的 nameserver (coredns pod 的 dnsPolicy 为 "Default",也就是会将宿主机里的 `resolv.conf` 里的 nameserver 加到容器里的 `resolv.conf`, coredns 默认配置 `proxy . /etc/resolv.conf`, 意思是非 service 域名会使用 coredns 容器中 `resolv.conf` 文件里的 nameserver 来解析) + +确认了下,超时的上游 DNS 10.225.30.181,并不是期望的 nameserver,VPC 默认 DNS 应该是 180 开头的。看了 coredns 所在节点的 `resolv.conf`,发现确实多出了这个非期望的 nameserver,跟用户确认了下,这个 DNS 不是用户自己加上去的,添加节点时这个 nameserver 本身就在 `resolv.conf` 中。 + +根据内部同学反馈, 10.225.30.181 是广州一台年久失修将被撤裁的 DNS,物理网络,没有 VIP,撤掉就没有了,所以如果 coredns 用到了这台 DNS 解析时就可能 timeout。后面我们自己测试,某些 VPC 的集群确实会有这个 nameserver,奇了怪了,哪里冒出来的? + +又试了下直接创建 CVM,不加进 TKE 节点发现没有这个 nameserver,只要一加进 TKE 节点就有了 !!! 
+ +看起来是 TKE 的问题,将 CVM 添加到 TKE 集群会自动重装系统,初始化并加进集群成为 K8S 的 node,确认了初始化过程并不会写 `resolv.conf`,会不会是 TKE 的 OS 镜像问题?尝试搜一下除了 `/etc/resolv.conf` 之外哪里还有这个 nameserver 的 IP,最后发现 `/etc/resolvconf/resolv.conf.d/base` 这里面有。 + +看下 `/etc/resolvconf/resolv.conf.d/base` 的作用:Ubuntu 的 `/etc/resolv.conf` 是动态生成的,每次重启都会将 `/etc/resolvconf/resolv.conf.d/base` 里面的内容加到 `/etc/resolv.conf` 里。 + +经确认: 这个文件确实是 TKE 的 Ubuntu OS 镜像里自带的,可能发布 OS 镜像时不小心加进去的。 + +那为什么有些 VPC 的集群的节点 `/etc/resolv.conf` 里面没那个 IP 呢?它们的 OS 镜像里也都有那个文件那个 IP 呀。 + +请教其它部门同学发现: + +- 非 dhcp 子机,cvm 的 cloud-init 会覆盖 `/etc/resolv.conf` 来设置 dns +- dhcp 子机,cloud-init 不会设置,而是通过 dhcp 动态下发 +- 2018 年 4 月 之后创建的 VPC 就都是 dhcp 类型了的,比较新的 VPC 都是 dhcp 类型的 + +## 真相大白 + +`/etc/resolv.conf` 一开始内容都包含 `/etc/resolvconf/resolv.conf.d/base` 的内容,也就是都有那个不期望的 nameserver,但老的 VPC 由于不是 dhcp 类型,所以 cloud-init 会覆盖 `/etc/resolv.conf`,抹掉了不被期望的 nameserver,而新创建的 VPC 都是 dhcp 类型,cloud-init 不会覆盖 `/etc/resolv.conf`,导致不被期望的 nameserver 残留在了 `/etc/resolv.conf`,而 coredns pod 的 dnsPolicy 为 “Default”,也就是会将宿主机的 `/etc/resolv.conf` 中的 nameserver 加到容器里,coredns 解析集群外的域名默认使用这些 nameserver 来解析,当用到那个将被撤裁的 nameserver 就可能 timeout。 + +## 解决方案 + +临时解决: 删掉 `/etc/resolvconf/resolv.conf.d/base` 重启。 +长期解决: 我们重新制作 TKE Ubuntu OS 镜像然后发布更新。 + +## 再次出问题 + +这下应该没问题了吧,But, 用户反馈还是会偶尔解析有问题,但现象不一样了,这次并不是 dns timeout。 + +用脚本跑测试仔细分析现象: + +- 请求 `loginspub.xxxxmobile-inc.net` 时,偶尔提示域名无法解析 +- 请求 `accounts.google.com` 时,偶尔提示连接失败 + +进入 dns 解析偶尔异常的容器的 netns 抓包: + +- dns 请求会并发请求 A 和 AAAA 记录 +- 测试脚本发请求打印序号,抓包然后 wireshark 分析对比异常时请求序号偏移量,找到异常时的 dns 请求报文,发现异常时 A 和 AAAA 记录的请求 id 冲突,并且 AAAA 响应先返回 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153648.png) + +正常情况下id不会冲突,这里冲突了也就能解释这个 dns 解析异常的现象了: + +- `loginspub.xxxxmobile-inc.net` 没有 AAAA (ipv6) 记录,它的响应先返回告知 client 不存在此记录,由于请求 id 跟 A 记录请求冲突,后面 A 记录响应返回了 client 发现 id 重复就忽略了,然后认为这个域名无法解析 +- `accounts.google.com` 有 AAAA 记录,响应先返回了,client 就拿这个记录去尝试请求,但当前容器环境不支持 ipv6,所以会连接失败 + +## 分析 + +那为什么 dns 请求 id 会冲突? + +继续观察发现: 其它节点上的 pod 不会复现这个问题,有问题这个节点上也不是所有 pod 都有这个问题,只有基于 alpine 镜像的容器才有这个问题,在此节点新起一个测试的 `alpine:latest` 的容器也一样有这个问题。 + +为什么 alpine 镜像的容器在这个节点上有问题在其它节点上没问题? 为什么其他镜像的容器都没问题?它们跟 alpine 的区别是什么? 
+ +发现一点区别: alpine 使用的底层 c 库是 musl libc,其它镜像基本都是 glibc + +翻 musl libc 源码, 构造 dns 请求时,请求 id 的生成没加锁,而且跟当前时间戳有关 (`network/res_mkquery.c`): + +``` c +/* Make a reasonably unpredictable id */ +clock_gettime(CLOCK_REALTIME, &ts); +id = ts.tv_nsec + ts.tv_nsec/65536UL & 0xffff; +``` + +看注释,作者应该认为这样id基本不会冲突,事实证明,绝大多数情况确实不会冲突,我在网上搜了很久没有搜到任何关于 musl libc 的 dns 请求 id 冲突的情况。这个看起来取决于硬件,可能在某种类型硬件的机器上运行,短时间内生成的 id 就可能冲突。我尝试跟用户在相同地域的集群,添加相同配置相同机型的节点,也复现了这个问题,但后来删除再添加时又不能复现了,看起来后面新建的 cvm 又跑在了另一种硬件的母机上了。 + +OK,能解释通了,再底层的细节就不清楚了,我们来看下解决方案: + +- 换基础镜像 (不用alpine) +- 完全静态编译业务程序(不依赖底层c库),比如go语言程序编译时可以关闭 cgo (CGO_ENABLED=0),并告诉链接器要静态链接 (`go build` 后面加 `-ldflags '-d'`),但这需要语言和编译工具支持才可以 + +## 最终解决方案 + +最终建议用户基础镜像换成另一个比较小的镜像: `debian:stretch-slim`。 diff --git a/content/troubleshooting/cases/node/cgroup-leaking.md b/content/troubleshooting/cases/node/cgroup-leaking.md new file mode 100644 index 0000000..45084c4 --- /dev/null +++ b/content/troubleshooting/cases/node/cgroup-leaking.md @@ -0,0 +1,102 @@ +# cgroup 泄露 + +## 现象 + +创建 Pod 失败,运行时报错 `no space left on device`: + +```txt +Dec 24 11:54:31 VM_16_11_centos dockerd[11419]: time="2018-12-24T11:54:31.195900301+08:00" level=error msg="Handler for POST /v1.31/containers/b98d4aea818bf9d1d1aa84079e1688cd9b4218e008c58a8ef6d6c3c106403e7b/start returned error: OCI runtime create failed: container_linux.go:348: starting container process caused \"process_linux.go:279: applying cgroup configuration for process caused \\\"mkdir /sys/fs/cgroup/memory/kubepods/burstable/pod79fe803c-072f-11e9-90ca-525400090c71/b98d4aea818bf9d1d1aa84079e1688cd9b4218e008c58a8ef6d6c3c106403e7b: no space left on device\\\"\": unknown" +``` + +## 内核 Bug + +`memcg` 是 Linux 内核中用于管理 cgroup 内存的模块,整个生命周期应该是跟随 cgroup 的,但是在低版本内核中\(已知3.10\),一旦给某个 memory cgroup 开启 kmem accounting 中的 `memory.kmem.limit_in_bytes` 就可能会导致不能彻底删除 memcg 和对应的 cssid,也就是说应用即使已经删除了 cgroup \(`/sys/fs/cgroup/memory` 下对应的 cgroup 目录已经删除\), 但在内核中没有释放 cssid,导致内核认为的 cgroup 的数量实际数量不一致,我们也无法得知内核认为的 cgroup 数量是多少。 + +关于 cgroup kernel memory,在 [kernel.org](https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/memory.html#kernel-memory-extension-config-memcg-kmem) 中有如下描述: + +``` +2.7 Kernel Memory Extension (CONFIG_MEMCG_KMEM) +----------------------------------------------- + +With the Kernel memory extension, the Memory Controller is able to limit +the amount of kernel memory used by the system. Kernel memory is fundamentally +different than user memory, since it can't be swapped out, which makes it +possible to DoS the system by consuming too much of this precious resource. + +Kernel memory accounting is enabled for all memory cgroups by default. But +it can be disabled system-wide by passing cgroup.memory=nokmem to the kernel +at boot time. In this case, kernel memory will not be accounted at all. + +Kernel memory limits are not imposed for the root cgroup. Usage for the root +cgroup may or may not be accounted. The memory used is accumulated into +memory.kmem.usage_in_bytes, or in a separate counter when it makes sense. +(currently only for tcp). + +The main "kmem" counter is fed into the main counter, so kmem charges will +also be visible from the user counter. + +Currently no soft limit is implemented for kernel memory. It is future work +to trigger slab reclaim when those limits are reached. 
+``` + +这是一个 cgroup memory 的扩展,用于限制对 kernel memory 的使用,但该特性在老于 4.0 版本中是个实验特性,存在泄露问题,在 4.x 较低的版本也还有泄露问题,应该是造成泄露的代码路径没有完全修复,推荐 4.3 以上的内核。 + +## 造成容器创建失败 + +这个问题可能会导致创建容器失败,因为创建容器为其需要创建 cgroup 来做隔离,而低版本内核有个限制:允许创建的 cgroup 最大数量写死为 65535 \([点我跳转到 commit](https://github.com/torvalds/linux/commit/38460b48d06440de46b34cb778bd6c4855030754#diff-c04090c51d3c6700c7128e84c58b1291R3384)\),如果节点上经常创建和销毁大量容器导致创建很多 cgroup,删除容器但没有彻底删除 cgroup 造成泄露\(真实数量我们无法得知\),到达 65535 后再创建容器就会报创建 cgroup 失败并报错 `no space left on device`,使用 kubernetes 最直观的感受就是 pod 创建之后无法启动成功。 + +pod 启动失败,报 event 示例: + +``` bash +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 15m default-scheduler Successfully assigned jenkins/jenkins-7845b9b665-nrvks to 10.10.252.4 + Warning FailedCreatePodContainer 25s (x70 over 15m) kubelet, 10.10.252.4 unable to ensure pod container exists: failed to create container for [kubepods besteffort podc6eeec88-8664-11e9-9524-5254007057ba] : mkdir /sys/fs/cgroup/memory/kubepods/besteffort/podc6eeec88-8664-11e9-9524-5254007057ba: no space left on device +``` + +dockerd 日志报错示例: + +``` bash +Dec 24 11:54:31 VM_16_11_centos dockerd[11419]: time="2018-12-24T11:54:31.195900301+08:00" level=error msg="Handler for POST /v1.31/containers/b98d4aea818bf9d1d1aa84079e1688cd9b4218e008c58a8ef6d6c3c106403e7b/start returned error: OCI runtime create failed: container_linux.go:348: starting container process caused \"process_linux.go:279: applying cgroup configuration for process caused \\\"mkdir /sys/fs/cgroup/memory/kubepods/burstable/pod79fe803c-072f-11e9-90ca-525400090c71/b98d4aea818bf9d1d1aa84079e1688cd9b4218e008c58a8ef6d6c3c106403e7b: no space left on device\\\"\": unknown" +``` + +kubelet 日志报错示例: + +``` bash +Sep 09 18:09:09 VM-0-39-ubuntu kubelet[18902]: I0909 18:09:09.449722 18902 remote_runtime.go:92] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to start sandbox container for pod "osp-xxx-com-ljqm19-54bf7678b8-bvz9s": Error response from daemon: oci runtime error: container_linux.go:247: starting container process caused "process_linux.go:258: applying cgroup configuration for process caused \"mkdir /sys/fs/cgroup/memory/kubepods/burstable/podf1bd9e87-1ef2-11e8-afd3-fa163ecf2dce/8710c146b3c8b52f5da62e222273703b1e3d54a6a6270a0ea7ce1b194f1b5053: no space left on device\"" +``` + +新版的内核限制为 `2^31` \(可以看成几乎不限制,[点我跳转到代码](https://github.com/torvalds/linux/blob/3120b9a6a3f7487f96af7bd634ec49c87ef712ab/kernel/cgroup/cgroup.c#L5233)\): `cgroup_idr_alloc()` 传入 end 为 0 到 `idr_alloc()`, 再传给 `idr_alloc_u32()`, end 的值最终被三元运算符 `end>0 ? 
end-1 : INT_MAX` 转成了 `INT_MAX` 常量,即 `2^31`。所以如果新版内核有泄露问题会更难定位,表现形式会是内存消耗严重,幸运的是新版内核已经修复,推荐 4.3 以上。 + +### 规避方案 + +如果你用的低版本内核\(比如 CentOS 7 v3.10 的内核\)并且不方便升级内核,可以通过不开启 kmem accounting 来实现规避,但会比较麻烦。 + +kubelet 和 runc 都会给 memory cgroup 开启 kmem accounting,所以要规避这个问题,就要保证kubelet 和 runc 都别开启 kmem accounting,下面分别进行说明: + +#### runc + +runc 在合并 [这个PR](https://github.com/opencontainers/runc/pull/1350/files) \(2017-02-27\) 之后创建的容器都默认开启了 kmem accounting,后来社区也注意到这个问题,并做了比较灵活的修复, [PR 1921](https://github.com/opencontainers/runc/pull/1921) 给 runc 增加了 "nokmem" 编译选项,缺省的 release 版本没有使用这个选项, 自己使用 nokmem 选项编译 runc 的方法: + +``` bash +cd $GO_PATH/src/github.com/opencontainers/runc/ +make BUILDTAGS="seccomp nokmem" +``` + +docker-ce v18.09.1 之后的 runc 默认关闭了 kmem accounting,所以也可以直接升级 docker 到这个版本之后。 + +#### kubelet + +如果是 1.14 版本及其以上,可以在编译的时候通过 build tag 来关闭 kmem accounting: + +``` bash +KUBE_GIT_VERSION=v1.14.1 ./build/run.sh make kubelet GOFLAGS="-tags=nokmem" +``` + +如果是低版本需要修改代码重新编译。kubelet 在创建 pod 对应的 cgroup 目录时,也会调用 libcontianer 中的代码对 cgroup 做设置,在 `pkg/kubelet/cm/cgroup_manager_linux.go` 的 `Create` 方法中,会调用 `Manager.Apply` 方法,最终调用 `vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go` 中的 `MemoryGroup.Apply` 方法,开启 kmem accounting。这里也需要进行处理,可以将这部分代码注释掉然后重新编译 kubelet。 + +## 参考资料 + +* 一行 kubernetes 1.9 代码引发的血案(与 CentOS 7.x 内核兼容性问题): [http://dockone.io/article/4797](http://dockone.io/article/4797) +* Cgroup泄漏--潜藏在你的集群中: [https://tencentcloudcontainerteam.github.io/2018/12/29/cgroup-leaking/](https://tencentcloudcontainerteam.github.io/2018/12/29/cgroup-leaking/) diff --git a/content/troubleshooting/cases/others/dotnet-configuration-cannot-auto-reload.md b/content/troubleshooting/cases/others/dotnet-configuration-cannot-auto-reload.md new file mode 100644 index 0000000..a166561 --- /dev/null +++ b/content/troubleshooting/cases/others/dotnet-configuration-cannot-auto-reload.md @@ -0,0 +1,130 @@ +# .Net Core 配置文件无法热加载 + +## 问题描述 + +在使用 kubernetes 部署应用时, 我使用 `kubernetes` 的 `configmap` 来管理配置文件: `appsettings.json` +, 修改configmap 的配置文件后, 我来到了容器里, 通过 `cat /app/config/appsetting.json` 命令查看容器是否已经加载了最新的配置文件, 很幸运的是, 通过命令行查看容器配置发现已经处于最新状态(修改configmap后10-15s 生效), 我尝试请求应用的API, 发现API 在执行过程中使用的配置是老旧的内容, 而不是最新的内容。在本地执行应用时并未出现配置无法热更新的问题。 + +```bash +# 相关版本 +kubernetes 版本: 1.14.2 +# 要求版本大于等于 3.1 +.Net core: 3.1 + +# 容器 os-release (并非 windows) + +NAME="Debian GNU/Linux" +VERSION_ID="10" +VERSION="10 (buster)" +VERSION_CODENAME=buster +ID=debian +HOME_URL="https://www.debian.org/" +SUPPORT_URL="https://www.debian.org/support" +BUG_REPORT_URL="https://bugs.debian.org/" + +# 基础镜像: +mcr.microsoft.com/dotnet/core/sdk:3.1-buster +mcr.microsoft.com/dotnet/core/aspnet:3.1-buster-slim +``` + +## 问题猜想 + +通过命令行排查发现最新的 `configmap` 配置内容已经在容器的指定目录上更新到最新,但是应用仍然使用老旧的配置内容, 这意味着问题发生在: configmap->**容器->应用**, 容器和应用之间, 容器指定目录下的配置更新并没有触发 `.Net` 热加载机制, 那究竟是为什么没有触发配置热加载,需要深挖根本原因, 直觉猜想是: 查看 `.Net Core` 标准库的配置热加载的实现检查触发条件, 很有可能是触发的条件不满足导致应用配置无法重新加载。 + +## 问题排查 + +猜想方向是热更新的触发条件不满足, 我们熟知使用 `configmap` 挂载文件是使用[symlink](https://en.wikipedia.org/wiki/Symbolic_link)来挂载, 而非常用的物理文件系统, 在修改完 `configmap` , 容器重新加载配置后,这一过程并不会改变文件的修改时间等信息(从容器的角度看)。对此,我们做了一个实验,通过对比configmap修改前和修改后来观察配置( `appsettings.json` )在容器的属性变化(注: 均在容器加载最新配置后对比), 使用 `stat` 命令来佐证了这个细节点。 + +**Before:** + +```bash +root@app-785bc59df6-gdmnf:/app/Config# stat appsettings.json +File: Config/appsettings.json -> ..data/appsettings.json + Size: 35 Blocks: 0 IO Block: 4096 symbolic link +Device: ca01h/51713d Inode: 27263079 Links: 1 +Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 
+Access: 2020-04-25 08:21:18.490453316 +0000 +Modify: 2020-04-25 08:21:18.490453316 +0000 +Change: 2020-04-25 08:21:18.490453316 +0000 +Birth: - +``` + +**After:** + +```bash +root@app-785bc59df6-gdmnf:/app/Config# stat appsettings.json + File: appsettings.json -> ..data/appsettings.json + Size: 35 Blocks: 0 IO Block: 4096 symbolic link +Device: ca01h/51713d Inode: 27263079 Links: 1 +Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) +Access: 2020-04-25 08:21:18.490453316 +0000 +Modify: 2020-04-25 08:21:18.490453316 +0000 +Change: 2020-04-25 08:21:18.490453316 +0000 +Birth: - +``` + +通过标准库源码发现, `.Net core` 配置热更新机制似乎是基于文件的最后修改日期来触发的, 根据上面的前后对比显而易见, `configmap` 的修改并没有让容器里的指定的文件的最后修改日期改变,也就未触发 `.Net` 应用配置的热加载。 + +## 解决办法 + +既然猜想基本得到证实, 由于不太熟悉这门语言, 我们尝试在网络上寻找解决办法,很幸运的是我们找到了找到了相关的内容, [fbeltrao](https://github.com/fbeltrao) 开源了一个第三方库([ConfigMapFileProvider](https://github.com/fbeltrao/ConfigMapFileProvider)) 来专门解决这个问题,**通过监听文件内容hash值的变化实现配置热加载**。 +于是, 我们在修改了项目的代码: + + +**Before:** + +```csharp +// 配置被放在了/app/Config/ 目录下 +var configPath = Path.Combine(env.ContentRootPath, "Config"); +config.AddJsonFile(Path.Combine(configPath, "appsettings.json"), + optional: false, + reloadOnChange: true); +``` + +**After:** + +```csharp +// 配置被放在了/app/Config/ 目录下 +config.AddJsonFile(ConfigMapFileProvider.FromRelativePath("Config"), + "appsettings.json", + optional: false, + reloadOnChange: true); +``` + +修改完项目的代码后, 重新构建镜像, 更新部署在 `kubernetes` 上的应用, 然后再次测试, 到此为止, 会出现两种状态: + +1. 一种是你热加载配置完全可用, 非常值得祝贺, 你已经成功修复了这个bug; +2. 一种是你的热加载配置功能还存在 bug, 比如: 上一次请求, 配置仍然使用的老旧配置内容, 下一次请求却使用了最新的配置内容,这个时候, 我们需要继续向下排查: `.NET Core` 引入了`Options`模式,使用类来表示相关的设置组,用强类型的类来表达配置项(白话大概表述为: 代码里面有个对象对应配置里的某个字段, 配置里对应的字段更改会触发代码里对象的属性变化), 示例如下: + +**配置示例:** + +```bash +$ cat appsettings.json + "JwtIssuerOptions": { + "Issuer": "test", + "Audience": "test", + "SecretKey": "test" + ... + } +``` + +**代码示例:** + +```csharp +services.Configure(Configuration.GetSection("JwtIssuerOptions")); +``` + +而 Options 模式分为三种: + +1. `IOptions`: Singleton(单例),值一旦生成, 除非通过代码的方式更改,否则它的值不会更新 +2. `IOptionsMonitor`: Singleton(单例), 通过 `IOptionsChangeTokenSource` 能够和配置文件一起更新,也能通过代码的方式更改值 +3. `IOptionsSnapshot`: Scoped,配置文件更新的下一次访问,它的值会更新,但是它不能跨范围通过代码的方式更改值,只能在当前范围(请求)内有效。 + +在知道这三种模式的意义后,我们已经完全找到了问题的根因, 把 `Options` 模式设置为:`IOptionsMonitor`就能解决完全解决配置热加载的问题。 + +## 相关链接 + +1. [配置监听ConfigMapFileProvider](https://github.com/fbeltrao/ConfigMapFileProvider) +2. [相似的Issue: 1175](https://github.com/dotnet/extensions/issues/1175) +3. [官方Options 描述](https://docs.microsoft.com/en-us/aspnet/core/fundamentals/configuration/options?view=aspnetcore-3.1) +4. [IOptions、IOptionsMonitor以及IOptionsSnapshot 测试](https://www.cnblogs.com/wenhx/p/ioptions-ioptionsmonitor-and-ioptionssnapshot.html) diff --git a/content/troubleshooting/cases/others/failed-to-modify-hosts-in-multiple-container.md b/content/troubleshooting/cases/others/failed-to-modify-hosts-in-multiple-container.md new file mode 100644 index 0000000..a2867ac --- /dev/null +++ b/content/troubleshooting/cases/others/failed-to-modify-hosts-in-multiple-container.md @@ -0,0 +1,54 @@ +# 多容器场景下修改 hosts 失效 + +## 问题现象 + +业务容器启动的逻辑中,修改了 `/etc/hosts` 文件,当 Pod 只存在这一个业务容器时,文件可以修改成功,但存在多个时 (比如注入了 istio 的 sidecar),修改可能会失效。 + +## 分析 + +1. 容器中的 `/etc/hosts` 是由 kubelet 生成并挂载到 Pod 中所有容器,如果 Pod 有多个容器,它们挂载的 `/etc/hosts` 文件都对应宿主机上同一个文件,路径通常为 `/var/lib/kubelet/pods//etc-hosts`。 + > 如果是 docker 运行时,可以通过 `docker inspect -f {{.HostsPath}}` 查看。 + +2. 
kubelet 在启动容器时,都会走如下的调用链(`makeMounts->makeHostsMount->ensureHostsFile`)来给容器挂载 `/etc/hosts`,而在 `ensureHostsFile` 函数中都会重新创建一个新的 `etc-hosts` 文件,导致在其他容器中对 `/etc/hosts` 文件做的任何修改都被还原了。 + +所以,当 Pod 中存在多个容器时,容器内修改 `/etc/hosts` 的操作可能会被覆盖回去。 + +## 解决方案 + +通常不推荐在容器内修改 `/etc/hosts`,应该采用更云原生的做法,参考 [自定义域名解析](../../../best-practices/dns/customize-dns-resolution.md)。 + +### 使用 HostAliases + +如果只是某一个 workload 需要 hosts,可以用 HostAliases: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: host +spec: + replicas: 1 + selector: + matchLabels: + app: host + template: + metadata: + labels: + app: host + spec: + hostAliases: # 这下面定义 hosts + - ip: "10.10.10.10" + hostnames: + - "mysql.example.com" + containers: + - name: nginx + image: nginx:latest +``` + +> 参考官方文档 [Adding entries to Pod /etc/hosts with HostAliases](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/)。 + +### CoreDNS hosts + +如果是多个 workload 都需要共同的 hosts,可以修改集群 CoreDNS 配置,在集群级别增加 hosts: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925151945.png) diff --git a/content/troubleshooting/cases/others/job-cannot-delete.md b/content/troubleshooting/cases/others/job-cannot-delete.md new file mode 100644 index 0000000..05a1a7a --- /dev/null +++ b/content/troubleshooting/cases/others/job-cannot-delete.md @@ -0,0 +1,15 @@ +# Job 无法被删除 + +## 原因 + +* 可能是 k8s 的一个bug: [https://github.com/kubernetes/kubernetes/issues/43168](https://github.com/kubernetes/kubernetes/issues/43168) +* 本质上是脏数据问题,Running+Succeed != 期望Completions 数量,低版本 kubectl 不容忍,delete job 的时候打开debug(加-v=8),会看到kubectl不断在重试,直到达到timeout时间。新版kubectl会容忍这些,删除job时会删除关联的pod + +## 解决方法 + +1. 升级 kubectl 版本,1.12 以上 +2. 低版本 kubectl 删除 job 时带 `--cascade=false` 参数\(如果job关联的pod没删完,加这个参数不会删除关联的pod\) + +```bash +kubectl delete job --cascade=false +``` diff --git a/content/troubleshooting/cases/runtime/broken-system-time-causing-sandbox-conflicts.md b/content/troubleshooting/cases/runtime/broken-system-time-causing-sandbox-conflicts.md new file mode 100644 index 0000000..dc771d5 --- /dev/null +++ b/content/troubleshooting/cases/runtime/broken-system-time-causing-sandbox-conflicts.md @@ -0,0 +1,116 @@ +# 系统时间被修改导致 sandbox 冲突 + +## 问题描述 + +节点重启后,节点上的存量 pod 出现无法正常 running,容器(sandbox)在不断重启的现象。 + +查看事件,提示是 sandbox 的 name 存在冲突 (`The container name xxx is already used by yyy`),具体事件如下: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153743.png) + +## 结论先行 + +这个问题的根因是节点的时间问题,节点重启前的系统时间比节点重启后的系统时间提前,影响了 kubelet 内部缓存 cache 中的 sandbox 的排序,导致 kubelet 每次起了一个新 sandbox 之后,都只会拿到旧的 sandbox,导致了 sandbox 的不断创建和 name 冲突。 + +## 排查日志 + +先来看下 kubelet 的日志,部分截图如下: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153751.png) + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153759.png) + +截图上是同一个 pod(kube-proxy)最近的两次 podWorker 逻辑截图,都抛出了同一个提示:`No ready sandbox for pod can be found, Need to start a new one`。这个应该就是造成容器冲突的来源,每次沉浸到 podWorker 的逻辑之后,podWorker 都要尝试去创建一个新的sandbox,进而造成容器冲突。 + +疑问:为啥 podWorker 每次都去创建一个新的 sandbox? 
+ +接下来继续调大 kubelet 的日志级别(k8s v1.16已经支持动态调整,这里调大日志级别到V(6)),这里主要是尝试拿到某个 pod 所关联的所有 sandbox,截图如下: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153810.png) + +通过配合节点上执行 docker inspect(ps)相关命令发现,异常的 pod(kube-proxy)存在两个 sandbox(重启前的+重启后的),并且在 sandboxID 数组中的排序为 `[重启前的sandbox, 重启后的 sandbox]` (这里先 mark 一下)。 + +## 相关知识 + +在进一步分析之前,我们先介绍下相关背景知识。 + +### Pod 创建流程 + +先来一起熟悉下 pod 创建流程: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153823.png) + +### PLEG 组件 + +再看下 `PLEG` 的工作流程。kubelet 启动之后,会运行起 `PLEG` 组件,定期的缓存 pod 的信息(包括 pod status)。在 `PLEG` 的每次 relist 逻辑中,会对比 `old pod` 和 `new pod`,检查是否存在变化,如果新旧 pod 之间存在变化,则开始执行下面两个逻辑: +1. 生成 event 事件,比如 containerStart 等,最后再投递到 `eventChannel` 中,供 podWorker 来消费。 +2. 更新内部缓存 cache。在跟新缓存 `updateCache` 的逻辑中,会调用 runtime 的相关接口获取到与 pod 相关的 status 状态信息,然后并缓存到内部缓存 cache中,最后发起通知 ( podWorker 会发起订阅) 。 + +podStatus的数据结构如下: + +```go +# podStatus +type PodStatus struct { + // ID of the pod. + ID types.UID + ... + ... + // Only for kuberuntime now, other runtime may keep it nil. + SandboxStatuses []*runtimeapi.PodSandboxStatus +} + +# SandboxStatus +// PodSandboxStatus contains the status of the PodSandbox. +type PodSandboxStatus struct { + // ID of the sandbox. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ... + // Creation timestamp of the sandbox in nanoseconds. Must be > 0. + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + ... +} +``` + +podStatus 会保存 pod 的一些基础信息,再加上 containerStatus 和 sandboxStatus 信息。 + +这里重点关注下 SandboxStatus 的排序问题,配合代码可以发现,排序是按照 sandbox 的 Create time 来执行的,并且时间越新,位置越靠前。排序相关的代码部分如下: + +```go +// Newest first. +type podSandboxByCreated []*runtimeapi.PodSandbox + +func (p podSandboxByCreated) Len() int { return len(p) } +func (p podSandboxByCreated) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p podSandboxByCreated) Less(i, j int) bool { return p[i].CreatedAt > p[j].CreatedAt } +``` + +### podWorker 组件 + +最后再看下 podWorker 的工作流程。podWorker 的工作就是负责 pod 在节点上的正确运行(比如挂载 volume,新起 sandbox,新起 container 等),一个 pod 对应一个 podWorker,直到 pod 销毁。当节点重启后,kubelet 会收到 `type=ADD` 的事件来创建 pod 对象。 + +当 pod 更新之后,会触发 `event=containerStart` 事件的投递,然后 kubelet 就会收到 `type=SYNC` 的事件,来更新 pod 对象。在每次 podWorker 的内部逻辑中(`managePodLoop()`) 中,会存在一个 podStatus(内部缓存)的订阅,如下: + +```go +// This is a blocking call that would return only if the cache +// has an entry for the pod that is newer than minRuntimeCache +// Time. This ensures the worker doesn't start syncing until +// after the cache is at least newer than the finished time of +// the previous sync. 
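// 注:GetNewerThan 返回的 podStatus 中,SandboxStatuses 按 CreatedAt 从新到旧排序
// (见上文 podSandboxByCreated)。若节点系统时间被回拨,重启前创建的旧 sandbox
// 的 CreatedAt 反而显得更“新”,会排在最前,podWorker 每轮拿到的就一直是旧 sandbox。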
+status, err := p.podCache.GetNewerThan(podUID, lastSyncTime) +``` + +来等待内部 cache 中的 podStatus 更新,然后再操作后续动作(是否重新挂载 volume、是否重建 sandbox,是否重建 container 等)。 + +## 复现问题,定位根因 + +接下来,我们一起来模拟复现下问题现场。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153835.png) + +在节点重启之前,由于是新建节点后,所以对于 pod 来说,status 中只有一个命名以 `_0` 结尾的 sandbox。当操作重启节点之后,kubelet 收到 `type=ADD` 的事件,podWorker 开始创建 pod,由于之前以 `_0` 命名结尾的 sandbox 已经 died 了,所以会新建一个新的以 `_1` 命名结尾的 sandbox,当新的以 `_1` 命名结尾的 sandbox 运行之后(containerStarted),就会投递一个 `type=SYNC` 的事件给到 kubelet,然后 podWorker 会被再次触发(内部 cache 也更新了,通知也发出了)。正常情况下,podWorker 会拿到 podStatus 中新的 sandbox(以 `_1` 命名结尾的),就不会再创建 sandbox 了,也就是不会发生 name 冲突的问题。而用户的环境却是,此时拿到了以 `_0` 命名结尾的旧的 sandbox,所以再新一轮的 podWorker 逻辑中,会再次创建一个新的以 `_1` 命名的 sandbox,从而产生冲突。 + +而这里的根因就是时间问题,节点重启前的sandbox(以 `_0` 命名结尾的)的 `create time` ,比节点重启后的sandbox(以 `_1` 命名结尾的)的 `create time` 还要提前,所以导致了内部 cache 中 sandbox 的排序发生了错乱,从而触发 name 冲突问题。 + +## 解决方案 + +根据上面的排查发现,kubelet 的内部缓存中,sandbox 的排序是有系统时间来决定的,所以,尽量保证 k8s 集群中的时间有正确同步,或者不要乱改节点上的时间。 diff --git a/content/troubleshooting/cases/runtime/io-high-load-causing-pod-creation-timeout.md b/content/troubleshooting/cases/runtime/io-high-load-causing-pod-creation-timeout.md new file mode 100644 index 0000000..165a6cf --- /dev/null +++ b/content/troubleshooting/cases/runtime/io-high-load-causing-pod-creation-timeout.md @@ -0,0 +1,53 @@ +# 磁盘 IO 过高导致 Pod 创建超时 + +## 问题背景 + +在创建 TKE 集群的 worker node 时,用户往往会单独再购买一块云盘,绑到节点上,用于 docker 目录挂载所用(将 docker 目录单独放到数据盘上)。此时,docker 的读写层(RWLayer)就会落到云盘上。 + +在该使用场景下,有用户反馈,在创建 Pod 时,会偶现 Pod 创建超时的报错,具体报错如下: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153934.png) + +## 结论先行 + +当单独挂载一块云盘用于 docker 目录挂载使用时,会出现如下情况:云盘的真实使用超过云盘所支持的最大吞吐,导致 pod 创建超时。 + +## pod 失败的异常事件 + +从报错的事件上来看,可以看到报错是 create sandbox 时,rpc 调用超时了。 + +在 create sandbox 时,dockershim 会发起两次dockerd调用,分别是:`POST /containers/create` 和 `POST /containers/start`。而事件上给出的报错,就是 `POST /containers/create` 时的报错。 + +## 日志和堆栈分析 + +开启dockerd的debug模式后,在异常报错时间段内,能够看到有与 `POST /containers/create` 相关的日志,但是并没有看到与 `POST /containers/start` 相关的日志,说明 docker daemon 有收到 create container 的 rpc 请求,但是并没有在timeout的时间内,完成请求。可以对应到 pod 的异常报错事件。 + +当稳定复现问题(rpc timeout)之后,手动尝试在节点上通过curl命令,向docker daemon请求create containber。 + +命令如下: + +```bash +$ curl --unix-socket /var/run/docker.sock "http://1.38/containers/create?name=test01" -v -X POST -H "Content-Type: application/json" -d '{"Image": "nginx:latest"}' +``` + +当执行 curl 命令之后,确实要等很长时间(>2min)才返回。 + +并抓取 dockerd 的堆栈信息,发现如下:**在问题发生时,有一个 delete container 动作,长时间卡在了 unlinkat 系统调用。** + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153951.png) + +container 的 create 和 delete 请求都会沉浸到 layer store组件,来创建或者删除容器的读写层。 + +在 layer store 组件中,维护了一个内部数据结构(layerStore),其中有一个字段 `mounts map[string]*mountedLayer` 用于维护所有容器的读写层信息,并且还配置了一个读写锁用于保护该信息(数据mounts的任何增删操作都需要先获取一个读写锁)。如果某个请求(比如container delete)长时间没有返回,就会阻塞其他 container 的创建或者删除。 + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925153959.png) + +## 云盘监控 + +云盘的相关监控可以重点关注以下三个指标:云盘写流量、IO await、IO %util。 + +## 解决方案 + +配合业务场景需求,更换更高性能的云盘。 + +腾讯云上的云硬盘种类和吞吐指标可以 [官方参考文档](https://cloud.tencent.com/document/product/362/2353) 。 diff --git a/content/troubleshooting/cases/runtime/mount-root-causing-device-or-resource-busy.md b/content/troubleshooting/cases/runtime/mount-root-causing-device-or-resource-busy.md new file mode 100644 index 0000000..81ad75a --- /dev/null +++ 
b/content/troubleshooting/cases/runtime/mount-root-causing-device-or-resource-busy.md @@ -0,0 +1,286 @@ +# 挂载根目录导致 device or resource busy + +## 现象 + +在删除 pod 时,可能会遇到如下事件 `unlinkat xxxxx: device or resource busy`,设备或资源忙导致某个文件无法被删除,进而导致 pod 卡在 Terminating 状态。 + +接下来,就单独针对在 **containerd运行时环境** 下,发生的相关报错进行回顾分析,具体的报错现象如下: + +```txt +unlinkat /var/run/netns/cni-49ddd103-d374-1f86-7324-13abaeb9c910: device or resource busy +``` + +## 复现场景 + +环境: + +* containerd 运行时 +* centos 7.6 操作系统 + +通过先后创建如下两个服务(sleeping 和 rootfsmount)可以复现问题。 + +1. 先创建 sleeping 服务: + +```bash +$ cat < 此部分为引用说明,详情可见参考链接1 + +* 内核特性,用于控制某个挂载点下的子挂载点是否"传播"给其他挂载点,只应用于 bind mount 和 mount namespace 场景中。 +* Shared subtrees 技术引入了两个概念,分别是 peer group 和 propagation type,接下来一一介绍。 + +2.1 peer group + +共享挂载信息的一组挂载点,来源主要两种: +* bind mount,此时源和目的挂载点属于同一 peer group,要求源也是挂载点。 +* 新的 namespace 创建,新的 namespace 会拷贝旧的一份挂载信息,于是,新旧中相同挂载点属于同一 peer group。 + +2.2 propagation type + +每个挂载点都有这样的一个元数据(propagation type),用于控制当一个挂载点的下面创建和移除挂载点的时候,是否会传播到属于相同peer group的其他挂载点下去,主要有三种: + +* `MS_SHARED`: 挂载信息在同一个 peer group 里会相互传播。比如把节点上的主目录挂载到容器内的 `/rootfs`,如果节点上的主目录创建了新的挂载点X,则* 在容器内的 `/rootfs` 下面也会出现新的挂载点 `/rootfs/X`。 +* `MS_PRIVATE`:挂载信息在同一个 peer group 里不会相互传播。比如把节点上的主目录挂载到容器内的 `/rootfs`,如果节点上的主目录创建了新的挂载点X,则容器内的 `/rootfs` 下面不会出现新的挂载点 `/rootfs/X`。 +* `MS_SLAVE`:挂载信息传播是单向的。比如把节点上的主目录挂载到容器内的 `/rootfs`,如果节点上的主目录创建了新的挂载点 X,则在容器内的 `/rootfs` 下面也会出现新的挂载点 `/rootfs/X` ,反之则不行。 + +这个对应到 k8s 中 `Container.volumeMounts` 的 `mountPropagation` 字段,分别是:Bidirectional、None、HostToContainer。 + +## 进一步分析 + +让我们再回到复现场景中的第二步,创建 rootfsmount 服务时,发生了什么。 + +通过命令抓取下 contianerd 的所有 mount 系统调用,发现有如下两个 mount 记录: + +```bash +$ strace -f -e trace=mount -p +... +[pid 15532] mount("/", "/run/containerd/io.containerd.runtime.v2.task/k8s.io/5b498caf152857cf1c797761e1f52d64c2ce7d4602b72304da7e154ed31043c8/rootfs/rootfs", 0xc0000f7500, MS_BIND|MS_REC, NULL) = 0 +[pid 15532] mount("", "/run/containerd/io.containerd.runtime.v2.task/k8s.io/5b498caf152857cf1c797761e1f52d64c2ce7d4602b72304da7e154ed31043c8/rootfs/rootfs", 0xc0000f7506, MS_REC|MS_PRIVATE, NULL) = 0 +... +``` + +这个就对应于 pod 配置中的 volumeMount,我们再进一步看下 container 中的 mount 信息。 + +将节点上的主目录 `/` (挂载点) 挂载到了容器中的 `/rootfs` (挂载点),并且 propagation type 为 rprivate。 + +```bash +$ crictl inspect +... +{ + "destination": "/rootfs", + "type": "bind", + "source": "/", + "options": [ + "rbind", + "rprivate", + "rw" + ] +}, +... +``` + +让我们再看下pod(或者容器内)的挂载情况: + +```bash +$ cat /proc/self/mountinfo +... +# 对应pod的volumeMount设置,将宿主机上的主目录/ 挂载到了容器内的/rootfs目录下 +651 633 253:1 / /rootfs rw,relatime - ext4 /dev/vda1 rw,data=ordered +695 677 0:3 / /rootfs/run/netns/cni-49ddd103-d374-1f86-7324-13abaeb9c910 rw,nosuid,nodev,noexec,relatime - proc proc rw +... +``` + +节点上的挂载点(/var/run/netns/cni-49ddd103-d374-1f86-7324-13abaeb9c910)在容器内,也是挂载点(/rootfs/run/netns/cni-49ddd103-d374-1f86-7324-13abaeb9c910)。 + +## 结论 + +当测试服务 rootfsmount 的 pod 实例创建时,会把节点上的主目录 `/` 挂载到容器内(比如 `/rootfs`),由于主目录在节点上是一个挂载点,所以节点上的主目录和容器内的/rootfs属于同一个 peer group,并且采用了默认的 propagation type:rprivate。 + +当测试服务 sleepping 的 pod 实例销毁时,需要解挂和销毁对应的 netns 文件(/var/run/netns/cni-49ddd103-d374-1f86-7324-13abaeb9c910),由于此时的 propagation type 是 rprivate,节点上主目录下的子挂载点解挂不会传递到容器的 net namespace 内,所以,这个 netns 文件(/rootfs/run/netns/cni-49ddd103-d374-1f86-7324-13abaeb9c910)依然是一个挂载点,导致在销毁 netns 文件时会失败。 + + +## 解决方案 + +1. 
给 rootfsmount 服务的 volumeMount 配置新增 propagation type,设置为 HostToContainer 或者 Bidirectional。 + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rootfsmount +spec: + replicas: 1 + selector: + matchLabels: + app: rootfsmount + template: + metadata: + labels: + app: rootfsmount + spec: + containers: + - name: rootfsmount + image: busybox + args: ["sleep", "1h"] + volumeMounts: + - mountPath: /rootfs + name: host-rootfs + mountPropagation: HostToContainer # 这里显示声明mountPropagation为HostToContainer 或者 Bidirectional + volumes: + - hostPath: + path: / + type: "" + name: host-rootfs +``` + + +2. centos 和 redhat 的内核,可以开启如下内核参数: + +```bash +echo 1 > /proc/sys/fs/may_detach_mounts +``` + +## 疑问:为啥 dockerd 运行时没有这个问题? + +这里主要有两点: + +1. dockerd 在启动的时候,开启了内核参数 `fs.may\_detach\_mounts`。 + +```go +// This is used to allow removal of mountpoints that may be mounted in other +// namespaces on RHEL based kernels starting from RHEL 7.4. +// Without this setting, removals on these RHEL based kernels may fail with +// "device or resource busy". +// This setting is not available in upstream kernels as it is not configurable, +// but has been in the upstream kernels since 3.15. +func setMayDetachMounts() error { + f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrap(err, "error opening may_detach_mounts kernel config file") + } + defer f.Close() + + _, err = f.WriteString("1") + if os.IsPermission(err) { + // Setting may_detach_mounts does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. + if !rsystem.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1") + } + return nil + } + return err +} +``` + + +2. dockerd 在挂载目录时,会验证挂载的源目录与 daemon 的 root 目录的关系,如果源目录是 root 目录的子目录或者 root 目录是源目录的子目录,则将 propagation type 设置为 `MS_SLAVE`。 + +```go +// validateBindDaemonRoot ensures that if a given mountpoint's source is within +// the daemon root path, that the propagation is setup to prevent a container +// from holding private refereneces to a mount within the daemon root, which +// can cause issues when the daemon attempts to remove the mountpoint. 
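// 注:正是下面这个校验在起作用:当 bind mount 的源目录与 daemon root 互为父子目录时,
// docker 要求 propagation 必须为 rslave 或 rshared,避免容器持有私有引用导致解挂失败。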
+func (daemon *Daemon) validateBindDaemonRoot(m mount.Mount) (bool, error) { + if m.Type != mount.TypeBind { + return false, nil + } + + // check if the source is within the daemon root, or if the daemon root is within the source + if !strings.HasPrefix(m.Source, daemon.root) && !strings.HasPrefix(daemon.root, m.Source) { + return false, nil + } + + if m.BindOptions == nil { + return true, nil + } + + switch m.BindOptions.Propagation { + case mount.PropagationRSlave, mount.PropagationRShared, "": + return m.BindOptions.Propagation == "", nil + default: + } + + return false, errdefs.InvalidParameter(errors.Errorf(`invalid mount config: must use either propagation mode "rslave" or "rshared" when mount source is within the daemon root, daemon root: %q, bind mount source: %q, propagation: %q`, daemon.root, m.Source, m.BindOptions.Propagation)) +} +``` + +## 参考文档 + +* [Shared subtree](https://segmentfault.com/a/1190000006899213) +* [Mount Propagation](https://kubernetes.io/zh/docs/concepts/storage/volumes/#mount-propagation) diff --git a/content/troubleshooting/cases/runtime/pull-image-fail-in-high-version-containerd.md b/content/troubleshooting/cases/runtime/pull-image-fail-in-high-version-containerd.md new file mode 100644 index 0000000..97297d8 --- /dev/null +++ b/content/troubleshooting/cases/runtime/pull-image-fail-in-high-version-containerd.md @@ -0,0 +1,169 @@ +# 高版本 containerd 下载镜像失败 + +## 问题描述 + +在 containerd 运行时的 kubernetes 线上环境中,出现了镜像无法下载的情况,具体报错如下: + +```txt +Failed to pull image ` `"ccr.ccs.tencentyun.com/tkeimages/tke-hpc-controller:v1.0.0"` `: rpc error: code = NotFound desc = failed to pull and unpack image ` `"ccr.ccs.tencentyun.com/tkeimages/tke-hpc-controller:v1.0.0"` `: failed to unpack image on snapshotter overlayfs: failed to extract layer sha256:d72a74c56330b347f7d18b64d2effd93edd695fde25dc301d52c37efbcf4844e: failed to get reader from content store: content digest sha256:2bf487c4beaa6fa7ea6e46ec1ff50029024ebf59f628c065432a16a940792b58: not found +``` + +containerd 的日志中也有相关日志: + +```txt +containerd[136]: time="2020-11-19T16:11:56.975489200Z" level=info msg="PullImage \"redis:2.8.23\"" +containerd[136]: time="2020-11-19T16:12:00.140053300Z" level=warning msg="reference for unknown type: application/octet-stream" digest="sha256:481995377a044d40ca3358e4203fe95eca1d58b98a1d4c2d9cec51c0c4569613" mediatype=application/octet-stream size=5946 +``` + +## 尝试复现 + +分析环境信息: + +* container v1.4.3 运行时。 +* 基于 1.10 版本的 docker 制作的镜像(比如 dockerhub 镜像仓库中的 redis:2.8.23)。 + +然后根据以上版本信息构造相同环境,通过如下命令拉取镜像: + +```bash +$ crictl pull docker.io/libraryredis:2.8.23 +FATA[0001] pulling image failed: rpc error: code = NotFound desc = failed to pull and unpack image "docker.io/library/redis:2.8.23": failed to unpack image on snapshotter overlayfs: failed to extract layer sha256:4dcab49015d47e8f300ec33400a02cebc7b54cadd09c37e49eccbc655279da90: failed to get reader from content store: content digest sha256:51f5c6a04d83efd2d45c5fd59537218924bc46705e3de6ffc8bc07b51481610b: not found +``` + +问题复现,基本确认跟 containerd 版本与打包镜像的 docker 版本有关。 + +## 分析镜像下载的过程 + +在 containerd 运行时环境中,完整拉取一个镜像,主要会经历以下几步,如图所示: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925154119.png) + +接下来以 `centos:latest` 镜像的拉取过程为例。 + +1. 
将镜像名解析成 oci 规范里 descriptor + +主要是 HEAD 请求,并且记录下返回中的 `Content-Type` 和 `Docker-Content-Digest`: + +```bash +$ curl -v -X HEAD -H "Accept: application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*" https://mirror.ccs.tencentyun.com/v2/library/centos/manifests/latest?ns=docker.io +... +< HTTP/1.1 200 OK +< Date: Mon, 17 May 2021 11:53:29 GMT +< Content-Type: application/vnd.docker.distribution.manifest.list.v2+json +< Content-Length: 762 +< Connection: keep-alive +< Docker-Content-Digest: sha256:5528e8b1b1719d34604c87e11dcd1c0a20bedf46e83b5632cdeac91b8c04efc1 +``` + +2. 获取镜像的 list 列表: + +```bash +$ curl -X GET -H "Accept: application/vnd.docker.distribution.manifest.list.v2+json" https://mirror.ccs.tencentyun.com/v2/library/centos/manifests/sha256:5528e8b1b1719d34604c87e11dcd1c0a20bedf46e83b5632cdeac91b8c04efc1 +{ + "manifests":[ + { + "digest":"sha256:dbbacecc49b088458781c16f3775f2a2ec7521079034a7ba499c8b0bb7f86875", + "mediaType":"application\/vnd.docker.distribution.manifest.v2+json", + "platform":{ + "architecture":"amd64", + "os":"linux" + }, + "size":529 + }, + { + "digest":"sha256:7723d6b5d15b1c64d0a82ee6298c66cf8c27179e1c8a458e719041ffd08cd091", + "mediaType":"application\/vnd.docker.distribution.manifest.v2+json", + "platform":{ + "architecture":"arm64", + "os":"linux", + "variant":"v8" + }, + "size":529 + }, + ... + "mediaType":"application\/vnd.docker.distribution.manifest.list.v2+json", + "schemaVersion":2 +} +``` + +3. 获取特定操作系统上的镜像 manifest。由于宿主机的环境是 linux,所以 `containerd` 会选择适合该平台的镜像进行拉取: + +```bash +$ curl -X GET -H "Accept: application/vnd.docker.distribution.manifest.v2+json" https://mirror.ccs.tencentyun.com/v2/library/centos/manifests/sha256:dbbacecc49b08458781c16f3775f2a2ec7521079034a7ba499c8b0bb7f86875 +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 2143, + "digest": "sha256:300e315adb2f96afe5f0b2780b87f28ae95231fe3bdd1e16b9ba606307728f55" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 75181999, + "digest": "sha256:7a0437f04f83f084b7ed68ad9c4a4947e12fc4e1b006b38129bac89114ec3621" + } + ] +} +``` + +5. 拉取镜像的 config 和 layers。最后一步就是解析第三步中获取的 manifest,分别再下载镜像的 config 和 layers 就可以。 + +## 关于 mediaType:application/octet-stream + +`mediaType:application/octet-stream` 是 docker 较早(docker v1.10 之前)支持的 `docker/oci` 标准,现在已经不支持了,而社区也任务该 mediaType 也太老了,所以 containerd 后续也就不再支持了 (详情可以参考 PR [#5497](https://github.com/containerd/containerd/pull/5497)) 。 + +## 定位根因 + +接下来以 `redis:2.8.23` 镜像的拉取过程为例说明一下拉取失败的原因。 + +1. 将镜像名解析成 OCI 规范里 descriptor。这里还是 HEAD 请求,但返回中的 `Content-Type` 已经不是 list 类型了,而是 `application/vnd.docker.distribution.manifest.v2+json`: + +```bash +$ curl -v -X HEAD -H "Accept: application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*" https://mirror.ccs.tencentyun.com/v2/library/redis/manifests/2.8.23?ns=docker.io +... +< HTTP/1.1 200 OK +< Date: Thu, 20 May 2021 02:25:08 GMT +< Content-Type: application/vnd.docker.distribution.manifest.v2+json +< Content-Length: 1968 +< Connection: keep-alive +< Docker-Content-Digest: sha256:e507029ca6a11b85f8628ff16d7ff73ae54582f16fd757e64431f5ca6d27a13c +``` + +2. 
直接解析 manifest。因为 HEAD 请求中返回的是 manifest 类型,而不是 list 类型,所以这里会直接解析,解析出的 config 的 mediaType 是 `application/octet-stream`: + +```bash +$ curl -X GET -H "Accept: application/vnd.docker.distribution.manifest.v2+json" https://mirror.ccs.tencentyun.com/v2/library/redis/manifests/sha256:e507029ca6a11b85f8628ff16d7ff73ae54582f16fd757e64431f5ca6d27a13c +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/octet-stream", # 这里的 mediaType 是 application/octet-stream + "size": 5946, + "digest": "sha256:481995377a044d40ca3358e4203fe95eca1d58b98a1d4c2d9cec51c0c4569613" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 51356334, + "digest": "sha256:51f5c6a04d83efd2d45c5fd59537218924bc46705e3de6ffc8bc07b51481610b" + }, + ... + ] +``` + +3. containerd 中已经不支持 `mediaType: application/octet-stream`。 在 unpacker 逻辑中, containerd 已经不再支持 `mediaType: application/octet-stream`,导致了不会再下载 layers,故而出错。具体代码在 `containerd/unpacker.go` 中: + +```go + case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: +``` + +## 解决方案 + +如果遇到该问题,应基于新的 dockerd 运行时(>= docker v1.11)来重新构建镜像,并推送到镜像仓库中。 + +## 疑问:为什么 containerd v1.3.4 版本支持,而新版 v1.4.3 版本却不支持 ? + +在 containerd v1.3.4 的版本中,合进了 [PR #2814:bugfix: support application/octet-stream during pull](https://github.com/containerd/containerd/pull/2814) ,支持了 `mediaType:application/octet-stream` 镜像格式的下载。 + +而在 v1.4.3 中,包含 [PR #3870](https://github.com/containerd/containerd/pull/3870) ,又去掉了对 `mediaType:application/octet-stream` 镜像格式的支持,导致了 v1.3.4 和 v1.4.3 版本的行为不一致。 diff --git a/content/troubleshooting/cluster/namespace-terminating.md b/content/troubleshooting/cluster/namespace-terminating.md new file mode 100644 index 0000000..d2a29c6 --- /dev/null +++ b/content/troubleshooting/cluster/namespace-terminating.md @@ -0,0 +1,116 @@ +# Namespace 一直 Terminating + +## 概述 + +本文分享 namespace 一直卡在 terminating 状态的可能原因与解决方法。 + +## Namespace 上存在 Finalizers 且对应软件已卸载 + +删除 ns 后,一直卡在 Terminating 状态。通常是存在 finalizers,通过 `kubectl get ns xxx -o yaml` 可以看到是否有 finalizers: + +``` bash +$ kubectl get ns -o yaml kube-node-lease +apiVersion: v1 +kind: Namespace +metadata: +... + finalizers: + - finalizers.kubesphere.io/namespaces + labels: + kubesphere.io/workspace: system-workspace + name: kube-node-lease + ownerReferences: + - apiVersion: tenant.kubesphere.io/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Workspace + name: system-workspace + uid: d4310acd-1fdc-11ea-a370-a2c490b9ae47 +spec: {} +``` + +此例是因为之前装过 kubesphere,然后卸载了,但没有清理 finalizers,将其删除就可以了。 + +k8s 资源的 metadata 里如果存在 finalizers,那么该资源一般是由某应用创建的,或者是这个资源是此应用关心的。应用会在资源的 metadata 里的 finalizers 加了一个它自己可以识别的标识,这意味着这个资源被删除时需要由此应用来做删除前的清理,清理完了它需要将标识从该资源的 finalizers 中移除,然后才会最终彻底删除资源。比如 Rancher 创建的一些资源就会写入 finalizers 标识。 + +如果应用被删除,而finalizer没清理,删除资源时就会一直卡在terminating,可以手动删除finalizer来解决。 + +手动删除方法: +1. `kubectl edit ns xx` 删除 `spec.finalizers`。 +2. 
如果k8s版本较高会发现方法1行不通,因为高版本更改 namespace finalizers 被移到了 namespace 的 finalize 这个 subresource (参考[官方文档API文档](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/namespaces.md#rest-api)),并且需要使用 `PUT` 请求,可以先执行 `kubectl proxy` 然后再起一个终端用 curl 模拟请求去删 `finalizers`: +``` bash +curl -H "Content-Type: application/json" -XPUT -d '{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"delete-me"},"spec":{"finalizers":[]}}' http://localhost:8001/api/v1/namespaces/delete-me/finalize +``` +> 替换 `delete-me` 为你的 namespace 名称 + +参考资料: + +* Node Lease 的 Proposal: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0009-node-heartbeat.md + +## Namespace 中残留的资源存在 Finalizers 且相应软件已卸载 + +查看 namespace yaml: + +```bash +$ kubectl get ns istio-system -o yaml +... +status: + conditions: + - lastTransitionTime: "2021-12-07T05:07:14Z" + message: 'Some resources are remaining: kialis.kiali.io has 1 resource instances' + reason: SomeResourcesRemain + status: "True" + type: NamespaceContentRemaining + - lastTransitionTime: "2021-12-07T05:07:14Z" + message: 'Some content in the namespace has finalizers remaining: kiali.io/finalizer + in 1 resource instances' + reason: SomeFinalizersRemain + status: "True" + type: NamespaceFinalizersRemaining + phase: Terminating +``` + +可以看到 `SomeResourcesRemain` 和 `SomeFinalizersRemain`,对应资源类型是 `kialis.kiali.io`,可以获取看一下: + +```bash +$ kubectl -n istio-system get kialis.kiali.io +NAME AGE +kiali 5d23h +``` + +这个例子明显看是安装过 kiali,且有 kiali 残留的 crd 资源,但 kiali 已卸载。 +清理 namespace 时清理 kiali 资源时,发现资源上存在 finilizer,需等待 kiali 本身进一步清理,由于 kiali 已卸载就无法清理,导致一直在等待。 + +这个时候我们可以手动删下资源上的 finalizer 即可: + +```bash +kubectl -n istio-system edit kialis.kiali.io kiali +``` + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925154207.png) + +## metrics server 被删除 + +删除 ns 时,apiserver 会调注册上去的扩展 api 去清理资源,如果扩展 api 对应的服务也被删了,那么就无法清理完成,也就一直卡在 Terminating。 + +下面的例子就是使用 prometheus-adapter 注册的 resource metrics api,但 prometheus-adapter 已经被删除了: + +``` bash +$ kubectl get apiservice +... +v1beta1.metrics.k8s.io monitoring/prometheus-adapter False (ServiceNotFound) 75d +... +``` + +## 强删 namespace 方法 + +有时候实在找不到原因,也可以强删 namespace,以下是强删方法: + +```bash +NAMESPACE=delete-me +kubectl get ns $NAMESPACE -o json | jq '.spec.finalizers=[]' > ns.json +kubectl proxy --port=8899 & +PID=$! +curl -X PUT http://localhost:8899/api/v1/namespaces/$NAMESPACE/finalize -H "Content-Type: application/json" --data-binary @ns.json +kill $PID +``` diff --git a/content/troubleshooting/network/close-wait-stacking.md b/content/troubleshooting/network/close-wait-stacking.md new file mode 100644 index 0000000..dcdbdf9 --- /dev/null +++ b/content/troubleshooting/network/close-wait-stacking.md @@ -0,0 +1,51 @@ +# 排查 CLOSE_WAIT 堆积 + +TCP 连接的 `CLOSE_WAIT` 状态,正常情况下是短暂的,如果出现堆积,一般说明应用有问题。 + +## CLOSE_WAIT 堆积的危害 + +每个 `CLOSE_WAIT` 连接会占据一个文件描述,堆积大量的 `CLOSE_WAIT` 可能造成文件描述符不够用,导致建连或打开文件失败,报错 `too many open files`: + +```txt +dial udp 9.215.0.48:9073: socket: too many open files +``` + +## 如何判断? + +检查系统 `CLOSE_WAIT` 连接数: + +```bash +lsof | grep CLOSE_WAIT | wc -l +``` + +检查指定进程 `CLOSE_WAIT` 连接数: + +```bash +lsof -p $PID | grep CLOSE_WAIT | wc -l +``` + +## 为什么会产生大量 CLOSE_WAIT? 
+ +我们看下 TCP 四次挥手过程: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925144654.png) + +主动关闭的一方发出 FIN 包,被动关闭的一方响应 ACK 包,此时,被动关闭的一方就进入了 `CLOSE_WAIT` 状态。如果一切正常,稍后被动关闭的一方也会发出 FIN 包,然后迁移到 `LAST_ACK` 状态。 + +通常,`CLOSE_WAIT` 状态在服务器停留时间很短,如果你发现大量的 `CLOSE_WAIT` 状态,那么就意味着被动关闭的一方没有及时发出 FIN 包,一般来说都是被动关闭的一方应用程序有问题。 + +### 应用没有 Close + +如果 `CLOSE_WAIT` 堆积的量特别大(比如 10w+),甚至导致文件描述符不够用了,一般就是应用没有 Close 连接导致。 + +当连接被关闭时,被动关闭方在代码层面没有 close 掉相应的 socket 连接,那么自然不会发出 FIN 包,从而会导致 `CLOSE_WAIT` 堆积。可能是代码里根本没写 Close,也可能是代码不严谨,出现死循环之类的问题,导致即便后面写了 close 也永远执行不到。 + +### 应用迟迟不 accept 连接 + +如果 `CLOSE_WAIT` 堆积的量不是很大,可能是全连接队列 (accept queue) 堆积了。我们先看下 TCP 连接建立的过程: + +![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925144703.png) + +连接建立好之后会被放入 accept queue,等待应用 accept,如果应用迟迟没有从队列里面去 accept 连接,等到 client 超时时间,主动关闭了连接,这时连接在 server 端仍在全连接队列中,状态变为 `CLOSE_WAIT`。 + +如果连接一直不被应用 accept 出来,内核也不会自动响应 ACK 去关闭连接的。不过这种情况的堆积量一般也不高,取决于 accept queue 的大小。 diff --git a/content/troubleshooting/network/dns-exception.md b/content/troubleshooting/network/dns-exception.md new file mode 100644 index 0000000..d46be05 --- /dev/null +++ b/content/troubleshooting/network/dns-exception.md @@ -0,0 +1,86 @@ +# 排查 DNS 解析异常 + +## 排查思路 + +### 确保集群 DNS 正常运行 + +容器内解析 DNS 走的集群 DNS(通常是 CoreDNS),所以首先要确保集群 DNS 运行正常。 + +kubelet 启动参数 `--cluster-dns` 可以看到 dns 服务的 cluster ip: + +```bash +$ ps -ef | grep kubelet +... /usr/bin/kubelet --cluster-dns=172.16.14.217 ... +``` + +找到 dns 的 service: + +```bash +$ kubectl get svc -n kube-system | grep 172.16.14.217 +kube-dns ClusterIP 172.16.14.217 53/TCP,53/UDP 47d +``` + +看是否存在 endpoint: + +```bash +$ kubectl -n kube-system describe svc kube-dns | grep -i endpoints +Endpoints: 172.16.0.156:53,172.16.0.167:53 +Endpoints: 172.16.0.156:53,172.16.0.167:53 +``` + +检查 endpoint 的 对应 pod 是否正常: + +```bash +$ kubectl -n kube-system get pod -o wide | grep 172.16.0.156 +kube-dns-898dbbfc6-hvwlr 3/3 Running 0 8d 172.16.0.156 10.0.0.3 +``` + +### 确保 Pod 能与集群 DNS 通信 + +检查下 pod 是否能连上集群 dns,可以在 pod 里 telnet 一下 dns 的 53 端口: + +```bash +# 连 dns service 的 cluster ip +$ telnet 172.16.14.217 53 +``` + +> 如果容器内没有 telnet 等测试工具,可以 [使用 nsenter 进入 netns](../skill/enter-netns-with-nsenter.md),然后利用宿主机上的 telnet 进行测试。 + +如果检查到是网络不通,就需要排查下网络设置: + +* 检查节点的安全组设置,需要放开集群的容器网段。 +* 检查是否还有防火墙规则,检查 iptables。 +* 检查 kube-proxy 是否正常运行,集群 DNS 的 IP 是 cluster ip,会经过 kube-proxy 生成的 iptables 或 ipvs 规则进行转发。 + +### 抓包 + +如果前面检查都没问题,可以考虑抓包看下,如果好复现,可以直接 [使用 nsenter 进入 netns](../skill/enter-netns-with-nsenter.md) 抓容器内的包: + +```bash +tcpdump -i any port 53 -w dns.pcap +# tcpdump -i any port 53 -nn -tttt +``` + +如果还不能分析出来,就在请求链路上的多个点一起抓,比如 Pod 的容器内、宿主机cbr0网桥、宿主机主网卡(eth0)、coredns pod 所在宿主机主网卡、cbr0 以及容器内。等复现拉通对比分析,看看包在哪个点丢的。 + +## 现象与可能原因 + +### 5 秒延时 + +如果DNS查询经常延时5秒才返回,通常是遇到内核 conntrack 冲突导致的丢包,详见 [排障案例: DNS 5秒延时](../cases/network/dns-lookup-5s-delay.md) + +### 解析外部域名超时 + +可能原因: + +* 上游 DNS 故障。 +* 上游 DNS 的 ACL 或防火墙拦截了报文。 + +### 所有解析都超时 + +如果集群内某个 Pod 不管解析 Service 还是外部域名都失败,通常是 Pod 与集群 DNS 之间通信有问题。 + +可能原因: + +* 节点防火墙没放开集群网段,导致如果 Pod 跟集群 DNS 的 Pod 不在同一个节点就无法通信,DNS 请求也就无法被收到。 +* kube-proxy 异常。 diff --git a/content/troubleshooting/network/network-unreachable.md b/content/troubleshooting/network/network-unreachable.md new file mode 100644 index 0000000..56d7f2d --- /dev/null +++ b/content/troubleshooting/network/network-unreachable.md @@ -0,0 +1,27 @@ +# 排查网络不通 + +## 排查思路 + +TODO + +## 可能原因 + +### 端口监听挂掉 + +如果容器内的端口已经没有进程监听了,内核就会返回 Reset 包,客户端就会报错连接被拒绝,可以进容器 netns 检查下端口是否存活: + +``` bash +netstat -tunlp +``` + +### 
iptables 规则问题 + +检查报文是否有命中丢弃报文的 iptables 规则: + +```bash +iptables -t filter -nvL +iptables -t nat -nvL +iptables -t raw -nvL +iptables -t mangle -nvL +iptables-save +``` diff --git a/content/troubleshooting/network/packet-loss.md b/content/troubleshooting/network/packet-loss.md new file mode 100644 index 0000000..aa865cd --- /dev/null +++ b/content/troubleshooting/network/packet-loss.md @@ -0,0 +1,168 @@ +# 排查网络丢包 + +本文汇总网络丢包相关问题的排查思路与可能原因。 + +## 网络丢包的定义与现象 + +网络丢包是指部分包正常,部分包被丢弃。 + +从现象上看就不是网络一直不通,而是: +1. 偶尔不通。 +2. 速度慢(丢包导致重传)。 + +## 排查思路 + +TODO + +### 可能原因 + +### 高并发 NAT 导致 conntrack 插入冲突 + +如果高并发并且做了 NAT,比如使用了 ip-masq-agent,对集群外的网段或公网进行 SNAT,又或者集群内访问 Service 被做了 DNAT,再加上高并发的话,内核就会高并发进行 NAT 和 conntrack 插入,当并发 NAT 后五元组冲突,最终插入的时候只有先插入的那个成功,另外冲突的就会插入失败,然后就丢包了。 + +可以通过 `conntrack -S` 确认,如果 `insert_failed` 计数在增加,说明有 conntrack 插入冲突。 + +### conntrack 表爆满 + +看内核日志: + +``` bash +# demsg +$ journalctl -k | grep "nf_conntrack: table full" +nf_conntrack: nf_conntrack: table full, dropping packet +``` + +若有以上报错,证明 conntrack 表满了,需要调大 conntrack 表: + +``` bash +sysctl -w net.netfilter.nf_conntrack_max=1000000 +``` + +### socket buffer 满导致丢包 + +`netstat -s | grep "buffer errors"` 的计数统计在增加,说明流量较大,socket buffer 不够用,需要调大下 buffer 容量: + +```bash +net.ipv4.tcp_wmem = 4096 16384 4194304 +net.ipv4.tcp_rmem = 4096 87380 6291456 +net.ipv4.tcp_mem = 381462 508616 762924 +net.core.rmem_default = 8388608 +net.core.rmem_max = 26214400 +net.core.wmem_max = 26214400 +``` + +### arp 表爆满 + +看内核日志: + +``` bash +# demsg +$ journalctl -k | grep "neighbor table overflow" +arp_cache: neighbor table overflow! +``` + +若有以上报错,证明 arp 表满了,查看当前 arp 记录数: + +``` bash +$ arp -an | wc -l +1335 +``` + +查看 arp gc 阀值: + +``` bash +$ sysctl -a | grep gc_thresh +net.ipv4.neigh.default.gc_thresh1 = 128 +net.ipv4.neigh.default.gc_thresh2 = 512 +net.ipv4.neigh.default.gc_thresh3 = 1024 +``` + +调大 arp 表: +``` bash +sysctl -w net.ipv4.neigh.default.gc_thresh1=80000 +sysctl -w net.ipv4.neigh.default.gc_thresh2=90000 +sysctl -w net.ipv4.neigh.default.gc_thresh3=100000 +``` + +更多请参考 [节点排障: Arp 表爆满](../node/arp-cache-overflow.md)。 + +### MTU 不一致导致丢包 + +如果容器内网卡 MTU 比另一端宿主机内的网卡 MTU 不一致(通常是 CNI 插件问题),数据包就可能被截断导致一些数据丢失: +1. 如果容器内的 MTU 更大,发出去的包如果超过 MTU 可能就被丢弃了(通常节点内核不会像交换机那样严谨会分片发送)。 +2. 
> tcp 协商 mss 的时候,主要看的是进程通信两端网卡的 MTU。

MTU 大小可以通过 `ip address show` 或 `ifconfig` 来确认。

### QoS 限流丢包

在云厂商的云主机环境,有可能会在底层对某些包进行 QoS 限流,比如为了防止公共 DNS 被 DDoS 攻击,限制 UDP 53 端口的包的流量,超过特定速度阈值就丢包,导致部分 DNS 请求丢包而超时。

### PPS 限速丢包

网卡的速度始终是有上限的,在云环境下,不同机型不同规格的云主机的 PPS 上限也不一样,超过阈值后就不保证能正常转发,可能就丢包了。

### 连接队列满导致丢包

对于 TCP 连接,三次握手建立连接,没建连成功前存储在半连接队列,建连成功但还没被应用层 accept 之前,存储在全连接队列。队列大小是有上限的,如果满了就会丢包:
* 如果并发太高或机器负载过高,半连接队列可能会满,新来的 SYN 建连包会被丢弃。
* 如果应用层 accept 连接过慢,会导致全连接队列堆积,满了就会丢包,通常是并发高、机器负载高或应用 hang 死等原因。

查看丢包统计:

```bash
netstat -s | grep -E 'drop|overflow'
```

```bash
$ cat /proc/net/netstat | awk '/TcpExt/ { print $21,$22 }'
ListenOverflows ListenDrops
20168 20168
```

> 不同内核版本的列号可能有差别

如果有现场,还可以观察全连接队列阻塞情况 (`Recv-Q`):

```bash
ss -lnt
```

通过以下内核参数可以调整队列大小 (namespace 隔离):

```bash
net.ipv4.tcp_max_syn_backlog = 8096 # 调整半连接队列上限
net.core.somaxconn = 32768 # 调整全连接队列上限
```

需要注意的是,`somaxconn` 只是调整了队列最大的上限,实际队列大小是应用在 `listen` 时传入的 `backlog` 大小,有些编程语言会自动读取 `somaxconn` 的值作为 `listen` 系统调用的 `backlog` 参数。

如果是用 nginx,`backlog` 的值需要在 `nginx.conf` 配置中显式指定,否则会用它自己的默认值 `511`。

### 源端口耗尽

当作为 client 发请求,或外部流量从 NodePort 进来时进行 SNAT,会从当前 netns 中选择一个端口作为源端口,端口范围由 `net.ipv4.ip_local_port_range` 这个内核参数决定,如果并发量大,就可能导致源端口耗尽,从而丢包。

### tcp_tw_recycle 导致丢包

在低版本内核中(比如 3.10),支持使用 tcp_tw_recycle 内核参数来开启 TIME_WAIT 的快速回收,但如果 client 也开启了 timestamp (一般默认开启),在 NAT 环境下就会导致丢包,甚至没有 NAT 时,稍微高并发一点,也会导致 PAWS 校验失败而丢包:

``` bash
# 看 SYN 丢包是否全都是 PAWS 校验失败
$ cat /proc/net/netstat | grep TcpE| awk '{print $15, $22}'
PAWSPassive ListenDrops
96305 96305
```

参考资料:

* https://github.com/torvalds/linux/blob/v3.10/net/ipv4/tcp_ipv4.c#L1465
* https://www.freesoft.org/CIE/RFC/1323/13.htm
* https://zhuanlan.zhihu.com/p/35684094
* https://my.oschina.net/u/4270811/blog/3473655/print

### listen 了源 port_range 范围内的端口

比如 `net.ipv4.ip_local_port_range="1024 65535"`,但又 listen 了 `9100` 端口,当作为 client 发请求时,选择一个 port_range 范围内的端口作为源端口,就可能选到 9100,但这个端口已经被 listen 了,就可能会选取失败,导致丢包。
diff --git a/content/troubleshooting/network/slow-network-traffic.md b/content/troubleshooting/network/slow-network-traffic.md
new file mode 100644
index 0000000..e6ce7a5
--- /dev/null
+++ b/content/troubleshooting/network/slow-network-traffic.md
@@ -0,0 +1,30 @@
# 排查网速差

网速差是指已经建立的连接,通信慢或期间有断连,本文介绍网络速度差的可能原因。

## 公网线路丢包

如果通信经过了公网传输,而公网线路难免有波动,任意一方网络环境差导致丢包都会让网速降下来。

这时 server 端可以调下拥塞算法,4.19 以上的内核自带了 bbr,在公网丢包情况下,能明显提升网络性能,可以启用观察下:

```bash
sysctl -w net.core.default_qdisc=fq
sysctl -w net.ipv4.tcp_congestion_control=bbr
```

## 达到带宽或 PPS 上限而被限速

如果是走公网,一般都有个公网带宽上限,可以看看监控是否达到带宽上限而被限速。

如果是走内网,也是可能会被限速的;通常云厂商的服务器有各种机型和规格,性能指标各不一样,可以先看下对应机型和规格的 PPS 和内网带宽能力,比如腾讯云可以看 [CVM实例规格](https://cloud.tencent.com/document/product/213/11518),然后再看下监控,是否达到上限。

## NAT 环境没开启 nf_conntrack_tcp_be_liberal

容器环境下,不开启这个参数可能造成 NAT 过的 TCP 连接带宽上不去或经常断连。

现象是有一点时延的 TCP 单流速度慢或经常断连,比如:
1. 跨地域专线挂载 nfs,时延 5ms,下载速度就上不去,只能到 12Mbps 左右。
2. 经过公网上传文件经常失败。
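如果确认命中这种情况,可以尝试开启该参数,让 netfilter 不把 out of window 的包标记为 INVALID(示例):

```bash
sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1
```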
原因是如果流量存在一定时延时,有些包就可能 out of window 了,netfilter 会将 out of window 的包置为 INVALID,如果是 INVALID 状态的包,netfilter 不会对其做 IP 和端口的 NAT 转换,这样协议栈再根据 ip + 端口去找这个包的连接时就会找不到,这时会回复一个 RST,但这个 RST 是宿主机直接发出的,容器内并不知道,导致容器内还以为连接没断而不停重试。所以如果数据包对应的 TCP 连接做过 NAT,虽然 conntrack 记录了地址转换信息,但部分包可能因 out of window 不走 conntrack 转换地址,造成一些混乱,导致流量速度慢或卡住的现象。
diff --git a/content/troubleshooting/network/timeout.md b/content/troubleshooting/network/timeout.md
new file mode 100644
index 0000000..f73d017
--- /dev/null
+++ b/content/troubleshooting/network/timeout.md
@@ -0,0 +1,50 @@
# 排查网络超时

本文记录网络超时的可能原因。

## 网络完全不通

如果发现是网络完全不通导致的超时,可以参考 [排查网络不通](network-unreachable.md)。

## 网络偶尔丢包

超时也可能是丢包导致的,参考 [排查网络丢包](packet-loss.md)。

## cpu 限流 (throttled)

有以下情况 CPU 会被限流:

1. Pod 使用的 CPU 超过了 limit,会直接被限流。
2. 容器内同时在 running 状态的进程/线程数太多,内核 CFS 调度周期内无法保证容器所在 cgroup 内所有进程都分到足够的时间片运行,部分进程会被限流。
3. 内核态 CPU 占用过高也可能会影响到用户态任务执行,触发 cgroup 的 CPU throttle。有些内核态的任务是不可中断的,比如大量创建销毁进程、回收内存等,部分核陷入内核态过久,当切回用户态时发现该 CFS 调度周期的时间所剩无几,部分进程也无法分到足够时间片从而被限流。

CPU 被限流后进程就运行变慢了,应用层的表现通常就是超时。

如何确认?可以查 Prometheus 监控,PromQL 查询语句:

1. cpu 被限制比例:

```txt
sum by (namespace, pod)(
    irate(container_cpu_cfs_throttled_periods_total{container!="POD", container!=""}[5m])
) /
sum by (namespace, pod)(
    irate(container_cpu_cfs_periods_total{container!="POD", container!=""}[5m])
)
```

2. cpu 被限制核数:

```txt
sum by (namespace, pod)(
    irate(container_cpu_cfs_throttled_seconds_total{container!="POD", container!=""}[5m])
)
```

如何确认超时就是 CPU throttle 导致的呢?建议:
1. 看下 throttle 严不严重,如果只有少量 throttle,可能不会导致超时。
2. 拉长监控图时间范围,对比开始超时的时间段与之前正常的时间段,如果 throttle 在超时前后才出现或明显加重,那很可能就是 throttle 导致的超时。

## 节点高负载

如果节点高负载了,即便没 throttle,进程所分配的 CPU 时间片也不够用,也会导致进程处理慢,从而超时,详见 [节点高负载排查思路](../node/node-high-load.md)。
diff --git a/content/troubleshooting/network/traffic-surge.md b/content/troubleshooting/network/traffic-surge.md
new file mode 100644
index 0000000..1bc8587
--- /dev/null
+++ b/content/troubleshooting/network/traffic-surge.md
@@ -0,0 +1,26 @@
# 排查流量激增

## iftop 揪出大流量 IP

```bash
$ iftop
10.21.45.8  => 10.111.100.101  3.35Mb  2.92Mb  2.94Mb
            <=                  194Mb   160Mb   162Mb
10.21.45.8  => 10.121.101.22   3.41Mb  2.89Mb  3.04Mb
            <=                  192Mb   159Mb   172Mb
10.21.45.8  => 10.22.122.55     279Kb   313Kb   292Kb
            <=                 11.3Kb  12.1Kb  11.9Kb
...
```

## netstat 查看大流量 IP 连接

```bash
$ netstat -np | grep 10.121.101.22
tcp        0      0 10.21.45.8:48320        10.121.101.22:12002     TIME_WAIT   -
tcp        0      0 10.21.45.8:59179        10.121.101.22:12002     TIME_WAIT   -
tcp        0      0 10.21.45.8:55835        10.121.101.22:12002     TIME_WAIT   -
tcp        0      0 10.21.45.8:49420        10.121.101.22:12002     TIME_WAIT   -
tcp        0      0 10.21.45.8:55559        10.121.101.22:12002     TIME_WAIT   -
...
```
\ No newline at end of file
diff --git a/content/troubleshooting/node/arp-cache-overflow.md b/content/troubleshooting/node/arp-cache-overflow.md
new file mode 100644
index 0000000..01455e7
--- /dev/null
+++ b/content/troubleshooting/node/arp-cache-overflow.md
@@ -0,0 +1,66 @@
# ARP 表爆满

## 判断 arp_cache 是否溢出

内核日志会有下面的报错:

``` txt
arp_cache: neighbor table overflow!
```
查看当前 arp 记录数:

``` bash
$ arp -an | wc -l
1335
```

查看 arp gc 阀值:

``` bash
$ sysctl -a | grep gc_thresh
net.ipv4.neigh.default.gc_thresh1 = 128
net.ipv4.neigh.default.gc_thresh2 = 512
net.ipv4.neigh.default.gc_thresh3 = 1024
net.ipv6.neigh.default.gc_thresh1 = 128
net.ipv6.neigh.default.gc_thresh2 = 512
net.ipv6.neigh.default.gc_thresh3 = 1024
```

当前 arp 记录数接近 `gc_thresh3` 时比较容易 overflow,因为当 arp 记录达到 `gc_thresh3` 时会强制触发 gc 清理,当这时又有数据包要发送,并且根据目的 IP 在 arp cache 中没找到 mac 地址,这时会判断当前 arp cache 记录数加 1 是否大于 `gc_thresh3`,如果大于,就会报错: `arp_cache: neighbor table overflow!`

## 解决方案

调整节点内核参数,将 arp cache 的 gc 阀值调高 (`/etc/sysctl.conf`):

``` bash
net.ipv4.neigh.default.gc_thresh1 = 80000
net.ipv4.neigh.default.gc_thresh2 = 90000
net.ipv4.neigh.default.gc_thresh3 = 100000
```

分析是否只是部分业务的 Pod 的使用场景需要节点有比较大的 arp 缓存空间。

如果不是,就需要调整所有节点内核参数。

如果是,可以将部分 Node 打上标签,比如:

``` bash
kubectl label node host1 arp_cache=large
```

然后用 nodeSelector 或 nodeAffinity 让这部分需要内核有大 arp_cache 容量的 Pod 只调度到这部分节点,推荐使用 nodeAffinity,yaml 示例:

``` yaml
  template:
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: arp_cache
                operator: In
                values:
                - large
```
\ No newline at end of file
diff --git a/content/troubleshooting/node/cadvisor-no-data.md b/content/troubleshooting/node/cadvisor-no-data.md
new file mode 100644
index 0000000..6cc4505
--- /dev/null
+++ b/content/troubleshooting/node/cadvisor-no-data.md
@@ -0,0 +1,37 @@
# cAdvisor 无数据

## 可能原因

### 修改容器数据盘后未重启 kubelet

如果修改过容器数据盘 (docker root),重启了容器运行时,但又没驱逐和重启 kubelet,这时 kubelet 就可能无法正常返回 cAdvisor 数据,日志报错:

```txt
Mar 21 02:59:26 VM-67-101-centos kubelet[714]: E0321 02:59:26.320938 714 manager.go:1086] Failed to create existing container: /kubepods/burstable/podb267f18b-a641-4004-a660-4c6a43b6e520/03164d8f0d1f55a285b50b2117d6fdb2c33d2fa87f46dba0f43b806017607d03: failed to identify the read-write layer ID for container "03164d8f0d1f55a285b50b2117d6fdb2c33d2fa87f46dba0f43b806017607d03". - open /var/lib/docker/image/overlay2/layerdb/mounts/03164d8f0d1f55a285b50b2117d6fdb2c33d2fa87f46dba0f43b806017607d03/mount-id: no such file or directory
```

如何确认?可以看下数据盘是否修改过:

```bash
$ docker info
...
Docker Root Dir: /data/bcs/service/docker
...
```

确认下容器运行时启动时间是否晚于 kubelet:

```txt
● kubelet.service - kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
   Active: active (running) since Fri 2022-01-14 14:39:52 CST; 2 months 6 days ago

● dockerd.service - dockerd
   Loaded: loaded (/usr/lib/systemd/system/dockerd.service; enabled; vendor preset: disabled)
   Active: active (running) since Fri 2022-01-14 14:41:45 CST; 2 months 6 days ago
```

如果都满足,可能就是因为修改了容器数据盘路径并且没有重启 kubelet。

解决方案就是: 对 Node 进行驱逐,让存量旧 Pod 漂移到其它节点,最后重启下 kubelet。
\ No newline at end of file
diff --git a/content/troubleshooting/node/disk-full.md b/content/troubleshooting/node/disk-full.md
new file mode 100644
index 0000000..f754648
--- /dev/null
+++ b/content/troubleshooting/node/disk-full.md
@@ -0,0 +1,92 @@
# 磁盘爆满

## 什么情况下磁盘可能会爆满 ?

kubelet 有 gc 和驱逐机制,通过 `--image-gc-high-threshold`, `--image-gc-low-threshold`, `--eviction-hard`, `--eviction-soft`, `--eviction-minimum-reclaim` 等参数控制 kubelet 的 gc 和驱逐策略来释放磁盘空间,在配置正确的情况下,磁盘一般不会爆满。

通常导致爆满的原因可能是配置不正确,或者节点上有其它非 K8S 管理的进程在不断写数据到磁盘占用大量空间导致磁盘爆满。
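下面是一个 kubelet 启动参数的配置示例(阈值均为假设值,请按集群实际情况调整):

``` bash
kubelet --image-gc-high-threshold=85 \
        --image-gc-low-threshold=80 \
        --eviction-hard=nodefs.available<10%,imagefs.available<15%
```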
## 磁盘爆满会有什么影响 ?

对 K8S 运行的影响,我们主要关注 kubelet 和容器运行时这两个最关键的组件,它们所使用的目录通常不一样:kubelet 一般不会单独挂盘,直接使用系统磁盘,因为通常占用空间不会很大;容器运行时单独挂盘的场景比较多。当磁盘爆满的时候,我们也要看 kubelet 和容器运行时使用的目录是否在这个磁盘上,通过 `df` 命令可以查看磁盘挂载点。

### 容器运行时使用的目录所在磁盘爆满

如果容器运行时使用的目录所在磁盘空间爆满,可能会造成容器运行时无响应,比如 docker,执行 docker 相关的命令一直 hang 住,kubelet 日志也可以看到 PLEG unhealthy,因为 CRI 调用 timeout,当然也就无法创建或销毁容器,通常表现是 Pod 一直 ContainerCreating 或一直 Terminating。

docker 默认使用的目录主要有:

* `/var/run/docker`: 用于存储容器运行状态,通过 dockerd 的 `--exec-root` 参数指定。
* `/var/lib/docker`: 用于持久化容器相关的数据,比如容器镜像、容器可写层数据、容器标准日志输出、通过 docker 创建的 volume 等。

Pod 启动可能报类似下面的事件:

``` txt
  Warning  FailedCreatePodSandBox    53m                kubelet, 172.22.0.44      Failed create pod sandbox: rpc error: code = DeadlineExceeded desc = context deadline exceeded
```

``` txt
  Warning  FailedCreatePodSandBox  2m (x4307 over 16h)  kubelet, 10.179.80.31    (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create a sandbox for pod "apigateway-6dc48bf8b6-l8xrw": Error response from daemon: mkdir /var/lib/docker/aufs/mnt/1f09d6c1c9f24e8daaea5bf33a4230de7dbc758e3b22785e8ee21e3e3d921214-init: no space left on device
```

``` txt
  Warning  Failed  5m1s (x3397 over 17h)  kubelet, ip-10-0-151-35.us-west-2.compute.internal  (combined from similar events): Error: container create failed: container_linux.go:336: starting container process caused "process_linux.go:399: container init caused \"rootfs_linux.go:58: mounting \\\"/sys\\\" to rootfs \\\"/var/lib/dockerd/storage/overlay/051e985771cc69f3f699895a1dada9ef6483e912b46a99e004af7bb4852183eb/merged\\\" at \\\"/var/lib/dockerd/storage/overlay/051e985771cc69f3f699895a1dada9ef6483e912b46a99e004af7bb4852183eb/merged/sys\\\" caused \\\"no space left on device\\\"\""
```

Pod 删除可能报类似下面的事件:

``` txt
Normal  Killing  39s (x735 over 15h)  kubelet, 10.179.80.31  Killing container with id docker://apigateway:Need to kill Pod
```

### kubelet 使用的目录所在磁盘爆满

如果 kubelet 使用的目录所在磁盘空间爆满(通常是系统盘),新建 Pod 时连 Sandbox 都无法创建成功,因为 mkdir 将会失败,通常会有类似这样的 Pod 事件:

``` txt
  Warning  UnexpectedAdmissionError  44m  kubelet, 172.22.0.44  Update plugin resources failed due to failed to write checkpoint file "kubelet_internal_checkpoint": write /var/lib/kubelet/device-plugins/.728425055: no space left on device, which is unexpected.
```

kubelet 默认使用的目录是 `/var/lib/kubelet`, 用于存储插件信息、Pod 相关的状态以及挂载的 volume (比如 `emptyDir`, `ConfigMap`, `Secret`),通过 kubelet 的 `--root-dir` 参数指定。

## 如何分析磁盘占用 ?

* 如果运行时使用的是 Docker,请参考本书 排错技巧: 分析 Docker 磁盘占用 (TODO)

## 如何恢复 ?

如果容器运行时使用的是 Docker,我们无法直接重启 dockerd 来释放一些空间,因为磁盘爆满后 dockerd 无法正常响应,停止的时候也会卡住。我们需要先手动清理一点文件腾出空间好让 dockerd 能够停止并重启。

可以手动删除一些 docker 的 log 文件或可写层文件,通常删除 log:

``` bash
$ cd /var/lib/docker/containers
$ du -sh * # 找到比较大的目录
$ cd dda02c9a7491fa797ab730c1568ba06cba74cecd4e4a82e9d90d00fa11de743c
$ cat /dev/null > dda02c9a7491fa797ab730c1568ba06cba74cecd4e4a82e9d90d00fa11de743c-json.log.9 # 删除log文件
```

* **注意:** 使用 `cat /dev/null >` 方式删除而不用 `rm`,因为用 rm 删除的文件,docker 进程可能不会释放文件,空间也就不会释放;log 的后缀数字越大表示越久远,先删除旧日志。

然后将该 node 标记不可调度,并将其已有的 pod 驱逐到其它节点,这样重启 dockerd 就会让该节点的 pod 对应的容器删掉,容器相关的日志(标准输出)与容器内产生的数据文件(没有挂载 volume, 可写层)也会被清理:

``` bash
kubectl drain <node-name>
```

重启 dockerd:

``` bash
systemctl restart dockerd
# or systemctl restart docker
```

等重启恢复,pod 调度到其它节点,排查磁盘爆满原因并清理和规避,然后取消节点不可调度标记:

``` bash
kubectl uncordon <node-name>
```
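排查磁盘爆满原因时,可以先用 du 粗略定位数据盘上占用最大的目录(示例,路径按实际数据盘挂载点调整):

``` bash
du -x -d 2 -h /var/lib/docker | sort -rh | head -20
```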
## 如何规避 ?

正确配置 kubelet gc 和驱逐相关的参数,这样即便磁盘使用量逼近爆满,节点上的 pod 也早就被自动驱逐到其它节点了,不会存在 Pod 一直 ContainerCreating 或 Terminating 的问题。
diff --git a/content/troubleshooting/node/io-high-load.md b/content/troubleshooting/node/io-high-load.md
new file mode 100644
index 0000000..8a9ef82
--- /dev/null
+++ b/content/troubleshooting/node/io-high-load.md
@@ -0,0 +1,156 @@
# IO 高负载

系统如果出现 IO WAIT 高,说明 IO 设备的速度跟不上 CPU 的处理速度,CPU 需要在那里干等,这里的等待实际也占用了 CPU 时间,导致系统负载升高,可能就会影响业务进程的处理速度,导致业务超时。

## 如何判断 ?

使用 `top` 命令看下当前负载:

```text
top - 19:42:06 up 23:59,  2 users,  load average: 34.64, 35.80, 35.76
Tasks: 679 total,   1 running, 678 sleeping,   0 stopped,   0 zombie
Cpu(s): 15.6%us,  1.7%sy,  0.0%ni, 74.7%id,  7.9%wa,  0.0%hi,  0.1%si,  0.0%st
Mem:  32865032k total, 30989168k used,  1875864k free,   370748k buffers
Swap:  8388604k total,     5440k used,  8383164k free,  7982424k cached

  PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
 9783 mysql     20   0 17.3g  16g 8104 S 186.9 52.3   3752:33 mysqld
 5700 nginx     20   0 1330m  66m 9496 S  8.9  0.2   0:20.82 php-fpm
 6424 nginx     20   0 1330m  65m 8372 S  8.3  0.2   0:04.97 php-fpm
```

`%wa` (wait) 表示 IO WAIT 的 cpu 占用,默认看到的是所有核的平均值,要看每个核的 `%wa` 值需要按下 "1":

```text
top - 19:42:08 up 23:59,  2 users,  load average: 34.64, 35.80, 35.76
Tasks: 679 total,   1 running, 678 sleeping,   0 stopped,   0 zombie
Cpu0  : 29.5%us,  3.7%sy,  0.0%ni, 48.7%id, 17.9%wa,  0.0%hi,  0.1%si,  0.0%st
Cpu1  : 29.3%us,  3.7%sy,  0.0%ni, 48.9%id, 17.9%wa,  0.0%hi,  0.1%si,  0.0%st
Cpu2  : 26.1%us,  3.1%sy,  0.0%ni, 64.4%id,  6.0%wa,  0.0%hi,  0.3%si,  0.0%st
Cpu3  : 25.9%us,  3.1%sy,  0.0%ni, 65.5%id,  5.4%wa,  0.0%hi,  0.1%si,  0.0%st
Cpu4  : 24.9%us,  3.0%sy,  0.0%ni, 66.8%id,  5.0%wa,  0.0%hi,  0.3%si,  0.0%st
Cpu5  : 24.9%us,  2.9%sy,  0.0%ni, 67.0%id,  4.8%wa,  0.0%hi,  0.3%si,  0.0%st
Cpu6  : 24.2%us,  2.7%sy,  0.0%ni, 68.3%id,  4.5%wa,  0.0%hi,  0.3%si,  0.0%st
Cpu7  : 24.3%us,  2.6%sy,  0.0%ni, 68.5%id,  4.2%wa,  0.0%hi,  0.3%si,  0.0%st
Cpu8  : 23.8%us,  2.6%sy,  0.0%ni, 69.2%id,  4.1%wa,  0.0%hi,  0.3%si,  0.0%st
Cpu9  : 23.9%us,  2.5%sy,  0.0%ni, 69.3%id,  4.0%wa,  0.0%hi,  0.3%si,  0.0%st
Cpu10 : 23.3%us,  2.4%sy,  0.0%ni, 68.7%id,  5.6%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu11 : 23.3%us,  2.4%sy,  0.0%ni, 69.2%id,  5.1%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu12 : 21.8%us,  2.4%sy,  0.0%ni, 60.2%id, 15.5%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu13 : 21.9%us,  2.4%sy,  0.0%ni, 60.6%id, 15.2%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu14 : 21.4%us,  2.3%sy,  0.0%ni, 72.6%id,  3.7%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu15 : 21.5%us,  2.2%sy,  0.0%ni, 73.2%id,  3.1%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu16 : 21.2%us,  2.2%sy,  0.0%ni, 73.6%id,  3.0%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu17 : 21.2%us,  2.1%sy,  0.0%ni, 73.8%id,  2.8%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu18 : 20.9%us,  2.1%sy,  0.0%ni, 74.1%id,  2.9%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu19 : 21.0%us,  2.1%sy,  0.0%ni, 74.4%id,  2.5%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu20 : 20.7%us,  2.0%sy,  0.0%ni, 73.8%id,  3.4%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu21 : 20.8%us,  2.0%sy,  0.0%ni, 73.9%id,  3.2%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu22 : 20.8%us,  2.0%sy,  0.0%ni, 74.4%id,  2.8%wa,  0.0%hi,  0.0%si,  0.0%st
Cpu23 : 20.8%us,  1.9%sy,  0.0%ni, 74.4%id,  2.8%wa,  0.0%hi,  0.0%si,  0.0%st
Mem:  32865032k total, 30209248k used,  2655784k free,   370748k buffers
Swap:  8388604k total,     5440k used,  8383164k free,  7986552k cached
```

`wa` 通常是 0%,如果经常在 1% 之上,说明存储设备的速度已经太慢,无法跟上 cpu 的处理速度。
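除了 top,也可以用 vmstat 持续观察 IO WAIT 的变化(示例,wa 列即 iowait 占比):

```bash
vmstat 1 5
```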
## 如何排查 ?

### 使用 iostat 检查设备是否 hang 住

```bash
iostat -xhd 2
```

如果有设备的 `%util` 一直是 100%,说明该设备基本 hang 住了。

![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925143829.png)

### 观察高 IO 的磁盘读写情况

```bash
# 捕获 %util 超过 90 时 vdb 盘的读写指标,每秒检查一次
while true; do iostat -xhd | grep -A1 vdb | grep -v vdb | awk '{if ($NF > 90){print $0}}'; sleep 1s; done
```

如果读写流量或 IOPS 不高,但 `%util` 很高,通常是磁盘本身有问题了,需要检查下磁盘。在云上托管的 K8S 集群,通常使用的是云厂商的云盘(比如腾讯云 CBS),可以拿到磁盘 ID 反馈下。

如果读写流量或 IOPS 高,继续下面的步骤排查出哪些进程导致的 IO 高负载。

### 查看哪些进程占住磁盘

```bash
fuser -v -m /dev/vdb
```

### 查找 D 状态的进程

D 状态 (Disk Sleep) 表示进程正在等待 IO,不可中断,正常情况下不会保持太久,如果进程长时间处于 D 状态,通常是设备故障。

```bash
ps -eo pid,ppid,stat,command

## 捕获 D 状态的进程
while true; do ps -eo pid,ppid,stat,command | awk '{if ($3 ~ /D/) {print $0}}'; sleep 0.5s; done
```

### 观察高 IO 进程

```bash
iotop -oP
# 展示 I/O 统计,每秒更新一次
pidstat -d 1
# 只看某个进程
pidstat -d 1 -p 3394470
```

### 使用 pidstat 统计

```bash
timeout 10 pidstat -dl 3 > io.txt
cat io.txt | awk '{if ($6>2000||$5>2000)print $0}'
```

### 使用 ebpf 抓高 IOPS 进程

安装 bcc-tools:

```bash
yum install -y bcc-tools
```

分析:

```bash
$ cd /usr/share/bcc/tools
$ ./biosnoop 5 > io.txt
$ cat io.txt | awk '{print $3,$2,$4,$5}' | sort | uniq -c | sort -rn | head -10
   6850 3356537 containerd vdb R
   1294 3926934 containerd vdb R
    864 1670 xfsaild/vdb vdb W
    578 3953662 kworker/u180:1 vda W
    496 3540267 logsys_cfg_cli vdb R
    459 1670 xfsaild/vdb vdb R
    354 3285936 php-fpm vdb R
    340 3285934 php-fpm vdb R
    292 2952592 sap1001 vdb R
    273 324710 python vdb R
$ pstree -apnhs 3356537
systemd,1 --switched-root --system --deserialize 22
  └─containerd,3895
      └─{containerd},3356537
$ timeout 10 strace -fp 3895 > strace.txt 2>&1
# vdb 的 IOPS 高,vdb 挂载到了 /data 目录,这里过滤下 "/data"
$ grep "/data" strace.txt | tail -10
[pid 19562] newfstatat(AT_FDCWD, "/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2338.log", {st_mode=S_IFREG|0644, st_size=6509, ...}, AT_SYMLINK_NOFOLLOW) = 0
[pid 19562] newfstatat(AT_FDCWD, "/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2339.log", {st_mode=S_IFREG|0644, st_size=6402, ...}, AT_SYMLINK_NOFOLLOW) = 0
[pid 19562] newfstatat(AT_FDCWD, "/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2340.log", {st_mode=S_IFREG|0644, st_size=6509, ...}, AT_SYMLINK_NOFOLLOW) = 0
[pid 19562] newfstatat(AT_FDCWD, "/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2341.log", {st_mode=S_IFREG|0644, st_size=6509, ...}, AT_SYMLINK_NOFOLLOW) = 0
[pid 19562] newfstatat(AT_FDCWD, "/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2342.log", {st_mode=S_IFREG|0644, st_size=6970, ...}, AT_SYMLINK_NOFOLLOW) = 0
[pid 19562] newfstatat(AT_FDCWD, "/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2343.log", {st_mode=S_IFREG|0644, st_size=6509, ...}, AT_SYMLINK_NOFOLLOW) = 0
[pid 19562] newfstatat(AT_FDCWD, "/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2344.log", {st_mode=S_IFREG|0644, st_size=6402, ...}, AT_SYMLINK_NOFOLLOW) = 0
[pid 19562] newfstatat(AT_FDCWD, "/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2345.log",
"/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2345.log", +[pid 19562] newfstatat(AT_FDCWD, "/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2346.log", {st_mode=S_IFREG|0644, st_size=7756, ...}, AT_SYMLINK_NOFOLLOW) = 0 +[pid 19562] newfstatat(AT_FDCWD, "/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6974/fs/data/log/monitor/snaps/20211010/ps-2347.log", Process 3895 detached +$ grep "/data" strace.txt > data.txt +# 合并且排序,自行用脚本分析下哪些文件操作多 +$ cat data.txt | awk -F '"' '{print $2}' | sort | uniq -c | sort -n > data-sorted.txt +``` diff --git a/content/troubleshooting/node/ipvs-no-destination-available.md b/content/troubleshooting/node/ipvs-no-destination-available.md new file mode 100644 index 0000000..f2098c0 --- /dev/null +++ b/content/troubleshooting/node/ipvs-no-destination-available.md @@ -0,0 +1,28 @@ +# IPVS no destination available + +## 现象 + +内核日志不停报 `no destination available` 这样的 warning 日志,查看 dmesg: + +```log +[23709.680898] IPVS: rr: TCP 192.168.0.52:80 - no destination available +[23710.709824] IPVS: rr: TCP 192.168.0.52:80 - no destination available +[23832.428700] IPVS: rr: TCP 127.0.0.1:30209 - no destination available +[23833.461818] IPVS: rr: TCP 127.0.0.1:30209 - no destination available +``` + +## 原因 + +一般是因为有 Service 用了 `externalTrafficPolicy:Local`,当 Node 上没有该 Service 对应 Pod 时,Node 上的该 Service 对应 NodePort 的 IPVS 规则里,RS 列表为空。当有流量打到这个 Node 的对应 NodePort 上时,由于 RS 列表为空,内核就会报这个 warning 日志。 + +在云厂商托管的 K8S 服务里,通常是 LB 会去主动探测 NodePort,发到没有这个 Service 对应 Pod 实例的 Node 时,报文被正常丢弃,从而内核报 warning 日志。 + +这个日志不会对服务造成影响,可以忽略不管。如果是在腾讯云 TKE 环境里,并且用的 TencentOS,可以设置一个内核参数来抑制这个 warning 日志输出: + +```bash +sysctl -w net.ipv4.vs.ignore_no_rs_error=1 +``` + +## 参考资料 + +* Kubernetes Issue: [IPVS error log occupation with externalTrafficPolicy: Local option in Service](https://github.com/kubernetes/kubernetes/issues/100925) \ No newline at end of file diff --git a/content/troubleshooting/node/kernel-solft-lockup.md b/content/troubleshooting/node/kernel-solft-lockup.md new file mode 100644 index 0000000..4eaceb4 --- /dev/null +++ b/content/troubleshooting/node/kernel-solft-lockup.md @@ -0,0 +1,19 @@ +# soft lockup (内核软死锁) + +## 内核报错 + +``` log +Oct 14 15:13:05 VM_1_6_centos kernel: NMI watchdog: BUG: soft lockup - CPU#5 stuck for 22s! [runc:[1:CHILD]:2274] +``` + +## 原因 + +发生这个报错通常是内核繁忙 (扫描、释放或分配大量对象),分不出时间片给用户态进程导致的,也伴随着高负载,如果负载降低报错则会消失。 + +## 什么情况下会导致内核繁忙 + +* 短时间内创建大量进程 (可能是业务需要,也可能是业务bug或用法不正确导致创建大量进程) + +## 参考资料 + +* [What are all these "Bug: soft lockup" messages about](https://www.suse.com/support/kb/doc/?id=7017652) diff --git a/content/troubleshooting/node/memory-fragmentation.md b/content/troubleshooting/node/memory-fragmentation.md new file mode 100644 index 0000000..6ad05f7 --- /dev/null +++ b/content/troubleshooting/node/memory-fragmentation.md @@ -0,0 +1,82 @@ +# 内存碎片化 + +## 判断是否内存碎片化严重 + +内存页分配失败,内核日志报类似下面的错: + +```bash +mysqld: page allocation failure. 
```

* `mysqld` 是申请分配内存的程序
* `order` 表示需要分配连续页的数量 (2^order),这里 4 表示 2^4=16 个连续的页
* `mode` 是内存分配模式的标识,定义在内核源码文件 `include/linux/gfp.h` 中,通常是多个标识进行或运算的结果,不同版本内核可能不一样,比如在新版内核中 `GFP_KERNEL` 是 `__GFP_RECLAIM | __GFP_IO | __GFP_FS` 的运算结果,而 `__GFP_RECLAIM` 又是 `___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM` 的运算结果

当 order 为 0 时,说明系统已经完全没有可用内存了;order 值比较大时,才说明内存碎片化了,无法分配连续的大页内存。

## 内存碎片化造成的问题

### 容器启动失败

K8S 会为每个 pod 创建 netns 来隔离 network namespace,内核初始化 netns 时会为其创建 nf_conntrack 表的 cache,需要申请大页内存,如果此时系统内存已经碎片化,无法分配到足够的大页内存,内核就会报错 (`v2.6.33 - v4.6`):

```bash
runc:[1:CHILD]: page allocation failure: order:6, mode:0x10c0d0
```

Pod 状态将会一直在 ContainerCreating,dockerd 启动容器失败,日志报错:

```text
Jan 23 14:15:31 dc05 dockerd: time="2019-01-23T14:15:31.288446233+08:00" level=error msg="containerd: start container" error="oci runtime error: container_linux.go:247: starting container process caused \"process_linux.go:245: running exec setns process for init caused \\\"exit status 6\\\"\"\n" id=5b9be8c5bb121264899fac8d9d36b02150269d41ce96ba6ad36d70b8640cb01c
Jan 23 14:15:31 dc05 dockerd: time="2019-01-23T14:15:31.317965799+08:00" level=error msg="Create container failed with error: invalid header field value \"oci runtime error: container_linux.go:247: starting container process caused \\\"process_linux.go:245: running exec setns process for init caused \\\\\\\"exit status 6\\\\\\\"\\\"\\n\""
```

kubelet 日志报错:

```text
Jan 23 14:15:31 dc05 kubelet: E0123 14:15:31.352386 26037 remote_runtime.go:91] RunPodSandbox from runtime service failed: rpc error: code = 2 desc = failed to start sandbox container for pod "matchdataserver-1255064836-t4b2w": Error response from daemon: {"message":"invalid header field value \"oci runtime error: container_linux.go:247: starting container process caused \\\"process_linux.go:245: running exec setns process for init caused \\\\\\\"exit status 6\\\\\\\"\\\"\\n\""}
Jan 23 14:15:31 dc05 kubelet: E0123 14:15:31.352496 26037 kuberuntime_sandbox.go:54] CreatePodSandbox for pod "matchdataserver-1255064836-t4b2w_basic(485fd485-1ed6-11e9-8661-0a587f8021ea)" failed: rpc error: code = 2 desc = failed to start sandbox container for pod "matchdataserver-1255064836-t4b2w": Error response from daemon: {"message":"invalid header field value \"oci runtime error: container_linux.go:247: starting container process caused \\\"process_linux.go:245: running exec setns process for init caused \\\\\\\"exit status 6\\\\\\\"\\\"\\n\""}
Jan 23 14:15:31 dc05 kubelet: E0123 14:15:31.352518 26037 kuberuntime_manager.go:618] createPodSandbox for pod "matchdataserver-1255064836-t4b2w_basic(485fd485-1ed6-11e9-8661-0a587f8021ea)" failed: rpc error: code = 2 desc = failed to start sandbox container for pod "matchdataserver-1255064836-t4b2w": Error response from daemon: {"message":"invalid header field value \"oci runtime error: container_linux.go:247: starting container process caused \\\"process_linux.go:245: running exec setns process for init caused \\\\\\\"exit status 6\\\\\\\"\\\"\\n\""}
Jan 23 14:15:31 dc05 kubelet: E0123 14:15:31.352580 26037 pod_workers.go:182] Error syncing pod 485fd485-1ed6-11e9-8661-0a587f8021ea ("matchdataserver-1255064836-t4b2w_basic(485fd485-1ed6-11e9-8661-0a587f8021ea)"), skipping: failed to "CreatePodSandbox" for "matchdataserver-1255064836-t4b2w_basic(485fd485-1ed6-11e9-8661-0a587f8021ea)" with CreatePodSandboxError: "CreatePodSandbox for pod \"matchdataserver-1255064836-t4b2w_basic(485fd485-1ed6-11e9-8661-0a587f8021ea)\" failed: rpc error: code = 2 desc = failed to start sandbox container for pod \"matchdataserver-1255064836-t4b2w\": Error response from daemon: {\"message\":\"invalid header field value \\\"oci runtime error: container_linux.go:247: starting container process caused \\\\\\\"process_linux.go:245: running exec setns process for init caused \\\\\\\\\\\\\\\"exit status 6\\\\\\\\\\\\\\\"\\\\\\\"\\\\n\\\"\"}"
Jan 23 14:15:31 dc05 kubelet: I0123 14:15:31.372181 26037 kubelet.go:1916] SyncLoop (PLEG): "matchdataserver-1255064836-t4b2w_basic(485fd485-1ed6-11e9-8661-0a587f8021ea)", event: &pleg.PodLifecycleEvent{ID:"485fd485-1ed6-11e9-8661-0a587f8021ea", Type:"ContainerDied", Data:"5b9be8c5bb121264899fac8d9d36b02150269d41ce96ba6ad36d70b8640cb01c"}
Jan 23 14:15:31 dc05 kubelet: W0123 14:15:31.372225 26037 pod_container_deletor.go:77] Container "5b9be8c5bb121264899fac8d9d36b02150269d41ce96ba6ad36d70b8640cb01c" not found in pod's containers
Jan 23 14:15:31 dc05 kubelet: I0123 14:15:31.678211 26037 kuberuntime_manager.go:383] No ready sandbox for pod "matchdataserver-1255064836-t4b2w_basic(485fd485-1ed6-11e9-8661-0a587f8021ea)" can be found. Need to start a new one
```

查看伙伴系统的内存分布 (后面的 0 多表示没有大块的连续内存了):

```bash
$ cat /proc/buddyinfo
Node 0, zone      DMA      1      0      1      0      2      1      1      0      1      1      3
Node 0, zone    DMA32   2725    624    489    178      0      0      0      0      0      0      0
Node 0, zone   Normal   1163   1101    932    222      0      0      0      0      0      0      0
```

### 系统 OOM

内存碎片化会导致即使当前系统总内存比较多,但由于无法分配足够的大页内存导致给进程分配内存失败,就认为系统内存不够用,需要杀掉一些进程来释放内存,从而导致系统 OOM。

## 解决方法

* 周期性地或者在发现大块内存不足时,先进行 drop_caches 操作:

```bash
echo 3 > /proc/sys/vm/drop_caches
```

* 必要时候进行内存整理,开销会比较大,会造成业务卡住一段时间 (慎用):

```bash
echo 1 > /proc/sys/vm/compact_memory
```

## 如何防止内存碎片化

TODO

## 附录

相关链接:

* [https://huataihuang.gitbooks.io/cloud-atlas/content/os/linux/kernel/memory/drop_caches_and_compact_memory.html](https://huataihuang.gitbooks.io/cloud-atlas/content/os/linux/kernel/memory/drop_caches_and_compact_memory.html)

diff --git a/content/troubleshooting/node/no-space-left-on-device.md b/content/troubleshooting/node/no-space-left-on-device.md
new file mode 100644
index 0000000..14b479b
--- /dev/null
+++ b/content/troubleshooting/node/no-space-left-on-device.md
@@ -0,0 +1,85 @@
# no space left on device

- 有时候节点 NotReady, kubelet 日志报 `no space left on device`。
- 有时候创建 Pod 失败,`describe pod` 看 event 报 `no space left on device`。

出现这种错误有很多种可能原因,下面我们来根据现象找对应原因。

## inotify watch 耗尽

节点 NotReady,kubelet 启动失败,看 kubelet 日志:

``` bash
Jul 18 15:20:58 VM_16_16_centos kubelet[11519]: E0718 15:20:58.280275 11519 raw.go:140] Failed to watch directory "/sys/fs/cgroup/memory/kubepods": inotify_add_watch /sys/fs/cgroup/memory/kubepods/burstable/pod926b7ff4-7bff-11e8-945b-52540048533c/6e85761a30707b43ed874e0140f58839618285fc90717153b3cbe7f91629ef5a: no space left on device
```

系统调用 `inotify_add_watch` 失败,提示 `no space left on device`,这是因为系统上进程 watch 文件目录的总数超出了最大限制,可以修改内核参数调高限制,详细请参考本书 [处理实践: inotify watch 耗尽](../../../handle/runnig-out-of-inotify-watches/)

## cgroup 泄露

查看当前 cgroup 数量:

``` bash
$ cat /proc/cgroups | column -t
#subsys_name  hierarchy  num_cgroups  enabled
cpuset        5          29           1
cpu           7          126          1
cpuacct       7          126          1
memory        9          127          1
devices       4          126          1
freezer       2          29           1
net_cls       6          29           1
blkio         10         126          1
perf_event    3          29           1
hugetlb       11         29           1
pids          8          126          1
net_prio      6          29           1
```

cgroup 子系统目录下面每个目录及其子目录都认为是一个独立的 cgroup,所以也可以在文件系统中统计目录数来获取实际 cgroup 数量,通常跟 `/proc/cgroups` 里面看到的应该一致:

``` bash
$ find -L /sys/fs/cgroup/memory -type d | wc -l
127
```

当 cgroup 泄露发生时,这里的数量就不是真实的了。低版本内核最多支持 65535 个 cgroup,并且开启 kmem accounting 后删除 cgroup 时会发生泄露,大量创建删除容器后泄露了许多 cgroup,最终总数达到 65535,新建容器创建 cgroup 将会失败,报 `no space left on device`
详细请参考本书 [排障案例: cgroup 泄露](../cases/node/cgroup-leaking.md)

## 磁盘被写满

Pod 启动失败,状态 `CreateContainerError`:

``` bash
csi-cephfsplugin-27znb   0/2   CreateContainerError   167   17h
```

Pod 事件报错:

``` bash
  Warning  Failed  5m1s (x3397 over 17h)  kubelet, ip-10-0-151-35.us-west-2.compute.internal  (combined from similar events): Error: container create failed: container_linux.go:336: starting container process caused "process_linux.go:399: container init caused \"rootfs_linux.go:58: mounting \\\"/sys\\\" to rootfs \\\"/var/lib/containers/storage/overlay/051e985771cc69f3f699895a1dada9ef6483e912b46a99e004af7bb4852183eb/merged\\\" at \\\"/var/lib/containers/storage/overlay/051e985771cc69f3f699895a1dada9ef6483e912b46a99e004af7bb4852183eb/merged/sys\\\" caused \\\"no space left on device\\\"\""
```

## limits 单位错误

Pod 事件报错:

```txt
MountVolume.SetUp failed for volume "kube-api-access-j562g" : write /var/lib/kubelet/pods/7c251070-cf3c-4180-97a2-647e858f3f2/volumes/kubernetes.io~projected/kube-api-access-j562g/..2023_07_25_07_25_22.573608539/ca.crt: no space left on device
```

可能是因为定义 requests 和 limits 时忘了写单位,或单位有误:

```yaml
limits:
  memory: 512mi # 应该大写开头,改成 512Mi
```

```yaml
limits:
  memory: 512 # 没有单位默认为字节,太小,应带上单位
```

根因:可能是因为内存相关的 volume 都受 memory limit 限制 (projected volume, emptydir 等)。
\ No newline at end of file
diff --git a/content/troubleshooting/node/node-crash-and-vmcore.md b/content/troubleshooting/node/node-crash-and-vmcore.md
new file mode 100644
index 0000000..8744e6b
--- /dev/null
+++ b/content/troubleshooting/node/node-crash-and-vmcore.md
@@ -0,0 +1,28 @@
# 节点 Crash 与 Vmcore 分析

本文介绍节点 Crash 后如何分析 vmcore 进行排查。

## kdump 介绍

目前大多 Linux 发行版都会默认开启 kdump 服务,以方便在内核崩溃的时候, 可以通过 kdump 服务提供的 kexec 机制快速的启用保留在内存中的第二个内核来收集并转储内核崩溃的日志信息(`vmcore` 等文件), 这种机制需要服务器硬件特性的支持, 不过现今常用的服务器系列均已支持.

如果没有特别配置 kdump,当发生 crash 时,通常默认会将 vmcore 保存到 `/var/crash` 路径下,也可以查看 `/etc/kdump.conf` 配置来确认:

```bash
$ grep ^path /etc/kdump.conf
path /var/crash
```

## 快速查看原因

在需要快速了解崩溃原因的时候, 可以简单查看崩溃主机(如果重启成功)的 `vmcore-dmesg.txt` 文件, 该文件列出了内核崩溃时的堆栈信息, 有助于我们大致了解崩溃的原因, 方便处理措施的决断. 如下所示为生成的日志文件通常的路径:

```txt
/var/crash/127.0.0.1-2019-11-11-08:40:08/vmcore-dmesg.txt
```

## 参考资料

* [Linux 系统内核崩溃分析处理简介](https://blog.arstercz.com/brief-intro-to-linux-kernel-crash-analyze/)
* [Kernel crash dump guide](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/kernel_administration_guide/kernel_crash_dump_guide)
* [Using kdump and kexec with the Red Hat Enterprise Linux for Real Time Kernel](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_for_real_time/7/html/tuning_guide/using_kdump_and_kexec_with_the_rt_kernel)
\ No newline at end of file
diff --git a/content/troubleshooting/node/node-high-load.md b/content/troubleshooting/node/node-high-load.md
new file mode 100644
index 0000000..adde265
--- /dev/null
+++ b/content/troubleshooting/node/node-high-load.md
@@ -0,0 +1,95 @@
# 节点高负载

Kubernetes 节点高负载如何排查?本文来盘一盘。

## 如何判断节点高负载?
可以通过 `top` 或 `uptime` 来确定 load 大小,如果 load 小于 CPU 数量,属于低负载,如果大于 CPU 数量 2~3 倍,就比较高了,当然也看业务敏感程度,不太敏感的大于 4 倍算高负载。

## 排查思路

观察监控:通常不是因为内核 bug 导致的高负载,在卡死之前从监控一般能看出一些问题,可以观察下各项监控指标。

排查现场:如果没有相关监控或监控维度较少不足以查出问题,就尝试登录节点抓现场分析。负载过高时通常使用 ssh 登录不上,如果可以用 vnc,可以尝试下使用 vnc 登录。

## 排查现场思路

loadavg 可以认为是 R 状态线程数和 D 状态线程数的总和 (R 代表需要 cpu,是 cpu 负载;D 通常代表需要 IO,是 IO 负载)。

简单判断办法:

```bash
ps -eL -o lwp,pid,ppid,state,comm | grep -E " R | D "
```

然后数一下各种状态多少个进程,看看是 D 住还是 R。

如果是长时间 D 住,可以进一步查看进程堆栈看看 D 在哪里:

```bash
cat /proc/<pid>/stack
```

如果是大量进程/线程在 R 状态,那就是同时需要 CPU 的进程/线程数过多,CPU 忙不过来了,可以利用 perf 分析程序在忙什么:

```bash
perf top -p <pid>
```

## 线程数量过多

如果 load 高但 CPU 利用率不高,通常是同时 running 的进程/线程数过多,排队等 CPU 切换的进程/线程较多。

通常在 load 高时执行任何命令都会非常卡,因为执行这些命令也都意味着要创建和执行新的进程,所以下面排查过程中执行命令时需要耐心等待。

查看系统允许创建的最大进程数:

```bash
cat /proc/sys/kernel/pid_max
```

> 修改方式: sysctl -w kernel.pid_max=65535

通过以下命令统计当前 PID 数量:

```bash
ps -eLf | wc -l
```

如果数量过多,可以大致扫下有哪些进程,如果有大量重复启动命令的进程,就可能是这个进程对应程序的 bug 导致。

还可以通过以下命令统计线程数排名:

```bash
printf "NUM\tPID\tCOMMAND\n" && ps -eLf | awk '{$1=null;$3=null;$4=null;$5=null;$6=null;$7=null;$8=null;$9=null;print}' | sort |uniq -c |sort -rn | head -10
```

找出线程数量较多的进程,可能就是某个容器的线程泄漏,导致 PID 耗尽。

随便取其中一个 PID,用 nsenter 进入进程 netns:

```bash
nsenter -n --target <pid>
```

然后执行 `ip a` 看下 IP 地址,如果不是节点 IP,通常就是 Pod IP,可以通过 `kubectl get pod -o wide -A | grep <ip>` 来反查进程来自哪个 Pod。

## 陷入内核态过久

有些时候某些 CPU 可能会执行耗时较长的内核态任务,比如大量创建/销毁进程,回收内存,需要较长时间 reclaim memory,必须要执行完才能切回用户态,虽然内核一般会有 migration 内核线程将这种负载较高的核上的任务迁移到其它核上,但也只能适当缓解,如果这种任务较多,整体的 CPU system 占用就会较高,影响到用户态进程任务的执行,对于业务来说,就是 CPU 不够用,处理就变慢,发生超时。

CPU 内核态占用的 Prometheus 查询语句:

```txt
sum(irate(node_cpu_seconds_total{instance="10.10.1.14",mode="system"}[2m]))
```

## IO 高负载

参考 [IO 高负载](io-high-load.md) 进行排查。
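如果怀疑内核态 CPU 占用高,在节点上也可以用 pidstat 观察各进程的内核态 CPU 占用(示例,重点关注 %system 列较高的进程):

```bash
pidstat -u 1 5
```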
## FAQ

### 如果机器完全无法操作怎么办?

有时候高负载是无法 ssh 登录的,即使通过 vnc 方式登录成功,由于机器太卡也是执行不了任何命令。如果通过监控也看不出任何原因,又想要彻查根因,可以从虚拟化底层入手,给虚拟机发信号触发 coredump (无需登录虚拟机),如果用的云产品,可以提工单让虚拟主机的产品售后来排查分析。

diff --git a/content/troubleshooting/node/pid-full.md b/content/troubleshooting/node/pid-full.md
new file mode 100644
index 0000000..f1124b3
--- /dev/null
+++ b/content/troubleshooting/node/pid-full.md
@@ -0,0 +1,43 @@
# PID 爆满

## 如何判断 PID 耗尽

首先要确认当前的 PID 限制,检查全局 PID 最大限制:

``` bash
cat /proc/sys/kernel/pid_max
```

也检查下线程数限制:

``` bash
cat /proc/sys/kernel/threads-max
```

再检查下当前用户是否还有 `ulimit` 限制最大进程数。

确认当前实际 PID 数量,检查当前用户的 PID 数量:

``` bash
ps -eLf | wc -l
```

如果发现实际 PID 数量接近最大限制,说明 PID 可能会爆满,导致经常有进程无法启动,低版本内核可能报错: `Cannot allocate memory`,这个报错信息不准确,在内核 4.1 以后改进了: https://github.com/torvalds/linux/commit/35f71bc0a09a45924bed268d8ccd0d3407bc476f

## 如何解决

临时调大 PID 和线程数限制:

``` bash
echo 65535 > /proc/sys/kernel/pid_max
echo 65535 > /proc/sys/kernel/threads-max
```

永久调大 PID 和线程数限制:

``` bash
echo "kernel.pid_max=65535" >> /etc/sysctl.conf && sysctl -p
echo "kernel.threads-max=65535" >> /etc/sysctl.conf && sysctl -p
```

k8s 1.14 支持了限制 Pod 的进程数量: https://kubernetes.io/blog/2019/04/15/process-id-limiting-for-stability-improvements-in-kubernetes-1.14/
diff --git a/content/troubleshooting/node/runnig-out-of-inotify-watches.md b/content/troubleshooting/node/runnig-out-of-inotify-watches.md
new file mode 100644
index 0000000..11e2f03
--- /dev/null
+++ b/content/troubleshooting/node/runnig-out-of-inotify-watches.md
@@ -0,0 +1,108 @@
# inotify 资源耗尽

## inotify 耗尽的危害

如果 inotify 资源耗尽,kubelet 创建容器将会失败:

```log
Failed to watch directory "/sys/fs/cgroup/blkio/system.slice": inotify_add_watch /sys/fs/cgroup/blkio/system.slice/var-lib-kubelet-pods-d111600d\x2dcdf2\x2d11e7\x2d8e6b\x2dfa163ebb68b9-volumes-kubernetes.io\x7esecret-etcd\x2dcerts.mount: no space left on device
```

## 查看 inotify watch 的限制

每个 linux 进程可以持有多个 fd,每个 inotify 类型的 fd 可以 watch 多个目录,每个用户下所有进程 inotify 类型的 fd 可以 watch 的总目录数有个最大限制,这个限制可以通过内核参数配置: `fs.inotify.max_user_watches`。

查看最大 inotify watch 数:

```bash
$ cat /proc/sys/fs/inotify/max_user_watches
8192
```

## 查看进程的 inotify watch 情况

使用下面的脚本查看当前有 inotify watch 类型 fd 的进程以及每个 fd watch 的目录数量,降序输出,带总数统计:

```bash
#!/usr/bin/env bash
#
# Copyright 2019 (c) roc
#
# This script shows processes holding the inotify fd, along with HOW MANY directories each inotify fd watches(0 will be ignored).
total=0
result="EXE PID FD-INFO INOTIFY-WATCHES\n"
while read pid fd; do \
  exe="$(readlink -f /proc/$pid/exe || echo n/a)"; \
  fdinfo="/proc/$pid/fdinfo/$fd" ; \
  count="$(grep -c inotify "$fdinfo" || true)"; \
  if [ $((count)) != 0 ]; then
    total=$((total+count)); \
    result+="$exe $pid $fdinfo $count\n"; \
  fi
done <<< "$(lsof +c 0 -n -P -u root|awk '/inotify$/ { gsub(/[urw]$/,"",$4); print $2" "$4 }')" && echo "total $total inotify watches" && result="$(echo -e $result|column -t)\n" && echo -e "$result" | head -1 && echo -e "$result" | sed "1d" | sort -k 4rn;
```

示例输出:

```bash
total 7882 inotify watches
EXE                                        PID      FD-INFO                   INOTIFY-WATCHES
/usr/local/qcloud/YunJing/YDEyes/YDService 25813    /proc/25813/fdinfo/8      7077
/usr/bin/kubelet                           1173     /proc/1173/fdinfo/22      665
/usr/bin/ruby2.3                           13381    /proc/13381/fdinfo/14     54
/usr/lib/policykit-1/polkitd               1458     /proc/1458/fdinfo/9       14
/lib/systemd/systemd-udevd                 450      /proc/450/fdinfo/9        13
/usr/sbin/nscd                             7935     /proc/7935/fdinfo/3       6
/usr/bin/kubelet                           1173     /proc/1173/fdinfo/28      5
/lib/systemd/systemd                       1        /proc/1/fdinfo/17         4
/lib/systemd/systemd                       1        /proc/1/fdinfo/18         4
/lib/systemd/systemd                       1        /proc/1/fdinfo/26         4
/lib/systemd/systemd                       1        /proc/1/fdinfo/28         4
/usr/lib/policykit-1/polkitd               1458     /proc/1458/fdinfo/8       4
/usr/local/bin/sidecar-injector            4751     /proc/4751/fdinfo/3       3
/usr/lib/accountsservice/accounts-daemon   1178     /proc/1178/fdinfo/7       2
/usr/local/bin/galley                      8228     /proc/8228/fdinfo/10      2
/usr/local/bin/galley                      8228     /proc/8228/fdinfo/9       2
/lib/systemd/systemd                       1        /proc/1/fdinfo/11         1
/sbin/agetty                               1437     /proc/1437/fdinfo/4       1
/sbin/agetty                               1440     /proc/1440/fdinfo/4       1
/usr/bin/kubelet                           1173     /proc/1173/fdinfo/10      1
/usr/local/bin/envoy                       4859     /proc/4859/fdinfo/5       1
/usr/local/bin/envoy                       5427     /proc/5427/fdinfo/5       1
/usr/local/bin/envoy                       6058     /proc/6058/fdinfo/3       1
/usr/local/bin/envoy                       6893     /proc/6893/fdinfo/3       1
/usr/local/bin/envoy                       6950     /proc/6950/fdinfo/3       1
/usr/local/bin/galley                      8228     /proc/8228/fdinfo/3       1
/usr/local/bin/pilot-agent                 3819     /proc/3819/fdinfo/5       1
/usr/local/bin/pilot-agent                 4244     /proc/4244/fdinfo/5       1
/usr/local/bin/pilot-agent                 5901     /proc/5901/fdinfo/3       1
/usr/local/bin/pilot-agent                 6789     /proc/6789/fdinfo/3       1
/usr/local/bin/pilot-agent                 6808     /proc/6808/fdinfo/3       1
/usr/local/bin/pilot-discovery             6231     /proc/6231/fdinfo/3       1
/usr/local/bin/sidecar-injector            4751     /proc/4751/fdinfo/5       1
/usr/sbin/acpid                            1166     /proc/1166/fdinfo/6       1
/usr/sbin/dnsmasq                          7572     /proc/7572/fdinfo/8       1
```

## 调整 inotify watch 限制

如果看到总 watch 数比较大,接近最大限制,可以修改内核参数调高下这个限制。

临时调整:

```bash
sudo sysctl fs.inotify.max_user_watches=524288
```

永久生效:

```bash
echo "fs.inotify.max_user_watches=524288" >> /etc/sysctl.conf && sysctl -p
```

打开 inotify_add_watch 跟踪,进一步 debug inotify watch 耗尽的原因:

```bash
echo 1 >> /sys/kernel/debug/tracing/events/syscalls/sys_exit_inotify_add_watch/enable
```

diff --git a/content/troubleshooting/pod/device-or-resource-busy.md b/content/troubleshooting/pod/device-or-resource-busy.md
new file mode 100644
index 0000000..20bd816
--- /dev/null
+++ b/content/troubleshooting/pod/device-or-resource-busy.md
@@ -0,0 +1,78 @@
# 排查 device or resource busy

## 背景

在 kubernetes 环境中,可能会遇到因目录被占用导致 pod 一直 terminating:

```log
Aug 27 15:52:22 VM-244-70-centos kubelet[906978]: E0827 15:52:22.816125  906978 nestedpendingoperations.go:270] Operation for "\"kubernetes.io/secret/b45f3af4-3574-472e-b263-c2b71c3b2ea0-default-token-fltdk\" (\"b45f3af4-3574-472e-b263-c2b71c3b2ea0\")" failed. No retries permitted until 2021-08-27 15:54:24.816098325 +0800 CST m=+108994.575932846 (durationBeforeRetry 2m2s). Error: "UnmountVolume.TearDown failed for volume \"default-token-fltdk\" (UniqueName: \"kubernetes.io/secret/b45f3af4-3574-472e-b263-c2b71c3b2ea0-default-token-fltdk\") pod \"b45f3af4-3574-472e-b263-c2b71c3b2ea0\" (UID: \"b45f3af4-3574-472e-b263-c2b71c3b2ea0\") : unlinkat /var/lib/kubelet/pods/b45f3af4-3574-472e-b263-c2b71c3b2ea0/volumes/kubernetes.io~secret/default-token-fltdk: device or resource busy"
```

本文记录下排查方法。

## 找出目录被谁占用的

看下目录被哪个进程 mount 了:

```bash
$ find /proc/*/mounts -exec grep /var/lib/kubelet/pods/0104ab85-d0ea-4ac5-a5f9-5bdd12cca589/volumes/kubernetes.io~secret/kube-proxy-token-nvthm {} + 2>/dev/null
/proc/6076/mounts:tmpfs /var/lib/kubelet/pods/0104ab85-d0ea-4ac5-a5f9-5bdd12cca589/volumes/kubernetes.io~secret/kube-proxy-token-nvthm tmpfs rw,relatime 0 0
```

根据找出的进程号,看看是谁干的:

```bash
$ ps -ef | grep -v grep | grep 6076
root      6076  6057  0 Aug26 ?        00:01:54 /usr/local/loglistener/bin loglistener -c /usr/local/loglistener/etc/loglistener.conf
```

看下完整的进程树:

```bash
$ pstree -apnhs 6076
systemd,1 --switched-root --system --deserialize 22
  └─dockerd,1809 --config-file=/etc/docker/daemon.json
      └─docker-containe,1868 --config /var/run/docker/containerd/containerd.toml
          └─docker-containe,6057 -namespace moby -workdir /data/docker/containerd/daemon/io.containerd.runtime.v1.linux/moby/9a8457284ce7078ef838e78b79c87c5b27d8a6682597b44ba7a74d7ec6965365 -address /var/run/docker/containerd/docker-containerd.sock -containerd-binary /usr/bin/docker-containerd -runtime-root ...
              └─loglistener,6076 loglistener -c /usr/local/loglistener/etc/loglistener.conf
                  ├─{loglistener},6108
                  ├─{loglistener},6109
                  ├─{loglistener},6110
                  ├─{loglistener},6111
                  └─{loglistener},6112
```

## 反查 Pod

如果占住这个目录的进程也是通过 Kubernetes 部署的,我们可以反查出是哪个 Pod 干的。

通过 nsenter 进入容器的 netns,查看 ip 地址,反查出是哪个 pod:

```bash
$ nsenter -n -t 6076
$ ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 10000
    link/ether 52:54:00:ca:89:c0 brd ff:ff:ff:ff:ff:ff
    inet 192.168.244.70/24 brd 192.168.244.255 scope global eth1
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:feca:89c0/64 scope link
       valid_lft forever preferred_lft forever
$ kubectl get pod -o wide -A | grep 192.168.244.70
log-agent-24nn6   2/2   Running   0   84d   192.168.244.70   10.10.10.22
```

如果 pod 是 hostNetwork 的,无法通过 ip 来分辨出是哪个 pod,可以提取进程树中出现的容器 id 前几位,然后查出容器名:

```bash
$ docker ps | grep 9a8457284c
9a8457284ce7   imroc/loglistener   "/usr/local/logliste…"   34 hours ago   Up 34 hours   k8s_loglistener_log-agent-wd2rp_kube-system_b0dcfe14-1619-43b5-a158-1e2063696138_1
```

Kubernetes 的容器名里包含了 Pod 名,从中就可以看出该容器属于哪个 pod。

diff --git a/content/troubleshooting/pod/healthcheck-failed.md b/content/troubleshooting/pod/healthcheck-failed.md
new file mode 100644
index 0000000..4e73241
--- /dev/null
+++ b/content/troubleshooting/pod/healthcheck-failed.md
@@ -0,0 +1,38 @@
# 排查健康检查失败

* Kubernetes 健康检查包含就绪检查(readinessProbe)和存活检查(livenessProbe)。
* pod 如果就绪检查失败会将此 pod ip 从 service 中摘除,通过 service 访问,流量将不会被转发给就绪检查失败的 pod。
* pod 如果存活检查失败,kubelet 将会杀死容器并尝试重启。

健康检查失败的可能原因有多种,除了业务程序 BUG 导致不能响应健康检查造成 unhealthy,还可能有其它原因,下面我们来逐个排查。

## 健康检查配置不合理

`initialDelaySeconds` 太短,容器启动慢,导致容器还没完全启动就开始探测,如果 `failureThreshold` 设置为 1,检查失败一次容器就会被 kill,然后 pod 就一直这样被 kill 重启。
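下面是一个相对稳妥的存活检查配置示例(路径、端口与各阈值均为假设值,需按业务实际启动耗时调整):

```yaml
livenessProbe:
  httpGet:
    path: /healthz
    port: 8080
  initialDelaySeconds: 30  # 给应用预留足够的启动时间
  periodSeconds: 10
  failureThreshold: 3      # 连续失败多次才重启,避免偶发抖动误杀
```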
## 节点负载过高

cpu 占用高(比如跑满)会导致进程无法正常发包收包,通常会 timeout,导致 kubelet 认为 pod 不健康。参考 [排查节点高负载](../node/node-high-load.md)。

## 容器进程被木马进程杀死

参考 [使用 systemtap 定位疑难杂症](../skill/use-systemtap-to-locate-problems.md) 进一步定位。

## 容器内进程端口监听挂掉

使用 `netstat -tunlp` 检查端口监听是否还在,如果不在了,抓包可以看到会直接 reset 掉健康检查探测的连接:

```bash
20:15:17.890996 IP 172.16.2.1.38074 > 172.16.2.23.8888: Flags [S], seq 96880261, win 14600, options [mss 1424,nop,nop,sackOK,nop,wscale 7], length 0
20:15:17.891021 IP 172.16.2.23.8888 > 172.16.2.1.38074: Flags [R.], seq 0, ack 96880262, win 0, length 0
20:15:17.906744 IP 10.0.0.16.54132 > 172.16.2.23.8888: Flags [S], seq 1207014342, win 14600, options [mss 1424,nop,nop,sackOK,nop,wscale 7], length 0
20:15:17.906766 IP 172.16.2.23.8888 > 10.0.0.16.54132: Flags [R.], seq 0, ack 1207014343, win 0, length 0
```

连接异常,从而健康检查失败。发生这种情况的原因可能是在一个节点上启动了多个使用 `hostNetwork` 监听相同宿主机端口的 Pod,只会有一个 Pod 监听成功,但监听失败的 Pod 的业务逻辑允许了监听失败,并没有退出,Pod 又配了健康检查,kubelet 就会给 Pod 发送健康检查探测报文,但 Pod 由于没有监听所以就会健康检查失败。

## SYN backlog 设置过小

SYN backlog 大小即 SYN 队列大小,如果短时间内新建连接比较多,而 SYN backlog 设置太小,就会导致新建连接失败,通过 `netstat -s | grep TCPBacklogDrop` 可以看到有多少是因为 backlog 满了导致丢弃的新连接。

如果确认是 backlog 满了导致的丢包,建议调高 backlog 的值,内核参数为 `net.ipv4.tcp_max_syn_backlog`。
diff --git a/content/troubleshooting/pod/status/intro.md b/content/troubleshooting/pod/status/intro.md
new file mode 100644
index 0000000..4c34669
--- /dev/null
+++ b/content/troubleshooting/pod/status/intro.md
@@ -0,0 +1,35 @@
# 排查 Pod 状态异常

本节分享 Pod 状态异常的排查思路与可能原因。

## 常见异常状态排查

- [Terminating](pod-terminating.md)
- [Pending](pod-pending.md)
- [ContainerCreating 或 Waiting](pod-containercreating-or-waiting.md)
- [CrashLoopBackOff](pod-crash.md)
- [ImagePullBackOff](pod-imagepullbackoff.md)

## ImageInspectError

通常是镜像文件损坏了,可以尝试删除损坏的镜像重新拉取。

## Error

通常处于 Error 状态说明 Pod 启动过程中发生了错误。常见的原因包括:

* 依赖的 ConfigMap、Secret 或者 PV 等不存在。
* 请求的资源超过了管理员设置的限制,比如超过了 LimitRange 等。
* 违反集群的安全策略,比如违反了 PodSecurityPolicy 等。
* 容器无权操作集群内的资源,比如开启 RBAC 后,需要为 ServiceAccount 配置角色绑定。

## Unknown

通常是节点失联,没有上报状态给 apiserver,到达阀值后 controller-manager 认为节点失联并将其状态置为 `Unknown`。

可能原因:

* 节点高负载导致无法上报。
* 节点宕机。
* 节点被关机。
* 网络不通。
diff --git a/content/troubleshooting/pod/status/pod-containercreating-or-waiting.md b/content/troubleshooting/pod/status/pod-containercreating-or-waiting.md
new file mode 100644
index 0000000..3cdf17e
--- /dev/null
+++ b/content/troubleshooting/pod/status/pod-containercreating-or-waiting.md
@@ -0,0 +1,142 @@
# Pod 一直 ContainerCreating 或 Waiting

## 镜像问题

* 镜像名称错误。
* 错误的镜像标签。
* 错误的镜像仓库地址。
* 镜像仓库需要身份验证。

## 依赖问题

在 pod 启动之前,kubelet 将尝试检查与其他 Kubernetes 元素的所有依赖关系。如果无法满足这些依赖项之一,则 pod 将会保持挂起状态。

依赖主要是挂载相关的,确认方法见下面的示例:

1. pvc
2. configmap
3. secret
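可以用下面的命令快速确认这些依赖对象是否存在(示例,名称均为占位符):

```bash
kubectl -n <namespace> describe pod <pod-name>  # Events 中会提示缺少哪个依赖
kubectl -n <namespace> get pvc,configmap,secret # 确认依赖的对象是否都已创建
```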
## 挂载 Volume 失败

Volume 挂载失败也分许多种情况,先列下我这里目前已知的。

### Pod 漂移没有正常解挂之前的磁盘

在云厂商托管的 K8S 服务环境下,默认挂载的 Volume 一般是块存储类型的云硬盘,如果某个节点挂了,kubelet 无法正常运行或与 apiserver 通信,到达时间阀值后会触发驱逐,自动在其它节点上启动相同的副本 (Pod 漂移),但是由于被驱逐的 Node 无法正常运行,并不知道自己被驱逐了,也就没有正常执行解挂,cloud-controller-manager 也在等解挂成功后再调用云厂商的接口将磁盘真正从节点上解挂,通常会等到一个时间阀值后 cloud-controller-manager 才会强制解挂云盘,然后再将其挂载到 Pod 最新所在节点上,这种情况下 ContainerCreating 的时间相对长一点,但一般最终是可以启动成功的,除非云厂商的 cloud-controller-manager 逻辑有 bug。

### 命中 K8S 挂载 configmap/secret 的 subpath 的 bug

最近发现如果 Pod 挂载了 configmap 或 secret,且后面修改了 configmap 或 secret 的内容,Pod 里的容器又原地重启了(比如存活检查失败被 kill 然后重启拉起),就会触发 K8S 的这个 bug,团队的小伙伴已提 PR: https://github.com/kubernetes/kubernetes/pull/82784

如果是这种情况,容器会一直启动不成功,可以看到类似以下的报错:

``` bash
$ kubectl -n prod get pod -o yaml manage-5bd487cf9d-bqmvm
...
lastState: terminated
containerID: containerd://e6746201faa1dfe7f3251b8c30d59ebf613d99715f3b800740e587e681d2a903
exitCode: 128
finishedAt: 2019-09-15T00:47:22Z
message: 'failed to create containerd task: OCI runtime create failed: container_linux.go:345:
starting container process caused "process_linux.go:424: container init
caused \"rootfs_linux.go:58: mounting \\\"/var/lib/kubelet/pods/211d53f4-d08c-11e9-b0a7-b6655eaf02a6/volume-subpaths/manage-config-volume/manage/0\\\"
to rootfs \\\"/run/containerd/io.containerd.runtime.v1.linux/k8s.io/e6746201faa1dfe7f3251b8c30d59ebf613d99715f3b800740e587e681d2a903/rootfs\\\"
at \\\"/run/containerd/io.containerd.runtime.v1.linux/k8s.io/e6746201faa1dfe7f3251b8c30d59ebf613d99715f3b800740e587e681d2a903/rootfs/app/resources/application.properties\\\"
caused \\\"no such file or directory\\\"\"": unknown'
```

### Unable to mount volumes

如果报类似如下事件:

```txt
Unable to mount volumes for pod "es-0_prod(0f08e3aa-aa56-11ec-ab5b-5254006900dd)": timeout expired waiting for volumes to attach or mount for pod "prod"/"es-0". list of unmounted volumes=[applog]. list of unattached volumes=[applog default-token-m7bf7]
```

参考 [存储排障: Unable to mount volumes](../../storage/unable-to-mount-volumes.md)。

## 磁盘爆满

启动 Pod 会调 CRI 接口创建容器,容器运行时创建容器时通常会在数据目录下为新建的容器创建一些目录和文件,如果数据目录所在的磁盘空间满了就会创建失败并报错:

```bash
Events:
  Type     Reason                    Age                  From                   Message
  ----     ------                    ----                 ----                   -------
  Warning  FailedCreatePodSandBox    2m (x4307 over 16h)  kubelet, 10.179.80.31  (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create a sandbox for pod "apigateway-6dc48bf8b6-l8xrw": Error response from daemon: mkdir /var/lib/docker/aufs/mnt/1f09d6c1c9f24e8daaea5bf33a4230de7dbc758e3b22785e8ee21e3e3d921214-init: no space left on device
```

解决方法参考本书 [节点排障:磁盘爆满](../../node/disk-full.md)

## 节点内存碎片化

如果节点上内存碎片化严重,缺少大页内存,会导致即使总的剩余内存较多,但还是会申请内存失败,参考 [节点排障: 内存碎片化](../../node/memory-fragmentation.md)

## limit 设置太小或者单位不对

如果 limit 设置过小以至于不足以成功运行 Sandbox 也会造成这种状态,常见的是因为 memory limit 单位设置不对造成的 limit 过小,比如误将 memory 的 limit 单位像 request 一样设置为小 `m`,这个单位在 memory 上不适用,会被 k8s 识别成 byte,应该用 `Mi` 或 `M`。

举个例子: 如果 memory limit 设为 1024m,表示限制 1.024 Byte,这么小的内存,pause 容器一启动就会被 cgroup-oom kill 掉,导致 pod 状态一直处于 ContainerCreating。

这种情况通常会报下面的 event:

``` txt
Pod sandbox changed, it will be killed and re-created.
```

kubelet 报错:

``` txt
to start sandbox container for pod ... Error response from daemon: OCI runtime create failed: container_linux.go:348: starting container process caused "process_linux.go:301: running exec setns process for init caused \"signal: killed\"": unknown
```

## 拉取镜像失败

镜像拉取失败也分很多情况,这里列举下:

* 配置了错误的镜像
* Kubelet 无法访问镜像仓库(比如默认 pause 镜像在 gcr.io 上,国内环境访问需要特殊处理)
* 拉取私有镜像的 imagePullSecret 没有配置或配置有误
* 镜像太大,拉取超时(可以适当调整 kubelet 的 --image-pull-progress-deadline 和 --runtime-request-timeout 选项)

## CNI 网络错误

如果发生 CNI 网络错误通常需要检查下网络插件的配置和运行状态,如果没有正确配置或正常运行通常表现为:

* 无法配置 Pod 网络
* 无法分配 Pod IP

## controller-manager 异常

查看 master 上 kube-controller-manager 状态,异常的话尝试重启。

## 安装 docker 没删干净旧版本

如果节点上之前安装过 docker,没有清理干净又重新安装了 docker,比如在 centos 上用 yum 安装:

``` bash
yum install -y docker
```

这样可能会导致 dockerd 创建容器一直不成功,从而 Pod 状态一直 ContainerCreating,查看 event 报错:

```
  Type     Reason                  Age                  From                 Message
  ----     ------                  ----                 ----                 -------
  Warning  FailedCreatePodSandBox  18m (x3583 over 83m)  kubelet, 192.168.4.5  (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container for pod "nginx-7db9fccd9b-2j6dh": Error response from daemon: ttrpc: client shutting down: read unix @->@/containerd-shim/moby/de2bfeefc999af42783115acca62745e6798981dff75f4148fae8c086668f667/shim.sock: read: connection reset by peer: unknown
  Normal   SandboxChanged          3m12s (x4420 over 83m)  kubelet, 192.168.4.5  Pod sandbox changed, it will be killed and re-created.
```

可能是因为重复安装的 docker 版本不一致,导致一些组件之间不兼容,从而导致 dockerd 无法正常创建容器。

## 存在同名容器

如果节点上已有同名容器,创建 sandbox 就会失败,event:

```
  Warning  FailedCreatePodSandBox  2m  kubelet, 10.205.8.91  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create a sandbox for pod "lomp-ext-d8c8b8c46-4v8tl": operation timeout: context deadline exceeded
  Warning  FailedCreatePodSandBox  3s (x12 over 2m)  kubelet, 10.205.8.91  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create a sandbox for pod "lomp-ext-d8c8b8c46-4v8tl": Error response from daemon: Conflict. The container name "/k8s_POD_lomp-ext-d8c8b8c46-4v8tl_default_65046a06-f795-11e9-9bb6-b67fb7a70bad_0" is already in use by container "30aa3f5847e0ce89e9d411e76783ba14accba7eb7743e605a10a9a862a72c1e2". You have to remove (or rename) that container to be able to reuse that name.
```

关于什么情况下会产生同名容器,这个有待研究。

diff --git a/content/troubleshooting/pod/status/pod-crash.md b/content/troubleshooting/pod/status/pod-crash.md
new file mode 100644
index 0000000..69fef78
--- /dev/null
+++ b/content/troubleshooting/pod/status/pod-crash.md
@@ -0,0 +1,197 @@
# 排查 Pod CrashLoopBackOff

Pod 如果处于 `CrashLoopBackOff` 状态,说明之前启动过,只是又异常退出了,只要 Pod 的 [restartPolicy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) 不是 Never 就可能被重启拉起。

通过 kubectl 可以发现是否有 Pod 发生重启:

```bash
$ kubectl get pod
NAME                      READY   STATUS    RESTARTS   AGE
grafana-c9dd59d46-s9dc6   2/2     Running   2          69d
```

当 `RESTARTS` 大于 0 时,说明 Pod 中有容器重启了。

这时,我们可以先看下容器进程的退出状态码来缩小问题范围。

## 排查容器退出状态码

使用 `kubectl describe pod <pod-name>` 查看异常 pod 的状态:

```bash
Containers:
  kubedns:
    Container ID:  docker://5fb8adf9ee62afc6d3f6f3d9590041818750b392dff015d7091eaaf99cf1c945
    Image:         ccr.ccs.tencentyun.com/library/kubedns-amd64:1.14.4
    Image ID:      docker-pullable://ccr.ccs.tencentyun.com/library/kubedns-amd64@sha256:40790881bbe9ef4ae4ff7fe8b892498eecb7fe6dcc22661402f271e03f7de344
    Ports:         10053/UDP, 10053/TCP, 10055/TCP
    Host Ports:    0/UDP, 0/TCP, 0/TCP
    Args:
      --domain=cluster.local.
      --dns-port=10053
      --config-dir=/kube-dns-config
      --v=2
    State:          Running
      Started:      Tue, 27 Aug 2019 10:58:49 +0800
    Last State:     Terminated
      Reason:       Error
      Exit Code:    255
      Started:      Tue, 27 Aug 2019 10:40:42 +0800
      Finished:     Tue, 27 Aug 2019 10:58:27 +0800
    Ready:          True
    Restart Count:  1
```

在容器列表里看 `Last State` 字段,其中 `Exit Code` 即程序上次退出时的状态码,如果不为 0,表示异常退出,我们可以分析下原因。

### 退出状态码的范围

* 必须在 0-255 之间。
* 0 表示正常退出。
* 外界中断导致程序退出的时候,状态码区间在 129-255 (操作系统给程序发送中断信号,比如 `kill -9` 是 `SIGKILL`,`ctrl+c` 是 `SIGINT`)。
* 一般程序自身原因导致的异常退出,状态码区间在 1-128 (这只是一般约定,程序如果一定要用 129-255 的状态码也是可以的),这时可以用 `kubectl logs -p` 查看容器重启前的标准输出。

假如写代码时指定的退出状态码不在 0-255 之间,例如: `exit(-1)`,这时会自动做一个转换,最终呈现的状态码还是会在 0-255 之间。我们把状态码记为 `code`:

* 当指定的退出时状态码为负数,那么转换公式如下:

```text
256 - (|code| % 256)
```

* 当指定的退出时状态码为正数,那么转换公式如下:

```text
code % 256
```

### 常见异常状态码

**0**

此状态码表示正常退出,一般是业务进程主动退出了,可以排查下退出前日志,如果日志有打到标准输出,可以通过 `kubectl logs -p` 查看退出前的容器标准输出。

也可能是存活检查失败被重启,重启时收到 SIGTERM 信号进程正常退出,可以检查事件是否有存活检查失败的日志。

**137**

此状态码说明容器是被 `SIGKILL` 信号强制杀死的。可能原因:
1. 发生 Cgroup OOM。Pod 中容器使用的内存达到了它的资源限制(`resources.limits`),在 `describe pod` 输出中一般可以看到 Reason 是 `OOMKilled`。
2. 发生系统 OOM,内核会选取一些进程杀掉来释放内存,可能刚好选到某些容器的主进程。
3. `livenessProbe` (存活检查) 失败,kubelet 重启容器时等待超时,最后发 `SIGKILL` 强制重启。
4. 被其它未知进程杀死,比如某些安全组件或恶意木马。

**1 和 255**

这种可能是一般错误,具体错误原因只能看业务日志,因为很多程序员写异常退出时习惯用 `exit(1)` 或 `exit(-1)`,-1 会根据转换规则转成 255。

255 也可能是 Pod 宿主机发生了重启导致的容器重启。

### 状态码参考

这里罗列了一些状态码的含义:[Appendix E. Exit Codes With Special Meanings](https://tldp.org/LDP/abs/html/exitcodes.html)

### Linux 标准中断信号

Linux 程序被外界中断时会发送中断信号,程序退出时的状态码就是中断信号值加上 128 得到的,比如 `SIGKILL` 的中断信号值为 9,那么程序退出状态码就为 9+128=137。以下是标准信号值参考:

```text
Signal     Value     Action   Comment
──────────────────────────────────────────────────────────────────────
SIGHUP        1       Term    Hangup detected on controlling terminal
                              or death of controlling process
SIGINT        2       Term    Interrupt from keyboard
SIGQUIT       3       Core    Quit from keyboard
SIGILL        4       Core    Illegal Instruction
SIGABRT       6       Core    Abort signal from abort(3)
SIGFPE        8       Core    Floating-point exception
SIGKILL       9       Term    Kill signal
SIGSEGV      11       Core    Invalid memory reference
SIGPIPE      13       Term    Broken pipe: write to pipe with no
                              readers; see pipe(7)
SIGALRM      14       Term    Timer signal from alarm(2)
SIGTERM      15       Term    Termination signal
SIGUSR1   30,10,16    Term    User-defined signal 1
SIGUSR2   31,12,17    Term    User-defined signal 2
SIGCHLD   20,17,18    Ign     Child stopped or terminated
SIGCONT   19,18,25    Cont    Continue if stopped
SIGSTOP   17,19,23    Stop    Stop process
SIGTSTP   18,20,24    Stop    Stop typed at terminal
SIGTTIN   21,21,26    Stop    Terminal input for background process
SIGTTOU   22,22,27    Stop    Terminal output for background process
```

### C/C++ 退出状态码

`/usr/include/sysexits.h` 试图将退出状态码标准化(仅限 C/C++):

```text
#define EX_OK           0       /* successful termination */

#define EX__BASE        64      /* base value for error messages */

#define EX_USAGE        64      /* command line usage error */
#define EX_DATAERR      65      /* data format error */
#define EX_NOINPUT      66      /* cannot open input */
#define EX_NOUSER       67      /* addressee unknown */
#define EX_NOHOST       68      /* host name unknown */
#define EX_UNAVAILABLE  69      /* service unavailable */
#define EX_SOFTWARE     70      /* internal software error */
#define EX_OSERR        71      /* system error (e.g., can't fork) */
#define EX_OSFILE       72      /* critical OS file missing */
#define EX_CANTCREAT    73      /* can't create (user) output file */
#define EX_IOERR        74      /* input/output error */
#define EX_TEMPFAIL     75      /* temp failure; user is invited to retry */
#define EX_PROTOCOL     76      /* remote error in protocol */
+### Common exit status codes
+
+**0**
+
+This code means a normal exit, usually because the business process exited on its own. Check the logs from before the exit; if the logs go to stdout, `kubectl logs -p` shows the container's stdout before the restart.
+
+It may also be that a liveness probe failed and the container was restarted: the process received SIGTERM during the restart and exited normally. Check the events for liveness-probe failures.
+
+**137**
+
+This code means the container was forcibly killed by `SIGKILL`. Possible causes:
+1. A cgroup OOM occurred: the container's memory usage reached its resource limit (`resources.limits`); in the `describe pod` output the Reason is usually `OOMKilled`.
+2. A system OOM occurred: the kernel picked some processes to kill to free memory, and a container's main process happened to be chosen.
+3. The `livenessProbe` failed, kubelet timed out waiting while restarting the container, and finally sent `SIGKILL` to force the restart.
+4. The process was killed by some other unknown process, e.g. a security component or a trojan.
+
+**1 and 255**
+
+These are usually generic errors; the concrete cause can only be found in the business logs, because many programmers habitually call `exit(1)` or `exit(-1)` on abnormal exit, and -1 is converted to 255 by the rules above.
+
+255 can also mean the Pod's host was rebooted, causing the container to restart.
+
+### Exit code reference
+
+This page lists the meanings of some status codes: [Appendix E. Exit Codes With Special Meanings](https://tldp.org/LDP/abs/html/exitcodes.html)
+
+### Standard Linux interrupt signals
+
+When a Linux program is interrupted from the outside, it receives an interrupt signal, and the program's exit status code is the signal value plus 128. For example, `SIGKILL` has signal value 9, so the exit status code is 9+128=137. The standard signal values for reference:
+
+```text
+Signal Value Action Comment
+──────────────────────────────────────────────────────────────────────
+SIGHUP 1 Term Hangup detected on controlling terminal
+ or death of controlling process
+SIGINT 2 Term Interrupt from keyboard
+SIGQUIT 3 Core Quit from keyboard
+SIGILL 4 Core Illegal Instruction
+SIGABRT 6 Core Abort signal from abort(3)
+SIGFPE 8 Core Floating-point exception
+SIGKILL 9 Term Kill signal
+SIGSEGV 11 Core Invalid memory reference
+SIGPIPE 13 Term Broken pipe: write to pipe with no
+ readers; see pipe(7)
+SIGALRM 14 Term Timer signal from alarm(2)
+SIGTERM 15 Term Termination signal
+SIGUSR1 30,10,16 Term User-defined signal 1
+SIGUSR2 31,12,17 Term User-defined signal 2
+SIGCHLD 20,17,18 Ign Child stopped or terminated
+SIGCONT 19,18,25 Cont Continue if stopped
+SIGSTOP 17,19,23 Stop Stop process
+SIGTSTP 18,20,24 Stop Stop typed at terminal
+SIGTTIN 21,21,26 Stop Terminal input for background process
+SIGTTOU 22,22,27 Stop Terminal output for background process
+```
+
+### C/C++ exit status codes
+
+`/usr/include/sysexits.h` attempts to standardize exit status codes (C/C++ only):
+
+```text
+#define EX_OK 0 /* successful termination */
+
+#define EX__BASE 64 /* base value for error messages */
+
+#define EX_USAGE 64 /* command line usage error */
+#define EX_DATAERR 65 /* data format error */
+#define EX_NOINPUT 66 /* cannot open input */
+#define EX_NOUSER 67 /* addressee unknown */
+#define EX_NOHOST 68 /* host name unknown */
+#define EX_UNAVAILABLE 69 /* service unavailable */
+#define EX_SOFTWARE 70 /* internal software error */
+#define EX_OSERR 71 /* system error (e.g., can't fork) */
+#define EX_OSFILE 72 /* critical OS file missing */
+#define EX_CANTCREAT 73 /* can't create (user) output file */
+#define EX_IOERR 74 /* input/output error */
+#define EX_TEMPFAIL 75 /* temp failure; user is invited to retry */
+#define EX_PROTOCOL 76 /* remote error in protocol */
+#define EX_NOPERM 77 /* permission denied */
+#define EX_CONFIG 78 /* configuration error */
+
+#define EX__MAX 78 /* maximum listed value */
+```
+
+## Possible causes
+
+Below are some possible causes.
+
+### The container process exited on its own
+
+If the container process exited on its own, the exit status code is usually in 0-128. Besides bugs in the business program, there are many other possible causes.
+
+`kubectl logs -p` shows the container's stdout from before the exit; if business logs are collected elsewhere, check those too.
+
+### System OOM
+
+On a system OOM, the container's exit status code is 137, meaning it was killed by `SIGKILL`, and the kernel logs `Out of memory: Kill process ...`. Most likely either some processes not managed by Kubernetes are running on the node and consuming a lot of memory, or kubelet's `--kube-reserved` and `--system-reserved` are set too small, leaving insufficient headroom for non-container processes. The total actual memory usage of all Pods on a node cannot exceed the cgroup limit at `/sys/fs/cgroup/memory/kubepods`, which equals `capacity - "kube-reserved" - "system-reserved"`; so if the reservations are sized sensibly, and the node's non-container processes (kubelet, dockerd, kube-proxy, sshd, etc.) stay within the space kubelet reserves for them, a system OOM will not occur. Adjust the reservations to fit your actual needs.
+
+### cgroup OOM
+
+If the process was killed by a cgroup OOM, the `Reason` in the Pod's events shows `OOMKilled`, meaning the container's actual memory usage exceeded its limit, and the kernel logs `Memory cgroup out of memory`. Adjust the limit as needed.
+
+### Health check failure
+
+See [Pod health check failures](../healthcheck-failed.md) for further troubleshooting.
+
+### Host reboot
+
+If the Pod's host is rebooted, the containers restart, and the status code is usually 255.
+
+### Node memory fragmentation
+
+If memory on the node is severely fragmented and large contiguous pages are scarce, memory allocations can fail even though plenty of total memory is free. See [memory fragmentation](../../node/memory-fragmentation.md).
+
+### A configmap subpath is mounted
+
+Kubernetes' support for configmap subpaths has a known problem: if a container mounts a configmap with a subpath and the configmap's content is later modified, the container fails when it restarts. See the issue [modified subpath configmap mount fails when container restarts](https://github.com/kubernetes/kubernetes/issues/68211).
+
+The event log shows the subpath mount failing with `no such file or directory`; `describe pod` looks like:
+
+```txt
+ Last State: Terminated
+ Reason: StartError
+ Message: failed to create containerd task: OCI runtime create failed: container_linux.go:349: starting container process caused "process_linux.go:449: container init caused \"rootfs_linux.go:58: mounting \\\"/data/kubelet/pods/d6f90d2b-a5c4-11ec-8b09-5254009e5e2e/volume-subpaths/conf/demo-container/2\\\" to rootfs \\\"/run/containerd/io.containerd.runtime.v2.task/k8s.io/f28499d3c81b145ef2e88c31adaade0466ef71cee537377a439bad36707a7e3e/rootfs\\\" at \\\"/run/containerd/io.containerd.runtime.v2.task/k8s.io/f28499d3c81b145ef2e88c31adaade0466ef71cee537377a439bad36707a7e3e/rootfs/app/conf/server.yaml\\\" caused \\\"no such file or directory\\\"\"": unknown
+ Exit Code: 128
+```
+
+> Some platforms implement in-place restarts, where updating a workload restarts the Pod without recreating it; this makes the problem more likely to occur.
+
+The recommended fix is to change the usage and not mount with subpath. subpath is usually used to avoid overwriting configuration files that already exist in the image; instead, mount the configmap at another path and include the image's existing configuration files from there.
\ No newline at end of file
diff --git a/content/troubleshooting/pod/status/pod-imagepullbackoff.md b/content/troubleshooting/pod/status/pod-imagepullbackoff.md
new file mode 100644
index 0000000..3307f1f
--- /dev/null
+++ b/content/troubleshooting/pod/status/pod-imagepullbackoff.md
@@ -0,0 +1,42 @@
+# Troubleshooting Pod ImagePullBackOff
+
+## HTTP registry whose address is not in insecure-registry
+
+dockerd pulls images from HTTPS registries by default. If you use an HTTP registry, it must be added to the insecure-registry configuration, and dockerd must then be restarted or reloaded to take effect.
+
+## Self-signed HTTPS registry without the CA certificate on the node
+
+If the registry uses HTTPS but its certificate is self-signed, dockerd validates the registry's certificate, and the registry can only be used once validation succeeds. For validation to succeed, place the registry's CA certificate at `/etc/docker/certs.d/<registry:port>/ca.crt`.
+
+## Private registry authentication failure
+
+If the registry requires authentication, the pull fails when the Pod has no imagePullSecret configured, or when the configured Secret does not exist or is wrong.
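+A minimal sketch of wiring up pull credentials (the registry address, secret name and account below are made-up examples):
+
+``` bash
+# Create a docker-registry type Secret holding the credentials
+kubectl create secret docker-registry my-registry-cred \
+  --docker-server=registry.example.com \
+  --docker-username=myuser \
+  --docker-password='mypassword'
+```
+
+Then reference it from the Pod spec via `spec.imagePullSecrets` (`imagePullSecrets: [{name: my-registry-cred}]`), or patch it into the namespace's default ServiceAccount so Pods pick it up automatically.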
+## Corrupt image
+
+If the pushed image file is corrupt, it cannot be used even after downloading; the image must be pushed again.
+
+## Image pull timeout
+
+If many new Pods start on a node at once, image downloads may queue up; when large images at the front of the queue take a long time to download, the Pods queued behind them report pull timeouts.
+
+kubelet downloads images serially by default:
+
+``` txt
+--serialize-image-pulls Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. (default true)
+```
+
+You can also enable parallel pulls and control the concurrency:
+
+``` txt
+--registry-qps int32 If > 0, limit registry pull QPS to this value. If 0, unlimited. (default 5)
+--registry-burst int32 Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0 (default 10)
+```
+
+## Image does not exist
+
+kubelet logs:
+
+``` bash
+PullImage "imroc/test:v0.2" from image service failed: rpc error: code = Unknown desc = Error response from daemon: manifest for imroc/test:v0.2 not found
+```
\ No newline at end of file
diff --git a/content/troubleshooting/pod/status/pod-pending.md b/content/troubleshooting/pod/status/pod-pending.md
new file mode 100644
index 0000000..465525b
--- /dev/null
+++ b/content/troubleshooting/pod/status/pod-pending.md
@@ -0,0 +1,140 @@
+# Troubleshooting a Pod stuck in Pending
+
+A Pod stuck in Pending generally failed to schedule; we can usually judge the reason from the events shown by describe:
+
+``` bash
+$ kubectl describe pod tikv-0
+...
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Warning FailedScheduling 3m (x106 over 33m) default-scheduler 0/4 nodes are available: 1 node(s) had no available volume zone, 2 Insufficient cpu, 3 Insufficient memory.
+```
+
+## No node has enough resources for the Pod
+
+Kubernetes decides which nodes are schedulable based on the Pod's requests and each node's allocated vs. allocatable resources (CPU, memory, GPU, max Pods, etc.). If no node has enough resources left, the Pod stays Pending.
+
+How do you judge whether a node has enough resources? Check the node with `kubectl describe node <node-name>` and pay attention to:
+
+* `Allocatable`: the total resources that can be requested on this node.
+* `Allocated resources`: the resources already allocated on this node (Allocatable minus the total requests of all Pods on the node).
+
+Subtracting the latter from the former gives the resources still available for requests. If this value is smaller than the Pod's request, the Pod's resource requirement is not met; the scheduler drops this node in the Predicates phase and the Pod is not scheduled onto it.
+
+## Affinity not satisfied
+
+If the Pod has a nodeSelector specifying labels the node must carry, the scheduler only considers nodes with those labels; if no node has them, or the nodes that do fail other conditions, the Pod cannot be scheduled. See the official docs: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+
+If the Pod has affinity configured, the scheduling algorithm may likewise find no node that satisfies it. Affinity comes in several kinds:
+
+* nodeAffinity: node affinity, which can be seen as an enhanced nodeSelector, restricting the Pod to a subset of nodes.
+* podAffinity: Pod affinity, used to schedule related Pods to the same place, where "same place" can mean the same node or nodes in the same availability zone, etc.
+* podAntiAffinity: Pod anti-affinity, used to avoid scheduling a class of Pods to the same place to prevent single points of failure, e.g. spreading the cluster DNS Pods across nodes so that one node failure does not break DNS resolution, and with it the business, for the whole cluster.
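+As a minimal sketch of the nodeSelector case (the label key/value and Pod name are made up for illustration):
+
+``` bash
+# This Pod can only be scheduled onto nodes labeled disktype=ssd
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-ssd
+spec:
+  nodeSelector:
+    disktype: ssd
+  containers:
+  - name: nginx
+    image: nginx
+EOF
+
+# If no node carries the label, the Pod stays Pending;
+# labeling a node unblocks scheduling:
+kubectl label nodes <node-name> disktype=ssd
+```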
+## Node not schedulable
+
+A node can become unschedulable due to node pressure (NotReady) or human action (cordoning); such nodes will not schedule any Pods until their state changes back.
+
+`kubectl get node` shows whether a node is `NotReady` or `SchedulingDisabled`.
+
+## Attached disks or fixed IPs preventing the Pod from moving
+
+If the Pod mounts a disk (block storage), note that cloud disks typically cannot cross availability zones (the latency would be too high). If the cluster's nodes span multiple zones and the current zone has no schedulable capacity, the Pod cannot drift to another zone either.
+
+The Pod reports events like:
+
+```txt
+0/4 nodes are available: 2 node(s) insufficient memory, 2 node(s) had no available volume zone.
+```
+
+Fix: either delete the PVC and recreate the Pod, so a disk is created and attached in whatever zone it is scheduled to, or add nodes in the Pod's original availability zone to provide capacity.
+
+Likewise, if the Pod's IP is fixed (via a plugin or the cloud vendor's network implementation), the Pod usually cannot move to nodes in other subnets.
+
+Fix: add nodes, or remove the fixed IP and recreate the Pod.
+
+## Taints and tolerations
+
+If a node carries taints, a Pod must tolerate them to be scheduled there:
+
+```bash
+0/5 nodes are available: 3 node(s) had taints that the pod didn't tolerate, 2 Insufficient memory.
+```
+
+describe node shows which taints a node has:
+
+``` bash
+$ kubectl describe nodes host1
+...
+Taints: special=true:NoSchedule
+...
+```
+
+If you want the Pod to be schedulable there, there are usually two fixes:
+
+1. Remove the taint:
+
+``` bash
+kubectl taint nodes host1 special-
+```
+
+2. Add a toleration for the taint to the Pod:
+
+``` yaml
+tolerations:
+- key: "special"
+  operator: "Equal"
+  value: "true"
+  effect: "NoSchedule"
+```
+
+We usually use the latter. Taints can be added manually or automatically; let's dig into both.
+
+### Manually added taints
+
+A taint can be added to a node like this:
+
+``` bash
+$ kubectl taint node host1 special=true:NoSchedule
+node "host1" tainted
+```
+
+Also, in some scenarios you may want newly added nodes to accept no Pods until certain node configuration is adjusted; in that case, add the `node.kubernetes.io/unschedulable` taint to every new node.
+
+### Automatically added taints
+
+Taints can also be added automatically when a node is unhealthy. Starting from v1.12 the `TaintNodesByCondition` feature is Beta and enabled by default: controller-manager checks the node's Conditions and automatically taints the node when they match. The mapping between Conditions and Taints is:
+
+``` txt
+Condition Value Taints
+--------- ----- ------
+OutOfDisk True node.kubernetes.io/out-of-disk
+Ready False node.kubernetes.io/not-ready
+Ready Unknown node.kubernetes.io/unreachable
+MemoryPressure True node.kubernetes.io/memory-pressure
+PIDPressure True node.kubernetes.io/pid-pressure
+DiskPressure True node.kubernetes.io/disk-pressure
+NetworkUnavailable True node.kubernetes.io/network-unavailable
+```
+
+What these conditions mean:
+
+* OutOfDisk True: the node is out of disk space.
+* Ready False: the node is unhealthy.
+* Ready Unknown: the node is unreachable; if it reports no status for `node-monitor-grace-period` (default 40s), controller-manager sets its status to Unknown.
+* MemoryPressure True: the node is under memory pressure, i.e. little memory is actually available.
+* PIDPressure True: too many processes are running on the node; PIDs are running out.
+* DiskPressure True: the node has too little available disk space.
+* NetworkUnavailable True: the node's network is misconfigured and it cannot communicate with other Pods.
+
+Additionally, in cloud environments such as Tencent Cloud TKE, a newly added node first gets the `node.cloudprovider.kubernetes.io/uninitialized` taint, which is removed automatically once the node initializes successfully, so Pods are not scheduled onto uninitialized nodes.
+
+## kube-scheduler not running properly
+
+Check whether `kube-scheduler` on the master is healthy; if it is abnormal, a restart may recover it temporarily.
+
+## References
+
+* [Understanding Kubernetes pod pending problems](https://sysdig.com/blog/kubernetes-pod-pending-problems/)
+* [彻底搞懂 K8S Pod Pending 故障原因及解决方案](https://mp.weixin.qq.com/s/SBpnxLfMq4Ubsvg5WH89lA)
\ No newline at end of file
diff --git a/content/troubleshooting/pod/status/pod-terminating.md b/content/troubleshooting/pod/status/pod-terminating.md
new file mode 100644
index 0000000..2fc611c
--- /dev/null
+++ b/content/troubleshooting/pod/status/pod-terminating.md
@@ -0,0 +1,244 @@
+# Troubleshooting a Pod stuck in Terminating
+
+Sometimes deleting a Pod gets stuck: it stays in Terminating and never goes away. This article gives a troubleshooting approach and possible causes.
+
+## Analysis approach
+
+A Pod in Terminating means it has been deleted but the deletion cannot finish.
+
+The deletion is usually triggered by:
+1. A user deleting the Pod manually.
+2. A workload rolling update deleting the Pod automatically.
+3. Node eviction cleaning up the Pod automatically.
+4. The node staying `NotReady` for a long time, so the Pod is deleted automatically in order to be rescheduled.
+
+The Pod deletion flow:
+1. The APIServer receives the delete request; the Pod is marked for deletion and enters `Terminating`.
+2. The kubelet on the node watches the deletion and starts destroying the Pod.
+3. The kubelet calls the runtime interface to clean up the containers.
+4. Once all containers are destroyed, it notifies the APIServer.
+5. The APIServer learns the Pod was destroyed and checks whether `finalizers` remain in the metadata; if so, it waits for the other controllers to finish their cleanup, otherwise it removes the Pod record from etcd directly.
+
+As you can see, the deletion flow involves the APIServer, etcd, the kubelet and the container runtime (e.g. docker, containerd).
+
+Since you can see the Pod stuck in Terminating at all, the APIServer is responding and reading from etcd normally, so the problem is rarely there; it is usually in the operations on the node.
+
+The analysis typically combines the events with an investigation on the node; a quick first check is shown below.
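+A quick way to see whether the Pod is really marked for deletion and whether finalizers remain (a diagnostic sketch; replace the Pod name):
+
+```bash
+kubectl get pod <pod-name> \
+  -o jsonpath='{.metadata.deletionTimestamp}{"\n"}{.metadata.finalizers}{"\n"}'
+```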
+## Check whether the Pod's node is abnormal
+
+Start with a quick check of the node using kubectl:
+
+```bash
+# Find Terminating Pods and their nodes
+$ kubectl get pod -o wide | grep Terminating
+grafana-5d7ff8cb89-8gdtz 1/1 Terminating 1 97d 10.10.7.150 172.20.32.15
+
+# Check whether the node is abnormal
+$ kubectl get node 172.20.32.15
+NAME STATUS ROLES AGE VERSION
+172.20.32.15 NotReady <none> 182d v1.20.6
+
+# Check the node's events
+$ kubectl describe node 172.20.32.15
+```
+
+If you have monitoring, check the node's metrics; without monitoring you can also log in to the node to investigate.
+
+### Node under high load
+
+If the node's load is too high, it may not spare enough CPU to destroy the Pod, so destruction never completes. It may even have destroyed the Pod but be unable to talk to the APIServer because of the load, timing out forever, so the APIServer never learns the Pod was destroyed and the Pod is never fully deleted.
+
+### Node powered off
+
+If the node is shut down, it obviously cannot perform the destroy operations.
+
+### Node network problems
+
+If the node cannot reach the APIServer due to network problems, the APIServer cannot learn that the Pod was destroyed, so the Pod is never fully deleted.
+
+There are many possible network causes, e.g.:
+1. Broken iptables rules.
+2. Misconfigured routes.
+3. The NIC being down.
+4. BPF program problems.
+
+### Kernel problems
+
+Sometimes a kernel bug makes the node abnormal; check the kernel logs:
+
+```bash
+dmesg
+# journalctl -k
+```
+
+## Analyze kubelet and the container runtime
+
+First check whether kubelet and the runtime are running:
+
+```bash
+ps -ef | grep kubelet
+ps -ef | grep containerd
+# ps -ef | grep dockerd
+```
+
+Analyze the kubelet logs:
+
+```bash
+journalctl -u kubelet --since "3 hours ago" | grep $POD_NAME
+```
+
+Analyze the runtime logs:
+
+```bash
+journalctl -u containerd
+# journalctl -u dockerd
+```
+
+### Disk full
+
+If the disk holding the runtime's (docker, containerd, etc.) data directory is full, the runtime cannot create or destroy containers, so kubelet's delete calls get no response. The events look like:
+
+```bash
+Normal Killing 39s (x735 over 15h) kubelet, 10.179.80.31 Killing container with id docker://apigateway:Need to kill Pod
+```
+
+Fix: free up disk space.
+
+### Files with the "i" attribute
+
+If files in the container's image, or files written after the container started, carry the "i" attribute, they cannot be modified or deleted. Deleting the Pod cleans up the container's directories, but with undeletable files inside, the cleanup never finishes, the Pod stays Terminating, and kubelet reports:
+
+``` log
+Sep 27 14:37:21 VM_0_7_centos kubelet[14109]: E0927 14:37:21.922965 14109 remote_runtime.go:250] RemoveContainer "19d837c77a3c294052a99ff9347c520bc8acb7b8b9a9dc9fab281fc09df38257" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "19d837c77a3c294052a99ff9347c520bc8acb7b8b9a9dc9fab281fc09df38257": Error response from daemon: container 19d837c77a3c294052a99ff9347c520bc8acb7b8b9a9dc9fab281fc09df38257: driver "overlay2" failed to remove root filesystem: remove /data/docker/overlay2/b1aea29c590aa9abda79f7cf3976422073fb3652757f0391db88534027546868/diff/usr/bin/bash: operation not permitted
+Sep 27 14:37:21 VM_0_7_centos kubelet[14109]: E0927 14:37:21.923027 14109 kuberuntime_gc.go:126] Failed to remove container "19d837c77a3c294052a99ff9347c520bc8acb7b8b9a9dc9fab281fc09df38257": rpc error: code = Unknown desc = failed to remove container "19d837c77a3c294052a99ff9347c520bc8acb7b8b9a9dc9fab281fc09df38257": Error response from daemon: container 19d837c77a3c294052a99ff9347c520bc8acb7b8b9a9dc9fab281fc09df38257: driver "overlay2" failed to remove root filesystem: remove /data/docker/overlay2/b1aea29c590aa9abda79f7cf3976422073fb3652757f0391db88534027546868/diff/usr/bin/bash: operation not permitted
+```
+
+`man chattr` describes the "i" attribute:
+
+``` txt
+ A file with the 'i' attribute cannot be modified: it cannot be deleted or renamed, no
+link can be created to this file and no data can be written to the file. Only the superuser
+or a process possessing the CAP_LINUX_IMMUTABLE capability can set or clear this attribute.
+```
+
+The real fix is of course not to set the "i" attribute in the container image or in the running container. To recover temporarily: copy the file path from the kubelet error and run `chattr -i <file-path>`:
+
+``` bash
+chattr -i /data/docker/overlay2/b1aea29c590aa9abda79f7cf3976422073fb3652757f0391db88534027546868/diff/usr/bin/bash
+```
+
+Then wait for kubelet to retry automatically, and the Pod can be deleted.
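+To confirm whether a suspicious file really carries the immutable attribute, it can be inspected with `lsattr` before and after clearing it (a quick sketch using the path from the log above):
+
+``` bash
+# an 'i' in the attribute flags means the file is immutable
+lsattr /data/docker/overlay2/b1aea29c590aa9abda79f7cf3976422073fb3652757f0391db88534027546868/diff/usr/bin/bash
+```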
+### The docker 17 bug
+
+docker hangs without any response; the events show:
+
+```bash
+Warning FailedSync 3m (x408 over 1h) kubelet, 10.179.80.31 error determining status: rpc error: code = DeadlineExceeded desc = context deadline exceeded
+```
+
+This is suspected to be a bug in dockerd 17. You can force-delete the Pod with `kubectl -n cn-staging delete pod apigateway-6dc48bf8b6-clcwk --force --grace-period=0`, but `docker ps` still shows the container.
+
+Recommendations:
+
+* Upgrade to docker 18, which uses a newer containerd and fixes many bugs.
+* When a Pod gets stuck Terminating this way, consider preserving the scene and letting a container expert investigate; force-deleting directly is not recommended, as it may cause business-level problems.
+
+### The list-watch bug in old kubelet versions
+
+We once hit this with Kubernetes v1.8.13: kubelet's list-watch occasionally broke, so after the Pod was deleted kubelet never received the event, never performed the deletion, and the Pod stayed in Terminating.
+
+### dockerd and containerd state out of sync
+
+To tell whether dockerd's and containerd's view of a container has diverged:
+
+* describe pod to get the container id.
+* The state shown by docker ps is the state dockerd keeps.
+* Use docker-container-ctr to check the container's state inside containerd, e.g.:
+ ``` bash
+ $ docker-container-ctr --namespace moby --address /var/run/docker/containerd/docker-containerd.sock task ls |grep a9a1785b81343c3ad2093ad973f4f8e52dbf54823b8bb089886c8356d4036fe0
+ a9a1785b81343c3ad2093ad973f4f8e52dbf54823b8bb089886c8356d4036fe0 30639 STOPPED
+ ```
+
+If containerd sees the container as stopped (or has no record of it at all) while docker sees it as running, the container state between dockerd and containerd is out of sync. So far we have found that docker with the aufs storage driver may trigger a kernel panic when the disk fills up:
+
+``` txt
+aufs au_opts_verify:1597:dockerd[5347]: dirperm1 breaks the protection by the permission bits on the lower branch
+```
+
+If the disk has filled up, dockerd usually logs something like:
+
+``` log
+Sep 18 10:19:49 VM-1-33-ubuntu dockerd[4822]: time="2019-09-18T10:19:49.903943652+08:00" level=error msg="Failed to log msg \"\" for logger json-file: write /opt/docker/containers/54922ec8b1863bcc504f6dac41e40139047f7a84ff09175d2800100aaccbad1f/54922ec8b1863bcc504f6dac41e40139047f7a84ff09175d2800100aaccbad1f-json.log: no space left on device"
+```
+
+State desync may follow; an issue has been filed: https://github.com/docker/for-linux/issues/779
+
+* Temporary recovery: run `docker container prune` or restart dockerd.
+* Long-term fix: use containerd directly as the runtime, bypassing dockerd and its various bugs.
+
+### The DaemonSet controller bug
+
+A Kubernetes bug can leave daemonset Pods Terminating forever; versions 1.10 and 1.11 are affected. The daemonset controller reuses the scheduler's predicates logic, which sorts the nodeAffinity nodeSelector array in place (it is passed by pointer), so the spec diverges from the one in the apiserver. The daemonset controller also computes a hash over the spec for rollingUpdate version control, and the mismatch causes an endless loop of starting and stopping.
+
+* issue: https://github.com/kubernetes/kubernetes/issues/66298
+* fix PR: https://github.com/kubernetes/kubernetes/pull/66480
+
+Upgrading the cluster fixes this for good; as a temporary workaround, avoid nodeAffinity on rollingUpdate daemonsets and use nodeSelector instead.
+
+### The mounted directory is held by another process
+
+dockerd reports `device or resource busy`:
+
+``` bash
+May 09 09:55:12 VM_0_21_centos dockerd[6540]: time="2020-05-09T09:55:12.774467604+08:00" level=error msg="Handler for DELETE /v1.38/containers/b62c3796ea2ed5a0bd0eeed0e8f041d12e430a99469dd2ced6f94df911e35905 returned error: container b62c3796ea2ed5a0bd0eeed0e8f041d12e430a99469dd2ced6f94df911e35905: driver \"overlay2\" failed to remove root filesystem: remove /data/docker/overlay2/8bde3ec18c5a6915f40dd8adc3b2f296c1e40cc1b2885db4aee0a627ff89ef59/merged: device or resource busy"
+```
+
+Find out who is "holding on to" the directory:
+
+``` bash
+$ grep 8bde3ec18c5a6915f40dd8adc3b2f296c1e40cc1b2885db4aee0a627ff89ef59 /proc/*/mountinfo
+/proc/27187/mountinfo:4500 4415 0:898 / /var/lib/docker/overlay2/8bde3ec18c5a6915f40dd8adc3b2f296c1e40cc1b2885db4aee0a627ff89ef59/merged rw,relatime - overlay overlay rw,lowerdir=/data/docker/overlay2/l/DNQH6VPJHFFANI36UDKS262BZK:/data/docker/overlay2/l/OAYZKUKWNH7GPT4K5MFI6B7OE5:/data/docker/overlay2/l/ANQD5O27DRMTZJG7CBHWUA65YT:/data/docker/overlay2/l/G4HYAKVIRVUXB6YOXRTBYUDVB3:/data/docker/overlay2/l/IRGHNAKBHJUOKGLQBFBQTYFCFU:/data/docker/overlay2/l/6QG67JLGKMFXGVB5VCBG2VYWPI:/data/docker/overlay2/l/O3X5VFRX2AO4USEP2ZOVNLL4ZK:/data/docker/overlay2/l/H5Q5QE6DMWWI75ALCIHARBA5CD:/data/docker/overlay2/l/LFISJNWBKSRTYBVBPU6PH3YAAZ:/data/docker/overlay2/l/JSF6H5MHJEC4VVAYOF5PYIMIBQ:/data/docker/overlay2/l/7D2F45I5MF2EHDOARROYPXCWHZ:/data/docker/overlay2/l/OUJDAGNIZXVBKBWNYCAUI5YSGG:/data/docker/overlay2/l/KZLUO6P3DBNHNUH2SNKPTFZOL7:/data/docker/overlay2/l/O2BPSFNCVXTE4ZIWGYSRPKAGU4,upperdir=/data/docker/overlay2/8bde3ec18c5a6915f40dd8adc3b2f296c1e40cc1b2885db4aee0a627ff89ef59/diff,workdir=/data/docker/overlay2/8bde3ec18c5a6915f40dd8adc3b2f296c1e40cc1b2885db4aee0a627ff89ef59/work
+/proc/27187/mountinfo:4688 4562 0:898 / /var/lib/docker/overlay2/81c322896bb06149c16786dc33c83108c871bb368691f741a1e3a9bfc0a56ab2/merged/data/docker/overlay2/8bde3ec18c5a6915f40dd8adc3b2f296c1e40cc1b2885db4aee0a627ff89ef59/merged rw,relatime - overlay overlay rw,lowerdir=/data/docker/overlay2/l/DNQH6VPJHFFANI36UDKS262BZK:/data/docker/overlay2/l/OAYZKUKWNH7GPT4K5MFI6B7OE5:/data/docker/overlay2/l/ANQD5O27DRMTZJG7CBHWUA65YT:/data/docker/overlay2/l/G4HYAKVIRVUXB6YOXRTBYUDVB3:/data/docker/overlay2/l/IRGHNAKBHJUOKGLQBFBQTYFCFU:/data/docker/overlay2/l/6QG67JLGKMFXGVB5VCBG2VYWPI:/data/docker/overlay2/l/O3X5VFRX2AO4USEP2ZOVNLL4ZK:/data/docker/overlay2/l/H5Q5QE6DMWWI75ALCIHARBA5CD:/data/docker/overlay2/l/LFISJNWBKSRTYBVBPU6PH3YAAZ:/data/docker/overlay2/l/JSF6H5MHJEC4VVAYOF5PYIMIBQ:/data/docker/overlay2/l/7D2F45I5MF2EHDOARROYPXCWHZ:/data/docker/overlay2/l/OUJDAGNIZXVBKBWNYCAUI5YSGG:/data/docker/overlay2/l/KZLUO6P3DBNHNUH2SNKPTFZOL7:/data/docker/overlay2/l/O2BPSFNCVXTE4ZIWGYSRPKAGU4,upperdir=/data/docker/overlay2/8bde3ec18c5a6915f40dd8adc3b2f296c1e40cc1b2885db4aee0a627ff89ef59/diff,workdir=/data/docker/overlay2/8bde3ec18c5a6915f40dd8adc3b2f296c1e40cc1b2885db4aee0a627ff89ef59/work
+```
+
+> Substitute your own container id.
+
+With the process number, inspect the process in more detail:
+
+``` bash
+ps -f 27187
+```
+
+> See [troubleshooting device or resource busy](../device-or-resource-busy.md) for more.
+
+## Check finalizers
+
+If a resource's metadata contains `finalizers`, the resource was generally created by some program that added its own identifier to the resource's `finalizers`. This means that when the resource is deleted, the creating program must do the pre-deletion cleanup and then remove its identifier from the `finalizers`; only then is the resource finally deleted. For example, some resources created by Rancher carry `finalizers` entries.
+
+Suggested handling: edit the resource definition with `kubectl edit` and delete the `finalizers`; check the resource again afterwards and it will be gone.
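+If you have confirmed that the controller owning a finalizer is gone for good, the finalizers can also be cleared with a one-line patch instead of interactive editing (use with care, since this skips whatever cleanup the finalizer was guarding):
+
+```bash
+kubectl patch pod <pod-name> --type=merge -p '{"metadata":{"finalizers":null}}'
+```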
+## Check whether terminationGracePeriodSeconds is too large
+
+If both of the following hold:
+1. The Pod sets a very large `terminationGracePeriodSeconds` (e.g. 86400).
+2. The main process does not handle SIGTERM (e.g. the main process is a shell or systemd).
+
+then deleting the Pod cannot finish promptly: the process is only force-killed after the `terminationGracePeriodSeconds` timeout, and with a very long timeout it looks like the Pod is stuck in Terminating.
+
+Solutions:
+1. Wait for the timeout to expire and the Pod to be deleted automatically.
+2. Force-delete with kubectl:
+   ```bash
+   kubectl delete pod --force --grace-period=0 POD_NAME
+   ```
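+A minimal sketch of keeping graceful termination short and actually handled (the values and the preStop command are illustrative):
+
+```bash
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: graceful-demo
+spec:
+  terminationGracePeriodSeconds: 30  # keep this reasonably small (30 is the default)
+  containers:
+  - name: app
+    image: nginx
+    lifecycle:
+      preStop:
+        exec:
+          command: ["sh", "-c", "nginx -s quit"]  # let the app drain before SIGTERM/SIGKILL
+EOF
+```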
+
+## The propagation type problem
+
+The Pod events report:
+
+```txt
+unlinkat /var/run/netns/cni-49ddd103-d374-1f86-7324-13abaeb9c910: device or resource busy
+```
+
+See [mounting the root directory causing device or resource busy](../../cases/runtime/mount-root-causing-device-or-resource-busy.md) for the cause and fix.
diff --git a/content/troubleshooting/sdk.md b/content/troubleshooting/sdk.md
new file mode 100644
index 0000000..7fb9c5b
--- /dev/null
+++ b/content/troubleshooting/sdk.md
@@ -0,0 +1,15 @@
+# SDK troubleshooting
+
+## Python SDK reports a certificate hostname mismatch
+
+Using the Kubernetes [python SDK](https://github.com/kubernetes-client/python), you get:
+
+```txt
+hostname '10.10.36.196' doesn't match either of 'cls-bx5o9kt5-apiserver-service', 'kubernetes', 'kubernetes.default', 'kubernetes.default.svc', 'kubernetes.default.svc.cluster.local', 'localhost'
+```
+
+The usual cause is Python dependency versions that do not meet the requirements, mainly:
+* urllib3>=1.24.2
+* ipaddress>=1.0.17
+
+See the [official documentation](https://github.com/kubernetes-client/python/blob/master/README.md#hostname-doesnt-match).
\ No newline at end of file
diff --git a/content/troubleshooting/skill/enter-netns-with-nsenter.md b/content/troubleshooting/skill/enter-netns-with-nsenter.md
new file mode 100644
index 0000000..e2938ba
--- /dev/null
+++ b/content/troubleshooting/skill/enter-netns-with-nsenter.md
@@ -0,0 +1,81 @@
+# Entering a netns with nsenter to capture packets
+
+## Background
+
+When using Kubernetes we inevitably run into network problems, and we often need to enter the container's network namespace (netns) to do some network debugging and locate the issue. This article shows how to enter a container's netns.
+
+## Get the container ID
+
+Use kubectl to get the id of any container in the pod:
+
+```bash
+kubectl -n test describe pod debug-685b48bcf5-ggn5d
+```
+
+Sample output fragment 1 (containerd runtime):
+
+```txt
+Containers:
+ debug:
+ Container ID: containerd://529bbd5c935562a9ba66fc9b9ffa95d486c6324f26d8253d744ffe3dfd728289
+```
+
+Sample output fragment 2 (dockerd runtime):
+
+```txt
+Containers:
+ debug:
+ Container ID: docker://e64939086488a9302821566b0c1f193b755c805f5ff5370d5ce5e6f154ffc648
+```
+
+## Get the PID
+
+With the container id, log in to the pod's node and look up the container's main-process pid.
+
+On a containerd runtime, use crictl:
+
+```bash
+$ crictl inspect 529bbd5c935562a9ba66fc9b9ffa95d486c6324f26d8253d744ffe3dfd728289 | grep -i pid
+ "pid": 2266462,
+ "pid": 1
+ "type": "pid"
+```
+
+> Here the pid is 2266462.
+
+On a dockerd runtime, use docker:
+
+```bash
+$ docker inspect e64939086488a9302821566b0c1f193b755c805f5ff5370d5ce5e6f154ffc648 | grep -i pid
+ "Pid": 910351,
+ "PidMode": "",
+ "PidsLimit": 0,
+```
+
+> Here the pid is 910351.
+
+## Enter the container's netns with nsenter
+
+On the node, enter the pod's netns:
+
+```bash
+nsenter -n --target 910351
+```
+
+## Debug the network
+
+Now that you are inside the container's netns, you can debug with the node's network tools. Start with `ip a` to verify that the ip address is indeed the pod ip:
+
+```bash
+$ ip a
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+3: eth0@if8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
+ link/ether 6a:c6:6f:67:dd:6c brd ff:ff:ff:ff:ff:ff link-netnsid 0
+ inet 172.18.0.67/26 brd 172.18.0.127 scope global eth0
+ valid_lft forever preferred_lft forever
+```
+
+To capture packets you can likewise use the node's tcpdump; an example follows.
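+For example (a sketch; adjust the interface, filter and target PID to your case):
+
+```bash
+# inside the netns entered above, write a capture file for later analysis
+tcpdump -i eth0 -nn port 80 -w /tmp/pod.pcap
+
+# or run a one-shot capture without keeping a shell open in the netns
+nsenter -n --target 910351 tcpdump -i eth0 -nn port 80
+```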
\ No newline at end of file
diff --git a/content/troubleshooting/skill/linux.md b/content/troubleshooting/skill/linux.md
new file mode 100644
index 0000000..1c121fd
--- /dev/null
+++ b/content/troubleshooting/skill/linux.md
@@ -0,0 +1,261 @@
+# Common Linux troubleshooting commands
+
+## Check socket buffers
+
+Check for backlog:
+
+```bash
+$ netstat -antup | awk '{if($2>100||$3>100){print $0}}'
+Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
+tcp 2066 36 9.134.55.160:8000 10.35.16.97:63005 ESTABLISHED 1826655/nginx
+```
+
+* `Recv-Q` is the receive queue. If it keeps piling up, either the load is high and the application cannot keep up, or the program has a bug and is stuck, not reading from the buffer; check where the pid's stack is stuck (`cat /proc/$PID/stack`).
+
+Check for packet drops caused by full UDP buffers:
+
+```bash
+# statistics via netstat
+$ netstat -s | grep "buffer errors"
+ 429469 receive buffer errors
+ 23568 send buffer errors
+
+# or check the counters with nstat
+$ nstat -az | grep -E 'UdpRcvbufErrors|UdpSndbufErrors'
+UdpRcvbufErrors 429469 0.0
+UdpSndbufErrors 23568 0.0
+```
+
+For TCP, a full send buffer does not drop packets; it only blocks the program's sends until buffer space frees up. A full receive buffer does cause drops, visible via this counter:
+
+```bash
+$ nstat -az | grep TcpExtTCPRcvQDrop
+TcpExtTCPRcvQDrop 264324 0.0
+```
+
+Check the current UDP buffers:
+
+```bash
+$ ss -nump
+Recv-Q Send-Q Local Address:Port Peer Address:Port Process
+0 0 10.10.4.26%eth0:68 10.10.4.1:67 users:(("NetworkManager",pid=960,fd=22))
+ skmem:(r0,rb212992,t0,tb212992,f0,w0,o640,bl0,d0)
+```
+
+* rb212992 means the UDP receive buffer is 212992 bytes; tb212992 means the UDP send buffer is 212992 bytes.
+* Recv-Q and Send-Q are the bytes currently in the receive and send buffers respectively.
+
+Check the current TCP buffers:
+
+```bash
+$ ss -ntmp
+ESTAB 0 0 [::ffff:109.244.190.163]:9988 [::ffff:10.10.4.26]:54440 users:(("xray",pid=3603,fd=20))
+ skmem:(r0,rb12582912,t0,tb12582912,f0,w0,o0,bl0,d0)
+```
+
+* rb12582912 means the TCP receive buffer is 12582912 bytes; tb12582912 means the TCP send buffer is 12582912 bytes.
+* Recv-Q and Send-Q are the bytes currently in the receive and send buffers respectively.
+
+## Check the listen queue
+
+```bash
+$ ss -lnt
+State Recv-Q Send-Q Local Address:Port Peer Address:Port
+LISTEN 129 128 *:80 *:*
+```
+
+> `Recv-Q` is the number of connections in the accept queue. If it is full (`Recv-Q` is one greater than `Send-Q`), either concurrency or load is too high and the program cannot keep up, or the program has a bug and is stuck, not taking connections off the accept queue; check where the pid's stack is stuck (`cat /proc/$PID/stack`).
+
+## Check network counters
+
+```bash
+$ nstat -az
+...
+TcpExtListenOverflows 12178939 0.0
+TcpExtListenDrops 12247395 0.0
+...
+```
+
+```bash
+netstat -s | grep -E 'drop|overflow'
+```
+
+> Overflows mean the accept queue has been full at some point; check the listen queue to catch it live.
+
+## Check conntrack
+
+```bash
+$ conntrack -S
+cpu=0 found=770 invalid=3856 ignore=42570125 insert=0 insert_failed=0 drop=0 early_drop=0 error=0 search_restart=746284
+cpu=1 found=784 invalid=3647 ignore=41988392 insert=0 insert_failed=0 drop=0 early_drop=0 error=0 search_restart=718963
+cpu=2 found=25588 invalid=71264 ignore=243330690 insert=0 insert_failed=0 drop=0 early_drop=0 error=0 search_restart=2319295
+cpu=3 found=25706 invalid=70168 ignore=242616824 insert=0 insert_failed=0 drop=0 early_drop=0 error=18 search_restart=2320376
+```
+
+* A non-zero `insert_failed` means conntrack insertions failed, which causes packet drops.
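+When insertion failures show up, it is also worth checking how full the conntrack table is (a sketch; these are the standard nf_conntrack sysctls):
+
+```bash
+# current entry count vs. the table's limit
+sysctl net.netfilter.nf_conntrack_count net.netfilter.nf_conntrack_max
+```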
+
+## Count connections
+
+With ss available, use `ss -s`:
+
+```bash
+$ ss -s
+Total: 470
+TCP: 220 (estab 47, closed 150, orphaned 0, timewait 71)
+
+Transport Total IP IPv6
+RAW 0 0 0
+UDP 63 60 3
+TCP 70 55 15
+INET 133 115 18
+FRAG 0 0 0
+```
+
+Without `ss`, you can also count the current TCP connections by state with a script:
+
+```bash
+netstat -n | awk '/^tcp/ {++S[$NF]} END {for(a in S) print a, S[a]}'
+```
+
+Sample output:
+
+```txt
+ESTABLISHED 18
+TIME_WAIT 457
+```
+
+Or count `/proc` directly:
+
+```bash
+cat /proc/net/tcp* | wc -l
+```
+
+## Test network connectivity
+
+telnet in a loop to see whether the network is reachable:
+
+```bash
+while true; do echo "" | telnet 10.0.0.3 443; sleep 0.1; done
+```
+
+* `ctrl+c` stops the test.
+* Replace `10.0.0.3` and `443` with the IP/domain and port to test.
+
+If telnet is not installed, nc also works:
+
+```bash
+$ nc -vz 10.0.0.3 443
+```
+
+## Investigate traffic spikes
+
+### iftop to spot high-traffic IPs
+
+```bash
+$ iftop
+10.21.45.8 => 10.111.100.101 3.35Mb 2.92Mb 2.94Mb
+ <= 194Mb 160Mb 162Mb
+10.21.45.8 => 10.121.101.22 3.41Mb 2.89Mb 3.04Mb
+ <= 192Mb 159Mb 172Mb
+10.21.45.8 => 10.22.122.55 279Kb 313Kb 292Kb
+ <= 11.3Kb 12.1Kb 11.9Kb
+...
+```
+
+### netstat to inspect the high-traffic IP's connections
+
+```bash
+$ netstat -np | grep 10.121.101.22
+tcp 0 0 10.21.45.8:48320 10.121.101.22:12002 TIME_WAIT -
+tcp 0 0 10.21.45.8:59179 10.121.101.22:12002 TIME_WAIT -
+tcp 0 0 10.21.45.8:55835 10.121.101.22:12002 TIME_WAIT -
+tcp 0 0 10.21.45.8:49420 10.121.101.22:12002 TIME_WAIT -
+tcp 0 0 10.21.45.8:55559 10.121.101.22:12002 TIME_WAIT -
+...
+```
+
+## Investigate resource usage
+
+### Files in use
+
+See which processes are reading/writing a file:
+
+```bash
+lsof <file-name>
+```
+
+See which files a process has open:
+
+```bash
+lsof -p <PID>
+```
+
+### Port usage
+
+See who occupies port 22:
+
+```bash
+lsof -i :22
+```
+
+```bash
+netstat -tunlp | grep 22
+```
+
+## Show the process tree
+
+```bash
+$ pstree -apnhs 3356537
+systemd,1 --switched-root --system --deserialize 22
+ └─containerd,3895
+ └─{containerd},3356537
+```
+
+## Compare CPU performance
+
+Time a pi computation; the shorter it takes, the faster the CPU:
+
+```bash
+time echo "scale=5000; 4*a(1)"| bc -l -q
+```
+
+## Inspect certificates
+
+Inspect the certificate inside a secret:
+
+```bash
+kubectl get secret test-crt-secret -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -noout -text
+```
+
+Inspect a certificate file:
+
+```bash
+openssl x509 -noout -text -in test.crt
+```
+
+Inspect a remote endpoint's certificate:
+
+```bash
+echo | openssl s_client -connect imroc.cc:443 2>/dev/null | openssl x509 -noout -text
+```
+
+## Disk usage
+
+### Space usage
+
+```bash
+df -h
+```
+
+### inode usage
+
+```bash
+# df -i
+Filesystem Inodes IUsed IFree IUse% Mounted on
+/dev/vda1 6553600 283895 6269705 5% /
+/dev/vdb1 26214400 62421 26151979 1% /data
+
+$ tune2fs -l /dev/vda1 | grep -i inode
+Inode count: 6553600
+Free inodes: 6465438
+```
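+If inodes are exhausted while disk space looks fine, you can narrow down which directory consumes them (a sketch; `du --inodes` requires GNU coreutils 8.22+):
+
+```bash
+du --inodes -d 3 / 2>/dev/null | sort -n | tail -20
+```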
\ No newline at end of file
diff --git a/content/troubleshooting/skill/remote-capture-with-ksniff.md b/content/troubleshooting/skill/remote-capture-with-ksniff.md
new file mode 100644
index 0000000..9b6320e
--- /dev/null
+++ b/content/troubleshooting/skill/remote-capture-with-ksniff.md
@@ -0,0 +1,87 @@
+# Remote packet capture with ksniff
+
+## Overview
+
+What do you do when you need a packet capture to debug a network problem in Kubernetes? The traditional way is to log in to the Pod's node, [enter the Pod's netns with nsenter](enter-netns-with-nsenter), and then capture with the node's tcpdump, which is fairly tedious. Fortunately the community produced [ksniff](https://github.com/eldadru/ksniff), a small kubectl plugin that makes capturing in Kubernetes much simpler and quicker.
+
+This article shows how to capture a Pod's packets with ksniff.
+
+## Installation
+
+ksniff is usually installed via [krew](https://github.com/kubernetes-sigs/krew), the kubectl plugin manager:
+
+```bash
+kubectl krew install sniff
+```
+
+## Live analysis in wireshark
+
+Capture all of a Pod's NIC traffic and pipe it live into the locally installed wireshark, which pops up automatically:
+
+```bash
+kubectl -n test sniff website-7d7d96cdbf-6v4p6
+```
+
+You can filter and analyze live with wireshark's display filters:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152123.png)
+
+## Save the capture to a file
+
+In production you sometimes cannot run kubectl locally and have to go through a bastion host. In that case save the capture to a file, then copy it to your machine and analyze it in wireshark.
+
+Just add `-o` with the destination path:
+
+```bash
+kubectl -n test sniff website-7d7d96cdbf-6v4p6 -o test.pcap
+```
+
+## Privileged mode
+
+By default ksniff uploads a tcpdump binary into one of the target Pod's containers and runs it there. That only works if the container runs as root.
+
+If it does not, add the `-p` flag: ksniff then starts a privileged Pod on the same node, which asks the container runtime (dockerd or containerd, etc.) to start a root container attached to the target Pod's netns and runs the tcpdump binary inside it.
+
+Usage example:
+
+```bash
+kubectl -n test sniff website-7d7d96cdbf-6v4p6 -p
+```
+
+## View plaintext
+
+If the traffic is mostly plaintext (e.g. HTTP) and you just want a rough look, pass `-o -` to print the capture to stdout:
+
+```bash
+kubectl -n test sniff website-7d7d96cdbf-6v4p6 -o -
+```
+
+## Filter at capture time
+
+With high traffic volume, capturing without a filter can put noticeable pressure on the apiserver (the data passes through it), so it is best to give tcpdump a capture filter up front and screen out the data you do not need.
+
+Add `-f` to specify the filter, e.g.:
+
+```bash
+kubectl -n test sniff website-7d7d96cdbf-6v4p6 -f "port 80"
+```
+
+## FAQ
+
+### wireshark reports unknown
+
+Opening the capture file fails with `pcap: network type 276 unknown or unsupported`:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152136.png)
+
+This usually means your wireshark is too old; upgrade to the latest version.
+
+### No such file or directory during capture
+
+kubectl sniff fails with `ls: cannot access '/tmp/static-tcpdump': No such file or directory` and exits:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152145.png)
+
+I hit this on a Mac with ksniff v1.6.0, the latest at the time. It is clearly a bug: the static-tcpdump binary is executed before it has been uploaded successfully. Three workarounds:
+
+1. Manually `kubectl cp` the binary into the target Pod, then run kubectl sniff.
+2. Run kubectl sniff with `-p` for privileged mode (verified to work).
+3. Build the latest ksniff and replace the kubectl-sniff binary; this is what I currently use.
diff --git a/content/troubleshooting/skill/tcpdump.md b/content/troubleshooting/skill/tcpdump.md
new file mode 100644
index 0000000..ecc66c2
--- /dev/null
+++ b/content/troubleshooting/skill/tcpdump.md
@@ -0,0 +1,52 @@
+# Capturing and analyzing packets with tcpdump
+
+## Capture basics
+
+```bash
+# show captured packets live on the console
+tcpdump -i eth0 host 10.0.0.10 -nn -tttt
+tcpdump -i any host 10.0.0.10 -nn -tttt
+tcpdump -i any host 10.0.0.10 and port 8088 -nn -tttt
+# save the capture to a file
+tcpdump -i eth0 -w test.pcap
+# read a capture file
+tcpdump -r test.pcap -nn -tttt
+```
+
+Common flags:
+
+* `-r`: the capture file to read.
+* `-nn`: show numeric ip and port, without name resolution.
+* `-tttt`: timestamp format `2006-01-02 15:04:05.999999`.
+
+## Rotating captures
+
+```bash
+# rotate every 100M, keep at most 200 files (recommended: file sizes stay bounded,
+# and file mtimes tell you the time range of the packets)
+tcpdump -i eth0 port 8880 -w cvm.pcap -C 100 -W 200
+
+# rotate every 2 minutes, with a timestamp suffix
+tcpdump -i eth0 port 31780 -w node-10.70.10.101-%Y-%m%d-%H%M-%S.pcap -G 120
+```
+
+## Filter packets from connection timeouts (reset)
+
+When a connection times out, the client usually sends a reset packet; filter them with:
+
+```bash
+tcpdump -r test.pcap 'tcp[tcpflags] & (tcp-rst) != 0' -nn -ttt
+```
+
+## Count traffic by source IP
+
+```bash
+tcpdump -i eth0 dst port 60002 -c 10000|awk '{print $3}'|awk -F. -v OFS="." '{print $1,$2,$3,$4}'|sort |uniq -c|sort -k1 -n
+```
+
+Sample counts:
+
+```txt
+ 321 169.254.128.100
+ 409 10.0.0.175
+ 2202 10.0.226.49
+```
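+For long-running captures you can additionally truncate each packet to its headers with `-s` to keep the files small (a sketch; 128 bytes usually covers Ethernet/IP/TCP headers, at the cost of losing payloads):
+
+```bash
+tcpdump -i eth0 port 8880 -s 128 -w cvm.pcap -C 100 -W 200
+```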
diff --git a/content/troubleshooting/skill/use-systemtap-to-locate-problems.md b/content/troubleshooting/skill/use-systemtap-to-locate-problems.md
new file mode 100644
index 0000000..b068712
--- /dev/null
+++ b/content/troubleshooting/skill/use-systemtap-to-locate-problems.md
@@ -0,0 +1,151 @@
+# Locating hard problems with SystemTap
+
+## Installation
+
+### Ubuntu
+
+Install systemtap:
+
+```bash
+apt install -y systemtap
+```
+
+Run `stap-prep` to see what else needs to be installed:
+
+```bash
+$ stap-prep
+Please install linux-headers-4.4.0-104-generic
+You need package linux-image-4.4.0-104-generic-dbgsym but it does not seem to be available
+ Ubuntu -dbgsym packages are typically in a separate repository
+ Follow https://wiki.ubuntu.com/DebuggingProgramCrash to add this repository
+
+apt install -y linux-headers-4.4.0-104-generic
+```
+
+It says a dbgsym package is needed but not available in the current sources; it has to come from a third-party repository. Here is how to install the dbgsym packages (see the official wiki: [https://wiki.ubuntu.com/Kernel/Systemtap](https://wiki.ubuntu.com/Kernel/Systemtap)):
+
+```bash
+sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys C8CAB6595FDFF622
+
+codename=$(lsb_release -c | awk '{print $2}')
+sudo tee /etc/apt/sources.list.d/ddebs.list << EOF
+deb http://ddebs.ubuntu.com/ ${codename} main restricted universe multiverse
+deb http://ddebs.ubuntu.com/ ${codename}-security main restricted universe multiverse
+deb http://ddebs.ubuntu.com/ ${codename}-updates main restricted universe multiverse
+deb http://ddebs.ubuntu.com/ ${codename}-proposed main restricted universe multiverse
+EOF
+
+sudo apt-get update
+```
+
+With the source configured, run `stap-prep` again:
+
+```bash
+$ stap-prep
+Please install linux-headers-4.4.0-104-generic
+Please install linux-image-4.4.0-104-generic-dbgsym
+```
+
+It asks for these two packages, so install them:
+
+```bash
+apt install -y linux-image-4.4.0-104-generic-dbgsym
+apt install -y linux-headers-4.4.0-104-generic
+```
+
+### CentOS
+
+Install systemtap:
+
+```bash
+yum install -y systemtap
+```
+
+`debuginfo` is not installed by default; add the source `/etc/yum.repos.d/CentOS-Debug.repo`:
+
+```bash
+[debuginfo]
+name=CentOS-$releasever - DebugInfo
+baseurl=http://debuginfo.centos.org/$releasever/$basearch/
+gpgcheck=0
+enabled=1
+protect=1
+priority=1
+```
+
+Run `stap-prep` (this installs `kernel-debuginfo`).
+
+Finally, verify that `kernel-debuginfo` and `kernel-devel` are both installed and match the current kernel version; if multiple versions are present, remove the ones that do not match the current kernel (check it with `uname -r`).
+
+In particular, check for multiple versions of `kernel-devel`:
+
+```bash
+$ rpm -qa | grep kernel-devel
+kernel-devel-3.10.0-327.el7.x86_64
+kernel-devel-3.10.0-514.26.2.el7.x86_64
+kernel-devel-3.10.0-862.9.1.el7.x86_64
+```
+
+If there are several, keep only the one matching the current kernel. Say the current kernel is `3.10.0-862.9.1.el7.x86_64`; remove the extra versions with rpm:
+
+```bash
+rpm -e kernel-devel-3.10.0-327.el7.x86_64 kernel-devel-3.10.0-514.26.2.el7.x86_64
+```
+
+## Using systemtap to catch whoever kills your container
+
+A Pod keeps getting killed for no obvious reason? systemtap can watch signal delivery. It translates a script into C code, compiles it into a Linux kernel module with gcc, loads it into the kernel via `modprobe`, and installs hooks according to the script; here we hook signal sends to find out who killed the container's process.
+
+First, find the current pid of the killed-and-restarted container; describe the pod:
+
+```bash
+ ......
+ Container ID: docker://5fb8adf9ee62afc6d3f6f3d9590041818750b392dff015d7091eaaf99cf1c945
+ ......
+ Last State: Terminated
+ Reason: Error
+ Exit Code: 137
+ Started: Thu, 05 Sep 2019 19:22:30 +0800
+ Finished: Thu, 05 Sep 2019 19:33:44 +0800
+```
+
+From the container id, look up the container's main-process pid:
+
+```bash
+$ docker inspect -f "{{.State.Pid}}" 5fb8adf9ee62afc6d3f6f3d9590041818750b392dff015d7091eaaf99cf1c945
+7942
+```
+
+The `Exit Code` shows the container's last exit status. If the process was killed by an external interrupt signal, the code is in 129-255; 137 means it was killed by SIGKILL, but that alone does not tell us by whom.
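+On containerd-based nodes without the docker CLI, the same pid can usually be looked up with crictl instead (a sketch; recent crictl versions expose it under `.info.pid` in the inspect output):
+
+```bash
+crictl inspect --output go-template --template '{{.info.pid}}' <container-id>
+```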
+If the problem is reproducible, the following systemtap script watches who kills the container (save it as `sg.stp`):
+
+```bash
+global target_pid = 7942
+probe signal.send{
+ if (sig_pid == target_pid) {
+ printf("%s(%d) send %s to %s(%d)\n", execname(), pid(), sig_name, pid_name, sig_pid);
+ printf("parent of sender: %s(%d)\n", pexecname(), ppid())
+ printf("task_ancestry:%s\n", task_ancestry(pid2task(pid()), 1));
+ }
+}
+```
+
+* Replace the value of the `target_pid` variable with the main-process pid you found.
+
+Run the script:
+
+```bash
+stap sg.stp
+```
+
+When the container process is killed, the script catches the event and prints:
+
+```text
+pkill(23549) send SIGKILL to server(7942)
+parent of sender: bash(23495)
+task_ancestry:swapper/0(0m0.000000000s)=>systemd(0m0.080000000s)=>vGhyM0(19491m2.579563677s)=>sh(33473m38.074571885s)=>bash(33473m38.077072025s)=>bash(33473m38.081028267s)=>bash(33475m4.817798337s)=>pkill(33475m5.202486630s)
+```
+
+`task_ancestry` shows all ancestors of the killing process. Here we see an odd process name, `vGhyM0`, which usually indicates a trojan; a security expert should take over the investigation from there.
+
diff --git a/content/troubleshooting/skill/wireshark.md b/content/troubleshooting/skill/wireshark.md
new file mode 100644
index 0000000..aa085f2
--- /dev/null
+++ b/content/troubleshooting/skill/wireshark.md
@@ -0,0 +1,73 @@
+# Analyzing packets with wireshark
+
+## Analyzing DNS problems
+
+### Find DNS requests that got no response
+
+```txt
+dns && (dns.flags.response == 0) && !dns.response_in
+```
+
+### Filter by DNS request id
+
+```txt
+dns.id == 0xff0b
+```
+
+### Find slow responses
+
+Responses that took more than 100 ms:
+
+```txt
+dns.flags.rcode eq 0 and dns.time gt .1
+```
+
+### Filter NXDomain responses
+
+All `No such name` responses:
+
+```txt
+dns.flags.rcode == 3
+```
+
+Excluding in-cluster services:
+
+```txt
+((dns.flags.rcode == 3) && !(dns.qry.name contains ".local") && !(dns.qry.name contains ".svc") && !(dns.qry.name contains ".cluster"))
+```
+
+For a specific external domain:
+
+```txt
+((dns.flags.rcode == 3) && (dns.qry.name == "imroc.cc"))
+```
+
+## Analyzing TCP problems
+
+### Find requests with connection timeouts
+
+If a client-side connect timeout is not caused by a DNS resolution timeout, the TCP handshake timed out, usually because the server did not respond with SYNACK or responded too slowly.
+
+On timeout the client generally sends RST to the server; filter handshake-timeout packets with:
+
+```txt
+(tcp.flags.reset eq 1) and (tcp.flags.ack eq 0)
+```
+
+Filter packets where the server was slow to respond with SYNACK:
+
+```txt
+tcp.flags eq 0x012 && tcp.time_delta gt 0.0001
+```
+
+You can also add `Time since previous frame in this TCP stream` as a column:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152342.png)
+
+Click the column header to sort descending and surface the slow packets (add more filter conditions to hide the packets you don't care about):
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152349.png)
+
+After spotting a suspicious packet, use `Conversation Filter` to pull up the connection's complete conversation:
+
+![](https://image-host-1251893006.cos.ap-chengdu.myqcloud.com/2023%2F09%2F25%2F20230925152358.png)
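+The same display filters can also be applied on the command line with tshark, which is handy on servers without a GUI (a sketch; the `tcp.time_delta` field may additionally require the TCP "calculate conversation timestamps" preference):
+
+```bash
+# list all NXDomain DNS responses in a capture file
+tshark -r test.pcap -Y 'dns.flags.rcode == 3'
+
+# list handshake-timeout resets
+tshark -r test.pcap -Y '(tcp.flags.reset eq 1) and (tcp.flags.ack eq 0)'
+```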
diff --git a/content/troubleshooting/storage/setup-failed-for-volume.md b/content/troubleshooting/storage/setup-failed-for-volume.md
new file mode 100644
index 0000000..b6f32d7
--- /dev/null
+++ b/content/troubleshooting/storage/setup-failed-for-volume.md
@@ -0,0 +1,22 @@
+# MountVolume.SetUp failed for volume
+
+## failed to sync secret cache: timed out waiting for the condition
+
+The Pod reports warning events like:
+
+```txt
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Warning FailedMount 41m kubelet MountVolume.SetUp failed for volume "default-token-bgg5p" : failed to sync secret cache: timed out waiting for the condition
+```
+
+If it only happens occasionally and recovers quickly on its own, this is normal; don't worry. It usually means kubelet timed out calling the apiserver to fetch a configmap or secret, possibly because:
+1. It was rate-limited by the apiserver (many Pods on the node, or many Pods starting at once, generate many apiserver calls and trip a temporary limit); this normally recovers quickly.
+2. It was rate-limited by kubelet itself (by default a node sends at most 5 read requests/s and 10 requests/s in total to the apiserver).
+ ```txt
+ --kube-api-burst int32 Burst to use while talking with kubernetes apiserver. Doesn't cover events and node heartbeat apis which rate limiting is controlled by a different set of flags (default 10)
+ --kube-api-qps int32 QPS to use while talking with kubernetes apiserver. Doesn't cover events and node heartbeat apis which rate limiting is controlled by a different set of flags (default 5)
+ ```
+
+If the error persists instead, check the RBAC configuration.
\ No newline at end of file
diff --git a/content/troubleshooting/storage/unable-to-mount-volumes.md b/content/troubleshooting/storage/unable-to-mount-volumes.md
new file mode 100644
index 0000000..96061f4
--- /dev/null
+++ b/content/troubleshooting/storage/unable-to-mount-volumes.md
@@ -0,0 +1,67 @@
+# Unable to mount volumes
+
+## Symptom
+
+The Pod stays Pending with warning events like:
+
+```txt
+Unable to mount volumes for pod "es-0_prod(0f08e3aa-aa56-11ec-ab5b-5254006900dd)": timeout expired waiting for volumes to attach or mount for pod "prod"/"es-0". list of unmounted volumes=[applog]. list of unattached volumes=[applog default-token-m7bf7]
+```
+
+## Quick checks
+
+Start from the hints in the Pod's event log: besides `Unable to mount volumes`, are there other related events?
+
+### MountVolume.WaitForAttach failed
+
+The Pod reports events like:
+
+```txt
+MountVolume.WaitForAttach failed for volume "pvc-067327ac-00ec-11ec-bdce-5254001a6990" : Could not find attached disk("disk-68i8q1gq"). Timeout waiting for mount paths to be created.
+```
+
+This means the disk is waiting to be attached to the node, an operation usually performed by the cloud vendor's provisioner component calling the disk APIs (attach the disk to the VM). Check whether the disk is attached to the node; on clouds this is usually visible in the server's disk attachments in the console.
+
+If it is not attached, first check whether you hit this scenario:
+1. The pod restarted in place, and detaching the disk timed out.
+2. The container restarted in place quickly; controller-manager assumed the volume was still attached, skipped calling the CSI plugin to attach the disk, and marked the node as attached in node status.
+3. kubelet watched the node become attached, took the disk info to perform the mount, could not find the corresponding device, and errored out.
+
+If so, the only fix is to recreate the pod so it is scheduled to another node and attaches there.
+
+If not, it may be a problem in the CSI plugin itself; report it to the relevant engineers. A way to inspect the attach state recorded in the API is shown below.
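+On CSI-based clusters, the attach state is recorded as VolumeAttachment objects, which can be inspected directly (a sketch):
+
+```bash
+# see whether the PV is marked attached, and to which node
+kubectl get volumeattachment
+
+# drill into a specific attachment for error details
+kubectl describe volumeattachment <name>
+```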
+
+## Troubleshooting approach
+
+If the events don't settle it quickly, work through the problem step by step from the start. Here is the approach:
+
+1. Look at the pod definition and see which volumes it has:
+   ```bash
+   kubectl get pod $POD_NAME -o jsonpath='{.spec.volumes}' | jq
+   ```
+   ```json
+   [
+     {
+       "name": "applog",
+       "persistentVolumeClaim": {
+         "claimName": "applog-es-0"
+       }
+     },
+     {
+       "name": "default-token-m7bf7",
+       "secret": {
+         "defaultMode": 420,
+         "secretName": "default-token-m7bf7"
+       }
+     }
+   ]
+   ```
+2. Match the volume named in the event's `list of unmounted volumes`; it usually corresponds to a pvc, so note the pvc name.
+3. Check that the pvc status is `Bound`:
+   ```bash
+   $ kubectl get pvc applog-es-0
+   NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+   applog-es-0 Bound pvc-067327ac-00ec-11ec-bdce-5254001a6990 100Gi RWO cbs-stata 215d
+   ```
+4. If it is not `Bound`, there is no usable PV to bind yet. If PVs are created manually, create one; if it should be created automatically via a `StorageClass` but wasn't, describe the pvc and the event log should reveal the cause; if it still doesn't, check the corresponding provisioner component's logs.
+5. If it is `Bound`, the storage is ready and the problem is in the mount path: either the attach failed or the mount failed. For attach failures, analyze the `controller-manager` and CSI plugin logs; for mount failures, check the kubelet logs.
diff --git a/docusaurus.config.js b/docusaurus.config.js
new file mode 100644
index 0000000..a6bbc82
--- /dev/null
+++ b/docusaurus.config.js
@@ -0,0 +1,160 @@
+// @ts-check
+// `@type` JSDoc annotations allow editor autocompletion and type checking
+// (when paired with `@ts-check`).
+// There are various equivalent ways to declare your Docusaurus config.
+// See: https://docusaurus.io/docs/api/docusaurus-config
+
+/** @type {import('@docusaurus/types').Config} */
+const config = {
+  title: 'Kubernetes 实践指南', // site title
+  tagline: '云原生老司机带你飞', // slogan
+  favicon: 'img/logo.svg', // favicon for the e-book; replace as needed
+
+  url: 'https://imroc.cc', // url of the online e-book
+  baseUrl: '/kubernetes', // path of the e-book under the url; use "/" if there is no sub-path
+
+  // GitHub pages deployment config.
+  // If you aren't using GitHub pages, you don't need these.
+  organizationName: 'imroc', // GitHub org/user name
+  projectName: 'kubernetes-guide', // GitHub repo name
+
+  onBrokenLinks: 'warn', // avoid failing the build on broken path references
+  onBrokenMarkdownLinks: 'warn',
+
+  i18n: {
+    // default locale is Chinese
+    defaultLocale: 'zh-CN',
+    // without multi-language support, list Chinese only
+    locales: ['zh-CN'],
+  },
+
+  plugins: [
+    'docusaurus-plugin-sass', // enable the sass plugin for scss support
+    [
+      /** @type {import('@docusaurus/plugin-content-docs').PluginOptions} */
+      '@docusaurus/plugin-content-docs',
+      ({
+        id: 'kubernetes',
+        path: 'content',
+        // route prefix of the docs
+        routeBasePath: '/',
+        // sidebar config
+        sidebarPath: require.resolve('./content/sidebars.js'),
+        // the "edit this page" link at the bottom left of every doc
+        editUrl: ({ docPath }) =>
+          `https://github.com/imroc/kubernetes-guide/edit/master/content/${docPath}`,
+      }),
+    ],
+  ],
+
+  presets: [
+    [
+      'classic',
+      /** @type {import('@docusaurus/preset-classic').Options} */
+      ({
+        docs: false, // disable the preset's default docs; configuring plugin-content-docs directly is more flexible
+        blog: false, // disable the blog
+        theme: {
+          customCss: require.resolve('./src/css/custom.scss'), // custom.css renamed to custom.scss
+        },
+      }),
+    ],
+  ],
+
+  themeConfig:
+    /** @type {import('@docusaurus/preset-classic').ThemeConfig} */
+    ({
+      // algolia search
+      // algolia: {
+      //   appId: 'ZYXQ3PWXL9',
+      //   apiKey: '69800e3da186c48e981785d47ee09e6e',
+      //   indexName: 'imroc',
+      //   contextualSearch: true,
+      // },
+      // giscus comments
+      giscus: {
+        repo: 'imroc/kubernetes-guide',
+        repoId: 'R_kgDOG-4vhA',
+        category: 'General',
+        categoryId: 'DIC_kwDOG-4vhM4COPpN',
+      },
+      navbar: {
+        title: 'Kubernetes 实践指南', // e-book name at the top left
+        logo: {
+          alt: 'Kubernetes',
+          src: 'img/logo.svg', // e-book logo file; replace as needed
+        },
+        items: [
+          {
+            href: 'https://github.com/imroc/kubernetes-guide', // change to your own repo address
+            label: 'GitHub',
+            position: 'right',
+          },
+        ],
+      },
+      // custom footer
+      footer: {
+        style: 'dark',
+        links: [
+          {
+            title: '相关电子书',
+            items: [
+              {
+                label: 'istio 实践指南',
+                href: 'https://imroc.cc/istio',
+              },
+            ],
+          },
+          {
+            title: '更多',
+            items: [
+              {
+                label: 'roc 云原生',
+                href: 'https://imroc.cc',
+              },
+              {
+                label: 'GitHub',
+                href: 'https://github.com/imroc/kubernetes-guide',
+              },
+            ],
+          },
+        ],
+        copyright: `Copyright © ${new Date().getFullYear()} My Project, Inc. Built with Docusaurus.`,
+      },
+      // custom code highlighting
+      prism: {
+        theme: require('prism-react-renderer/themes/vsDark'),
+        magicComments: [
+          {
+            className: 'code-block-highlighted-line',
+            line: 'highlight-next-line',
+            block: { start: 'highlight-start', end: 'highlight-end' }
+          },
+          {
+            className: 'code-block-add-line',
+            line: 'highlight-add-line',
+            block: { start: 'highlight-add-start', end: 'highlight-add-end' }
+          },
+          {
+            className: 'code-block-update-line',
+            line: 'highlight-update-line',
+            block: { start: 'highlight-update-start', end: 'highlight-update-end' }
+          },
+          {
+            className: 'code-block-error-line',
+            line: 'highlight-error-line',
+            block: { start: 'highlight-error-start', end: 'highlight-error-end' }
+          },
+        ],
+        // languages enabled by default: https://github.com/FormidableLabs/prism-react-renderer/blob/master/packages/generate-prism-languages/index.ts#L9-L23
+        // prism supported languages: https://prismjs.com/#supported-languages
+        additionalLanguages: [
+          'java',
+          'json',
+          'hcl',
+        ],
+      },
+    }),
+};
+
+module.exports = config;
diff --git a/package-lock.json b/package-lock.json
new file mode 100644
index 0000000..1b2aa2b
--- /dev/null
+++ b/package-lock.json
@@ -0,0 +1,14055 @@
+{
+  "name": "kubernetes-guide",
+  "version": "0.0.0",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "kubernetes-guide",
+      "version": "0.0.0",
+      "dependencies": {
+        "@docusaurus/core": "3.0.0-beta.0",
+        "@docusaurus/preset-classic": "3.0.0-beta.0",
+        "@giscus/react": "^2.3.0",
+        "@mdx-js/react": "^2.3.0",
+        "clsx": "^1.2.1",
+        "docusaurus-plugin-sass": "^0.2.5",
+        "path-browserify": "^1.0.1",
+        "prism-react-renderer": "^1.3.5",
+        "raw-loader": "^4.0.2",
+        "react": "^18.0.0",
+        "react-dom": "^18.0.0",
+        "sass": "^1.69.3"
+      },
+      "devDependencies": {
+        "@docusaurus/module-type-aliases": "3.0.0-beta.0",
+        "@docusaurus/tsconfig": "3.0.0-beta.0",
+        "typescript": "~5.2.2"
+      },
+      "engines": {
+        "node": ">=16.14"
+      }
+    },
+    "node_modules/@algolia/autocomplete-core": {
+      "version": "1.8.2",
+      "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.8.2.tgz",
+      "integrity": "sha512-mTeshsyFhAqw/ebqNsQpMtbnjr+qVOSKXArEj4K0d7sqc8It1XD0gkASwecm9mF/jlOQ4Z9RNg1HbdA8JPdRwQ==",
+      "dependencies": {
+        "@algolia/autocomplete-shared": "1.8.2"
+      }
+    },
+    "node_modules/@algolia/autocomplete-preset-algolia": {
+      "version": "1.8.2",
+      "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.8.2.tgz",
+      "integrity": "sha512-J0oTx4me6ZM9kIKPuL3lyU3aB8DEvpVvR6xWmHVROx5rOYJGQcZsdG4ozxwcOyiiu3qxMkIbzntnV1S1VWD8yA==",
+      "dependencies": {
+        "@algolia/autocomplete-shared": "1.8.2"
+      },
+      "peerDependencies": {
+        "@algolia/client-search": ">= 4.9.1 < 6",
+        "algoliasearch": ">= 4.9.1 < 6"
+      }
+    },
+    "node_modules/@algolia/autocomplete-shared": {
+      "version": "1.8.2",
+      "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.8.2.tgz",
+      "integrity": "sha512-b6Z/X4MczChMcfhk6kfRmBzPgjoPzuS9KGR4AFsiLulLNRAAqhP+xZTKtMnZGhLuc61I20d5WqlId02AZvcO6g=="
+    },
+    "node_modules/@algolia/cache-browser-local-storage": {
+      "version": "4.20.0",
+      "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.20.0.tgz",
+      "integrity": "sha512-uujahcBt4DxduBTvYdwO3sBfHuJvJokiC3BP1+O70fglmE1ShkH8lpXqZBac1rrU3FnNYSUs4pL9lBdTKeRPOQ==",
+      "dependencies": {
+        "@algolia/cache-common": "4.20.0"
+      }
+    },
+    "node_modules/@algolia/cache-common": {
+      "version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.20.0.tgz", + "integrity": "sha512-vCfxauaZutL3NImzB2G9LjLt36vKAckc6DhMp05An14kVo8F1Yofb6SIl6U3SaEz8pG2QOB9ptwM5c+zGevwIQ==" + }, + "node_modules/@algolia/cache-in-memory": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.20.0.tgz", + "integrity": "sha512-Wm9ak/IaacAZXS4mB3+qF/KCoVSBV6aLgIGFEtQtJwjv64g4ePMapORGmCyulCFwfePaRAtcaTbMcJF+voc/bg==", + "dependencies": { + "@algolia/cache-common": "4.20.0" + } + }, + "node_modules/@algolia/client-account": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.20.0.tgz", + "integrity": "sha512-GGToLQvrwo7am4zVkZTnKa72pheQeez/16sURDWm7Seyz+HUxKi3BM6fthVVPUEBhtJ0reyVtuK9ArmnaKl10Q==", + "dependencies": { + "@algolia/client-common": "4.20.0", + "@algolia/client-search": "4.20.0", + "@algolia/transporter": "4.20.0" + } + }, + "node_modules/@algolia/client-analytics": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.20.0.tgz", + "integrity": "sha512-EIr+PdFMOallRdBTHHdKI3CstslgLORQG7844Mq84ib5oVFRVASuuPmG4bXBgiDbcsMLUeOC6zRVJhv1KWI0ug==", + "dependencies": { + "@algolia/client-common": "4.20.0", + "@algolia/client-search": "4.20.0", + "@algolia/requester-common": "4.20.0", + "@algolia/transporter": "4.20.0" + } + }, + "node_modules/@algolia/client-common": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.20.0.tgz", + "integrity": "sha512-P3WgMdEss915p+knMMSd/fwiHRHKvDu4DYRrCRaBrsfFw7EQHon+EbRSm4QisS9NYdxbS04kcvNoavVGthyfqQ==", + "dependencies": { + "@algolia/requester-common": "4.20.0", + "@algolia/transporter": "4.20.0" + } + }, + "node_modules/@algolia/client-personalization": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.20.0.tgz", + "integrity": "sha512-N9+zx0tWOQsLc3K4PVRDV8GUeOLAY0i445En79Pr3zWB+m67V+n/8w4Kw1C5LlbHDDJcyhMMIlqezh6BEk7xAQ==", + "dependencies": { + "@algolia/client-common": "4.20.0", + "@algolia/requester-common": "4.20.0", + "@algolia/transporter": "4.20.0" + } + }, + "node_modules/@algolia/client-search": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.20.0.tgz", + "integrity": "sha512-zgwqnMvhWLdpzKTpd3sGmMlr4c+iS7eyyLGiaO51zDZWGMkpgoNVmltkzdBwxOVXz0RsFMznIxB9zuarUv4TZg==", + "dependencies": { + "@algolia/client-common": "4.20.0", + "@algolia/requester-common": "4.20.0", + "@algolia/transporter": "4.20.0" + } + }, + "node_modules/@algolia/events": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", + "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" + }, + "node_modules/@algolia/logger-common": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.20.0.tgz", + "integrity": "sha512-xouigCMB5WJYEwvoWW5XDv7Z9f0A8VoXJc3VKwlHJw/je+3p2RcDXfksLI4G4lIVncFUYMZx30tP/rsdlvvzHQ==" + }, + "node_modules/@algolia/logger-console": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.20.0.tgz", + "integrity": "sha512-THlIGG1g/FS63z0StQqDhT6bprUczBI8wnLT3JWvfAQDZX5P6fCg7dG+pIrUBpDIHGszgkqYEqECaKKsdNKOUA==", + "dependencies": { + "@algolia/logger-common": "4.20.0" + } + }, + 
"node_modules/@algolia/requester-browser-xhr": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.20.0.tgz", + "integrity": "sha512-HbzoSjcjuUmYOkcHECkVTwAelmvTlgs48N6Owt4FnTOQdwn0b8pdht9eMgishvk8+F8bal354nhx/xOoTfwiAw==", + "dependencies": { + "@algolia/requester-common": "4.20.0" + } + }, + "node_modules/@algolia/requester-common": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.20.0.tgz", + "integrity": "sha512-9h6ye6RY/BkfmeJp7Z8gyyeMrmmWsMOCRBXQDs4mZKKsyVlfIVICpcSibbeYcuUdurLhIlrOUkH3rQEgZzonng==" + }, + "node_modules/@algolia/requester-node-http": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.20.0.tgz", + "integrity": "sha512-ocJ66L60ABSSTRFnCHIEZpNHv6qTxsBwJEPfYaSBsLQodm0F9ptvalFkHMpvj5DfE22oZrcrLbOYM2bdPJRHng==", + "dependencies": { + "@algolia/requester-common": "4.20.0" + } + }, + "node_modules/@algolia/transporter": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.20.0.tgz", + "integrity": "sha512-Lsii1pGWOAISbzeyuf+r/GPhvHMPHSPrTDWNcIzOE1SG1inlJHICaVe2ikuoRjcpgxZNU54Jl+if15SUCsaTUg==", + "dependencies": { + "@algolia/cache-common": "4.20.0", + "@algolia/logger-common": "4.20.0", + "@algolia/requester-common": "4.20.0" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", + "dependencies": { + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": 
"sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.2.tgz", + "integrity": "sha512-0S9TQMmDHlqAZ2ITT95irXKfxN9bncq8ZCoJhun3nHL/lLUxd2NKBJYoNGWH7S0hz6fRQwWlAWn/ILM0C70KZQ==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.2.tgz", + "integrity": "sha512-n7s51eWdaWZ3vGT2tD4T7J6eJs3QoBXydv7vkUM06Bf1cbVD2Kc2UrkzhiQwobfV7NwOnQXYL7UBJ5VPU+RGoQ==", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-module-transforms": "^7.23.0", + "@babel/helpers": "^7.23.2", + "@babel/parser": "^7.23.0", + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.23.2", + "@babel/types": "^7.23.0", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", + "dependencies": { + "@babel/types": "^7.23.0", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", + "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz", + "integrity": "sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==", + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz", + "integrity": "sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw==", + "dependencies": { + "@babel/compat-data": "^7.22.9", + "@babel/helper-validator-option": "^7.22.15", + "browserslist": "^4.21.9", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.15.tgz", + "integrity": "sha512-jKkwA59IXcvSaiK2UN45kKwSC9o+KuoXsBDvHvU/7BecYIp8GQ2UwrVvFgJASUT+hBnwJx6MhvMCuMzwZZ7jlg==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-member-expression-to-functions": "^7.22.15", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz", + "integrity": "sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + 
"version": "0.4.3", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.3.tgz", + "integrity": "sha512-WBrLmuPP47n7PNwsZ57pqam6G/RGo1vw/87b0Blc53tZNGZ4x7YvZ6HgQe2vo1W/FR20OgjeZuGXzudPiXHFug==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.23.0.tgz", + "integrity": "sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA==", + "dependencies": { + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", + "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.0.tgz", + "integrity": "sha512-WhDWw1tdrlT0gMgUJSlX0IQvoO1eN279zrAUbVB+KpV2c3Tylz8+GnKOLllCS6Z/iZQEyVYxhZVUdPTqs2YYPw==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-simple-access": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", + "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", + "dependencies": { + 
"@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", + "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz", + "integrity": "sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-wrap-function": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.20.tgz", + "integrity": "sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-member-expression-to-functions": "^7.22.15", + "@babel/helper-optimise-call-expression": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", + "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", + "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/helper-validator-option": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz", + "integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.20.tgz", + "integrity": "sha512-pms/UwkOpnQe/PDAEdV/d7dVCoBbB+R4FvYoHGZz+4VPcg7RtYy2KP7S2lbuWM6FCSgob5wshfGESbC/hzNXZw==", + "dependencies": { + "@babel/helper-function-name": "^7.22.5", + "@babel/template": "^7.22.15", + "@babel/types": "^7.22.19" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.2.tgz", + "integrity": "sha512-lzchcp8SjTSVe/fPmLwtWVBFC7+Tbn8LGHDVfDp9JGxpAY5opSaEFgt8UQvrnECWOTdji2mOWMz1rOhkHscmGQ==", + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.23.2", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", + "dependencies": { + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.15.tgz", + "integrity": "sha512-FB9iYlz7rURmRJyXRKEnalYPPdn87H5no108cyuQQyMwlpJ2SJtpIUBI27kdTin956pz+LPypkPVPUTlxOmrsg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.15.tgz", + "integrity": "sha512-Hyph9LseGvAeeXzikV88bczhsrLrIZqDPxO+sSmAunMPaGrBGhfMWzCPYTtiW9t+HzSE2wtV8e5cc5P6r1xMDQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-transform-optional-chaining": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dependencies": { + 
"@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz", + "integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz", + "integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz", + "integrity": 
"sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": 
"^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz", + "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz", + "integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.23.2.tgz", + "integrity": "sha512-BBYVGxbDVHfoeXbOwcagAkOQAm9NxoTdMGfTqghu1GrvadSaw6iW3Je6IcL5PNOw8VwjxqBECXy50/iCQSY/lQ==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.20", + "@babel/plugin-syntax-async-generators": "^7.8.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz", + "integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==", + "dependencies": { + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz", + 
"integrity": "sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.23.0.tgz", + "integrity": "sha512-cOsrbmIOXmf+5YbL99/S49Y3j46k/T16b9ml8bm9lP6N9US5iQ2yBK7gpui1pg0V/WMcXdkfKbTb7HXq9u+v4g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz", + "integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.11.tgz", + "integrity": "sha512-GMM8gGmqI7guS/llMFk1bJDkKfn3v3C4KHK9Yg1ey5qcHcOlKb0QvcMrgzvxo+T03/4szNh5lghY+fEC98Kq9g==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.11", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.15.tgz", + "integrity": "sha512-VbbC3PGjBdE0wAWDdHM9G8Gm977pnYI0XpqMd6LrKISj8/DJXEsWqgRuTYaNE9Bv0JGhTZUzHDlMk18IpOuoqw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.9", + "@babel/helper-split-export-declaration": "^7.22.6", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz", + "integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/template": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.23.0.tgz", + "integrity": 
"sha512-vaMdgNXFkYrB+8lbgniSYWHsgqK5gjaMNcc84bMIOMRLH0L9AqYq3hwMdvnyqj1OPqea8UtjPEuS/DCenah1wg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz", + "integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz", + "integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.11.tgz", + "integrity": "sha512-g/21plo58sfteWjaO0ZNVb+uEOkJNjAaHhbejrnBmu011l/eNDScmkbjCC3l4FKb10ViaGU4aOkFznSu2zRHgA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz", + "integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==", + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.11.tgz", + "integrity": "sha512-xa7aad7q7OiT8oNZ1mU7NrISjlSkVdMbNxn9IuLZyL9AJEhs1Apba3I+u5riX1dIkdptP5EKDG5XDPByWxtehw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.15.tgz", + "integrity": "sha512-me6VGeHsx30+xh9fbDLLPi0J1HzmeIIyenoOQHuw2D4m2SAU3NrspX5XxJLBpqn5yrLzrlw2Iy3RA//Bx27iOA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + 
"version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz", + "integrity": "sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.11.tgz", + "integrity": "sha512-CxT5tCqpA9/jXFlme9xIBCc5RPtdDq3JpkkhgHQqtDdiTnTI0jtZ0QzXhr5DILeYifDPp2wvY2ad+7+hLMW5Pw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-json-strings": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz", + "integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.11.tgz", + "integrity": "sha512-qQwRTP4+6xFCDV5k7gZBF3C31K34ut0tbEcTKxlX/0KXxm9GLcO14p570aWxFvVzx6QAfPgq7gaeIHXJC8LswQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz", + "integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.23.0.tgz", + "integrity": "sha512-xWT5gefv2HGSm4QHtgc1sYPbseOyf+FFDo2JbpE25GWl5BqTGO9IMwTYJRoIdjsF85GE+VegHxSCUt5EvoYTAw==", + "dependencies": { + "@babel/helper-module-transforms": "^7.23.0", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.23.0.tgz", + "integrity": "sha512-32Xzss14/UVc7k9g775yMIvkVK8xwKE0DPdP5JTapr3+Z9w4tzeOuLNY6BXDQR6BdnzIlXnCGAzsk/ICHBLVWQ==", + "dependencies": { + 
"@babel/helper-module-transforms": "^7.23.0", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-simple-access": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.23.0.tgz", + "integrity": "sha512-qBej6ctXZD2f+DhlOC9yO47yEYgUh5CZNz/aBoH4j/3NOlRfJXJbY7xDQCqQVf9KbrqGzIWER1f23doHGrIHFg==", + "dependencies": { + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-module-transforms": "^7.23.0", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz", + "integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==", + "dependencies": { + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", + "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz", + "integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.11.tgz", + "integrity": "sha512-YZWOw4HxXrotb5xsjMJUDlLgcDXSfO9eCmdl1bgW4+/lAGdkjaEvOnQ4p5WKKdUgSzO39dgPl0pTnfxm0OAXcg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.11.tgz", + "integrity": "sha512-3dzU4QGPsILdJbASKhF/V2TVP+gJya1PsueQCxIPCEcerqF21oEcrob4mzjsp2Py/1nLfF5m+xYNMDpmA8vffg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { 
+ "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.15.tgz", + "integrity": "sha512-fEB+I1+gAmfAyxZcX1+ZUwLeAuuf8VIg67CTznZE0MqVFumWkh8xWtn58I4dxdVf080wn7gzWoF8vndOViJe9Q==", + "dependencies": { + "@babel/compat-data": "^7.22.9", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz", + "integrity": "sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.11.tgz", + "integrity": "sha512-rli0WxesXUeCJnMYhzAglEjLWVDF6ahb45HuprcmQuLidBJFWjNnOzssk2kuc6e33FlLaiZhG/kUIzUMWdBKaQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.23.0.tgz", + "integrity": "sha512-sBBGXbLJjxTzLBF5rFWaikMnOGOk/BmK6vVByIdEggZ7Vn6CvWXZyRkkLFK6WE0IF8jSliyOkUN6SScFgzCM0g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.15.tgz", + "integrity": "sha512-hjk7qKIqhyzhhUvRT683TYQOFa/4cQKwQy7ALvTpODswN40MljzNDa0YldevS6tGbxwaEKVn502JmY0dP7qEtQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz", + "integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.11.tgz", + "integrity": "sha512-sSCbqZDBKHetvjSwpyWzhuHkmW5RummxJBVbYLkGkaiTOWGxml7SXt0iWa03bzxFIx7wOj3g/ILRd0RcJKBeSQ==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.22.11", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz", + "integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-constant-elements": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.22.5.tgz", + "integrity": "sha512-BF5SXoO+nX3h5OhlN78XbbDrBOffv+AxPP2ENaJOVqjWCgBDeOY3WcaUcddutGSfoap+5NEQ/q/4I3WZIvgkXA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-display-name": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.22.5.tgz", + "integrity": "sha512-PVk3WPYudRF5z4GKMEYUrLjPl38fJSKNaEOkFuoprioowGuWN6w2RKznuFNSlJx7pzzXXStPUnNSOEO0jL5EVw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.22.15.tgz", + "integrity": "sha512-oKckg2eZFa8771O/5vi7XeTvmM6+O9cxZu+kanTU7tD4sin5nO/G8jGJhq8Hvt2Z0kUoEDRayuZLaUlYl8QuGA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-jsx": "^7.22.5", + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-development": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz", + "integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==", + "dependencies": { + "@babel/plugin-transform-react-jsx": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-pure-annotations": { + "version": "7.22.5", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.22.5.tgz", + "integrity": "sha512-gP4k85wx09q+brArVinTXhWiyzLl9UpmGva0+mWyKxk6JZequ05x3eUcIUE+FyttPKJFRRVtAvQaJ6YF9h1ZpA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.22.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.10.tgz", + "integrity": "sha512-F28b1mDt8KcT5bUyJc/U9nwzw6cV+UmTeRlXYIl2TNqMMJif0Jeey9/RQ3C4NOd2zp0/TRsDns9ttj2L523rsw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz", + "integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.23.2.tgz", + "integrity": "sha512-XOntj6icgzMS58jPVtQpiuF6ZFWxQiJavISGx5KGjRj+3gqZr8+N6Kx+N9BApWzgS+DOjIZfXXj0ZesenOWDyA==", + "dependencies": { + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "babel-plugin-polyfill-corejs2": "^0.4.6", + "babel-plugin-polyfill-corejs3": "^0.8.5", + "babel-plugin-polyfill-regenerator": "^0.5.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz", + "integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz", + "integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz", + "integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz", + "integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz", + "integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.22.15.tgz", + "integrity": "sha512-1uirS0TnijxvQLnlv5wQBwOX3E1wCFX7ITv+9pBV2wKEk4K+M5tqDaoNXnTH8tjEIYHLO98MwiTWO04Ggz4XuA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-typescript": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.22.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.10.tgz", + "integrity": "sha512-lRfaRKGZCBqDlRU3UIFovdp9c9mEvlylmpod0/OatICsSfuQ9YFthRo1tpTkGsklEefZdqlEFdY4A2dwTb6ohg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz", + "integrity": "sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz", + "integrity": "sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==", + "dependencies": { + 
"@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz", + "integrity": "sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.23.2.tgz", + "integrity": "sha512-BW3gsuDD+rvHL2VO2SjAUNTBe5YrjsTiDyqamPDWY723na3/yPQ65X5oQkFVJZ0o50/2d+svm1rkPoJeR1KxVQ==", + "dependencies": { + "@babel/compat-data": "^7.23.2", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.15", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.15", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.15", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.22.5", + "@babel/plugin-syntax-import-attributes": "^7.22.5", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.22.5", + "@babel/plugin-transform-async-generator-functions": "^7.23.2", + "@babel/plugin-transform-async-to-generator": "^7.22.5", + "@babel/plugin-transform-block-scoped-functions": "^7.22.5", + "@babel/plugin-transform-block-scoping": "^7.23.0", + "@babel/plugin-transform-class-properties": "^7.22.5", + "@babel/plugin-transform-class-static-block": "^7.22.11", + "@babel/plugin-transform-classes": "^7.22.15", + "@babel/plugin-transform-computed-properties": "^7.22.5", + "@babel/plugin-transform-destructuring": "^7.23.0", + "@babel/plugin-transform-dotall-regex": "^7.22.5", + "@babel/plugin-transform-duplicate-keys": "^7.22.5", + "@babel/plugin-transform-dynamic-import": "^7.22.11", + "@babel/plugin-transform-exponentiation-operator": "^7.22.5", + "@babel/plugin-transform-export-namespace-from": "^7.22.11", + "@babel/plugin-transform-for-of": "^7.22.15", + "@babel/plugin-transform-function-name": "^7.22.5", + "@babel/plugin-transform-json-strings": "^7.22.11", + 
"@babel/plugin-transform-literals": "^7.22.5", + "@babel/plugin-transform-logical-assignment-operators": "^7.22.11", + "@babel/plugin-transform-member-expression-literals": "^7.22.5", + "@babel/plugin-transform-modules-amd": "^7.23.0", + "@babel/plugin-transform-modules-commonjs": "^7.23.0", + "@babel/plugin-transform-modules-systemjs": "^7.23.0", + "@babel/plugin-transform-modules-umd": "^7.22.5", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", + "@babel/plugin-transform-new-target": "^7.22.5", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.22.11", + "@babel/plugin-transform-numeric-separator": "^7.22.11", + "@babel/plugin-transform-object-rest-spread": "^7.22.15", + "@babel/plugin-transform-object-super": "^7.22.5", + "@babel/plugin-transform-optional-catch-binding": "^7.22.11", + "@babel/plugin-transform-optional-chaining": "^7.23.0", + "@babel/plugin-transform-parameters": "^7.22.15", + "@babel/plugin-transform-private-methods": "^7.22.5", + "@babel/plugin-transform-private-property-in-object": "^7.22.11", + "@babel/plugin-transform-property-literals": "^7.22.5", + "@babel/plugin-transform-regenerator": "^7.22.10", + "@babel/plugin-transform-reserved-words": "^7.22.5", + "@babel/plugin-transform-shorthand-properties": "^7.22.5", + "@babel/plugin-transform-spread": "^7.22.5", + "@babel/plugin-transform-sticky-regex": "^7.22.5", + "@babel/plugin-transform-template-literals": "^7.22.5", + "@babel/plugin-transform-typeof-symbol": "^7.22.5", + "@babel/plugin-transform-unicode-escapes": "^7.22.10", + "@babel/plugin-transform-unicode-property-regex": "^7.22.5", + "@babel/plugin-transform-unicode-regex": "^7.22.5", + "@babel/plugin-transform-unicode-sets-regex": "^7.22.5", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "@babel/types": "^7.23.0", + "babel-plugin-polyfill-corejs2": "^0.4.6", + "babel-plugin-polyfill-corejs3": "^0.8.5", + "babel-plugin-polyfill-regenerator": "^0.5.3", + "core-js-compat": "^3.31.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-env/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-react": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.22.15.tgz", + "integrity": "sha512-Csy1IJ2uEh/PecCBXXoZGAZBeCATTuePzCSB7dLYWS0vOEj6CNpjxIhW4duWwZodBNueH7QO14WbGn8YyeuN9w==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.15", + "@babel/plugin-transform-react-display-name": "^7.22.5", + "@babel/plugin-transform-react-jsx": "^7.22.15", + "@babel/plugin-transform-react-jsx-development": "^7.22.5", + "@babel/plugin-transform-react-pure-annotations": "^7.22.5" + }, 
+ "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.23.2.tgz", + "integrity": "sha512-u4UJc1XsS1GhIGteM8rnGiIvf9rJpiVgMEeCnwlLA7WJPC+jcXWJAGxYmeqs5hOZD8BbAfnV5ezBOxQbb4OUxA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.15", + "@babel/plugin-syntax-jsx": "^7.22.5", + "@babel/plugin-transform-modules-commonjs": "^7.23.0", + "@babel/plugin-transform-typescript": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" + }, + "node_modules/@babel/runtime": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.2.tgz", + "integrity": "sha512-mM8eg4yl5D6i3lu2QKPuPH4FArvJ8KhTofbE7jwMUv9KX5mBvwPAqnV3MlyBNqdp9RyRKP6Yck8TrfYrPvX3bg==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.23.2.tgz", + "integrity": "sha512-54cIh74Z1rp4oIjsHjqN+WM4fMyCBYe+LpZ9jWm51CZ1fbH3SkAzQD/3XLoNkjbJ7YEmjobLXyvQrFypRHOrXw==", + "dependencies": { + "core-js-pure": "^3.30.2", + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", + "debug": "^4.1.0", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", + "dependencies": { + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": 
"sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@docsearch/css": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.3.5.tgz", + "integrity": "sha512-NaXVp3I8LdmJ54fn038KHgG7HmbIzZlKS2FkVf6mKcW5bYMJovkx4947joQyZk5yubxOZ+ddHSh79y39Aevufg==" + }, + "node_modules/@docsearch/react": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.3.5.tgz", + "integrity": "sha512-Zuxf4z5PZ9eIQkVCNu76v1H+KAztKItNn3rLzZa7kpBS+++TgNARITnZeUS7C1DKoAhJZFr6T/H+Lvc6h/iiYg==", + "dependencies": { + "@algolia/autocomplete-core": "1.8.2", + "@algolia/autocomplete-preset-algolia": "1.8.2", + "@docsearch/css": "3.3.5", + "algoliasearch": "^4.0.0" + }, + "peerDependencies": { + "@types/react": ">= 16.8.0 < 19.0.0", + "react": ">= 16.8.0 < 19.0.0", + "react-dom": ">= 16.8.0 < 19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + } + } + }, + "node_modules/@docusaurus/core": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.0.0-beta.0.tgz", + "integrity": "sha512-tNVEw//Xdzg81y6n+mIV1wrTjwU3GIixTpo00uel7hrn1/vH05HTcj5sn+7R5Iq+tXBK2hc3h+Gjt7opwsvklQ==", + "dependencies": { + "@babel/core": "^7.22.9", + "@babel/generator": "^7.22.9", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.22.9", + "@babel/preset-env": "^7.22.9", + "@babel/preset-react": "^7.22.5", + "@babel/preset-typescript": "^7.22.5", + "@babel/runtime": "^7.22.6", + "@babel/runtime-corejs3": "^7.22.6", + "@babel/traverse": "^7.22.8", + "@docusaurus/cssnano-preset": "3.0.0-beta.0", + "@docusaurus/logger": "3.0.0-beta.0", + "@docusaurus/mdx-loader": "3.0.0-beta.0", + "@docusaurus/react-loadable": "5.5.2", + "@docusaurus/utils": "3.0.0-beta.0", + "@docusaurus/utils-common": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "@slorber/static-site-generator-webpack-plugin": "^4.0.7", + "@svgr/webpack": "^6.5.1", + "autoprefixer": "^10.4.14", + "babel-loader": "^9.1.3", + "babel-plugin-dynamic-import-node": "^2.3.3", + "boxen": "^6.2.1", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "clean-css": "^5.3.2", + "cli-table3": "^0.6.3", + "combine-promises": "^1.1.0", + "commander": "^5.1.0", + "copy-webpack-plugin": "^11.0.0", + "core-js": "^3.31.1", + "css-loader": "^6.8.1", + "css-minimizer-webpack-plugin": "^4.2.2", + "cssnano": "^5.1.15", + "del": "^6.1.1", + "detect-port": "^1.5.1", + "escape-html": "^1.0.3", + "eta": "^2.2.0", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "html-minifier-terser": "^7.2.0", + "html-tags": "^3.3.1", + "html-webpack-plugin": "^5.5.3", + "import-fresh": "^3.3.0", + "leven": "^3.1.0", + "lodash": "^4.17.21", + "mini-css-extract-plugin": "^2.7.6", + "postcss": "^8.4.26", + "postcss-loader": "^7.3.3", + "prompts": "^2.4.2", + "react-dev-utils": "^12.0.1", + "react-helmet-async": "^1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", + "react-loadable-ssr-addon-v5-slorber": "^1.0.1", + 
"react-router": "^5.3.4", + "react-router-config": "^5.1.1", + "react-router-dom": "^5.3.4", + "rtl-detect": "^1.0.4", + "semver": "^7.5.4", + "serve-handler": "^6.1.5", + "shelljs": "^0.8.5", + "terser-webpack-plugin": "^5.3.9", + "tslib": "^2.6.0", + "update-notifier": "^6.0.2", + "url-loader": "^4.1.1", + "wait-on": "^7.0.1", + "webpack": "^5.88.1", + "webpack-bundle-analyzer": "^4.9.0", + "webpack-dev-server": "^4.15.1", + "webpack-merge": "^5.9.0", + "webpackbar": "^5.0.2" + }, + "bin": { + "docusaurus": "bin/docusaurus.mjs" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/cssnano-preset": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.0.0-beta.0.tgz", + "integrity": "sha512-CMjYTp5Lhg0fU23MfD9VrmN3mAOtAx4RzvVRG6T/da+p+gVdAKGgzt6q46e5uDpLs9Q8+OQW8oDfe/tdUD0gqQ==", + "dependencies": { + "cssnano-preset-advanced": "^5.3.10", + "postcss": "^8.4.26", + "postcss-sort-media-queries": "^4.4.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=16.14" + } + }, + "node_modules/@docusaurus/logger": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.0.0-beta.0.tgz", + "integrity": "sha512-fHD5tSPVkppGp5b36vdVOyYNGpNNSIqIRUixVuRq9yM3k4xN54Rt1wr01rkupuJ978DkNEnx9zTV+uY08CYEKg==", + "dependencies": { + "chalk": "^4.1.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=16.14" + } + }, + "node_modules/@docusaurus/mdx-loader": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.0.0-beta.0.tgz", + "integrity": "sha512-xfRw38IAvoclaLWUMGU2EXuD6iawvFYTw+rmIdrjigGDxWfo8Id2fXr+dHfXZM7BYYAVckZeUfyYnzx/heGB+g==", + "dependencies": { + "@babel/parser": "^7.22.7", + "@babel/traverse": "^7.22.8", + "@docusaurus/logger": "3.0.0-beta.0", + "@docusaurus/utils": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "@mdx-js/mdx": "^2.1.5", + "@slorber/remark-comment": "^1.0.0", + "escape-html": "^1.0.3", + "estree-util-value-to-estree": "^2.1.0", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "hastscript": "^7.1.0", + "image-size": "^1.0.2", + "mdast-util-mdx": "^2.0.0", + "mdast-util-to-string": "^3.2.0", + "rehype-raw": "^6.1.1", + "remark-directive": "^2.0.1", + "remark-emoji": "^2.2.0", + "remark-gfm": "^3.0.1", + "stringify-object": "^3.3.0", + "tslib": "^2.6.0", + "unified": "^10.1.2", + "unist-util-visit": "^2.0.3", + "url-loader": "^4.1.1", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/module-type-aliases": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.0.0-beta.0.tgz", + "integrity": "sha512-Gy12aDp5oRdx1dHdqzyxsoR0TcGUeDOsItLtm7cpsnk/r4q459ifLtuY9X2Dyo4EpOt9+4XGuxQY8Q1DpMhCTg==", + "dependencies": { + "@docusaurus/react-loadable": "5.5.2", + "@docusaurus/types": "3.0.0-beta.0", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "@types/react-router-dom": "*", + "react-helmet-async": "*", + "react-loadable": "npm:@docusaurus/react-loadable@5.5.2" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/@docusaurus/plugin-content-blog": { + "version": "3.0.0-beta.0", + "resolved": 
"https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.0.0-beta.0.tgz", + "integrity": "sha512-fpauSrjOpC/HvdYbNimiLHgdomjfdOcjkgyzfNaDdQo0wtA0tl2MqZoCoYuh/UpdiMWkJX/L/kUyZaIiiNO7zQ==", + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/logger": "3.0.0-beta.0", + "@docusaurus/mdx-loader": "3.0.0-beta.0", + "@docusaurus/types": "3.0.0-beta.0", + "@docusaurus/utils": "3.0.0-beta.0", + "@docusaurus/utils-common": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "cheerio": "^1.0.0-rc.12", + "feed": "^4.2.2", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "reading-time": "^1.5.0", + "srcset": "^4.0.0", + "tslib": "^2.6.0", + "unist-util-visit": "^2.0.3", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-docs": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.0.0-beta.0.tgz", + "integrity": "sha512-mCMjysyJIMnhx+YVv8nRwJtkaz+oKfxUx4EGH+83kWgBBMXq0XHPheeUAPBvJOfYyO7jN8zChmYdOMmOuEIcLA==", + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/logger": "3.0.0-beta.0", + "@docusaurus/mdx-loader": "3.0.0-beta.0", + "@docusaurus/module-type-aliases": "3.0.0-beta.0", + "@docusaurus/types": "3.0.0-beta.0", + "@docusaurus/utils": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "@types/react-router-config": "^5.0.7", + "combine-promises": "^1.1.0", + "fs-extra": "^11.1.1", + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-pages": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.0.0-beta.0.tgz", + "integrity": "sha512-C5ex2E0OFHq5JkDDHtq7KuU3N9J9nWR/yDburjutN7qO8+TvdoC1nw59wGtfPZyVEeAnJJVVKZKf8pkiYJc5ig==", + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/mdx-loader": "3.0.0-beta.0", + "@docusaurus/types": "3.0.0-beta.0", + "@docusaurus/utils": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "fs-extra": "^11.1.1", + "tslib": "^2.6.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-debug": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.0.0-beta.0.tgz", + "integrity": "sha512-11EfWWPZW/ZhAklyZOjyWBHIRb4IeNwrW4IcMz5gz08tLMe74A6JhMGtnQZrH7084H94WKRToK7dzKKmSipqDw==", + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/types": "3.0.0-beta.0", + "@docusaurus/utils": "3.0.0-beta.0", + "@microlink/react-json-view": "^1.22.2", + "fs-extra": "^11.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-analytics": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.0.0-beta.0.tgz", + "integrity": 
"sha512-oErAWR0jkr2Bvx1lX6H7tG86UCQMb0yJVSyfr2nk34i9jfxfjOpSP34mLv/5wy5Ff90Xorw6PkuyuBVJtV36Pg==", + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/types": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-gtag": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.0.0-beta.0.tgz", + "integrity": "sha512-KCpMZmVA/p6f1L1zkGF5YS11xcKx8w+6CeDY2meJuKE/L+9TEY41PcT84yfWlXtmpTxU7ZDHcX1UwuYhGWtsfA==", + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/types": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "@types/gtag.js": "^0.0.12", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-tag-manager": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.0.0-beta.0.tgz", + "integrity": "sha512-JHod06M63Mdi1B3WTdnuDFtw/M+aGtn5jmoyT7MZcOET4lNa2wOda6IUDz2X+vpIP+JgUBZLs4elhRi7K5f+mg==", + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/types": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-sitemap": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.0.0-beta.0.tgz", + "integrity": "sha512-tO3xkOW241wEG9Rx7US7FAr9F7RukVjCbJTZ/qeIqz+0SYznm3+kINxWFN+RnCUeUSEJhzxvCL6Zjqh5NAd41w==", + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/logger": "3.0.0-beta.0", + "@docusaurus/types": "3.0.0-beta.0", + "@docusaurus/utils": "3.0.0-beta.0", + "@docusaurus/utils-common": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "fs-extra": "^11.1.1", + "sitemap": "^7.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/preset-classic": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.0.0-beta.0.tgz", + "integrity": "sha512-zLD/Qa492YUD9jktXuDc1DTxzGlrO7qyjmQHKxvOf+sS9qKRc88l16FeaiXmLP5sgOWW1acMoyyVf/HrDWXrRg==", + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/plugin-content-blog": "3.0.0-beta.0", + "@docusaurus/plugin-content-docs": "3.0.0-beta.0", + "@docusaurus/plugin-content-pages": "3.0.0-beta.0", + "@docusaurus/plugin-debug": "3.0.0-beta.0", + "@docusaurus/plugin-google-analytics": "3.0.0-beta.0", + "@docusaurus/plugin-google-gtag": "3.0.0-beta.0", + "@docusaurus/plugin-google-tag-manager": "3.0.0-beta.0", + "@docusaurus/plugin-sitemap": "3.0.0-beta.0", + "@docusaurus/theme-classic": "3.0.0-beta.0", + "@docusaurus/theme-common": "3.0.0-beta.0", + "@docusaurus/theme-search-algolia": "3.0.0-beta.0", + "@docusaurus/types": "3.0.0-beta.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/react-loadable": { + "version": "5.5.2", 
+ "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "dependencies": { + "@types/react": "*", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/@docusaurus/theme-classic": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.0.0-beta.0.tgz", + "integrity": "sha512-D9xnt22/2yO7e6jSXHJzN5IsMJ967TbK7nLjW/5FRI/UX8b9q5zHSOaw3VjlQ7hWJ4JeG3V5b28QYDnru0ybBA==", + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/mdx-loader": "3.0.0-beta.0", + "@docusaurus/module-type-aliases": "3.0.0-beta.0", + "@docusaurus/plugin-content-blog": "3.0.0-beta.0", + "@docusaurus/plugin-content-docs": "3.0.0-beta.0", + "@docusaurus/plugin-content-pages": "3.0.0-beta.0", + "@docusaurus/theme-common": "3.0.0-beta.0", + "@docusaurus/theme-translations": "3.0.0-beta.0", + "@docusaurus/types": "3.0.0-beta.0", + "@docusaurus/utils": "3.0.0-beta.0", + "@docusaurus/utils-common": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "@mdx-js/react": "^2.1.5", + "clsx": "^1.2.1", + "copy-text-to-clipboard": "^3.2.0", + "infima": "0.2.0-alpha.43", + "lodash": "^4.17.21", + "nprogress": "^0.2.0", + "postcss": "^8.4.26", + "prism-react-renderer": "^1.3.5", + "prismjs": "^1.29.0", + "react-router-dom": "^5.3.4", + "rtlcss": "^4.1.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-common": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.0.0-beta.0.tgz", + "integrity": "sha512-3fOL7/v1App4B8uonnKLxV1jWmdCykFHeVf727WpJs6S2msADJGQCZe/ZS1D7IobKHPlBTn038ls73yKhkLKlA==", + "dependencies": { + "@docusaurus/mdx-loader": "3.0.0-beta.0", + "@docusaurus/module-type-aliases": "3.0.0-beta.0", + "@docusaurus/plugin-content-blog": "3.0.0-beta.0", + "@docusaurus/plugin-content-docs": "3.0.0-beta.0", + "@docusaurus/plugin-content-pages": "3.0.0-beta.0", + "@docusaurus/utils": "3.0.0-beta.0", + "@docusaurus/utils-common": "3.0.0-beta.0", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "clsx": "^1.2.1", + "parse-numeric-range": "^1.3.0", + "prism-react-renderer": "^1.3.5", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-search-algolia": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.0.0-beta.0.tgz", + "integrity": "sha512-ixrBFD9dR7PeccstIC1YC+pdJc35QKleWFEGNPjuu/bSUUd2RxGvx9hZeQB4Dq8KL/hWZ2i+2qaBsPMjPBxieg==", + "dependencies": { + "@docsearch/react": "~3.3.3", + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/logger": "3.0.0-beta.0", + "@docusaurus/plugin-content-docs": "3.0.0-beta.0", + "@docusaurus/theme-common": "3.0.0-beta.0", + "@docusaurus/theme-translations": "3.0.0-beta.0", + "@docusaurus/utils": "3.0.0-beta.0", + "@docusaurus/utils-validation": "3.0.0-beta.0", + "algoliasearch": "^4.18.0", + "algoliasearch-helper": "^3.13.3", + "clsx": "^1.2.1", + "eta": "^2.2.0", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "tslib": 
"^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-translations": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.0.0-beta.0.tgz", + "integrity": "sha512-L47LI/5fCMgP93S4iA1VWzPaU9zkAvQYJMJ93H1iXMjLdKSvtS2JL+hHkL7uPh8DSUbtWeAUeayPCh2gxc+b1g==", + "dependencies": { + "fs-extra": "^11.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=16.14" + } + }, + "node_modules/@docusaurus/tsconfig": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.0.0-beta.0.tgz", + "integrity": "sha512-3fQyX79kyr0AEVMKHgnF7d9WKrCHNTWvTcJK957uIQPX07TBDyQ3tJX9FXC3Ib+Z8nsRRe36D3lvpFSfSjVCKw==", + "dev": true + }, + "node_modules/@docusaurus/types": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.0.0-beta.0.tgz", + "integrity": "sha512-99ueMwPtRtlODUH1nEim5k6yk819K2hCTf0Gns5cLgmZfnjFQvTEcuQE43kB2Zl9bbWn88w9TBAYvDac9LP32w==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "commander": "^5.1.0", + "joi": "^17.9.2", + "react-helmet-async": "^1.3.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1", + "webpack-merge": "^5.9.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/utils": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.0.0-beta.0.tgz", + "integrity": "sha512-H1ePqc8GVR3gA/2MFiB4m5Oczbm9l1X1SraYK1Pv6RljLHkSmvUf+tr6UU/tItKiDHwtAunv8kep0m4bYvDL7Q==", + "dependencies": { + "@docusaurus/logger": "3.0.0-beta.0", + "@svgr/webpack": "^6.5.1", + "escape-string-regexp": "^4.0.0", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "github-slugger": "^1.5.0", + "globby": "^11.1.0", + "gray-matter": "^4.0.3", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "micromatch": "^4.0.5", + "resolve-pathname": "^3.0.0", + "shelljs": "^0.8.5", + "tslib": "^2.6.0", + "url-loader": "^4.1.1", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-common": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.0.0-beta.0.tgz", + "integrity": "sha512-bfcBkzShK00MffNtAdamoHXt1bVGlypCKzbaqQDmVjFoBVYIvXLXP437uqSu2cx+Bg9JtI37J7Be6nsZqzrR/A==", + "dependencies": { + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-validation": { + "version": "3.0.0-beta.0", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.0.0-beta.0.tgz", + "integrity": "sha512-skzBB/uA5Tc0dxTlLmuH2BwAn7d1AntUINZyPZ/XMuqMhUXlHF/FKZMZNafC0UFI1M6SyqTwju8myXzD2nosEQ==", + "dependencies": { + "@docusaurus/logger": "3.0.0-beta.0", + "@docusaurus/utils": "3.0.0-beta.0", + "joi": "^17.9.2", + "js-yaml": "^4.1.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=16.14" + } + }, + "node_modules/@giscus/react": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@giscus/react/-/react-2.3.0.tgz", + "integrity": 
"sha512-tj79B+NNBfidhPdXJqWoqRm5Jhoc6CBhXMYwBR9nwTwsrdaB/spcQXmHpKcUuOdXZtlYSwMfCFcBogMNbD+gKQ==", + "dependencies": { + "giscus": "^1.3.0" + }, + "peerDependencies": { + "react": "^16 || ^17 || ^18", + "react-dom": "^16 || ^17 || ^18" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "dependencies": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", + "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.5.tgz", + "integrity": "sha512-UTYAUj/wviwdsMfzoSJspJxbkH5o1snzwX0//0ENX1u/55kkZZkcTZP6u9bwKGkv+dkk9at4m1Cpt0uY80kcpQ==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.19", + "resolved": 
"https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", + "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@leichtgewicht/ip-codec": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", + "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==" + }, + "node_modules/@lit-labs/ssr-dom-shim": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@lit-labs/ssr-dom-shim/-/ssr-dom-shim-1.1.2.tgz", + "integrity": "sha512-jnOD+/+dSrfTWYfSXBXlo5l5f0q1UuJo3tkbMDCYA2lKUYq79jaxqtGEvnRoh049nt1vdo1+45RinipU6FGY2g==" + }, + "node_modules/@lit/reactive-element": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/@lit/reactive-element/-/reactive-element-1.6.3.tgz", + "integrity": "sha512-QuTgnG52Poic7uM1AN5yJ09QMe0O28e10XzSvWDz02TJiiKee4stsiownEIadWm8nYzyDAyT+gKzUoZmiWQtsQ==", + "dependencies": { + "@lit-labs/ssr-dom-shim": "^1.0.0" + } + }, + "node_modules/@mdx-js/mdx": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-2.3.0.tgz", + "integrity": "sha512-jLuwRlz8DQfQNiUCJR50Y09CGPq3fLtmtUQfVrj79E0JWu3dvsVcxVIcfhR5h0iXu+/z++zDrYeiJqifRynJkA==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/mdx": "^2.0.0", + "estree-util-build-jsx": "^2.0.0", + "estree-util-is-identifier-name": "^2.0.0", + "estree-util-to-js": "^1.1.0", + "estree-walker": "^3.0.0", + "hast-util-to-estree": "^2.0.0", + "markdown-extensions": "^1.0.0", + "periscopic": "^3.0.0", + "remark-mdx": "^2.0.0", + "remark-parse": "^10.0.0", + "remark-rehype": "^10.0.0", + "unified": "^10.0.0", + "unist-util-position-from-estree": "^1.0.0", + "unist-util-stringify-position": "^3.0.0", + "unist-util-visit": "^4.0.0", + "vfile": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/mdx/node_modules/unist-util-visit": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", + "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.1.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/react": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-2.3.0.tgz", + "integrity": "sha512-zQH//gdOmuu7nt2oJR29vFhDv88oGPmVw6BggmrHeMI+xgEkp1B2dX9/bMBSYtK0dyLX/aOmesKS09g222K1/g==", + "dependencies": { + "@types/mdx": "^2.0.0", + "@types/react": ">=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "react": ">=16" + } + }, + "node_modules/@microlink/react-json-view": { + "version": "1.22.2", + "resolved": "https://registry.npmjs.org/@microlink/react-json-view/-/react-json-view-1.22.2.tgz", + "integrity": "sha512-liJzdlbspT5GbEuPffw4jzZfXOypKLK1Er9br03T31bAaIi/WptZqpcJaXPi7OmwC7v/YYczCkmAS7WaEfItPQ==", + "dependencies": { + "flux": "~4.0.1", + "react-base16-styling": "~0.6.0", + "react-lifecycles-compat": "~3.0.4", + "react-textarea-autosize": "~8.3.2" + }, + "peerDependencies": { + 
"react": ">= 15", + "react-dom": ">= 15" + } + }, + "node_modules/@microlink/react-json-view/node_modules/flux": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/flux/-/flux-4.0.4.tgz", + "integrity": "sha512-NCj3XlayA2UsapRpM7va6wU1+9rE5FIL7qoMcmxWHRzbp0yujihMBm9BBHZ1MDIk5h5o2Bl6eGiCe8rYELAmYw==", + "dependencies": { + "fbemitter": "^3.0.0", + "fbjs": "^3.0.1" + }, + "peerDependencies": { + "react": "^15.0.2 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pnpm/config.env-replace": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", + "integrity": "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==", + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz", + "integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==", + "dependencies": { + "graceful-fs": "4.2.10" + }, + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file/node_modules/graceful-fs": { + "version": "4.2.10", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", + "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==" + }, + "node_modules/@pnpm/npm-conf": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-2.2.2.tgz", + "integrity": "sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==", + "dependencies": { + "@pnpm/config.env-replace": "^1.1.0", + "@pnpm/network.ca-file": "^1.0.1", + "config-chain": "^1.1.11" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@polka/url": { + "version": "1.0.0-next.23", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.23.tgz", + "integrity": "sha512-C16M+IYz0rgRhWZdCmK+h58JMv8vijAA61gmz2rspCSwKwzBebpdcsiUmwrtJRdphuY30i6BSLEOP8ppbNLyLg==" + }, + "node_modules/@sideway/address": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz", + "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + 
"resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, + "node_modules/@sindresorhus/is": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", + "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@slorber/remark-comment": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@slorber/remark-comment/-/remark-comment-1.0.0.tgz", + "integrity": "sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.1.0", + "micromark-util-symbol": "^1.0.1" + } + }, + "node_modules/@slorber/static-site-generator-webpack-plugin": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz", + "integrity": "sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA==", + "dependencies": { + "eval": "^0.1.8", + "p-map": "^4.0.0", + "webpack-sources": "^3.2.2" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@svgr/babel-plugin-add-jsx-attribute": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", + "integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz", + "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-dynamic-title": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz", + "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-em-dimensions": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz", + "integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-react-native-svg": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz", + "integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-svg-component": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz", + "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-preset": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz", + "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==", + "dependencies": { + "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", + "@svgr/babel-plugin-remove-jsx-attribute": "*", + "@svgr/babel-plugin-remove-jsx-empty-expression": "*", + "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", + "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", + "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", + "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1", + "@svgr/babel-plugin-transform-svg-component": "^6.5.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/core": { + "version": 
"6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz", + "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==", + "dependencies": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "camelcase": "^6.2.0", + "cosmiconfig": "^7.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/hast-util-to-babel-ast": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz", + "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==", + "dependencies": { + "@babel/types": "^7.20.0", + "entities": "^4.4.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/plugin-jsx": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz", + "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==", + "dependencies": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/hast-util-to-babel-ast": "^6.5.1", + "svg-parser": "^2.0.4" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "^6.0.0" + } + }, + "node_modules/@svgr/plugin-svgo": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz", + "integrity": "sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==", + "dependencies": { + "cosmiconfig": "^7.0.1", + "deepmerge": "^4.2.2", + "svgo": "^2.8.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/webpack": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-6.5.1.tgz", + "integrity": "sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==", + "dependencies": { + "@babel/core": "^7.19.6", + "@babel/plugin-transform-react-constant-elements": "^7.18.12", + "@babel/preset-env": "^7.19.4", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.18.6", + "@svgr/core": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "@svgr/plugin-svgo": "^6.5.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@szmarczak/http-timer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", + "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", + "dependencies": { + "defer-to-connect": "^2.0.1" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/@trysound/sax": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", + "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@types/acorn": { + "version": "4.0.6", + "resolved": 
"https://registry.npmjs.org/@types/acorn/-/acorn-4.0.6.tgz", + "integrity": "sha512-veQTnWP+1D/xbxVrPC3zHnCZRjSrKfhbMUlEA43iMZLu7EsnTtkJklIuwrCPbOi8YkvDQAiW05VQQFvvz9oieQ==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.3", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.3.tgz", + "integrity": "sha512-oyl4jvAfTGX9Bt6Or4H9ni1Z447/tQuxnZsytsCaExKlmJiU8sFgnIBRzJUpKwB5eWn9HuBYlUlVA74q/yN0eQ==", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bonjour": { + "version": "3.5.11", + "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.11.tgz", + "integrity": "sha512-isGhjmBtLIxdHBDl2xGwUzEM8AOyOvWsADWq7rqirdi/ZQoHnLWErHvsThcEzTX8juDRiZtzp2Qkv5bgNh6mAg==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.36", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.36.tgz", + "integrity": "sha512-P63Zd/JUGq+PdrM1lv0Wv5SBYeA2+CORvbrXbngriYY0jzLUWfQMQQxOhjONEz/wlHOAxOdY7CY65rgQdTjq2w==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect-history-api-fallback": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.1.tgz", + "integrity": "sha512-iaQslNbARe8fctL5Lk+DsmgWOM83lM+7FzP0eQUJs1jd3kBE8NWqBTIT2S8SqQOJjxvt2eyIjpOuYeRXq2AdMw==", + "dependencies": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.9.tgz", + "integrity": "sha512-8Hz50m2eoS56ldRlepxSBa6PWEVCtzUo/92HgLc2qTMnotJNIm7xP+UZhyWoYsyOdd5dxZ+NZLb24rsKyFs2ow==", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/eslint": { + "version": "8.44.4", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.44.4.tgz", + "integrity": "sha512-lOzjyfY/D9QR4hY9oblZ76B90MYTB3RrQ4z2vBIJKj9ROCRqdkYl2gSUx1x1a4IWPjKJZLL4Aw1Zfay7eMnmnA==", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.5", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.5.tgz", + "integrity": "sha512-JNvhIEyxVW6EoMIFIvj93ZOywYFatlpu9deeH6eSx6PE3WHYvHaQtmHmQeNw7aA81bYGBPPQqdtBm6b1SsQMmA==", + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.2.tgz", + "integrity": "sha512-VeiPZ9MMwXjO32/Xu7+OwflfmeoRwkE/qzndw42gGtgJwZopBnzy2gD//NN1+go1mADzkDcqf/KnFRSjTJ8xJA==" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.1.tgz", + "integrity": "sha512-sHyakZlAezNFxmYRo0fopDZW+XvK6ipeZkkp5EAOLjdPfZp8VjZBJ67vSRI99RSCAoqXVmXOHS4fnWoxpuGQtQ==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.19", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.19.tgz", + "integrity": "sha512-UtOfBtzN9OvpZPPbnnYunfjM7XCI4jyk1NvnFhTVz5krYAnW4o5DCoIekvms+8ApqhB4+9wSge1kBijdfTSmfg==", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + 
"node_modules/@types/express-serve-static-core": { + "version": "4.17.37", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.37.tgz", + "integrity": "sha512-ZohaCYTgGFcOP7u6aJOhY9uIZQgZ2vxC2yWoArY+FeDXlqeH66ZVBjgvg+RLVAS/DWNq4Ap9ZXu1+SUQiiWYMg==", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/gtag.js": { + "version": "0.0.12", + "resolved": "https://registry.npmjs.org/@types/gtag.js/-/gtag.js-0.0.12.tgz", + "integrity": "sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg==" + }, + "node_modules/@types/hast": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.6.tgz", + "integrity": "sha512-47rJE80oqPmFdVDCD7IheXBrVdwuBgsYwoczFvKmwfo2Mzsnt+V9OONsYauFmICb6lQPpCuXYJWejBNs4pDJRg==", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/@types/history": { + "version": "4.7.11", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", + "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" + }, + "node_modules/@types/http-cache-semantics": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.2.tgz", + "integrity": "sha512-FD+nQWA2zJjh4L9+pFXqWOi0Hs1ryBCfI+985NjluQ1p8EYtoLvjLOKidXBtZ4/IcxDX4o8/E8qDS3540tNliw==" + }, + "node_modules/@types/http-errors": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.2.tgz", + "integrity": "sha512-lPG6KlZs88gef6aD85z3HNkztpj7w2R7HmR3gygjfXCQmsLloWNARFkMuzKiiY8FGdh1XDpgBdrSf4aKDiA7Kg==" + }, + "node_modules/@types/http-proxy": { + "version": "1.17.12", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.12.tgz", + "integrity": "sha512-kQtujO08dVtQ2wXAuSFfk9ASy3sug4+ogFR8Kd8UgP8PEuc1/G/8yjYRmp//PcDNJEUKOza/MrQu15bouEUCiw==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", + "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-gPQuzaPR5h/djlAv2apEG1HVOyj1IUs7GpfMZixU0/0KXT3pm64ylHuMUI1/Akh+sq/iikxg6Z2j+fcMDXaaTQ==", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.2.tgz", + "integrity": "sha512-kv43F9eb3Lhj+lr/Hn6OcLCs/sSM8bt+fIaP11rCYngfV6NVjzWXJ17owQtDQTL9tQ8WSLUrGsSJ6rJz0F1w1A==", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.13", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.13.tgz", + "integrity": 
"sha512-RbSSoHliUbnXj3ny0CNFOoxrIDV6SUGyStHsvDqosw6CkdPV8TtWGlfecuK4ToyMEAql6pzNxgCFKanovUzlgQ==" + }, + "node_modules/@types/mdast": { + "version": "3.0.13", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.13.tgz", + "integrity": "sha512-HjiGiWedR0DVFkeNljpa6Lv4/IZU1+30VY5d747K7lBudFc3R0Ibr6yJ9lN3BE28VnZyDfLF/VB1Ql1ZIbKrmg==", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.8.tgz", + "integrity": "sha512-r7/zWe+f9x+zjXqGxf821qz++ld8tp6Z4jUS6qmPZUXH6tfh4riXOhAqb12tWGWAevCFtMt1goLWkQMqIJKpsA==" + }, + "node_modules/@types/mime": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.3.tgz", + "integrity": "sha512-Ys+/St+2VF4+xuY6+kDIXGxbNRO0mesVg0bbxEfB97Od1Vjpjx9KD1qxs64Gcb3CWPirk9Xe+PT4YiiHQ9T+eg==" + }, + "node_modules/@types/ms": { + "version": "0.7.32", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.32.tgz", + "integrity": "sha512-xPSg0jm4mqgEkNhowKgZFBNtwoEwF6gJ4Dhww+GFpm3IgtNseHQZ5IqdNwnquZEoANxyDAKDRAdVo4Z72VvD/g==" + }, + "node_modules/@types/node": { + "version": "20.8.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.8.5.tgz", + "integrity": "sha512-SPlobFgbidfIeOYlzXiEjSYeIJiOCthv+9tSQVpvk4PAdIIc+2SmjNVzWXk9t0Y7dl73Zdf+OgXKHX9XtkqUpw==", + "dependencies": { + "undici-types": "~5.25.1" + } + }, + "node_modules/@types/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==" + }, + "node_modules/@types/parse5": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-6.0.3.tgz", + "integrity": "sha512-SuT16Q1K51EAVPz1K29DJ/sXjhSQ0zjvsypYJ6tlwVsRV9jwW5Adq2ch8Dq8kDBCkYnELS7N7VNCSB5nC56t/g==" + }, + "node_modules/@types/prop-types": { + "version": "15.7.8", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.8.tgz", + "integrity": "sha512-kMpQpfZKSCBqltAJwskgePRaYRFukDkm1oItcAbC3gNELR20XIBcN9VRgg4+m8DKsTfkWeA4m4Imp4DDuWy7FQ==" + }, + "node_modules/@types/qs": { + "version": "6.9.8", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.8.tgz", + "integrity": "sha512-u95svzDlTysU5xecFNTgfFG5RUWu1A9P0VzgpcIiGZA9iraHOdSzcxMxQ55DyeRaGCSxQi7LxXDI4rzq/MYfdg==" + }, + "node_modules/@types/range-parser": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.5.tgz", + "integrity": "sha512-xrO9OoVPqFuYyR/loIHjnbvvyRZREYKLjxV4+dY6v3FQR3stQ9ZxIGkaclF7YhI9hfjpuTbu14hZEy94qKLtOA==" + }, + "node_modules/@types/react": { + "version": "18.2.28", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.28.tgz", + "integrity": "sha512-ad4aa/RaaJS3hyGz0BGegdnSRXQBkd1CCYDCdNjBPg90UUpLgo+WlJqb9fMYUxtehmzF3PJaTWqRZjko6BRzBg==", + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-router": { + "version": "5.1.20", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", + "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*" + } + }, + "node_modules/@types/react-router-config": { + "version": "5.0.8", + "resolved": 
"https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.8.tgz", + "integrity": "sha512-zBzYZsr05V9xRG96oQ/xBXHy5+fDCX5wL7bboM0FFoOYQp9Gxmz8uvuKSkLesNWHlICl+W1l64F7fmp/KsOkuw==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "^5.1.0" + } + }, + "node_modules/@types/react-router-dom": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", + "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "*" + } + }, + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" + }, + "node_modules/@types/sax": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.5.tgz", + "integrity": "sha512-9jWta97bBVC027/MShr3gLab8gPhKy4l6qpb+UJLF5pDm3501NvA7uvqVCW+REFtx00oTi6Cq9JzLwgq6evVgw==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/scheduler": { + "version": "0.16.4", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.4.tgz", + "integrity": "sha512-2L9ifAGl7wmXwP4v3pN4p2FLhD0O1qsJpvKmNin5VA8+UvNVb447UDaAEV6UdrkA+m/Xs58U1RFps44x6TFsVQ==" + }, + "node_modules/@types/send": { + "version": "0.17.2", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.2.tgz", + "integrity": "sha512-aAG6yRf6r0wQ29bkS+x97BIs64ZLxeE/ARwyS6wrldMm3C1MdKwCcnnEwMC1slI8wuxJOpiUH9MioC0A0i+GJw==", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-index": { + "version": "1.9.2", + "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.2.tgz", + "integrity": "sha512-asaEIoc6J+DbBKXtO7p2shWUpKacZOoMBEGBgPG91P8xhO53ohzHWGCs4ScZo5pQMf5ukQzVT9fhX1WzpHihig==", + "dependencies": { + "@types/express": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.3", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.3.tgz", + "integrity": "sha512-yVRvFsEMrv7s0lGhzrggJjNOSmZCdgCjw9xWrPr/kNNLp6FaDfMC1KaYl3TSJ0c58bECwNBMoQrZJ8hA8E1eFg==", + "dependencies": { + "@types/http-errors": "*", + "@types/mime": "*", + "@types/node": "*" + } + }, + "node_modules/@types/sockjs": { + "version": "0.3.34", + "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.34.tgz", + "integrity": "sha512-R+n7qBFnm/6jinlteC9DBL5dGiDGjWAvjo4viUanpnc/dG1y7uDoacXPIQ/PQEg1fI912SMHIa014ZjRpvDw4g==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/trusted-types": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.4.tgz", + "integrity": "sha512-IDaobHimLQhjwsQ/NMwRVfa/yL7L/wriQPMhw1ZJall0KX6E1oxk29XMDeilW5qTIg5aoiqf5Udy8U/51aNoQQ==" + }, + "node_modules/@types/unist": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.8.tgz", + "integrity": "sha512-d0XxK3YTObnWVp6rZuev3c49+j4Lo8g4L1ZRm9z5L0xpoZycUPshHgczK5gsUMaZOstjVYYi09p5gYvUtfChYw==" + }, + "node_modules/@types/ws": { + "version": "8.5.7", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.7.tgz", + "integrity": 
"sha512-6UrLjiDUvn40CMrAubXuIVtj2PEfKDffJS7ychvnPU44j+KVeXmdHHTgqcM/dxLUTHxlXHiFM8Skmb8ozGdTnQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/yargs": { + "version": "17.0.28", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.28.tgz", + "integrity": "sha512-N3e3fkS86hNhtk6BEnc0rj3zcehaxx8QWhCROJkqpl5Zaoi7nAic3jH8q94jVD3zu5LGk+PUB6KAiDmimYOEQw==", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.1", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.1.tgz", + "integrity": "sha512-axdPBuLuEJt0c4yI5OZssC19K2Mq1uKdrfZBzuxLvaztgqUtFYZUNw7lETExPYJR9jdEoIg4mb7RQKRQzOkeGQ==" + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", + "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz", + "integrity": "sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==" + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", + "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz", + "integrity": "sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + 
"dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz", + "integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-opt": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6", + "@webassemblyjs/wast-printer": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz", + "integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz", + "integrity": "sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz", + "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz", + "integrity": "sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": 
"sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", + "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-assertions": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", + "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/address": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", + "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", 
+ "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/algoliasearch": { + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.20.0.tgz", + "integrity": "sha512-y+UHEjnOItoNy0bYO+WWmLWBlPwDjKHW6mNHrPi0NkuhpQOOEbrkwQH/wgKFDLh7qlKjzoKeiRtlpewDPDG23g==", + "dependencies": { + "@algolia/cache-browser-local-storage": "4.20.0", + "@algolia/cache-common": "4.20.0", + "@algolia/cache-in-memory": "4.20.0", + "@algolia/client-account": "4.20.0", + "@algolia/client-analytics": "4.20.0", + "@algolia/client-common": "4.20.0", + "@algolia/client-personalization": "4.20.0", + "@algolia/client-search": "4.20.0", + "@algolia/logger-common": "4.20.0", + "@algolia/logger-console": "4.20.0", + "@algolia/requester-browser-xhr": "4.20.0", + "@algolia/requester-common": "4.20.0", + "@algolia/requester-node-http": "4.20.0", + "@algolia/transporter": "4.20.0" + } + }, + "node_modules/algoliasearch-helper": { + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.14.2.tgz", + "integrity": "sha512-FjDSrjvQvJT/SKMW74nPgFpsoPUwZCzGbCqbp8HhBFfSk/OvNFxzCaCmuO0p7AWeLy1gD+muFwQEkBwcl5H4pg==", + "dependencies": { + "@algolia/events": "^4.0.1" + }, + "peerDependencies": { + "algoliasearch": ">= 3.1 < 6" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", + "engines": [ + "node >= 0.8.0" + ], + "bin": { + "ansi-html": "bin/ansi-html" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": 
"sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/array-flatten": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", + "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/asap": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", + "integrity": "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==" + }, + "node_modules/astring": { + "version": "1.8.6", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.8.6.tgz", + "integrity": "sha512-ISvCdHdlTDlH5IpxQJIex7BWBywFWgjJSVdwst+/iQCoEYnyOaQ95+X1JGshuBjGp6nxKUy1jMgE3zPqN7fQdg==", + "bin": { + "astring": "bin/astring" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.16", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.16.tgz", + "integrity": "sha512-7vd3UC6xKp0HLfua5IjZlcXvGAGy7cBAXTg2lyQ/8WpNhd6SiZ8Be+xm3FyBSYJx5GKcpRCzBh7RH4/0dnY+uQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "browserslist": 
"^4.21.10", + "caniuse-lite": "^1.0.30001538", + "fraction.js": "^4.3.6", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.0", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/axios": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", + "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", + "dependencies": { + "follow-redirects": "^1.14.9", + "form-data": "^4.0.0" + } + }, + "node_modules/babel-loader": { + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.1.3.tgz", + "integrity": "sha512-xG3ST4DglodGf8qSwv0MdeWLhrDsw/32QMdTO5T1ZIp9gQur0HkCyFs7Awskr10JKXFXwpAhiCuYX5oGXnRGbw==", + "dependencies": { + "find-cache-dir": "^4.0.0", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0", + "webpack": ">=5" + } + }, + "node_modules/babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", + "dependencies": { + "object.assign": "^4.1.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.6", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.6.tgz", + "integrity": "sha512-jhHiWVZIlnPbEUKSSNb9YoWcQGdlTLq7z1GHL4AjFxaoOUMuuEVJ+Y4pAaQUGOGk93YsVCKPbqbfw3m0SM6H8Q==", + "dependencies": { + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.4.3", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.5.tgz", + "integrity": "sha512-Q6CdATeAvbScWPNLB8lzSO7fgUVBkQt6zLgNlfyeCr/EQaEQR+bWiBYYPYAFyE528BMjRhL+1QBMOI4jc/c5TA==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.4.3", + "core-js-compat": "^3.32.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.3.tgz", + "integrity": "sha512-8sHeDOmXC8csczMrYEOf0UTNa4yE2SxV5JGeT/LP1n0OYVDUUFPxG9vdk2AlDlIit4t+Kf0xCtpgXPBwnn/9pw==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.4.3" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "funding": { + "type": "github", + "url": 
"https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/base16": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/base16/-/base16-1.0.0.tgz", + "integrity": "sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==" + }, + "node_modules/batch": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", + "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" + }, + "node_modules/big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/body-parser": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", + "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.1", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/bonjour-service": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.1.1.tgz", + "integrity": "sha512-Z/5lQRMOG9k7W+FkeGTNjh7htqn/2LMnfOvBZ8pynNZCM9MwkQkI3zeI4oz09uWdcgmgHugVvBqxGg4VQJ5PCg==", + "dependencies": { + "array-flatten": "^2.1.2", + "dns-equal": "^1.0.0", + "fast-deep-equal": "^3.1.3", + "multicast-dns": "^7.2.5" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" + }, + "node_modules/boxen": { + "version": "6.2.1", + "resolved": 
"https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", + "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^6.2.0", + "chalk": "^4.1.2", + "cli-boxes": "^3.0.0", + "string-width": "^5.0.1", + "type-fest": "^2.5.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.1.tgz", + "integrity": "sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001541", + "electron-to-chromium": "^1.4.535", + "node-releases": "^2.0.13", + "update-browserslist-db": "^1.0.13" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, + "node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cacheable-lookup": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", + "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request": { + "version": "10.2.14", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz", + "integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==", + "dependencies": { + "@types/http-cache-semantics": "^4.0.2", + "get-stream": "^6.0.1", + "http-cache-semantics": "^4.1.1", + "keyv": "^4.5.3", + "mimic-response": "^4.0.0", + "normalize-url": "^8.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request/node_modules/normalize-url": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.0.tgz", + "integrity": "sha512-uVFpKhj5MheNBJRTiMZ9pE/7hD1QTeEvugSJW/OmLzAp78PB5O6adfMNTvmfKhXBkvCzC+rqifWcVYpGFwTjnw==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/camel-case": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", + "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "dependencies": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "dependencies": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001547", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001547.tgz", + "integrity": "sha512-W7CrtIModMAxobGhz8iXmDfuJiiKg1WADMO/9x7/CLNin5cpSbuBjooyoIUVB5eyCc36QuTVlkVa1iB2S5+/eA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": 
"sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/cheerio": { + "version": "1.0.0-rc.12", + "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", + "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", + "dependencies": { + "cheerio-select": "^2.1.0", + "dom-serializer": "^2.0.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "htmlparser2": "^8.0.1", + "parse5": "^7.0.0", + "parse5-htmlparser2-tree-adapter": "^7.0.0" + }, + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/cheeriojs/cheerio?sponsor=1" + } + }, + "node_modules/cheerio-select": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", + "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", + "dependencies": { + "boolbase": "^1.0.0", + "css-select": "^5.1.0", + "css-what": "^6.1.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/chokidar": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", + "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": 
"sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/clean-css": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.2.tgz", + "integrity": "sha512-JVJbM+f3d3Q704rF4bqQ5UUyTtuJ0JRKNbTKVEeujCCBoMdkEi+V+e8oktO9qGQNSvHrFTM6JZRXrUvGR1czww==", + "dependencies": { + "source-map": "~0.6.0" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-boxes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.3.tgz", + "integrity": "sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-table3/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/cli-table3/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/clone-deep/node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clsx": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", + "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": 
"sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + }, + "node_modules/combine-promises": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.2.0.tgz", + "integrity": "sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/common-path-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", + "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==" + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compressible/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", + "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", + "dependencies": { + "accepts": "~1.3.5", + "bytes": "3.0.0", + "compressible": "~2.0.16", + "debug": "2.6.9", + "on-headers": 
"~1.0.2", + "safe-buffer": "5.1.2", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/compression/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/configstore": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-6.0.0.tgz", + "integrity": "sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA==", + "dependencies": { + "dot-prop": "^6.0.1", + "graceful-fs": "^4.2.6", + "unique-string": "^3.0.0", + "write-file-atomic": "^3.0.3", + "xdg-basedir": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/yeoman/configstore?sponsor=1" + } + }, + "node_modules/connect-history-api-fallback": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", + "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/consola": { + "version": "2.15.3", + "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", + "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" + }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" + }, + "node_modules/cookie": { + "version": "0.5.0", + 
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", + "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "node_modules/copy-text-to-clipboard": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", + "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", + "dependencies": { + "fast-glob": "^3.2.11", + "glob-parent": "^6.0.1", + "globby": "^13.1.1", + "normalize-path": "^3.0.0", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/globby": { + "version": "13.2.2", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", + "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", + "dependencies": { + "dir-glob": "^3.0.1", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", + "merge2": "^1.4.1", + "slash": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin/node_modules/slash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/core-js": { + "version": "3.33.0", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.33.0.tgz", + "integrity": "sha512-HoZr92+ZjFEKar5HS6MC776gYslNOKHt75mEBKWKnPeFDpZ6nH5OeF3S6HFT1mUAUZKrzkez05VboaX8myjSuw==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-compat": { + "version": "3.33.0", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.33.0.tgz", + "integrity": "sha512-0w4LcLXsVEuNkIqwjjf9rjCoPhK8uqA4tMRh4Ge26vfLtUutshn+aRJU21I9LCJlh2QQHfisNToLjw1XEJLTWw==", + "dependencies": { + "browserslist": "^4.22.1" + }, + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-pure": { + "version": "3.33.0", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.33.0.tgz", + "integrity": "sha512-FKSIDtJnds/YFIEaZ4HszRX7hkxGpNKM7FC9aJ9WLJbSd3lD4vOltFuVIBLR8asSx9frkTSqL0dw90SKQxgKrg==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cross-fetch": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", + "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", + "dependencies": { + "node-fetch": "^2.6.12" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-random-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-4.0.0.tgz", + "integrity": "sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==", + "dependencies": { + "type-fest": "^1.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/crypto-random-string/node_modules/type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/css-declaration-sorter": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.4.1.tgz", + "integrity": "sha512-rtdthzxKuyq6IzqX6jEcIzQF/YqccluefyCYheovBOLhFT/drQA9zj/UbRAa9J7C0o6EG6u3E6g+vKkay7/k3g==", + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.0.9" + } + }, + "node_modules/css-loader": { + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.8.1.tgz", + "integrity": "sha512-xDAXtEVGlD0gJ07iclwWVkLoZOpEvAWaSyf6W18S2pOC//K8+qUDIx8IIT3D+HjnmkJPQeesOPv5aiUaJsCM2g==", + "dependencies": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.21", + "postcss-modules-extract-imports": "^3.0.0", + "postcss-modules-local-by-default": "^4.0.3", + "postcss-modules-scope": "^3.0.0", + "postcss-modules-values": "^4.0.0", + 
"postcss-value-parser": "^4.2.0", + "semver": "^7.3.8" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/css-minimizer-webpack-plugin": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz", + "integrity": "sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA==", + "dependencies": { + "cssnano": "^5.1.8", + "jest-worker": "^29.1.2", + "postcss": "^8.4.17", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@parcel/css": { + "optional": true + }, + "@swc/css": { + "optional": true + }, + "clean-css": { + "optional": true + }, + "csso": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "lightningcss": { + "optional": true + } + } + }, + "node_modules/css-select": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", + "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.1.0", + "domhandler": "^5.0.2", + "domutils": "^3.0.1", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-tree": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", + "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", + "dependencies": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/css-what": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", + "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssnano": { + "version": "5.1.15", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.15.tgz", + "integrity": "sha512-j+BKgDcLDQA+eDifLx0EO4XSA56b7uut3BQFH+wbSaSTuGLuiyTa/wbRYthUXX8LC9mLg+WWKe8h+qJuwTAbHw==", + "dependencies": { + "cssnano-preset-default": "^5.2.14", + "lilconfig": "^2.0.3", + "yaml": "^1.10.2" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/cssnano" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-preset-advanced": { + "version": "5.3.10", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz", + "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==", + "dependencies": { + 
"autoprefixer": "^10.4.12", + "cssnano-preset-default": "^5.2.14", + "postcss-discard-unused": "^5.1.0", + "postcss-merge-idents": "^5.1.1", + "postcss-reduce-idents": "^5.2.0", + "postcss-zindex": "^5.1.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-preset-default": { + "version": "5.2.14", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz", + "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==", + "dependencies": { + "css-declaration-sorter": "^6.3.1", + "cssnano-utils": "^3.1.0", + "postcss-calc": "^8.2.3", + "postcss-colormin": "^5.3.1", + "postcss-convert-values": "^5.1.3", + "postcss-discard-comments": "^5.1.2", + "postcss-discard-duplicates": "^5.1.0", + "postcss-discard-empty": "^5.1.1", + "postcss-discard-overridden": "^5.1.0", + "postcss-merge-longhand": "^5.1.7", + "postcss-merge-rules": "^5.1.4", + "postcss-minify-font-values": "^5.1.0", + "postcss-minify-gradients": "^5.1.1", + "postcss-minify-params": "^5.1.4", + "postcss-minify-selectors": "^5.2.1", + "postcss-normalize-charset": "^5.1.0", + "postcss-normalize-display-values": "^5.1.0", + "postcss-normalize-positions": "^5.1.1", + "postcss-normalize-repeat-style": "^5.1.1", + "postcss-normalize-string": "^5.1.0", + "postcss-normalize-timing-functions": "^5.1.0", + "postcss-normalize-unicode": "^5.1.1", + "postcss-normalize-url": "^5.1.0", + "postcss-normalize-whitespace": "^5.1.1", + "postcss-ordered-values": "^5.1.3", + "postcss-reduce-initial": "^5.1.2", + "postcss-reduce-transforms": "^5.1.0", + "postcss-svgo": "^5.1.0", + "postcss-unique-selectors": "^5.1.1" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-utils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz", + "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/csso": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", + "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", + "dependencies": { + "css-tree": "^1.1.2" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/csstype": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", + "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==" + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", + "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "dependencies": { + "character-entities": "^2.0.0" + }, + 
"funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "engines": { + "node": ">=10" + } + }, + "node_modules/define-data-property": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.0.tgz", + "integrity": "sha512-UzGwzcjyv3OtAvolTj1GoyNYzfFR+iqbGjcnBEENZVCpM4/Ng1yhGNvS3lR/xDS74Tb2wGG9WzNSNIOS9UVb2g==", + "dependencies": { + "get-intrinsic": "^1.2.1", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "engines": { + "node": ">=8" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/del": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": 
"sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "dependencies": { + "globby": "^11.0.1", + "graceful-fs": "^4.2.4", + "is-glob": "^4.0.1", + "is-path-cwd": "^2.2.0", + "is-path-inside": "^3.0.2", + "p-map": "^4.0.0", + "rimraf": "^3.0.2", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" + }, + "node_modules/detect-port": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz", + "integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==", + "dependencies": { + "address": "^1.0.1", + "debug": "4" + }, + "bin": { + "detect": "bin/detect-port.js", + "detect-port": "bin/detect-port.js" + } + }, + "node_modules/detect-port-alt": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", + "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", + "dependencies": { + "address": "^1.0.1", + "debug": "^2.6.0" + }, + "bin": { + "detect": "bin/detect-port", + "detect-port": "bin/detect-port" + }, + "engines": { + "node": ">= 4.2.1" + } + }, + "node_modules/detect-port-alt/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/detect-port-alt/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/diff": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.1.0.tgz", + "integrity": "sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw==", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dir-glob": { 
+ "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dns-equal": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", + "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==" + }, + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "dependencies": { + "@leichtgewicht/ip-codec": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/docusaurus-plugin-sass": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.5.tgz", + "integrity": "sha512-Z+D0fLFUKcFpM+bqSUmqKIU+vO+YF1xoEQh5hoFreg2eMf722+siwXDD+sqtwU8E4MvVpuvsQfaHwODNlxJAEg==", + "dependencies": { + "sass-loader": "^10.1.1" + }, + "peerDependencies": { + "@docusaurus/core": "^2.0.0-beta || ^3.0.0-alpha", + "sass": "^1.30.0" + } + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "dependencies": { + "utila": "~0.4" + } + }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "dependencies": { + "no-case": 
"^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", + "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/dot-prop/node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/electron-to-chromium": { + "version": "1.4.553", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.553.tgz", + "integrity": "sha512-HiRdtyKS2+VhiXvjhMvvxiMC33FJJqTA5EB2YHgFZW6v7HkK4Q9Ahv2V7O2ZPgAjw+MyCJVMQvigj13H8t+wvA==" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/emoticon": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-3.2.0.tgz", + "integrity": "sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.15.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.15.0.tgz", + "integrity": "sha512-LXYT42KJ7lpIKECr2mAXIaMldcNCh/7E0KBKOu4KSfkHmP+mZmSs+8V5gBAqisWBy0OO4W5Oyys0GO1Y8KtdKg==", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "engines": 
{ + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-module-lexer": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.3.1.tgz", + "integrity": "sha512-JUFAyicQV9mXc3YRxPnDlrfBKpqt6hUYzz9/boprUJHs4e4KVr3XwOF70doO6gwXUor6EWZJAyWAfKki84t20Q==" + }, + "node_modules/escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-goat": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", + "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": 
"sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-2.1.1.tgz", + "integrity": "sha512-+5Ba/xGGS6mnwFbXIuQiDPTbuTxuMCooq3arVv7gPZtYpjp+VXH/NkHAP35OOefPhNG/UGqU3vt/LTABwcHX0w==", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-2.2.2.tgz", + "integrity": "sha512-m56vOXcOBuaF+Igpb9OPAy7f9w9OIkb5yhjsZuaPm7HoGi4oTOQi0h2+yZ+AtKklYFZ+rPC4n0wYCJCEU1ONqg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "estree-util-is-identifier-name": "^2.0.0", + "estree-walker": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-2.1.0.tgz", + "integrity": "sha512-bEN9VHRyXAUOjkKVQVvArFym08BTWB0aJPppZZr0UNyAqWsLaVfAqP7hbaTJjzHifmB5ebnR8Wm7r7yGN/HonQ==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-1.2.0.tgz", + "integrity": "sha512-IzU74r1PK5IMMGZXUVZbmiu4A1uhiPgW5hm1GjcOfr4ZzHaMPpLNJjR7HjXiIOzi25nZDrgFTobHTkV5Q6ITjA==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js/node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/estree-util-value-to-estree": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-2.1.0.tgz", + "integrity": "sha512-fcAWmZilY1+tEt7GSeLZoHDvp2NNgLkJznBRYkEpaholas41d+Y0zd/Acch7+qzZdxLtxLi+m04KjHFJSoMa6A==", + "dependencies": { + "@types/estree": "^1.0.0", + "is-plain-obj": "^4.0.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/estree-util-visit": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-1.2.1.tgz", + "integrity": "sha512-xbgqcrkIVbIG+lI/gzbvd9SGTJL4zqJKBFttUl5pP27KhAjtMKbX/mQXJ7qgyXpMgVy/zvpm0xoQQaGL8OloOw==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": 
"https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eta": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", + "engines": { + "node": ">=6.0.0" + }, + "funding": { + "url": "https://github.com/eta-dev/eta?sponsor=1" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eval": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", + "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", + "dependencies": { + "@types/node": "*", + "require-like": ">= 0.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/express": { + "version": "4.18.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", + "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.1", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.5.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/express/node_modules/array-flatten": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/express/node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/express/node_modules/path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + }, + "node_modules/express/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-glob": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz", + "integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "node_modules/fast-url-parser": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", + "integrity": 
"sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", + "dependencies": { + "punycode": "^1.3.2" + } + }, + "node_modules/fastq": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", + "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "dependencies": { + "websocket-driver": ">=0.5.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/fbemitter": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/fbemitter/-/fbemitter-3.0.0.tgz", + "integrity": "sha512-KWKaceCwKQU0+HPoop6gn4eOHk50bBv/VxjJtGMfwmJt3D29JpN4H4eisCtIPA+a8GVBam+ldMMpMjJUvpDyHw==", + "dependencies": { + "fbjs": "^3.0.0" + } + }, + "node_modules/fbjs": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/fbjs/-/fbjs-3.0.5.tgz", + "integrity": "sha512-ztsSx77JBtkuMrEypfhgc3cI0+0h+svqeie7xHbh1k/IKdcydnvadp/mUaGgjAOXQmQSxsqgaRhS3q9fy+1kxg==", + "dependencies": { + "cross-fetch": "^3.1.5", + "fbjs-css-vars": "^1.0.0", + "loose-envify": "^1.0.0", + "object-assign": "^4.1.0", + "promise": "^7.1.1", + "setimmediate": "^1.0.5", + "ua-parser-js": "^1.0.35" + } + }, + "node_modules/fbjs-css-vars": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz", + "integrity": "sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ==" + }, + "node_modules/feed": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", + "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", + "dependencies": { + "xml-js": "^1.6.11" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/file-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", + "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/file-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/file-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/file-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", 
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/file-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/filesize": { + "version": "8.0.7", + "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", + "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", + "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/find-cache-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", + "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", + "dependencies": { + "common-path-prefix": "^3.0.0", + "pkg-dir": "^7.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-up": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", + "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", + "dependencies": { + "locate-path": "^7.1.0", + "path-exists": "^5.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.3", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.3.tgz", + "integrity": 
"sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", + "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", + "dependencies": { + "@babel/code-frame": "^7.8.3", + "@types/json-schema": "^7.0.5", + "chalk": "^4.1.0", + "chokidar": "^3.4.2", + "cosmiconfig": "^6.0.0", + "deepmerge": "^4.2.2", + "fs-extra": "^9.0.0", + "glob": "^7.1.6", + "memfs": "^3.1.2", + "minimatch": "^3.0.4", + "schema-utils": "2.7.0", + "semver": "^7.3.2", + "tapable": "^1.0.0" + }, + "engines": { + "node": ">=10", + "yarn": ">=1.0.0" + }, + "peerDependencies": { + "eslint": ">= 6", + "typescript": ">= 2.7", + "vue-template-compiler": "*", + "webpack": ">= 4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", + "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.7.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { + "version": "2.7.0", + "resolved": 
"https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", + "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "dependencies": { + "@types/json-schema": "^7.0.4", + "ajv": "^6.12.2", + "ajv-keywords": "^3.4.1" + }, + "engines": { + "node": ">= 8.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/form-data-encoder": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", + "integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", + "engines": { + "node": ">= 14.17" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fraction.js": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", + "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs-extra": { + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.1.1.tgz", + "integrity": "sha512-MGIE4HOvQCeUCzmlHs0vXpih4ysz4wg9qiSAu6cd42lVwPbTM1TjV7RusoyQqMmk/95gdQZX72u+YW+c3eEpFQ==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fs-monkey": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.5.tgz", + "integrity": "sha512-8uMbBjrhzW76TYgEV27Y5E//W2f/lTFmx78P2w19FZSxarhI/798APGQyuGCwmkNxgwGRhrLfvWyLBvNtuOmew==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + 
"os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", + "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/giscus": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/giscus/-/giscus-1.3.0.tgz", + "integrity": "sha512-A3tVLgSmpnh2sX9uGjo9MbzmTTEJirSyFUPRvkipvy37y9rhxUYDoh9kO37QVrP7Sc7QuJ+gihB6apkO0yDyTw==", + "dependencies": { + "lit": "^2.7.5" + } + }, + "node_modules/github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + }, + "node_modules/global-dirs": { + "version": "3.0.1", + "resolved": 
"https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "dependencies": { + "ini": "2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/global-dirs/node_modules/ini": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dependencies": { + "global-prefix": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dependencies": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "12.6.1", + "resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz", + "integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==", + "dependencies": { + "@sindresorhus/is": "^5.2.0", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": "^10.2.8", + "decompress-response": "^6.0.0", + "form-data-encoder": "^2.1.2", + "get-stream": "^6.0.1", + "http2-wrapper": "^2.1.10", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^3.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } 
+ }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "node_modules/gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "dependencies": { + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": "^1.0.0" + }, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/gray-matter/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/gray-matter/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "dependencies": { + "duplexer": "^0.1.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/handle-thing": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" + }, + "node_modules/has": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.4.tgz", + "integrity": "sha512-qdSAmqLF6209RFj4VVItywPMbm3vWylknmB3nvNiUIs72xAimcM8nVYxYr7ncvZq5qzk9MKIZR8ijqD/1QuYjQ==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", + "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", + "dependencies": { + "get-intrinsic": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", + "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": 
"sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-yarn": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", + "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-7.1.2.tgz", + "integrity": "sha512-Nz7FfPBuljzsN3tCQ4kCBKqdNhQE2l0Tn+X1ubgKBPRoiDIu1mL08Cfw4k7q71+Duyaw7DXDN+VTAp4Vh3oCOw==", + "dependencies": { + "@types/hast": "^2.0.0", + "@types/unist": "^2.0.0", + "hastscript": "^7.0.0", + "property-information": "^6.0.0", + "vfile": "^5.0.0", + "vfile-location": "^4.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-3.1.1.tgz", + "integrity": "sha512-jdlwBjEexy1oGz0aJ2f4GKMaVKkA9jwjr4MjAAI22E5fM/TXVZHuS5OpONtdeIkRKqAaryQ2E9xNQxijoThSZA==", + "dependencies": { + "@types/hast": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-7.2.3.tgz", + "integrity": "sha512-RujVQfVsOrxzPOPSzZFiwofMArbQke6DJjnFfceiEbFh7S05CbPt0cYN+A5YeD3pso0JQk6O1aHBnx9+Pm2uqg==", + "dependencies": { + "@types/hast": "^2.0.0", + "@types/parse5": "^6.0.0", + "hast-util-from-parse5": "^7.0.0", + "hast-util-to-parse5": "^7.0.0", + "html-void-elements": "^2.0.0", + "parse5": "^6.0.0", + "unist-util-position": "^4.0.0", + "unist-util-visit": "^4.0.0", + "vfile": "^5.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw/node_modules/parse5": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", + "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" + }, + "node_modules/hast-util-raw/node_modules/unist-util-visit": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", + "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.1.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-estree": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-2.3.3.tgz", + "integrity": "sha512-ihhPIUPxN0v0w6M5+IiAZZrn0LH2uZomeWwhn7uP7avZC6TE7lIiEh2yBMPr5+zi1aUCXq6VoYRgs2Bw9xmycQ==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^2.0.0", + "@types/unist": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + 
"estree-util-attach-comments": "^2.0.0", + "estree-util-is-identifier-name": "^2.0.0", + "hast-util-whitespace": "^2.0.0", + "mdast-util-mdx-expression": "^1.0.0", + "mdast-util-mdxjs-esm": "^1.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^0.4.1", + "unist-util-position": "^4.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-7.1.0.tgz", + "integrity": "sha512-YNRgAJkH2Jky5ySkIqFXTQiaqcAtJyVE+D5lkN6CdtOqrnkLfGYYrEcKuHOJZlp+MwjSwuD3fZuawI+sic/RBw==", + "dependencies": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-2.0.1.tgz", + "integrity": "sha512-nAxA0v8+vXSBDt3AnRUNjyRIQ0rD+ntpbAp4LnPkumc5M9yUbSMa4XDU9Q6etY4f1Wp4bNgvc1yjiZtsTTrSng==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-7.2.0.tgz", + "integrity": "sha512-TtYPq24IldU8iKoJQqvZOuhi5CyCQRAbvDOX0x1eW6rsHSxa/1i2CCiptNTotGHJ3VoHRGmqiv6/D3q113ikkw==", + "dependencies": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^3.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "bin": { + "he": "bin/he" + } + }, + "node_modules/history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "dependencies": { + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hpack.js": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "dependencies": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "node_modules/hpack.js/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": 
"sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/hpack.js/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/hpack.js/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/hpack.js/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/html-entities": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.4.0.tgz", + "integrity": "sha512-igBTJcNNNhvZFRtm8uA6xMY6xYleeDwn3PeBCkDz7tHttv4F2hsDI2aPgNERWzvRcNYHNT3ymRaQzllmXj4YsQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ] + }, + "node_modules/html-minifier-terser": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", + "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "~5.3.2", + "commander": "^10.0.0", + "entities": "^4.4.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.15.1" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": "^14.13.1 || >=16.0.0" + } + }, + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "engines": { + "node": ">=14" + } + }, + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/html-void-elements": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-2.0.1.tgz", + "integrity": "sha512-0quDb7s97CfemeJAnW9wC0hw78MtW7NU3hqtCD75g2vFlDLt36llsYD7uB7SUzojLMP24N5IatXf7ylGXiGG9A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/html-webpack-plugin": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.5.3.tgz", + "integrity": "sha512-6YrDKTuqaP/TquFH7h4srYWsZx+x6k6+FbsTm0ziCwGHDP78Unr1r9F/H4+sGmMbX08GQcJ+K64x55b+7VM/jg==", + "dependencies": { + 
"@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "webpack": "^5.20.0" + } + }, + "node_modules/html-webpack-plugin/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "engines": { + "node": ">= 12" + } + }, + "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/htmlparser2": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", + "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" + }, + "node_modules/http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/http-parser-js": { + "version": "0.5.8", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", + "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-proxy-middleware": { + "version": "2.0.6", + "resolved": 
"https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", + "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "dependencies": { + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@types/express": "^4.17.13" + }, + "peerDependenciesMeta": { + "@types/express": { + "optional": true + } + } + }, + "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/http2-wrapper": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.0.tgz", + "integrity": "sha512-kZB0wxMo0sh1PehyjJUWRFEd99KC5TLjZ2cULC4f9iqJBAmKQQXEICjxl5iPJRwP40dpeHFqqhm7tYCvODpqpQ==", + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/ignore": { + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", + "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/image-size": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.0.2.tgz", + "integrity": "sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg==", + "dependencies": { + "queue": "6.0.2" + }, + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/immer": { + "version": "9.0.21", + "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", + "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/immer" + } + }, + "node_modules/immutable": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.4.tgz", + "integrity": "sha512-fsXeu4J4i6WNWSikpI88v/PcVflZz+6kMhUfIwc5SY+poQRPnaf5V7qds6SUyUN3cVxEzuCab7QIoLOQ+DQ1wA==" + 
}, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/infima": { + "version": "0.2.0-alpha.43", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz", + "integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/inline-style-parser": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", + "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" + }, + "node_modules/interpret": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/ipaddr.js": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz", + "integrity": "sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==", + "engines": { + "node": ">= 10" + } + }, + 
"node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-buffer": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", + "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "engines": { + "node": ">=4" + } + }, + "node_modules/is-ci": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", + "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", + "dependencies": { + "ci-info": "^3.2.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/is-core-module": { + "version": "2.13.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz", + "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==", + "dependencies": { + "has": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": 
"sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "dependencies": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-npm": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", + "integrity": "sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-path-cwd": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", + "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-reference": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", + "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-root": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", + "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-yarn-global": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", + "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": 
"https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "1.20.0", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.20.0.tgz", + "integrity": "sha512-3TV69ZbrvV6U5DfQimop50jE9Dl6J8O1ja1dvBbMba/sZ3YBEQqJ2VZRoQPVnhlzjNtU1vaXRZVrVjU4qtm8yA==", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/joi": { + "version": "17.11.0", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.11.0.tgz", + "integrity": "sha512-NgB+lZLNoqISVy1rZocE9PZI36bL/77ie924Ri43yEvi9GUUMPeyVIr8KdFTMUlby1p0PBYMk9spIxEUQYqrJQ==", + "dependencies": { + "@hapi/hoek": "^9.0.0", + "@hapi/topo": "^5.0.0", + "@sideway/address": "^4.1.3", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/klona": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.6.tgz", + "integrity": "sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/latest-version": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", + "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", + "dependencies": { + "package-json": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/launch-editor": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.6.1.tgz", + "integrity": "sha512-eB/uXmFVpY4zezmGp5XtU21kwo7GBbKB+EQ+UZeWtGb9yAM5xt/Evk+lYH3eRNAtId+ej4u7TYPFZ07w4s7rRw==", + "dependencies": { + "picocolors": "^1.0.0", + "shell-quote": "^1.8.1" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + }, + 
"node_modules/lit": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/lit/-/lit-2.8.0.tgz", + "integrity": "sha512-4Sc3OFX9QHOJaHbmTMk28SYgVxLN3ePDjg7hofEft2zWlehFL3LiAuapWc4U/kYwMYJSh2hTCPZ6/LIC7ii0MA==", + "dependencies": { + "@lit/reactive-element": "^1.6.0", + "lit-element": "^3.3.0", + "lit-html": "^2.8.0" + } + }, + "node_modules/lit-element": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/lit-element/-/lit-element-3.3.3.tgz", + "integrity": "sha512-XbeRxmTHubXENkV4h8RIPyr8lXc+Ff28rkcQzw3G6up2xg5E8Zu1IgOWIwBLEQsu3cOVFqdYwiVi0hv0SlpqUA==", + "dependencies": { + "@lit-labs/ssr-dom-shim": "^1.1.0", + "@lit/reactive-element": "^1.3.0", + "lit-html": "^2.8.0" + } + }, + "node_modules/lit-html": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/lit-html/-/lit-html-2.8.0.tgz", + "integrity": "sha512-o9t+MQM3P4y7M7yNzqAyjp7z+mQGa4NS4CxiyLqFPyFWyc4O+nodLrkrxSaCTrla6M5YOLaT3RpbbqjszB5g3Q==", + "dependencies": { + "@types/trusted-types": "^2.0.2" + } + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/locate-path": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", + "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", + "dependencies": { + "p-locate": "^6.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.curry": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.curry/-/lodash.curry-4.1.1.tgz", + "integrity": "sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.escape": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/lodash.escape/-/lodash.escape-4.0.1.tgz", + "integrity": "sha512-nXEOnb/jK9g0DYMr1/Xvq6l5xMD7GDG55+GSYIYmS0G4tBk/hURD4JR9WCavs04t33WmJx9kCyp9vJ+mr4BOUw==" + }, + "node_modules/lodash.flatten": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.flatten/-/lodash.flatten-4.4.0.tgz", + "integrity": "sha512-C5N2Z3DgnnKr0LOpv/hKCgKdb7ZZwafIrsesve6lmzvZIRZRGaZ/l6Q8+2W7NaT+ZwO3fFlSCzCzrDCFdJfZ4g==" + }, + "node_modules/lodash.flow": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/lodash.flow/-/lodash.flow-3.5.0.tgz", 
+ "integrity": "sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw==" + }, + "node_modules/lodash.invokemap": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/lodash.invokemap/-/lodash.invokemap-4.6.0.tgz", + "integrity": "sha512-CfkycNtMqgUlfjfdh2BhKO/ZXrP8ePOX5lEU/g0R3ItJcnuxWDwokMGKx1hWcfOikmyOVx6X9IwWnDGlgKl61w==" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" + }, + "node_modules/lodash.pullall": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/lodash.pullall/-/lodash.pullall-4.2.0.tgz", + "integrity": "sha512-VhqxBKH0ZxPpLhiu68YD1KnHmbhQJQctcipvmFnqIBDYzcIHzf3Zpu0tpeOKtR4x76p9yohc506eGdOjTmyIBg==" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + }, + "node_modules/lodash.uniqby": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz", + "integrity": "sha512-e/zcLx6CSbmaEgFHCA7BnoQKyCtKMxnuWrJygbwPs/AIn+IMKl66L8/s+wBUn5LRw2pZx3bUHibiV1b6aTWIww==" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/markdown-extensions": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-1.1.1.tgz", + "integrity": "sha512-WWC0ZuMzCyDHYCasEGs4IPvLyTGftYwh6wIEOULOF0HXcqZlhwRzrK0w2VUlxWA98xnvb/jszw4ZSkJ6ADpM6Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/markdown-table": { + "version": "3.0.3", + "resolved": 
"https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", + "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-definitions": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-5.1.2.tgz", + "integrity": "sha512-8SVPMuHqlPME/z3gqVwWY4zVXn8lqKv/pAhC57FuJ40ImXyBpmO5ukh98zB2v7Blql2FiHjHv9LVztSIqjY+MA==", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "unist-util-visit": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-definitions/node_modules/unist-util-visit": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", + "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.1.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-directive": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-2.2.4.tgz", + "integrity": "sha512-sK3ojFP+jpj1n7Zo5ZKvoxP1MvLyzVG63+gm40Z/qI00avzdPCYxt7RBMgofwAva9gBjbDBWVRB/i+UD+fUCzQ==", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "mdast-util-from-markdown": "^1.3.0", + "mdast-util-to-markdown": "^1.5.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^5.1.3" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-2.2.2.tgz", + "integrity": "sha512-MTtdFRz/eMDHXzeK6W3dO7mXUlF82Gom4y0oOgvHhh/HXZAGvIQDUvQ0SuUx+j2tv44b8xTHOm8K/9OoRFnXKw==", + "dependencies": { + "@types/mdast": "^3.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz", + "integrity": "sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "mdast-util-to-string": "^3.1.0", + "micromark": "^3.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-decode-string": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + 
"micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "unist-util-stringify-position": "^3.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-2.0.2.tgz", + "integrity": "sha512-qvZ608nBppZ4icQlhQQIAdc6S3Ffj9RGmzwUKUWuEICFnd1LVkN3EktF7ZHAgfcEdvZB5owU9tQgt99e2TlLjg==", + "dependencies": { + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-gfm-autolink-literal": "^1.0.0", + "mdast-util-gfm-footnote": "^1.0.0", + "mdast-util-gfm-strikethrough": "^1.0.0", + "mdast-util-gfm-table": "^1.0.0", + "mdast-util-gfm-task-list-item": "^1.0.0", + "mdast-util-to-markdown": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-1.0.3.tgz", + "integrity": "sha512-My8KJ57FYEy2W2LyNom4n3E7hKTuQk/0SES0u16tjA9Z3oFkF4RrC/hPAPgjlSpezsOvI8ObcXcElo92wn5IGA==", + "dependencies": { + "@types/mdast": "^3.0.0", + "ccount": "^2.0.0", + "mdast-util-find-and-replace": "^2.0.0", + "micromark-util-character": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-1.0.2.tgz", + "integrity": "sha512-56D19KOGbE00uKVj3sgIykpwKL179QsVFwx/DCW0u/0+URsryacI4MAdNJl0dh+u2PSsD9FtxPFbHCzJ78qJFQ==", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0", + "micromark-util-normalize-identifier": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-1.0.3.tgz", + "integrity": "sha512-DAPhYzTYrRcXdMjUtUjKvW9z/FNAMTdU0ORyMcbmkwYNbKocDpdk+PX1L1dQgOID/+vVs1uBQ7ElrBQfZ0cuiQ==", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-1.0.7.tgz", + "integrity": "sha512-jjcpmNnQvrmN5Vx7y7lEc2iIOEytYv7rTvu+MeyAsSHTASGCCRA79Igg2uKssgOs1i1po8s3plW0sTu1wkkLGg==", + "dependencies": { + "@types/mdast": "^3.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-to-markdown": "^1.3.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-1.0.2.tgz", + "integrity": "sha512-PFTA1gzfp1B1UaiJVyhJZA1rm0+Tzn690frc/L8vNX1Jop4STZgOE6bxUhnzdVSB+vm2GU1tIsuQcA9bxTQpMQ==", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.3.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx": { + "version": 
"2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-2.0.1.tgz", + "integrity": "sha512-38w5y+r8nyKlGvNjSEqWrhG0w5PmnRA+wnBvm+ulYCct7nsGYhFVb0lljS9bQav4psDAS1eGkP2LMVcZBi/aqw==", + "dependencies": { + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-mdx-expression": "^1.0.0", + "mdast-util-mdx-jsx": "^2.0.0", + "mdast-util-mdxjs-esm": "^1.0.0", + "mdast-util-to-markdown": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-1.3.2.tgz", + "integrity": "sha512-xIPmR5ReJDu/DHH1OoIT1HkuybIfRGYRywC+gJtI7qHjCJp/M9jrmBEJW22O8lskDWm562BX2W8TiAwRTb0rKA==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-to-markdown": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-2.1.4.tgz", + "integrity": "sha512-DtMn9CmVhVzZx3f+optVDF8yFgQVt7FghCRNdlIaS3X5Bnym3hZwPbg/XW86vdpKjlc1PVj26SpnLGeJBXD3JA==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "ccount": "^2.0.0", + "mdast-util-from-markdown": "^1.1.0", + "mdast-util-to-markdown": "^1.3.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-remove-position": "^4.0.0", + "unist-util-stringify-position": "^3.0.0", + "vfile-message": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-1.3.1.tgz", + "integrity": "sha512-SXqglS0HrEvSdUEfoXFtcg7DRl7S2cwOXc7jkuusG472Mmjag34DUDeOJUZtl+BVnyeO1frIgVpHlNRWc2gk/w==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-to-markdown": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-3.0.1.tgz", + "integrity": "sha512-WmI1gTXUBJo4/ZmSk79Wcb2HcjPJBzM1nlI/OUWA8yk2X9ik3ffNbBGsU+09BFmXaL1IBb9fiuvq6/KMiNycSg==", + "dependencies": { + "@types/mdast": "^3.0.0", + "unist-util-is": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "12.3.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-12.3.0.tgz", + "integrity": "sha512-pits93r8PhnIoU4Vy9bjW39M2jJ6/tdHyja9rrot9uujkN7UTU9SDnE6WNJz/IGyQk3XHX6yNNtrBH6cQzm8Hw==", + "dependencies": { + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-definitions": "^5.0.0", + "micromark-util-sanitize-uri": "^1.1.0", + "trim-lines": "^3.0.0", + "unist-util-generated": "^2.0.0", + "unist-util-position": "^4.0.0", + "unist-util-visit": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + 
}, + "node_modules/mdast-util-to-hast/node_modules/unist-util-visit": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", + "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.1.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-1.5.0.tgz", + "integrity": "sha512-bbv7TPv/WC49thZPg3jXuqzuvI45IL2EVAr/KxF0BSdHsU0ceFHOmwQn6evxAh1GaoK/6GQ1wp4R4oW2+LFL/A==", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^3.0.0", + "mdast-util-to-string": "^3.0.0", + "micromark-util-decode-string": "^1.0.0", + "unist-util-visit": "^4.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown/node_modules/unist-util-visit": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", + "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.1.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz", + "integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==", + "dependencies": { + "@types/mdast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", + "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/merge2": { + "version": "1.4.1", + 
"resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromark": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-3.2.0.tgz", + "integrity": "sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "micromark-core-commonmark": "^1.0.1", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-combine-extensions": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-1.1.0.tgz", + "integrity": "sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-factory-destination": "^1.0.0", + "micromark-factory-label": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-factory-title": "^1.0.0", + "micromark-factory-whitespace": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-classify-character": "^1.0.0", + "micromark-util-html-tag-name": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-extension-directive": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-2.2.1.tgz", + "integrity": "sha512-ZFKZkNaEqAP86IghX1X7sE8NNnx6kFNq9mSBRvEHjArutTCJZ3LYg6VH151lXVb1JHpmIcW/7rX25oMoIHuSug==", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-factory-whitespace": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "parse-entities": "^4.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "2.0.3", + "resolved": 
"https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-2.0.3.tgz", + "integrity": "sha512-vb9OoHqrhCmbRidQv/2+Bc6pkP0FrtlhurxZofvOEy5o8RtuuvTq+RQ1Vw5ZDNrVraQZu3HixESqbG+0iKk/MQ==", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^1.0.0", + "micromark-extension-gfm-footnote": "^1.0.0", + "micromark-extension-gfm-strikethrough": "^1.0.0", + "micromark-extension-gfm-table": "^1.0.0", + "micromark-extension-gfm-tagfilter": "^1.0.0", + "micromark-extension-gfm-task-list-item": "^1.0.0", + "micromark-util-combine-extensions": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-1.0.5.tgz", + "integrity": "sha512-z3wJSLrDf8kRDOh2qBtoTRD53vJ+CWIyo7uyZuxf/JAbNJjiHsOpG1y5wxk8drtv3ETAHutCu6N3thkOOgueWg==", + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-1.1.2.tgz", + "integrity": "sha512-Yxn7z7SxgyGWRNa4wzf8AhYYWNrwl5q1Z8ii+CSTTIqVkmGZF1CElX2JI8g5yGoM3GAman9/PVCUFUSJ0kB/8Q==", + "dependencies": { + "micromark-core-commonmark": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-1.0.7.tgz", + "integrity": "sha512-sX0FawVE1o3abGk3vRjOH50L5TTLr3b5XMqnP9YDRb34M0v5OoZhG+OHFz1OffZ9dlwgpTBKaT4XW/AsUVnSDw==", + "dependencies": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-classify-character": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-1.0.7.tgz", + "integrity": "sha512-3ZORTHtcSnMQEKtAOsBQ9/oHp9096pI/UvdPtN7ehKvrmZZ2+bbWhi0ln+I9drmwXMt5boocn6OlwQzNXeVeqw==", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-1.0.2.tgz", + "integrity": 
"sha512-5XWB9GbAUSHTn8VPU8/1DBXMuKYT5uOgEjJb8gN3mW0PNW5OPHpSdojoqf+iq1xo7vWzw/P8bAHY0n6ijpXF7g==", + "dependencies": { + "micromark-util-types": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-1.0.5.tgz", + "integrity": "sha512-RMFXl2uQ0pNQy6Lun2YBYT9g9INXtWJULgbt01D/x8/6yJ2qpKyzdZD3pi6UIkzF++Da49xAelVKUeUMqd5eIQ==", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-expression": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-1.0.8.tgz", + "integrity": "sha512-zZpeQtc5wfWKdzDsHRBY003H2Smg+PUi2REhqgIhdzAa5xonhP03FcXxqFSerFiNUr5AWmHpaNPQTBVOS4lrXw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "micromark-factory-mdx-expression": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-events-to-acorn": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-extension-mdx-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-1.0.5.tgz", + "integrity": "sha512-gPH+9ZdmDflbu19Xkb8+gheqEDqkSpdCEubQyxuz/Hn8DOXiXvrXeikOoBA71+e8Pfi0/UYmU3wW3H58kr7akA==", + "dependencies": { + "@types/acorn": "^4.0.0", + "@types/estree": "^1.0.0", + "estree-util-is-identifier-name": "^2.0.0", + "micromark-factory-mdx-expression": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0", + "vfile-message": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-md": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-1.0.1.tgz", + "integrity": "sha512-7MSuj2S7xjOQXAjjkbjBsHkMtb+mDGVW6uI2dBL9snOBCbZmoNgDAeZ0nSn9j3T42UE/g2xVNMn18PJxZvkBEA==", + "dependencies": { + "micromark-util-types": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-1.0.1.tgz", + "integrity": "sha512-7YA7hF6i5eKOfFUzZ+0z6avRG52GpWR8DL+kN47y3f2KhxbBZMhmxe7auOeaTBrW2DenbbZTf1ea9tA2hDpC2Q==", + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^1.0.0", + "micromark-extension-mdx-jsx": "^1.0.0", + "micromark-extension-mdx-md": "^1.0.0", + "micromark-extension-mdxjs-esm": "^1.0.0", + "micromark-util-combine-extensions": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-1.0.5.tgz", + "integrity": "sha512-xNRBw4aoURcyz/S69B19WnZAkWJMxHMT5hE36GtDAyhoyn/8TuAeqjFJQlwk+MKQsUD7b3l7kFX+vlfVWgcX1w==", + "dependencies": { + "@types/estree": "^1.0.0", + "micromark-core-commonmark": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-events-to-acorn": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "unist-util-position-from-estree": "^1.1.0", + "uvu": "^0.5.0", + "vfile-message": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-1.1.0.tgz", + "integrity": "sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-1.1.0.tgz", + "integrity": "sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-factory-mdx-expression": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-1.0.9.tgz", + "integrity": "sha512-jGIWzSmNfdnkJq05c7b0+Wv0Kfz3NJ3N4cBjnbO4zjXIlxJr+f8lk+5ZmwFvqdAbUy2q6B5rCY//g0QAAaXDWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-events-to-acorn": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "unist-util-position-from-estree": "^1.0.0", + "uvu": "^0.5.0", + "vfile-message": "^3.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", + "integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": 
"1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-1.1.0.tgz", + "integrity": "sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-1.1.0.tgz", + "integrity": "sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", + "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-1.1.0.tgz", + "integrity": "sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-1.1.0.tgz", + "integrity": "sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.1.0.tgz", + "integrity": "sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + 
"micromark-util-chunked": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.1.0.tgz", + "integrity": "sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-1.1.0.tgz", + "integrity": "sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-1.1.0.tgz", + "integrity": "sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-events-to-acorn": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-1.2.3.tgz", + "integrity": "sha512-ij4X7Wuc4fED6UoLWkmo0xJQhsktfNh1J0m8g4PbIMPlx+ek/4YdW5mvbye8z/aZvAPUoxgXHrwVlXAPKMRp1w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/acorn": "^4.0.0", + "@types/estree": "^1.0.0", + "@types/unist": "^2.0.0", + "estree-util-visit": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0", + "vfile-message": "^3.0.0" + } + }, + "node_modules/micromark-util-html-tag-name": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.2.0.tgz", + "integrity": "sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.1.0.tgz", + "integrity": "sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + 
"type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-1.1.0.tgz", + "integrity": "sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz", + "integrity": "sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-1.1.0.tgz", + "integrity": "sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", + "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + 
}, + "node_modules/mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dependencies": { + "mime-db": "~1.33.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", + "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mini-css-extract-plugin": { + "version": "2.7.6", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.7.6.tgz", + "integrity": "sha512-Qk7HcgaPkGG6eD77mLvZS1nmxlao3j+9PkrT9Uc7HAE1id3F41+DdBRYRYkbyfNRGzm8/YWtzhw7nVPmwhqTQw==", + "dependencies": { + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mri": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", + "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/mrmime": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-1.0.1.tgz", + "integrity": "sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/multicast-dns": { + 
"version": "7.2.5", + "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", + "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "dependencies": { + "dns-packet": "^5.2.2", + "thunky": "^1.0.2" + }, + "bin": { + "multicast-dns": "cli.js" + } + }, + "node_modules/nanoid": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" + }, + "node_modules/no-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", + "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", + "dependencies": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node_modules/node-emoji": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz", + "integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==", + "dependencies": { + "lodash": "^4.17.21" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-forge": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", + "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", + "engines": { + "node": ">= 6.13.0" + } + }, + "node_modules/node-releases": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", + "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/normalize-url": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", + "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nprogress": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", + "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==" + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.12.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", + "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz", + "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obuf": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", + "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + 
"engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/p-cancelable": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz", + "integrity": "sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/p-limit": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", + "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz", + "integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==", + "dependencies": { + "p-limit": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-retry": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", + "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "dependencies": { + "@types/retry": "0.12.0", + "retry": "^0.13.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": 
"sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-8.1.1.tgz", + "integrity": "sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==", + "dependencies": { + "got": "^12.1.0", + "registry-auth-token": "^5.0.1", + "registry-url": "^6.0.0", + "semver": "^7.3.7" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/param-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", + "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.1.tgz", + "integrity": "sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-numeric-range": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", + "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==" + }, + "node_modules/parse5": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", + "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", + "dependencies": { + "entities": "^4.4.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5-htmlparser2-tree-adapter": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz", + "integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==", + "dependencies": { + "domhandler": "^5.0.2", + "parse5": "^7.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" 
+ } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/pascal-case": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", + "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/path-browserify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", + "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==" + }, + "node_modules/path-exists": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", + "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + }, + "node_modules/path-to-regexp": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", + "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "dependencies": { + "isarray": "0.0.1" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/periscopic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz", + "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^3.0.0", + "is-reference": "^3.0.0" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": 
"https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", + "integrity": "sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==", + "dependencies": { + "find-up": "^6.3.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-up": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", + "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", + "dependencies": { + "find-up": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-up/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-up/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-calc": { + "version": "8.2.4", + "resolved": 
"https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz", + "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==", + "dependencies": { + "postcss-selector-parser": "^6.0.9", + "postcss-value-parser": "^4.2.0" + }, + "peerDependencies": { + "postcss": "^8.2.2" + } + }, + "node_modules/postcss-colormin": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz", + "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==", + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0", + "colord": "^2.9.1", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-convert-values": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz", + "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==", + "dependencies": { + "browserslist": "^4.21.4", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-comments": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz", + "integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-duplicates": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz", + "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-empty": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz", + "integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-overridden": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz", + "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-unused": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz", + "integrity": "sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==", + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-loader": { + "version": "7.3.3", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.3.tgz", + "integrity": 
"sha512-YgO/yhtevGO/vJePCQmTxiaEwER94LABZN0ZMT4A0vsak9TpO+RvKRs7EmJ8peIlB9xfXCsS7M8LjqncsUZ5HA==", + "dependencies": { + "cosmiconfig": "^8.2.0", + "jiti": "^1.18.2", + "semver": "^7.3.8" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "postcss": "^7.0.0 || ^8.0.1", + "webpack": "^5.0.0" + } + }, + "node_modules/postcss-loader/node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/postcss-merge-idents": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz", + "integrity": "sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==", + "dependencies": { + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-merge-longhand": { + "version": "5.1.7", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz", + "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "stylehacks": "^5.1.1" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-merge-rules": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz", + "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==", + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0", + "cssnano-utils": "^3.1.0", + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-font-values": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz", + "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-gradients": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz", + "integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==", + "dependencies": { + "colord": "^2.9.1", + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-params": { + 
"version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz", + "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==", + "dependencies": { + "browserslist": "^4.21.4", + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-selectors": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz", + "integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==", + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz", + "integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.3.tgz", + "integrity": "sha512-2/u2zraspoACtrbFRnTijMiQtb4GW4BvatjaG/bCjYQo8kLTdevCUlwuBHx2sCnSyrI3x3qj4ZK1j5LQBgzmwA==", + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-scope": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz", + "integrity": "sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==", + "dependencies": { + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-normalize-charset": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz", + "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-display-values": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz", + "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + 
"node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-positions": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz", + "integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-repeat-style": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz", + "integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-string": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz", + "integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-timing-functions": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz", + "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-unicode": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz", + "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==", + "dependencies": { + "browserslist": "^4.21.4", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-url": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz", + "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==", + "dependencies": { + "normalize-url": "^6.0.1", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-whitespace": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz", + "integrity": "sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-ordered-values": { + "version": "5.1.3", + "resolved": 
"https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz", + "integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==", + "dependencies": { + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-idents": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz", + "integrity": "sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-initial": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz", + "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==", + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-transforms": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz", + "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.13", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz", + "integrity": "sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-sort-media-queries": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", + "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", + "dependencies": { + "sort-css-media-queries": "2.1.0" + }, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "postcss": "^8.4.16" + } + }, + "node_modules/postcss-svgo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz", + "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "svgo": "^2.7.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-unique-selectors": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz", + "integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==", + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + 
"node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" + }, + "node_modules/postcss-zindex": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz", + "integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/pretty-error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", + "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "dependencies": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "node_modules/pretty-time": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", + "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/prism-react-renderer": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-1.3.5.tgz", + "integrity": "sha512-IJ+MSwBWKG+SM3b2SUfdrhC+gu01QkV2KmRQgREThBfSQRoufqRfxfHUxpG1WcaFjP+kojcFyO9Qqtpgt3qLCg==", + "peerDependencies": { + "react": ">=0.14.9" + } + }, + "node_modules/prismjs": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", + "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/promise": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz", + "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==", + "dependencies": { + "asap": "~2.0.3" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/property-information": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.3.0.tgz", + "integrity": "sha512-gVNZ74nqhRMiIUYWGQdosYetaKc83x8oT41a0LlV3AAFCAZwCpg4vmGkq8t34+cUhp3cnM4XDiU/7xlgK7HGrg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proto-list": { + "version": "1.2.4", 
+ "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-addr/node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" + }, + "node_modules/pupa": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.1.0.tgz", + "integrity": "sha512-FLpr4flz5xZTSJxSeaheeMKN/EDzMdK7b8PTOC6a5PYFKTucWbdqjgqaEyH0shFiSJrVB1+Qqi4Tk19ccU6Aug==", + "dependencies": { + "escape-goat": "^4.0.0" + }, + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pure-color": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/pure-color/-/pure-color-1.3.0.tgz", + "integrity": "sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA==" + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/queue": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", + "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==", + "dependencies": { + "inherits": "~2.0.3" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + 
"safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", + "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-body/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-loader": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/raw-loader/-/raw-loader-4.0.2.tgz", + "integrity": "sha512-ZnScIV3ag9A4wPX/ZayxL/jZH+euYb6FcUinPcgiQW0+UBtEv0O6Q3lGd3cqJ+GHH+rksEv3Pj99oxJ3u3VIKA==", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/raw-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/raw-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/raw-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/raw-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + 
"bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", + "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-base16-styling": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/react-base16-styling/-/react-base16-styling-0.6.0.tgz", + "integrity": "sha512-yvh/7CArceR/jNATXOKDlvTnPKPmGZz7zsenQ3jUwLzHkNUR0CvY3yGYJbWJ/nnxsL8Sgmt5cO3/SILVuPO6TQ==", + "dependencies": { + "base16": "^1.0.0", + "lodash.curry": "^4.0.1", + "lodash.flow": "^3.3.0", + "pure-color": "^1.2.0" + } + }, + "node_modules/react-dev-utils": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", + "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", + "dependencies": { + "@babel/code-frame": "^7.16.0", + "address": "^1.1.2", + "browserslist": "^4.18.1", + "chalk": "^4.1.2", + "cross-spawn": "^7.0.3", + "detect-port-alt": "^1.1.6", + "escape-string-regexp": "^4.0.0", + "filesize": "^8.0.6", + "find-up": "^5.0.0", + "fork-ts-checker-webpack-plugin": "^6.5.0", + "global-modules": "^2.0.0", + "globby": "^11.0.4", + "gzip-size": "^6.0.0", + "immer": "^9.0.7", + "is-root": "^2.1.0", + "loader-utils": "^3.2.0", + "open": "^8.4.0", + "pkg-up": "^3.1.0", + "prompts": "^2.4.2", + "react-error-overlay": "^6.0.11", + "recursive-readdir": "^2.2.2", + "shell-quote": "^1.7.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/react-dev-utils/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/loader-utils": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz", + "integrity": "sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw==", + "engines": { + "node": ">= 12.13.0" + } + }, + "node_modules/react-dev-utils/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": 
"sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/react-dev-utils/node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dom": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", + "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.0" + }, + "peerDependencies": { + "react": "^18.2.0" + } + }, + "node_modules/react-error-overlay": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", + "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==" + }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==" + }, + "node_modules/react-helmet-async": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", + "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", + "dependencies": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + }, + "peerDependencies": { + "react": "^16.6.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/react-lifecycles-compat": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz", + "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==" + }, + "node_modules/react-loadable": { + "name": "@docusaurus/react-loadable", + "version": "5.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "dependencies": { + "@types/react": "*", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-loadable-ssr-addon-v5-slorber": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", + "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", + "dependencies": { + "@babel/runtime": "^7.10.3" + }, + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "react-loadable": "*", + "webpack": ">=4.41.1 || 5.x" + } + }, + "node_modules/react-router": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", + "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "hoist-non-react-statics": "^3.1.0", + "loose-envify": "^1.3.1", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.2", + "react-is": "^16.6.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/react-router-config": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", + "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", + "dependencies": { + "@babel/runtime": "^7.1.2" + }, + "peerDependencies": { + "react": ">=15", + "react-router": ">=5" + } + }, + "node_modules/react-router-dom": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", + "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.2", + "react-router": "5.3.4", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/react-textarea-autosize": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.3.4.tgz", + "integrity": "sha512-CdtmP8Dc19xL8/R6sWvtknD/eCXkQr30dtvC4VmGInhRsfF8X/ihXCq6+9l9qbxmKRiq407/7z5fxE7cVWQNgQ==", + "dependencies": { + "@babel/runtime": "^7.10.2", + "use-composed-ref": "^1.3.0", + "use-latest": "^1.2.1" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dependencies": { + "picomatch": "^2.2.1" + 
}, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/reading-time": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz", + "integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==" + }, + "node_modules/rechoir": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", + "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", + "dependencies": { + "resolve": "^1.1.6" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/recursive-readdir": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", + "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", + "dependencies": { + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", + "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", + "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==" + }, + "node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/registry-auth-token": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.0.2.tgz", + "integrity": "sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==", + "dependencies": { + "@pnpm/npm-conf": "^2.1.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/registry-url": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-6.0.1.tgz", + "integrity": "sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==", + "dependencies": { + "rc": "1.2.8" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/rehype-raw": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-6.1.1.tgz", + "integrity": "sha512-d6AKtisSRtDRX4aSPsJGTfnzrX2ZkHQLE5kiUuGOeEoLpbEulFF4hj0mLPbsa+7vmguDKOVVEQdHKDSwoaIDsQ==", + "dependencies": { + "@types/hast": "^2.0.0", + "hast-util-raw": "^7.2.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/relateurl": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", + "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remark-directive": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/remark-directive/-/remark-directive-2.0.1.tgz", + "integrity": "sha512-oosbsUAkU/qmUE78anLaJePnPis4ihsE7Agp0T/oqTzvTea8pOiaYEtfInU/+xMOVTS9PN5AhGOiaIVe4GD8gw==", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-directive": "^2.0.0", + "micromark-extension-directive": "^2.0.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-emoji": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-2.2.0.tgz", + "integrity": "sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w==", + "dependencies": { + "emoticon": "^3.2.0", + "node-emoji": "^1.10.0", + "unist-util-visit": "^2.0.3" + } + }, + "node_modules/remark-gfm": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-3.0.1.tgz", + "integrity": "sha512-lEFDoi2PICJyNrACFOfDD3JlLkuSbOa5Wd8EPt06HUdptv8Gn0bxYTdbU/XXQ3swAPkEaGxxPN9cbnMHvVu1Ig==", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-gfm": "^2.0.0", + "micromark-extension-gfm": "^2.0.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-2.3.0.tgz", + "integrity": "sha512-g53hMkpM0I98MU266IzDFMrTD980gNF3BJnkyFcmN+dD873mQeD5rdMO3Y2X+x8umQfbSE0PcoEDl7ledSA+2g==", + "dependencies": { + "mdast-util-mdx": "^2.0.0", + "micromark-extension-mdxjs": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "10.0.2", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-10.0.2.tgz", + "integrity": "sha512-3ydxgHa/ZQzG8LvC7jTXccARYDcRld3VfcgIIFs7bI6vbRSxJJmzgLEIIoYKyrfhaY+ujuWaf/PJiMZXoiCXgw==", + "dependencies": { + "@types/mdast": "^3.0.0", + 
"mdast-util-from-markdown": "^1.0.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-10.1.0.tgz", + "integrity": "sha512-EFmR5zppdBp0WQeDVZ/b66CWJipB2q2VLNFMabzDSGR66Z2fQii83G5gTBbgGEnEEA0QRussvrFHxk1HWGJskw==", + "dependencies": { + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "mdast-util-to-hast": "^12.1.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/renderkid": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", + "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "dependencies": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "node_modules/renderkid/node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/renderkid/node_modules/dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "funding": [ + 
"https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-like": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", + "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", + "engines": { + "node": "*" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, + "node_modules/resolve": { + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pathname": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", + "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" + }, + "node_modules/responselike": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", + "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", + "dependencies": { + "lowercase-keys": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": 
"sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rtl-detect": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.0.4.tgz", + "integrity": "sha512-EBR4I2VDSSYr7PkBmFy04uhycIpDKp+21p/jARYXlCSjQksTBQcJ0HFUPOO79EPPH5JS6VAhiIQbycf0O3JAxQ==" + }, + "node_modules/rtlcss": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.1.1.tgz", + "integrity": "sha512-/oVHgBtnPNcggP2aVXQjSy6N1mMAfHg4GSag0QtZBlD5bdDgAHwr4pydqJGd+SUCu9260+Pjqbjwtvu7EMH1KQ==", + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0", + "postcss": "^8.4.21", + "strip-json-comments": "^3.1.1" + }, + "bin": { + "rtlcss": "bin/rtlcss.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/sade": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz", + "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==", + "dependencies": { + "mri": "^1.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/sass": { + "version": "1.69.3", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.69.3.tgz", + "integrity": "sha512-X99+a2iGdXkdWn1akFPs0ZmelUzyAQfvqYc2P/MPTrJRuIRoTffGzT9W9nFqG00S+c8hXzVmgxhUuHFdrwxkhQ==", + "dependencies": { + "chokidar": ">=3.0.0 <4.0.0", + "immutable": "^4.0.0", + "source-map-js": ">=0.6.2 <2.0.0" + }, + "bin": { + "sass": "sass.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-loader": { + "version": "10.4.1", + "resolved": "https://registry.npmjs.org/sass-loader/-/sass-loader-10.4.1.tgz", + "integrity": "sha512-aX/iJZTTpNUNx/OSYzo2KsjIUQHqvWsAhhUijFjAPdZTEhstjZI9zTNvkTTwsx+uNUJqUwOw5gacxQMx4hJxGQ==", + "dependencies": { + "klona": "^2.0.4", + 
"loader-utils": "^2.0.0", + "neo-async": "^2.6.2", + "schema-utils": "^3.0.0", + "semver": "^7.3.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "fibers": ">= 3.1.0", + "node-sass": "^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0", + "sass": "^1.3.0", + "webpack": "^4.36.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "fibers": { + "optional": true + }, + "node-sass": { + "optional": true + }, + "sass": { + "optional": true + } + } + }, + "node_modules/sass-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/sass-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/sass-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/sass-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/sax": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.3.0.tgz", + "integrity": "sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA==" + }, + "node_modules/scheduler": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/section-matter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", + "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "dependencies": { + 
"extend-shallow": "^2.0.1", + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/select-hose": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", + "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==" + }, + "node_modules/selfsigned": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.1.1.tgz", + "integrity": "sha512-GSL3aowiF7wa/WtSFwnUrludWFoNhftq8bUkH9pkzjpN2XSPOAYEgg6e0sS9s0rZwgJzJiQRPU18A6clnoW5wQ==", + "dependencies": { + "node-forge": "^1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz", + "integrity": "sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semver/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/send": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", + "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/send/node_modules/range-parser": { + "version": "1.2.1", + "resolved": 
"https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz", + "integrity": "sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-handler": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", + "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", + "dependencies": { + "bytes": "3.0.0", + "content-disposition": "0.5.2", + "fast-url-parser": "1.1.3", + "mime-types": "2.1.18", + "minimatch": "3.1.2", + "path-is-inside": "1.0.2", + "path-to-regexp": "2.2.1", + "range-parser": "1.2.0" + } + }, + "node_modules/serve-handler/node_modules/path-to-regexp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", + "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" + }, + "node_modules/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", + "dependencies": { + "accepts": "~1.3.4", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.6.2", + "mime-types": "~2.1.17", + "parseurl": "~1.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/serve-index/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/serve-index/node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + }, + "node_modules/serve-index/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/serve-index/node_modules/setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + 
"integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + }, + "node_modules/serve-index/node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", + "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setimmediate": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", + "integrity": "sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==" + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", + "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/shelljs": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", + "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", + "dependencies": { + "glob": "^7.0.0", + "interpret": "^1.0.0", + "rechoir": "^0.6.2" + }, + "bin": { + "shjs": "bin/shjs" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": 
"sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "node_modules/sirv": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-2.0.3.tgz", + "integrity": "sha512-O9jm9BsID1P+0HOi81VpXPoDxYP374pkOLzACAoyUQ/3OUVndNpsz6wMnY2z+yOxzbllCKZrM+9QrWsv4THnyA==", + "dependencies": { + "@polka/url": "^1.0.0-next.20", + "mrmime": "^1.0.0", + "totalist": "^3.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/sitemap": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.1.tgz", + "integrity": "sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==", + "dependencies": { + "@types/node": "^17.0.5", + "@types/sax": "^1.2.1", + "arg": "^5.0.0", + "sax": "^1.2.4" + }, + "bin": { + "sitemap": "dist/cli.js" + }, + "engines": { + "node": ">=12.0.0", + "npm": ">=5.6.0" + } + }, + "node_modules/sitemap/node_modules/@types/node": { + "version": "17.0.45", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", + "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/sockjs": { + "version": "0.3.24", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + "dependencies": { + "faye-websocket": "^0.11.3", + "uuid": "^8.3.2", + "websocket-driver": "^0.7.4" + } + }, + "node_modules/sort-css-media-queries": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz", + "integrity": "sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA==", + "engines": { + "node": ">= 6.3.0" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": 
"https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/spdy": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "dependencies": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/spdy-transport": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "dependencies": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "node_modules/srcset": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/srcset/-/srcset-4.0.0.tgz", + "integrity": "sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/stable": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", + "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==", + "deprecated": "Modern JS already guarantees Array#sort() is a stable sort, so this library is deprecated. 
See the compatibility table on MDN: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/sort#browser_compatibility" + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/std-env": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.4.3.tgz", + "integrity": "sha512-f9aPhy8fYBuMN+sNfakZV18U39PbalgjXG3lLB9WkaYTxijru61wb57V9wxxNthXM5Sd88ETBWi29qLAsHO52Q==" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.3.tgz", + "integrity": "sha512-BP9nNHMhhfcMbiuQKCqMjhDP5yBCAxsPu4pHFFzJ6Alo9dZgY4VLDPutXqIjpRiMoKdp7Av85Gr73Q5uH9k7+g==", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "dependencies": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom-string": { + 
"version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-object": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.2.tgz", + "integrity": "sha512-1JGpfPB3lo42ZX8cuPrheZbfQ6kqPPnPHlKMyeRYtfKD+0jG+QsXgXN57O/dvJlzlB2elI6dGmrPnl5VPQFPaA==", + "dependencies": { + "inline-style-parser": "0.1.1" + } + }, + "node_modules/stylehacks": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz", + "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==", + "dependencies": { + "browserslist": "^4.21.4", + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-parser": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", + "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" + }, + "node_modules/svgo": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz", + "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", + "dependencies": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^4.1.3", + "css-tree": "^1.1.3", + "csso": "^4.2.0", + "picocolors": "^1.0.0", + "stable": "^0.1.8" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/svgo/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + 
"node_modules/svgo/node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/svgo/node_modules/dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/svgo/node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/svgo/node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/svgo/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/terser": { + "version": "5.21.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.21.0.tgz", + "integrity": "sha512-WtnFKrxu9kaoXuiZFSGrcAvvBqAdmKx0SFNmVNYdJamMu9yyN3I/QF0FbH4QcqJQ+y1CJnzxGIKH0cSj+FGYRw==", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.9", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.9.tgz", + "integrity": "sha512-ZuXsqE07EcggTWQjXUj+Aot/OMcD0bMKGgF63f7UxYcu5/AJF53aIpK1YoP5xR9l6s/Hy2b+t1AM0bLNPRuhwA==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.17", + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.1", + "terser": "^5.16.8" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + 
"peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/terser-webpack-plugin/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/terser-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" + }, + "node_modules/thunky": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", + "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" + }, + "node_modules/tiny-invariant": { + "version": 
"1.3.1", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.1.tgz", + "integrity": "sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/totalist": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", + "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz", + "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + }, + "node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + 
"node_modules/type-is/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/typescript": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", + "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/ua-parser-js": { + "version": "1.0.36", + "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.36.tgz", + "integrity": "sha512-znuyCIXzl8ciS3+y3fHJI/2OhQIXbXw9MWC/o3qwyR+RGppjZHrM27CGFSKCJXi2Kctiz537iOu2KnXs1lMQhw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/ua-parser-js" + }, + { + "type": "paypal", + "url": "https://paypal.me/faisalman" + }, + { + "type": "github", + "url": "https://github.com/sponsors/faisalman" + } + ], + "engines": { + "node": "*" + } + }, + "node_modules/undici-types": { + "version": "5.25.3", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.25.3.tgz", + "integrity": "sha512-Ga1jfYwRn7+cP9v8auvEXN1rX3sWqlayd4HP7OKk4mZWylEmu3KzXDUGrQUN6Ol7qo1gPvB2e5gX6udnyEPgdA==" + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unified": { + "version": "10.1.2", + "resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz", + "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==", + "dependencies": { + "@types/unist": "^2.0.0", + "bail": "^2.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unique-string": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-3.0.0.tgz", + "integrity": "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==", + "dependencies": { + "crypto-random-string": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unist-util-generated": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz", + "integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz", + "integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==", + "dependencies": { + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz", + "integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==", + "dependencies": { + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position-from-estree": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-1.1.2.tgz", + "integrity": "sha512-poZa0eXpS+/XpoQwGwl79UUdea4ol2ZuCYguVaJS4qzIOMDzbqz8a3erUCOmubSZkaOuGamb3tX790iwOIROww==", + "dependencies": { + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-remove-position": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-4.0.2.tgz", + "integrity": "sha512-TkBb0HABNmxzAcfLf4qsIbFbaPDvMO6wa3b3j4VcEzFVaw1LBKwnW4/sRJ/atSLSzoIg41JWEdnE7N6DIhGDGQ==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-visit": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-remove-position/node_modules/unist-util-visit": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", + 
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.1.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", + "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", + "dependencies": { + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", + "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0", + "unist-util-visit-parents": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz", + "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit/node_modules/unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit/node_modules/unist-util-visit-parents": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", + "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", + "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "funding": [ + { + "type": "opencollective", + "url": 
"https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/update-notifier": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-6.0.2.tgz", + "integrity": "sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==", + "dependencies": { + "boxen": "^7.0.0", + "chalk": "^5.0.1", + "configstore": "^6.0.0", + "has-yarn": "^3.0.0", + "import-lazy": "^4.0.0", + "is-ci": "^3.0.1", + "is-installed-globally": "^0.4.0", + "is-npm": "^6.0.0", + "is-yarn-global": "^0.4.0", + "latest-version": "^7.0.0", + "pupa": "^3.1.0", + "semver": "^7.3.7", + "semver-diff": "^4.0.0", + "xdg-basedir": "^5.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/yeoman/update-notifier?sponsor=1" + } + }, + "node_modules/update-notifier/node_modules/boxen": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.1.1.tgz", + "integrity": "sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^7.0.1", + "chalk": "^5.2.0", + "cli-boxes": "^3.0.0", + "string-width": "^5.1.2", + "type-fest": "^2.13.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/camelcase": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", + "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/chalk": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", + "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/uri-js/node_modules/punycode": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", + "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/url-loader": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", + "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", + "dependencies": { + "loader-utils": "^2.0.0", + "mime-types": "^2.1.27", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { 
+ "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "file-loader": "*", + "webpack": "^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "file-loader": { + "optional": true + } + } + }, + "node_modules/url-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/url-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/url-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/url-loader/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/use-composed-ref": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz", + "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/use-isomorphic-layout-effect": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", + "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-latest": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.2.1.tgz", + "integrity": 
"sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", + "dependencies": { + "use-isomorphic-layout-effect": "^1.1.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/utila": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", + "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==" + }, + "node_modules/utility-types": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz", + "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/uvu": { + "version": "0.5.6", + "resolved": "https://registry.npmjs.org/uvu/-/uvu-0.5.6.tgz", + "integrity": "sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==", + "dependencies": { + "dequal": "^2.0.0", + "diff": "^5.0.0", + "kleur": "^4.0.3", + "sade": "^1.7.3" + }, + "bin": { + "uvu": "bin.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/uvu/node_modules/kleur": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", + "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/value-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", + "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vfile": { + "version": "5.3.7", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz", + "integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==", + "dependencies": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^3.0.0", + "vfile-message": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-4.1.0.tgz", + "integrity": 
"sha512-YF23YMyASIIJXpktBa4vIGLJ5Gs88UB/XePgqPmTa7cDA+JeO3yclbpheQYCHjVHBn/yePzrXuygIL+xbvRYHw==", + "dependencies": { + "@types/unist": "^2.0.0", + "vfile": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz", + "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/wait-on": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-7.0.1.tgz", + "integrity": "sha512-9AnJE9qTjRQOlTZIldAaf/da2eW0eSRSgcqq85mXQja/DW3MriHxkpODDSUEg+Gri/rKEcXUZHe+cevvYItaog==", + "dependencies": { + "axios": "^0.27.2", + "joi": "^17.7.0", + "lodash": "^4.17.21", + "minimist": "^1.2.7", + "rxjs": "^7.8.0" + }, + "bin": { + "wait-on": "bin/wait-on" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/watchpack": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", + "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/wbuf": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", + "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "dependencies": { + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/webpack": { + "version": "5.88.2", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.88.2.tgz", + "integrity": "sha512-JmcgNZ1iKj+aiR0OvTYtWQqJwq37Pf683dY9bVORwVbUrDhLhdn/PlO2sHsFHPkj7sHNQF3JwaAkp49V+Sq1tQ==", + "dependencies": { + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^1.0.0", + "@webassemblyjs/ast": "^1.11.5", + "@webassemblyjs/wasm-edit": "^1.11.5", + "@webassemblyjs/wasm-parser": "^1.11.5", + "acorn": "^8.7.1", + "acorn-import-assertions": "^1.9.0", + "browserslist": "^4.14.5", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.15.0", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.9", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.2.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.7", + "watchpack": "^2.4.0", + "webpack-sources": "^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-bundle-analyzer": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.9.1.tgz", + "integrity": "sha512-jnd6EoYrf9yMxCyYDPj8eutJvtjQNp8PHmni/e/ulydHBWhT5J3menXt3HEkScsu9YqMAcG4CfFjs3rj5pVU1w==", + "dependencies": { + "@discoveryjs/json-ext": "0.5.7", + "acorn": "^8.0.4", + "acorn-walk": "^8.0.0", + "commander": "^7.2.0", + "escape-string-regexp": "^4.0.0", + "gzip-size": "^6.0.0", + "is-plain-object": "^5.0.0", + "lodash.debounce": "^4.0.8", + "lodash.escape": "^4.0.1", + "lodash.flatten": "^4.4.0", + "lodash.invokemap": "^4.6.0", + "lodash.pullall": "^4.2.0", + "lodash.uniqby": "^4.7.0", + "opener": "^1.5.2", + "picocolors": "^1.0.0", + "sirv": "^2.0.3", + "ws": "^7.3.1" + }, + "bin": { + "webpack-bundle-analyzer": "lib/bin/analyzer.js" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/webpack-bundle-analyzer/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/webpack-dev-middleware": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", + "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "dependencies": { + "colorette": "^2.0.10", + "memfs": "^3.4.3", + "mime-types": "^2.1.31", + "range-parser": "^1.2.1", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-server": { + "version": "4.15.1", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.1.tgz", + "integrity": "sha512-5hbAst3h3C3L8w6W4P96L5vaV0PxSmJhxZvWKYIdgxOQm8pNZ5dEOmmSLBVpP85ReeyRt6AS1QJNyo/oFFPeVA==", + "dependencies": { + "@types/bonjour": "^3.5.9", + "@types/connect-history-api-fallback": "^1.3.5", + "@types/express": "^4.17.13", + "@types/serve-index": "^1.9.1", + "@types/serve-static": "^1.13.10", + "@types/sockjs": "^0.3.33", + 
"@types/ws": "^8.5.5", + "ansi-html-community": "^0.0.8", + "bonjour-service": "^1.0.11", + "chokidar": "^3.5.3", + "colorette": "^2.0.10", + "compression": "^1.7.4", + "connect-history-api-fallback": "^2.0.0", + "default-gateway": "^6.0.3", + "express": "^4.17.3", + "graceful-fs": "^4.2.6", + "html-entities": "^2.3.2", + "http-proxy-middleware": "^2.0.3", + "ipaddr.js": "^2.0.1", + "launch-editor": "^2.6.0", + "open": "^8.0.9", + "p-retry": "^4.5.0", + "rimraf": "^3.0.2", + "schema-utils": "^4.0.0", + "selfsigned": "^2.1.1", + "serve-index": "^1.9.1", + "sockjs": "^0.3.24", + "spdy": "^4.0.2", + "webpack-dev-middleware": "^5.3.1", + "ws": "^8.13.0" + }, + "bin": { + "webpack-dev-server": "bin/webpack-dev-server.js" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.37.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + }, + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-dev-server/node_modules/ws": { + "version": "8.14.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.14.2.tgz", + "integrity": "sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/webpack-merge": { + "version": "5.9.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.9.0.tgz", + "integrity": "sha512-6NbRQw4+Sy50vYNTw7EyOn41OZItPiXB8GNv3INSoe3PSFaHJEz3SHTrYVaRm2LilNGnFUzh0FAwqPEmU/CwDg==", + "dependencies": { + "clone-deep": "^4.0.1", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/webpack/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/webpack/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/webpack/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": 
"sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpackbar": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", + "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==", + "dependencies": { + "chalk": "^4.1.0", + "consola": "^2.15.3", + "pretty-time": "^1.1.0", + "std-env": "^3.0.1" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "webpack": "3 || 4 || 5" + } + }, + "node_modules/websocket-driver": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "dependencies": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/websocket-extensions": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "dependencies": { + "string-width": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", + "integrity": 
"sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==" + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/ws": { + "version": "7.5.9", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz", + "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xdg-basedir": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-5.1.0.tgz", + "integrity": "sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/xml-js": { + "version": "1.6.11", + "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", + "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", + "dependencies": { + "sax": "^1.2.4" + }, + "bin": { + 
"xml-js": "bin/cli.js" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 0000000..410b4b2 --- /dev/null +++ b/package.json @@ -0,0 +1,51 @@ +{ + "name": "kubernetes-guide", + "version": "0.0.0", + "private": true, + "scripts": { + "docusaurus": "docusaurus", + "start": "docusaurus start", + "build": "docusaurus build", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear", + "serve": "docusaurus serve", + "write-translations": "docusaurus write-translations", + "write-heading-ids": "docusaurus write-heading-ids", + "typecheck": "tsc" + }, + "dependencies": { + "@docusaurus/core": "3.0.0-beta.0", + "@docusaurus/preset-classic": "3.0.0-beta.0", + "@giscus/react": "^2.3.0", + "@mdx-js/react": "^2.3.0", + "clsx": "^1.2.1", + "docusaurus-plugin-sass": "^0.2.5", + "path-browserify": "^1.0.1", + "prism-react-renderer": "^1.3.5", + "raw-loader": "^4.0.2", + "react": "^18.0.0", + "react-dom": "^18.0.0", + "sass": "^1.69.3" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "3.0.0-beta.0", + "@docusaurus/tsconfig": "3.0.0-beta.0", + "typescript": "~5.2.2" + }, + "browserslist": { + "production": [ + ">0.5%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 3 chrome version", + "last 3 firefox version", + "last 5 safari version" + ] + }, + "engines": { + "node": ">=16.14" + } +} diff --git a/src/components/Comment.tsx b/src/components/Comment.tsx new file mode 100644 index 0000000..46589c7 --- /dev/null +++ b/src/components/Comment.tsx @@ -0,0 +1,48 @@ +import React from 'react' +import { useThemeConfig, useColorMode } from '@docusaurus/theme-common' +import BrowserOnly from '@docusaurus/BrowserOnly' +import Giscus, { GiscusProps } from '@giscus/react' +import { useLocation } from '@docusaurus/router'; + +const defaultConfig: Partial = { + id: 'comments', + mapping: 'specific', + reactionsEnabled: '1', + emitMetadata: '0', + inputPosition: 'top', + loading: 'lazy', + strict: '0', + lang: 'zh-CN', +} + +export default function Comment(): JSX.Element { + const themeConfig = useThemeConfig() + + // merge default config + const giscus = { ...defaultConfig, ...themeConfig.giscus } + + if (!giscus.repo || !giscus.repoId || !giscus.categoryId) { + throw new Error( + 'You must provide `repo`, `repoId`, and `categoryId` to 
diff --git a/src/components/FileBlock.tsx b/src/components/FileBlock.tsx
new file mode 100644
index 0000000..7269099
--- /dev/null
+++ b/src/components/FileBlock.tsx
@@ -0,0 +1,56 @@
+import React from 'react';
+import CodeBlock from '@theme/CodeBlock';
+import { useLocation } from '@docusaurus/router';
+import * as path from 'path-browserify';
+import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
+
+let extToLang = new Map([
+  ["sh", "bash"],
+  ["yml", "yaml"]
+]);
+
+export default function FileBlock({ file, showFileName, ...prop }: { file: string, showFileName?: boolean }) {
+  // get url path without "/" prefix and suffix
+  var urlPath = useLocation().pathname.replace(/^\/|\/$/g, '');
+
+  // remove locale prefix in urlPath
+  const { i18n } = useDocusaurusContext()
+  if (i18n.currentLocale != i18n.defaultLocale) {
+    urlPath = urlPath.replace(/^[^\/]*\/?/g, '')
+  }
+
+  // find file content according to topPath and file path param
+  var filepath = ""
+  if (file.startsWith("@site/")) {
+    filepath = file.replace(/^@site\//g, '')
+  } else {
+    filepath = "codeblock/" + file
+  }
+
+  // load file raw content according to filepath
+  var content = require('!!raw-loader!@site/' + filepath)?.default
+  content = content.replace(/\t/g, "  "); // replace tab with 2 spaces
+
+  // infer language of code block based on filename extension if language is not set
+  const filename = path.basename(file);
+  if (!prop.language) {
+    var language = path.extname(filename).replace(/^\./, '')
+    const langMappingName = extToLang.get(language)
+    if (langMappingName) {
+      language = langMappingName
+    }
+    prop.language = language
+  }
+
+  // set title to filename if showFileName is set and title is not set
+  if (!prop.title && showFileName) {
+    prop.title = filename
+  }
+
+  return (
+    <CodeBlock {...prop}>
+      {content}
+    </CodeBlock>
+  );
+}
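`FileBlock` turns a checked-in source file into a highlighted code block: the `file` prop resolves under the site's `codeblock/` directory unless prefixed with `@site/`, the language is inferred from the file extension (with `sh` mapped to `bash` and `yml` to `yaml`), and `showFileName` uses the filename as the block title. A usage sketch inside an `.mdx` page, assuming a hypothetical `codeblock/nginx.yaml` exists:

```tsx
import FileBlock from '@site/src/components/FileBlock';

// Renders codeblock/nginx.yaml as a "yaml" code block titled "nginx.yaml"
// (language inferred from the extension, title from showFileName).
<FileBlock file="nginx.yaml" showFileName />

// Equivalent explicit form, addressed from the site root:
<FileBlock file="@site/codeblock/nginx.yaml" showFileName />
```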
diff --git a/src/css/custom.scss b/src/css/custom.scss
new file mode 100644
index 0000000..e558f64
--- /dev/null
+++ b/src/css/custom.scss
@@ -0,0 +1,59 @@
+/**
+ * Any CSS included here will be global. The classic template
+ * bundles Infima by default. Infima is a CSS framework designed to
+ * work well for content-centric websites.
+ */
+
+/* You can override the default Infima variables here. */
+:root {
+  --ifm-color-primary: #2e8555;
+  --ifm-color-primary-dark: #29784c;
+  --ifm-color-primary-darker: #277148;
+  --ifm-color-primary-darkest: #205d3b;
+  --ifm-color-primary-light: #33925d;
+  --ifm-color-primary-lighter: #359962;
+  --ifm-color-primary-lightest: #3cad6e;
+  --ifm-code-font-size: 95%;
+  --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
+
+  // Color of the "favorite" icon
+  --site-color-svg-icon-favorite: #e9669e;
+}
+
+/* For readability concerns, you should choose a lighter palette in dark mode. */
+[data-theme='dark'] {
+  --ifm-color-primary: #25c2a0;
+  --ifm-color-primary-dark: #21af90;
+  --ifm-color-primary-darker: #1fa588;
+  --ifm-color-primary-darkest: #1a8870;
+  --ifm-color-primary-light: #29d5b0;
+  --ifm-color-primary-lighter: #32d8b4;
+  --ifm-color-primary-lightest: #4fddbf;
+  --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
+}
+
+/* Styles for highlighted lines in code blocks */
+.code-block-highlighted-line {
+  background-color: rgb(72, 77, 91);
+  span[class*='codeLineNumber'] {
+    background-color: rgb(72, 77, 91);
+  }
+}
+.code-block-add-line {
+  background-color: #213227;
+  span[class*='codeLineNumber'] {
+    background-color: #213227;
+  }
+}
+.code-block-update-line {
+  background-color: #362d1e;
+  span[class*='codeLineNumber'] {
+    background-color: #362d1e;
+  }
+}
+.code-block-error-line {
+  background-color: #ff000020;
+  span[class*='codeLineNumber'] {
+    background-color: #ff000020;
+  }
+}
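The `.code-block-*-line` rules above style whole lines inside code blocks, but a line only receives one of these classes if something assigns it. In Docusaurus that is normally done through `themeConfig.prism.magicComments`; the wiring below is an assumption about how these classes could be used, not part of this commit, and the marker names such as `add-next-line` are illustrative:

```ts
// docusaurus.config.ts (sketch, not in this commit): map magic comments in
// code blocks to the line classes defined in src/css/custom.scss.
export default {
  themeConfig: {
    prism: {
      magicComments: [
        // Overriding magicComments replaces the default, so a highlight
        // entry has to be re-added explicitly.
        {
          className: 'code-block-highlighted-line',
          line: 'highlight-next-line',
          block: { start: 'highlight-start', end: 'highlight-end' },
        },
        { className: 'code-block-add-line', line: 'add-next-line' },
        { className: 'code-block-update-line', line: 'update-next-line' },
        { className: 'code-block-error-line', line: 'error-next-line' },
      ],
    },
  },
};
```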
diff --git a/src/theme/DocItem/Layout/index.tsx b/src/theme/DocItem/Layout/index.tsx
new file mode 100644
index 0000000..e79caa3
--- /dev/null
+++ b/src/theme/DocItem/Layout/index.tsx
@@ -0,0 +1,70 @@
+import React from 'react';
+import clsx from 'clsx';
+import { useWindowSize } from '@docusaurus/theme-common';
+import { useDoc } from '@docusaurus/theme-common/internal';
+import DocItemPaginator from '@theme/DocItem/Paginator';
+import DocVersionBanner from '@theme/DocVersionBanner';
+import DocVersionBadge from '@theme/DocVersionBadge';
+import DocItemFooter from '@theme/DocItem/Footer';
+import DocItemTOCMobile from '@theme/DocItem/TOC/Mobile';
+import DocItemTOCDesktop from '@theme/DocItem/TOC/Desktop';
+import DocItemContent from '@theme/DocItem/Content';
+import DocBreadcrumbs from '@theme/DocBreadcrumbs';
+import Unlisted from '@theme/Unlisted';
+import type { Props } from '@theme/DocItem/Layout';
+
+import styles from './styles.module.css';
+import Comment from '../../../components/Comment';
+
+/**
+ * Decide if the toc should be rendered, on mobile or desktop viewports
+ */
+function useDocTOC() {
+  const { frontMatter, toc } = useDoc();
+  const windowSize = useWindowSize();
+
+  const hidden = frontMatter.hide_table_of_contents;
+  const canRender = !hidden && toc.length > 0;
+
+  const mobile = canRender ? <DocItemTOCMobile /> : undefined;
+
+  const desktop =
+    canRender && (windowSize === 'desktop' || windowSize === 'ssr') ? (
+      <DocItemTOCDesktop />
+    ) : undefined;
+
+  return {
+    hidden,
+    mobile,
+    desktop,
+  };
+}
+
+export default function DocItemLayout({ children }: Props): JSX.Element {
+  const docTOC = useDocTOC();
+  const { frontMatter } = useDoc();
+  const { hide_comment: hideComment } = frontMatter;
+  const {
+    metadata: { unlisted },
+  } = useDoc();
+  return (
+    <div className="row">
+      <div className={clsx('col', !docTOC.hidden && styles.docItemCol)}>
+        {unlisted && <Unlisted />}
+        <DocVersionBanner />
+        <div className={styles.docItemContainer}>
+          <article>
+            <DocBreadcrumbs />
+            <DocVersionBadge />
+            {docTOC.mobile}
+            <DocItemContent>{children}</DocItemContent>
+            <DocItemFooter />
+          </article>
+          <DocItemPaginator />
+        </div>
+        {!hideComment && <Comment />}
+      </div>
+      {docTOC.desktop && <div className="col col--3">{docTOC.desktop}</div>}
+    </div>
+  );
+}
diff --git a/src/theme/DocItem/Layout/styles.module.css b/src/theme/DocItem/Layout/styles.module.css
new file mode 100644
index 0000000..d5aaec1
--- /dev/null
+++ b/src/theme/DocItem/Layout/styles.module.css
@@ -0,0 +1,10 @@
+.docItemContainer header + *,
+.docItemContainer article > *:first-child {
+  margin-top: 0;
+}
+
+@media (min-width: 997px) {
+  .docItemCol {
+    max-width: 75% !important;
+  }
+}
diff --git a/static/img/logo.svg b/static/img/logo.svg
new file mode 100644
index 0000000..9a61c19
--- /dev/null
+++ b/static/img/logo.svg
@@ -0,0 +1,91 @@
[91 lines of SVG markup not recoverable from this extraction; only the embedded metadata survives: title "Kubernetes logo with no border", description ""kubectl" is pronounced "kyoob kuttel"".]
diff --git a/tsconfig.json b/tsconfig.json
new file mode 100644
index 0000000..314eab8
--- /dev/null
+++ b/tsconfig.json
@@ -0,0 +1,7 @@
+{
+  // This file is not used in compilation. It is here just for a nice editor experience.
+  "extends": "@docusaurus/tsconfig",
+  "compilerOptions": {
+    "baseUrl": "."
+  }
+}
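A closing note on the `src/theme/DocItem/Layout` override earlier in this commit: Docusaurus theme shadowing gives any file under `src/theme/` precedence over the `@theme/` component at the same path, so that layout replaces the stock one site-wide. Its only functional change is rendering `<Comment />` unless a page's front matter sets `hide_comment`. A sketch of a page opting out (the page itself is hypothetical):

```mdx
---
title: Scratch notes
hide_comment: true
---

This page renders without the giscus comment block, because the shadowed
DocItem/Layout checks `frontMatter.hide_comment` before mounting `Comment`.
```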