From 70c1f8b1720c94997f5b7e76d22bdb7229b8e87f Mon Sep 17 00:00:00 2001
From: gjmzj
Date: Thu, 30 May 2019 21:37:01 +0800
Subject: [PATCH] =?UTF-8?q?=E8=B0=83=E6=95=B4ex-lb=E7=9A=84=E6=B5=81?= =?UTF-8?q?=E7=A8=8B?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 99.clean.yml                                  | 23 ++++---
 example/hosts.allinone                        |  6 +-
 example/hosts.multi-node                      |  6 +-
 roles/ex-lb/clean-ex-lb.yml                   | 22 +++----
 roles/ex-lb/ex-lb.yml                         |  2 +-
 roles/ex-lb/tasks/main.yml                    | 13 +++-
 roles/ex-lb/templates/haproxy.cfg.j2          | 39 +++++++++--
 .../ex-lb/templates/keepalived-backup.conf.j2 |  2 +-
 .../ex-lb/templates/keepalived-master.conf.j2 |  2 +-
 roles/kube-master/tasks/main.yml              |  7 +-
 .../templates/kubernetes-csr.json.j2          |  8 ++-
 roles/kube-node/tasks/node_lb.yml             |  5 ++
 roles/lb/defaults/main.yml                    | 15 -----
 roles/lb/lb.yml                               |  4 --
 roles/lb/tasks/main.yml                       | 64 -------------------
 roles/lb/templates/haproxy.cfg.j2             | 47 --------------
 roles/lb/templates/haproxy.service.j2         | 13 ----
 roles/lb/templates/keepalived-backup.conf.j2  | 30 ---------
 roles/lb/templates/keepalived-master.conf.j2  | 30 ---------
 tools/clean_one_node.yml                      | 10 +--
 20 files changed, 93 insertions(+), 255 deletions(-)
 delete mode 100644 roles/lb/defaults/main.yml
 delete mode 100644 roles/lb/lb.yml
 delete mode 100644 roles/lb/tasks/main.yml
 delete mode 100644 roles/lb/templates/haproxy.cfg.j2
 delete mode 100644 roles/lb/templates/haproxy.service.j2
 delete mode 100644 roles/lb/templates/keepalived-backup.conf.j2
 delete mode 100644 roles/lb/templates/keepalived-master.conf.j2

diff --git a/99.clean.yml b/99.clean.yml
index 507b63b..adcf540 100644
--- a/99.clean.yml
+++ b/99.clean.yml
@@ -205,21 +205,20 @@
 - hosts:
   - kube-master
   - kube-node
-  - deploy
   - etcd
-  - lb
+  - ex-lb
   tasks:
-  - name: stop and disable chrony in Ubuntu
-    service: name=chrony state=stopped enabled=no
-    ignore_errors: true
-    tags: rm_ntp
-    when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
+  - block:
+    - name: stop and disable chrony in Ubuntu
+      service: name=chrony state=stopped enabled=no
+      ignore_errors: true
+      when: 'ansible_distribution in ["Ubuntu","Debian"]'
 
-  - name: stop and disable chronyd in CentOS/RedHat
-    service: name=chronyd state=stopped enabled=no
-    ignore_errors: true
-    tags: rm_ntp
-    when: ansible_distribution == "CentOS" or ansible_distribution == "RedHat"
+    - name: stop and disable chronyd in CentOS/RedHat
+      service: name=chronyd state=stopped enabled=no
+      ignore_errors: true
+      when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+    when: "groups['chrony']|length > 0"
 
   - name: clean certs and keys
     file: name={{ item }} state=absent
diff --git a/example/hosts.allinone b/example/hosts.allinone
index a78af90..b469602 100644
--- a/example/hosts.allinone
+++ b/example/hosts.allinone
@@ -18,10 +18,10 @@
 
 # [optional] loadbalance for accessing k8s from outside
 [ex-lb]
-#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250
-#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250
+#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
+#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
 
-# [optional] ntp server for cluster
+# [optional] ntp server for the cluster
 [chrony]
 #192.168.1.1
 
diff --git a/example/hosts.multi-node b/example/hosts.multi-node
index 4d4454b..562a61b 100644
--- a/example/hosts.multi-node
+++ b/example/hosts.multi-node
@@ -22,10 +22,10 @@
 
 # [optional] loadbalance for accessing k8s from outside
 [ex-lb]
-#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250
-#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250
+#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
+#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
 
-# [optional] ntp server for cluster
+# [optional] ntp server for the cluster
 [chrony]
 #192.168.1.1
 
diff --git a/roles/ex-lb/clean-ex-lb.yml b/roles/ex-lb/clean-ex-lb.yml
index cdff59c..1908e74 100644
--- a/roles/ex-lb/clean-ex-lb.yml
+++ b/roles/ex-lb/clean-ex-lb.yml
@@ -1,17 +1,17 @@
 - hosts:
   - ex-lb
   tasks:
-  - name: stop and disable chrony in Ubuntu
-    service: name=chrony state=stopped enabled=no
-    ignore_errors: true
-    tags: rm_ntp
-    when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
-
-  - name: stop and disable chronyd in CentOS/RedHat
-    service: name=chronyd state=stopped enabled=no
-    ignore_errors: true
-    tags: rm_ntp
-    when: ansible_distribution == "CentOS" or ansible_distribution == "RedHat"
+  - block:
+    - name: stop and disable chrony in Ubuntu
+      service: name=chrony state=stopped enabled=no
+      ignore_errors: true
+      when: 'ansible_distribution in ["Ubuntu","Debian"]'
+
+    - name: stop and disable chronyd in CentOS/RedHat
+      service: name=chronyd state=stopped enabled=no
+      ignore_errors: true
+      when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+    when: "groups['chrony']|length > 0"
 
   - name: stop keepalived service
     service: name=keepalived state=stopped enabled=no
diff --git a/roles/ex-lb/ex-lb.yml b/roles/ex-lb/ex-lb.yml
index 20103c9..ac6b3eb 100644
--- a/roles/ex-lb/ex-lb.yml
+++ b/roles/ex-lb/ex-lb.yml
@@ -1,5 +1,5 @@
 - hosts: ex-lb
   roles:
-  - { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
+  - { role: chrony, when: "groups['chrony']|length > 0" }
   - prepare
   - ex-lb
diff --git a/roles/ex-lb/tasks/main.yml b/roles/ex-lb/tasks/main.yml
index a03978e..b0b1998 100644
--- a/roles/ex-lb/tasks/main.yml
+++ b/roles/ex-lb/tasks/main.yml
@@ -1,14 +1,21 @@
+# ex-lb 节点成员不能同时是 kube-node 节点,因为它们都需要安装 haproxy
+- name: fail info1
+  fail: msg="an 'ex-lb' node CAN NOT be a 'kube-node' node at the same time"
+  when: "inventory_hostname in groups['kube-node']"
+
 # 自动设置LB节点变量'LB_IF'
 - name: 注册变量 LB_IF_TMP
   shell: "ip a|grep '{{ inventory_hostname }}/'|awk '{print $NF}'"
   register: LB_IF_TMP
+  tags: restart_lb
 
 - name: 设置变量 LB_IF
-  set_fact: LB_IF="{{ LB_IF_TMP.stdout }}"
+  set_fact: LB_IF={{ LB_IF_TMP.stdout }}
+  tags: restart_lb
 
 - name: apt更新缓存刷新
   apt: update_cache=yes cache_valid_time=72000
-  when: ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int >= 16
+  when: 'ansible_distribution in ["Ubuntu","Debian"]'
 
 - name: 安装 haproxy
   package: name=haproxy state=present
@@ -18,7 +25,7 @@
 
 - name: 修改centos的haproxy.service
   template: src=haproxy.service.j2 dest=/usr/lib/systemd/system/haproxy.service
-  when: (ansible_distribution == "CentOS" or ansible_distribution == "RedHat") and ansible_distribution_major_version == "7"
+  when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
   tags: restart_lb
 
 - name: 配置 haproxy
diff --git a/roles/ex-lb/templates/haproxy.cfg.j2 b/roles/ex-lb/templates/haproxy.cfg.j2
index f4cbe97..3cebcda 100644
--- a/roles/ex-lb/templates/haproxy.cfg.j2
+++ b/roles/ex-lb/templates/haproxy.cfg.j2
@@ -1,6 +1,5 @@
 global
-        log /dev/log    local0
-        log /dev/log    local1 notice
+        log /dev/log    local1 warning
         chroot /var/lib/haproxy
         stats socket /run/haproxy/admin.sock mode 660 level admin
         stats timeout 30s
@@ -11,29 +10,55 @@ global
 
 defaults
         log     global
-        timeout connect 5000
+        timeout connect 5s
         timeout client  10m
         timeout server  10m
 
-{% if INGRESS_NODEPORT_LB == "yes" %}
+listen kube-master
+        bind 0.0.0.0:{{ EX_APISERVER_PORT }}
+        mode tcp
+        option tcplog
+        option dontlognull
+        option dontlog-normal
+        balance {{ BALANCE_ALG }}
+{% for host in groups['kube-master'] %}
+        server {{ host }} {{ host }}:6443 check inter 5s fall 2 rise 2 weight 1
+{% endfor %}
 
+{% if INGRESS_NODEPORT_LB == "yes" %}
 listen ingress-node
         bind 0.0.0.0:80
         mode tcp
         option tcplog
+        option dontlognull
+        option dontlog-normal
         balance {{ BALANCE_ALG }}
+{% if groups['kube-node']|length > 3 %}
+        server {{ groups['kube-node'][0] }} {{ groups['kube-node'][0] }}:23456 check inter 5s fall 2 rise 2 weight 1
+        server {{ groups['kube-node'][1] }} {{ groups['kube-node'][1] }}:23456 check inter 5s fall 2 rise 2 weight 1
+        server {{ groups['kube-node'][2] }} {{ groups['kube-node'][2] }}:23456 check inter 5s fall 2 rise 2 weight 1
+{% else %}
 {% for host in groups['kube-node'] %}
-        server {{ host }} {{ host }}:23456 check inter 2000 fall 2 rise 2 weight 1
+        server {{ host }} {{ host }}:23456 check inter 5s fall 2 rise 2 weight 1
 {% endfor %}
 {% endif %}
-{% if INGRESS_TLS_NODEPORT_LB == "yes" %}
+{% endif %}
 
+{% if INGRESS_TLS_NODEPORT_LB == "yes" %}
 listen ingress-node-tls
         bind 0.0.0.0:443
         mode tcp
         option tcplog
+        option dontlognull
+        option dontlog-normal
         balance {{ BALANCE_ALG }}
+{% if groups['kube-node']|length > 3 %}
+        server {{ groups['kube-node'][0] }} {{ groups['kube-node'][0] }}:23457 check inter 5s fall 2 rise 2 weight 1
+        server {{ groups['kube-node'][1] }} {{ groups['kube-node'][1] }}:23457 check inter 5s fall 2 rise 2 weight 1
+        server {{ groups['kube-node'][2] }} {{ groups['kube-node'][2] }}:23457 check inter 5s fall 2 rise 2 weight 1
+{% else %}
 {% for host in groups['kube-node'] %}
-        server {{ host }} {{ host }}:23457 check inter 2000 fall 2 rise 2 weight 1
+        server {{ host }} {{ host }}:23457 check inter 5s fall 2 rise 2 weight 1
 {% endfor %}
 {% endif %}
+{% endif %}
diff --git a/roles/ex-lb/templates/keepalived-backup.conf.j2 b/roles/ex-lb/templates/keepalived-backup.conf.j2
index ce315cd..5bfd83b 100644
--- a/roles/ex-lb/templates/keepalived-backup.conf.j2
+++ b/roles/ex-lb/templates/keepalived-backup.conf.j2
@@ -25,6 +25,6 @@ vrrp_instance VI-kube-master {
         check-haproxy
     }
     virtual_ipaddress {
-        {{ EX_VIP }}
+        {{ EX_APISERVER_VIP }}
     }
 }
diff --git a/roles/ex-lb/templates/keepalived-master.conf.j2 b/roles/ex-lb/templates/keepalived-master.conf.j2
index e16e862..6226c55 100644
--- a/roles/ex-lb/templates/keepalived-master.conf.j2
+++ b/roles/ex-lb/templates/keepalived-master.conf.j2
@@ -25,6 +25,6 @@ vrrp_instance VI-kube-master {
         check-haproxy
     }
     virtual_ipaddress {
-        {{ EX_VIP }}
+        {{ EX_APISERVER_VIP }}
     }
 }
diff --git a/roles/kube-master/tasks/main.yml b/roles/kube-master/tasks/main.yml
index d4f1cbc..fd9076a 100644
--- a/roles/kube-master/tasks/main.yml
+++ b/roles/kube-master/tasks/main.yml
@@ -10,9 +10,11 @@
 - name: 注册变量 KUBERNETES_SVC_IP
   shell: echo {{ SERVICE_CIDR }}|cut -d/ -f1|awk -F. '{print $1"."$2"."$3"."$4+1}'
   register: KUBERNETES_SVC_IP
+  tags: change_cert
 
 - name: 设置变量 CLUSTER_KUBERNETES_SVC_IP
   set_fact: CLUSTER_KUBERNETES_SVC_IP={{ KUBERNETES_SVC_IP.stdout }}
+  tags: change_cert
 
 - name: 创建 kubernetes 证书签名请求
   template: src=kubernetes-csr.json.j2 dest={{ ca_dir }}/kubernetes-csr.json
@@ -47,12 +49,11 @@
   connection: local
   register: TMP_PASS
   run_once: true
-  tags: restart_master
 
 - name: 设置 basic-auth 随机密码
-  set_fact: BASIC_AUTH_PASS="{{ TMP_PASS.stdout }}"
-  tags: restart_master
+  set_fact: BASIC_AUTH_PASS={{ TMP_PASS.stdout }}
   when: 'BASIC_AUTH_ENABLE == "yes" and BASIC_AUTH_PASS == "_pwd_"'
+  tags: restart_master
 
 - name: 创建 basic-auth.csv
   template: src=basic-auth.csv.j2 dest={{ ca_dir }}/basic-auth.csv
diff --git a/roles/kube-master/templates/kubernetes-csr.json.j2 b/roles/kube-master/templates/kubernetes-csr.json.j2
index 6886db4..8429d97 100644
--- a/roles/kube-master/templates/kubernetes-csr.json.j2
+++ b/roles/kube-master/templates/kubernetes-csr.json.j2
@@ -2,11 +2,13 @@
   "CN": "kubernetes",
   "hosts": [
     "127.0.0.1",
-    "{{ MASTER_IP }}",
+{% if groups['ex-lb']|length > 0 %}
+    "{{ hostvars[groups['ex-lb'][0]]['EX_APISERVER_VIP'] }}",
+{% endif %}
     "{{ inventory_hostname }}",
     "{{ CLUSTER_KUBERNETES_SVC_IP }}",
-{% for HOST in MASTER_CERT_HOSTS %}
-    "{{ HOST }}",
+{% for host in MASTER_CERT_HOSTS %}
+    "{{ host }}",
 {% endfor %}
     "kubernetes",
     "kubernetes.default",
diff --git a/roles/kube-node/tasks/node_lb.yml b/roles/kube-node/tasks/node_lb.yml
index b2fa033..9e56bcb 100644
--- a/roles/kube-node/tasks/node_lb.yml
+++ b/roles/kube-node/tasks/node_lb.yml
@@ -1,3 +1,8 @@
+# kube-node 节点成员不能同时是 ex-lb 节点,因为它们都需要安装 haproxy
+- name: fail info1
+  fail: msg="a 'kube-node' node CAN NOT be an 'ex-lb' node at the same time"
+  when: "inventory_hostname in groups['ex-lb']"
+
 - name: 安装 haproxy
   package: name=haproxy state=present
 
diff --git a/roles/lb/defaults/main.yml b/roles/lb/defaults/main.yml
deleted file mode 100644
index ecef738..0000000
--- a/roles/lb/defaults/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# 区分多个instance的VRRP组播,同网段不能重复,取值在0-255之间
-# 因项目已设置vrrp报文单播模式,所以这个ROUTER_ID 即便同网段里面有重复也没关系
-ROUTER_ID: 111
-
-# haproxy负载均衡算法,常见如下:
-# "roundrobin": 基于服务器权重的轮询
-# "leastconn": 基于服务器最小连接数
-# "source": 基于请求源IP地址
-# "uri": 基于请求的URI
-BALANCE_ALG: "roundrobin"
-
-# 启用 ingress NodePort服务的负载均衡 (yes/no)
-INGRESS_NODEPORT_LB: "no"
-# 启用 ingress tls NodePort服务的负载均衡 (yes/no)
-INGRESS_TLS_NODEPORT_LB: "no"
diff --git a/roles/lb/lb.yml b/roles/lb/lb.yml
deleted file mode 100644
index e621f85..0000000
--- a/roles/lb/lb.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-- hosts: lb
-  roles:
-  - lb
-
diff --git a/roles/lb/tasks/main.yml b/roles/lb/tasks/main.yml
deleted file mode 100644
index ae9767f..0000000
--- a/roles/lb/tasks/main.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-# 自动设置LB节点变量'LB_IF'
-- name: 注册变量 LB_IF_TMP
-  shell: "ip a|grep '{{ inventory_hostname }}/'|awk '{print $NF}'"
-  register: LB_IF_TMP
-  tags: restart_lb
-
-- name: 设置变量 LB_IF
-  set_fact: LB_IF="{{ LB_IF_TMP.stdout }}"
-  tags: restart_lb
-
-- name: apt更新缓存刷新
-  apt: update_cache=yes cache_valid_time=72000
-  when: ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int >= 16
-
-- name: 安装 haproxy
-  package: name=haproxy state=present
-
-- name: 创建haproxy配置目录
-  file: name=/etc/haproxy state=directory
-
-- name: 修改centos的haproxy.service
-  template: src=haproxy.service.j2 dest=/usr/lib/systemd/system/haproxy.service
-  when: (ansible_distribution == "CentOS" or ansible_distribution == "RedHat") and ansible_distribution_major_version == "7"
== "7" - tags: restart_lb - -- name: 配置 haproxy - template: src=haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg - tags: restart_lb - -- name: 安装 keepalived - package: name=keepalived state=present - -- name: 创建keepalived配置目录 - file: name=/etc/keepalived state=directory - -- name: 配置 keepalived 主节点 - template: src=keepalived-master.conf.j2 dest=/etc/keepalived/keepalived.conf - when: LB_ROLE == "master" - tags: restart_lb - -- name: 配置 keepalived 备节点 - template: src=keepalived-backup.conf.j2 dest=/etc/keepalived/keepalived.conf - when: LB_ROLE == "backup" - tags: restart_lb - -- name: daemon-reload for haproxy.service - shell: systemctl daemon-reload - tags: restart_lb - -- name: 开机启用haproxy服务 - shell: systemctl enable haproxy - ignore_errors: true - -- name: 重启haproxy服务 - shell: systemctl restart haproxy - tags: restart_lb - -- name: 开机启用keepalived服务 - shell: systemctl enable keepalived - ignore_errors: true - -- name: 重启keepalived服务 - shell: systemctl restart keepalived - tags: restart_lb diff --git a/roles/lb/templates/haproxy.cfg.j2 b/roles/lb/templates/haproxy.cfg.j2 deleted file mode 100644 index b9b3409..0000000 --- a/roles/lb/templates/haproxy.cfg.j2 +++ /dev/null @@ -1,47 +0,0 @@ -global - log /dev/log local0 - log /dev/log local1 notice - chroot /var/lib/haproxy - stats socket /run/haproxy/admin.sock mode 660 level admin - stats timeout 30s - user haproxy - group haproxy - daemon - nbproc 1 - -defaults - log global - timeout connect 5000 - timeout client 10m - timeout server 10m - -listen kube-master - bind 0.0.0.0:{{ KUBE_APISERVER.split(':')[2] }} - mode tcp - option tcplog - balance {{ BALANCE_ALG }} -{% for host in groups['kube-master'] %} - server {{ host }} {{ host }}:6443 check inter 2000 fall 2 rise 2 weight 1 -{% endfor %} -{% if INGRESS_NODEPORT_LB == "yes" %} - -listen ingress-node - bind 0.0.0.0:80 - mode tcp - option tcplog - balance {{ BALANCE_ALG }} -{% for host in groups['kube-node'] %} - server {{ host }} {{ host }}:23456 check inter 2000 fall 2 rise 2 weight 1 -{% endfor %} -{% endif %} -{% if INGRESS_TLS_NODEPORT_LB == "yes" %} - -listen ingress-node-tls - bind 0.0.0.0:443 - mode tcp - option tcplog - balance {{ BALANCE_ALG }} -{% for host in groups['kube-node'] %} - server {{ host }} {{ host }}:23457 check inter 2000 fall 2 rise 2 weight 1 -{% endfor %} -{% endif %} diff --git a/roles/lb/templates/haproxy.service.j2 b/roles/lb/templates/haproxy.service.j2 deleted file mode 100644 index 938e1bd..0000000 --- a/roles/lb/templates/haproxy.service.j2 +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=HAProxy Load Balancer -After=syslog.target network.target - -[Service] -EnvironmentFile=/etc/sysconfig/haproxy -ExecStartPre=/usr/bin/mkdir -p /run/haproxy -ExecStart=/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid $OPTIONS -ExecReload=/bin/kill -USR2 $MAINPID -KillMode=mixed - -[Install] -WantedBy=multi-user.target diff --git a/roles/lb/templates/keepalived-backup.conf.j2 b/roles/lb/templates/keepalived-backup.conf.j2 deleted file mode 100644 index 18512b9..0000000 --- a/roles/lb/templates/keepalived-backup.conf.j2 +++ /dev/null @@ -1,30 +0,0 @@ -global_defs { - router_id lb-backup-{{ inventory_hostname }} -} - -vrrp_script check-haproxy { - script "killall -0 haproxy" - interval 5 - weight -60 -} - -vrrp_instance VI-kube-master { - state BACKUP - priority {{ 119 | random(61, 1) }} - unicast_src_ip {{ inventory_hostname }} - unicast_peer { -{% for h in groups['lb'] %}{% if h != inventory_hostname %} - {{ h }} -{% endif %}{% endfor %} - } 
- dont_track_primary - interface {{ LB_IF }} - virtual_router_id {{ ROUTER_ID }} - advert_int 3 - track_script { - check-haproxy - } - virtual_ipaddress { - {{ MASTER_IP }} - } -} diff --git a/roles/lb/templates/keepalived-master.conf.j2 b/roles/lb/templates/keepalived-master.conf.j2 deleted file mode 100644 index ec43ef1..0000000 --- a/roles/lb/templates/keepalived-master.conf.j2 +++ /dev/null @@ -1,30 +0,0 @@ -global_defs { - router_id lb-master-{{ inventory_hostname }} -} - -vrrp_script check-haproxy { - script "killall -0 haproxy" - interval 5 - weight -60 -} - -vrrp_instance VI-kube-master { - state MASTER - priority 120 - unicast_src_ip {{ inventory_hostname }} - unicast_peer { -{% for h in groups['lb'] %}{% if h != inventory_hostname %} - {{ h }} -{% endif %}{% endfor %} - } - dont_track_primary - interface {{ LB_IF }} - virtual_router_id {{ ROUTER_ID }} - advert_int 3 - track_script { - check-haproxy - } - virtual_ipaddress { - {{ MASTER_IP }} - } -} diff --git a/tools/clean_one_node.yml b/tools/clean_one_node.yml index 97ba31a..c79f62b 100644 --- a/tools/clean_one_node.yml +++ b/tools/clean_one_node.yml @@ -210,14 +210,16 @@ - name: stop and disable chrony in Ubuntu service: name=chrony state=stopped enabled=no ignore_errors: true - tags: rm_ntp - when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian" + when: + - 'ansible_distribution in ["Ubuntu","Debian"]' + - "groups['chrony']|length > 0" - name: stop and disable chronyd in CentOS/RedHat service: name=chronyd state=stopped enabled=no ignore_errors: true - tags: rm_ntp - when: ansible_distribution == "CentOS" or ansible_distribution == "RedHat" + when: + - 'ansible_distribution in ["CentOS","RedHat","Amazon"]' + - "groups['chrony']|length > 0" - name: 清理证书目录和文件 file: name={{ item }} state=absent