#cloud-config
# Terraform string template rendered via templatefile(); template variables:
#   ceph_partition_size, node_local_partition_size, root_partition_size,
#   node_type, eip_ip_address
%{ if ceph_partition_size > 0 || node_local_partition_size > 0}
bootcmd:
  # The disk image's GPT backup header sits where the image ended, not at the
  # end of the (larger) virtual disk — move it so new partitions can be created.
  - [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, /dev/vda ]
%{ if node_local_partition_size > 0 }
  # Create partition for node local storage
  - [ cloud-init-per, once, create-node-local-part, parted, --script, /dev/vda, 'mkpart extended ext4 ${root_partition_size}GB %{ if ceph_partition_size == 0 }-1%{ else }${root_partition_size + node_local_partition_size}GB%{ endif }' ]
  - [ cloud-init-per, once, create-fs-node-local-part, mkfs.ext4, /dev/vda2 ]
%{ endif }
%{ if ceph_partition_size > 0 }
  # Create partition for rook to use for ceph; left unformatted on purpose,
  # ceph consumes the raw partition (ends at -1, i.e. the end of the disk).
  - [ cloud-init-per, once, create-ceph-part, parted, --script, /dev/vda, 'mkpart extended ${root_partition_size + node_local_partition_size}GB -1' ]
%{ endif }
%{ endif }
write_files:
  # Bring up the second NIC (private network) via netplan with DHCP.
  - path: /etc/netplan/eth1.yaml
    content: |
      network:
        version: 2
        ethernets:
          eth1:
            dhcp4: true
runcmd:
  - netplan apply
  # Enable IPv4 forwarding so the node can route pod/service traffic.
  - /sbin/sysctl net.ipv4.conf.all.forwarding=1
%{ if node_type == "worker" }
  # TODO: When a VM is seen as healthy and is added to the EIP loadbalancer
  #       pool it no longer can send traffic back to itself via the EIP IP
  #       address.
  #       Remove this if it ever gets solved.
  - iptables -t nat -A PREROUTING -d ${eip_ip_address} -j DNAT --to 127.0.0.1
%{ endif }
%{ if node_local_partition_size > 0 }
  # Mount the node-local-storage partition created in bootcmd above.
  - mkdir -p /mnt/disks/node-local-storage
  - chown nobody:nogroup /mnt/disks/node-local-storage
  - mount /dev/vda2 /mnt/disks/node-local-storage
%{ endif }