# -*- mode: ruby -*-
# # vi: set ft=ruby :

# For help on using kubespray with vagrant, check out docs/developers/vagrant.md

require 'fileutils'

Vagrant.require_version ">= 2.0.0"

CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')

FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"

# Unique disk UUID for libvirt
DISK_UUID = Time.now.utc.to_i

SUPPORTED_OS = {
  "flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]},
  "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
  "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
  "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
  "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
  "ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
  "ubuntu2404" => {box: "bento/ubuntu-24.04", user: "vagrant"},
  "centos" => {box: "centos/7", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
  "centos8" => {box: "centos/8", user: "vagrant"},
  "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
  "almalinux8" => {box: "almalinux/8", user: "vagrant"},
  "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
  "rockylinux8" => {box: "rockylinux/8", user: "vagrant"},
  "rockylinux9" => {box: "rockylinux/9", user: "vagrant"},
  "fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
  "fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
  "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
  "rhel7" => {box: "generic/rhel7", user: "vagrant"},
  "rhel8" => {box: "generic/rhel8", user: "vagrant"},
  "debian11" => {box: "debian/bullseye64", user: "vagrant"},
  "debian12" => {box: "debian/bookworm64", user: "vagrant"},
}
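# Each SUPPORTED_OS entry above maps an $os name to a Vagrant box and the SSH user for that
# box; the Flatcar entries also carry a box_url built from FLATCAR_URL_TEMPLATE.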

if File.exist?(CONFIG)
  require CONFIG
end

# Defaults for config options defined in CONFIG
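# A minimal vagrant/config.rb might override just a few of them (illustrative values only):
#   $instance_name_prefix = "my-k8s"
#   $num_instances = 1
#   $os = "ubuntu2204"
#   $vm_memory = 4096
#   $forwarded_ports = { 8080 => 80 }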
$num_instances ||= 3
$instance_name_prefix ||= "k8s"
$vm_gui ||= false
$vm_memory ||= 2048
$vm_cpus ||= 2
$shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu2004"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "False"
# The first three nodes are etcd servers
$etcd_instances ||= [$num_instances, 3].min
# The first two nodes are kube masters
$kube_master_instances ||= [$num_instances, 2].min
# All nodes are kube nodes
$kube_node_instances ||= $num_instances
# The following only works when using the libvirt provider
$kube_node_instances_with_disks ||= false
$kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2
$override_disk_size ||= false
$disk_size ||= "20GB"
$local_path_provisioner_enabled ||= "False"
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false
# boolean or string (e.g. "-vvv")
$ansible_verbosity ||= false
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""

$vagrant_dir ||= File.join(File.dirname(__FILE__), ".vagrant")

$playbook ||= "cluster.yml"
$extra_vars ||= {}

host_vars = {}

# throw error if os is not supported
if ! SUPPORTED_OS.key?($os)
  puts "Unsupported OS: #{$os}"
  puts "Supported OS are: #{SUPPORTED_OS.keys.join(', ')}"
  exit 1
end

$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = "inventory/sample" if ! $inventory
$inventory = File.absolute_path($inventory, File.dirname(__FILE__))

# if $inventory has a hosts.ini file use it, otherwise copy over
# vars etc to where vagrant expects dynamic inventory to be
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
  $vagrant_ansible = File.join(File.absolute_path($vagrant_dir), "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
  $vagrant_inventory = File.join($vagrant_ansible, "inventory")
  FileUtils.rm_f($vagrant_inventory)
  FileUtils.ln_s($inventory, $vagrant_inventory)
end

if Vagrant.has_plugin?("vagrant-proxyconf")
  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
  (1..$num_instances).each do |i|
    $no_proxy += ",#{$subnet}.#{i+100}"
  end
end
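# vagrant-proxyconf and the per-node proxy settings below pick the proxy endpoints up from the
# environment, e.g. (assumed proxy address, for illustration only):
#   HTTP_PROXY=http://proxy.example.com:3128 HTTPS_PROXY=http://proxy.example.com:3128 vagrant up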

Vagrant.configure("2") do |config|

  config.vm.box = $box
  if SUPPORTED_OS[$os].has_key? :box_url
    config.vm.box_url = SUPPORTED_OS[$os][:box_url]
  end
  config.ssh.username = SUPPORTED_OS[$os][:user]

  # plugin conflict
  if Vagrant.has_plugin?("vagrant-vbguest") then
    config.vbguest.auto_update = false
  end

  # always use Vagrant's insecure key
  config.ssh.insert_key = false

  if ($override_disk_size)
    unless Vagrant.has_plugin?("vagrant-disksize")
      system "vagrant plugin install vagrant-disksize"
    end
    config.disksize.size = $disk_size
  end

  (1..$num_instances).each do |i|
    config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node|

      node.vm.hostname = vm_name

      if Vagrant.has_plugin?("vagrant-proxyconf")
        node.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
        node.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
        node.proxy.no_proxy = $no_proxy
      end

      ["vmware_fusion", "vmware_workstation"].each do |vmware|
        node.vm.provider vmware do |v|
          v.vmx['memsize'] = $vm_memory
          v.vmx['numvcpus'] = $vm_cpus
        end
      end

      node.vm.provider :virtualbox do |vb|
        vb.memory = $vm_memory
        vb.cpus = $vm_cpus
        vb.gui = $vm_gui
        vb.linked_clone = true
        vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
        vb.customize ["modifyvm", :id, "--audio", "none"]
      end

      node.vm.provider :libvirt do |lv|
        lv.nested = $libvirt_nested
        lv.cpu_mode = "host-model"
        lv.memory = $vm_memory
        lv.cpus = $vm_cpus
        lv.default_prefix = 'kubespray'
        # Fix kernel panic on fedora 28
        if $os == "fedora"
          lv.cpu_mode = "host-passthrough"
        end
      end

      if $kube_node_instances_with_disks
        # Libvirt
        driverletters = ('a'..'z').to_a
        node.vm.provider :libvirt do |lv|
          # always make /dev/sd{a/b/c} so that CI can ensure that
          # virtualbox and libvirt will have the same devices to use for OSDs
          (1..$kube_node_instances_with_disks_number).each do |d|
            lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
          end
        end
        node.vm.provider :virtualbox do |vb|
          # always make /dev/sd{a/b/c} so that CI can ensure that
          # virtualbox and libvirt will have the same devices to use for OSDs
          (1..$kube_node_instances_with_disks_number).each do |d|
            vb.customize ['createhd', '--filename', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--size', $kube_node_instances_with_disks_size] # extra disk of $kube_node_instances_with_disks_size
            vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', d, '--device', 0, '--type', 'hdd', '--medium', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--nonrotational', 'on', '--mtype', 'normal']
          end
        end
      end

      if $expose_docker_tcp
        node.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
      end

      $forwarded_ports.each do |guest, host|
        node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
      end

      if ["rhel7", "rhel8"].include? $os
        # Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
        # be installed until the host is registered with a valid Red Hat support subscription
        node.vm.synced_folder ".", "/vagrant", disabled: false
        $shared_folders.each do |src, dst|
          node.vm.synced_folder src, dst
        end
      else
        node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'], rsync__exclude: ['.git', 'venv']
        $shared_folders.each do |src, dst|
          node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
        end
      end

      ip = "#{$subnet}.#{i+100}"
      node.vm.network :private_network,
        :ip => ip,
        :libvirt__guest_ipv6 => 'yes',
        :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
        :libvirt__ipv6_prefix => "64",
        :libvirt__forward_mode => "none",
        :libvirt__dhcp_enabled => false
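      # With the default $subnet this assigns 172.18.8.101 to k8s-1, 172.18.8.102 to k8s-2, and so on;
      # the libvirt IPv6 addresses follow the same pattern under $subnet_ipv6.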

      # Disable swap for each vm
      node.vm.provision "shell", inline: "swapoff -a"

      # ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that.
      if ["ubuntu2004", "ubuntu2204"].include? $os
        node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
        node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
      end

      # Hack for fedora37/38 to get the IP address of the second interface
      if ["fedora37", "fedora38"].include? $os
        config.vm.provision "shell", inline: <<-SHELL
          nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
          nmcli conn modify 'Wired connection 2' ipv4.method manual
          service NetworkManager restart
        SHELL
      end

      # Rockylinux boxes need UEFI
      if ["rockylinux8", "rockylinux9"].include? $os
        config.vm.provider "libvirt" do |domain|
          domain.loader = "/usr/share/OVMF/x64/OVMF_CODE.fd"
        end
      end

      # Disable firewalld on oraclelinux/redhat vms
      if ["oraclelinux", "oraclelinux8", "rhel7", "rhel8", "rockylinux8"].include? $os
        node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
      end

      host_vars[vm_name] = {
        "ip": ip,
        "flannel_interface": "eth1",
        "kube_network_plugin": $network_plugin,
        "kube_network_plugin_multus": $multi_networking,
        "download_run_once": $download_run_once,
        "download_localhost": "False",
        "download_cache_dir": ENV['HOME'] + "/kubespray_cache",
        # Make kubespray cache even when download_run_once is false
        "download_force_cache": $download_force_cache,
        # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
        "download_keep_remote_cache": "False",
        "docker_rpm_keepcache": "1",
        # These two settings will put kubectl and admin.conf in $inventory/artifacts
        "kubeconfig_localhost": "True",
        "kubectl_localhost": "True",
        "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
        "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
        "ansible_ssh_user": SUPPORTED_OS[$os][:user],
        "ansible_ssh_private_key_file": File.join(Dir.home, ".vagrant.d", "insecure_private_key"),
        "unsafe_show_logs": "True"
      }
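      # These per-node variables are handed to the Ansible provisioner below via ansible.host_vars.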

      # Only execute the Ansible provisioner once, when all the machines are up and ready.
      # And limit the action to gathering facts, the full playbook is going to be run by testcases_run.sh
      if i == $num_instances
        node.vm.provision "ansible" do |ansible|
          ansible.playbook = $playbook
          ansible.compatibility_mode = "2.0"
          ansible.verbose = $ansible_verbosity
          $ansible_inventory_path = File.join($inventory, "hosts.ini")
          if File.exist?($ansible_inventory_path)
            ansible.inventory_path = $ansible_inventory_path
          end
          ansible.become = true
          ansible.limit = "all,localhost"
          ansible.host_key_checking = false
          ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
          ansible.host_vars = host_vars
          ansible.extra_vars = $extra_vars
          if $ansible_tags != ""
            ansible.tags = [$ansible_tags]
          end
          ansible.groups = {
            "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
            "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
            "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
            "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
          }
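          # With the defaults ($num_instances = 3, prefix "k8s") this expands to
          # etcd: k8s-[1:3], kube_control_plane: k8s-[1:2], kube_node: k8s-[1:3].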
        end
      end
    end
  end
end