Merge pull request #11527 from VannTen/feat/vagrant_multi_inv

Cleanup Vagrantfile and allow use of multiple inventories
pull/11553/head
Kubernetes Prow Robot 2024-09-19 13:46:45 +01:00 committed by GitHub
commit 163697951c
3 changed files with 38 additions and 76 deletions

Vagrantfile (24 changes)
View File

@@ -55,6 +55,8 @@ $subnet ||= "172.18.8"
 $subnet_ipv6 ||= "fd3c:b398:0698:0756"
 $os ||= "ubuntu2004"
 $network_plugin ||= "flannel"
+$inventory ||= "inventory/sample"
+$inventories ||= [$inventory]
 # Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
 $multi_networking ||= "False"
 $download_run_once ||= "True"
@@ -93,19 +95,6 @@ if ! SUPPORTED_OS.key?($os)
 end
 $box = SUPPORTED_OS[$os][:box]
-# if $inventory is not set, try to use example
-$inventory = "inventory/sample" if ! $inventory
-$inventory = File.absolute_path($inventory, File.dirname(__FILE__))
-# if $inventory has a hosts.ini file use it, otherwise copy over
-# vars etc to where vagrant expects dynamic inventory to be
-if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
-  $vagrant_ansible = File.join(File.absolute_path($vagrant_dir), "provisioners", "ansible")
-  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
-  $vagrant_inventory = File.join($vagrant_ansible,"inventory")
-  FileUtils.rm_f($vagrant_inventory)
-  FileUtils.ln_s($inventory, $vagrant_inventory)
-end
 if Vagrant.has_plugin?("vagrant-proxyconf")
   $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
@@ -286,14 +275,13 @@ Vagrant.configure("2") do |config|
         ansible.playbook = $playbook
         ansible.compatibility_mode = "2.0"
         ansible.verbose = $ansible_verbosity
-        $ansible_inventory_path = File.join( $inventory, "hosts.ini")
-        if File.exist?($ansible_inventory_path)
-          ansible.inventory_path = $ansible_inventory_path
-        end
         ansible.become = true
         ansible.limit = "all,localhost"
         ansible.host_key_checking = false
-        ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
+        ansible.raw_arguments = ["--forks=#{$num_instances}",
+                                 "--flush-cache",
+                                 "-e ansible_become_pass=vagrant"] +
+                                $inventories.map {|inv| ["-i", inv]}.flatten
         ansible.host_vars = host_vars
         ansible.extra_vars = $extra_vars
         if $ansible_tags != ""
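
The net effect of these hunks is that the provisioner now appends one `-i` flag per entry in `$inventories`, instead of relying on a `hosts.ini` file or an inventory symlink. A minimal sketch of driving this from an override file, assuming a hypothetical second inventory at `inventory/my_lab` (that path is only an illustration, not part of this change):

```ShellSession
# create the override file the Vagrantfile reads (vagrant/config.rb)
mkdir -p vagrant
cat << EOF > vagrant/config.rb
# each entry becomes its own "-i" argument for ansible-playbook
\$inventories = ["inventory/sample", "inventory/my_lab"]
EOF
vagrant up
```

With that override, the generated `ansible-playbook` call receives `-i inventory/sample -i inventory/my_lab` in addition to the dynamic inventory Vagrant generates itself.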

View File

@@ -29,7 +29,26 @@ You can override the default settings in the `Vagrantfile` either by
directly modifying the `Vagrantfile` or through an override file.
In the same directory as the `Vagrantfile`, create a folder called
`vagrant` and create `config.rb` file in it.
An example of how to configure this file is given below.
Example:
```ruby
# vagrant/config.rb
$instance_name_prefix = "kub"
$vm_cpus = 1
$num_instances = 3
$os = "centos8-bento"
$subnet = "10.0.20"
$network_plugin = "flannel"
$extra_vars = {
dns_domain: my.custom.domain
}
# or
$extra_vars = "path/to/extra/vars/file.yml"
```
For all available options look at the Vagrantfile (search for "CONFIG")
## Use alternative OS for Vagrant
@@ -57,73 +76,33 @@ see [download documentation](/docs/advanced/downloads.md).
## Example use of Vagrant
The following is an example of setting up and running kubespray using `vagrant`.
For repeated runs, you could save the script to a file in the root of the
kubespray repository and run it by executing `source <name_of_the_file>`.
Customize your settings as shown above, then run the commands:
```ShellSession
# use virtualenv to install all python requirements
VENVDIR=venv
virtualenv --python=/usr/bin/python3.7 $VENVDIR
source $VENVDIR/bin/activate
pip install -r requirements.txt
$ virtualenv --python=/usr/bin/python3.7 $VENVDIR
$ source $VENVDIR/bin/activate
$ pip install -r requirements.txt
# prepare an inventory to test with
INV=inventory/my_lab
rm -rf ${INV}.bak &> /dev/null
mv ${INV} ${INV}.bak &> /dev/null
cp -a inventory/sample ${INV}
rm -f ${INV}/hosts.ini
$ vagrant up
# customize the vagrant environment
mkdir vagrant
cat << EOF > vagrant/config.rb
\$instance_name_prefix = "kub"
\$vm_cpus = 1
\$num_instances = 3
\$os = "centos8-bento"
\$subnet = "10.0.20"
\$network_plugin = "flannel"
\$inventory = "$INV"
\$shared_folders = { 'temp/docker_rpms' => "/var/cache/yum/x86_64/7/docker-ce/packages" }
\$extra_vars = {
dns_domain: my.custom.domain
}
# or
\$extra_vars = "path/to/extra/vars/file.yml"
EOF
# make the rpm cache
mkdir -p temp/docker_rpms
vagrant up
# make a copy of the downloaded docker rpm, to speed up the next provisioning run
scp kub-1:/var/cache/yum/x86_64/7/docker-ce/packages/* temp/docker_rpms/
# copy kubectl access configuration in place
mkdir $HOME/.kube/ &> /dev/null
ln -s $PWD/$INV/artifacts/admin.conf $HOME/.kube/config
# Access the cluster
$ export INV=.vagrant/provisioners/ansible/inventory
$ export KUBECONFIG=${INV}/artifacts/admin.conf
# make the kubectl binary available
sudo ln -s $PWD/$INV/artifacts/kubectl /usr/local/bin/kubectl
#or
export PATH=$PATH:$PWD/$INV/artifacts
$ export PATH=$PATH:$PWD/$INV/artifacts
```
If a vagrant run failed and you've made some changes to fix the issue causing
the fail, here is how you would re-run ansible:
```ShellSession
ansible-playbook -vvv -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory cluster.yml
vagrant provision
```
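
`vagrant provision` simply re-runs the same Ansible provisioner, so the arguments configured in the Vagrantfile (become, `ansible_become_pass=vagrant`, the `-i` flags) are applied again. If you would rather invoke Ansible by hand against the inventory Vagrant generated, a rough equivalent would be the following sketch (flags inferred from the Vagrantfile above, not prescribed by these docs):

```ShellSession
$ ansible-playbook -i .vagrant/provisioners/ansible/inventory \
    -b -e ansible_become_pass=vagrant --flush-cache cluster.yml
```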
If all went well, check that everything is working as expected:
```ShellSession
kubectl get nodes
```
The output should look like this:
```ShellSession
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
@@ -134,12 +113,6 @@ kub-3 Ready <none> 3m7s v1.22.5
Another nice test is the following:
```ShellSession
kubectl get pods --all-namespaces -o wide
```
Which should yield something like the following:
```ShellSession
$ kubectl get pods --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

View File

@@ -35,7 +35,8 @@ cleanup-packet:
 create-vagrant:
 	vagrant up
-	cp $(CI_PROJECT_DIR)/inventory/sample/vagrant_ansible_inventory $(INVENTORY)
+	cp $(CI_PROJECT_DIR)/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory $(INVENTORY)
 delete-vagrant:
 	vagrant destroy -f
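
Outside CI, the updated `create-vagrant` flow can be reproduced by hand; `$INVENTORY` below is a stand-in for wherever the generated inventory should be copied (in the Makefile it is a variable, not a fixed path):

```ShellSession
$ vagrant up
$ cp .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory "$INVENTORY"
```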