Updated Openstack to terraform 0.12 (#5062)

* update openstack to terraform 0.12(.5)

* replace cluster.tf with cluster.tfvars

* update README.md to terraform 0.12

* update Openstack CI tests to use terraform 0.12

* specify terraform version in openstack README

* gitlab CI to copy cluster.tfvars in case of openstack provider

* The terraform/openstack dynamic inventory can read
tfstate v4 (generated by terraform 0.12) and convert it internally
to v3 (as generated by terraform 0.11.x).

Additionally the script has been updated to Python 3.
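
For illustration only (not part of the commit): the conversion flattens the nested attribute maps that tfstate v4 stores into the dotted-key layout the existing parsers already understand. The values below are invented; only the key shapes matter, and they mirror what the new `convert_to_v3_structure` helper in the diff produces.

```python
# Hypothetical v4-style instance attributes (nested JSON, as terraform 0.12 writes them).
v4_attributes = {
    "name": "k8s-master-1",
    "metadata": {"ssh_user": "ubuntu"},
    "security_groups": ["k8s", "k8s-master"],
}

# Roughly the flat, v3-style layout the converter turns this into: lists get a
# '<key>.#' length marker, maps get '<key>.%', and bare strings inside lists are
# stored under arbitrary numeric suffixes.
v3_attributes = {
    "name": "k8s-master-1",
    "metadata.%": 1,
    "metadata.ssh_user": "ubuntu",
    "security_groups.#": 2,
    "security_groups.0.2807994997": "k8s",         # numeric suffix is arbitrary
    "security_groups.1.6823719470": "k8s-master",  # numeric suffix is arbitrary
}
```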
pull/5088/head
Hugo Blom 2019-08-18 10:30:05 +02:00 committed by Kubernetes Prow Robot
parent 554857da97
commit da015e0249
10 changed files with 83 additions and 37 deletions

View File

@ -9,7 +9,8 @@
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
# Prepare inventory
- cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tf .
- if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
- cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
- ln -s contrib/terraform/$PROVIDER/hosts
- terraform init contrib/terraform/$PROVIDER
# Copy SSH keypair
@ -23,7 +24,8 @@
stage: unit-tests
only: ['master', /^pr-.*$/]
script:
- terraform validate -var-file=cluster.tf contrib/terraform/$PROVIDER
- if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
- terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
- terraform fmt -check -diff contrib/terraform/$PROVIDER
.terraform_apply:
@ -46,7 +48,7 @@
tf-validate-openstack:
extends: .terraform_validate
variables:
TF_VERSION: 0.11.11
TF_VERSION: 0.12.6
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
@ -108,7 +110,7 @@ tf-ovh_ubuntu18-calico:
when: on_success
variables:
<<: *ovh_variables
TF_VERSION: 0.11.11
TF_VERSION: 0.12.6
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
@ -136,7 +138,7 @@ tf-ovh_coreos-calico:
when: on_success
variables:
<<: *ovh_variables
TF_VERSION: 0.11.11
TF_VERSION: 0.12.6
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"

View File

@ -1,4 +1,5 @@
.terraform
*.tfvars
!sample-inventory\/cluster.tfvars
*.tfstate
*.tfstate.backup

View File

@ -16,7 +16,7 @@ most modern installs of OpenStack that support the basic services.
- [ELASTX](https://elastx.se/)
- [EnterCloudSuite](https://www.entercloudsuite.com/)
- [FugaCloud](https://fuga.cloud/)
- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tf
- [Open Telekom Cloud](https://cloud.telekom.de/) : requires setting the variable `wait_for_floatingip = "true"` in your cluster.tfvars
- [OVH](https://www.ovh.com/)
- [Rackspace](https://www.rackspace.com/)
- [Ultimum](https://ultimum.io/)
@ -69,7 +69,7 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.
## Requirements
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.12 or later
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
- you already have a suitable OS image in Glance
- you already have a floating IP pool created
@ -219,7 +219,7 @@ set OS_PROJECT_DOMAIN_NAME=Default
The construction of the cluster is driven by values found in
[variables.tf](variables.tf).
For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|Variable | Description |
|---------|-------------|
@ -276,7 +276,7 @@ This should finish fairly quickly telling you Terraform has successfully initial
You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession
$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
```
if you chose to create a bastion host, this script will create
@ -290,7 +290,7 @@ pick it up automatically.
You can destroy your new cluster with the following command issued from the cluster's inventory directory:
```ShellSession
$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack
$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
```
If you've started the Ansible run, it may also be a good idea to do some manual cleanup:

View File

@ -3,7 +3,7 @@ provider "openstack" {
}
module "network" {
source = "modules/network"
source = "./modules/network"
external_net = "${var.external_net}"
network_name = "${var.network_name}"
@ -14,7 +14,7 @@ module "network" {
}
module "ips" {
source = "modules/ips"
source = "./modules/ips"
number_of_k8s_masters = "${var.number_of_k8s_masters}"
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
@ -27,7 +27,7 @@ module "ips" {
}
module "compute" {
source = "modules/compute"
source = "./modules/compute"
cluster_name = "${var.cluster_name}"
az_list = "${var.az_list}"

View File

@ -22,20 +22,20 @@ resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
resource "openstack_networking_secgroup_v2" "bastion" {
name = "${var.cluster_name}-bastion"
count = "${var.number_of_bastions ? 1 : 0}"
count = "${var.number_of_bastions != "" ? 1 : 0}"
description = "${var.cluster_name} - Bastion Server"
delete_default_rules = true
}
resource "openstack_networking_secgroup_rule_v2" "bastion" {
count = "${var.number_of_bastions ? length(var.bastion_allowed_remote_ips) : 0}"
count = "${var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ips) : 0}"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = "22"
port_range_max = "22"
remote_ip_prefix = "${var.bastion_allowed_remote_ips[count.index]}"
security_group_id = "${openstack_networking_secgroup_v2.bastion.id}"
security_group_id = "${openstack_networking_secgroup_v2.bastion[count.index].id}"
}
resource "openstack_networking_secgroup_v2" "k8s" {
@ -99,7 +99,7 @@ resource "openstack_compute_instance_v2" "bastion" {
}
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${openstack_networking_secgroup_v2.bastion.name}",
"${element(openstack_networking_secgroup_v2.bastion.*.name, count.index)}",
]
metadata = {

View File

@ -1,5 +1,5 @@
resource "null_resource" "dummy_dependency" {
triggers {
triggers = {
dependency_id = "${var.router_id}"
}
}

View File

@ -1,15 +1,15 @@
output "k8s_master_fips" {
value = ["${openstack_networking_floatingip_v2.k8s_master.*.address}"]
value = "${openstack_networking_floatingip_v2.k8s_master[*].address}"
}
output "k8s_master_no_etcd_fips" {
value = ["${openstack_networking_floatingip_v2.k8s_master_no_etcd.*.address}"]
value = "${openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address}"
}
output "k8s_node_fips" {
value = ["${openstack_networking_floatingip_v2.k8s_node.*.address}"]
value = "${openstack_networking_floatingip_v2.k8s_node[*].address}"
}
output "bastion_fips" {
value = ["${openstack_networking_floatingip_v2.bastion.*.address}"]
value = "${openstack_networking_floatingip_v2.bastion[*].address}"
}

View File

@ -14,7 +14,7 @@ resource "openstack_networking_network_v2" "k8s" {
resource "openstack_networking_subnet_v2" "k8s" {
name = "${var.cluster_name}-internal-network"
count = "${var.use_neutron}"
network_id = "${openstack_networking_network_v2.k8s.id}"
network_id = "${openstack_networking_network_v2.k8s[count.index].id}"
cidr = "${var.subnet_cidr}"
ip_version = 4
dns_nameservers = "${var.dns_nameservers}"
@ -22,6 +22,6 @@ resource "openstack_networking_subnet_v2" "k8s" {
resource "openstack_networking_router_interface_v2" "k8s" {
count = "${var.use_neutron}"
router_id = "${openstack_networking_router_v2.k8s.id}"
subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
router_id = "${openstack_networking_router_v2.k8s[count.index].id}"
subnet_id = "${openstack_networking_subnet_v2.k8s[count.index].id}"
}

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python3
#
# Copyright 2015 Cisco Systems, Inc.
#
@ -20,15 +20,15 @@
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
import random
from functools import wraps
import json
import os
import re
VERSION = '0.3.0pre'
VERSION = '0.4.0pre'
def tfstates(root=None):
@ -38,15 +38,58 @@ def tfstates(root=None):
            if os.path.splitext(name)[-1] == '.tfstate':
                yield os.path.join(dirpath, name)
def convert_to_v3_structure(attributes, prefix=''):
    """ Convert the attributes from v4 to v3
    Receives a dict and returns a dictionary """
    result = {}
    if isinstance(attributes, str):
        # In the case when we receive a string (e.g. values for security_groups)
        return {'{}{}'.format(prefix, random.randint(1, 10**10)): attributes}
    for key, value in attributes.items():
        if isinstance(value, list):
            if len(value):
                result['{}{}.#'.format(prefix, key)] = len(value)
            for i, v in enumerate(value):
                result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i)))
        elif isinstance(value, dict):
            result['{}{}.%'.format(prefix, key)] = len(value)
            for k, v in value.items():
                result['{}{}.{}'.format(prefix, key, k)] = v
        else:
            result['{}{}'.format(prefix, key)] = value
    return result
def iterresources(filenames):
    for filename in filenames:
        with open(filename, 'r') as json_file:
            state = json.load(json_file)
            for module in state['modules']:
                name = module['path'][-1]
                for key, resource in module['resources'].items():
                    yield name, key, resource
            tf_version = state['version']
            if tf_version == 3:
                for module in state['modules']:
                    name = module['path'][-1]
                    for key, resource in module['resources'].items():
                        yield name, key, resource
            elif tf_version == 4:
                # In version 4 the structure changes so we need to iterate
                # each instance inside the resource branch.
                for resource in state['resources']:
                    name = resource['module'].split('.')[-1]
                    for instance in resource['instances']:
                        key = "{}.{}".format(resource['type'], resource['name'])
                        if 'index_key' in instance:
                            key = "{}.{}".format(key, instance['index_key'])
                        data = {}
                        data['type'] = resource['type']
                        data['provider'] = resource['provider']
                        data['depends_on'] = instance.get('depends_on', [])
                        data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])}
                        if 'id' in instance['attributes']:
                            data['primary']['id'] = instance['attributes']['id']
                        data['primary']['meta'] = instance['attributes'].get('meta', {})
                        yield name, key, data
            else:
                raise KeyError('tfstate version %d not supported' % tf_version)
## READ RESOURCES
PARSERS = {}
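
As an aside, to make the v4 branch of `iterresources` above concrete: below is a minimal, hypothetical tfstate v4 fragment and the tuple the function would yield for it. The resource names and ids are invented; only the structure reflects what the code expects.

```python
# Hypothetical minimal terraform 0.12 state document (tfstate v4).
state_v4 = {
    "version": 4,
    "resources": [
        {
            "module": "module.compute",
            "type": "openstack_compute_instance_v2",
            "name": "k8s_master",
            "provider": "provider.openstack",
            "instances": [
                {"index_key": 0, "attributes": {"id": "abc123", "name": "k8s-master-1"}},
            ],
        },
    ],
}

# For this state, iterresources() would yield one tuple per instance, shaped like
# the old v3 data:
#   name = "compute"  (last component of the module path)
#   key  = "openstack_compute_instance_v2.k8s_master.0"
#   data = {"type": "openstack_compute_instance_v2",
#           "provider": "provider.openstack",
#           "depends_on": [],
#           "primary": {"attributes": {...flattened...}, "id": "abc123", "meta": {}}}
```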
@ -109,7 +152,7 @@ def calculate_mantl_vars(func):
def _parse_prefix(source, prefix, sep='.'):
    for compkey, value in source.items():
    for compkey, value in list(source.items()):
        try:
            curprefix, rest = compkey.split(sep, 1)
        except ValueError:
@ -127,7 +170,7 @@ def parse_attr_list(source, prefix, sep='.'):
        idx, key = compkey.split(sep, 1)
        attrs[idx][key] = value
    return attrs.values()
    return list(attrs.values())
def parse_dict(source, prefix, sep='.'):
@ -258,9 +301,9 @@ def openstack_host(resource, module_name):
    if 'metadata.ssh_user' in raw_attrs:
        attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
    if 'volume.#' in raw_attrs.keys() and int(raw_attrs['volume.#']) > 0:
    if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0:
        device_index = 1
        for key, value in raw_attrs.items():
        for key, value in list(raw_attrs.items()):
            match = re.search("^volume.*.device$", key)
            if match:
                attrs['disk_volume_device_'+str(device_index)] = value
@ -278,7 +321,7 @@ def openstack_host(resource, module_name):
    groups.append('os_image=' + attrs['image']['name'])
    groups.append('os_flavor=' + attrs['flavor']['name'])
    groups.extend('os_metadata_%s=%s' % item
                  for item in attrs['metadata'].items())
                  for item in list(attrs['metadata'].items()))
    groups.append('os_region=' + attrs['region'])
    # groups specific to Mantl