
Add the ability to manually configure the network inside a VM before
boot.

This patchset makes the switch from "dhcp" to "static" network
configuration during deployment obsolete.

Example pillars:
salt:
  control:
    enabled: true
    virt_enabled: true
    size:
      small:
        cpu: 1
        ram: 1
    cluster:
      infra:
        domain: example.com
        engine: virt
        config:
          engine: salt
          host: master.domain.com
        cloud_init:
          network_data:
            links:
            - id: ens2
              name: ens2
              type: phy
          user_data:
            disable_ec2_metadata: true
            resize_rootfs: True
            timezone: UTC
            ssh_deletekeys: True
            ssh_genkeytypes: ['rsa', 'dsa', 'ecdsa']
            ssh_svcname: ssh
            locale: en_US.UTF-8
            disable_root: true
            apt_preserve_sources_list: false
            apt:
              sources_list: ""
              sources:
                ubuntu.list:
                  source: ${linux:system:repo:ubuntu:source}
                mcp_saltstack.list:
                  source: ${linux:system:repo:mcp_saltstack:source}
        node:
          ctl01:
            provider: kvm01.example.com
            image: ubuntu-16.04.qcow2
            size: small
            cloud_init:
              network_data:
                networks:
                - id: private-ipv4
                  ip_address: 192.168.0.161
                  link: ens2
                  netmask: 255.255.255.0
                  routes:
                  - gateway: 192.168.0.1
                    netmask: 0.0.0.0
                    network: 0.0.0.0
                  type: ipv4
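
For the ctl01 node in this example, the new cfgdrive module (added below) lays out the config drive ISO roughly as follows. This is an illustrative sketch: the uuid is generated at build time and the rendered user_data is abbreviated.

# Sketch of the config-2.iso contents produced for ctl01 (illustrative):
config_drive_contents = {
    'openstack/latest/meta_data.json': {
        'uuid': '<generated>',
        'hostname': 'ctl01.example.com',
        'name': 'ctl01',
    },
    'openstack/latest/network_data.json': {
        'links': [{'id': 'ens2', 'name': 'ens2', 'type': 'phy'}],
        'networks': [{'id': 'private-ipv4', 'type': 'ipv4', 'link': 'ens2',
                      'ip_address': '192.168.0.161',
                      'netmask': '255.255.255.0',
                      'routes': [{'gateway': '192.168.0.1',
                                  'netmask': '0.0.0.0',
                                  'network': '0.0.0.0'}]}],
    },
    'openstack/latest/user_data': '#cloud-config\n\n...',
    'openstack/latest/vendor_data.json': '{}',
    'openstack/latest/vendor_data2.json': '{}',
}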

Change-Id: I087518404c61e0bab2303e40957bacca94692102
pull/73/head
Dzmitry Stremkouski committed 6 years ago
revision 97927ee35e
6 changed files with 260 additions and 9 deletions
  1. README.rst (+35, -0)
  2. _modules/cfgdrive.py (+114, -0)
  3. _modules/seedng.py (+10, -2)
  4. _modules/virtng.py (+57, -7)
  5. salt/control/virt.sls (+6, -0)
  6. tests/pillar/control_virt_custom.sls (+38, -0)

README.rst (+35, -0)

@@ -394,6 +394,16 @@ Control VM provisioning:
 
 .. code-block:: yaml
 
+    _param:
+      private-ipv4: &private-ipv4
+      - id: private-ipv4
+        type: ipv4
+        link: ens2
+        netmask: 255.255.255.0
+        routes:
+        - gateway: 192.168.0.1
+          netmask: 0.0.0.0
+          network: 0.0.0.0
     virt:
       disk:
         three_disks:
@@ -440,6 +450,24 @@ Control VM provisioning:
           engine: virt
           # Option to set rng globally
           rng: false
+          cloud_init:
+            user_data:
+              disable_ec2_metadata: true
+              resize_rootfs: True
+              timezone: UTC
+              ssh_deletekeys: True
+              ssh_genkeytypes: ['rsa', 'dsa', 'ecdsa']
+              ssh_svcname: ssh
+              locale: en_US.UTF-8
+              disable_root: true
+              apt_preserve_sources_list: false
+              apt:
+                sources_list: ""
+                sources:
+                  ubuntu.list:
+                    source: ${linux:system:repo:ubuntu:source}
+                  mcp_saltstack.list:
+                    source: ${linux:system:repo:mcp_saltstack:source}
           node:
             ubuntu1:
               provider: node01.domain.com
@@ -456,6 +484,13 @@ Control VM provisioning:
             mac:
               nic01: AC:DE:48:AA:AA:AA
               nic02: AC:DE:48:AA:AA:BB
+            # manual interface configuration via cloud-init
+            # (netconfig also affects the hostname during boot)
+            cloud_init:
+              network_data:
+                networks:
+                - <<: *private-ipv4
+                  ip_address: 192.168.0.161
 
 To enable Redis plugin for the Salt caching subsystem, use the
 below pillar structure:

_modules/cfgdrive.py (+114, -0)

@@ -0,0 +1,114 @@
# -*- coding: utf-8 -*-

import json
import logging
import os
import shutil
import six
import tempfile
import yaml

from oslo_utils import uuidutils
from oslo_utils import fileutils
from oslo_concurrency import processutils


class ConfigDriveBuilder(object):
    """Build config drives, optionally as a context manager."""

    def __init__(self, image_file):
        self.image_file = image_file
        self.mdfiles = []

    def __enter__(self):
        fileutils.delete_if_exists(self.image_file)
        return self

    def __exit__(self, exctype, excval, exctb):
        self.make_drive()

    def add_file(self, path, data):
        self.mdfiles.append((path, data))

    def _add_file(self, basedir, path, data):
        filepath = os.path.join(basedir, path)
        dirname = os.path.dirname(filepath)
        fileutils.ensure_tree(dirname)
        with open(filepath, 'wb') as f:
            if isinstance(data, six.text_type):
                data = data.encode('utf-8')
            f.write(data)

    def _write_md_files(self, basedir):
        for data in self.mdfiles:
            self._add_file(basedir, data[0], data[1])

    def _make_iso9660(self, path, tmpdir):
        processutils.execute('mkisofs',
                             '-o', path,
                             '-ldots',
                             '-allow-lowercase',
                             '-allow-multidot',
                             '-l',
                             '-V', 'config-2',
                             '-r',
                             '-J',
                             '-quiet',
                             tmpdir,
                             attempts=1,
                             run_as_root=False)

    def make_drive(self):
        """Make the config drive.

        :raises ProcessExecuteError if a helper process has failed.
        """
        # Create the scratch directory outside the try block so the
        # cleanup cannot hit an undefined name if mkdtemp itself raises.
        tmpdir = tempfile.mkdtemp()
        try:
            self._write_md_files(tmpdir)
            self._make_iso9660(self.image_file, tmpdir)
        finally:
            shutil.rmtree(tmpdir)


def generate(
        dst,
        hostname,
        domainname,
        instance_id=None,
        user_data=None,
        network_data=None,
        saltconfig=None
):
    '''
    Generate a config drive.

    :param dst: destination file for the config drive image.
    :param hostname: hostname of the instance.
    :param domainname: domain of the instance.
    :param instance_id: UUID of the instance.
    :param user_data: custom user data dictionary. type: json
    :param network_data: custom network info dictionary. type: json
    :param saltconfig: salt minion configuration. type: json
    '''

    instance_md = {}
    instance_md['uuid'] = instance_id or uuidutils.generate_uuid()
    instance_md['hostname'] = '%s.%s' % (hostname, domainname)
    instance_md['name'] = hostname

    # Render user_data as cloud-config and append the minion
    # configuration when given, so the seed still works even without
    # custom user_data.
    payload = ''
    if user_data:
        payload += yaml.dump(yaml.load(user_data), default_flow_style=False)
    if saltconfig:
        payload += yaml.dump(yaml.load(str(saltconfig)), default_flow_style=False)
    user_data = '#cloud-config\n\n' + payload if payload else None

    data = json.dumps(instance_md)

    with ConfigDriveBuilder(dst) as cfgdrive:
        cfgdrive.add_file('openstack/latest/meta_data.json', data)
        if user_data:
            cfgdrive.add_file('openstack/latest/user_data', user_data)
        if network_data:
            cfgdrive.add_file('openstack/latest/network_data.json', network_data)
        cfgdrive.add_file('openstack/latest/vendor_data.json', '{}')
        cfgdrive.add_file('openstack/latest/vendor_data2.json', '{}')
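
A minimal sketch of driving the new module by hand; the paths, names, and data are illustrative, and it assumes mkisofs plus the oslo_utils, oslo_concurrency, and six libraries are installed. Within the formula the same entry point is reached as __salt__['cfgdrive.generate'](...) from virtng.init.

import json

from cfgdrive import generate  # assumes _modules/ is importable

generate(
    dst='/tmp/config-2.iso',  # hypothetical output path
    hostname='ctl01',
    domainname='example.com',
    user_data=json.dumps({'timezone': 'UTC', 'disable_root': True}),
    network_data=json.dumps({'links': [{'id': 'ens2', 'name': 'ens2',
                                        'type': 'phy'}]}),
    saltconfig={'salt_minion': {'conf': {'master': 'master.domain.com',
                                         'id': 'ctl01.example.com'}}},
)

Passing JSON strings for user_data and network_data works because JSON is a subset of YAML, which is how generate() parses the user data.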

_modules/seedng.py (+10, -2)

@@ -91,8 +91,16 @@ def _umount(mpt, ftype):
     __salt__['mount.umount'](mpt, util='qemu_nbd')
 
 
-def apply_(path, id_=None, config=None, approve_key=True, install=True,
-           prep_install=False, pub_key=None, priv_key=None, mount_point=None):
+def apply_(
+        path, id_=None,
+        config=None,
+        approve_key=True,
+        install=True,
+        prep_install=False,
+        pub_key=None,
+        priv_key=None,
+        mount_point=None
+):
     '''
     Seed a location (disk image, directory, or block device) with the
     minion config, approve the minion's key, and/or install salt-minion.

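The reflowed signature leaves the public interface unchanged; the new code in _modules/virtng.py (next file) calls it with keyword arguments when no cloud_init pillar data is present. A hypothetical call from another execution module (values are illustrative):

__salt__['seedng.apply'](
    path='/var/lib/libvirt/images/ctl01.example.com/system.qcow2',
    id_='ctl01.example.com',
    config={'master': 'master.domain.com'},
    install=True,
)
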
_modules/virtng.py (+57, -7)

@@ -20,6 +20,7 @@ import logging
 
 # Import third party libs
 import yaml
+import json
 import jinja2
 import jinja2.exceptions
 import salt.ext.six as six
@@ -558,6 +559,9 @@ def init(name,
         diskp[0][disk_name]['image'] = image
 
     # Create multiple disks, empty or from specified images.
+    cloud_init = None
+    cfg_drive = None
+
     for disk in diskp:
         log.debug("Creating disk for VM [ {0} ]: {1}".format(name, disk))
 
@@ -618,13 +622,39 @@
                 raise CommandExecutionError('problem while copying image. {0} - {1}'.format(args['image'], e))
 
         if kwargs.get('seed'):
-            install = kwargs.get('install', True)
-            seed_cmd = kwargs.get('seed_cmd', 'seedng.apply')
-
-            __salt__[seed_cmd](img_dest,
-                               id_=name,
-                               config=kwargs.get('config'),
-                               install=install)
+            seed_cmd = kwargs.get('seed_cmd', 'seedng.apply')
+            cloud_init = kwargs.get('cloud_init', None)
+            master = __salt__['config.option']('master')
+            cfg_drive = os.path.join(img_dir, 'config-2.iso')
+
+            if cloud_init:
+                _tmp = name.split('.')
+
+                try:
+                    user_data = json.dumps(cloud_init["user_data"])
+                except KeyError:
+                    user_data = None
+
+                try:
+                    network_data = json.dumps(cloud_init["network_data"])
+                except KeyError:
+                    network_data = None
+
+                __salt__["cfgdrive.generate"](
+                    dst=cfg_drive,
+                    hostname=_tmp.pop(0),
+                    domainname='.'.join(_tmp),
+                    user_data=user_data,
+                    network_data=network_data,
+                    saltconfig={"salt_minion": {"conf": {"master": master, "id": name}}}
+                )
+            else:
+                __salt__[seed_cmd](
+                    path=img_dest,
+                    id_=name,
+                    config=kwargs.get('config'),
+                    install=kwargs.get('install', True)
+                )
         else:
             # Create empty disk
             try:
@@ -649,6 +679,26 @@
 
     xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, **kwargs)
 
+    if cloud_init and cfg_drive:
+        xml_doc = minidom.parseString(xml)
+        iso_xml = xml_doc.createElement("disk")
+        iso_xml.setAttribute("type", "file")
+        iso_xml.setAttribute("device", "cdrom")
+        iso_xml.appendChild(xml_doc.createElement("readonly"))
+        driver = xml_doc.createElement("driver")
+        driver.setAttribute("name", "qemu")
+        driver.setAttribute("type", "raw")
+        target = xml_doc.createElement("target")
+        target.setAttribute("dev", "hdc")
+        target.setAttribute("bus", "ide")
+        source = xml_doc.createElement("source")
+        source.setAttribute("file", cfg_drive)
+        iso_xml.appendChild(driver)
+        iso_xml.appendChild(target)
+        iso_xml.appendChild(source)
+        xml_doc.getElementsByTagName("domain")[0].getElementsByTagName("devices")[0].appendChild(iso_xml)
+        xml = xml_doc.toxml()
+
     # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
     for _nic in nicp:
         if _nic['virtualport']:
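
For reference, the minidom block above injects the config drive into the domain definition as a read-only IDE cdrom; the resulting device element renders roughly like this (pretty-printed, with a hypothetical path in place of cfg_drive):

<disk type="file" device="cdrom">
  <readonly/>
  <driver name="qemu" type="raw"/>
  <target dev="hdc" bus="ide"/>
  <source file="/var/lib/libvirt/images/ctl01.example.com/config-2.iso"/>
</disk>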

salt/control/virt.sls (+6, -0)

@@ -42,6 +42,9 @@ salt_libvirt_service:
 {%- if node.provider == grains.id %}
 
 {%- set size = control.size.get(node.size) %}
+{%- set cluster_cloud_init = cluster.get('cloud_init', {}) %}
+{%- set node_cloud_init = node.get('cloud_init', {}) %}
+{%- set cloud_init = salt['grains.filter_by']({'default': cluster_cloud_init}, merge=node_cloud_init) %}
 
 salt_control_virt_{{ cluster_name }}_{{ node_name }}:
   module.run:
@@ -59,6 +62,9 @@ salt_control_virt_{{ cluster_name }}_{{ node_name }}:
     - rng: {{ rng }}
     {%- endif %}
     - kwargs:
+        {%- if cloud_init is defined %}
+        cloud_init: {{ cloud_init }}
+        {%- endif %}
         seed: True
         serial_type: pty
         console: True
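
The three set lines above implement a two-level cloud_init scheme: the cluster defines shared defaults (for example user_data and links) and each node overlays its own addressing. With only a 'default' key, grains.filter_by amounts to a recursive dictionary merge of the node data over the cluster data; a rough standalone equivalent of that behaviour (an assumption about filter_by's merge semantics, not code from the patch):

def merge(dst, src):
    # Recursively overlay src onto a copy of dst.
    out = dict(dst)
    for key, val in src.items():
        if isinstance(val, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], val)
        else:
            out[key] = val
    return out

cluster = {'user_data': {'timezone': 'UTC'},
           'network_data': {'links': [{'id': 'ens2', 'type': 'phy'}]}}
node = {'network_data': {'networks': [{'id': 'private-ipv4',
                                       'ip_address': '192.168.0.161'}]}}
cloud_init = merge(cluster, node)
# Result keeps the cluster's user_data and links and adds the
# node-specific networks list.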

tests/pillar/control_virt_custom.sls (+38, -0)

@@ -1,3 +1,13 @@
+_param:
+  private-ipv4: &private-ipv4
+  - id: private-ipv4
+    type: ipv4
+    link: ens2
+    netmask: 255.255.255.0
+    routes:
+    - gateway: 192.168.0.1
+      netmask: 0.0.0.0
+      network: 0.0.0.0
 virt:
   disk:
     three_disks:
@@ -57,6 +67,29 @@ salt:
         config:
           engine: salt
           host: master.domain.com
+        cloud_init:
+          user_data:
+            disable_ec2_metadata: true
+            resize_rootfs: True
+            timezone: UTC
+            ssh_deletekeys: True
+            ssh_genkeytypes: ['rsa', 'dsa', 'ecdsa']
+            ssh_svcname: ssh
+            locale: en_US.UTF-8
+            disable_root: true
+            apt_preserve_sources_list: false
+            apt:
+              sources_list: ""
+              sources:
+                ubuntu.list:
+                  source: ${linux:system:repo:ubuntu:source}
+                mcp_saltstack.list:
+                  source: ${linux:system:repo:mcp_saltstack:source}
+          network_data:
+            links:
+            - id: ens2
+              type: phy
+              name: ens2
         node:
           ubuntu1:
             provider: node01.domain.com
@@ -72,6 +105,11 @@ salt:
             provider: node03.domain.com
             image: meowbuntu.qcom2
             size: medium_three_disks
+            cloud_init:
+              network_data:
+                networks:
+                - <<: *private-ipv4
+                  ip_address: 192.168.0.161
             rng:
               backend: /dev/urandom
               model: random
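
One subtlety in this pillar: the "<<: *private-ipv4" entry merges correctly even though the anchor points at a one-element list, because the YAML merge key also accepts a sequence of mappings. A quick standalone PyYAML check (not part of the patch):

import yaml

doc = """
_param:
  private-ipv4: &private-ipv4
  - id: private-ipv4
    type: ipv4
networks:
- <<: *private-ipv4
  ip_address: 192.168.0.161
"""
print(yaml.safe_load(doc)['networks'][0])
# -> the anchored id and type keys merged together with ip_address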
