
Merge branch 'master' into pr/fix/minion_dependencis

pr/fix/minion_dependencis
Petr Michalec, 7 years ago
parent
commit 0992a38cd2
54 changed files with 1717 additions and 132 deletions
  1. +6 -0 .kitchen.travis.yml
  2. +12 -28 .kitchen.yml
  3. +11 -5 .travis.yml
  4. +124 -3 README.rst
  5. +217 -0 _engines/saltgraph.py
  6. +109 -0 _modules/saltkey.py
  7. +247 -0 _modules/saltresource.py
  8. +319 -0 _returners/postgres_graph_db.py
  9. +1 -0 metadata/service/master/cluster.yml
  10. +6 -0 metadata/service/master/reactor/minion_start.yml
  11. +1 -0 metadata/service/master/single.yml
  12. +1 -0 metadata/service/minion/cluster.yml
  13. +1 -0 metadata/service/minion/local.yml
  14. +1 -0 metadata/service/minion/master.yml
  15. +15 -1 salt/api.sls
  16. +6 -2 salt/files/_api.conf
  17. +3 -0 salt/files/_beacon.conf
  18. +8 -0 salt/files/_engine.conf
  19. +12 -0 salt/files/master.conf
  20. +16 -0 salt/files/minion.conf
  21. +9 -0 salt/files/provider/_digital_ocean.conf
  22. +6 -0 salt/files/provider/_ec2.conf
  23. +34 -0 salt/files/provider/_openstack.conf
  24. +1 -1 salt/files/proxy.conf
  25. +10 -3 salt/map.jinja
  26. +28 -0 salt/master/env.sls
  27. +15 -1 salt/master/service.sls
  28. +18 -0 salt/master/test.sls
  29. +31 -0 salt/meta/meta.yml
  30. +22 -4 salt/meta/salt.yml
  31. +2 -2 salt/meta/sensu.yml
  32. +51 -10 salt/minion/ca.sls
  33. +51 -41 salt/minion/cert.sls
  34. +1 -1 salt/minion/init.sls
  35. +6 -6 salt/minion/proxy.sls
  36. +15 -21 salt/minion/service.sls
  37. +34 -0 salt/orchestrate/reactor/infra_install.sls
  38. +13 -0 salt/orchestrate/reactor/key_create.sls
  39. +11 -0 salt/orchestrate/reactor/key_remove.sls
  40. +17 -0 salt/orchestrate/reactor/master_update.sls
  41. +24 -0 salt/orchestrate/reactor/node_install.sls
  42. +6 -0 salt/reactor/infra_install.sls
  43. +28 -0 salt/reactor/key_create.sls
  44. +27 -0 salt/reactor/key_remove.sls
  45. +11 -0 salt/reactor/minion_start.sls
  46. +8 -0 salt/reactor/node_install.sls
  47. +6 -0 salt/reactor/orchestrate_start.sls
  48. +9 -0 salt/reactor_sls/key_create.sls
  49. +6 -0 salt/reactor_sls/key_remove.sls
  50. +51 -2 salt/syndic.sls
  51. +4 -0 tests/pillar/minion_backend_urllib.sls
  52. +25 -0 tests/pillar/minion_pki_ca.sls
  53. +20 -0 tests/pillar/minion_pki_cert.sls
  54. +1 -1 tests/pillar/minion_proxy.sls

+6 -0 .kitchen.travis.yml

@@ -0,0 +1,6 @@
suites:

- name: <%= ENV['SUITE'] %>
provisioner:
pillars-from-files:
salt.sls: tests/pillar/<%= ENV['SUITE'] %>.sls

+12 -28 .kitchen.yml

@@ -17,7 +17,7 @@ provisioner:
noservices: true
vendor_repo:
- type: apt
url: http://apt-mk.mirantis.com/trusty
url: http://apt-mk.mirantis.com/xenial
key_url: http://apt-mk.mirantis.com/public.gpg
components: salt
distribution: testing
@@ -37,6 +37,10 @@ provisioner:
enabled: true
master:
host: localhost
pkgs:
- python-m2crypto
- python-psutil
- python-yaml
linux:
system:
enabled: true
@@ -60,21 +64,7 @@ platforms:
image: <%=ENV['PLATFORM'] || 'trevorj/salty-whales:xenial'%>
platform: ubuntu

# - name: ubuntu-xenial
# driver_config:
# image: trevorj/salty-whales:xenial
# platform: ubuntu

# - name: debian-jessie
# driver_config:
# image: debian:jessie

# - name: debian-stretch
# driver_config:
# image: debian:stretch

suites:

- name: minion-default
provisioner:
pillars:
@@ -95,6 +85,7 @@ suites:
minion_handler.sls: tests/pillar/minion_custom_handler.sls
minion_local_pillar.sls: tests/pillar/minion_local_pillar.sls
minion_local_reclass.sls: tests/pillar/minion_local_reclass.sls
minion_backend_urllib.sls: tests/pillar/minion_backend_urllib.sls

- name: master-default
provisioner:
@@ -111,6 +102,12 @@ suites:
- master_ssh_root
- minion_pki_cert
- master_formulas
common.sls:
salt:
master:
#Use a useless package to avoid upgrading salt-master
pkgs:
- python-yaml
pillars-from-files:
minion_pki.sls: tests/pillar/minion_pki_ca.sls
minion_pki_cert.sls: tests/pillar/minion_pki_cert.sls
@@ -144,17 +141,4 @@ suites:
salt.sls: tests/pillar/minion_multi_master_failover.sls


# - name: minion-local
# provisioner:
# pillars:
# top.sls:
# base:
# "*":
# - common
# - minion_local_pillar
# - minion_local_reclass
# pillars-from-files:
# minion_local_pillar.sls: tests/pillar/minion_local_pillar.sls
# minion_local_reclass.sls: tests/pillar/minion_local_reclass.sls

# vim: ft=yaml sw=2 ts=2 sts=2 tw=125

+11 -5 .travis.yml

@@ -17,17 +17,23 @@ install:
- bundle install

env:
- PLATFORM=trevorj/salty-whales:trusty
- PLATFORM=trevorj/salty-whales:xenial

- PLATFORM=trevorj/salty-whales:trusty SUITE=minion_default
- PLATFORM=trevorj/salty-whales:xenial SUITE=minion_default
- PLATFORM=trevorj/salty-whales:trusty SUITE=master_default
- PLATFORM=trevorj/salty-whales:xenial SUITE=master_default
- PLATFORM=trevorj/salty-whales:trusty SUITE=minion_default
- PLATFORM=trevorj/salty-whales:xenial SUITE=minion_default
- PLATFORM=trevorj/salty-whales:trusty SUITE=control_default
- PLATFORM=trevorj/salty-whales:xenial SUITE=control_default
- PLATFORM=trevorj/salty-whales:trusty SUITE=minion_multi_master_failover
- PLATFORM=trevorj/salty-whales:xenial SUITE=minion_multi_master_failover

before_script:
- set -o pipefail
- make test | tail

script:
- test ! -e .kitchen.yml || bundle exec kitchen converge || true
- test ! -e .kitchen.yml || bundle exec kitchen verify -t tests/integration
- KITCHEN_LOCAL_YAML=.kitchen.travis.yml bundle exec kitchen test -t tests/integration

notifications:
webhooks:

+124 -3 README.rst

@@ -145,6 +145,32 @@ Salt master with logging handlers
host: 127.0.0.1
port: 9999


Salt engine definition for saltgraph metadata collector

.. code-block:: yaml

salt:
master:
engine:
graph_metadata:
engine: saltgraph
host: 127.0.0.1
port: 5432
user: salt
password: salt
database: salt

Salt engine definition for sending events from docker events

.. code-block:: yaml

salt:
master:
engine:
docker_events:
docker_url: unix://var/run/docker.sock

Salt master peer setup for remote certificate signing

.. code-block:: yaml
@@ -163,7 +189,7 @@ Configure verbosity of state output (used for `salt` command)
master:
state_output: changes

Salt Reactor system configuration
Salt synchronise node pillar and modules after start

.. code-block:: yaml

@@ -171,8 +197,84 @@ Salt Reactor system configuration
master:
reactor:
salt/minion/*/start:
- salt://reactor/minion-started.sls
- salt://salt/reactor/node_start.sls

Trigger basic node install

.. code-block:: yaml

salt:
master:
reactor:
salt/minion/install:
- salt://salt/reactor/node_install.sls

Sample event to trigger the node installation

.. code-block:: bash

salt-call event.send 'salt/minion/install'

Run any defined orchestration pipeline

.. code-block:: yaml

salt:
master:
reactor:
salt/orchestrate/start:
- salt://salt/reactor/orchestrate_start.sls

Event to trigger the orchestration pipeline

.. code-block:: bash

salt-call event.send 'salt/orchestrate/start' "{'orchestrate': 'salt/orchestrate/infra_install.sls'}"

Synchronise modules and pillars on minion start.

.. code-block:: yaml

salt:
master:
reactor:
'salt/minion/*/start':
- salt://salt/reactor/minion_start.sls

Add and/or remove the minion key

.. code-block:: yaml

salt:
master:
reactor:
salt/key/create:
- salt://salt/reactor/key_create.sls
salt/key/remove:
- salt://salt/reactor/key_remove.sls

Event to trigger the key creation

.. code-block:: bash

salt-call event.send 'salt/key/create' \
> "{'node_id': 'id-of-minion', 'node_host': '172.16.10.100', 'orch_post_create': 'kubernetes.orchestrate.compute_install', 'post_create_pillar': {'node_name': 'id-of-minion'}}"

.. note::

You can pass additional `orch_pre_create`, `orch_post_create`,
`orch_pre_remove` or `orch_post_remove` parameters to the event to call
extra orchestrate files. This can be useful, for example, for
registering/unregistering nodes in monitoring alarms or dashboards.

The key creation event needs to be run from a machine other than the one
being registered.

Event to trigger the key removal

.. code-block:: bash

salt-call event.send 'salt/key/remove'

Salt syndic
-----------
@@ -219,7 +321,7 @@ Salt proxy pillar

salt:
minion:
proxy:
proxy_minion:
master: localhost
device:
vsrx01.mydomain.local:
@@ -316,6 +418,17 @@ Salt minion behind HTTP proxy
host: 127.0.0.1
port: 3128

Salt minion to specify non-default HTTP backend. The default tornado backend
does not respect HTTP proxy settings set as environment variables. This is
useful for cases where you need to set no_proxy lists.

.. code-block:: yaml

salt:
minion:
backend: urllib2


Salt minion with PKI certificate authority (CA)

.. literalinclude:: tests/pillar/minion_pki_ca.sls
@@ -326,6 +439,14 @@ Salt minion using PKI certificate
.. literalinclude:: tests/pillar/minion_pki_cert.sls
:language: yaml

Salt minion trust CA certificates issued by salt CA on a specific host (ie: salt-master node)

.. code-block:: yaml

salt:
minion:
trusted_ca_minions:
- cfg01

Salt control (cloud/kvm/docker)
-------------------------------

+217 -0 _engines/saltgraph.py

@@ -0,0 +1,217 @@
# -*- coding: utf-8 -*-
'''
Saltgraph engine for catching returns of state runs, parsing them
and passing them to flat database of latest Salt resource runs.
'''

# Import python libs
from __future__ import absolute_import
import datetime
import json
import logging

# Import salt libs
import salt.utils.event

# Import third party libs
try:
import psycopg2
import psycopg2.extras
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False

__virtualname__ = 'saltgraph'
log = logging.getLogger(__name__)


def __virtual__():
if not HAS_POSTGRES:
return False, 'Could not import saltgraph engine. python-psycopg2 is not installed.'
return __virtualname__


def _get_conn(options={}):
'''
Return a postgres connection.
'''
host = options.get('host', '127.0.0.1')
user = options.get('user', 'salt')
passwd = options.get('passwd', 'salt')
datab = options.get('db', 'salt')
port = options.get('port', 5432)

return psycopg2.connect(
host=host,
user=user,
password=passwd,
database=datab,
port=port)


def _close_conn(conn):
'''
Close the Postgres connection
'''
conn.commit()
conn.close()


def _get_lowstate_data(options={}):
'''
Fetch state.show_lowstate from all minions and upsert the returned resources
into the salt_resources table, recording the refresh in salt_resources_meta.
'''
conn = _get_conn(options)
cur = conn.cursor()

try:
# this call works only on the Salt master's own minion
lowstate_req = __salt__['saltutil.cmd']('*', 'state.show_lowstate', **{'timeout': 15, 'concurrent': True, 'queue': True})
except:
lowstate_req = {}

for minion, lowstate_ret in lowstate_req.items():
if lowstate_ret.get('retcode') != 0:
continue
for resource in lowstate_ret.get('ret', []):
low_sql = '''INSERT INTO salt_resources
(id, resource_id, host, service, module, fun, status)
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (id) DO UPDATE
SET resource_id = excluded.resource_id,
host = excluded.host,
service = excluded.service,
module = excluded.module,
fun = excluded.fun,
alter_time = current_timestamp'''

rid = "%s|%s" % (minion, resource.get('__id__'))
cur.execute(
low_sql, (
rid,
resource.get('__id__'),
minion,
resource.get('__sls__'),
resource.get('state') if 'state' in resource else resource.get('module'),
resource.get('fun'),
'unknown'
)
)
conn.commit()

if lowstate_req:
meta_sql = '''INSERT INTO salt_resources_meta
(id, options)
VALUES (%s, %s)
ON CONFLICT (id) DO UPDATE
SET options = excluded.options,
alter_time = current_timestamp'''

cur.execute(
meta_sql, (
'lowstate_data',
'{}'
)
)
_close_conn(conn)


def _up_to_date(options={}):
'''
Return True when the lowstate data cached in salt_resources_meta is less than one day old.
'''
conn = _get_conn(options)
cur = conn.cursor()
#cur_dict = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

ret = False

# if lowstate data are older than 1 day, refresh them
cur.execute('SELECT alter_time FROM salt_resources_meta WHERE id = %s', ('lowstate_data',))
alter_time = cur.fetchone()

if alter_time:
now = datetime.datetime.utcnow()
day = datetime.timedelta(days=1)
time_diff = now - alter_time[0].replace(tzinfo=None)
if time_diff < day:
ret = True
else:
skip = False

_close_conn(conn)

return ret


def _update_resources(event, options):
'''
Update the status and last return of the resources referenced by a state run event.
'''
conn = _get_conn(options)
cur = conn.cursor()

cur.execute('SELECT id FROM salt_resources')
resources_db = [res[0] for res in cur.fetchall()]
resources = event.get('return', {}).values()

for resource in resources:
rid = '%s|%s' % (event.get('id'), resource.get('__id__'))
if rid in resources_db:
status = 'unknown'
if resource.get('result', None) is not None:
status = 'success' if resource.get('result') else 'failed'

resource_sql = '''UPDATE salt_resources SET (status, last_ret, alter_time) = (%s, %s, current_timestamp)
WHERE id = %s'''

cur.execute(
resource_sql, (
status,
repr(resource),
rid
)
)

conn.commit()

_close_conn(conn)


def start(host='salt', user='salt', password='salt', database='salt', port=5432, **kwargs):
'''
Listen to events and parse Salt state returns
'''
if __opts__['__role'] == 'master':
event_bus = salt.utils.event.get_master_event(
__opts__,
__opts__['sock_dir'],
listen=True)
else:
event_bus = salt.utils.event.get_event(
'minion',
transport=__opts__['transport'],
opts=__opts__,
sock_dir=__opts__['sock_dir'],
listen=True)
log.debug('Saltgraph engine started')

while True:
event = event_bus.get_event()
supported_funcs = ['state.sls', 'state.apply', 'state.highstate']
if event and event.get('fun', None) in supported_funcs:
test = 'test=true' in [arg.lower() for arg in event.get('fun_args', [])]
if not test:
options = {
'host': host,
'user': user,
'passwd': password,
'db': database,
'port': port
}
is_reclass = [arg for arg in event.get('fun_args', []) if arg.startswith('reclass')]
if is_reclass or not _up_to_date(options):
_get_lowstate_data(options)

_update_resources(event, options)


+109 -0 _modules/saltkey.py

@@ -0,0 +1,109 @@
from __future__ import absolute_import

# Import python libs
import logging
import os

try:
import paramiko
HAS_PARAMIKO = True
except:
HAS_PARAMIKO = False

# Import Salt libs
import salt.config
import salt.wheel

LOG = logging.getLogger(__name__)


def __virtual__():
'''
Only load if paramiko library exist.
'''
if not HAS_PARAMIKO:
return (
False,
'Can not load module saltkey: paramiko library not found')
return True


def key_create(id_, host, force=False):
'''
Generates minion keypair, accepts it on master and injects it to minion via SSH.

:param id_: expected minion ID of target node
:param host: IP address or resolvable hostname/FQDN of target node
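:param force: when True, regenerate and accept the key even if one already exists for the minion (passed to key.gen_accept)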

CLI Examples:

.. code-block:: bash

salt-call saltkey.key_create <MINION_ID> <MINION_IP_ADDRESS> force=False
'''
ret = {
'retcode': 0,
'msg': 'Salt Key for minion %s is already accepted' % id_,
}

opts = salt.config.master_config('/etc/salt/master')
wheel = salt.wheel.WheelClient(opts)
keys = wheel.cmd('key.gen_accept', arg=[id_], kwarg={'force': force})
pub_key = keys.get('pub', None)
priv_key = keys.get('priv', None)

if pub_key and priv_key:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Establish SSH connection to minion
try:
ssh.connect(host)
except paramiko.ssh_exception.AuthenticationException:
msg = ('Could not establish SSH connection to minion "%s" on address %s, please ensure '
'that current user\'s SSH key is present in minions authorized_keys.') % (id_, host)
LOG.error(msg)
ret['retcode'] = 1
ret['msg'] = msg
wheel.cmd_async({'fun': 'key.delete', 'match': id_})
return ret
except Exception as e:
msg = ('Unknown error occured while establishing SSH connection '
'to minion "%s" on address %s: %s') % (id_, host, repr(e))
LOG.error(msg)
ret['retcode'] = 1
ret['msg'] = msg
wheel.cmd_async({'fun': 'key.delete', 'match': id_})
return ret
# Setup the keys on minion side the ugly way, nice one didn't work
key_path = '/etc/salt/pki/minion'
command = ('echo "%(pub_key)s" > %(pub_path)s && chmod 644 %(pub_path)s && '
'echo "%(priv_key)s" > %(priv_path)s && chmod 400 %(priv_path)s && '
'salt-call --local service.restart salt-minion') % {
'pub_path': os.path.join(key_path, 'minion.pub'),
'pub_key': pub_key,
'priv_path': os.path.join(key_path, 'minion.pem'),
'priv_key': priv_key
}

ssh_chan = ssh.get_transport().open_session()
ssh_chan.exec_command(command)
# Wait for command return
while True:
if ssh_chan.exit_status_ready():
exit_status = ssh_chan.recv_exit_status()
stderr = ssh_chan.recv_stderr(1000)
stdout = ssh_chan.recv(1000)
break
ssh.close()
# Evaluate SSH command exit status
if exit_status != 0:
msg = 'Keypair injection to Salt minion failed on target with following error: %s' % stderr
LOG.error(msg)
ret['retcode'] = exit_status
ret['msg'] = msg
return ret

ret['msg'] = 'Salt Key successfully created'

return ret


+247 -0 _modules/saltresource.py

@@ -0,0 +1,247 @@
from __future__ import absolute_import
# Let's not allow PyLint complain about string substitution
# pylint: disable=W1321,E1321

# Import python libs
import logging

# Import Salt libs
import salt.returners

# Import third party libs
try:
import psycopg2
import psycopg2.extras
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False

__virtualname__ = 'saltresource'
LOG = logging.getLogger(__name__)


def __virtual__():
if not HAS_POSTGRES:
return False, 'Could not import saltresource module; psycopg2 is not installed.'
return __virtualname__


def _get_options(ret=None):
'''
Get the postgres options from salt.
'''
defaults = {'host': '127.0.0.1',
'user': 'salt',
'passwd': 'salt',
'db': 'salt',
'port': '5432'}

_options = {}
for key, default in defaults.items():
_options[key] = __salt__['config.get']('%s.%s' % (__virtualname__, key), default)

return _options


def _get_conn(ret=None):
'''
Return a postgres connection.
'''
_options = _get_options(ret)

host = _options.get('host')
user = _options.get('user')
passwd = _options.get('passwd')
datab = _options.get('db')
port = _options.get('port')

return psycopg2.connect(
host=host,
user=user,
password=passwd,
database=datab,
port=port)


def _close_conn(conn):
'''
Close the Postgres connection
'''
conn.commit()
conn.close()


def graph_data(*args, **kwargs):
'''
Returns graph data for visualization app

CLI Examples:

.. code-block:: bash

salt '*' saltresource.graph_data
'''
conn = _get_conn()
cur_dict = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

cur_dict.execute('SELECT host, service, status FROM salt_resources')
resources_db = [dict(res) for res in cur_dict]
db_dict = {}

for resource in resources_db:
host = resource.get('host')
service = '.'.join(resource.get('service').split('.')[:2])
status = resource.get('status')

if db_dict.get(host, None):
if db_dict[host].get(service, None):
service_data = db_dict[host][service]
service_data.append(status)
else:
db_dict[host][service] = [status]
else:
db_dict[host] = {service: []}

graph = []
for host, services in db_dict.items():
for service, statuses in services.items():
status = 'unknown'
if 'failed' in statuses:
status = 'failed'
elif 'success' in statuses and not ('failed' in statuses or 'unknown' in statuses):
status = 'success'
datum = {'host': host, 'service': service, 'status': status}
graph.append(datum)

_close_conn(conn)

return {'graph': graph}


def host_data(host, **kwargs):
'''
Returns data describing single host

CLI Examples:

.. code-block:: bash

salt-call saltresource.host_data '<minion_id>'
'''
conn = _get_conn()
cur_dict = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

sql = 'SELECT host, service, resource_id, last_ret, status FROM salt_resources WHERE host=%s'
cur_dict.execute(sql, (host,))
resources_db = [dict(res) for res in cur_dict]
db_dict = {}

for resource in resources_db:
host = resource.get('host')
service = '.'.join(resource.get('service').split('.')[:2])
status = resource.get('status')

if db_dict.get(host, None):
if db_dict[host].get(service, None):
service_data = db_dict[host][service]
service_data.append(status)
else:
db_dict[host][service] = [status]
else:
db_dict[host] = {service: []}

graph = []

for host, services in db_dict.items():
for service, statuses in services.items():
status = 'unknown'
if 'failed' in statuses:
status = 'failed'
elif 'success' in statuses and not ('failed' in statuses or 'unknown' in statuses):
status = 'success'
resources = [{'service': r.get('service', ''), 'resource_id': r.get('resource_id', ''), 'last_ret': r.get('last_ret', None), 'status': r.get('status', '')}
for r
in resources_db
if r.get('service', '').startswith(service)]
datum = {'host': host, 'service': service, 'status': status, 'resources': resources}
graph.append(datum)

_close_conn(conn)

return {'graph': graph}


def sync_db(*args, **kwargs):
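'''
Create the salt_resources and salt_resources_meta tables if they do not exist

CLI Examples:

.. code-block:: bash

salt-call saltresource.sync_db
'''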
conn = _get_conn()
cur = conn.cursor()

resources_sql = '''
CREATE TABLE IF NOT EXISTS salt_resources (
id varchar(255) NOT NULL UNIQUE,
resource_id varchar(255) NOT NULL,
host varchar(255) NOT NULL,
service varchar(255) NOT NULL,
module varchar(50) NOT NULL,
fun varchar(50) NOT NULL,
status varchar(50) NOT NULL,
options json NULL,
last_ret text NULL,
alter_time TIMESTAMP WITH TIME ZONE DEFAULT now()
);
'''
cur.execute(resources_sql)
conn.commit()

resources_meta_sql = '''
CREATE TABLE IF NOT EXISTS salt_resources_meta (
id varchar(255) NOT NULL UNIQUE,
options json NULL,
alter_time TIMESTAMP WITH TIME ZONE DEFAULT now()
);
'''
cur.execute(resources_meta_sql)
_close_conn(conn)

return True


def flush_db(*args, **kwargs):
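'''
Delete all rows from the salt_resources and salt_resources_meta tables

CLI Examples:

.. code-block:: bash

salt-call saltresource.flush_db
'''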
conn = _get_conn()
cur = conn.cursor()
result = True

resources_sql = 'DELETE FROM salt_resources'
try:
cur.execute(resources_sql)
conn.commit()
except Exception as e:
LOG.warning(repr(e))
result = False

resources_meta_sql = 'DELETE FROM salt_resources_meta'
try:
cur.execute(resources_meta_sql)
_close_conn(conn)
except Exception as e:
LOG.warning(repr(e))
result = False

return result


def destroy_db(*args, **kwargs):
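'''
Drop the salt_resources and salt_resources_meta tables

CLI Examples:

.. code-block:: bash

salt-call saltresource.destroy_db
'''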
conn = _get_conn()
cur = conn.cursor()

resources_sql = 'DROP TABLE IF EXISTS salt_resources;'
cur.execute(resources_sql)
conn.commit()

resources_meta_sql = 'DROP TABLE IF EXISTS salt_resources_meta;'
cur.execute(resources_meta_sql)
_close_conn(conn)

return True


+319 -0 _returners/postgres_graph_db.py

@@ -0,0 +1,319 @@
# -*- coding: utf-8 -*-
'''
Return data to a postgresql graph server

.. note::
Creates database of all Salt resources which are to be run on
all minions and then updates their last known state during state
file runs. It can't function as master nor minion external cache.

:maintainer: None
:maturity: New
:depends: psycopg2
:platform: all

To enable this returner the minion will need the psycopg2 installed and
the following values configured in the minion or master config:

.. code-block:: yaml

returner.postgres_graph_db.host: 'salt'
returner.postgres_graph_db.user: 'salt'
returner.postgres_graph_db.passwd: 'salt'
returner.postgres_graph_db.db: 'salt'
returner.postgres_graph_db.port: 5432

Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:

.. code-block:: yaml

alternative.returner.postgres_graph_db.host: 'salt'
alternative.returner.postgres_graph_db.user: 'salt'
alternative.returner.postgres_graph_db.passwd: 'salt'
alternative.returner.postgres_graph_db.db: 'salt'
alternative.returner.postgres_graph_db.port: 5432

Running the following commands as the postgres user should create the database
correctly:

.. code-block:: sql
psql << EOF
CREATE ROLE salt WITH LOGIN;
ALTER ROLE salt WITH PASSWORD 'salt';
CREATE DATABASE salt WITH OWNER salt;
EOF
psql -h localhost -U salt << EOF
--
-- Table structure for table 'salt_resources'
--
DROP TABLE IF EXISTS salt_resources;
CREATE TABLE salt_resources (
id varchar(255) NOT NULL UNIQUE,
resource_id varchar(255) NOT NULL,
host varchar(255) NOT NULL,
service varchar(255) NOT NULL,
module varchar(50) NOT NULL,
fun varchar(50) NOT NULL,
status varchar(50) NOT NULL,
options json NULL,
last_ret text NULL,
alter_time TIMESTAMP WITH TIME ZONE DEFAULT now()
);
--
-- Table structure for table 'salt_resources_meta'
--
DROP TABLE IF EXISTS salt_resources_meta;
CREATE TABLE salt_resources_meta (
id varchar(255) NOT NULL UNIQUE,
options json NULL,
alter_time TIMESTAMP WITH TIME ZONE DEFAULT now()
);
EOF

Required python modules: psycopg2

To use the postgres_graph_db returner, append '--return postgres_graph_db' to the salt command.

.. code-block:: bash

salt '*' test.ping --return postgres_graph_db

To use the alternative configuration, append '--return_config alternative' to the salt command.

.. versionadded:: 2015.5.0

.. code-block:: bash

salt '*' test.ping --return postgres_graph_db --return_config alternative

To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.

.. versionadded:: 2016.3.0

.. code-block:: bash

salt '*' test.ping --return postgres_graph_db --return_kwargs '{"db": "another-salt"}'

'''
from __future__ import absolute_import
# Let's not allow PyLint complain about string substitution
# pylint: disable=W1321,E1321

# Import python libs
import datetime
import json
import logging

# Import Salt libs
import salt.utils.jid
import salt.returners

# Import third party libs
try:
import psycopg2
import psycopg2.extras
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False

__virtualname__ = 'postgres_graph_db'
LOG = logging.getLogger(__name__)


def __virtual__():
if not HAS_POSTGRES:
return False, 'Could not import postgres returner; psycopg2 is not installed.'
return __virtualname__


def _get_options(ret=None):
'''
Get the postgres options from salt.
'''
attrs = {'host': 'host',
'user': 'user',
'passwd': 'passwd',
'db': 'db',
'port': 'port'}

_options = salt.returners.get_returner_options('returner.{0}'.format(__virtualname__),
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options


def _get_conn(ret=None):
'''
Return a postgres connection.
'''
_options = _get_options(ret)

host = _options.get('host')
user = _options.get('user')
passwd = _options.get('passwd')
datab = _options.get('db')
port = _options.get('port')

return psycopg2.connect(
host=host,
user=user,
password=passwd,
database=datab,
port=port)


def _close_conn(conn):
'''
Close the Postgres connection
'''
conn.commit()
conn.close()


def _get_lowstate_data():
'''
Fetch state.show_lowstate from all minions and upsert the returned resources
into the salt_resources table, recording the refresh in salt_resources_meta.
'''
conn = _get_conn()
cur = conn.cursor()

try:
# this call works only on the Salt master's own minion
lowstate_req = __salt__['saltutil.cmd']('*', 'state.show_lowstate', **{'timeout': 15, 'concurrent': True, 'queue': True})
except:
lowstate_req = {}
for minion, lowstate_ret in lowstate_req.items():
if lowstate_ret.get('retcode') != 0:
continue
for resource in lowstate_ret.get('ret', []):
low_sql = '''INSERT INTO salt_resources
(id, resource_id, host, service, module, fun, status)
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (id) DO UPDATE
SET resource_id = excluded.resource_id,
host = excluded.host,
service = excluded.service,
module = excluded.module,
fun = excluded.fun,
alter_time = current_timestamp'''

rid = "%s|%s" % (minion, resource.get('__id__'))

cur.execute(
low_sql, (
rid,
resource.get('__id__'),
minion,
resource.get('__sls__'),
resource.get('state') if 'state' in resource else resource.get('module'),
resource.get('fun'),
'unknown'
)
)

conn.commit()

if lowstate_req:
meta_sql = '''INSERT INTO salt_resources_meta
(id, options)
VALUES (%s, %s)
ON CONFLICT (id) DO UPDATE
SET options = excluded.options,
alter_time = current_timestamp'''

cur.execute(
meta_sql, (
'lowstate_data',
'{}'
)
)

_close_conn(conn)


def _up_to_date():
'''
Return True when the lowstate data cached in salt_resources_meta is less than one day old.
'''
conn = _get_conn()
cur = conn.cursor()
#cur_dict = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

ret = False

# if lowstate data are older than 1 day, refresh them
cur.execute('SELECT alter_time FROM salt_resources_meta WHERE id = %s', ('lowstate_data',))
alter_time = cur.fetchone()

if alter_time:
now = datetime.datetime.utcnow()
day = datetime.timedelta(days=1)
time_diff = now - alter_time[0].replace(tzinfo=None)
if time_diff < day:
ret = True
else:
skip = False

_close_conn(conn)

return ret


def _update_resources(ret):
'''
Update the status and last return of the resources referenced by a state run return.
'''
conn = _get_conn(ret)
cur = conn.cursor()

cur.execute('SELECT id FROM salt_resources')
resources_db = [res[0] for res in cur.fetchall()]
resources = ret.get('return', {}).values()

for resource in resources:
rid = '%s|%s' % (ret.get('id'), resource.get('__id__'))
if rid in resources_db:
status = 'unknown'
if resource.get('result', None) is not None:
status = 'success' if resource.get('result') else 'failed'

resource_sql = '''UPDATE salt_resources SET (status, last_ret, alter_time) = (%s, %s, current_timestamp)
WHERE id = %s'''

cur.execute(
resource_sql, (
status,
repr(resource),
rid
)
)

conn.commit()

_close_conn(conn)


def returner(ret):
'''
Return data to a postgres server
'''
#LOG.warning('RET: %s' % repr(ret))
supported_funcs = ['state.sls', 'state.apply', 'state.highstate']
test = 'test=true' in [arg.lower() for arg in ret.get('fun_args', [])]

if ret.get('fun') in supported_funcs and not test:
is_reclass = [arg for arg in ret.get('fun_args', []) if arg.startswith('reclass')]
if is_reclass or not _up_to_date():
_get_lowstate_data()

_update_resources(ret)


+1 -0 metadata/service/master/cluster.yml

@@ -12,3 +12,4 @@ parameters:
engine: pkg
command_timeout: 5
worker_threads: 3
max_event_size: 100000000

+6 -0 metadata/service/master/reactor/minion_start.yml

@@ -0,0 +1,6 @@
parameters:
salt:
master:
reactor:
'salt/minion/*/start':
- salt://salt/reactor/minion_start.sls

+1 -0 metadata/service/master/single.yml

@@ -14,4 +14,5 @@ parameters:
engine: pkg
command_timeout: 5
worker_threads: 3
max_event_size: 100000000
base_environment: ${_param:salt_master_base_environment}

+1 -0 metadata/service/minion/cluster.yml

@@ -6,6 +6,7 @@ parameters:
salt:
minion:
enabled: true
max_event_size: 100000000
source:
engine: pkg
masters:

+1 -0 metadata/service/minion/local.yml

@@ -6,6 +6,7 @@ parameters:
salt:
minion:
enabled: true
max_event_size: 100000000
source:
engine: pkg
local: true

+1 -0 metadata/service/minion/master.yml

@@ -6,6 +6,7 @@ parameters:
salt:
minion:
enabled: true
max_event_size: 100000000
source:
engine: pkg
master:

+15 -1 salt/api.sls

@@ -15,6 +15,20 @@ salt_api_packages:
- watch_in:
- service: salt_api_service

{%- if api.get('ssl', {}).authority is defined %}

{%- set cert_file = "/etc/ssl/certs/" + api.ssl.get('name', grains.id) + ".crt" %}
{%- set ca_file = "/etc/ssl/certs/ca-" + api.ssl.authority + ".crt" %}

salt_api_init_tls:
cmd.run:
- name: "cat {{ cert_file }} {{ ca_file }} > /etc/ssl/certs/{{ api.ssl.get('name', grains.id) }}-chain.crt"
- creates: /etc/ssl/certs/{{ api.ssl.get('name', grains.id) }}-chain.crt
- watch_in:
- service: salt_api_service

{%- endif %}

salt_api_service:
service.running:
- name: salt-api
@@ -23,4 +37,4 @@ salt_api_service:
- watch:
- file: /etc/salt/master.d/_api.conf

{%- endif %}
{%- endif %}

+6 -2 salt/files/_api.conf

@@ -9,8 +9,11 @@ rest_cherrypy:
ssl_crt: /etc/letsencrypt/live/{{ api.ssl.name }}/cert.pem
ssl_key: /etc/letsencrypt/live/{{ api.ssl.name }}/privkey.pem
{%- elif api.ssl.engine == 'salt' %}
ssl_crt: /etc/ssl/certs/{{ system.name }}.{{ system.domain }}.crt
ssl_key: /etc/ssl/private/{{ system.name }}.{{ system.domain }}.key
ssl_crt: /etc/ssl/certs/{{ api.ssl.get('name', grains.id) }}.crt
ssl_key: /etc/ssl/private/{{ api.ssl.get('name', grains.id) }}.key
{%- if api.ssl.authority is defined %}
ssl_chain: /etc/ssl/certs/{{ api.ssl.get('name', grains.id) }}-chain.crt
{%- endif %}
{%- else %}
ssl_crt: {{ api.ssl.get('cert_file')|default("/etc/ssl/certs/"+grains.get('fqdn')+".crt") }}
ssl_key: {{ api.ssl.get('key_file')|default("/etc/ssl/private/"+grains.get('fqdn')+".key") }}
@@ -25,3 +28,4 @@ rest_cherrypy:
{#-
vim: syntax=jinja
-#}


+3 -0 salt/files/_beacon.conf

@@ -0,0 +1,3 @@
{% from "salt/map.jinja" import minion with context %}

beacons: {{ minion.beacon }}

+8 -0 salt/files/_engine.conf

@@ -0,0 +1,8 @@
{% from "salt/map.jinja" import master with context %}
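{#- Render one engines: entry per item in the salt:master:engine pillar; the optional 'engine' key selects the engine module name -#}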

engines:
{%- for engine_name, engine in master.engine.items() %}
{%- set name = engine.get('engine', engine_name) %}
{%- if engine.engine is defined %}{%- do engine.pop('engine') %}{% endif %}
- {{ name }}: {{ engine|yaml }}
{%- endfor %}

+12 -0 salt/files/master.conf

@@ -4,6 +4,10 @@
worker_threads: {{ master.worker_threads }}
timeout: {{ master.command_timeout }}

{%- if master.get('max_open_files') %}
max_open_files: {{ master.max_open_files }}
{%- endif %}

state_output: {{ master.get('state_output', 'changes') }}

{%- if master.system is defined %}
@@ -44,6 +48,10 @@ open_mode: True
auto_accept: True
{%- endif %}

{%- if master.get('max_event_size') %}
max_event_size: {{ master.max_event_size }}
{%- endif %}

{%- if master.pillar.engine == 'salt' %}

pillar_roots:
@@ -86,3 +94,7 @@ logstash_zmq_handler:
{%- if master.get('order_masters', False) %}
order_masters: True
{%- endif %}

{#-
vim: syntax=jinja
-#}

+16 -0 salt/files/minion.conf

@@ -23,6 +23,14 @@ master: {{ minion.master.host }}

id: {{ system.name }}.{{ system.domain }}

{%- for opt in ['max_event_size', 'acceptance_wait_time_max',
'acceptance_wait_time', 'random_reauth_delay', 'recon_default', 'recon_max',
'recon_randomize', 'auth_timeout'] %}
{% if minion.get(opt) %}
{{ opt }}: {{ minion.get(opt) }}
{%- endif %}
{%- endfor %}

{%- set excluded_keys = ('master', 'system', 'public_keys', 'private_keys', 'known_hosts', '__reclass__', '_secret', '_param') %}

grains:
@@ -66,6 +74,10 @@ proxy_host: {{ minion.proxy.host }}
proxy_port: {{ minion.proxy.port }}
{%- endif %}

{%- if minion.backend is defined %}
backend: {{ minion.backend }}
{%- endif %}

{%- if minion.sentry is defined %}
sentry_handler:
{% for server in minion.sentry.servers %}
@@ -155,3 +167,7 @@ logstash_zmq_handler:
{%- endif %}

{%- endfor %}

{#-
vim: syntax=jinja
-#}

+9 -0 salt/files/provider/_digital_ocean.conf

@@ -0,0 +1,9 @@

{{ provider_name }}:
provider: {{ provider.engine }}
{%- if provider.insecure is defined %}
insecure: true
{%- endif %}
{#- location: {{ provider.region }} #}
personal_access_token: {{ provider.api_key }}
{%- endif %}

+6 -0 salt/files/provider/_ec2.conf

@@ -0,0 +1,6 @@

{{ provider_name }}:
provider: {{ provider.engine }}
{%- if provider.insecure is defined %}
insecure: true
{%- endif %}

+34 -0 salt/files/provider/_openstack.conf

@@ -0,0 +1,34 @@

{{ provider_name }}:
provider: {{ provider.engine }}
{%- if provider.insecure is defined %}
insecure: true
{%- endif %}
identity_url: '{{ provider.identity_url }}'
{%- if provider.compute_name is defined %}
compute_name: {{ provider.get('compute_name', 'nova') }}
{%- endif %}
protocol: ipv4
compute_region: {{ provider.region }}
tenant: {{ provider.tenant }}
user: {{ provider.user }}
{%- if provider.api_key is defined %}
apikey: {{ provider.api_key }}
{%- elif provider.password is defined %}
password: {{ provider.password }}
{%- endif %}
ssh_key_name: salt-cloud
ssh_key_file: /root/.ssh/id_rsa
ssh_interface: {{ provider.get('interface', 'private') }}_ips
networks:
- fixed:
{%- for net in provider.fixed_networks %}
- {{ net }}
{%- endfor %}
- floating:
{%- for net in provider.floating_networks %}
- {{ net }}
{%- endfor %}
{%- if provider.ignore_cidr is defined %}
ignore_cidr: {{ provider.ignore_cidr }}
{%- endif %}

+1 -1 salt/files/proxy.conf

@@ -3,6 +3,6 @@
# This configuration file is used to manage the behavior of all Salt Proxy
# Minions on this host.

master: {{ proxy.master|default('localhost') }}
master: {{ proxy_minion.master|default('localhost') }}
multiprocessing: False
mine_enabled: True

+10 -3 salt/map.jinja

@@ -21,6 +21,8 @@ default:
files: /srv/salt/env
pillar:
engine: salt
max_event_size: 100000000
minion_data_cache: 'localfs'
{%- endload %}

{%- load_yaml as master_specific %}
@@ -83,6 +85,7 @@ default:
{%- if pillar.salt.get('minion', {}).get('source', {}).version is defined %}
version: {{ pillar.salt.minion.source.version }}
{%- endif %}
max_event_size: 100000000
{%- endload %}

{%- load_yaml as minion_specific %}
@@ -100,6 +103,8 @@ Debian:
- PyYAML
- M2Crypto
- psutil
cert_pkgs:
- ca-certificates
Gentoo:
pkgs:
- app-admin/salt
@@ -117,14 +122,16 @@ RedHat:
- PyYAML
- M2Crypto
- psutil
cert_pkgs:
- ca-certificates
{%- endload %}

{%- if pillar.salt.minion is defined %}
{%- set raw_minion = salt['grains.filter_by'](minion_specific, merge=salt['pillar.get']('salt:minion')) %}
{%- set minion = salt['grains.filter_by'](minion_common, merge=raw_minion) %}

{%- if pillar.salt.minion.proxy is defined %}
{% set proxy = salt['grains.filter_by']({
{%- if pillar.salt.minion.proxy_minion is defined %}
{% set proxy_minion = salt['grains.filter_by']({
'Debian': {
'napalm_pkgs': ['python-pip', 'libxml2-dev', 'libxslt1-dev', 'zlib1g-dev'],
'napalm_pip_pkgs': ['napalm', 'oauth']
@@ -132,7 +139,7 @@ RedHat:
'RedHat': {
'napalm_pkgs': ['libxml2-dev', 'libxslt1-dev', 'zlib1g-dev']
},
}, merge=pillar.salt.minion.get('proxy', {})) %}
}, merge=pillar.salt.minion.get('proxy_minion', {})) %}
{%- endif %}

{%- endif %}

+28 -0 salt/master/env.sls

@@ -13,6 +13,7 @@ salt_env_{{ master.system.environment }}_dirs_obsolete:
- /srv/salt/env/{{ master.system.environment }}/_modules
- /srv/salt/env/{{ master.system.environment }}/_states
- /srv/salt/env/{{ master.system.environment }}/_grains
- /srv/salt/env/{{ master.system.environment }}/_engines
- /srv/salt/env/{{ master.system.environment }}
- makedirs: True

@@ -240,6 +241,8 @@ salt_env_{{ environment_name }}_{{ formula_name }}_link:
- target: /usr/share/salt-formulas/env/_formulas/{{ formula_name }}/{{ formula_name }}
- require:
- file: salt_env_{{ environment_name }}_dirs
- force: True
- makedirs: True

{%- for grain_name, grain in formula.get('grain', {}).iteritems() %}

@@ -247,6 +250,8 @@ salt_master_{{ environment_name }}_{{ grain_name }}_grain:
file.symlink:
- name: /usr/share/salt-formulas/env/_grains/{{ grain_name }}
- target: /usr/share/salt-formulas/env/_formulas/{{ formula_name }}/_grains/{{ grain_name }}
- force: True
- makedirs: True

{%- endfor %}

@@ -256,6 +261,8 @@ salt_master_{{ environment_name }}_{{ module_name }}_module:
file.symlink:
- name: /usr/share/salt-formulas/env/_modules/{{ module_name }}
- target: /usr/share/salt-formulas/env/_formulas/{{ formula_name }}/_modules/{{ module_name }}
- force: True
- makedirs: True

{%- endfor %}

@@ -265,6 +272,8 @@ salt_master_{{ environment_name }}_{{ state_name }}_state:
file.symlink:
- name: /usr/share/salt-formulas/env/_states/{{ state_name }}
- target: /usr/share/salt-formulas/env/_formulas/{{ formula_name }}/_states/{{ state_name }}
- force: True
- makedirs: True

{%- endfor %}

@@ -285,6 +294,8 @@ salt_env_{{ environment_name }}_{{ formula_name }}_link:
- target: /srv/salt/env/{{ environment_name }}/_formulas/{{ formula_name }}/{{ formula_name }}
- require:
- file: salt_env_{{ environment_name }}_dirs
- force: True
- makedirs: True

{%- for grain_name, grain in formula.get('grain', {}).iteritems() %}

@@ -292,6 +303,8 @@ salt_master_{{ environment_name }}_{{ grain_name }}_grain:
file.symlink:
- name: /srv/salt/env/{{ environment_name }}/_grains/{{ grain_name }}
- target: /srv/salt/env/{{ environment_name }}/_formulas/{{ formula_name }}/_grains/{{ grain_name }}
- force: True
- makedirs: True

{%- endfor %}

@@ -301,6 +314,8 @@ salt_master_{{ environment_name }}_{{ module_name }}_module:
file.symlink:
- name: /srv/salt/env/{{ environment_name }}/_grains/{{ module_name }}
- target: /srv/salt/env/{{ environment_name }}/_formulas/{{ formula_name }}/_grains/{{ module_name }}
- force: True
- makedirs: True

{%- endfor %}

@@ -310,6 +325,19 @@ salt_master_{{ environment_name }}_{{ state_name }}_state:
file.symlink:
- name: /srv/salt/env/{{ environment_name }}/_grains/{{ state_name }}
- target: /srv/salt/env/{{ environment_name }}/_formulas/{{ formula_name }}/_grains/{{ state_name }}
- force: True
- makedirs: True

{%- endfor %}

{%- for engine_name, engine in formula.get('engine', {}).iteritems() %}

salt_master_{{ environment_name }}_{{ engine_name }}_state:
file.symlink:
- name: /srv/salt/env/{{ environment_name }}/_engines/{{ engine_name }}
- target: /srv/salt/env/{{ environment_name }}/_formulas/{{ formula_name }}/_engines/{{ engine_name }}
- force: True
- makedirs: True

{%- endfor %}


+15 -1 salt/master/service.sls

@@ -4,7 +4,7 @@
{%- if master.source.get('engine', 'pkg') == 'pkg' %}

salt_master_packages:
pkg.latest:
pkg.installed:
- names: {{ master.pkgs }}
{%- if master.source.version is defined %}
- version: {{ master.source.version }}
@@ -42,6 +42,20 @@ salt_master_packages:

{%- endif %}

{%- if master.engine is defined %}

/etc/salt/master.d/_engine.conf:
file.managed:
- source: salt://salt/files/_engine.conf
- user: root
- template: jinja
- require:
- {{ master.install_state }}
- watch_in:
- service: salt_master_service

{%- endif %}

{%- if master.peer is defined %}

/etc/salt/master.d/_peer.conf:

+18 -0 salt/master/test.sls

@@ -0,0 +1,18 @@
{%- from "salt/map.jinja" import master with context %}
{%- if master.enabled %}

salt_master_test_packages:
pkg.latest:
- names: {{ master.test_pkgs }}

/etc/salt/roster:
file.managed:
- source: salt://salt/files/roster
- user: root
- template: jinja
- require:
- {{ master.install_state }}
- watch_in:
- service: salt_master_service

{%- endif %}

+31 -0 salt/meta/meta.yml

@@ -0,0 +1,31 @@
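{#- Graph metadata fragment describing the salt.minion and salt.master services and their relations; aggregated into the salt grain by salt/meta/salt.yml -#}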
graph:
{%- if pillar.get('salt', {}).minion is defined %}
{%- from "salt/map.jinja" import minion with context %}
- host: {{ grains.id }}
service: salt.minion
type: software-config
relations:
{%- if minion.master is defined %}
- service: salt.master
{%- if minion.master.host in ['127.0.0.1', 'localhost'] %}
host: {{ grains.id }}
{%- else %}
host_from_target: {{ minion.master.host }}
{%- endif %}
direction: source
type: tcp-0mq
{%- endif %}
{%- endif %}
{%- if pillar.get('salt', {}).master is defined %}
{%- from "salt/map.jinja" import master with context %}
- host: {{ grains.id }}
service: salt.master
type: software-config
relations:
{%- if master.pillar.engine == 'reclass' %}
- host: {{ grains.id }}
service: reclass.storage
direction: source
type: local-file
{%- endif %}
{%- endif %}

+22 -4 salt/meta/salt.yml

@@ -1,3 +1,17 @@
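{#- Collect the graph fragments published by each service's meta/meta.yml and expose them under the salt grain -#}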
{%- set service_grains = {'salt': {'graph': []}} %}
{%- for service_name, service in pillar.items() %}
{%- set grains_fragment_file = service_name+'/meta/meta.yml' %}
{%- macro load_grains_file() %}{% include grains_fragment_file ignore missing %}{% endmacro %}
{%- set grains_yaml = load_grains_file()|load_yaml %}
{%- if grains_yaml is mapping %}
{%- for node in grains_yaml.graph %}
{%- do service_grains.salt.graph.append(node) %}
{%- endfor %}
{%- endif %}
{%- endfor %}
grain:
salt:
{{ service_grains|yaml(False)|indent(4) }}
orchestrate:
master:
priority: 60
@@ -7,18 +21,22 @@ orchestrate:
priority: 400
require:
- salt: salt.master

minion:
{%- if pillar.get('salt', {}).get('minion', {}).get('ca') %}
pki:
{%- from "salt/map.jinja" import minion with context %}
x509_signing_policies:
{%- for ca_name,ca in minion.ca.items() %}

{%- set ca_file = ca.get('ca_file', '/etc/pki/ca/' ~ ca_name ~ '/ca.crt') %}
{%- set ca_key_file = ca.get('ca_key_file', '/etc/pki/ca/' ~ ca_name ~ '/ca.key') %}
{%- set ca_certs_dir = salt['file.dirname'](ca_file) ~ '/certs/' %}

{%- for signing_policy_name, signing_policy in ca.signing_policy.iteritems() %}
{{ ca_name }}_{{ signing_policy_name }}:
- minions: '{{ signing_policy.minions }}'
- signing_private_key: /etc/pki/ca/{{ ca_name }}/ca.key
- signing_cert: /etc/pki/ca/{{ ca_name }}/ca.crt
- signing_private_key: {{ ca_key_file }}
- signing_cert: {{ ca_file }}
{%- if ca.country is defined %}
- C: {{ ca.country }}
{%- endif %}
@@ -54,7 +72,7 @@ minion:
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: {{ ca.days_valid.certificate }}
- copypath: /etc/pki/ca/{{ ca_name }}/certs/
- copypath: {{ ca_certs_dir }}
{%- endfor %}
{%- endfor %}
{%- endif %}

+2 -2 salt/meta/sensu.yml

@@ -1,12 +1,12 @@
check:
local_salt_master_proc:
command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C salt-master -u root -c 1:50"
command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -a salt-master -u root -c 1:50"
interval: 60
occurrences: 1
subscribers:
- local-salt-master
local_salt_minion_proc:
command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C salt-minion -u root -c 1:10"
command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -a salt-minion -u root -c 1:10"
interval: 60
occurrences: 1
subscribers:

+51 -10 salt/minion/ca.sls

@@ -6,20 +6,47 @@ include:

{%- for ca_name,ca in minion.ca.iteritems() %}

/etc/pki/ca/{{ ca_name }}/certs:
{%- set ca_file = ca.get('ca_file', '/etc/pki/ca/' ~ ca_name ~ '/ca.crt') %}
{%- set ca_key_file = ca.get('ca_key_file', '/etc/pki/ca/' ~ ca_name ~ '/ca.key') %}
{%- set ca_key_usage = ca.get('key_usage',"critical,cRLSign,keyCertSign") %}

{%- set ca_dir = salt['file.dirname'](ca_file) %}
{%- set ca_key_dir = salt['file.dirname'](ca_key_file) %}
{%- set ca_certs_dir = ca_dir ~ '/certs' %}

salt_minion_cert_{{ ca_name }}_dirs:
file.directory:
- makedirs: true
- names:
- {{ ca_dir }}
- {{ ca_key_dir }}
- {{ ca_certs_dir }}
- makedirs: true

/etc/pki/ca/{{ ca_name }}/ca.key:
{{ ca_key_file }}:
x509.private_key_managed:
- bits: 4096
- backup: True
- require:
- file: /etc/pki/ca/{{ ca_name }}/certs
- file: {{ ca_certs_dir }}

/etc/pki/ca/{{ ca_name }}/ca.crt:
# TODO: Squash this with the previous state after switch to Salt version >= 2016.11.2
{{ ca_name }}_key_permissions:
file.managed:
- name: {{ ca_key_file }}
- mode: {{ ca.get("mode", 0600) }}
{%- if salt['user.info'](ca.get("user", "root")) %}
- user: {{ ca.get("user", "root") }}
{%- endif %}
{%- if salt['group.info'](ca.get("group", "root")) %}
- group: {{ ca.get("group", "root") }}
{%- endif %}
- replace: false
- require:
- x509: {{ ca_key_file }}

{{ ca_file }}:
x509.certificate_managed:
- signing_private_key: /etc/pki/ca/{{ ca_name }}/ca.key
- signing_private_key: {{ ca_key_file }}
- CN: "{{ ca.common_name }}"
{%- if ca.country is defined %}
- C: {{ ca.country }}
@@ -37,23 +64,37 @@ include:
- OU: {{ ca.organization_unit }}
{%- endif %}
- basicConstraints: "critical,CA:TRUE"
- keyUsage: "critical,cRLSign,keyCertSign"
- keyUsage: {{ ca_key_usage }}
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: {{ ca.days_valid.authority }}
- days_remaining: 0
- backup: True
- require:
- x509: /etc/pki/ca/{{ ca_name }}/ca.key
- x509: {{ ca_key_file }}

# TODO: Squash this with the previous state after switch to Salt version >= 2016.11.2
{{ ca_name }}_cert_permissions:
file.managed:
- name: {{ ca_file }}
- mode: 0644
{%- if salt['user.info'](ca.get("user", "root")) %}
- user: {{ ca.get("user", "root") }}
{%- endif %}
{%- if salt['group.info'](ca.get("group", "root")) %}
- group: {{ ca.get("group", "root") }}
{%- endif %}
- require:
- x509: {{ ca_file }}

salt_system_ca_mine_send_ca_{{ ca_name }}:
module.run:
- name: mine.send
- func: x509.get_pem_entries
- kwargs:
glob_path: /etc/pki/ca/{{ ca_name }}/ca.crt
glob_path: {{ ca_file }}
- require:
- x509: /etc/pki/ca/{{ ca_name }}/ca.crt
- x509: {{ ca_file }}

{%- endfor %}


+51 -41 salt/minion/cert.sls

@@ -18,9 +18,9 @@
{%- set key_file = cert.get('key_file', '/etc/ssl/private/' + cert.common_name + '.key') %}
{%- set cert_file = cert.get('cert_file', '/etc/ssl/certs/' + cert.common_name + '.crt') %}
{%- set ca_file = cert.get('ca_file', '/etc/ssl/certs/ca-' + cert.authority + '.crt') %}
{%- set key_dir = key_file|replace(key_file.split('/')[-1], "") %}
{%- set cert_dir = cert_file|replace(cert_file.split('/')[-1], "") %}
{%- set ca_dir = ca_file|replace(ca_file.split('/')[-1], "") %}
{%- set key_dir = salt['file.dirname'](key_file) %}
{%- set cert_dir = salt['file.dirname'](cert_file) %}
{%- set ca_dir = salt['file.dirname'](ca_file) %}

{# Only ensure directories exists, don't touch permissions, etc. #}
salt_minion_cert_{{ cert_name }}_dirs:
@@ -35,9 +35,14 @@ salt_minion_cert_{{ cert_name }}_dirs:
{{ key_file }}:
x509.private_key_managed:
- bits: {{ cert.get('bits', 4096) }}
require:
- file: salt_minion_cert_{{ cert_name }}_dirs
- require:
- file: salt_minion_cert_{{ cert_name }}_dirs
{%- if cert.all_file is defined %}
- watch_in:
- cmd: salt_minion_cert_{{ cert_name }}_all
{%- endif %}

# TODO: Squash this with the previous state after switch to Salt version >= 2016.11.2
{{ key_file }}_key_permissions:
file.managed:
- name: {{ key_file }}
@@ -49,7 +54,7 @@ salt_minion_cert_{{ cert_name }}_dirs:
- group: {{ cert.get("group", "root") }}
{%- endif %}
- replace: false
- watch:
- require:
- x509: {{ key_file }}

{{ cert_file }}:
@@ -81,7 +86,12 @@ salt_minion_cert_{{ cert_name }}_dirs:
- backup: True
- watch:
- x509: {{ key_file }}
{%- if cert.all_file is defined %}
- watch_in:
- cmd: salt_minion_cert_{{ cert_name }}_all
{%- endif %}

# TODO: Squash this with the previous state after switch to Salt version >= 2016.11.2
{{ cert_file }}_cert_permissions:
file.managed:
- name: {{ cert_file }}
@@ -93,7 +103,7 @@ salt_minion_cert_{{ cert_name }}_dirs:
- group: {{ cert.get("group", "root") }}
{%- endif %}
- replace: false
- watch:
- require:
- x509: {{ cert_file }}

{%- if cert.host is defined and ca_file not in created_ca_files %}
@@ -107,35 +117,38 @@ salt_minion_cert_{{ cert_name }}_dirs:
- text: {{ ca_cert|replace('\n', '') }}
- watch:
- x509: {{ cert_file }}
{%- if cert.all_file is defined %}
- watch_in:
- cmd: salt_minion_cert_{{ cert_name }}_all
{%- endif %}


# TODO: Squash this with the previous state after switch to Salt version >= 2016.11.2
{{ ca_file }}_cert_permissions:
file.managed:
- name: {{ ca_file }}
- mode: 0644
- watch:
{%- if salt['user.info'](cert.get("user", "root")) %}
- user: {{ cert.get("user", "root") }}
{%- endif %}
{%- if salt['group.info'](cert.get("group", "root")) %}
- group: {{ cert.get("group", "root") }}
{%- endif %}
- require:
- x509: {{ ca_file }}

{{ ca_file }}_local_trusted_symlink:
file.symlink:
- name: "{{ cacerts_dir }}/ca-{{ cert.authority }}.crt"
- target: {{ ca_file }}
- watch_in:
- cmd: salt_update_certificates

{%- endif %}

{%- endfor %}

{%- do created_ca_files.append(ca_file) %}
{%- endif %}

{%- if cert.all_file is defined %}

salt_minion_cert_{{ cert_name }}_all:
cmd.wait:
- name: cat {{ key_file }} {{ cert_file }} {{ ca_file }} > {{ cert.all_file }}
- watch:
- x509: {{ key_file }}
- x509: {{ cert_file }}
- x509: {{ ca_file }}

{{ cert.all_file }}_cert_permissions:
file.managed:
@@ -148,7 +161,7 @@ salt_minion_cert_{{ cert_name }}_all:
- group: {{ cert.get("group", "root") }}
{%- endif %}
- replace: false
- watch:
- require:
- cmd: salt_minion_cert_{{ cert_name }}_all
{%- endif %}

@@ -158,13 +171,7 @@ salt_minion_cert_{{ cert_name }}_all:

salt_ca_certificates_packages:
pkg.installed:
{%- if grains.os_family == 'Debian' %}
- name: ca-certificates
{%- elif grains.os_family == 'RedHat' %}
- name: ca-certificates
{%- else %}
- name: []
{%- endif %}
- names: {{ minion.cert_pkgs }}

salt_update_certificates:
cmd.wait:
@@ -178,31 +185,34 @@ salt_update_certificates:
- require:
- pkg: salt_ca_certificates_packages

{%- if minion.get('cert', {}).get('trust_salt_ca', 'True') %}
{%- if minion.get('trust_salt_ca', True) %}

{%- for trusted_ca_minion in minion.get('trusted_ca_minions', []) %}
{%- for ca_host, certs in salt['mine.get'](trusted_ca_minion+'*', 'x509.get_pem_entries').iteritems() %}

{%- for ca_path, ca_cert in certs.iteritems() %}
{%- if not 'ca.crt' in ca_path %}{% continue %}{% endif %}
{%- if ca_path.endswith('ca.crt') %}

{%- set cacert_file="ca-"+ca_path.split("/")[4]+".crt" %}
{# authority name can be obtained only from a cacert path in case of mine.get #}
{%- set ca_authority = ca_path.split("/")[-2] %}
{%- set cacert_file="%s/ca-%s.crt" % (cacerts_dir,ca_authority) %}

salt_cert_{{ cacerts_dir }}/{{ cacert_file }}:
salt_trust_ca_{{ cacert_file }}:
x509.pem_managed:
- name: {{ cacert_file }}
- text: {{ ca_cert|replace('\n', '') }}
- watch_in:
- file: salt_trust_ca_{{ cacert_file }}_permissions
- cmd: salt_update_certificates

salt_trust_ca_{{ cacert_file }}_permissions:
file.managed:
- name: {{ cacerts_dir }}/{{ cacert_file }}
- contents: |
{{ ca_cert|replace(' ', '')|indent(8) }}
- makedirs: True
- show_changes: True
- follow_symlinks: True
- watch_in:
- cmd: salt_update_certificates
- name: {{ cacert_file }}
- mode: 0444

{%- endif %}
{%- endfor %}
{%- endfor %}
{%- endfor %}
{%- endif %}

{%- endif %}


+1 -1 salt/minion/init.sls

@@ -8,6 +8,6 @@ include:
- salt.minion.ca
{%- endif %}
- salt.minion.cert
{%- if pillar.salt.minion.proxy is defined %}
{%- if pillar.salt.minion.proxy_minion is defined %}
- salt.minion.proxy
{%- endif %}

+6 -6 salt/minion/proxy.sls

@@ -1,8 +1,8 @@
{%- from "salt/map.jinja" import proxy with context %}
{%- from "salt/map.jinja" import proxy_minion with context %}

{%- set napalm = false %}

{%- for proxy_name, proxy_device in proxy.get('device', {}).iteritems() %}
{%- for proxy_name, proxy_device in proxy_minion.get('device', {}).iteritems() %}

{%- if proxy_device.engine == 'napalm' %}

@@ -23,23 +23,23 @@
- template: jinja
- defaults:
napalm: {{ napalm }}
proxy: {{ proxy|yaml }}
proxy_minion: {{ proxy_minion|yaml }}

{%- if napalm %}

network_proxy_packages:
pkg.installed:
- names: {{ proxy.napalm_pkgs }}
- names: {{ proxy_minion.napalm_pkgs }}

napalm:
pip.installed:
- name: {{ proxy.napalm_pip_pkgs}}
- name: {{ proxy_minion.napalm_pip_pkgs}}
- require:
- pkg: python-pip

{%- endif %}

{%- for proxy_name, proxy_device in proxy.get('device', {}).iteritems() %}
{%- for proxy_name, proxy_device in proxy_minion.get('device', {}).iteritems() %}

salt_proxy_{{ proxy_name }}_service:
service.running:

+15 -21 salt/minion/service.sls

@@ -4,7 +4,7 @@
{%- if minion.source.get('engine', 'pkg') == 'pkg' %}

salt_minion_packages:
pkg.latest:
pkg.installed:
- names: {{ minion.pkgs }}
{%- if minion.source.version is defined %}
- version: {{ minion.source.version }}
@@ -34,10 +34,6 @@ salt_minion_dependency_packages:
- template: jinja
- require:
- {{ minion.install_state }}
{%- if not grains.get('noservices', False) %}
- watch_in:
- service: salt_minion_service
{%- endif %}

{%- for service_name, service in pillar.items() %}
{%- set support_fragment_file = service_name+'/meta/salt.yml' %}
@@ -51,27 +47,20 @@ salt_minion_config_{{ service_name }}_{{ name }}:
- name: /etc/salt/minion.d/_{{ name }}.conf
- contents: |
{{ conf|yaml(False)|indent(8) }}
{%- if not grains.get('noservices', False) %}
- watch_in:
- cmd: salt_minion_service_restart
{%- endif %}
- require:
- {{ minion.install_state }}

salt_minion_config_{{ service_name }}_{{ name }}_validity_check:
cmd.wait:
cmd.run:
- name: python -c "import yaml; stream = file('/etc/salt/minion.d/_{{ name }}.conf', 'r'); yaml.load(stream); stream.close()"
- watch:
- onchanges:
- file: salt_minion_config_{{ service_name }}_{{ name }}
{%- if not grains.get('noservices', False) %}
- require_in:
- onchanges_in:
- cmd: salt_minion_service_restart
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endfor %}

{%- if not grains.get('noservices', False) %}
salt_minion_service:
service.running:
- name: {{ minion.service }}
@@ -79,25 +68,30 @@ salt_minion_service:
- require:
- pkg: salt_minion_packages
- pkg: salt_minion_dependency_packages
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}

{#- Restart salt-minion if needed but after all states are executed #}
salt_minion_service_restart:
cmd.wait:
cmd.run:
- name: 'while true; do salt-call saltutil.running|grep fun: && continue; salt-call --local service.restart {{ minion.service }}; break; done'
- shell: /bin/bash
- bg: true
- order: last
- onchanges:
- file: /etc/salt/minion.d/minion.conf
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
- require:
- service: salt_minion_service

{%- endif %}

salt_minion_sync_all:
module.run:
- name: 'saltutil.sync_all'
{%- if not grains.get('noservices', False) %}
- watch:
- onchanges:
- service: salt_minion_service
{%- endif %}
- require:
- pkg: salt_minion_packages
- pkg: salt_minion_dependency_packages

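The onlyif: /bin/false guards turn the service and restart states into no-ops when the noservices grain is set (the pattern used for containerized test runs). A quick sketch of setting that grain on a test minion:

    salt-call --local grains.setval noservices True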
+ 34
- 0
salt/orchestrate/reactor/infra_install.sls Datei anzeigen

@@ -0,0 +1,34 @@

linux_state_all_nodes:
salt.state:
- tgt: 'linux:system'
- tgt_type: pillar
- sls: linux
- queue: True

salt_state_all_nodes:
salt.state:
- tgt: 'salt:minion'
- tgt_type: pillar
- sls: salt.minion
- queue: True
- require:
- salt: linux_state_all_nodes

openssh_state_all_nodes:
salt.state:
- tgt: 'openssh:server'
- tgt_type: pillar
- sls: openssh
- queue: True
- require:
- salt: salt_state_all_nodes

ntp_state_all_nodes:
salt.state:
- tgt: 'ntp:client'
- tgt_type: pillar
- sls: ntp
- queue: True
- require:
- salt: salt_state_all_nodes

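This orchestration is normally kicked off by the matching reactor (salt/reactor/infra_install.sls below), but it can also be run by hand from the master, e.g.:

    salt-run state.orchestrate salt.orchestrate.reactor.infra_install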
+ 13
- 0
salt/orchestrate/reactor/key_create.sls Datei anzeigen

@@ -0,0 +1,13 @@
{%- set node_id = salt['pillar.get']('node_id') %}
{%- set node_host = salt['pillar.get']('node_host') %}

linux_state_all_nodes:
salt.state:
- tgt: 'salt:master'
- tgt_type: pillar
- sls: salt.reactor_sls.key_create
- queue: True
- pillar:
node_id: {{ node_id }}
node_host: {{ node_host }}


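node_id and node_host are expected in the orchestration pillar; a manual invocation could look like the following (id and address are placeholders):

    salt-run state.orchestrate salt.orchestrate.reactor.key_create pillar='{"node_id": "node01.local", "node_host": "10.0.0.10"}'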
+ 11
- 0
salt/orchestrate/reactor/key_remove.sls Datei anzeigen

@@ -0,0 +1,11 @@
{%- set node_id = salt['pillar.get']('node_id') %}

linux_state_all_nodes:
salt.state:
- tgt: 'salt:master'
- tgt_type: pillar
- sls: salt.reactor_sls.key_remove
- queue: True
- pillar:
node_id: {{ node_id }}


+ 17
- 0
salt/orchestrate/reactor/master_update.sls Datei anzeigen

@@ -0,0 +1,17 @@

salt_state_config_node:
salt.state:
- tgt: 'salt:master'
- tgt_type: pillar
- sls: salt.master
- queue: True

reclass_state_config_nodes:
salt.state:
- tgt: 'reclass:storage'
- tgt_type: pillar
- sls: reclass
- queue: True
- require:
- salt: salt_state_config_node


+ 24
- 0
salt/orchestrate/reactor/node_install.sls Datei anzeigen

@@ -0,0 +1,24 @@
{%- set node_name = salt['pillar.get']('event_originator') %}

linux_state:
salt.state:
- tgt: '{{ node_name }}'
- sls: linux
- queue: True

salt_state:
salt.state:
- tgt: '{{ node_name }}'
- sls: salt.minion
- queue: True
- require:
- salt: linux_state

misc_states:
salt.state:
- tgt: '{{ node_name }}'
- sls: ntp,openssh
- queue: True
- require:
- salt: salt_state


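event_originator is filled in by the reactor from the id of the minion that sent the event (see salt/reactor/node_install.sls below); to exercise the orchestration without an event, the pillar can be passed directly, e.g.:

    salt-run state.orchestrate salt.orchestrate.reactor.node_install pillar='{"event_originator": "node01.local"}'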
+ 6
- 0
salt/reactor/infra_install.sls Datei anzeigen

@@ -0,0 +1,6 @@

orchestrate_infra_install:
runner.state.orchestrate:
- mods: salt.orchestrate.reactor.infra_install
- queue: True


+ 28
- 0
salt/reactor/key_create.sls Datei anzeigen

@@ -0,0 +1,28 @@

{% if data.data.orch_pre_create is defined %}

orchestrate_node_key_pre_create:
runner.state.orchestrate:
- mods: {{ data.data.orch_pre_create }}
- queue: True
- pillar: {{ data.data.get('orch_pre_create_pillar', {}) }}

{% endif %}

node_key_create:
runner.state.orchestrate:
- mods: salt.orchestrate.reactor.key_create
- queue: True
- pillar:
node_id: {{ data.data['node_id'] }}
node_host: {{ data.data['node_host'] }}

{% if data.data.orch_post_create is defined %}

orchestrate_node_key_post_create:
runner.state.orchestrate:
- mods: {{ data.data.orch_post_create }}
- queue: True
- pillar: {{ data.data.get('orch_post_create_pillar', {}) }}

{% endif %}

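The reactor takes node_id, node_host and the optional orch_pre_create/orch_post_create hooks from the event payload. A rough sketch of firing such an event from a minion, assuming a tag like salt/key/create (the real tag is defined in the master reactor metadata):

    salt-call event.send 'salt/key/create' '{"node_id": "node01.local", "node_host": "10.0.0.10"}'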
+ 27
- 0
salt/reactor/key_remove.sls Datei anzeigen

@@ -0,0 +1,27 @@

{% if data.data.orch_pre_remove is defined %}

orchestrate_node_key_pre_remove:
runner.state.orchestrate:
- mods: {{ data.data.orch_pre_remove }}
- queue: True
- pillar: {{ data.data.get('orch_pre_remove_pillar', {}) }}

{% endif %}

node_key_remove:
runner.state.orchestrate:
- mods: salt.orchestrate.reactor.key_remove
- queue: True
- pillar:
node_id: {{ data.data['node_id'] }}

{% if data.data.orch_post_remove is defined %}

orchestrate_node_key_post_remove:
runner.state.orchestrate:
- mods: {{ data.data.orch_post_remove }}
- queue: True
- pillar: {{ data.data.get('orch_post_remove_pillar', {}) }}

{% endif %}

+ 11
- 0
salt/reactor/minion_start.sls Datei anzeigen

@@ -0,0 +1,11 @@

minion_sync_all:
local.saltutil.sync_all:
- tgt: {{ data.id }}
- queue: True

minion_refresh_pillar:
local.saltutil.refresh_pillar:
- tgt: {{ data.id }}
- queue: True


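This reacts to minion start events; a sketch of the master-side mapping (in this formula it is generated from metadata/service/master/reactor/minion_start.yml, so the file path below is only illustrative):

    reactor:
    - 'salt/minion/*/start':
      - salt://salt/reactor/minion_start.sls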
+ 8
- 0
salt/reactor/node_install.sls Datei anzeigen

@@ -0,0 +1,8 @@

orchestrate_node_install:
runner.state.orchestrate:
- mods: salt.orchestrate.reactor.node_install
- queue: True
- pillar:
event_originator: {{ data.id }}


+ 6
- 0
salt/reactor/orchestrate_start.sls Datei anzeigen

@@ -0,0 +1,6 @@

orchestrate_orchestrate_run:
runner.state.orchestrate:
- mods: {{ data.data.orchestrate }}
- queue: {{ data.data.get('queue', True) }}


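The orchestration module to run is taken from the event data; assuming the reactor is bound to a tag such as salt/orchestrate/start, it could be triggered like this:

    salt-call event.send 'salt/orchestrate/start' '{"orchestrate": "salt.orchestrate.reactor.infra_install"}'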
+ 9
- 0
salt/reactor_sls/key_create.sls Datei anzeigen

@@ -0,0 +1,9 @@
{%- set node_id = salt['pillar.get']('node_id') %}
{%- set node_host = salt['pillar.get']('node_host') %}

key_create_{{ node_id }}:
module.run:
saltkey.key_create:
- id_: {{ node_id }}
- host: {{ node_host }}


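saltkey.key_create refers to the custom execution module added in _modules/saltkey.py and takes the minion id and host as shown; once the custom modules are synced, it can also be called directly on the master for debugging, e.g.:

    salt-call saltkey.key_create id_=node01.local host=10.0.0.10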
+ 6
- 0
salt/reactor_sls/key_remove.sls Datei anzeigen

@@ -0,0 +1,6 @@
{%- set node_id = salt['pillar.get']('node_id') %}

key_create_{{ node_id }}:
salt.wheel:
- name: key.delete
- match: {{ node_id }}

+ 51
- 2
salt/syndic.sls Datei anzeigen

@@ -1,4 +1,4 @@
{%- from "salt/map.jinja" import syndic with context %}
{%- from "salt/map.jinja" import master, syndic with context %}
{%- if syndic.enabled %}

include:
@@ -22,4 +22,53 @@ salt_syndic_service:
- name: {{ syndic.service }}
- enable: true

{%- endif %}
{%- if master.minion_data_cache == 'localfs' %}

{%- for master in syndic.get('masters', []) %}

salt_syndic_master_{{ master.host }}_fingerprint:
ssh_known_hosts.present:
- name: {{ master.host }}
- user: root

salt_syndic_master_{{ master.host }}_sync_cache:
rsync.synchronized:
- name: {{ master.host }}:/var/cache/salt/master/minions
- source: /var/cache/salt/master/minions/
- prepare: True
- update: True

salt_syndic_master_{{ master.host }}_sync_keys:
rsync.synchronized:
- name: {{ master.host }}:/etc/salt/pki/master/minions
- source: /etc/salt/pki/master/minions/
- prepare: True
- update: True

{%- else %}

salt_syndic_master_fingerprint:
ssh_known_hosts.present:
- name: {{ syndic.master.host }}
- user: root

salt_syndic_master_sync_cache:
rsync.synchronized:
- name: {{ syndic.master.host }}:/var/cache/salt/master/minions
- source: /var/cache/salt/master/minions/
- prepare: True
- update: True

salt_syndic_master_sync_keys:
rsync.synchronized:
- name: {{ syndic.master.host }}:/etc/salt/pki/master/minions
- source: /etc/salt/pki/master/minions/
- prepare: True
- update: True

{%- endfor %}

{%- endif %}

{%- endif %}

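A rough pillar sketch for the multi-master branch above, assuming each entry in the masters list carries a host key as used in the loop:

    salt:
      syndic:
        enabled: true
        masters:
        - host: master01.local
        - host: master02.local

Note that rsync.synchronized additionally needs rsync installed and root SSH access to the listed masters, which is why their fingerprints are pre-seeded with ssh_known_hosts.present.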

+ 4
- 0
tests/pillar/minion_backend_urllib.sls Datei anzeigen

@@ -0,0 +1,4 @@
salt:
minion:
enabled: true
backend: urllib2

+ 25
- 0
tests/pillar/minion_pki_ca.sls Datei anzeigen

@@ -44,3 +44,28 @@ salt:
ca_intermediate:
type: v3_intermediate_ca
minions: '*'
salt-ca-alt:
common_name: Alt CA Testing
country: Czech
state: Prague
locality: Cesky Krumlov
days_valid:
authority: 3650
certificate: 90
signing_policy:
cert_server:
type: v3_edge_cert_server
minions: '*'
cert_client:
type: v3_edge_cert_client
minions: '*'
ca_edge:
type: v3_edge_ca
minions: '*'
ca_intermediate:
type: v3_intermediate_ca
minions: '*'
ca_file: '/etc/test/ca.crt'
ca_key_file: '/etc/test/ca.key'
user: test
group: test

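Once the alternative authority has been generated, the files referenced by ca_file/ca_key_file can be inspected locally, for instance:

    salt-call --local x509.read_certificate /etc/test/ca.crt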
+ 20
- 0
tests/pillar/minion_pki_cert.sls Datei anzeigen

@@ -59,3 +59,23 @@ salt:
# salt.ci.local
#signing_policy:
# cert_server
test_cert:
alternative_names:
IP:127.0.0.1,DNS:salt.ci.local,DNS:test.ci.local
cert_file:
/srv/salt/pki/ci/test.ci.local.crt
common_name:
test.ci.local
key_file:
/srv/salt/pki/ci/test.ci.local.key
country: CZ
state: Prague
locality: Cesky Krumlov
signing_cert:
/etc/test/ca.crt
signing_private_key:
/etc/test/ca.key
# Kitchen-Salt CI triggers `salt-call --local`; the attributes below
# can't be used there, as the required SaltMaster connectivity is not available
authority:
salt-ca-alt

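A quick check of the issued test certificate against the signing CA configured above (plain OpenSSL, outside of Salt):

    openssl verify -CAfile /etc/test/ca.crt /srv/salt/pki/ci/test.ci.local.crt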
+ 1
- 1
tests/pillar/minion_proxy.sls Datei anzeigen

@@ -1,7 +1,7 @@
salt:
minion:
enabled: true
proxy:
proxy_minion:
master: localhost
device:
vsrx01.mydomain.local:
