
Merge remote-tracking branch 'upstream/master'

pull/204/head
Irving Leonard, 5 years ago
commit 6d25280ad5
100 changed files with 9585 additions and 677 deletions
  1. +3 -0  .gitignore
  2. +0 -36  .kitchen.vagrant.yml
  3. +17 -4  .kitchen.yml
  4. +16 -14  .travis.yml
  5. +922 -152  README.rst
  6. +3 -0  _modules/linux_hosts.py
  7. +2 -0  _modules/linux_netlink.py
  8. +279 -0  _modules/net_checks.py
  9. +101 -0  _modules/ovs_config.py
  10. +81 -0  _states/ovs_config.py
  11. +1 -1  linux/files/95proxies
  12. +5 -0  linux/files/cron_users.jinja
  13. +360 -0  linux/files/grafana_dashboards/bond_prometheus.json
  14. +1134 -0  linux/files/grafana_dashboards/system_disk_prometheus.json
  15. +1488 -0  linux/files/grafana_dashboards/system_network_prometheus.json
  16. +1625 -0  linux/files/grafana_dashboards/system_overview_prometheus.json
  17. +62 -0  linux/files/login.defs.jinja
  18. +8 -0  linux/files/login_duo.conf
  19. +199 -0  linux/files/mcelog.conf
  20. +2 -1  linux/files/mkhomedir
  21. +48 -6  linux/files/modprobe.conf.jinja
  22. +44 -0  linux/files/openvswitch-switch.default
  23. +18 -0  linux/files/openvswitch-switch.systemd
  24. +14 -0  linux/files/ovs_bridge
  25. +67 -0  linux/files/pam-sshd
  26. +16 -5  linux/files/preferences_repo
  27. +5 -7  linux/files/setup-loopback-device.upstart
  28. +10 -1  linux/files/sysfs.conf
  29. +9 -0  linux/files/telegraf.conf
  30. +179 -27  linux/map.jinja
  31. +69 -4  linux/meta/fluentd.yml
  32. +17 -2  linux/meta/grafana.yml
  33. +229 -87  linux/meta/prometheus.yml
  34. +21 -0  linux/meta/telegraf.yml
  35. +50 -48  linux/network/dpdk.sls
  36. +13 -10  linux/network/hostname.sls
  37. +5 -0  linux/network/init.sls
  38. +151 -45  linux/network/interface.sls
  39. +37 -0  linux/network/openvswitch.sls
  40. +3 -1  linux/storage/lvm.sls
  41. +2 -1  linux/storage/mount.sls
  42. +1 -1  linux/storage/multipath.sls
  43. +18 -0  linux/storage/swap.sls
  44. +62 -0  linux/system/at.sls
  45. +127 -31  linux/system/auth.sls
  46. +36 -0  linux/system/auth/duo.sls
  47. +1 -1  linux/system/autoupdates.sls
  48. +10 -0  linux/system/banner.sls
  49. +87 -0  linux/system/cron.sls
  50. +20 -6  linux/system/file.sls
  51. +17 -3  linux/system/grub.sls
  52. +9 -4  linux/system/hugepages.sls
  53. +16 -0  linux/system/init.sls
  54. +25 -24  linux/system/job.sls
  55. +16 -4  linux/system/kernel.sls
  56. +13 -0  linux/system/login_defs.sls
  57. +32 -0  linux/system/mcelog.sls
  58. +7 -15  linux/system/motd.sls
  59. +1 -1  linux/system/package.sls
  60. +1 -1  linux/system/profile.sls
  61. +126 -128  linux/system/repo.sls
  62. +45 -0  linux/system/shell.sls
  63. +18 -2  linux/system/sysfs.sls
  64. +4 -1  linux/system/timezone.sls
  65. +39 -2  linux/system/user.sls
  66. +1 -1  metadata.yml
  67. +37 -0  metadata/service/system/cis/cis-1-1-1-1.yml
  68. +36 -0  metadata/service/system/cis/cis-1-1-1-2.yml
  69. +36 -0  metadata/service/system/cis/cis-1-1-1-3.yml
  70. +36 -0  metadata/service/system/cis/cis-1-1-1-4.yml
  71. +36 -0  metadata/service/system/cis/cis-1-1-1-5.yml
  72. +43 -0  metadata/service/system/cis/cis-1-1-1-6.yml
  73. +38 -0  metadata/service/system/cis/cis-1-1-1-7.yml
  74. +50 -0  metadata/service/system/cis/cis-1-1-1-8.yml
  75. +95 -0  metadata/service/system/cis/cis-1-1-14_15_16.yml
  76. +53 -0  metadata/service/system/cis/cis-1-1-21.yml
  77. +59 -0  metadata/service/system/cis/cis-1-5-1.yml
  78. +40 -0  metadata/service/system/cis/cis-1-5-3.yml
  79. +37 -0  metadata/service/system/cis/cis-1-5-4.yml
  80. +43 -0  metadata/service/system/cis/cis-2-3-1.yml
  81. +55 -0  metadata/service/system/cis/cis-2-3-2.yml
  82. +39 -0  metadata/service/system/cis/cis-2-3-3.yml
  83. +40 -0  metadata/service/system/cis/cis-2-3-4.yml
  84. +44 -0  metadata/service/system/cis/cis-3-1-2.yml
  85. +56 -0  metadata/service/system/cis/cis-3-2-1.yml
  86. +48 -0  metadata/service/system/cis/cis-3-2-2.yml
  87. +45 -0  metadata/service/system/cis/cis-3-2-3.yml
  88. +44 -0  metadata/service/system/cis/cis-3-2-4.yml
  89. +45 -0  metadata/service/system/cis/cis-3-2-5.yml
  90. +39 -0  metadata/service/system/cis/cis-3-2-6.yml
  91. +51 -0  metadata/service/system/cis/cis-3-2-7.yml
  92. +49 -0  metadata/service/system/cis/cis-3-2-8.yml
  93. +35 -0  metadata/service/system/cis/cis-3-3-3.yml
  94. +38 -0  metadata/service/system/cis/cis-3-5-1.yml
  95. +41 -0  metadata/service/system/cis/cis-3-5-2.yml
  96. +37 -0  metadata/service/system/cis/cis-3-5-3.yml
  97. +37 -0  metadata/service/system/cis/cis-3-5-4.yml
  98. +52 -0  metadata/service/system/cis/cis-5-4-1-1.yml
  99. +52 -0  metadata/service/system/cis/cis-5-4-1-2.yml
  100. +52 -0  metadata/service/system/cis/cis-5-4-1-3.yml

+3 -0  .gitignore

@@ -1,5 +1,8 @@
.kitchen
.bundle
bundle/
tests/build/
*.swp
*.pyc
.ropeproject
Gemfile*

+0 -36  .kitchen.vagrant.yml

@@ -1,36 +0,0 @@
---
driver:
name: vagrant
vm_hostname: linux.ci.local
use_sudo: false
customize:
memory: 1024


provisioner:
name: salt_solo
salt_install: bootstrap
salt_bootstrap_url: https://bootstrap.saltstack.com
salt_version: latest
require_chef: false
log_level: error
formula: linux
grains:
noservices: true
state_top:
base:
"*":
- linux
pillars:
top.sls:
base:
"*":
- linux

platforms:
- name: ubuntu-16.04
- name: ubuntu-14.04
- name: centos-7.3
- name: centos-6.8

# vim: ft=yaml sw=2 ts=2 sts=2 tw=125

+17 -4  .kitchen.yml

@@ -1,8 +1,8 @@
---
driver:
name: docker
hostname: linux.ci.local
use_sudo: false
hostname: linux-formula
run_options: -v /dev/log:/dev/log:ro

provisioner:
name: salt_solo
@@ -29,9 +29,9 @@ verifier:
sudo: true

platforms:
- name: <%=ENV['PLATFORM'] || 'ubuntu-xenial-2017.7'%>
- name: <%=ENV['PLATFORM'] || 'saltstack-ubuntu-xenial-salt-stable' %>
driver_config:
image: <%=ENV['PLATFORM'] || 'trevorj/salty-whales:xenial-2017.7'%>
image: <%=ENV['PLATFORM'] || 'epcim/salt:saltstack-ubuntu-xenial-salt-stable'%>
platform: ubuntu


@@ -63,4 +63,17 @@ suites:
pillars-from-files:
linux.sls: tests/pillar/system.sls

- name: system_file
provisioner:
pillars-from-files:
linux.sls: tests/pillar/system_file.sls
pillars_from_directories:
- source: tests/example
dest: srv/salt/linux/files/test

- name: duo
provisioner:
pillars-from-files:
linux.sls: tests/pillar/system_duo.sls

# vim: ft=yaml sw=2 ts=2 sts=2 tw=125

+16 -14  .travis.yml

@@ -1,3 +1,6 @@
language: python
python:
- "2.7.13"
sudo: required
services:
- docker
@@ -17,24 +20,23 @@ install:
gem 'test-kitchen'
gem 'kitchen-docker'
gem 'kitchen-inspec'
gem 'inspec'
gem 'inspec', '<3.0.0'
#Version was frozen, because of issues in the version of inspec >3.0.0 -- see https://mirantis.jira.com/browse/PROD-24324 for more info
gem 'kitchen-salt', :git => 'https://github.com/salt-formulas/kitchen-salt.git'
- bundle install

env:
- PLATFORM=trevorj/salty-whales:trusty-2017.7 SUITE=network
- PLATFORM=trevorj/salty-whales:xenial-2017.7 SUITE=network
# - PLATFORM=trevorj/salty-whales:trusty-2017.7 SUITE=storage
# - PLATFORM=trevorj/salty-whales:xenial-2017.7 SUITE=storage
- PLATFORM=trevorj/salty-whales:trusty-2017.7 SUITE=system
- PLATFORM=trevorj/salty-whales:xenial-2017.7 SUITE=system
- PLATFORM=trevorj/salty-whales:trusty SUITE=network
- PLATFORM=trevorj/salty-whales:xenial SUITE=network
# - PLATFORM=trevorj/salty-whales:trusty SUITE=storage
# - PLATFORM=trevorj/salty-whales:xenial SUITE=storage
- PLATFORM=trevorj/salty-whales:trusty SUITE=system
- PLATFORM=trevorj/salty-whales:xenial SUITE=system
## Test on both Salt version until there is new test policy accepted
- PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=network
- PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=system
- PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=network
- PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=system
- PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=network
- PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=system
- PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=duo
# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=network
# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=system
# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=network
# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=system

before_script:
- set -o pipefail

+922 -152  README.rst
File diff suppressed because it is too large


+3 -0  _modules/linux_hosts.py

@@ -6,6 +6,9 @@ host names/alias by FQDN first and alphabetically

from jinja2 import Undefined

def __virtual__():
return 'linux_hosts'

def fqdn_sort_fn(n1, n2):
l1 = n1.split('.')
l2 = n2.split('.')

+2 -0  _modules/linux_netlink.py

@@ -2,6 +2,8 @@

import re

def __virtual__():
return 'linux_netlink'

def ls(regex):
"""

+279 -0  _modules/net_checks.py

@@ -0,0 +1,279 @@
from os import listdir, path
from subprocess import Popen,PIPE
from re import findall as refindall
from re import search as research
import salt.utils
import socket, struct, fcntl
import logging

logger = logging.getLogger(__name__)
stream = logging.StreamHandler()
logger.addHandler(stream)

def get_ip(iface='ens2'):

''' Get ip address from an interface if applicable

:param iface: Interface name. Type: str

'''

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockfd = sock.fileno()
SIOCGIFADDR = 0x8915
ifreq = struct.pack('16sH14s', iface, socket.AF_INET, '\x00'*14)

try:
res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
except:
logger.debug("No ip addresses assigned to %s" % iface)
return None

ip = struct.unpack('16sH2x4s8x', res)[2]
return socket.inet_ntoa(ip)

def get_nics():

''' List nics '''

nics = []
nics_list = listdir('/sys/class/net/')
for nic_name in nics_list:
if research('(br|bond|ens|enp|eth|one|ten|fourty)[0-9]+', nic_name):

# Interface should be in "up" state in order to get carrier status
Popen("ip li set dev " + nic_name + " up", shell=True, stdout=PIPE)

with open("/sys/class/net/" + nic_name + "/carrier", 'r') as f:
try:
carrier = int(f.read())
except:
carrier = 0

bond = ""
if path.isfile("/sys/class/net/" + nic_name + "/master/uevent"):
with open("/sys/class/net/" + nic_name + "/master/uevent", 'r') as f:
for line in f:
sline = line.strip()
if 'INTERFACE=bond' in sline:
bond = sline.split('=')[1]
if len(bond) == 0:
with open("/sys/class/net/" + nic_name + "/address", 'r') as f:
macaddr = f.read().strip()
else:
with open("/proc/net/bonding/" + bond, 'r') as f:
line = f.readline()
if_struct = False
while line:
sline = line.strip()
if 'Slave Interface: ' + nic_name in sline and not if_struct:
if_struct = True
if 'Permanent HW addr: ' in sline and if_struct:
macaddr = sline.split()[3]
break
line = f.readline()

with open("/sys/class/net/" + nic_name + "/mtu", 'r') as f:
mtu = f.read()

ip = str(get_ip(nic_name))

nics.append([nic_name, ip, macaddr, carrier, mtu])

return sorted(nics)

def get_ten_pci():

''' List ten nics pci addresses '''

nics = []
nics_list = listdir('/sys/class/net/')
for nic_name in nics_list:
if research('ten[0-9]+', nic_name):
with open("/sys/class/net/" + nic_name + "/device/uevent", 'r') as f:
for line in f:
sline = line.strip()
if "PCI_SLOT_NAME=" in sline:
nics.append([nic_name , sline.split("=")[1]])

return sorted(nics)

def mesh_ping(mesh):

''' One to many ICMP check

:param hosts: Target hosts. Type: list of ip addresses

'''

io = []
minion_id = __salt__['config.get']('id')

for host, hostobj in mesh:
if host == minion_id:
for mesh_net, addr, targets in hostobj:
if addr in targets:
targets.remove(addr)
for tgt in targets:
# This one will run in parallel with everyone else
worker = Popen("ping -c 1 -w 1 -W 1 " + str(tgt), \
shell=True, stdout=PIPE, stderr=PIPE)
ping_out = worker.communicate()[0]
if worker.returncode != 0:
io.append(mesh_net + ': ' + addr + ' -> ' + tgt + ': Failed')

return io

def minion_list():

''' List registered minions '''

return listdir('/etc/salt/pki/master/minions/')

def verify_addresses():

''' Verify addresses taken from pillars '''

nodes = nodes_addresses()
verifier = {}
failed = []

for node, nodeobj in nodes:
for item in nodeobj:
addr = item[1]
if addr in verifier:
failed.append([node,verifier[addr],addr])
else:
verifier[addr] = node

if failed:
logger.error("FAILED. Duplicates found")
logger.error(failed)
return False
else:
logger.setLevel(logging.INFO)
logger.info(["PASSED"])
return True

def nodes_addresses():

''' List servers addresses '''

compound = 'linux:network:interface'
out = __salt__['saltutil.cmd']( tgt='I@' + compound,
tgt_type='compound',
fun='pillar.get',
arg=[compound],
timeout=10
) or None

servers = []
for minion in minion_list():
addresses = []
if minion in out:
ifaces = out[minion]['ret']
for iface in ifaces:
ifobj = ifaces[iface]
if ifobj['enabled'] and 'address' in ifobj:
if 'mesh' in ifobj:
mesh = ifobj['mesh']
else:
mesh = 'default'
addresses.append([mesh, ifobj['address']])
servers.append([minion,addresses])

return servers

def get_mesh():

''' Build addresses mesh '''

full_mesh = {}
nodes = nodes_addresses()

for node, nodeobj in nodes:
for item in nodeobj:
mesh = item[0]
addr = item[1]
if not mesh in full_mesh:
full_mesh[mesh] = []
full_mesh[mesh].append(addr)

for node, nodeobj in nodes:
for item in nodeobj:
mesh = item[0]
tgts = full_mesh[mesh]
item.append(tgts)

return nodes

def ping_check():

''' Ping addresses in a mesh '''

mesh = get_mesh()
out = __salt__['saltutil.cmd']( tgt='*',
tgt_type='glob',
fun='net_checks.mesh_ping',
arg=[mesh],
timeout=10
) or None

failed = []

if out:
for minion in out:
ret = out[minion]['ret']
if ret:
failed.append(ret)
else:
failed = ["No response from minions"]

if failed:
logger.error("FAILED")
logger.error('\n'.join(str(x) for x in failed))
return False
else:
logger.setLevel(logging.INFO)
logger.info(["PASSED"])
return True

def get_nics_csv(delim=","):

''' List nics in csv format

:param delim: Delimiter char. Type: str

'''

header = "server,nic_name,ip_addr,mac_addr,link,chassis_id,chassis_name,port_mac,port_descr\n"
io = ""

# Try to reuse lldp output if possible
try:
lldp_info = Popen("lldpcli -f keyvalue s n s", shell=True, stdout=PIPE).communicate()[0]
except:
lldp_info = ""

for nic in get_nics():
lldp = ""
nic_name = nic[0]
if research('(one|ten|fourty)[0-9]+', nic_name):
# Check if we can fetch lldp data for that nic
for line in lldp_info.splitlines():
chassis = 'lldp.' + nic[0] + '.chassis'
port = 'lldp.' + nic[0] + '.port'
if chassis in line or port in line:
lldp += delim + line.split('=')[1]
if not lldp:
lldp = delim + delim + delim + delim

io += __salt__['config.get']('id') + \
delim + nic_name + \
delim + str(nic[1]).strip() + \
delim + str(nic[2]).strip() + \
delim + str(nic[3]).strip() + \
delim + str(nic[4]).strip() + \
lldp + "\n"

return header + io

+101 -0  _modules/ovs_config.py

@@ -0,0 +1,101 @@
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch database configuration.

'''
from __future__ import absolute_import

import logging
import salt.utils

log = logging.getLogger(__name__)


def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.which('ovs-vsctl'):
return 'ovs_config'
return False


def _retcode_to_bool(retcode):
'''
Evaluates the ovs-vsctl command's retcode value.

Args:
retcode: Value of retcode field from response.
'''
return True if retcode == 0 else False


def set(cfg, value, wait=True):
'''
Updates a specified configuration entry.

Args:
cfg/value: a config entry to update
wait: wait or not for ovs-vswitchd to reconfigure itself before it exits.

CLI Example:
.. code-block:: bash

salt '*' ovs_config.set other_config:dpdk-init true
'''
wait = '' if wait else '--no-wait '

cmd = 'ovs-vsctl {0}set Open_vSwitch . {1}="{2}"'.format(wait, cfg, str(value).lower())
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])


def remove(cfg):
'''
Removes a specified configuration entry.

Args:
cfg: a config entry to remove

CLI Example:
.. code-block:: bash

salt '*' ovs_config.remove other_config
'''
if ':' in cfg:
section, key = cfg.split(':')
cmd = 'ovs-vsctl remove Open_vSwitch . {} {}'.format(section, key)
else:
cmd = 'ovs-vsctl clear Open_vSwitch . ' + cfg

result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])


def list():
'''
Return a current config of Open vSwitch

CLI Example:

.. code-block:: bash

salt '*' ovs_config.list
'''
cmd = 'ovs-vsctl list Open_vSwitch .'
result = __salt__['cmd.run_all'](cmd)

if result['retcode'] == 0:
config = {}
for l in result['stdout'].splitlines():
cfg, value = map((lambda x: x.strip()), l.split(' : '))
if value.startswith('{') and len(value) > 2:
for i in value[1:-1].replace('"', '').split(', '):
_k, _v = i.split('=')
config['{}:{}'.format(cfg,_k)] = _v
else:
config[cfg] = value

return config
else:
return False

+81 -0  _states/ovs_config.py

@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-
'''
Management of Open vSwitch configuration
========================================

The OVS config can be managed with the ovs_config state module:

.. code-block:: yaml

other_config:dpdk-init:
ovs_config.present:
- value: True

other_config:dpdk-extra:
ovs_config.present:
- value: -n 12 --vhost-owner libvirt-qemu:kvm --vhost-perm 0664

external_ids:
ovs_config.absent
'''


def __virtual__():
'''
Only make these states available if Open vSwitch is installed.
'''
return 'ovs_config.list' in __salt__


def present(name, value, wait=True):
'''
Ensures that the named config entry exists, creating it if necessary.

Args:
name/value: The name/value of the config entry.
wait: Whether to wait for ovs-vswitchd to reconfigure itself according to the modified database.
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
ovs_config = __salt__['ovs_config.list']()

if name in ovs_config and ovs_config[name] == str(value).lower():
ret['result'] = True
ret['comment'] = '{0} is already set to {1}.'.format(name, value)
else:
config_updated = __salt__['ovs_config.set'](name, value, wait)
if config_updated:
ret['result'] = True
ret['comment'] = '{0} is updated.'.format(name)
ret['changes'] = { name: 'Updated to {0}'.format(value) }
else:
ret['result'] = False
ret['comment'] = 'Unable to update config of {0}.'.format(name)

return ret


def absent(name):
'''
Ensures that the named config entry does not exist, deleting it if necessary.

Args:
name: The name of the config entry.

'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
ovs_config = __salt__['ovs_config.list']()

if ':' in name and name not in ovs_config:
ret['result'] = True
ret['comment'] = '{0} does not exist.'.format(name)
else:
config_removed = __salt__['ovs_config.remove'](name)
if config_removed:
ret['result'] = True
ret['comment'] = '{0} is removed.'.format(name)
ret['changes'] = { name: '{0} removed'.format(name) }
else:
ret['result'] = False
ret['comment'] = 'Unable to delete config of {0}.'.format(name)

return ret

+1 -1  linux/files/95proxies

@@ -1,4 +1,4 @@
{%- from "linux/map.jinja" import network with context %}
Acquire::http::proxy "http://{{ network.proxy.host }}:{{ network.proxy.port }}/";
Acquire::ftp::proxy "ftp://{{ network.proxy.host }}:{{ network.proxy.port }}/";
Acquire::https::proxy "https://{{ network.proxy.host }}:{{ network.proxy.port }}/";
Acquire::https::proxy "http://{{ network.proxy.host }}:{{ network.proxy.port }}/";

+5 -0  linux/files/cron_users.jinja

@@ -0,0 +1,5 @@
# This file is managed by Salt, do not edit
{%- for user_name in users %}
{{ user_name }}
{%- endfor %}
{# IMPORTANT: This file SHOULD end with a newline #}
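
For illustration only, a pillar sketch that could drive this template, assuming linux/system/cron.sls collects the enabled user names from linux:system:cron:user (the layout beyond the cron:user default added to linux/map.jinja, and the user names themselves, are assumptions):

linux:
  system:
    cron:
      user:
        root:
          enabled: true
        backupuser:
          enabled: true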

+360 -0  linux/files/grafana_dashboards/bond_prometheus.json

@@ -0,0 +1,360 @@
{% raw %}
{
"annotations": {
"list": []
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"hideControls": false,
"id": null,
"links": [],
"rows": [
{
"collapse": false,
"height": 333,
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"decimals": 0,
"fill": 1,
"id": 1,
"legend": {
"avg": false,
"current": true,
"max": false,
"min": false,
"show": true,
"total": false,
"values": true
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "bond_slave_failures{host=\"$host\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ bond}}: {{ interface }}",
"refId": "A",
"step": 2
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Bond slave failures count",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"transparent": false,
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"decimals": 0,
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"decimals": null,
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"decimals": 0,
"fill": 1,
"id": 2,
"legend": {
"alignAsTable": false,
"avg": false,
"current": true,
"max": false,
"min": false,
"show": true,
"total": false,
"values": true
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "bond_status{host=\"$host\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ bond }}",
"refId": "A",
"step": 2
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Bond status",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"decimals": 0,
"format": "none",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"repeat": null,
"repeatIteration": null,
"repeatRowId": null,
"showTitle": false,
"title": "Dashboard Row",
"titleSize": "h6"
},
{
"collapse": false,
"height": 447,
"panels": [
{
"columns": [],
"datasource": "prometheus",
"fontSize": "100%",
"hideTimeOverride": false,
"id": 4,
"links": [],
"pageSize": null,
"scroll": true,
"showHeader": true,
"sort": {
"col": 0,
"desc": true
},
"span": 12,
"styles": [
{
"alias": "Time",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "Time",
"type": "date"
},
{
"alias": "",
"colorMode": null,
"colors": [
"rgba(245, 54, 54, 0.9)",
"rgba(237, 129, 40, 0.89)",
"rgba(50, 172, 45, 0.97)"
],
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"decimals": 2,
"pattern": "__name__",
"thresholds": [],
"type": "hidden",
"unit": "short"
},
{
"alias": "",
"colorMode": "row",
"colors": [
"rgba(245, 54, 54, 0.9)",
"rgba(26, 161, 12, 0.89)",
"rgba(50, 172, 45, 0.97)"
],
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"decimals": 0,
"pattern": "Value",
"thresholds": [
"1"
],
"type": "number",
"unit": "none"
},
{
"alias": "",
"colorMode": null,
"colors": [
"rgba(245, 54, 54, 0.9)",
"rgba(237, 129, 40, 0.89)",
"rgba(50, 172, 45, 0.97)"
],
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"decimals": 2,
"pattern": "job",
"thresholds": [],
"type": "hidden",
"unit": "short"
},
{
"alias": "",
"colorMode": null,
"colors": [
"rgba(245, 54, 54, 0.9)",
"rgba(237, 129, 40, 0.89)",
"rgba(50, 172, 45, 0.97)"
],
"decimals": 0,
"pattern": "/.*/",
"thresholds": [
""
],
"type": "number",
"unit": "none"
}
],
"targets": [
{
"expr": "bond_slave_status{host=\"$host\"}",
"format": "table",
"intervalFactor": 2,
"legendFormat": "",
"refId": "A",
"step": 2
}
],
"timeFrom": "1s",
"timeShift": null,
"title": "Bond slave status",
"transform": "table",
"transparent": false,
"type": "table"
}
],
"repeat": null,
"repeatIteration": null,
"repeatRowId": null,
"showTitle": false,
"title": "Dashboard Row",
"titleSize": "h6"
}
],
"schemaVersion": 14,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"allValue": null,
"current": {},
"datasource": "prometheus",
"hide": 0,
"includeAll": false,
"label": null,
"multi": false,
"name": "host",
"options": [],
"query": "label_values(bond_status,host)",
"refresh": 1,
"regex": "",
"sort": 1,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
},
"time": {
"from": "now-5m",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Bond",
"version": 25
}
{% endraw %}

+1134 -0  linux/files/grafana_dashboards/system_disk_prometheus.json
File diff suppressed because it is too large


+1488 -0  linux/files/grafana_dashboards/system_network_prometheus.json
File diff suppressed because it is too large


+1625 -0  linux/files/grafana_dashboards/system_overview_prometheus.json
File diff suppressed because it is too large


+62 -0  linux/files/login.defs.jinja

@@ -0,0 +1,62 @@
{%- from "linux/map.jinja" import login_defs with context -%}
# This file is managed by Salt, do not edit
{%- set allowed_options = [
'CHFN_RESTRICT',
'CONSOLE_GROUPS',
'CREATE_HOME',
'DEFAULT_HOME',
'ENCRYPT_METHOD',
'ENV_HZ',
'ENV_PATH',
'ENV_SUPATH',
'ERASECHAR',
'FAIL_DELAY',
'FAKE_SHELL',
'GID_MAX',
'GID_MIN',
'HUSHLOGIN_FILE',
'KILLCHAR',
'LOG_OK_LOGINS',
'LOG_UNKFAIL_ENAB',
'LOGIN_RETRIES',
'LOGIN_TIMEOUT',
'MAIL_DIR',
'MAIL_FILE',
'MAX_MEMBERS_PER_GROUP',
'MD5_CRYPT_ENAB',
'PASS_MAX_DAYS',
'PASS_MIN_DAYS',
'PASS_WARN_AGE',
'SHA_CRYPT_MIN_ROUNDS',
'SHA_CRYPT_MAX_ROUNDS',
'SULOG_FILE',
'SU_NAME',
'SUB_GID_MIN',
'SUB_GID_MAX',
'SUB_GID_COUNT',
'SUB_UID_MIN',
'SUB_UID_MAX',
'SUB_UID_COUNT',
'SYS_GID_MAX',
'SYS_GID_MIN',
'SYS_UID_MAX',
'SYS_UID_MIN',
'SYSLOG_SG_ENAB',
'SYSLOG_SU_ENAB',
'TTYGROUP',
'TTYPERM',
'TTYTYPE_FILE',
'UID_MAX',
'UID_MIN',
'UMASK',
'USERDEL_CMD',
'USERGROUPS_ENAB'
] %}
{%- for opt_name in allowed_options %}
{%- if opt_name in login_defs %}
{%- set opt_params = login_defs.get(opt_name) %}
{%- if opt_params.get('enabled', true) %}
{{ opt_name.ljust(20) }} {{ opt_params.value }}
{%- endif %}
{%- endif %}
{%- endfor %}
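
The values rendered by this template come from the linux:system:login_defs pillar merged over the Debian defaults added to linux/map.jinja; a minimal, illustrative override (the values are examples, not recommendations) could be:

linux:
  system:
    login_defs:
      PASS_MAX_DAYS:
        value: 90
      UMASK:
        value: '027'
      FAIL_DELAY:
        enabled: true
        value: 4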

+8 -0  linux/files/login_duo.conf

@@ -0,0 +1,8 @@
{%- from "linux/map.jinja" import auth with context %}
[duo]
ikey = {{ auth.duo.duo_ikey }}
skey = {{ auth.duo.duo_skey }}
host = {{ auth.duo.duo_host }}
pushinfo = yes
failmode = secure
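
The rendered keys map one-to-one to the linux:system:auth:duo defaults added in linux/map.jinja; an illustrative pillar (the host and key values are placeholders) might look like:

linux:
  system:
    auth:
      duo:
        enabled: true
        duo_host: api-XXXXXXXX.duosecurity.com
        duo_ikey: YOUR_INTEGRATION_KEY
        duo_skey: YOUR_SECRET_KEY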


+199 -0  linux/files/mcelog.conf

@@ -0,0 +1,199 @@
{%- from "linux/map.jinja" import system with context %}
{%- set mcelog = system.mcelog %}
#
# Example config file for mcelog
# mcelog is the user space backend that decodes and process machine check events
# (cpu hardware errors) reported by the CPU to the kernel
#

# general format
#optionname = value
# white space is not allowed in value currently, except at the end where it is dropped
#

# In general all command line options that are not commands work here.
# See man mcelog or mcelog --help for a list.
# e.g. to enable the --no-syslog option use
#no-syslog = yes (or no to disable)
# when the option has a argument
#logfile = /tmp/logfile
# below are the options which are not command line options.

# Set CPU type for which mcelog decodes events:
#cpu = type
# For valid values for type please see mcelog --help.
# If this value is set incorrectly the decoded output will be likely incorrect.
# By default when this parameter is not set mcelog uses the CPU it is running on
# on very new kernels the mcelog events reported by the kernel also carry
# the CPU type which is used too when available and not overriden.

# Enable daemon mode:
#daemon = yes
# By default mcelog just processes the currently pending events and exits.
# In daemon mode it will keep running as a daemon in the background and poll
# the kernel for events and then decode them.

# Filter out known broken events by default.
filter = yes
# Don't log memory errors individually.
# They still get accounted if that is enabled.
#filter-memory-errors = yes

# output in undecoded raw format to be easier machine readable
# (default is decoded).
#raw = yes

# Set CPU Mhz to decode uptime from time stamp counter (output
# unreliable, not needed on new kernels which report the event time
# directly. A lot of systems don't have a linear time stamp clock
# and the output is wrong then.
# Normally mcelog tries to figure out if it the TSC is reliable
# and only uses the current frequency then.
# Setting a frequency forces timestamp decoding.
# This setting is obsolete with modern kernels which report the time
# directly.
#cpumhz = 1800.00

# log output options
# Log decoded machine checks in syslog (default stdout or syslog for daemon)
#syslog = yes
# Log decoded machine checks in syslog with error level
#syslog-error = yes
# Never log anything to syslog
#no-syslog = yes
# Append log output to logfile instead of stdout. Only when no syslog logging is active
#logfile = filename

{%- if mcelog.logging is defined %}

{%- if mcelog.logging.syslog is defined %}
syslog = {{ 'yes' if mcelog.logging.syslog else 'no' }}
{%- endif %}
{%- if mcelog.logging.syslog_error is defined %}
syslog-error = {{ 'yes' if mcelog.logging.syslog_error else 'no' }}
{%- endif %}
{%- if mcelog.logging.no_syslog is defined %}
no-syslog = {{ 'yes' if mcelog.logging.no_syslog else 'no' }}
{%- endif %}
{%- if mcelog.logging.logfile is defined %}
logfile = {{ mcelog.logging.logfile }}
{%- endif %}

{%- endif %}
# Use SMBIOS information to decode DIMMs (needs root).
# This function is not recommended to use right now and generally not needed.
# The exception is memdb prepopulation, which is configured separately below.
#dmi = no

# When in daemon mode run as this user after set up.
# Note that the triggers will run as this user too.
# Setting this to non root will mean that triggers cannot take some corrective
# action, like offlining objects.
#run-credentials-user = root

# group to run as daemon with
# default to the group of the run-credentials-user
#run-credentials-group = nobody

[server]
# user allowed to access client socket.
# when set to * match any
# root is always allowed to access.
# default: root only
client-user = root
# group allowed to access mcelog
# When no group is configured any group matches (but still user checking).
# when set to * match any
#client-group = root
# Path to the unix socket for client<->server communication.
# When no socket-path is configured the server will not start
#socket-path = /var/run/mcelog-client
# When mcelog starts it checks if a server is already running. This configures the timeout
# for this check.
#initial-ping-timeout = 2
#
[dimm]
# Is the in memory DIMM error tracking enabled?
# Only works on systems with integrated memory controller and
# which are supported.
# Only takes effect in daemon mode.
dimm-tracking-enabled = yes
# Use DMI information from the BIOS to prepopulate DIMM database.
# Note this might not work with all BIOS and requires mcelog to run as root.
# Alternative is to let mcelog create DIMM objects on demand.
dmi-prepopulate = yes
#
# Execute these triggers when the rate of corrected or uncorrected
# Errors per DIMM exceeds the threshold.
# Note when the hardware does not report DIMMs this might also
# be per channel.
# The default of 10/24h is reasonable for server quality
# DDR3 DIMMs as of 2009/10.
#uc-error-trigger = dimm-error-trigger
uc-error-threshold = 1 / 24h
#ce-error-trigger = dimm-error-trigger
ce-error-threshold = 10 / 24h

[socket]
# Enable memory error accounting per socket.
socket-tracking-enabled = yes

# Threshold and trigger for uncorrected memory errors on a socket.
# mem-uc-error-trigger = socket-memory-error-trigger

mem-uc-error-threshold = 100 / 24h

# Trigger script for corrected memory errors on a socket.
mem-ce-error-trigger = socket-memory-error-trigger

# Threshold on when to trigger a correct error for the socket.

mem-ce-error-threshold = 100 / 24h

# Log socket error threshold explicitely?
mem-ce-error-log = yes

# Trigger script for uncorrected bus error events
bus-uc-threshold-trigger = bus-error-trigger

# Trigger script for uncorrected IOMCA erors
iomca-threshold-trigger = iomca-error-trigger

# Trigger script for other uncategorized errors
unknown-threshold-trigger = unknown-error-trigger

[cache]
# Processing of cache error thresholds reported by Intel CPUs.
cache-threshold-trigger = cache-error-trigger

# Should cache threshold events be logged explicitely?
cache-threshold-log = yes

[page]
# Memory error accouting per 4K memory page.
# Threshold for the correct memory errors trigger script.
memory-ce-threshold = 10 / 24h

# Trigger script for corrected errors.
# memory-ce-trigger = page-error-trigger

# Should page threshold events be logged explicitely?
memory-ce-log = yes

# specify the internal action in mcelog to exceeding a page error threshold
# this is done in addition to executing the trigger script if available
# off no action
# account only account errors
# soft try to soft-offline page without killing any processes
# This requires an uptodate kernel. Might not be successfull.
# hard try to hard-offline page by killing processes
# Requires an uptodate kernel. Might not be successfull.
# soft-then-hard First try to soft offline, then try hard offlining
#memory-ce-action = off|account|soft|hard|soft-then-hard
memory-ce-action = soft

[trigger]
# Maximum number of running triggers
children-max = 2
# execute triggers in this directory
directory = /etc/mcelog

+2 -1  linux/files/mkhomedir

@@ -1,6 +1,7 @@
{%- from "linux/map.jinja" import auth with context %}
Name: Create home directory during login
Default: yes
Priority: 0
Session-Type: Additional
Session-Final:
required pam_mkhomedir.so skel=/etc/skel umask=0022 silent
required pam_mkhomedir.so skel=/etc/skel umask={{ auth.mkhomedir.get('umask', '0022') }} silent
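
A minimal pillar sketch for the new umask parameter, assuming mkhomedir is configured under the same linux:system:auth tree the template imports (the value shown is only an example):

linux:
  system:
    auth:
      mkhomedir:
        umask: '0027'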

+48 -6  linux/files/modprobe.conf.jinja

@@ -1,9 +1,51 @@
{% if module_content.get('blacklist', false) -%}
{%- from "linux/map.jinja" import system with context -%}
# This file is managed by Salt, do not edit.
{%- set module_content = system.kernel.module.get(module_name) %}
{%- if module_content.get('blacklist', false) %}
blacklist {{ module_name }}
{%- else -%}

{%- for option, value in module_content.get('option', {}) | dictsort -%}
options {{ module_name }} {{ option }}={{ value }}
{%- endif %}
{%- for alias, params in module_content.get('alias', {}) | dictsort %}
{%- if params.get('enabled', true) %}
alias {{ alias }} {{ module_name }}
{%- endif %}
{%- endfor %}
{%- set options = [] %}
{%- for option, params in module_content.get('option', {}) | dictsort %}
{%- if params is mapping %}
{%- if params.get('enabled', true) and params.value is defined %}
{%- do options.append(option ~ '=' ~ params.value) %}
{%- endif %}
{%- else %}
{%- do options.append(option ~ '=' ~ params) %}
{%- endif %}
{%- endfor %}

{%- if options | length > 0 %}
options {{ module_name }} {{ options | join(' ')}}
{%- endif %}
{%- if module_content.install is defined %}
{%- if module_content.install.get('enabled', true) and module_content.install.command is defined %}
install {{ module_name }} {{ module_content.install.command }}
{%- endif %}
{%- endif %}
{%- if module_content.remove is defined %}
{%- if module_content.remove.get('enabled', true) and module_content.remove.command is defined %}
remove {{ module_name }} {{ module_content.remove.command }}
{%- endif %}
{%- endif %}
{%- if module_content.softdep is defined %}
{%- set pre = [] %}
{%- set post = [] %}
{%- for pos, params in module_content.softdep.get('pre', {}) | dictsort %}
{%- if params.get('enabled', true) and params.value is defined %}
{%- do pre.append(params.value) %}
{%- endif %}
{%- endfor %}
{%- for pos, params in module_content.softdep.get('post', {}) | dictsort %}
{%- if params.get('enabled', true) and params.value is defined %}
{%- do post.append(params.value) %}
{%- endif %}
{%- endfor %}
{%- if pre | length + post | length > 0 %}
softdep {{ module_name }}{% if pre | length > 0 %} pre: {{ pre | join(' ') }}{% endif %}{% if post | length > 0 %} post: {{ post | join(' ') }}{% endif %}
{%- endif %}
{%- endif %}
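
The template reads its data from system.kernel.module in linux/map.jinja; a hedged pillar sketch exercising the blacklist and option branches (module names and values are illustrative):

linux:
  system:
    kernel:
      module:
        nf_conntrack:
          option:
            hashsize: 262144
        bonding:
          blacklist: true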

+44 -0  linux/files/openvswitch-switch.default

@@ -0,0 +1,44 @@
{%- from "linux/map.jinja" import network with context %}
{%- set openvswitch = network.openvswitch %}
# This is a POSIX shell fragment -*- sh -*-

# FORCE_COREFILES: If 'yes' then core files will be enabled.
# FORCE_COREFILES=yes

# OVS_CTL_OPTS: Extra options to pass to ovs-ctl. This is, for example,
# a suitable place to specify --ovs-vswitchd-wrapper=valgrind.
# OVS_CTL_OPTS=

# OVS_VSWITCHD_OPTS: Extra options to pass to ovs-ctl.
# Options to start Open vSwitch daemon with.
# Example: '-vconsole:dbg -vsyslog:dbg -vfile:dbg -vFACILITY:clock2'
# OVS_VSWITCHD_OPTS=
{%- if openvswitch.get('logging', {}).vswitchd is defined %}
{%- set _vswitchd_opts = [] %}
{%- for opt in ['console', 'file', 'syslog'] %}
{%- if openvswitch.logging.vswitchd.get(opt) %}
{%- do _vswitchd_opts.append("-v"+ opt + ":" + openvswitch.logging.vswitchd.get(opt)) %}
{%- endif %}
{%- endfor %}
{%- if openvswitch.logging.vswitchd.facility is defined %}
{%- do _vswitchd_opts.append("-vFACILITY:" + openvswitch.logging.vswitchd.facility) %}
{%- endif %}
OVS_VSWITCHD_OPTS="{{ ' '.join(_vswitchd_opts) }}"
{%- endif %}

# OVSDB_OPTS: Extra options to pass to ovs-ctl.
# Options to start Open vSwitch DB daemon with.
# Example: '-vconsole:dbg -vsyslog:dbg -vfile:dbg -vFACILITY:clock2'
# OVSDB_OPTS=
{%- if openvswitch.get('logging', {}).ovsdb is defined %}
{%- set _ovsdb_opts = [] %}
{%- for opt in ['console', 'file', 'syslog'] %}
{%- if openvswitch.logging.ovsdb.get(opt) %}
{%- do _ovsdb_opts.append("-v" + opt + ":" + openvswitch.logging.ovsdb.get(opt)) %}
{%- endif %}
{%- endfor %}
{%- if openvswitch.logging.ovsdb.facility is defined %}
{%- do _ovsdb_opts.append("-vFACILITY:" + openvswitch.logging.ovsdb.facility) %}
{%- endif %}
OVSDB_OPTS="{{ ' '.join(_ovsdb_opts) }}"
{%- endif %}
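
The OVS_VSWITCHD_OPTS and OVSDB_OPTS blocks are driven by linux:network:openvswitch:logging; an example pillar (log levels and facility are placeholders) could be:

linux:
  network:
    openvswitch:
      logging:
        vswitchd:
          syslog: info
          file: dbg
          facility: local0
        ovsdb:
          syslog: info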

+18 -0  linux/files/openvswitch-switch.systemd

@@ -0,0 +1,18 @@
[Unit]
Description=Open vSwitch
Before=network.target
After=network-pre.target ovsdb-server.service ovs-vswitchd.service
PartOf=network.target
Requires=ovsdb-server.service
Requires=ovs-vswitchd.service

[Service]
Type=oneshot
ExecStart=/usr/bin/ovs-vsctl set open . external-ids:hostname=%H
ExecReload=/bin/true
ExecStop=/bin/true
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target


+14 -0  linux/files/ovs_bridge

@@ -0,0 +1,14 @@
auto {{ bridge_name }}
iface {{ bridge_name }} inet {{ bridge.get('proto', 'static' if bridge.address is defined else 'manual') }}
ovs_type {{ bridge.get('ovs_bridge_type', 'OVSBridge') }}
mtu {{ bridge.get('mtu', '1500') }}
{%- if bridge.address is defined %}
address {{ bridge.address }}
netmask {{ bridge.netmask }}
{%- endif %}
{%- if bridge.gateway is defined %}
gateway {{ bridge.gateway }}
{%- endif %}
{%- if bridge.ovs_options is defined %}
ovs_options {{ bridge.ovs_options }}
{%- endif %}
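
A hedged sketch of an interface definition this template could render, assuming OVS bridges are declared under linux:network:interface with type ovs_bridge as elsewhere in the formula (names and addresses are invented):

linux:
  network:
    interface:
      br-prv:
        enabled: true
        type: ovs_bridge
        mtu: 9000
        address: 10.0.0.10
        netmask: 255.255.255.0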

+67 -0  linux/files/pam-sshd

@@ -0,0 +1,67 @@
{%- from "linux/map.jinja" import auth with context %}

# PAM configuration for the Secure Shell service

{%- if auth.duo.enabled %}
auth required /lib64/security/pam_duo.so
account required pam_nologin.so

# Standard Un*x authentication.
#@include common-auth
{%- else %}
# Standard Un*x authentication.
@include common-auth
{%- endif %}

# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so

# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so

# Standard Un*x authorization.
@include common-account

# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close

# Set the loginuid process attribute.
session required pam_loginuid.so

# Create a new session keyring.
session optional pam_keyinit.so force revoke

# Standard Un*x session setup and teardown.
@include common-session

# Print the message of the day upon successful login.
# This includes a dynamically generated part from /run/motd.dynamic
# and a static (admin-editable) part from /etc/motd.
session optional pam_motd.so motd=/run/motd.dynamic
session optional pam_motd.so noupdate

# Print the status of the user's mailbox upon successful login.
session optional pam_mail.so standard noenv # [1]

# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so


# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale

# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open

# Standard Un*x password updating.
@include common-password


+16 -5  linux/files/preferences_repo

@@ -1,8 +1,19 @@
{%- from "linux/map.jinja" import system with context %}
{%- set repo = system.repo[repo_name] %}
{%- for pin in repo.pin %}
{%- set package = pin.get('package', '*') %}
{%- from "linux/map.jinja" import system with context -%}
{%- set repo = system.repo[repo_name] -%}
{%- if repo.pinning is defined -%}
{%- for id,pin in repo.pinning|dictsort -%}
{% if pin.get('enabled', False) %}

Package: {{ pin.get('package','*') }}
Pin: {{ pin.pin }}
Pin-Priority: {{ pin.priority }}
{%- endif %}
{%- endfor -%}
{%- elif repo.pin is defined -%}
{%- for pin in repo.pin -%}
{%- set package = pin.get('package', '*') %}
Package: {{ package }}
Pin: {{ pin.pin }}
Pin-Priority: {{ pin.priority }}
{% endfor %}
{%- endfor %}
{%- endif -%}
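
The new pinning branch expects entries keyed by an arbitrary id under the repository definition; a sketch with a made-up repository name and pin values:

linux:
  system:
    repo:
      mcp_saltstack:
        pinning:
          10:
            enabled: true
            package: 'salt-*'
            pin: 'release o=SaltStack'
            priority: 1100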

+5 -7  linux/files/setup-loopback-device.upstart

@@ -1,12 +1,10 @@
description "Setup {{ device_name }} device"

start on filesystem
task

pre-start script
if /sbin/losetup {{ device_name }}; then
stop ; exit 0
fi
end script
pre-start exec losetup {{ device_name }} {{ file }}
post-stop exec losetup -d {{ device_name }}

exec losetup {{ device_name }} {{ file }}
script
while losetup {{ device_name }} ; do sleep 60 ; done
end script

+10 -1  linux/files/sysfs.conf

@@ -1,7 +1,15 @@
# Sysfs file for {{ name }} managed by salt-minion(1)
# DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN

{%- for key, value in sysfs.items() %}
{%- if sysfs is mapping %}
{%- set sysfs_list = [sysfs] %}
{%- else %}
{%- set sysfs_list = sysfs %}
{%- endif %}


{%- for item in sysfs_list %}
{%- for key, value in item.items() %}
{%- if key in ["mode", "owner"] %}
{%- for attr, val in value.items() %}
mode {{ attr }} = {{ val }}
@@ -10,6 +18,7 @@ mode {{ attr }} = {{ val }}
{{ key }} = {{ value }}
{%- endif %}
{%- endfor %}
{%- endfor %}

{#-
vim: syntax=jinja
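
With this change the template accepts either a single mapping or a list of mappings per sysfs file; an assumed pillar shape (paths and values are illustrative):

linux:
  system:
    sysfs:
      scheduler:
        block/sda/queue/scheduler: deadline
      rps:
        - net/eth0/queues/rx-0/rps_cpus: f
        - net/eth1/queues/rx-0/rps_cpus: f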

+9 -0  linux/files/telegraf.conf

@@ -0,0 +1,9 @@
[[inputs.bond]]
{%- include 'telegraf/files/input/_common.conf' %}
{%- if values.bond_interfaces is defined %}
bond_interfaces = {{ values.bond_interfaces | json }}
{%- endif %}
{%- if values.host_proc is defined %}
host_proc = "{{ values.host_proc | json }}"
{%- endif %}
{%- include 'telegraf/files/input/_filters.conf' %}

+179 -27  linux/map.jinja

@@ -26,6 +26,14 @@
'logpath': '/var/log/atop',
'outfile': '/var/log/atop/daily.log'
},
'at': {
'pkgs': [],
'services': []
},
'cron': {
'pkgs': [],
'services': []
},
},
'Debian': {
'pkgs': ['python-apt', 'apt-transport-https', 'libmnl0'],
@@ -54,6 +62,16 @@
'logpath': '/var/log/atop',
'outfile': '/var/log/atop/daily.log'
},
'at': {
'pkgs': ['at'],
'services': ['atd'],
'user': {}
},
'cron': {
'pkgs': ['cron'],
'services': ['cron'],
'user': {}
},
},
'RedHat': {
'pkgs': ['policycoreutils', 'policycoreutils-python', 'telnet', 'wget'],
@@ -82,25 +100,57 @@
'logpath': '/var/log/atop',
'outfile': '/var/log/atop/daily.log'
},
'at': {
'pkgs': [],
'services': []
},
'cron': {
'pkgs': [],
'services': []
},
},
}, grain='os_family', merge=salt['pillar.get']('linux:system')) %}

{% set banner = salt['grains.filter_by']({
'BaseDefaults': {
'enabled': false,
},
}, grain='os_family', merge=salt['pillar.get']('linux:system:banner'), base='BaseDefaults') %}

{% set auth = salt['grains.filter_by']({
'Arch': {
'enabled': false,
'duo': {
'enabled': false,
'duo_host': 'localhost',
'duo_ikey': '',
'duo_skey': ''
}
},
'RedHat': {
'enabled': false,
'duo': {
'enabled': false,
'duo_host': 'localhost',
'duo_ikey': '',
'duo_skey': ''
}
},
'Debian': {
'enabled': false,
'duo': {
'enabled': false,
'duo_host': 'localhost',
'duo_ikey': '',
'duo_skey': ''
}
},
}, grain='os_family', merge=salt['pillar.get']('linux:system:auth')) %}

{% set ldap = salt['grains.filter_by']({
'RedHat': {
'enabled': false,
'pkgs': ['openldap-clients', 'nss-pam-ldapd', 'authconfig'],
'pkgs': ['openldap-clients', 'nss-pam-ldapd', 'authconfig', 'nscd'],
'version': '3',
'scope': 'sub',
'uid': 'nslcd',
@@ -108,7 +158,7 @@
},
'Debian': {
'enabled': false,
'pkgs': ['libnss-ldapd', 'libpam-ldapd'],
'pkgs': ['libnss-ldapd', 'libpam-ldapd', 'nscd'],
'version': '3',
'scope': 'sub',
'uid': 'nslcd',
@@ -116,6 +166,70 @@
},
}, grain='os_family', merge=salt['pillar.get']('linux:system:auth:ldap')) %}

{%- load_yaml as login_defs_defaults %}
Debian:
CHFN_RESTRICT:
value: 'rwh'
DEFAULT_HOME:
value: 'yes'
ENCRYPT_METHOD:
value: 'SHA512'
ENV_PATH:
value: 'PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games'
ENV_SUPATH:
value: 'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
ERASECHAR:
value: '0177'
FAILLOG_ENAB:
value: 'yes'
FTMP_FILE:
value: '/var/log/btmp'
GID_MAX:
value: '60000'
GID_MIN:
value: '1000'
HUSHLOGIN_FILE:
value: '.hushlogin'
KILLCHAR:
value: '025'
LOGIN_RETRIES:
value: '5'
LOGIN_TIMEOUT:
value: '60'
LOG_OK_LOGINS:
value: 'no'
LOG_UNKFAIL_ENAB:
value: 'no'
MAIL_DIR:
value: '/var/mail'
PASS_MAX_DAYS:
value: '99999'
PASS_MIN_DAYS:
value: '0'
PASS_WARN_AGE:
value: '7'
SU_NAME:
value: 'su'
SYSLOG_SG_ENAB:
value: 'yes'
SYSLOG_SU_ENAB:
value: 'yes'
TTYGROUP:
value: 'tty'
TTYPERM:
value: '0600'
UID_MAX:
value: '60000'
UID_MIN:
value: '1000'
UMASK:
value: '022'
USERGROUPS_ENAB:
value: 'yes'
{%- endload %}
{%- set login_defs = salt['grains.filter_by'](login_defs_defaults,
grain='os_family', merge=salt['pillar.get']('linux:system:login_defs')) %}

{# 'network_name', #}

{% set interface_params = [
@@ -141,6 +255,13 @@
'maxwait',
'stp',
'gro',
'rx',
'tx',
'sg',
'tso',
'ufo',
'gso',
'lro',
'lacp_rate',
'ad_select',
'downdelay',
@@ -152,12 +273,12 @@
'arp_ip_target',
'primary',
] %}
{% set debian_headers = "linux-headers-" + grains.get('kernelrelease')|string %}
{% set network = salt['grains.filter_by']({
'Arch': {
'pkgs': ['wpa_supplicant', 'dhclient', 'wireless_tools', 'ifenslave'],
'bridge_pkgs': ['bridge-utils'],
'ovs_pkgs': ['openvswitch-switch'],
'bridge_pkgs': ['bridge-utils', 'vlan'],
'ovs_pkgs': ['openvswitch-switch', 'vlan'],
'hostname_file': '/etc/hostname',
'network_manager': False,
'systemd': {},
@@ -170,13 +291,14 @@
'host': {},
'mine_dns_records': False,
'dhclient_config': '/etc/dhcp/dhclient.conf',
'ovs_nowait': False,
},
'Debian': {
'pkgs': ['ifenslave'],
'hostname_file': '/etc/hostname',
'bridge_pkgs': ['bridge-utils'],
'ovs_pkgs': ['openvswitch-switch', 'bridge-utils'],
'dpdk_pkgs': ['dpdk', 'dpdk-dev', 'dpdk-igb-uio-dkms', 'dpdk-rte-kni-dkms'],
'bridge_pkgs': ['bridge-utils', 'vlan'],
'ovs_pkgs': ['openvswitch-switch', 'bridge-utils', 'vlan'],
'dpdk_pkgs': ['dpdk', 'dpdk-dev', 'dpdk-igb-uio-dkms', 'dpdk-rte-kni-dkms', debian_headers.encode('utf8') ],
'network_manager': False,
'systemd': {},
'interface': {},
@@ -188,11 +310,12 @@
'host': {},
'mine_dns_records': False,
'dhclient_config': '/etc/dhcp/dhclient.conf',
'ovs_nowait': False,
},
'RedHat': {
'pkgs': ['iputils'],
'bridge_pkgs': ['bridge-utils'],
'ovs_pkgs': ['openvswitch-switch', 'bridge-utils'],
'bridge_pkgs': ['bridge-utils', 'vlan'],
'ovs_pkgs': ['openvswitch-switch', 'bridge-utils', 'vlan'],
'hostname_file': '/etc/sysconfig/network',
'network_manager': False,
'systemd': {},
@@ -205,6 +328,7 @@
'host': {},
'mine_dns_records': False,
'dhclient_config': '/etc/dhcp/dhclient.conf',
'ovs_nowait': False,
},
}, grain='os_family', merge=salt['pillar.get']('linux:network')) %}

@@ -258,11 +382,17 @@
'service': 'multipath'
},
},
}, grain='os_family', merge=salt['pillar.get']('linux:storage')) %}

}, merge=salt['grains.filter_by']({
'trusty': {
'lvm_services': ['udev'],
},
}, grain='oscodename', merge=salt['pillar.get']('linux:storage'))) %}

{% set monitoring = salt['grains.filter_by']({
'default': {
'bond_status': {
'interfaces': False
},
'zombie': {
'warn': 3,
'crit': 7,
@@ -288,30 +418,52 @@
'interface_regex': '^[a-z0-9]+$',
'ignore_selected': False,
},
'bond_status': {
'interfaces': False
'cpu_usage_percentage': {
'warn': 90.0,
},
'memory_usage_percentage': {
'warn': 90.0,
'major': 95.0,
},
'disk_usage_percentage': {
'warn': 85.0,
'major': 95.0,
},
'cpu_idle_percentage': {
'warn': 10.0,
'swap_usage_percentage': {
'warn': 50.0,
'minor': 90.0,
},
'free_memory_percentage': {
'warn': 10.0,
'crit': 5.0,
'inodes_usage_percentage': {
'warn': 85.0,
'major': 95.0,
},
'load_5': {
'warn': 3,
'system_load_threshold': {
'warn': 1,
'crit': 2,
},
'rx_packets_dropped_rate': {
'warn': 100,
'rx_packets_dropped_threshold': {
'warn': 100,
},
'tx_packets_dropped_rate': {
'warn': 100,
'tx_packets_dropped_threshold': {
'warn': 100,
},
'swap_in_rate': {
'warn': 1024 * 1024,
'warn': 1024 * 1024,
},
'swap_out_rate': {
'warn': 1024 * 1024,
'warn': 1024 * 1024,
},
'failed_auths_threshold': {
'warn': 5,
},
'net_rx_action_per_cpu_threshold': {
'warning': '500',
'minor': '5000'
},
'packets_dropped_per_cpu_threshold': {
'minor': '0',
'major': '100'
}
},
}, grain='os_family', merge=salt['pillar.get']('linux:monitoring')) %}


+69 -4  linux/meta/fluentd.yml

@@ -1,8 +1,10 @@
{%- if pillar.get('fluentd', {}).get('agent', {}).get('enabled', False) %}
{%- set positiondb = pillar.fluentd.agent.dir.positiondb %}
{%- if grains.get('init') == 'systemd' %}
agent:
plugin:
fluent-plugin-systemd:
gem: ['fluent-plugin-systemd']
deb: ['td-agent-additional-plugins']
config:
label:
default_metric:
@@ -54,7 +56,7 @@ agent:
type: systemd
tag: systemd.source
path: /run/log/journal
pos_file: {{ pillar.fluentd.agent.dir.positiondb }}/systemd.source.pos
pos_file: {{ positiondb }}/systemd.source.pos
entry:
field_map:
MESSAGE: 'Payload'
@@ -75,6 +77,8 @@ agent:
record:
- name: severity_label
value: '${ {"TRACE"=>8,"DEBUG"=>7,"INFO"=>6,"NOTICE"=>5,"WARNING"=>4,"ERROR"=>3,"CRITICAL"=>2,"ALERT"=>1,"EMERGENCY"=>0}.key(record["Severity"].to_i) }'
- name: source
value: systemd
match:
rewrite_tag:
tag: systemd.source
@@ -82,9 +86,9 @@ agent:
rule:
- name: ident
regexp: '^(.*)$'
result: __TAG__.$1
result: $1.systemd
push_to_default:
tag: 'systemd.source.*'
tag: '*.systemd'
type: copy
store:
- type: relabel
@@ -109,4 +113,65 @@ agent:
tag: 'metric.**'
type: relabel
label: default_metric
{%- else %}
agent:
config:
label:
default_metric:
filter:
metric_hdd_errors_parse:
tag: metric.hdd_errors
type: parser
key_name: Payload
parser:
type: regexp
format: '/(?<device>[sv]d[a-z]+\d*)/'
metric_hdd_errors:
tag: metric.hdd_errors
require:
- metric_hdd_errors_parse
type: prometheus
metric:
- name: hdd_errors_total
type: counter
desc: The total number of hdd errors.
label:
- name: host
value: ${Hostname}
- name: device
value: ${device}
syslog:
input:
syslog_file:
type: tail
tag: linux.syslog
path: /var/log/syslog
pos_file: {{ positiondb }}/linux_syslog.pos
suppress_parse_error_log: true
parser:
type: regexp
format: >-
'/(?<Payload>.*(?<device>[sv]d[a-z]{1,2}\d{0,3}).*)/'
match:
push_to_default:
tag: 'linux.**'
type: copy
store:
- type: relabel
label: default_output
- type: rewrite_tag_filter
rule:
- name: Payload
regexp: >-
'error.*\b[sv]d[a-z]{1,2}\d{0,3}\b.*'
result: metric.hdd_errors
- name: Payload
regexp: >-
'\b[sv]d[a-z]{1,2}\d{0,3}\b.*error'
result: metric.hdd_errors
push_to_metric:
tag: 'metric.**'
type: relabel
label: default_metric
{%- endif %}
{%- endif %}

+17 -2  linux/meta/grafana.yml

@@ -1,9 +1,24 @@
{%- from "linux/map.jinja" import monitoring with context %}
dashboard:
linux_prometheus:
linux_overview_prometheus:
datasource: prometheus
format: json
template: linux/files/grafana_dashboards/system_prometheus.json
template: linux/files/grafana_dashboards/system_overview_prometheus.json
linux_disk_prometheus:
datasource: prometheus
format: json
template: linux/files/grafana_dashboards/system_disk_prometheus.json
linux_network_prometheus:
datasource: prometheus
format: json
template: linux/files/grafana_dashboards/system_network_prometheus.json
linux_influxdb:
datasource: influxdb
format: json
template: linux/files/grafana_dashboards/system_influxdb.json
{%- if monitoring.bond_status.interfaces is defined and monitoring.bond_status.interfaces %}
linux_bond:
datasource: prometheus
format: json
template: linux/files/grafana_dashboards/bond_prometheus.json
{%- endif %}
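
The bond dashboard is published only when bond monitoring is switched on via linux:monitoring:bond_status:interfaces (False by default in linux/map.jinja); whether the value is a list of bond names, as sketched here, or a simple boolean is an assumption:

linux:
  monitoring:
    bond_status:
      interfaces:
        - bond0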

+229 -87  linux/meta/prometheus.yml

@@ -1,144 +1,286 @@
{%- from "linux/map.jinja" import monitoring with context %}
server:
alert:
SystemCpuIdleTooLow:
{%- set cpu_idle_threshold = monitoring.cpu_idle_percentage.warn|float %}
if: avg_over_time(cpu_usage_idle{cpu="cpu-total"}[5m]) < {{ cpu_idle_threshold }}
SystemCpuFullWarning:
{%- set cpu_usage_threshold = monitoring.cpu_usage_percentage.warn|float %}
if: >-
100 - avg_over_time(cpu_usage_idle{cpu="cpu-total"}[5m]) > {{ cpu_usage_threshold }}
{% raw %}
for: 2m
labels:
severity: warning
service: system
annotations:
summary: 'Idle CPU usage too low on {{ $labels.host }}'
description: 'The average idle CPU usage is too low on node {{ $labels.host }} (current value={{ $value }}%, threshold={% endraw %}{{ cpu_idle_threshold}}%).'
SystemDiskSpaceTooLow:
if: 'predict_linear(disk_free[1h], 8*3600) < 0'
{% raw %}
for: 15m
summary: "{%- endraw %}{{ cpu_usage_threshold }}{%- raw %}% CPU usage"
description: "The average CPU usage on the {{ $labels.host }} node is {{ $value }}% for 2 minutes."
SystemLoadTooHighWarning:
{%- endraw %}
{%- set load_threshold = monitoring.system_load_threshold.warn|float %}
if: >-
system_load5 / system_n_cpus > {{ load_threshold }}
{%- raw %}
for: 5m
labels:
severity: warning
service: system
annotations:
summary: 'Free space for {{ $labels.path }} too low on {{ $labels.host }}'
description: 'The disk partition ({{ $labels.path }}) will be full in less than 8 hours on {{ $labels.host }}.'
{% endraw %}
SystemFreeOpenFilesTooLow:
if: 'predict_linear(linux_sysctl_fs_file_nr[1h], 8*3600) > linux_sysctl_fs_file_max'
{% raw %}
summary: "System load is {%- endraw %}{{ load_threshold }}{%- raw %}"
description: "The system load per CPU on the {{ $labels.host }} node is {{ $value }} for 5 minutes."
SystemLoadTooHighCritical:
{%- endraw %}
{%- set load_threshold = monitoring.system_load_threshold.crit|float %}
if: >-
system_load5 / system_n_cpus > {{ load_threshold }}
{%- raw %}
for: 5m
labels:
severity: warning
service: system
annotations:
summary: 'Free open files for {{ $labels.path }} too low on {{ $labels.host }}'
description: 'Host {{ $labels.host }} will run out of free open files in less than 8 hours.'
{% endraw %}
SystemDiskErrors:
if: 'increase(hdd_errors_total[5m]) > 0'
{% raw %}
summary: "System load is {%- endraw %}{{ load_threshold }}{%- raw %}"
description: "The system load per CPU on the {{ $labels.host }} node is {{ $value }} for 5 minutes."
SystemDiskFullWarning:
{%- endraw %}
{%- set disk_threshold = monitoring.disk_usage_percentage.warn|float %}
if: >-
disk_used_percent >= {{ disk_threshold }}
{%- raw %}
for: 2m
labels:
severity: critical
severity: warning
service: system
annotations:
summary: 'Disk {{ $labels.device }} is failing'
description: 'The disk ({{ $labels.device }}) is reporting errors on {{ $labels.host }}.'
{% endraw %}
SystemDiskSpaceFull:
if: 'disk_used_percent >= 99 and disk_inodes_total > 0'
{% raw %}
summary: "Disk partition {{ $labels.path }} is {%- endraw %} {{ disk_threshold }}{%- raw %}% full"
description: "The disk partition ({{ $labels.path }}) on the {{ $labels.host }} node is {{ $value }}% full for 2 minutes."
SystemDiskFullMajor:
{%- endraw %}
{%- set disk_threshold = monitoring.disk_usage_percentage.major|float %}
if: >-
disk_used_percent >= {{ disk_threshold }}
{%- raw %}
for: 2m
labels:
severity: critical
severity: major
service: system
annotations:
summary: 'Disk partition {{ $labels.path }} full on {{ $labels.host }}'
description: 'The disk partition ({{ $labels.path }}) is used at {{ $value }}% on {{ $labels.host }}.'
{% endraw %}
SystemDiskInodesTooLow:
if: 'predict_linear(disk_inodes_free[1h], 8*3600) < 0'
{% raw %}
for: 15m
summary: "Disk partition {{ $labels.path }} is {%- endraw %} {{ disk_threshold }}{%- raw %}% full"
description: "The disk partition ({{ $labels.path }}) on the {{ $labels.host }} node is {{ $value }}% full for 2 minutes."
SystemDiskInodesFullWarning:
{%- endraw %}
{%- set inodes_threshold = monitoring.inodes_usage_percentage.warn|float %}
if: >-
100 * disk_inodes_used / disk_inodes_total >= {{ inodes_threshold }}
for: 2m
labels:
severity: warning
service: system
annotations:
summary: 'Free inodes for {{ $labels.path }} too low on {{ $labels.host }}'
description: 'The disk inodes ({{ $labels.path }}) will be full in less than 8 hours on {{ $labels.host }}.'
{% endraw %}
SystemDiskInodesFull:
if: 'disk_inodes_used / disk_inodes_total >= 0.99'
{% raw %}
summary: "{{ inodes_threshold }}{%- raw %}% of inodes for {{ $labels.path }} are used"
description: "The {{ $labels.host }} node uses {{ $value }}% of disk inodes in the {{ $labels.path }} volume for 2 minutes."
SystemDiskInodesFullMajor:
{%- endraw %}
{%- set inodes_threshold = monitoring.inodes_usage_percentage.major|float %}
if: >-
100 * disk_inodes_used / disk_inodes_total >= {{ inodes_threshold }}
for: 2m
labels:
severity: critical
severity: major
service: system
annotations:
summary: 'Inodes for {{ $labels.path }} full on {{ $labels.host }}'
description: 'The disk inodes ({{ $labels.path }}) are used at {{ $value }}% on {{ $labels.host }}.'
{% endraw %}
SystemMemoryAvailableLow:
{%- set mem_avail_warn_threshold = monitoring.free_memory_percentage.warn|float %}
if: avg_over_time(mem_available_percent[5m]) < {{ mem_avail_warn_threshold }}
{% raw %}
summary: "{{ inodes_threshold }}{%- raw %}% of inodes for {{ $labels.path }} are used"
description: "The {{ $labels.host }} node uses {{ $value }}% of disk inodes in the {{ $labels.path }} volume for 2 minutes."
SystemDiskErrorsTooHigh:
if: >-
increase(hdd_errors_total[1m]) > 0
for: 5m
labels:
severity: warning
service: system
annotations:
summary: 'Free memory low on {{ $labels.host }}'
description: 'The percentage of free memory is low on node {{ $labels.host }} (current value={{ $value }}%, threshold={% endraw %}{{ mem_avail_warn_threshold }}%).'
SystemMemoryAvailableTooLow:
{%- set mem_avail_crit_threshold = monitoring.free_memory_percentage.crit|float %}
if: avg_over_time(mem_available_percent[5m]) < {{ mem_avail_crit_threshold }}
{% raw %}
summary: "Disk {{ $labels.device }} is failing"
description: "The {{ $labels.device }} disk on the {{ $labels.host }} node is reporting errors for 5 minutes."
SystemMemoryFullWarning:
{%- endraw %}
{%- set mem_threshold = monitoring.memory_usage_percentage.warn|float %}
if: >-
mem_used_percent >= {{ mem_threshold }}
for: 2m
labels:
severity: critical
severity: warning
service: system
annotations:
summary: 'Free memory too low on {{ $labels.host }}'
description: 'The percentage of free memory is too low on node {{ $labels.host }} (current value={{ $value }}%, threshold={% endraw %}{{ mem_avail_crit_threshold }}%).'
SystemLoad5TooHigh:
if: system_load5 / system_n_cpus > {{ monitoring.load_5.warn }}
{% raw %}
summary: "{{ mem_threshold }}{%- raw %}% of memory is used"
description: "The {{ $labels.host }} node uses {{ $value }}% of memory for 2 minutes."
SystemMemoryFullMajor:
{%- endraw %}
{%- set mem_threshold = monitoring.memory_usage_percentage.major|float %}
if: >-
mem_used_percent >= {{ mem_threshold }}
for: 2m
labels:
severity: major
service: system
annotations:
summary: "{{ mem_threshold }}{%- raw %}% of memory is used"
description: "The {{ $labels.host }} node uses {{ $value }}% of memory for 2 minutes."
SystemSwapFullWarning:
{%- endraw %}
{%- set swap_threshold = monitoring.swap_usage_percentage.warn|float %}
if: >-
swap_used_percent >= {{ swap_threshold }}
for: 2m
labels:
severity: warning
service: system
annotations:
summary: 'High system load (5m) on {{ $labels.host }}'
description: 'The 5-minutes system load is too high on node {{ $labels.host }} (current value={{ $value }}, threshold={% endraw %}{{ monitoring.load_5.warn }}).'
summary: "{{ swap_threshold }}{%- raw %}% of swap is used"
description: "The swap on the {{ $labels.host }} node is {{ $value }}% used for 2 minutes."
SystemSwapFullMinor:
{%- endraw %}
{%- set swap_threshold = monitoring.swap_usage_percentage.minor|float %}
if: >-
swap_used_percent >= {{ swap_threshold }}
for: 2m
labels:
severity: minor
service: system
annotations:
summary: "{{ swap_threshold }}{%- raw %}% of swap is used"
description: "The swap on the {{ $labels.host }} node is {{ $value }}% used for 2 minutes."
SystemRxPacketsDroppedTooHigh:
{%- set net_rx_dropped_threshold = monitoring.rx_packets_dropped_rate.warn %}
if: rate(net_drop_in[1m]) > {{ net_rx_dropped_threshold }}
{% raw %}
{%- endraw %}
{%- set net_rx_dropped_threshold = monitoring.rx_packets_dropped_threshold.warn %}
if: >-
increase(net_drop_in[1m]) > {{ net_rx_dropped_threshold }}
labels:
severity: critical
severity: warning
service: system
annotations:
summary: 'Too many received packets dropped on {{ $labels.host }} for interface {{ $labels.interface }}'
description: 'The rate of received packets which are dropped is too high on node {{ $labels.host }} for interface {{ $labels.interface }} (current value={{ $value }}/sec, threshold={% endraw %}{{ net_rx_dropped_threshold }}/sec)'
summary: "{{ net_rx_dropped_threshold }}{%- raw %} received packets were dropped"
description: "{{ $value }} packets received by the {{ $labels.interface }} interface on the {{ $labels.host }} node were dropped during the last minute."
SystemRxPacketsDroppedLongTermTooHigh:
if: >-
increase(net_drop_in[1m]) > 0
for: 10m
labels:
severity: major
service: system
annotations:
summary: "Received packets long term dropping"
description: "{{ $value }} packets received by the {{ $labels.interface }} interface on the {{ $labels.host }} node were dropped during the last 10 minutes."
SystemTxPacketsDroppedTooHigh:
{%- set net_tx_dropped_threshold = monitoring.tx_packets_dropped_rate.warn %}
if: rate(net_drop_out[1m]) > {{ net_tx_dropped_threshold }}
{% raw %}
{%- endraw %}
{%- set net_tx_dropped_threshold = monitoring.tx_packets_dropped_threshold.warn %}
if: >-
increase(net_drop_out[1m]) > {{ net_tx_dropped_threshold }}
labels:
severity: warning
service: system
annotations:
summary: "{{ net_tx_dropped_threshold }}{%- raw %} transmitted packets were dropped"
description: "{{ $value }} packets transmitted by the {{ $labels.interface }} interface on the {{ $labels.host }} node were dropped during the last minute."
CronProcessDown:
if: >-
procstat_running{process_name="cron"} == 0
labels:
severity: critical
service: system
annotations:
summary: 'Too many transmitted packets dropped on {{ $labels.host }} for interface {{ $labels.interface }}'
description: 'The rate of transmitted packets which are dropped is too high on node {{ $labels.host }} for interface {{ $labels.interface }} (current value={{ $value }}/sec, threshold={% endraw %}{{ net_tx_dropped_threshold }}/sec)'
SystemSwapIn:
{%- set swap_in_threshold = monitoring.swap_in_rate.warn %}
if: rate(swap_in[2m]) > {{ swap_in_threshold }}
{% raw %}
summary: "Cron process is down"
description: "The cron process on the {{ $labels.host }} node is down."
SshdProcessDown:
if: >-
procstat_running{process_name="sshd"} == 0
labels:
severity: critical
service: system
annotations:
summary: "SSH process is down"
description: "The SSH process on the {{ $labels.host }} node is down."
SshFailedLoginsTooHigh:
{%- endraw %}
{%- set threshold = monitoring.failed_auths_threshold.warn %}
if: >-
increase(failed_logins_total[5m]) > {{ threshold }}
labels:
severity: warning
service: system
annotations:
summary: 'Swap input throughput too high on {{ $labels.host }}'
description: 'The rate of swap input bytes is too high on node {{ $labels.host }} (current value={{ $value }}b/s, threshold={% endraw %}{{ swap_in_threshold }}b/s).'
SystemSwapOut:
{%- set swap_out_threshold = monitoring.swap_out_rate.warn %}
if: rate(swap_out[2m]) > {{ swap_out_threshold }}
{% raw %}
summary: "{{ threshold }}{%- raw %} failed SSH logins"
description: "{{ $value }} failed SSH login attempts on the {{ $labels.host }} node during the last 5 minutes."
PacketsDroppedByCpuMinor:
{%- endraw %}
{%- set packets_dropped_minor_threshold = monitoring.packets_dropped_per_cpu_threshold.minor %}
if: >-
floor(increase(nstat_packet_drop[24h])) > {{ packets_dropped_minor_threshold }}
labels:
severity: minor
service: system
annotations:
summary: "CPU dropped {{ packets_dropped_minor_threshold }}{%- raw %} packets"
description: "The {{ $labels.cpu }} CPU on the {{ $labels.host }} node dropped {{ $value }} packets during the last 24 hours."
PacketsDroppedByCpuMajor:
{%- endraw %}
{%- set packets_dropped_major_threshold = monitoring.packets_dropped_per_cpu_threshold.major %}
if: >-
floor(increase(nstat_packet_drop[24h])) > {{ packets_dropped_major_threshold }}
labels:
severity: major
service: system
annotations:
summary: "CPU dropped {{ packets_dropped_major_threshold }}{%- raw %} packets"
description: "The {{ $labels.cpu }} CPU on the {{ $labels.host }} node dropped {{ $value }} packets during the last 24 hours."
NetRxActionByCpuWarning:
{%- endraw %}
{%- set net_rx_action_warning_threshold = monitoring.net_rx_action_per_cpu_threshold.warning %}
if: >-
floor(increase(nstat_time_squeeze[1d])) > {{ net_rx_action_warning_threshold }}
labels:
severity: warning
service: system
annotations:
summary: 'Swap output throughput too high on {{ $labels.host }}'
description: 'The rate of swap output bytes is too high on node {{ $labels.host }} (current value={{ $value }}b/s, threshold={% endraw %}{{ swap_out_threshold }}b/s).'
summary: "CPU terminated {{ net_rx_action_warning_threshold }}{%- raw %} net_rx_action loops"
description: "The {{ $labels.cpu }} CPU on the {{ $labels.host }} node terminated {{ $value }} net_rx_action loops during the last 24 hours. Modify the net.core.netdev_budget kernel parameter."
NetRxActionByCpuMinor:
{%- endraw %}
{%- set net_rx_action_minor_threshold = monitoring.net_rx_action_per_cpu_threshold.minor %}
if: >-
floor(increase(nstat_time_squeeze[1d])) > {{ net_rx_action_minor_threshold }}
labels:
severity: minor
service: system
annotations:
summary: "CPU terminated {{ net_rx_action_minor_threshold }}{%- raw %} net_rx_action loops"
description: "The {{ $labels.cpu }} CPU on the {{ $labels.host }} node terminated {{ $value }} net_rx_action loops during the last 24 hours. Modify the net.core.netdev_budget kernel parameter."
{%- endraw %}
{%- if monitoring.bond_status.interfaces is defined and monitoring.bond_status.interfaces %}
{%- raw %}
BondInterfaceDown:
if: >-
bond_status < 1
labels:
severity: critical
service: system
annotations:
summary: "{{ $labels.bond }} bond interface is down"
description: "The {{ $labels.bond }} bond interface on the {{ $labels.host }} node has all ifaces down."
BondInterfaceSlaveDown:
if: >-
bond_slave_status < 1
labels:
severity: warning
service: system
annotations:
summary: "{{ $labels.bond }} bond interface slave {{ $labels.interface }} is down"
description: "The {{ $labels.bond }} bond interface slave {{ $labels.interface }} on the {{ $labels.host }} node is down."
BondInterfaceSlaveDownMajor:
if: >-
sum(bond_slave_status) by (bond,host) <= on (bond,host) 0.5 * count(bond_slave_status)
labels:
severity: major
service: system
annotations:
summary: "50% of bond interface slaves {{ $labels.bond }} are down"
description: "{{ $value }} {{ $labels.bond }} bond interface slaves on the {{ $labels.host }} node are down."
{% endraw %}
{%- endif %}
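
For orientation, here is a minimal sketch of what one of the blocks above renders to once the map.jinja thresholds are substituted and the raw/endraw sections pass the Prometheus template markers through unchanged. The 90.0 value is only an illustrative placeholder for monitoring.memory_usage_percentage.warn:

server:
  alert:
    SystemMemoryFullWarning:
      if: >-
        mem_used_percent >= 90.0
      for: 2m
      labels:
        severity: warning
        service: system
      annotations:
        summary: "90.0% of memory is used"
        description: "The {{ $labels.host }} node uses {{ $value }}% of memory for 2 minutes."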

+ 21
- 0
linux/meta/telegraf.yml View File

@@ -1,3 +1,4 @@
{%- from "linux/map.jinja" import monitoring with context %}
agent:
input:
cpu:
@@ -19,7 +20,27 @@ agent:
kernel:
net:
mem:
nstat:
fieldpass:
- packet_drop
- time_squeeze
processes:
swap:
system:
procstat:
process:
sshd:
exe: sshd
cron:
exe: cron
linux_sysctl_fs:
{%- if monitoring.bond_status.interfaces is defined and monitoring.bond_status.interfaces %}
bond:
template: linux/files/telegraf.conf
{%- if monitoring.bond_status.interfaces is list %}
bond_interfaces: {{ monitoring.bond_status.interfaces }}
{%- endif %}
{%- if monitoring.bond_status.host_proc is defined %}
host_proc: {{ monitoring.bond_status.host_proc }}
{%- endif %}
{%- endif %}
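
The new bond input here, the bond Grafana dashboard, and the bond alerts are all gated on the same monitoring.bond_status values from linux/map.jinja. A hedged pillar fragment that would satisfy that guard is sketched below; only the interfaces and host_proc keys are taken from this file, and the exact nesting under the linux pillar (resolved by map.jinja) is an assumption:

monitoring:
  bond_status:
    # interface names and the proc path are illustrative values
    interfaces:
      - bond0
      - bond1
    host_proc: /rootfs/proc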

+ 50
- 48
linux/network/dpdk.sls View File

@@ -4,7 +4,7 @@

linux_dpdk_pkgs:
pkg.installed:
- pkgs: {{ network.dpdk_pkgs }}
- pkgs: {{ network.dpdk_pkgs | json }}

linux_dpdk_kernel_module:
kmod.present:
@@ -64,7 +64,7 @@ linux_network_dpdk_ovs_service:

linux_network_dpdk_ovs_option_{{ option }}:
cmd.run:
- name: 'ovs-vsctl set Open_vSwitch . other_config:{{ option }}'
- name: 'ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} set Open_vSwitch . other_config:{{ option }}'
- watch_in:
- service: service_openvswitch
- require:
@@ -102,16 +102,36 @@ service_openvswitch:
{%- do bond_interfaces.update({iface_name: iface}) %}
{%- endfor %}


linux_network_dpdk_bond_interface_{{ interface_name }}:
cmd.run:
- name: "ovs-vsctl add-bond {{ interface.bridge }} {{ interface_name }} {{ bond_interfaces.keys()|join(' ') }} {% for iface_name, iface in bond_interfaces.items() %}-- set Interface {{ iface_name }} type=dpdk options:dpdk-devargs={{ iface.pci }} {% endfor %}"
- unless: "ovs-vsctl show | grep {{ interface_name }}"
- name: "ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} add-bond {{ interface.bridge }} {{ interface_name }} {{ bond_interfaces.keys()|join(' ') }}"
- unless: "ovs-vsctl list-ports {{ interface.bridge }} | grep -w {{ interface_name }}"
- require:
- cmd: linux_network_dpdk_bridge_interface_{{ interface.bridge }}
- cmd: linux_network_dpdk_bridge_interface_{{ interface.bridge }}

{% for iface_name, iface in bond_interfaces.items() %}
linux_network_dpdk_bond_interface_{{ iface_name }}_activate:
cmd.run:
- name: "timeout 5 /bin/sh -c -- 'while true; do ovs-vsctl get Interface {{ iface_name }} name 1>/dev/null 2>&1 && break || sleep 1; done'"
- unless: "ovs-vsctl get Interface {{ iface_name }} name 1>/dev/null 2>&1"
linux_network_dpdk_bond_interface_{{ iface_name }}_type:
cmd.run:
- name: "ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} set Interface {{ iface_name }} type=dpdk"
- unless: "ovs-vsctl get interface {{ iface_name }} type | grep -w dpdk"
- require:
- cmd: linux_network_dpdk_bond_interface_{{ iface_name }}_activate
linux_network_dpdk_bond_interface_{{ iface_name }}_options:
cmd.run:
- name: "ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} set Interface {{ iface_name }} options:dpdk-devargs={{ iface.pci }}"
- unless: "ovs-vsctl get interface {{ iface_name }} options:dpdk-devargs | grep -w {{ iface.pci }}"
- require:
- cmd: linux_network_dpdk_bond_interface_{{ iface_name }}_activate
{% endfor %}

linux_network_dpdk_bond_mode_{{ interface_name }}:
cmd.run:
- name: "ovs-vsctl set port {{ interface_name }} bond_mode={{ interface.mode }}"
- name: "ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} set port {{ interface_name }} bond_mode={{ interface.mode }}{%- if interface.mode == 'balance-slb' %} lacp=active{%- endif %}"
- unless: "ovs-appctl bond/show {{ interface_name }} | grep {{ interface.mode }}"
- require:
- cmd: linux_network_dpdk_bond_interface_{{ interface_name }}
@@ -120,45 +140,15 @@ linux_network_dpdk_bond_mode_{{ interface_name }}:

linux_network_dpdk_bridge_interface_{{ interface_name }}:
cmd.run:
- name: "ovs-vsctl add-br {{ interface_name }} -- set bridge {{ interface_name }} datapath_type=netdev{% if interface.tag is defined %} -- set port {{ interface_name }} tag={{ interface.tag }}{% endif %}"
- name: "ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} add-br {{ interface_name }} -- set bridge {{ interface_name }} datapath_type=netdev{% if interface.tag is defined %} -- set port {{ interface_name }} tag={{ interface.tag }}{% endif %}"
- unless: "ovs-vsctl show | grep {{ interface_name }}"

{# OVS dpdk needs ip address for vxlan termination on bridge br-prv #}
{%- if interface.address is defined %}

{# create override for openvswitch dependency for dpdk br-prv #}
/etc/systemd/system/ifup@{{ interface_name }}.service.d/override.conf:
file.managed:
- makedirs: true
- require:
- cmd: linux_network_dpdk_bridge_interface_{{ interface_name }}
- contents: |
[Unit]
Requires=openvswitch-switch.service
After=openvswitch-switch.service

{# enforce ip address and mtu for ovs dpdk br-prv #}
/etc/network/interfaces.d/ifcfg-{{ interface_name }}:
file.managed:
- contents: |
auto {{ interface_name }}
iface {{ interface_name }} inet static
address {{ interface.address }}
netmask {{ interface.netmask }}
{%- if interface.mtu is defined %}
mtu {{ interface.mtu }}
{%- endif %}
- require:
- file: /etc/systemd/system/ifup@{{ interface_name }}.service.d/override.conf

{%- endif %}

{%- elif interface.type == 'dpdk_ovs_port' and interface.bridge is defined %}

linux_network_dpdk_bridge_port_interface_{{ interface_name }}:
cmd.run:
- name: "ovs-vsctl add-port {{ interface.bridge }} dpdk0 -- set Interface dpdk0 type=dpdk options:dpdk-devargs={{ interface.pci }}"
- unless: "ovs-vsctl show | grep dpdk0"
- name: "ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} add-port {{ interface.bridge }} {{ interface_name }} -- set Interface {{ interface_name }} type=dpdk options:dpdk-devargs={{ interface.pci }}"
- unless: "ovs-vsctl list-ports {{ interface.bridge }} | grep -w {{ interface_name }}"
- require:
- cmd: linux_network_dpdk_bridge_interface_{{ interface.bridge }}

@@ -167,35 +157,47 @@ linux_network_dpdk_bridge_port_interface_{{ interface_name }}:
{# Multiqueue n_rxq, pmd_rxq_affinity and mtu setup on interfaces #}
{%- if interface.type == 'dpdk_ovs_port' %}

{%- if interface.n_rxq is defined %}
{%- if interface.n_rxq is defined %}

linux_network_dpdk_bridge_port_interface_n_rxq_{{ interface_name }}:
cmd.run:
- name: "ovs-vsctl set Interface {{ interface_name }} options:n_rxq={{ interface.n_rxq }} "
- name: "ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} set Interface {{ interface_name }} options:n_rxq={{ interface.n_rxq }} "
- unless: |
ovs-vsctl get Interface {{ interface_name }} options | grep 'n_rxq="{{ interface.n_rxq }}"'
{%- if interface.get("bond", "") != "" %}
- require:
- cmd: linux_network_dpdk_bond_interface_{{ interface.get("bond", "") }}
{%- endif %}

{%- endif %}
{%- endif %}

{%- if interface.pmd_rxq_affinity is defined %}
{%- if interface.pmd_rxq_affinity is defined %}

linux_network_dpdk_bridge_port_interface_pmd_rxq_affinity_{{ interface_name }}:
cmd.run:
- name: "ovs-vsctl set Interface {{ interface_name }} other_config:pmd-rxq-affinity={{ interface.pmd_rxq_affinity }} "
- name: "ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} set Interface {{ interface_name }} other_config:pmd-rxq-affinity={{ interface.pmd_rxq_affinity }} "
- unless: |
ovs-vsctl get Interface {{ interface_name }} other_config | grep 'pmd-rxq-affinity="{{ interface.pmd_rxq_affinity }}"'
{%- if interface.get("bond", "") != "" %}
- require:
- cmd: linux_network_dpdk_bond_interface_{{ interface.get("bond", "") }}
{%- endif %}

{%- endif %}
{%- endif %}

{%- if interface.mtu is defined %}
{%- if interface.mtu is defined %}

{# MTU ovs dpdk setup on interfaces #}
linux_network_dpdk_bridge_port_interface_mtu_{{ interface_name }}:
cmd.run:
- name: "ovs-vsctl set Interface {{ interface_name }} mtu_request={{ interface.mtu }} "
- name: "ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} set Interface {{ interface_name }} mtu_request={{ interface.mtu }} "
- unless: "ovs-vsctl get Interface {{ interface_name }} mtu_request | grep {{ interface.mtu }}"
{%- if interface.get("bond", "") != "" %}
- require:
- cmd: linux_network_dpdk_bond_interface_{{ interface.get("bond", "") }}
{%- endif %}

{%- endif %}
{%- endif %}

{%- endif %}
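
A hedged pillar sketch for the DPDK OVS bond path exercised by these states. The keys type, bridge, bond, pci, mode, n_rxq, mtu, address and netmask appear in the hunks above; the interface names, PCI addresses and the dpdk_ovs_bond type label for the bond entry are illustrative assumptions:

linux:
  network:
    interface:
      dpdk0:
        type: dpdk_ovs_port
        bridge: br-prv
        bond: dpdkbond0
        pci: "0000:05:00.0"
        n_rxq: 2
        mtu: 9000
      dpdk1:
        type: dpdk_ovs_port
        bridge: br-prv
        bond: dpdkbond0
        pci: "0000:05:00.1"
      dpdkbond0:
        type: dpdk_ovs_bond
        bridge: br-prv
        mode: balance-slb
      br-prv:
        type: dpdk_ovs_bridge
        address: 10.10.0.10
        netmask: 255.255.255.0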


+ 13
- 10
linux/network/hostname.sls View File

@@ -16,18 +16,21 @@ linux_hostname_file:

{%- endif %}

{# Switch to the proper state once the following patch is released:
https://github.com/saltstack/salt/pull/45748/files/74599bbdfcf99f45d3a31296887097fade31cbf1
linux_enforce_hostname:
cmd.wait:
network.system:
- enabled: True
- hostname: {{ network.hostname }}
- apply_hostname: True
- retain_settings: True
#}
linux_enforce_hostname:
cmd.run:
- name: hostname {{ network.hostname }}
- unless: test "$(hostname)" = "{{ network.hostname }}"

{#
linux_hostname_hosts:
host.present:
- ip: {{ grains.ip4_interfaces[network.get('default_interface', 'eth0')][0] }}
- names:
- {{ network.fqdn }}
- {{ network.hostname }}
#}
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}

{%- endif %}
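
A minimal pillar sketch for the hostname enforcement above; the values are illustrative, and linux:network is the pillar subtree read through linux/map.jinja:

linux:
  network:
    hostname: node01
    fqdn: node01.example.local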

+ 5
- 0
linux/network/init.sls View File

@@ -1,6 +1,8 @@
{%- from "linux/map.jinja" import network with context %}
include:
{%- if network.hostname is defined %}
- linux.network.hostname
{%- endif %}
{%- if network.host|length > 0 or network.get('purge_hosts', True) %}
- linux.network.host
{%- endif %}
@@ -16,6 +18,9 @@ include:
{%- if network.systemd|length > 0 %}
- linux.network.systemd
{%- endif %}
{%- if network.openvswitch is defined %}
- linux.network.openvswitch
{%- endif %}
{%- if network.interface|length > 0 %}
- linux.network.interface
{%- endif %}

+ 151
- 45
linux/network/interface.sls View File

@@ -2,6 +2,12 @@
{%- from "linux/map.jinja" import system with context %}
{%- if network.enabled %}

{%- set dpdk_enabled = network.get('dpdk', {}).get('enabled', False) %}
{%- if dpdk_enabled %}
include:
- linux.network.dpdk
{%- endif %}

{%- macro set_param(param_name, param_dict) -%}
{%- if param_dict.get(param_name, False) -%}
- {{ param_name }}: {{ param_dict[param_name] }}
@@ -13,9 +19,9 @@
linux_network_bridge_pkgs:
pkg.installed:
{%- if network.bridge == 'openvswitch' %}
- pkgs: {{ network.ovs_pkgs }}
- pkgs: {{ network.ovs_pkgs | json }}
{%- else %}
- pkgs: {{ network.bridge_pkgs }}
- pkgs: {{ network.bridge_pkgs | json }}
{%- endif %}

{%- endif %}
@@ -57,6 +63,61 @@ remove_cloud_init_file:

{%- set interface_name = interface.get('name', interface_name) %}

{# add linux network interface into OVS dpdk bridge #}

{%- if interface.type == 'dpdk_ovs_bridge' %}

{%- for int_name, int in network.interface.items() %}

{%- set int_name = int.get('name', int_name) %}

{%- if int.ovs_bridge is defined and interface_name == int.ovs_bridge %}

add_int_{{ int_name }}_to_ovs_dpdk_bridge_{{ interface_name }}:
cmd.run:
- unless: ovs-vsctl show | grep -w {{ int_name }}
- name: ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} add-port {{ interface_name }} {{ int_name }}
{%- endif %}
{%- endfor %}

linux_interfaces_include_{{ interface_name }}:
file.prepend:
- name: /etc/network/interfaces
- text: |
source /etc/network/interfaces.d/*
# Workaround for Upstream-Bug: https://github.com/saltstack/salt/issues/40262
source /etc/network/interfaces.u/*

{# create override for openvswitch dependency for dpdk br-prv #}
/etc/systemd/system/ifup@{{ interface_name }}.service.d/override.conf:
file.managed:
- makedirs: true
- require:
- cmd: linux_network_dpdk_bridge_interface_{{ interface_name }}
- contents: |
[Unit]
Requires=openvswitch-switch.service
After=openvswitch-switch.service

dpdk_ovs_bridge_{{ interface_name }}:
file.managed:
- name: /etc/network/interfaces.u/ifcfg-{{ interface_name }}
- makedirs: True
- source: salt://linux/files/ovs_bridge
- defaults:
bridge: {{ interface|yaml }}
bridge_name: {{ interface_name }}
- template: jinja

dpdk_ovs_bridge_up_{{ interface_name }}:
cmd.run:
- name: ifup {{ interface_name }}
- require:
- file: dpdk_ovs_bridge_{{ interface_name }}
- file: linux_interfaces_final_include

{%- endif %}

{# This is not used for any interface whose type has the dpdk prefix, e.g. dpdk_ovs_port #}
{%- if interface.get('managed', True) and not 'dpdk' in interface.type %}

@@ -64,7 +125,7 @@ remove_cloud_init_file:

{%- if interface.type == 'ovs_bridge' %}

ovs_bridge_{{ interface_name }}:
ovs_bridge_{{ interface_name }}_present:
openvswitch_bridge.present:
- name: {{ interface_name }}

@@ -78,33 +139,75 @@ ovs_bridge_{{ interface_name }}:
add_int_{{ int_name }}_to_ovs_bridge_{{ interface_name }}:
cmd.run:
- unless: ovs-vsctl show | grep {{ int_name }}
- name: ovs-vsctl add-port {{ interface_name }} {{ int_name }}

- name: ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} add-port {{ interface_name }} {{ int_name }}
{%- endif %}

{%- endfor %}

linux_interfaces_include_{{ interface_name }}:
file.prepend:
- name: /etc/network/interfaces
- text: |
source /etc/network/interfaces.d/*
# Workaround for Upstream-Bug: https://github.com/saltstack/salt/issues/40262
source /etc/network/interfaces.u/*

ovs_bridge_{{ interface_name }}:
file.managed:
- name: /etc/network/interfaces.u/ifcfg-{{ interface_name }}
- makedirs: True
- source: salt://linux/files/ovs_bridge
- defaults:
bridge: {{ interface|yaml }}
bridge_name: {{ interface_name }}
- template: jinja

ovs_bridge_up_{{ interface_name }}:
cmd.run:
- name: ifup {{ interface_name }}
- require:
- file: ovs_bridge_{{ interface_name }}
- file: linux_interfaces_final_include

{%- elif interface.type == 'ovs_bond' %}
ovs_bond_{{ interface_name }}:
cmd.run:
- name: ovs-vsctl add-bond {{ interface.bridge }} {{ interface_name }} {{ interface.slaves }} bond_mode={{ interface.mode }}
- unless: ovs-vsctl show | grep -A 2 'Port.*{{ interface_name }}.'
- require:
- ovs_bridge_{{ interface.bridge }}_present

{%- elif interface.type == 'ovs_port' %}

{%- if interface.get('port_type','internal') == 'patch' %}

ovs_port_{{ interface_name }}:
ovs_port_{{ interface_name }}_present:
openvswitch_port.present:
- name: {{ interface_name }}
- bridge: {{ interface.bridge }}
- require:
- openvswitch_bridge: ovs_bridge_{{ interface.bridge }}
{%- if dpdk_enabled and network.interface.get(interface.bridge, {}).get('type', 'ovs_bridge') == 'dpdk_ovs_bridge' %}
- cmd: linux_network_dpdk_bridge_interface_{{ interface.bridge }}
{%- else %}
- openvswitch_bridge: ovs_bridge_{{ interface.bridge }}_present
{%- endif %}

ovs_port_set_type_{{ interface_name }}:
cmd.run:
- name: ovs-vsctl set interface {{ interface_name }} type=patch
- name: ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} set interface {{ interface_name }} type=patch
- unless: ovs-vsctl show | grep -A 1 'Interface {{ interface_name }}' | grep patch

ovs_port_set_peer_{{ interface_name }}:
cmd.run:
- name: ovs-vsctl set interface {{ interface_name }} options:peer={{ interface.peer }}
- name: ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} set interface {{ interface_name }} options:peer={{ interface.peer }}
- unless: ovs-vsctl show | grep -A 2 'Interface {{ interface_name }}' | grep {{ interface.peer }}

{% if interface.tag is defined %}
ovs_port_set_tag_{{ interface_name }}:
cmd.run:
- name: ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} set port {{ interface_name }} tag={{ interface.tag }}
- unless: ovs-vsctl get Port {{ interface_name }} tag | grep -Fx {{ interface.tag }}
{%- endif %}

{%- else %}

linux_interfaces_include_{{ interface_name }}:
@@ -123,28 +226,16 @@ ovs_port_{{ interface_name }}:
- defaults:
port: {{ interface|yaml }}
port_name: {{ interface_name }}
auto: ""
iface_inet: ""
- template: jinja

ovs_port_{{ interface_name }}_line1:
file.replace:
- name: /etc/network/interfaces
- pattern: auto {{ interface_name }}$
- repl: ""

ovs_port_{{ interface_name }}_line2:
file.replace:
- name: /etc/network/interfaces
- pattern: 'iface {{ interface_name }} inet .*'
- repl: ""

ovs_port_up_{{ interface_name }}:
cmd.run:
- name: ifup {{ interface_name }}
- require:
- file: ovs_port_{{ interface_name }}
- file: ovs_port_{{ interface_name }}_line1
- file: ovs_port_{{ interface_name }}_line2
- openvswitch_bridge: ovs_bridge_{{ interface.bridge }}
- openvswitch_bridge: ovs_bridge_{{ interface.bridge }}_present
- file: linux_interfaces_final_include

{%- endif %}
@@ -187,6 +278,9 @@ linux_interface_{{ interface_name }}:
- wireless-psk: {{ interface.wireless.key }}
{%- endif %}
{%- endif %}
{%- if pillar.linux.network.noifupdown is defined %}
- noifupdown: {{ pillar.linux.network.noifupdown }}
{%- endif %}
{%- for param in network.interface_params %}
{{ set_param(param, interface) }}
{%- endfor %}
@@ -221,25 +315,6 @@ linux_interface_{{ interface_name }}:
- mode: {{ interface.mode }}
{%- endif %}

{%- if interface.get('ipflush_onchange', False) %}

linux_interface_ipflush_onchange_{{ interface_name }}:
cmd.run:
- name: "/sbin/ip address flush dev {{ interface_name }}"
- onchanges:
- network: linux_interface_{{ interface_name }}

{%- if interface.get('restart_on_ipflush', False) %}

linux_interface_restart_on_ipflush_{{ interface_name }}:
cmd.run:
- name: "ifdown {{ interface_name }}; ifup {{ interface_name }};"
- onchanges:
- cmd: linux_interface_ipflush_onchange_{{ interface_name }}

{%- endif %}

{%- endif %}

{%- if salt['grains.get']('saltversion') < '2017.7' %}
# TODO(ddmitriev): Remove this 'if .. endif' block completely when
@@ -300,7 +375,7 @@ linux_system_network:

linux_network_packages:
pkg.installed:
- pkgs: {{ network.pkgs }}
- pkgs: {{ network.pkgs | json }}

/etc/netctl/network_{{ interface.wireless.essid }}:
file.managed:
@@ -348,6 +423,37 @@ linux_network_{{ interface_name }}_routes:
gateway: {{ route.gateway }}
{%- endif %}
{%- endfor %}
{%- if interface.noifupdown is defined %}
- require_reboot: {{ interface.noifupdown }}
{%- endif %}

{%- endif %}

{%- if interface.type in ('eth','ovs_port') %}
{%- if interface.get('ipflush_onchange', False) %}

linux_interface_ipflush_onchange_{{ interface_name }}:
cmd.run:
- name: "/sbin/ip address flush dev {{ interface_name }}"
{%- if interface.type == 'eth' %}
- onchanges:
- network: linux_interface_{{ interface_name }}
{%- elif interface.type == 'ovs_port' %}
- onchanges:
- file: ovs_port_{{ interface_name }}
{%- endif %}

{%- if interface.get('restart_on_ipflush', False) %}

linux_interface_restart_on_ipflush_{{ interface_name }}:
cmd.run:
- name: "ifdown {{ interface_name }}; ifup {{ interface_name }};"
- onchanges:
- cmd: linux_interface_ipflush_onchange_{{ interface_name }}

{%- endif %}

{%- endif %}

{%- endif %}
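
A hedged pillar sketch exercising the OVS bridge/port handling above. The keys type, bridge, port_type, peer, tag, ovs_bridge and ipflush_onchange are taken from the states; the enabled flags and the concrete names are illustrative assumptions:

linux:
  network:
    interface:
      br-floating:
        enabled: true
        type: ovs_bridge
      float-to-prv:
        enabled: true
        type: ovs_port
        port_type: patch
        bridge: br-floating
        peer: prv-to-float
        tag: 110
      eth1:
        enabled: true
        type: eth
        ovs_bridge: br-floating
        ipflush_onchange: true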


+ 37
- 0
linux/network/openvswitch.sls View File

@@ -0,0 +1,37 @@
{%- from "linux/map.jinja" import network with context %}

{%- if network.get('openvswitch', {}).get('enabled', False) %}

openvswitch_pkgs:
pkg.installed:
- pkgs: {{ network.ovs_pkgs | json }}

/etc/default/openvswitch-switch:
file.managed:
- source: salt://linux/files/openvswitch-switch.default
- template: jinja
- require:
- pkg: openvswitch_pkgs

/etc/systemd/system/openvswitch-switch.service:
file.managed:
- source: salt://linux/files/openvswitch-switch.systemd
- template: jinja
- require:
- pkg: openvswitch_pkgs
module.run:
- name: service.systemctl_reload
- onchanges:
- file: /etc/systemd/system/openvswitch-switch.service

openvswitch_switch_service:
service.running:
- name: openvswitch-switch
- enable: true
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
- watch:
- file: /etc/default/openvswitch-switch

{%- endif %}
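
A minimal pillar sketch that enables the new openvswitch state above; the package list, defaults file and systemd unit then come from the formula templates:

linux:
  network:
    openvswitch:
      enabled: true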

+ 3
- 1
linux/storage/lvm.sls View File

@@ -3,7 +3,7 @@

linux_lvm_pkgs:
pkg.installed:
- pkgs: {{ storage.lvm_pkgs }}
- pkgs: {{ storage.lvm_pkgs | json }}


/etc/lvm/lvm.conf:
@@ -47,9 +47,11 @@ lvm_vg_{{ vg.get('name', vgname) }}:

lvm_{{ vg.get('name', vgname) }}_lv_{{ volume.get('name', lvname) }}:
lvm.lv_present:
- order: 1
- name: {{ volume.get('name', lvname) }}
- vgname: {{ vg.get('name', vgname) }}
- size: {{ volume.size }}
- force: true
- require:
- lvm: lvm_vg_{{ vg.get('name', vgname) }}
{%- if volume.mount is defined %}

+ 2
- 1
linux/storage/mount.sls View File

@@ -27,11 +27,12 @@ xfs_packages_{{ mount.device }}:
{%- if mount.file_system == 'nfs' %}
linux_storage_nfs_packages:
pkg.installed:
- pkgs: {{ storage.nfs.pkgs }}
- pkgs: {{ storage.nfs.pkgs | json }}
{%- endif %}

{{ mount.path }}:
mount.mounted:
- order: 1
- device: {{ mount.device }}
- fstype: {{ mount.file_system }}
- mkmnt: True

+ 1
- 1
linux/storage/multipath.sls View File

@@ -3,7 +3,7 @@

linux_storage_multipath_packages:
pkg.installed:
- pkgs: {{ storage.multipath.pkgs }}
- pkgs: {{ storage.multipath.pkgs | json }}

linux_storage_multipath_config:
file.managed:

+ 18
- 0
linux/storage/swap.sls View File

@@ -53,6 +53,24 @@ linux_set_swap_file_status_{{ swap.device }}:

{%- endif %}

{%- else %}

{{ swap.device }}:
module.run:
- name: mount.rm_fstab
- m_name: none
- device: {{ swap.device }}
- onlyif: grep -q {{ swap.device }} /etc/fstab

linux_disable_swap_{{ swap.engine }}_{{ swap.device }}:
cmd.run:
{%- if swap.engine == 'partition' %}
- name: 'swapoff {{ swap.device }}'
{%- elif swap.engine == 'file' %}
- name: 'swapoff {{ swap.device }} && rm -f {{ swap.device }}'
{%- endif %}
- onlyif: file -L -s {{ swap.device }} | grep -q 'swap file'

{%- endif %}

{%- endfor %}
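
A hedged pillar sketch for the swap-removal branch above. device and engine are the keys used in this hunk; the enabled flag that selects the else branch sits above the excerpt and is assumed:

linux:
  storage:
    swap:
      swap01:
        enabled: false
        engine: file
        device: /swapfile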

+ 62
- 0
linux/system/at.sls View File

@@ -0,0 +1,62 @@
{%- from "linux/map.jinja" import system with context %}

{%- if system.at.enabled is defined and system.at.enabled %}

at_packages:
pkg.installed:
- names: {{ system.at.pkgs }}

at_services:
service.running:
- enable: true
- names: {{ system.at.services }}
- require:
- pkg: at_packages
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}

{%- set allow_users = [] %}
{%- for user_name, user_params in system.at.get('user', {}).items() %}
{%- set user_enabled = user_params.get('enabled', false) and
system.get('user', {}).get(
user_name, {'enabled': true}).get('enabled', true) %}
{%- if user_enabled %}
{%- do allow_users.append(user_name) %}
{%- endif %}
{%- endfor %}

etc_at_allow:
{%- if allow_users %}
file.managed:
- name: /etc/at.allow
- template: jinja
- source: salt://linux/files/cron_users.jinja
- user: root
- group: daemon
- mode: 0640
- defaults:
users: {{ allow_users | yaml }}
- require:
- pkg: at_packages
{%- else %}
file.absent:
- name: /etc/at.allow
{%- endif %}


{#
/etc/at.deny should be absent to comply with
CIS 5.1.8 Ensure at/cron is restricted to authorized users
#}
etc_at_deny:
file.absent:
- name: /etc/at.deny

{%- else %}

fake_linux_system_at:
test.nop:
- comment: Fake state to satisfy 'require sls:linux.system.at'

{%- endif %}

+ 127
- 31
linux/system/auth.sls View File

@@ -1,11 +1,55 @@
{%- from "linux/map.jinja" import auth with context %}

{%- if auth.enabled %}
{%- if auth.duo.enabled %}
include:
- linux.system.auth.duo
{%- else %}
{%- set pam_modules_enable = "" %}
{%- set pam_modules_disable = "" %}
{%- if grains.os_family == 'Debian' %}
linux_auth_pam_packages:
pkg.installed:
- pkgs: [ 'libpam-runtime' ]

linux_auth_pam_add_profile:
file.managed:
- name: /usr/local/bin/pam-add-profile
- source: salt://linux/files/pam-add-profile
- mode: 755
- require:
- pkg: linux_auth_pam_packages
{%- endif %}

{%- if auth.get('mkhomedir', {}).get('enabled', False) %}
{%- if grains.os_family == 'Debian' %}
{%- set pam_modules_enable = pam_modules_enable + ' mkhomedir' %}
linux_auth_mkhomedir_debconf_package:
pkg.installed:
- pkgs: [ 'debconf-utils' ]

linux_auth_mkhomedir_config:
file.managed:
- name: /usr/share/pam-configs/mkhomedir
- source: salt://linux/files/mkhomedir
- template: jinja

{%- endif %}
{%- else %}
{%- if grains.os_family == 'Debian' %}
{%- set pam_modules_disable = pam_modules_disable + ' mkhomedir' %}
{%- endif %}
{%- endif %}

{%- if auth.get('ldap', {}).get('enabled', False) %}
{%- from "linux/map.jinja" import ldap with context %}
{%- if auth.get('ldap', {}).get('enabled', False) %}
{%- from "linux/map.jinja" import ldap with context %}

{%- if grains.os_family == 'Debian' %}
{%- if grains.os_family == 'Debian' %}
{%- set pam_modules_enable = pam_modules_enable + ' ldap' %}

linux_auth_ldap_debconf_package:
pkg.installed:
- pkgs: [ 'debconf-utils' ]

linux_auth_debconf_libnss-ldapd:
debconf.set:
@@ -19,6 +63,8 @@ linux_auth_debconf_libnss-ldapd:
value: 'false'
- require_in:
- pkg: linux_auth_ldap_packages
- require:
- pkg: linux_auth_ldap_debconf_package

linux_auth_debconf_libpam-ldapd:
debconf.set:
@@ -27,44 +73,96 @@ linux_auth_debconf_libpam-ldapd:
libpam-ldapd/enable_shadow:
type: 'boolean'
value: 'true'

{#- Setup mkhomedir and ldap PAM profiles #}
linux_auth_mkhomedir_config:
file.managed:
- name: /usr/share/pam-configs/mkhomedir
- source: salt://linux/files/mkhomedir
- require:
- pkg: linux_auth_ldap_packages

linux_auth_pam_add_profile:
file.managed:
- name: /usr/local/bin/pam-add-profile
- source: salt://linux/files/pam-add-profile
- mode: 755

linux_auth_pam_add_profiles:
{%- endif %}
{%- else %}
{%- if grains.os_family == 'Debian' %}
{%- set pam_modules_disable = pam_modules_disable + ' ldap' %}
{%- endif %}
{%- endif %}

{#- Setup PAM profiles #}
{%- if grains.os_family == 'Debian' %}
{%- if auth.get('mkhomedir', {}).get('enabled', False) %}
linux_auth_pam_add_profiles_mkhomedir_enable:
cmd.run:
- name: /usr/local/bin/pam-add-profile ldap mkhomedir
- unless: "debconf-get-selections | grep libpam-runtime/profiles | grep mkhomedir | grep ldap"
- name: /usr/local/bin/pam-add-profile {{ pam_modules_enable }}
- unless: "[[ `grep -c pam_mkhomedir.so /etc/pam.d/common-session` -ne 0 ]]"
- require:
- file: linux_auth_pam_add_profile
linux_auth_pam_add_profiles_mkhomedir_update:
cmd.wait:
- name: /usr/local/bin/pam-add-profile {{ pam_modules_enable }}
- watch:
- file: linux_auth_mkhomedir_config
- require:
- file: linux_auth_pam_add_profile
{%- if auth.get('ldap', {}).get('enabled', False) %}
- pkg: linux_auth_ldap_packages
{%- endif %}
{%- else %}
linux_auth_pam_remove_profiles_mkhomedir:
cmd.run:
- name: /usr/sbin/pam-auth-update --remove {{ pam_modules_disable }}
- onlyif: "[[ `grep -c pam_mkhomedir.so /etc/pam.d/common-session` -ne 0 ]]"
- require:
- pkg: linux_auth_pam_packages
{%- endif %}

{%- elif grains.os_family == 'RedHat' %}
{%- if auth.get('ldap', {}).get('enabled', False) %}
linux_auth_pam_add_profiles_ldap:
cmd.run:
- name: /usr/local/bin/pam-add-profile {{ pam_modules_enable }}
- unless: "[[ `debconf-get-selections | grep libpam-runtime/profiles | grep -c ldap` -ne 0 ]]"
- require:
- file: linux_auth_pam_add_profile
- pkg: linux_auth_ldap_packages
{%- else %}
linux_auth_pam_remove_profiles_ldap:
cmd.run:
- name: /usr/sbin/pam-auth-update --remove {{ pam_modules_disable }}
- onlyif: "[[ `debconf-get-selections | grep libpam-runtime/profiles | grep -c ldap` -ne 0 ]]"
- require:
- pkg: linux_auth_pam_packages
{%- endif %}

linux_auth_config:
{%- elif grains.os_family == 'RedHat' %}
{%- if auth.get('mkhomedir', {}).get('enabled', False) %}
linux_auth_config_enable_mkhomedir:
cmd.run:
- name: "authconfig --enablemkhomedir --update"
- require:
{%- if auth.get('ldap', {}).get('enabled', False) %}
- pkg: linux_auth_ldap_packages
{%- endif %}
{%- else %}
linux_auth_config_disable_mkhomedir:
cmd.run:
- name: "authconfig --disablemkhomedir --update"
- require:
- pkg: linux_auth_ldap_packages
{%- endif %}
{%- if auth.get('ldap', {}).get('enabled', False) %}
linux_auth_config_enable_ldap:
cmd.run:
- name: "authconfig --enableldap --enableldapauth --enablemkhomedir --update"
- name: "authconfig --enableldap --enableldapauth --update"
- require:
{%- if auth.get('ldap', {}).get('enabled', False) %}
- pkg: linux_auth_ldap_packages
{%- endif %}
{%- else %}
linux_auth_config_disable_ldap:
cmd.run:
- name: "authconfig --disableldap --disableldapauth --update"
- require:
- pkg: linux_auth_ldap_packages
{%- endif %}
{%- endif %}

{%- else %}
{%- if auth.get('ldap', {}).get('enabled', False) %}

linux_auth_nsswitch_config_file:
file.managed:
- name: /etc/nsswitch.conf
- name: /etc/nsswitch.conf
- source: salt://linux/files/nsswitch.conf
- template: jinja
- mode: 644
@@ -73,11 +171,9 @@ linux_auth_nsswitch_config_file:
- watch_in:
- service: linux_auth_nslcd_service

{%- endif %}

linux_auth_ldap_packages:
pkg.installed:
- pkgs: {{ ldap.pkgs }}
- pkgs: {{ ldap.pkgs | json }}

linux_auth_nslcd_config_file:
file.managed:
@@ -95,6 +191,6 @@ linux_auth_nslcd_service:
- enable: true
- name: nslcd

{%- endif %}
{%- endif %}
{%- endif %}
{%- endif %}

+ 36
- 0
linux/system/auth/duo.sls View File

@@ -0,0 +1,36 @@
{%- if grains['os'] == 'Ubuntu' %}

package_duo:
pkg.installed:
- name: duo-unix

login_duo:
file.managed:
- name: /etc/duo/login_duo.conf
- source: salt://linux/files/login_duo.conf
- template: jinja
- user: 'root'
- group: 'root'
- mode: '0600'


pam_duo:
file.managed:
- name: /etc/duo/pam_duo.conf
- source: salt://linux/files/login_duo.conf
- template: jinja
- user: 'root'
- group: 'root'
- mode: '0600'

pam-sshd_config:
file.managed:
- name: /etc/pam.d/sshd
- user: root
- group: root
- source: salt://linux/files/pam-sshd
- mode: 600
- template: jinja

{%- endif %}
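
A hedged pillar sketch for switching authentication over to Duo, matching the auth.duo.enabled guard in linux/system/auth.sls. The keys consumed by login_duo.conf are not shown in this change, so the host/ikey/skey names below are hypothetical placeholders only:

linux:
  system:
    auth:
      enabled: true
      duo:
        enabled: true
        # hypothetical placeholders; check linux/files/login_duo.conf for the real keys
        duo_host: api-XXXXXXXX.duosecurity.com
        duo_ikey: DIXXXXXXXXXXXXXXXXXX
        duo_skey: <secret>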


+ 1
- 1
linux/system/autoupdates.sls View File

@@ -6,7 +6,7 @@
{%- if system.autoupdates.pkgs %}
linux_autoupdates_packages:
pkg.installed:
- pkgs: {{ system.autoupdates.pkgs }}
- pkgs: {{ system.autoupdates.pkgs | json }}
{%- endif %}

{%- if grains.os_family == 'Debian' %}

+ 10
- 0
linux/system/banner.sls View File

@@ -0,0 +1,10 @@
{%- from "linux/map.jinja" import banner with context %}

{%- if banner.get('enabled', False) %}
/etc/issue:
file.managed:
- user: root
- group: root
- mode: 644
- contents_pillar: linux:system:banner:contents
{%- endif %}
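
A minimal pillar sketch for the login banner state above; the contents_pillar path is taken verbatim from the state, the banner text is illustrative:

linux:
  system:
    banner:
      enabled: true
      contents: |
        Authorized access only. Disconnect immediately if you are not an authorized user.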

+ 87
- 0
linux/system/cron.sls View File

@@ -0,0 +1,87 @@
{%- from "linux/map.jinja" import system with context %}

{%- if system.cron.enabled is defined and system.cron.enabled %}

cron_packages:
pkg.installed:
- names: {{ system.cron.pkgs }}

cron_services:
service.running:
- enable: true
- names: {{ system.cron.services }}
- require:
- pkg: cron_packages
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}

{%- set allow_users = [] %}
{%- for user_name, user_params in system.cron.get('user', {}).items() %}
{%- set user_enabled = user_params.get('enabled', false) and
system.get('user', {}).get(
user_name, {'enabled': true}).get('enabled', true) %}
{%- if user_enabled %}
{%- do allow_users.append(user_name) %}
{%- endif %}
{%- endfor %}

etc_cron_allow:
{%- if allow_users %}
file.managed:
- name: /etc/cron.allow
- template: jinja
- source: salt://linux/files/cron_users.jinja
- user: root
- group: crontab
- mode: 0640
- defaults:
users: {{ allow_users | yaml }}
- require:
- cron_packages
{%- else %}
file.absent:
- name: /etc/cron.allow
{%- endif %}

{#
/etc/cron.deny should be absent to comply with
CIS 5.1.8 Ensure at/cron is restricted to authorized users
#}
etc_cron_deny:
file.absent:
- name: /etc/cron.deny

etc_crontab:
file.managed:
- name: /etc/crontab
- user: root
- group: root
- mode: 0600
- replace: False
- require:
- cron_packages

etc_cron_dirs:
file.directory:
- names:
- /etc/cron.d
- /etc/cron.daily
- /etc/cron.hourly
- /etc/cron.monthly
- /etc/cron.weekly
- user: root
- group: root
- dir_mode: 0600
- recurse:
- ignore_files
- require:
- cron_packages

{%- else %}

fake_linux_system_cron:
test.nop:
- comment: Fake state to satisfy 'require sls:linux.system.cron'

{%- endif %}
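
A hedged pillar sketch for the cron hardening above; user entries populate /etc/cron.allow, while pkgs and services default from linux/map.jinja. The user names are illustrative:

linux:
  system:
    cron:
      enabled: true
      user:
        root:
          enabled: true
        backupninja:
          enabled: true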

+ 20
- 6
linux/system/file.sls View File

@@ -4,12 +4,16 @@
{%- for file_name, file in system.file.items() %}

linux_file_{{ file_name }}:
{%- if file.serialize is defined %}
file.serialize:
- formatter: {{ file.serialize }}
{%- if file.contents is defined %}
- dataset: {{ file.contents|json }}
{%- elif file.contents_pillar is defined %}
- dataset_pillar: {{ file.contents_pillar }}
{%- endif %}
{%- else %}
file.managed:
{%- if file.name is defined %}
- name: {{ file.name }}
{%- else %}
- name: {{ file_name }}
{%- endif %}
{%- if file.source is defined %}
- source: {{ file.source }}
{%- if file.hash is defined %}
@@ -17,13 +21,23 @@ linux_file_{{ file_name }}:
{%- else %}
- skip_verify: True
{%- endif %}
{%- if file.template is defined %}
- template: {{ file.template }}
{%- endif %}
{%- elif file.contents is defined %}
- contents: {{ file.contents|yaml }}
- contents: {{ file.contents|json }}
{%- elif file.contents_pillar is defined %}
- contents_pillar: {{ file.contents_pillar }}
{%- elif file.contents_grains is defined %}
- contents_grains: {{ file.contents_grains }}
{%- endif %}

{%- endif %}
{%- if file.name is defined %}
- name: {{ file.name }}
{%- else %}
- name: {{ file_name }}
{%- endif %}
- makedirs: {{ file.get('makedirs', 'True') }}
- user: {{ file.get('user', 'root') }}
- group: {{ file.get('group', 'root') }}
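
A hedged pillar sketch covering both branches of the file state above: one entry rendered through file.serialize and one plain file.managed entry. The paths and contents are illustrative:

linux:
  system:
    file:
      /etc/docker/daemon.json:
        serialize: json
        contents:
          log-driver: json-file
          log-opts:
            max-size: 50m
      /etc/motd-extra:
        contents: |
          Managed by the linux formula.
        user: root
        group: root
        makedirs: true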

+ 17
- 3
linux/system/grub.sls View File

@@ -7,6 +7,7 @@ grub_d_directory:
- makedirs: True

{%- if grains['os_family'] == 'RedHat' %}
{%- set boot_grub_cfg = '/boot/grub2/grub.cfg' %}
/etc/default/grub:
file.append:
- text:
@@ -14,14 +15,27 @@ grub_d_directory:

grub_update:
cmd.wait:
- name: grub2-mkconfig -o /boot/grub2/grub.cfg
- name: grub2-mkconfig -o {{ boot_grub_cfg }}

{%- else %}
{%- set boot_grub_cfg = '/boot/grub/grub.cfg' %}

{%- if grains.get('virtual_subtype', None) not in ['Docker', 'LXC'] %}
grub_update:
cmd.wait:
- name: update-grub
{%- endif %}
{%- if grains.get('virtual_subtype') in ['Docker', 'LXC'] %}
- onlyif: /bin/false
{%- endif %}

{%- endif %}

grub_cfg_permissions:
file.managed:
- name: {{ boot_grub_cfg }}
- user: 'root'
- group: 'root'
- mode: '400'
- replace: false
- onlyif: test -f {{ boot_grub_cfg }}
- require:
- cmd: grub_update

+ 9
- 4
linux/system/hugepages.sls View File

@@ -19,25 +19,30 @@ include:

{%- for hugepages_type, hugepages in system.kernel.hugepages.items() %}

{%- if hugepages.get('mount', False) or hugepages.get('default', False) %}

hugepages_mount_{{ hugepages_type }}:
mount.mounted:
- name: {{ hugepages.mount_point }}
- device: Hugetlbfs-kvm
- device: Hugetlbfs-kvm-{{ hugepages.size|lower }}
- fstype: hugetlbfs
- mkmnt: true
- opts: mode=775,pagesize={{ hugepages.size }}
- mount: {{ hugepages.mount|default('true') }}

# Make hugepages available right away with a temporary sysctl write.
# This will be handled via kernel args after reboot, so don't use `sysctl.present`
{%- if hugepages.get('default', False) %}
hugepages_sysctl_vm_nr_hugepages:
cmd.run:
- name: "sysctl vm.nr_hugepages={{ hugepages.count }}"
- unless: "sysctl vm.nr_hugepages | grep -qE '{{ hugepages.count }}'"

{%- endif %}

{%- endfor %}

{%- endif %}

# systemd always creates default mount point at /dev/hugepages
# we have to disable it, as we configure our own mount point for DPDK.
mask_dev_hugepages:
cmd.run:
- name: "systemctl mask dev-hugepages.mount"

+ 16
- 0
linux/system/init.sls View File

@@ -3,6 +3,16 @@
include:
- linux.system.env
- linux.system.profile
- linux.system.shell
{%- if system.login_defs is defined %}
- linux.system.login_defs
{%- endif %}
{%- if system.at is defined %}
- linux.system.at
{%- endif %}
{%- if system.cron is defined %}
- linux.system.cron
{%- endif %}
{%- if system.repo|length > 0 %}
- linux.system.repo
{%- endif %}
@@ -114,3 +124,9 @@ include:
{%- if system.auth is defined %}
- linux.system.auth
{%- endif %}
{%- if system.banner is defined %}
- linux.system.banner
{%- endif %}
{%- if system.mcelog is defined %}
- linux.system.mcelog
{%- endif %}

+ 25
- 24
linux/system/job.sls View File

@@ -3,45 +3,46 @@

include:
- linux.system.user
- linux.system.cron

{%- for name, job in system.job.items() %}
{%- for name, job in system.job.items() %}
{%- set job_user = job.get('user', 'root') %}

linux_job_{{ job.command }}:
{%- if job.enabled|default(True) %}
{%- if job.get('enabled', True) %}
cron.present:
- name: >
{{ job.command }}
{%- if job.get('identifier', True) %}
{%- if job.get('identifier', True) %}
- identifier: {{ job.get('identifier', job.get('name', name)) }}
{%- endif %}
- user: {{ job.user|default("root") }}
{%- if job.minute is defined %}
{%- endif %}
- user: {{ job_user }}
{%- if job.minute is defined %}
- minute: '{{ job.minute }}'
{%- endif %}
{%- if job.hour is defined %}
{%- endif %}
{%- if job.hour is defined %}
- hour: '{{ job.hour }}'
{%- endif %}
{%- if job.daymonth is defined %}
{%- endif %}
{%- if job.daymonth is defined %}
- daymonth: '{{ job.daymonth }}'
{%- endif %}
{%- if job.month is defined %}
{%- endif %}
{%- if job.month is defined %}
- month: '{{ job.month }}'
{%- endif %}
{%- if job.dayweek is defined %}
{%- endif %}
{%- if job.dayweek is defined %}
- dayweek: '{{ job.dayweek }}'
{%- endif %}
{%- if job.user|default("root") in system.get('user', {}).keys() %}
{%- endif %}
- require:
- user: system_user_{{ job.user|default("root") }}
{%- endif %}
{%- else %}
- sls: linux.system.cron
{%- if job_user in system.get('user', {}).keys() %}
- user: system_user_{{ job_user }}
{%- endif %}
{%- else %}
cron.absent:
- name: {{ job.command }}
{%- if job.get('identifier', True) %}
{%- if job.get('identifier', True) %}
- identifier: {{ job.get('identifier', job.get('name', name)) }}
{%- endif %}
{%- endif %}
{%- endif %}

{%- endfor %}

{%- endfor %}
{%- endif %}
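
A hedged pillar sketch for a cron job managed by the state above; the keys shown (command, identifier, enabled, user, minute, hour) all appear in this hunk, the values are illustrative:

linux:
  system:
    job:
      db_backup:
        command: /usr/local/bin/backup-db.sh
        identifier: db_backup
        enabled: true
        user: root
        minute: 0
        hour: 3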

+ 16
- 4
linux/system/kernel.sls View File

@@ -6,12 +6,13 @@
{%- set kernel_boot_opts = [] %}
{%- do kernel_boot_opts.append('isolcpus=' ~ system.kernel.isolcpu) if system.kernel.isolcpu is defined %}
{%- do kernel_boot_opts.append('elevator=' ~ system.kernel.elevator) if system.kernel.elevator is defined %}
{%- do kernel_boot_opts.append('transparent_hugepage=' ~ system.kernel.transparent_hugepage) if system.kernel.transparent_hugepage is defined %}
{%- do kernel_boot_opts.extend(system.kernel.boot_options) if system.kernel.boot_options is defined %}

{%- if kernel_boot_opts %}
include:
- linux.system.grub

{%- if kernel_boot_opts %}
/etc/default/grub.d/99-custom-settings.cfg:
file.managed:
- contents: 'GRUB_CMDLINE_LINUX_DEFAULT="$GRUB_CMDLINE_LINUX_DEFAULT {{ kernel_boot_opts|join(' ') }}"'
@@ -56,7 +57,16 @@ linux_kernel_module_{{ module }}:

{%- endfor %}

{%- for module_name, module_content in system.kernel.get('module', {}).items() %}
{%- if system.kernel.module is defined %}

modprobe_d_directory:
file.directory:
- name: /etc/modprobe.d
- user: root
- group: root
- mode: 755

{%- for module_name in system.kernel.module %}

/etc/modprobe.d/{{ module_name }}.conf:
file.managed:
@@ -66,10 +76,12 @@ linux_kernel_module_{{ module }}:
- template: jinja
- source: salt://linux/files/modprobe.conf.jinja
- defaults:
module_content: {{ module_content }}
module_name: {{ module_name }}
- require:
- file: modprobe_d_directory

{%- endfor %}
{%- endfor %}
{%- endif %}
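
A hedged pillar sketch for the modprobe.d handling above. Only the module name level is visible in this hunk; the structure consumed by modprobe.conf.jinja (the option block below) is an assumption for illustration:

linux:
  system:
    kernel:
      module:
        nf_conntrack:
          option:
            hashsize: 262144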

{%- for sysctl_name, sysctl_value in system.kernel.get('sysctl', {}).items() %}


+ 13
- 0
linux/system/login_defs.sls View File

@@ -0,0 +1,13 @@
{%- from "linux/map.jinja" import system with context %}
{%- if system.enabled %}
{%- if system.login_defs is defined %}
login_defs:
file.managed:
- name: /etc/login.defs
- source: salt://linux/files/login.defs.jinja
- template: jinja
- user: root
- group: root
- mode: 644
{%- endif %}
{%- endif %}

+ 32
- 0
linux/system/mcelog.sls View File

@@ -0,0 +1,32 @@
{%- from "linux/map.jinja" import system with context %}
{%- if system.enabled %}

{%- if system.get('mcelog',{}).get('enabled', False) %}

mcelog_packages:
pkg.installed:
- name: mcelog

mcelog_conf:
file.managed:
- name: /etc/mcelog/mcelog.conf
- source: salt://linux/files/mcelog.conf
- template: jinja
- user: root
- group: root
- mode: 644
- require:
- pkg: mcelog_packages

mce_service:
service.running:
- name: mcelog
- enable: true
- require:
- pkg: mcelog_packages
- watch:
- file: mcelog_conf

{%- endif %}

{%- endif %}

+ 7
- 15
linux/system/motd.sls View File

@@ -1,27 +1,19 @@
{%- from "linux/map.jinja" import system with context %}
{%- if system.enabled %}
{%- if system.enabled and system.motd|length > 0 %}

{%- if grains.os_family == 'RedHat' %}
/etc/update-motd.d:
file.directory:
- clean: true

{#- update-motd is not available in RedHat, so support only static motd #}
{%- if system.motd is string %}

{#- Set static motd only #}
/etc/motd:
file.managed:
- contents_pillar: linux:system:motd

{%- else %}

{%- if grains.os == 'Ubuntu' %}
package_update_motd:
pkg.installed:
- name: update-motd
- require_in:
- file: /etc/update-motd.d
{%- endif %}

/etc/update-motd.d:
file.directory:
- clean: true

{%- if grains.oscodename == "jessie" %}
motd_fix_pam_sshd:
file.replace:

+ 1
- 1
linux/system/package.sls View File

@@ -51,7 +51,7 @@ linux_extra_package_{{ name }}:
{%- if pkgs %}
linux_extra_packages_{{ pkgs_group }}:
pkg.{{ pkgs_group }}:
- pkgs: {{ pkgs }}
- pkgs: {{ pkgs | json }}
{%- endif %}
{%- endfor %}


+ 1
- 1
linux/system/profile.sls View File

@@ -20,7 +20,7 @@ profile.d_script_{{ name }}:
- defaults:
script: {{ script|yaml }}
- require_in:
- service: profile.d_clean
- file: profile.d_clean
{% endfor %}

{%- endif %}

+ 126
- 128
linux/system/repo.sls View File

@@ -1,14 +1,15 @@
{%- from "linux/map.jinja" import system with context %}
{%- if system.enabled %}

{% if system.pkgs %}
linux_repo_prereq_pkgs:
pkg.installed:
- pkgs: {{ system.pkgs }}

# global proxy setup
{%- if system.proxy.get('pkg', {}).get('enabled', False) %}
{%- if grains.os_family == 'Debian' %}
- pkgs: {{ system.pkgs | json }}
{%- endif %}

# global proxy setup
{%- if grains.os_family == 'Debian' %}
{%- if system.proxy.get('pkg', {}).get('enabled', False) %}
/etc/apt/apt.conf.d/99proxies-salt:
file.managed:
- template: jinja
@@ -18,33 +19,30 @@ linux_repo_prereq_pkgs:
https: {{ system.proxy.get('pkg', {}).get('https', None) | default(system.proxy.get('https', None), true) }}
http: {{ system.proxy.get('pkg', {}).get('http', None) | default(system.proxy.get('http', None), true) }}
ftp: {{ system.proxy.get('pkg', {}).get('ftp', None) | default(system.proxy.get('ftp', None), true) }}

{%- else %}

{%- else %}
/etc/apt/apt.conf.d/99proxies-salt:
file.absent
{%- endif %}
{%- else %}
# Implement global proxy configuration for non-Debian OS families.
{%- endif %}

{%- endif %}
{%- endif %}

{% set default_repos = {} %}

{%- if system.purge_repos|default(False) %}
{% set default_repos = {} %}

{%- if system.purge_repos|default(False) %}
purge_sources_list_d_repos:
file.directory:
- name: /etc/apt/sources.list.d/
- clean: True

{%- endif %}
file.directory:
- name: /etc/apt/sources.list.d/
- clean: True
{%- endif %}

{%- for name, repo in system.repo.items() %}
{%- set name=repo.get('name', name) %}
{%- if grains.os_family == 'Debian' %}
{%- for name, repo in system.repo.items() %}
{%- set name=repo.get('name', name) %}
{%- if grains.os_family == 'Debian' %}

# per repository proxy setup
{%- if repo.get('proxy', {}).get('enabled', False) %}
{%- set external_host = repo.proxy.get('host', None) or repo.source.split('/')[2] %}
{%- if repo.get('proxy', {}).get('enabled', False) %}
{%- set external_host = repo.proxy.get('host', None) or repo.source.split('/')[2] %}
/etc/apt/apt.conf.d/99proxies-salt-{{ name }}:
file.managed:
- template: jinja
@@ -54,13 +52,12 @@ purge_sources_list_d_repos:
https: {{ repo.proxy.get('https', None) or system.proxy.get('pkg', {}).get('https', None) | default(system.proxy.get('https', None), True) }}
http: {{ repo.proxy.get('http', None) or system.proxy.get('pkg', {}).get('http', None) | default(system.proxy.get('http', None), True) }}
ftp: {{ repo.proxy.get('ftp', None) or system.proxy.get('pkg', {}).get('ftp', None) | default(system.proxy.get('ftp', None), True) }}
{%- else %}
{%- else %}
/etc/apt/apt.conf.d/99proxies-salt-{{ name }}:
file.absent
{%- endif %}

{%- if repo.pin is defined %}
{%- endif %}

{%- if repo.pin is defined or repo.pinning is defined %}
linux_repo_{{ name }}_pin:
file.managed:
- name: /etc/apt/preferences.d/{{ name }}
@@ -68,143 +65,144 @@ linux_repo_{{ name }}_pin:
- template: jinja
- defaults:
repo_name: {{ name }}

{%- else %}

{%- else %}
linux_repo_{{ name }}_pin:
file.absent:
- name: /etc/apt/preferences.d/{{ name }}
{%- endif %}

{%- endif %}

{%- if repo.get('default', False) %}

{%- do default_repos.update({name: repo}) %}

{%- if repo.get('key') %}

{%- if repo.get('key') %}
linux_repo_{{ name }}_key:
cmd.wait:
- name: "echo '{{ repo.key }}' | apt-key add -"
- watch:
{% set repo_key = salt['hashutil.base64_b64encode'](repo.key) %}
cmd.run:
- name: "echo '{{ repo_key }}' | base64 -d | apt-key add -"
- require_in:
{%- if repo.get('default', False) %}
- file: default_repo_list

{%- elif repo.key_url|default(False) %}

{% else %}
- pkgrepo: linux_repo_{{ name }}
{% endif %}

{# key_url is fetched by curl when salt <2017.7; newer salt versions have
fixed the bug with using a proxy_host/port specified in minion.conf

NOTE: curl/cmd.run usage to fetch the gpg key has limited functionality behind a proxy.
Environments with salt >= 2017.7 should use key_url specified at the
pkgrepo.manage state (which uses the properly configured http_host in
minion.conf). Older versions of salt require the proxy to be set in the
ENV, so fetching the gpg key with curl here can make sense for backward
compatibility. Be aware that as of salt 2018.3 the no_proxy option is
not implemented at all.
#}
{%- elif repo.key_url|default(False) and grains['saltversioninfo'] < [2017, 7] and not repo.key_url.startswith('salt://') %}
linux_repo_{{ name }}_key:
cmd.wait:
- name: "curl -s {{ repo.key_url }} | apt-key add -"
- watch:
cmd.run:
- name: "curl -sL {{ repo.key_url }} | apt-key add -"
- require_in:
{%- if repo.get('default', False) %}
- file: default_repo_list
{% else %}
- pkgrepo: linux_repo_{{ name }}
{% endif %}
{%- endif %}

{%- endif %}

{%- else %}

{%- if repo.get('enabled', True) %}
{%- if repo.get('default', False) %}
{%- do default_repos.update({name: repo}) %}
{%- else %}

{%- if repo.get('enabled', True) %}
linux_repo_{{ name }}:
pkgrepo.managed:
{%- if repo.ppa is defined %}
- refresh_db: False
- require_in:
- refresh_db
{%- if repo.ppa is defined %}
- ppa: {{ repo.ppa }}
{%- else %}
{%- else %}
- humanname: {{ name }}
- name: {{ repo.source }}
{%- if repo.architectures is defined %}
{%- if repo.architectures is defined %}
- architectures: {{ repo.architectures }}
{%- endif %}
{%- endif %}
- file: /etc/apt/sources.list.d/{{ name }}.list
- clean_file: {{ repo.clean|default(True) }}
{%- if repo.key_id is defined %}
- clean_file: {{ repo.get('clean_file', True) }}
{%- if repo.key_id is defined %}
- keyid: {{ repo.key_id }}
{%- endif %}
{%- if repo.key_server is defined %}
{%- endif %}
{%- if repo.key_server is defined %}
- keyserver: {{ repo.key_server }}
{%- endif %}
{%- if repo.key_url is defined %}
{%- endif %}
{%- if repo.key_url is defined and (grains['saltversioninfo'] >= [2017, 7] or repo.key_url.startswith('salt://')) %}
- key_url: {{ repo.key_url }}
{%- endif %}
{%- endif %}
- consolidate: {{ repo.get('consolidate', False) }}
- clean_file: {{ repo.get('clean_file', False) }}
- refresh_db: {{ repo.get('refresh_db', True) }}
- require:
- pkg: linux_repo_prereq_pkgs
{%- if repo.get('proxy', {}).get('enabled', False) %}
- file: /etc/apt/apt.conf.d/99proxies-salt-{{ name }}
{%- endif %}
{%- if system.proxy.get('pkg', {}).get('enabled', False) %}
- file: /etc/apt/apt.conf.d/99proxies-salt
{%- endif %}
{%- if system.purge_repos|default(False) %}
{%- if system.purge_repos|default(False) %}
- file: purge_sources_list_d_repos
{%- endif %}
{%- endif %}

{%- else %}

linux_repo_{{ name }}_absent:
{%- endif %}
{%- endif %}
{%- else %}
linux_repo_{{ name }}:
pkgrepo.absent:
{%- if repo.ppa is defined %}
- refresh_db: False
- require:
- file: /etc/apt/apt.conf.d/99proxies-salt-{{ name }}
- require_in:
- refresh_db
{%- if repo.ppa is defined %}
- ppa: {{ repo.ppa }}
{%- if repo.key_id is defined %}
{%- if repo.key_id is defined %}
- keyid_ppa: {{ repo.keyid_ppa }}
{%- endif %}
{%- else %}
{%- endif %}
{%- else %}
- file: /etc/apt/sources.list.d/{{ name }}.list
{%- if repo.key_id is defined %}
{%- if repo.key_id is defined %}
- keyid: {{ repo.key_id }}
{%- endif %}
{%- endif %}
{%- endif %}
{%- endif %}
{%- endif %}
{%- endif %}
file.absent:
- name: /etc/apt/sources.list.d/{{ name }}.list

{%- endif %}
{%- if grains.os_family == "RedHat" %}

{%- endif %}

{#- os_family Debian #}
{%- endif %}

{%- if grains.os_family == "RedHat" %}

{%- if repo.get('enabled', True) %}

{%- if repo.get('proxy', {}).get('enabled', False) %}
{%- if repo.get('enabled', True) %}
{%- if repo.get('proxy', {}).get('enabled', False) %}
# PLACEHOLDER
# TODO, implement per proxy configuration for Yum
{%- endif %}
{%- endif %}

{%- if not repo.get('default', False) %}
{%- if not repo.get('default', False) %}
linux_repo_{{ name }}:
pkgrepo.managed:
- refresh_db: False
- require_in:
- refresh_db
- name: {{ name }}
- humanname: {{ repo.get('humanname', name) }}
{%- if repo.mirrorlist is defined %}
{%- if repo.mirrorlist is defined %}
- mirrorlist: {{ repo.mirrorlist }}
{%- else %}
{%- else %}
- baseurl: {{ repo.source }}
{%- endif %}
{%- endif %}
- gpgcheck: {% if repo.get('gpgcheck', False) %}1{% else %}0{% endif %}
{%- if repo.gpgkey is defined %}
{%- if repo.gpgkey is defined %}
- gpgkey: {{ repo.gpgkey }}
{%- endif %}
- require:
- pkg: linux_repo_prereq_pkgs
{%- endif %}

{#- repo.enabled is false #}
{%- else %}
{%- endif %}
{%- endif %}
{%- else %}
pkgrepo.absent:
- refresh_db: False
- require_in:
- refresh_db
- name: {{ repo.source }}
{%- endif %}

{#- os_family Redhat #}
{%- endif %}

{#- repo.items() loop #}
{%- endfor %}
{%- endif %}
{%- endif %}
{%- endfor %}

{%- if default_repos|length > 0 and grains.os_family == 'Debian' %}
{%- if default_repos|length > 0 and grains.os_family == 'Debian' %}

default_repo_list:
file.managed:
@@ -214,20 +212,20 @@ default_repo_list:
- user: root
- group: root
- mode: 0644
{%- if system.purge_repos|default(False) %}
{%- if system.purge_repos|default(False) %}
- replace: True
{%- endif %}
{%- endif %}
- defaults:
default_repos: {{ default_repos }}
- require:
- pkg: linux_repo_prereq_pkgs

refresh_default_repo:
module.wait:
- name: pkg.refresh_db
- watch:
- file: default_repo_list
{%- endif %}

{%- endif %}
refresh_db:
{%- if system.get('refresh_repos_meta', True) %}
module.run:
- name: pkg.refresh_db
{%- else %}
test.succeed_without_changes
{%- endif %}

{%- endif %}
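
As a usage sketch for the reworked Debian branch above (repository name, URLs and proxy host are illustrative, not taken from this changeset), pillar data would look roughly like:

linux:
  system:
    purge_repos: false
    repo:
      example:
        source: "deb http://mirror.example.com/ubuntu xenial main"
        architectures: amd64
        key_url: "http://mirror.example.com/archive-key.gpg"
        clean_file: true
        proxy:
          enabled: true
          host: proxy.example.com

On salt older than 2017.7 (and for non-salt:// URLs) the key_url is fetched with curl as in the hunk above; on 2017.7 and newer it is passed straight to pkgrepo.managed.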

+ 45
- 0
linux/system/shell.sls

@@ -0,0 +1,45 @@
{%- from "linux/map.jinja" import system with context %}
{%- if system.enabled %}
{%- if system.shell is defined %}

{%- if system.shell.umask is defined %}
etc_bash_bashrc_umask:
file.blockreplace:
- name: /etc/bash.bashrc
- marker_start: "# BEGIN CIS 5.4.4 default user umask"
- marker_end: "# END CIS 5.4.4 default user umask"
- content: "umask {{ system.shell.umask }}"
- append_if_not_found: True
- onlyif: test -f /etc/bash.bashrc

etc_profile_umask:
file.blockreplace:
- name: /etc/profile
- marker_start: "# BEGIN CIS 5.4.4 default user umask"
- marker_end: "# END CIS 5.4.4 default user umask"
- content: "umask {{ system.shell.umask }}"
- append_if_not_found: True
- onlyif: test -f /etc/profile
{%- endif %}

{%- if system.shell.timeout is defined %}
etc_bash_bashrc_timeout:
file.blockreplace:
- name: /etc/bash.bashrc
- marker_start: "# BEGIN CIS 5.4.5 default user shell timeout"
- marker_end: "# END CIS 5.4.5 default user shell timeout"
- content: "TMOUT={{ system.shell.timeout }}"
- append_if_not_found: True
- onlyif: test -f /etc/bash.bashrc

etc_profile_timeout:
file.blockreplace:
- name: /etc/profile
- marker_start: "# BEGIN CIS 5.4.5 default user shell timeout"
- marker_end: "# END CIS 5.4.5 default user shell timeout"
- content: "TMOUT={{ system.shell.timeout }}"
- append_if_not_found: True
- onlyif: test -f /etc/profile
{%- endif %}
{%- endif %}
{%- endif %}
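
A minimal pillar sketch driving this new state (the umask and timeout values shown are illustrative):

linux:
  system:
    shell:
      umask: "027"
      timeout: 600

Both blocks are wrapped in onlyif checks, so nothing is written on hosts that lack /etc/bash.bashrc or /etc/profile.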

+ 18
- 2
linux/system/sysfs.sls

@@ -11,6 +11,8 @@ linux_sysfs_package:
- require:
- pkg: linux_sysfs_package

{% set apply = system.get('sysfs', {}).pop('enable_apply', True) %}

{%- for name, sysfs in system.get('sysfs', {}).items() %}

/etc/sysfs.d/{{ name }}.conf:
@@ -26,11 +28,21 @@ linux_sysfs_package:
- require:
- file: /etc/sysfs.d

{%- for key, value in sysfs.items() %}
{%- if sysfs is mapping %}
{%- set sysfs_list = [sysfs] %}
{%- else %}
{%- set sysfs_list = sysfs %}
{%- endif %}

{%- if apply %}

{%- for item in sysfs_list %}
{%- set list_idx = loop.index %}
{%- for key, value in item.items() %}
{%- if key not in ["mode", "owner"] %}
{%- if grains.get('virtual_subtype', None) not in ['Docker', 'LXC'] %}
{#- Sysfs cannot be set in docker, LXC, etc. #}
linux_sysfs_write_{{ name }}_{{ key }}:
linux_sysfs_write_{{ list_idx }}_{{ name }}_{{ key }}:
module.run:
- name: sysfs.write
- key: {{ key }}
@@ -40,3 +52,7 @@ linux_sysfs_write_{{ name }}_{{ key }}:
{%- endfor %}

{%- endfor %}

{%- endif %}

{%- endfor %}
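
The reworked loop accepts each sysfs group either as a single mapping or as a list of mappings, and the new enable_apply flag can suppress the live sysfs.write calls while /etc/sysfs.d is still rendered. A sketch with illustrative attribute paths and values:

linux:
  system:
    sysfs:
      enable_apply: true
      disk-scheduler:
        block/sda/queue/scheduler: deadline
      rq-affinity:
      - block/sda/queue/rq_affinity: 2
      - block/sdb/queue/rq_affinity: 2

The mode and owner keys are skipped by the write loop, and writes are also skipped inside Docker/LXC, where sysfs cannot be set.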

+ 4
- 1
linux/system/timezone.sls

@@ -5,8 +5,11 @@

{{ system.timezone }}:
timezone.system:
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
- utc: {{ system.utc }}

{%- endif %}

{%- endif %}
{%- endif %}
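
For reference, the state is driven by two pillar keys (the zone shown is illustrative):

linux:
  system:
    timezone: Europe/Prague
    utc: true

The added onlyif: /bin/false guard simply turns the state into a no-op on minions that set the noservices grain.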

+ 39
- 2
linux/system/user.sls

@@ -15,6 +15,23 @@ include:
{%- endif %}
{%- endfor %}

{%- if user.gid is not defined %}
system_group_{{ name }}:
group.present:
- name: {{ name }}
- require_in:
- user: system_user_{{ name }}
{%- endif %}

{%- if user.get('makedirs') %}
system_user_home_parentdir_{{ user.home }}:
file.directory:
- name: {{ user.home | path_join("..") }}
- makedirs: true
- require_in:
- user: system_user_{{ name }}
{%- endif %}

system_user_{{ name }}:
user.present:
- name: {{ name }}
@@ -29,18 +46,38 @@ system_user_{{ name }}:
- password: {{ user.password }}
- hash_password: {{ user.get('hash_password', False) }}
{% endif %}
{%- if user.gid is defined and user.gid %}
- gid: {{ user.gid }}
{%- else %}
- gid_from_name: true
{%- endif %}
{%- if user.groups is defined %}
- groups: {{ user.groups }}
{%- endif %}
{%- if user.system is defined and user.system %}
- system: True
- shell: {{ user.get('shell', '/bin/false') }}
{%- else %}
- shell: {{ user.get('shell', '/bin/bash') }}
{%- endif %}
{%- if user.uid is defined and user.uid %}
- uid: {{ user.uid }}
{%- endif %}
{%- if user.unique is defined %}
- unique: {{ user.unique }}
{%- endif %}
{%- if user.maxdays is defined %}
- maxdays: {{ user.maxdays }}
{%- endif %}
{%- if user.mindays is defined %}
- mindays: {{ user.mindays }}
{%- endif %}
{%- if user.warndays is defined %}
- warndays: {{ user.warndays }}
{%- endif %}
{%- if user.inactdays is defined %}
- inactdays: {{ user.inactdays }}
{%- endif %}
- require: {{ requires|yaml }}
{%- if user.allow_uid_change is defined and user.allow_uid_change %}
- allow_uid_change: true
@@ -50,7 +87,7 @@ system_user_home_{{ user.home }}:
file.directory:
- name: {{ user.home }}
- user: {{ name }}
- mode: 700
- mode: {{ user.get('home_dir_mode', 700) }}
- makedirs: true
- require:
- user: system_user_{{ name }}
@@ -74,7 +111,7 @@ system_user_home_{{ user.home }}:

/etc/sudoers.d/90-salt-user-{{ name|replace('.', '-') }}:
file.absent
{%- endif %}

{%- else %}
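
A pillar sketch exercising the new options in this hunk (user name, IDs and aging values are illustrative):

linux:
  system:
    user:
      jdoe:
        name: jdoe
        home: /home/jdoe
        makedirs: true            # creates the parent of the home dir first
        home_dir_mode: 750
        gid: 1001                 # omit gid to have a same-named group created automatically
        groups:
        - sudo
        maxdays: 90
        mindays: 7
        warndays: 7
        inactdays: 30
        shell: /bin/bash

When gid is not given, the new system_group state creates a group named after the user and gid_from_name is used instead.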

+ 1
- 1
metadata.yml

@@ -1,3 +1,3 @@
name: "linux"
version: "2017.4.1"
source: "https://github.com/tcpcloud/salt-formula-linux"
source: "https://github.com/salt-formulas/salt-formula-linux"

+ 37
- 0
metadata/service/system/cis/cis-1-1-1-1.yml

@@ -0,0 +1,37 @@
# 1.1.1.1 Ensure mounting of cramfs filesystems is disabled
#
# Description
# ===========
# The cramfs filesystem type is a compressed read-only Linux filesystem
# embedded in small footprint systems. A cramfs image can be used without
# having to first decompress the image.
#
# Rationale
# =========
# Removing support for unneeded filesystem types reduces the local attack
# surface of the server. If this filesystem type is not needed, disable it.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v cramfs
# install /bin/true
# # lsmod | grep cramfs
# <No output>
#
# Remediation
# ===========
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install cramfs /bin/true
#
parameters:
linux:
system:
kernel:
module:
cramfs:
install:
command: /bin/true


+ 36
- 0
metadata/service/system/cis/cis-1-1-1-2.yml

@@ -0,0 +1,36 @@
# 1.1.1.2 Ensure mounting of freevxfs filesystems is disabled
#
# Description
# ===========
# The freevxfs filesystem type is a free version of the Veritas type
# filesystem. This is the primary filesystem type for HP-UX operating systems.
#
# Rationale
# =========
# Removing support for unneeded filesystem types reduces the local attack
# surface of the system. If this filesystem type is not needed, disable it.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v freevxfs
# install /bin/true
# # lsmod | grep freevxfs
# <No output>
#
# Remediation
# ===========
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install freevxfs /bin/true
#
parameters:
linux:
system:
kernel:
module:
freevxfs:
install:
command: /bin/true


+ 36
- 0
metadata/service/system/cis/cis-1-1-1-3.yml

@@ -0,0 +1,36 @@
# 1.1.1.3 Ensure mounting of jffs2 filesystems is disabled
#
# Description
# ===========
# The jffs2 (journaling flash filesystem 2) filesystem type is a
# log-structured filesystem used in flash memory devices.
#
# Rationale
# =========
# Removing support for unneeded filesystem types reduces the local attack
# surface of the system. If this filesystem type is not needed, disable it.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v jffs2
# install /bin/true
# # lsmod | grep jffs2
# <No output>
#
# Remediation
# ===========
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install jffs2 /bin/true
#
parameters:
linux:
system:
kernel:
module:
jffs2:
install:
command: /bin/true


+ 36
- 0
metadata/service/system/cis/cis-1-1-1-4.yml

@@ -0,0 +1,36 @@
# 1.1.1.4 Ensure mounting of hfs filesystems is disabled
#
# Description
# ===========
# The hfs filesystem type is a hierarchical filesystem that allows
# you to mount Mac OS filesystems.
#
# Rationale
# =========
# Removing support for unneeded filesystem types reduces the local attack
# surface of the system. If this filesystem type is not needed, disable it.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v hfs
# install /bin/true
# # lsmod | grep hfs
# <No output>
#
# Remediation
# ===========
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install hfs /bin/true
#
parameters:
linux:
system:
kernel:
module:
hfs:
install:
command: /bin/true


+ 36
- 0
metadata/service/system/cis/cis-1-1-1-5.yml

@@ -0,0 +1,36 @@
# 1.1.1.5 Ensure mounting of hfsplus filesystems is disabled
#
# Description
# ===========
# The hfsplus filesystem type is a hierarchical filesystem designed to
# replace hfs that allows you to mount Mac OS filesystems.
#
# Rationale
# =========
# Removing support for unneeded filesystem types reduces the local attack
# surface of the system. If this filesystem type is not needed, disable it.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v hfsplus
# install /bin/true
# # lsmod | grep hfsplus
# <No output>
#
# Remediation
# ===========
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install hfsplus /bin/true
#
parameters:
linux:
system:
kernel:
module:
hfsplus:
install:
command: /bin/true


+ 43
- 0
metadata/service/system/cis/cis-1-1-1-6.yml

@@ -0,0 +1,43 @@
# 1.1.1.6 Ensure mounting of squashfs filesystems is disabled
#
# Description
# ===========
# The squashfs filesystem type is a compressed read-only Linux filesystem
# embedded in small footprint systems (similar to cramfs). A squashfs image
# can be used without having to first decompress the image.
#
# Rationale
# =========
# Removing support for unneeded filesystem types reduces the local attack
# surface of the server. If this filesystem type is not needed, disable it.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v squashfs
# install /bin/true
# # lsmod | grep squashfs
# <No output>
#
# Remediation
# ===========
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install squashfs /bin/true
#
# NOTE
# ====
# In Ubuntu 16.04 squashfs is built into the kernel, and the 'install' command
# from the modprobe.d dir has no effect. However, this is still checked by
# CIS-CAT in Ubuntu 16.04 benchmark v.1.0.0. This was removed in v.1.1.0.
#
parameters:
linux:
system:
kernel:
module:
squashfs:
install:
command: /bin/true


+ 38
- 0
metadata/service/system/cis/cis-1-1-1-7.yml

@@ -0,0 +1,38 @@
# 1.1.1.7 Ensure mounting of udf filesystems is disabled
#
# Description
# ===========
# The udf filesystem type is the universal disk format used to implement
# ISO/IEC 13346 and ECMA-167 specifications. This is an open vendor filesystem
# type for data storage on a broad range of media. This filesystem type is
# necessary to support writing DVDs and newer optical disc formats.
#
# Rationale
# =========
# Removing support for unneeded filesystem types reduces the local attack
# surface of the server. If this filesystem type is not needed, disable it.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v udf
# install /bin/true
# # lsmod | grep udf
# <No output>
#
# Remediation
# ===========
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install udf /bin/true
#
parameters:
linux:
system:
kernel:
module:
udf:
install:
command: /bin/true


+ 50
- 0
metadata/service/system/cis/cis-1-1-1-8.yml

@@ -0,0 +1,50 @@
# 1.1.1.8 Ensure mounting of FAT filesystems is disabled
#
# Description
# ===========
# The FAT filesystem format is primarily used on older windows systems and
# portable USB drives or flash modules. It comes in three types FAT12, FAT16,
# and FAT32 all of which are supported by the vfat kernel module.
#
# Rationale
# =========
# Removing support for unneeded filesystem types reduces the local attack
# surface of the server. If this filesystem type is not needed, disable it.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v vfat
# install /bin/true
# # lsmod | grep vfat
# <No output>
#
# Remediation
# ===========
#
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install vfat /bin/true
#
# Impact
# ======
# FAT filesystems are often used on portable USB sticks and other flash
# media that are commonly used to transfer files between workstations;
# removing VFAT support may prevent the ability to transfer files in this way.
#
# NOTE
# ====
# In Ubuntu 16.04 vfat is built into the kernel, and the 'install' command
# from the modprobe.d dir has no effect. However, this is still checked by
# CIS-CAT in Ubuntu 16.04 benchmark v.1.0.0. This was removed in v.1.1.0.
#
parameters:
linux:
system:
kernel:
module:
vfat:
install:
command: /bin/true
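
Each of these classes contributes one key under linux:system:kernel:module, so including several of them merges into a single map, for example (subset shown):

parameters:
  linux:
    system:
      kernel:
        module:
          cramfs:
            install:
              command: /bin/true
          freevxfs:
            install:
              command: /bin/true
          vfat:
            install:
              command: /bin/true

The formula's modprobe handling is then expected to render these as the install <module> /bin/true lines quoted in the remediation sections above.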


+ 95
- 0
metadata/service/system/cis/cis-1-1-14_15_16.yml

@@ -0,0 +1,95 @@
# CIS 1.1.14 Ensure nodev option set on /dev/shm partition (Scored)
#
# Description
# ===========
# The nodev mount option specifies that the filesystem cannot contain special
# devices.
#
# Rationale
# =========
# Since the /run/shm filesystem is not intended to support devices, set this
# option to ensure that users cannot attempt to create special devices in
# /dev/shm partitions.
#
# Audit
# =====
# Run the following command and verify that the nodev option is set on /dev/shm .
#
# # mount | grep /dev/shm
# shm on /dev/shm type tmpfs (rw,nosuid,nodev,noexec,relatime)
#
# Remediation
# ===========
#
# Edit the /etc/fstab file and add nodev to the fourth field (mounting options)
# for the /dev/shm partition. See the fstab(5) manual page for more information.
# Run the following command to remount /dev/shm :
#
# # mount -o remount,nodev /dev/shm
#
# CIS 1.1.15 Ensure nosuid option set on /dev/shm partition (Scored)
#
# Description
# ===========
# The nosuid mount option specifies that the filesystem cannot contain setuid
# files.
#
# Rationale
# =========
# Setting this option on a file system prevents users from introducing
# privileged programs onto the system and allowing non-root users to execute them.
#
# Audit
# =====
# Run the following command and verify that the nosuid option is set on /dev/shm .
#
# # mount | grep /dev/shm
# shm on /dev/shm type tmpfs (rw,nosuid,nodev,noexec,relatime)
#
# Remediation
# ===========
# Edit the /etc/fstab file and add nosuid to the fourth field (mounting options)
# for the /dev/shm partition. See the fstab(5) manual page for more information.
# Run the following command to remount /dev/shm :
#
# # mount -o remount,nosuid /dev/shm
#
# 1.1.16 Ensure noexec option set on /dev/shm partition (Scored)
#
# Description
# ===========
# The noexec mount option specifies that the filesystem cannot contain
# executable binaries.
#
# Rationale
# =========
# Setting this option on a file system prevents users from executing programs
# from shared memory. This deters users from introducing potentially malicious
# software on the system.
#
# Audit
# =====
# Run the following command and verify that the noexec option is set on /run/shm .
#
# # mount | grep /dev/shm
# shm on /dev/shm type tmpfs (rw,nosuid,nodev,noexec,relatime)
#
# Remediation
# ===========
# Edit the /etc/fstab file and add noexec to the fourth field (mounting options)
# for the /dev/shm partition. See the fstab(5) manual page for more information.
# Run the following command to remount /dev/shm :
#
# # mount -o remount,noexec /dev/shm
#
parameters:
linux:
storage:
mount:
ensure_dev_shm_mount_options:
enabled: true
file_system: tmpfs
device: shm
path: /dev/shm
opts: rw,nosuid,nodev,noexec,relatime
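
Assuming the mount state writes a matching /etc/fstab entry, the pillar above corresponds to a line of roughly this form:

shm  /dev/shm  tmpfs  rw,nosuid,nodev,noexec,relatime  0  0

i.e. device, mount point, filesystem type and the nodev/nosuid/noexec options required by controls 1.1.14-1.1.16.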


+ 53
- 0
metadata/service/system/cis/cis-1-1-21.yml

@@ -0,0 +1,53 @@
# CIS 1.1.21 Disable Automounting
#
# Description
# ===========
# autofs allows automatic mounting of devices, typically including CD/DVDs
# and USB drives.
#
# Rationale
# =========
# With automounting enabled anyone with physical access could attach a USB
# drive or disc and have its contents available in system even if they lacked
# permissions to mount it themselves.
#
# Audit
# =====
# Run the following command to verify autofs is not enabled:
#
# # systemctl is-enabled autofs
# disabled
#
# Verify result is not "enabled".
#
# Remediation
# ===========
#
# Run the following command to disable autofs :
#
# # systemctl disable autofs
#
# Impact
# ======
# The use of portable hard drives is very common for workstation users. If your
# organization allows the use of portable storage or media on workstations
# and physical access controls to workstations is considered adequate there
# is little value add in turning off automounting.
#
# Notes
# =====
# This control should align with the tolerance of the use of portable drives
# and optical media in the organization. On a server requiring an admin to
# manually mount media can be part of defense-in-depth to reduce the risk of
# unapproved software or information being introduced or proprietary software
# or information being exfiltrated. If admins commonly use flash drives and
# Server access has sufficient physical controls, requiring manual mounting
# may not increase security.
#
parameters:
linux:
system:
service:
autofs:
status: disabled


+ 59
- 0
metadata/service/system/cis/cis-1-5-1.yml

@@ -0,0 +1,59 @@
# CIS 1.5.1 Ensure core dumps are restricted (Scored)
#
# Description
# ===========
#
# A core dump is the memory of an executable program. It is generally used to determine
# why a program aborted. It can also be used to glean confidential information from a core
# file. The system provides the ability to set a soft limit for core dumps, but this can be
# overridden by the user.
#
# Rationale
# =========
#
# Setting a hard limit on core dumps prevents users from overriding the soft variable. If core
# dumps are required, consider setting limits for user groups (see limits.conf(5) ). In
# addition, setting the fs.suid_dumpable variable to 0 will prevent setuid programs from
# dumping core.
#
# Audit
# =====
#
# Run the following commands and verify output matches:
#
# # grep "hard core" /etc/security/limits.conf /etc/security/limits.d/*
# * hard core 0
# # sysctl fs.suid_dumpable
# fs.suid_dumpable = 0
#
# Remediation
# ===========
#
# Add the following line to the /etc/security/limits.conf file or a
# /etc/security/limits.d/* file:
#
# * hard core 0
#
# Set the following parameter in the /etc/sysctl.conf file:
#
# fs.suid_dumpable = 0
#
# Run the following command to set the active kernel parameter:
#
# # sysctl -w fs.suid_dumpable=0

parameters:
linux:
system:
limit:
cis:
enabled: true
domain: '*'
limits:
- type: 'hard'
item: 'core'
value: 0
kernel:
sysctl:
fs.suid_dumpable: 0


+ 40
- 0
metadata/service/system/cis/cis-1-5-3.yml

@@ -0,0 +1,40 @@
# 1.5.3 Ensure address space layout randomization (ASLR) is enabled
#
# Description
# ===========
#
# Address space layout randomization (ASLR) is an exploit mitigation technique which
# randomly arranges the address space of key data areas of a process.
#
# Rationale
# =========
#
# Randomly placing virtual memory regions will make it difficult to write memory page
# exploits as the memory placement will be consistently shifting.
#
# Audit
# =====
#
# Run the following command and verify output matches:
#
# # sysctl kernel.randomize_va_space
# kernel.randomize_va_space = 2
#
# Remediation
# ===========
#
# Set the following parameter in the /etc/sysctl.conf file:
#
# kernel.randomize_va_space = 2
#
# Run the following command to set the active kernel parameter:
#
# # sysctl -w kernel.randomize_va_space=2

parameters:
linux:
system:
kernel:
sysctl:
kernel.randomize_va_space: 2


+ 37
- 0
metadata/service/system/cis/cis-1-5-4.yml

@@ -0,0 +1,37 @@
# CIS 1.5.4 Ensure prelink is disabled
#
# Description
# ===========
# prelink is a program that modifies ELF shared libraries and ELF dynamically
# linked binaries in such a way that the time needed for the dynamic linker to
# perform relocations at startup significantly decreases.
#
# Rationale
# =========
# The prelinking feature can interfere with the operation of AIDE, because it
# changes binaries. Prelinking can also increase the vulnerability of the system
# if a malicious user is able to compromise a common library such as libc.
#
# Audit
# =====
# Run the following command and verify prelink is not installed:
#
# # dpkg -s prelink
#
# Remediation
# ===========
# Run the following command to restore binaries to normal:
#
# # prelink -ua
#
# Run the following command to uninstall prelink :
#
# # apt-get remove prelink
#
parameters:
linux:
system:
package:
prelink:
version: removed


+ 43
- 0
metadata/service/system/cis/cis-2-3-1.yml

@@ -0,0 +1,43 @@
# 2.3.1 Ensure NIS Client is not installed
#
# Description
# ===========
# The Network Information Service (NIS), formerly known as Yellow Pages,
# is a client-server directory service protocol used to distribute system
# configuration files. The NIS client ( ypbind ) was used to bind a machine
# to an NIS server and receive the distributed configuration files.
#
# Rationale
# =========
# The NIS service is inherently an insecure system that has been vulnerable
# to DOS attacks, buffer overflows and has poor authentication for querying
# NIS maps. NIS generally has been replaced by such protocols as Lightweight
# Directory Access Protocol (LDAP). It is recommended that the service be
# removed.
#
# Audit
# =====
# Run the following command and verify nis is not installed:
#
# dpkg -s nis
#
# Remediation
# ===========
# Run the following command to uninstall nis:
#
# apt-get remove nis
#
# Impact
# ======
# Many insecure service clients are used as troubleshooting tools and in
# testing environments. Uninstalling them can inhibit capability to test
# and troubleshoot. If they are required it is advisable to remove the clients
# after use to prevent accidental or intentional misuse.
#
parameters:
linux:
system:
package:
nis:
version: removed


+ 55
- 0
metadata/service/system/cis/cis-2-3-2.yml

@@ -0,0 +1,55 @@
# 2.3.2 Ensure rsh client is not installed
#
# Description
# ===========
# The rsh package contains the client commands for the rsh services.
#
# Rationale
# =========
# These legacy clients contain numerous security exposures and have been
# replaced with the more secure SSH package. Even if the server is removed,
# it is best to ensure the clients are also removed to prevent users from
# inadvertently attempting to use these commands and therefore exposing
# their credentials. Note that removing the rsh package removes the
# clients for rsh , rcp and rlogin .
#
# Audit
# =====
# Run the following commands and verify rsh is not installed:
#
# dpkg -s rsh-client
# dpkg -s rsh-redone-client
#
# Remediation
# ===========
# Run the following command to uninstall rsh :
#
# apt-get remove rsh-client rsh-redone-client
#
# Impact
# ======
# Many insecure service clients are used as troubleshooting tools and in
# testing environments. Uninstalling them can inhibit capability to test
# and troubleshoot. If they are required it is advisable to remove the
# clients after use to prevent accidental or intentional misuse.
#
# NOTE
# ====
# It is not possible to remove rsh-client by means of SaltStack because
# of the way SaltStack checks that the package was really removed. 'rsh-client'
# is "provided" by the openssh-client package, and SaltStack treats that
# the same as 'rsh-client is installed'. So each time we try to
# remove 'rsh-client' on a system where 'openssh-client' is installed
# (that is almost every system), we get a state failure.
# This was fixed in upstream SaltStack in 2018; it is not clear when we will
# start using that version. Until then 'rsh-client' should remain unmanaged.
#
parameters:
linux:
system:
package:
# rsh-client:
# version: removed
rsh-redone-client:
version: removed


+ 39
- 0
metadata/service/system/cis/cis-2-3-3.yml

@@ -0,0 +1,39 @@
# 2.3.3 Ensure talk client is not installed
#
# Description
# ===========
# The talk software makes it possible for users to send and receive messages
# across systems through a terminal session. The talk client, which allows
# initialization of talk sessions, is installed by default.
#
# Rationale
# =========
# The software presents a security risk as it uses unencrypted protocols
# for communication.
#
# Audit
# =====
# Run the following command and verify talk is not installed:
#
# dpkg -s talk
#
# Remediation
# ===========
# Run the following command to uninstall talk :
#
# apt-get remove talk
#
# Impact
# ======
# Many insecure service clients are used as troubleshooting tools and in
# testing environments. Uninstalling them can inhibit capability to test
# and troubleshoot. If they are required it is advisable to remove the clients
# after use to prevent accidental or intentional misuse.
#
parameters:
linux:
system:
package:
talk:
version: removed


+ 40
- 0
metadata/service/system/cis/cis-2-3-4.yml

@@ -0,0 +1,40 @@
# 2.3.4 Ensure telnet client is not installed
#
# Description
# ===========
# The telnet package contains the telnet client, which allows users to start
# connections to other systems via the telnet protocol.
#
# Rationale
# =========
# The telnet protocol is insecure and unencrypted. The use of an unencrypted
# transmission medium could allow an unauthorized user to steal credentials.
# The ssh package provides an encrypted session and stronger security and is
# included in most Linux distributions.
#
# Audit
# =====
# Run the following command and verify telnet is not installed:
#
# # dpkg -s telnet
#
# Remediation
# ===========
# Run the following command to uninstall telnet :
#
# # apt-get remove telnet
#
# Impact
# ======
# Many insecure service clients are used as troubleshooting tools and in
# testing environments. Uninstalling them can inhibit capability to test and
# troubleshoot. If they are required it is advisable to remove the clients
# after use to prevent accidental or intentional misuse.
#
parameters:
linux:
system:
package:
telnet:
version: removed


+ 44
- 0
metadata/service/system/cis/cis-3-1-2.yml

@@ -0,0 +1,44 @@
# 3.1.2 Ensure packet redirect sending is disabled
#
# Description
# ===========
# ICMP Redirects are used to send routing information to other hosts. As a host
# itself does not act as a router (in a host only configuration), there is
# no need to send redirects.
#
# Rationale
# =========
# An attacker could use a compromised host to send invalid ICMP redirects to
# other router devices in an attempt to corrupt routing and have users access
# a system set up by the attacker as opposed to a valid system.
#
# Audit
# =====
#
# Run the following commands and verify output matches:
#
# # sysctl net.ipv4.conf.all.send_redirects
# net.ipv4.conf.all.send_redirects = 0
# # sysctl net.ipv4.conf.default.send_redirects
# net.ipv4.conf.default.send_redirects = 0
#
# Remediation
# ===========
#
# Set the following parameters in the /etc/sysctl.conf file:
#
# net.ipv4.conf.all.send_redirects = 0
# net.ipv4.conf.default.send_redirects = 0
#
# Run the following commands to set the active kernel parameters:
#
# # sysctl -w net.ipv4.conf.all.send_redirects=0
# # sysctl -w net.ipv4.conf.default.send_redirects=0

parameters:
linux:
system:
kernel:
sysctl:
net.ipv4.conf.all.send_redirects: 0
net.ipv4.conf.default.send_redirects: 0

+ 56
- 0
metadata/service/system/cis/cis-3-2-1.yml

@@ -0,0 +1,56 @@
# 3.2.1 Ensure source routed packets are not accepted
#
# Description
# ===========
# In networking, source routing allows a sender to partially or fully specify
# the route packets take through a network. In contrast, non-source routed
# packets travel a path determined by routers in the network. In some cases,
# systems may not be routable or reachable from some locations (e.g. private
# addresses vs. Internet routable), and so source routed packets would need
# to be used.
#
# Rationale
# =========
# Setting `net.ipv4.conf.all.accept_source_route` and
# `net.ipv4.conf.default.accept_source_route` to 0 disables the system from
# accepting source routed packets. Assume this system was capable of routing
# packets to Internet routable addresses on one interface and private addresses
# on another interface. Assume that the private addresses were not routable to
# the Internet routable addresses and vice versa. Under normal routing
# circumstances, an attacker from the Internet routable addresses could not use
# the system as a way to reach the private address systems. If, however, source
# routed packets were allowed, they could be used to gain access to the private
# address systems as the route could be specified, rather than rely on routing
# protocols that did not allow this routing.
#
# Audit
# =====
#
# Run the following commands and verify output matches:
#
# # sysctl net.ipv4.conf.all.accept_source_route
# net.ipv4.conf.all.accept_source_route = 0
# # sysctl net.ipv4.conf.default.accept_source_route
# net.ipv4.conf.default.accept_source_route = 0
#
# Remediation
# ===========
#
# Set the following parameters in the /etc/sysctl.conf file:
#
# net.ipv4.conf.all.accept_source_route = 0
# net.ipv4.conf.default.accept_source_route = 0
#
# Run the following commands to set the active kernel parameters:
#
# # sysctl -w net.ipv4.conf.all.accept_source_route=0
# # sysctl -w net.ipv4.conf.default.accept_source_route=0
# # sysctl -w net.ipv4.route.flush=1

parameters:
linux:
system:
kernel:
sysctl:
net.ipv4.conf.all.accept_source_route: 0
net.ipv4.conf.default.accept_source_route: 0

+ 48
- 0
metadata/service/system/cis/cis-3-2-2.yml

@@ -0,0 +1,48 @@
# 3.2.2 Ensure ICMP redirects are not accepted
#
# Description
# ===========
# ICMP redirect messages are packets that convey routing information and tell
# your host (acting as a router) to send packets via an alternate path. It is
# a way of allowing an outside routing device to update your system routing
# tables. By setting net.ipv4.conf.all.accept_redirects to 0, the system will
# not accept any ICMP redirect messages, and therefore, won't allow outsiders
# to update the system's routing tables.
#
# Rationale
# =========
# Attackers could use bogus ICMP redirect messages to maliciously alter the
# system routing tables and get them to send packets to incorrect networks and
# allow your system packets to be captured.
#
# Audit
# =====
#
# Run the following commands and verify output matches:
#
# # sysctl net.ipv4.conf.all.accept_redirects
# net.ipv4.conf.all.accept_redirects = 0
# # sysctl net.ipv4.conf.default.accept_redirects
# net.ipv4.conf.default.accept_redirects = 0
#
# Remediation
# ===========
#
# Set the following parameters in the /etc/sysctl.conf file:
#
# net.ipv4.conf.all.accept_redirects = 0
# net.ipv4.conf.default.accept_redirects = 0
#
# Run the following commands to set the active kernel parameters:
#
# # sysctl -w net.ipv4.conf.all.accept_redirects=0
# # sysctl -w net.ipv4.conf.default.accept_redirects=0
# # sysctl -w net.ipv4.route.flush=1

parameters:
linux:
system:
kernel:
sysctl:
net.ipv4.conf.all.accept_redirects: 0
net.ipv4.conf.default.accept_redirects: 0

+ 45
- 0
metadata/service/system/cis/cis-3-2-3.yml

@@ -0,0 +1,45 @@
# 3.2.3 Ensure secure ICMP redirects are not accepted
#
# Description
# ===========
# Secure ICMP redirects are the same as ICMP redirects, except they come from
# gateways listed on the default gateway list. It is assumed that these
# gateways are known to your system, and that they are likely to be secure.
#
# Rationale
# =========
# It is still possible for even known gateways to be compromised. Setting
# net.ipv4.conf.all.secure_redirects to 0 protects the system from routing
# table updates by possibly compromised known gateways.
#
# Audit
# =====
#
# Run the following commands and verify output matches:
#
# # sysctl net.ipv4.conf.all.secure_redirects
# net.ipv4.conf.all.secure_redirects = 0
# # sysctl net.ipv4.conf.default.secure_redirects
# net.ipv4.conf.default.secure_redirects = 0
#
# Remediation
# ===========
#
# Set the following parameters in the /etc/sysctl.conf file:
#
# net.ipv4.conf.all.secure_redirects = 0
# net.ipv4.conf.default.secure_redirects = 0
#
# Run the following commands to set the active kernel parameters:
#
# # sysctl -w net.ipv4.conf.all.secure_redirects=0
# # sysctl -w net.ipv4.conf.default.secure_redirects=0
# # sysctl -w net.ipv4.route.flush=1

parameters:
linux:
system:
kernel:
sysctl:
net.ipv4.conf.all.secure_redirects: 0
net.ipv4.conf.default.secure_redirects: 0

+ 44
- 0
metadata/service/system/cis/cis-3-2-4.yml

@@ -0,0 +1,44 @@
# 3.2.4 Ensure suspicious packets are logged
#
# Description
# ===========
# When enabled, this feature logs packets with un-routable source
# addresses to the kernel log.
#
# Rationale
# =========
# Enabling this feature and logging these packets allows an administrator
# to investigate the possibility that an attacker is sending spoofed
# packets to their system.
#
# Audit
# =====
#
# Run the following commands and verify output matches:
#
# # sysctl net.ipv4.conf.all.log_martians
# net.ipv4.conf.all.log_martians = 1
# # sysctl net.ipv4.conf.default.log_martians
# net.ipv4.conf.default.log_martians = 1
#
# Remediation
# ===========
#
# Set the following parameters in the /etc/sysctl.conf file:
#
# net.ipv4.conf.all.log_martians = 1
# net.ipv4.conf.default.log_martians = 1
#
# Run the following commands to set the active kernel parameters:
#
# # sysctl -w net.ipv4.conf.all.log_martians=1
# # sysctl -w net.ipv4.conf.default.log_martians=1
# # sysctl -w net.ipv4.route.flush=1

parameters:
linux:
system:
kernel:
sysctl:
net.ipv4.conf.all.log_martians: 1
net.ipv4.conf.default.log_martians: 1

+ 45
- 0
metadata/service/system/cis/cis-3-2-5.yml

@@ -0,0 +1,45 @@
# 3.2.5 Ensure broadcast ICMP requests are ignored
#
# Description
# ===========
# Setting net.ipv4.icmp_echo_ignore_broadcasts to 1 will cause the
# system to ignore all ICMP echo and timestamp requests to broadcast
# and multicast addresses.
#
# Rationale
# =========
# Accepting ICMP echo and timestamp requests with broadcast or multicast
# destinations for your network could be used to trick your host into starting
# (or participating) in a Smurf attack. A Smurf attack relies on an attacker
# sending large amounts of ICMP broadcast messages with a spoofed source
# address. All hosts receiving this message and responding would send
# echo-reply messages back to the spoofed address, which is probably not
# routable. If many hosts respond to the packets, the amount of traffic on
# the network could be significantly multiplied.
#
# Audit
# =====
#
# Run the following commands and verify output matches:
#
# # sysctl net.ipv4.icmp_echo_ignore_broadcasts
# net.ipv4.icmp_echo_ignore_broadcasts = 1
#
# Remediation
# ===========
#
# Set the following parameter in the /etc/sysctl.conf file:
#
# net.ipv4.icmp_echo_ignore_broadcasts = 1
#
# Run the following commands to set the active kernel parameters:
#
# # sysctl -w net.ipv4.icmp_echo_ignore_broadcasts=1
# # sysctl -w net.ipv4.route.flush=1

parameters:
linux:
system:
kernel:
sysctl:
net.ipv4.icmp_echo_ignore_broadcasts: 1

+ 39
- 0
metadata/service/system/cis/cis-3-2-6.yml

@@ -0,0 +1,39 @@
# 3.2.6 Ensure bogus ICMP responses are ignored
#
# Description
# ===========
# Setting icmp_ignore_bogus_error_responses to 1 prevents the kernel from
# logging bogus responses (RFC-1122 non-compliant) from broadcast reframes,
# keeping file systems from filling up with useless log messages.
#
# Rationale
# =========
# Some routers (and some attackers) will send responses that violate RFC-1122
# and attempt to fill up a log file system with many useless error messages.
#
# Audit
# =====
#
# Run the following commands and verify output matches:
#
# # sysctl net.ipv4.icmp_ignore_bogus_error_responses
# net.ipv4.icmp_ignore_bogus_error_responses = 1
#
# Remediation
# ===========
#
# Set the following parameter in the /etc/sysctl.conf file:
#
# net.ipv4.icmp_ignore_bogus_error_responses = 1
#
# Run the following commands to set the active kernel parameters:
#
# # sysctl -w net.ipv4.icmp_ignore_bogus_error_responses=1
# # sysctl -w net.ipv4.route.flush=1

parameters:
linux:
system:
kernel:
sysctl:
net.ipv4.icmp_ignore_bogus_error_responses: 1

+ 51
- 0
metadata/service/system/cis/cis-3-2-7.yml

@@ -0,0 +1,51 @@
# 3.2.7 Ensure Reverse Path Filtering is enabled
#
# Description
# ===========
# Setting net.ipv4.conf.all.rp_filter and net.ipv4.conf.default.rp_filter to 1
# forces the Linux kernel to utilize reverse path filtering on a received
# packet to determine if the packet was valid. Essentially, with reverse path
# filtering, if the return packet does not go out the same interface that the
# corresponding source packet came from, the packet is dropped (and logged if
# log_martians is set).
#
# Rationale
# =========
# Setting these flags is a good way to deter attackers from sending your system
# bogus packets that cannot be responded to. One instance where this feature
# breaks down is if asymmetrical routing is employed. This would occur when
# using dynamic routing protocols (bgp, ospf, etc) on your system. If you are
# using asymmetrical routing on your system, you will not be able to enable
# this feature without breaking the routing.
#
# Audit
# =====
#
# Run the following commands and verify output matches:
#
# # sysctl net.ipv4.conf.all.rp_filter
# net.ipv4.conf.all.rp_filter = 1
# # sysctl net.ipv4.conf.default.rp_filter
# net.ipv4.conf.default.rp_filter = 1
#
# Remediation
# ===========
#
# Set the following parameters in the /etc/sysctl.conf file:
#
# net.ipv4.conf.all.rp_filter = 1
# net.ipv4.conf.default.rp_filter = 1
#
# Run the following commands to set the active kernel parameters:
#
# # sysctl -w net.ipv4.conf.all.rp_filter=1
# # sysctl -w net.ipv4.conf.default.rp_filter=1
# # sysctl -w net.ipv4.route.flush=1

parameters:
linux:
system:
kernel:
sysctl:
net.ipv4.conf.all.rp_filter: 1
net.ipv4.conf.default.rp_filter: 1

+ 49
- 0
metadata/service/system/cis/cis-3-2-8.yml

@@ -0,0 +1,49 @@
# 3.2.8 Ensure TCP SYN Cookies is enabled
#
# Description
# ===========
# When tcp_syncookies is set, the kernel will handle TCP SYN packets normally
# until the half-open connection queue is full, at which time, the SYN cookie
# functionality kicks in. SYN cookies work by not using the SYN queue at all.
# Instead, the kernel simply replies to the SYN with a SYN|ACK, but will
# include a specially crafted TCP sequence number that encodes the source and
# destination IP address and port number and the time the packet was sent.
# A legitimate connection would send the ACK packet of the three way handshake
# with the specially crafted sequence number. This allows the system to verify
# that it has received a valid response to a SYN cookie and allow the
# connection, even though there is no corresponding SYN in the queue.
#
# Rationale
# =========
# Attackers use SYN flood attacks to perform a denial of service attack on a
# system by sending many SYN packets without completing the three way handshake.
# This will quickly use up slots in the kernel's half-open connection queue and
# prevent legitimate connections from succeeding. SYN cookies allow the system
# to keep accepting valid connections, even if under a denial of service attack.
#
# Audit
# =====
#
# Run the following commands and verify output matches:
#
# # sysctl net.ipv4.tcp_syncookies
# net.ipv4.tcp_syncookies = 1
#
# Remediation
# ===========
#
# Set the following parameter in the /etc/sysctl.conf file:
#
# net.ipv4.tcp_syncookies = 1
#
# Run the following commands to set the active kernel parameters:
#
# # sysctl -w net.ipv4.tcp_syncookies=1
# # sysctl -w net.ipv4.route.flush=1

parameters:
linux:
system:
kernel:
sysctl:
net.ipv4.tcp_syncookies: 1
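
As with the kernel module classes, all of these controls land under linux:system:kernel:sysctl, so enabling several of them merges into one map, for example (a subset of the values defined above):

parameters:
  linux:
    system:
      kernel:
        sysctl:
          net.ipv4.conf.all.send_redirects: 0
          net.ipv4.conf.all.accept_redirects: 0
          net.ipv4.conf.all.log_martians: 1
          net.ipv4.icmp_echo_ignore_broadcasts: 1
          net.ipv4.tcp_syncookies: 1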

+ 35
- 0
metadata/service/system/cis/cis-3-3-3.yml

@@ -0,0 +1,35 @@
# CIS 3.3.3 Ensure IPv6 is disabled
#
# Description
# ===========
# Although IPv6 has many advantages over IPv4, few organizations have
# implemented IPv6.
#
# Rationale
# =========
# If IPv6 is not to be used, it is recommended that it be disabled to
# reduce the attack surface of the system.
#
# Audit
# ======
# Run the following command and verify that each linux line has
# the 'ipv6.disable=1' parameter set:
#
# # grep "^\s*linux" /boot/grub/grub.cfg
#
# Remediation
# ===========
# Edit /etc/default/grub and add 'ipv6.disable=1' to GRUB_CMDLINE_LINUX:
#
# GRUB_CMDLINE_LINUX="ipv6.disable=1"
#
# Run the following command to update the grub2 configuration:
#
# # update-grub
#
parameters:
linux:
system:
kernel:
boot_options:
- ipv6.disable=1

+ 38
- 0
metadata/service/system/cis/cis-3-5-1.yml

@@ -0,0 +1,38 @@
# 3.5.1 Ensure DCCP is disabled
#
# Description
# ===========
# The Datagram Congestion Control Protocol (DCCP) is a transport layer protocol
# that supports streaming media and telephony. DCCP provides a way to gain
# access to congestion control, without having to do it at the application
# layer, but does not provide in-sequence delivery.
#
# Rationale
# =========
# If the protocol is not required, it is recommended that the drivers not be
# installed to reduce the potential attack surface.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v dccp
# install /bin/true
# # lsmod | grep dccp
# <No output>
#
# Remediation
# ===========
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install dccp /bin/true
#
parameters:
linux:
system:
kernel:
module:
dccp:
install:
command: /bin/true


+ 41
- 0
metadata/service/system/cis/cis-3-5-2.yml

@@ -0,0 +1,41 @@
# 3.5.2 Ensure SCTP is disabled
#
# Description
# ===========
# The Stream Control Transmission Protocol (SCTP) is a transport layer
# protocol used to support message oriented communication, with several
# streams of messages in one connection. It serves a similar function as
# TCP and UDP, incorporating features of both. It is message-oriented
# like UDP, and ensures reliable in-sequence transport of messages with
# congestion control like TCP.
#
# Rationale
# =========
# If the protocol is not being used, it is recommended that the kernel module
# not be loaded, disabling the service to reduce the potential attack surface.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v sctp
# install /bin/true
# # lsmod | grep sctp
# <No output>
#
# Remediation
# ===========
#
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install sctp /bin/true
#
parameters:
linux:
system:
kernel:
module:
sctp:
install:
command: /bin/true


+ 37
- 0
metadata/service/system/cis/cis-3-5-3.yml

@@ -0,0 +1,37 @@
# 3.5.3 Ensure RDS is disabled
#
# Description
# ===========
# The Reliable Datagram Sockets (RDS) protocol is a transport layer protocol
# designed to provide low-latency, high-bandwidth communications between
# cluster nodes. It was developed by the Oracle Corporation.
#
# Rationale
# =========
# If the protocol is not being used, it is recommended that the kernel module
# not be loaded, disabling the service to reduce the potential attack surface.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v rds
# install /bin/true
# # lsmod | grep rds
# <No output>
#
# Remediation
# ===========
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install rds /bin/true
#
parameters:
linux:
system:
kernel:
module:
rds:
install:
command: /bin/true


+ 37
- 0
metadata/service/system/cis/cis-3-5-4.yml

@@ -0,0 +1,37 @@
# 3.5.4 Ensure TIPC is disabled
#
# Description
# ===========
# The Transparent Inter-Process Communication (TIPC) protocol is designed
# to provide communication between cluster nodes.
#
# Rationale
# =========
# If the protocol is not being used, it is recommended that the kernel module
# not be loaded, disabling the service to reduce the potential attack surface.
#
# Audit
# =====
# Run the following commands and verify the output is as indicated:
#
# # modprobe -n -v tipc
# install /bin/true
# # lsmod | grep tipc
# <No output>
#
# Remediation
# ===========
#
# Edit or create the file /etc/modprobe.d/CIS.conf and add the following line:
#
# install tipc /bin/true
#
parameters:
linux:
system:
kernel:
module:
tipc:
install:
command: /bin/true


+ 52
- 0
metadata/service/system/cis/cis-5-4-1-1.yml

@@ -0,0 +1,52 @@
# CIS 5.4.1.1 Ensure password expiration is 90 days or less (Scored)
#
# Description
# ===========
# The PASS_MAX_DAYS parameter in /etc/login.defs allows an administrator to
# force passwords to expire once they reach a defined age. It is recommended
# that the PASS_MAX_DAYS parameter be set to less than or equal to 90 days.
#
# Rationale
# =========
# The window of opportunity for an attacker to leverage compromised credentials
# or successfully compromise credentials via an online brute force attack is
# limited by the age of the password. Therefore, reducing the maximum age of a
# password also reduces an attacker's window of opportunity.
#
# Audit
# =====
# Run the following command and verify PASS_MAX_DAYS is 90 or less:
#
# # grep PASS_MAX_DAYS /etc/login.defs
# PASS_MAX_DAYS 90
#
# Verify all users with a password have their maximum days between password
# change set to 90 or less:
#
# # egrep ^[^:]+:[^\!*] /etc/shadow | cut -d: -f1
# <list of users>
# # chage --list <user>
# Maximum number of days between password change: 90
#
# Remediation
# ===========
# Set the PASS_MAX_DAYS parameter to 90 in /etc/login.defs :
#
# PASS_MAX_DAYS 90
#
# Modify user parameters for all users with a password set to match:
#
# # chage --maxdays 90 <user>
#
# Notes
# =====
# You can also check this setting in /etc/shadow directly. The 5th field
# should be 90 or less for all users with a password.
#
parameters:
linux:
system:
login_defs:
PASS_MAX_DAYS:
value: 90


+ 52
- 0
metadata/service/system/cis/cis-5-4-1-2.yml

@@ -0,0 +1,52 @@
# CIS 5.4.1.2 Ensure minimum days between password changes is 7 or more (Scored)
#
# Description
# ===========
# The PASS_MIN_DAYS parameter in /etc/login.defs allows an administrator to
# prevent users from changing their password until a minimum number of days
# have passed since the last time the user changed their password. It is
# recommended that PASS_MIN_DAYS parameter be set to 7 or more days.
#
# Rationale
# =========
# By restricting the frequency of password changes, an administrator can
# prevent users from repeatedly changing their password in an attempt to
# circumvent password reuse controls.
#
# Audit
# =====
# Run the following command and verify PASS_MIN_DAYS is 7 or more:
#
# # grep PASS_MIN_DAYS /etc/login.defs
# PASS_MIN_DAYS 7
#
# Verify all users with a password have their minimum days between password
# change set to 7 or more:
#
# # egrep ^[^:]+:[^\!*] /etc/shadow | cut -d: -f1
# <list of users>
# # chage --list <user>
# Minimum number of days between password change: 7
#
# Remediation
# ===========
# Set the PASS_MIN_DAYS parameter to 7 in /etc/login.defs :
#
# PASS_MIN_DAYS 7
#
# Modify user parameters for all users with a password set to match:
#
# # chage --mindays 7 <user>
#
# Notes
# =====
# You can also check this setting in /etc/shadow directly. The 4th field
# should be 7 or more for all users with a password.
#
parameters:
linux:
system:
login_defs:
PASS_MIN_DAYS:
value: 7


+ 52
- 0
metadata/service/system/cis/cis-5-4-1-3.yml

@@ -0,0 +1,52 @@
# CIS 5.4.1.3 Ensure password expiration warning days is 7 or more (Scored)
#
# Description
# ===========
# The PASS_WARN_AGE parameter in /etc/login.defs allows an administrator to
# notify users that their password will expire in a defined number of days.
# It is recommended that the PASS_WARN_AGE parameter be set to 7 or more days.
#
# Rationale
# =========
# Providing an advance warning that a password will be expiring gives users
# time to think of a secure password. Users caught unaware may choose a simple
# password or write it down where it may be discovered.
#
# Audit
# =====
# Run the following command and verify PASS_WARN_AGE is 7 or more:
#
# # grep PASS_WARN_AGE /etc/login.defs
# PASS_WARN_AGE 7
#
# Verify all users with a password have their number of days of warning before
# password expires set to 7 or more:
#
# # egrep ^[^:]+:[^\!*] /etc/shadow | cut -d: -f1
# <list of users>
# # chage --list <user>
# Number of days of warning before password expires: 7
#
# Remediation
# ===========
#
# Set the PASS_WARN_AGE parameter to 7 in /etc/login.defs :
#
# PASS_WARN_AGE 7
#
# Modify user parameters for all users with a password set to match:
#
# # chage --warndays 7 <user>
#
# Notes
# =====
# You can also check this setting in /etc/shadow directly. The 6th field
# should be 7 or more for all users with a password.
#
parameters:
linux:
system:
login_defs:
PASS_WARN_AGE:
value: 7
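
The three password-aging controls all feed linux:system:login_defs, so together they yield:

parameters:
  linux:
    system:
      login_defs:
        PASS_MAX_DAYS:
          value: 90
        PASS_MIN_DAYS:
          value: 7
        PASS_WARN_AGE:
          value: 7

which the formula's login.defs handling is then expected to render as the corresponding PASS_* lines quoted in the remediation sections above.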


Some files were not shown because too many files changed in this diff
