@@ -14,6 +14,13 @@ provisioner: | |||
formula: galera | |||
grains: | |||
noservices: True | |||
dependencies: | |||
- name: mysql | |||
repo: git | |||
source: https://github.com/salt-formulas/salt-formula-mysql.git | |||
- name: linux | |||
repo: git | |||
source: https://github.com/salt-formulas/salt-formula-linux.git | |||
state_top: | |||
base: | |||
"*": | |||
@@ -27,14 +34,6 @@ provisioner: | |||
- galeracluster_debian_repo | |||
pillars-from-files: | |||
galeracluster_debian_repo.sls: tests/pillar/repo_galeracluster.sls | |||
dependencies: | |||
- name: mysql | |||
repo: git | |||
source: https://github.com/salt-formulas/salt-formula-mysql.git | |||
dependencies: | |||
- name: linux | |||
repo: git | |||
source: https://github.com/salt-formulas/salt-formula-linux.git | |||
verifier: | |||
name: inspec |
@@ -25,7 +25,7 @@ before_script: | |||
- make test | tail | |||
script: | |||
- test ! -e .kitchen.yml || bundle exec kitchen test -t tests/integration | |||
- bundle exec kitchen test -t tests/integration | |||
notifications: | |||
webhooks: |
@@ -56,13 +56,50 @@ Galera cluster slave node | |||
user: root | |||
password: pass | |||
Additional mysql users: | |||
.. code-block:: yaml | |||
mysql: | |||
server: | |||
users: | |||
- name: clustercheck | |||
password: clustercheck | |||
database: '*.*' | |||
grants: PROCESS | |||
- name: inspector | |||
host: 127.0.0.1 | |||
password: password | |||
databases: | |||
- database: mydb | |||
  table: mytable | |||
  grant_option: True | |||
  grants: | |||
  - all privileges | |||
Additional check params: | |||
.. code-block:: yaml | |||
galera: | |||
clustercheck: | |||
enabled: True | |||
user: clustercheck | |||
password: clustercheck | |||
available_when_donor: 0 | |||
available_when_readonly: 1 | |||
port: 9200 | |||
Usage | |||
===== | |||
MySQL Galera check scripts | |||
.. code-block:: bash | |||
mysql> SHOW STATUS LIKE 'wsrep%'; | |||
mysql> SHOW STATUS LIKE 'wsrep_cluster_size'; |
@@ -0,0 +1,51 @@ | |||
{%- from "galera/map.jinja" import clustercheck %} | |||
{%- if clustercheck.get('enabled', False) %} | |||
clustercheck_dir: | |||
file.directory: | |||
- name: /usr/local/bin/ | |||
- user: root | |||
- group: root | |||
- mode: 750 | |||
- makedirs: True | |||
/usr/local/bin/mysql_clustercheck: | |||
file.managed: | |||
- source: salt://galera/files/clustercheck.sh | |||
- user: root | |||
- group: root | |||
- mode: 755 | |||
- require: | |||
- file: clustercheck_dir | |||
/etc/xinetd.d/mysql_clustercheck.conf: | |||
file.managed: | |||
- source: salt://galera/files/xinet.d.conf | |||
- template: jinja | |||
- makedirs: True | |||
- defaults: | |||
user: nobody | |||
server: '/usr/local/bin/clustercheck {{ clustercheck.get('user', 'clustercheck') }} {{ clustercheck.get('password', 'clustercheck') }} {{ clustercheck.get('available_when_donor', 0) }} {{ clustercheck.get('available_when_readonly', 0) }}' | |||
port: {{ clustercheck.get('port', 9200) }} | |||
flags: REUSE | |||
per_source: UNLIMITED | |||
- require: | |||
- file: /usr/local/bin/mysql_clustercheck | |||
{%- if not grains.get('noservices', False) %} | |||
- watch_in: | |||
- galera_xinetd_service | |||
{%- endif %} | |||
galera_xinetd_package: | |||
pkg.installed: | |||
- name: xinetd | |||
{%- if not grains.get('noservices', False) %} | |||
galera_xinetd_service: | |||
service.running: | |||
- name: xinetd | |||
- require: | |||
- pkg: xinetd | |||
{%- endif %} | |||
{%- endif %} | |||
@@ -0,0 +1,104 @@ | |||
#!/bin/bash | |||
# | |||
# Script to make a proxy (ie HAProxy) capable of monitoring Percona XtraDB Cluster nodes properly | |||
# | |||
# Author: Olaf van Zandwijk <olaf.vanzandwijk@nedap.com> | |||
# Author: Raghavendra Prabhu <raghavendra.prabhu@percona.com> | |||
# | |||
# Documentation and download: https://github.com/olafz/percona-clustercheck | |||
# | |||
# Based on the original script from Unai Rodriguez | |||
# | |||
if [[ $1 == '-h' || $1 == '--help' ]];then | |||
echo "Usage: $0 <user> <pass> <available_when_donor=0|1> <log_file> <available_when_readonly=0|1> <defaults_extra_file>" | |||
exit | |||
fi | |||
# if the disabled file is present, return 503. This allows | |||
# admins to manually remove a node from a cluster easily. | |||
if [ -e "/var/tmp/clustercheck.disabled" ]; then | |||
# Shell return-code is 1 | |||
echo -en "HTTP/1.1 503 Service Unavailable\r\n" | |||
echo -en "Content-Type: text/plain\r\n" | |||
echo -en "Connection: close\r\n" | |||
echo -en "Content-Length: 51\r\n" | |||
echo -en "\r\n" | |||
echo -en "Percona XtraDB Cluster Node is manually disabled.\r\n" | |||
sleep 0.1 | |||
exit 1 | |||
fi | |||
MYSQL_USERNAME="${1-clustercheckuser}" | |||
MYSQL_PASSWORD="${2-clustercheckpassword!}" | |||
AVAILABLE_WHEN_DONOR=${3:-0} | |||
ERR_FILE="${4:-/dev/null}" | |||
AVAILABLE_WHEN_READONLY=${5:-1} | |||
DEFAULTS_EXTRA_FILE=${6:-/etc/my.cnf} | |||
#Timeout exists for instances where mysqld may be hung | |||
TIMEOUT=10 | |||
EXTRA_ARGS="" | |||
if [[ -n "$MYSQL_USERNAME" ]]; then | |||
EXTRA_ARGS="$EXTRA_ARGS --user=${MYSQL_USERNAME}" | |||
fi | |||
if [[ -n "$MYSQL_PASSWORD" ]]; then | |||
EXTRA_ARGS="$EXTRA_ARGS --password=${MYSQL_PASSWORD}" | |||
fi | |||
if [[ -r $DEFAULTS_EXTRA_FILE ]];then | |||
MYSQL_CMDLINE="mysql --defaults-extra-file=$DEFAULTS_EXTRA_FILE -nNE --connect-timeout=$TIMEOUT \ | |||
${EXTRA_ARGS}" | |||
else | |||
MYSQL_CMDLINE="mysql -nNE --connect-timeout=$TIMEOUT ${EXTRA_ARGS}" | |||
fi | |||
# | |||
# Perform the query to check the wsrep_local_state | |||
# | |||
WSREP_STATUS=$($MYSQL_CMDLINE -e "SHOW STATUS LIKE 'wsrep_local_state';" \ | |||
2>${ERR_FILE} | tail -1 2>>${ERR_FILE}) | |||
if [[ "${WSREP_STATUS}" == "4" ]] || [[ "${WSREP_STATUS}" == "2" && ${AVAILABLE_WHEN_DONOR} == 1 ]] | |||
then | |||
# Check only when set to 0 to avoid latency in response. | |||
if [[ $AVAILABLE_WHEN_READONLY -eq 0 ]];then | |||
READ_ONLY=$($MYSQL_CMDLINE -e "SHOW GLOBAL VARIABLES LIKE 'read_only';" \ | |||
2>${ERR_FILE} | tail -1 2>>${ERR_FILE}) | |||
if [[ "${READ_ONLY}" == "ON" ]];then | |||
# Percona XtraDB Cluster node local state is 'Synced', but it is in | |||
# read-only mode. The variable AVAILABLE_WHEN_READONLY is set to 0. | |||
# => return HTTP 503 | |||
# Shell return-code is 1 | |||
echo -en "HTTP/1.1 503 Service Unavailable\r\n" | |||
echo -en "Content-Type: text/plain\r\n" | |||
echo -en "Connection: close\r\n" | |||
echo -en "Content-Length: 43\r\n" | |||
echo -en "\r\n" | |||
echo -en "Percona XtraDB Cluster Node is read-only.\r\n" | |||
sleep 0.1 | |||
exit 1 | |||
fi | |||
fi | |||
# Percona XtraDB Cluster node local state is 'Synced' => return HTTP 200 | |||
# Shell return-code is 0 | |||
echo -en "HTTP/1.1 200 OK\r\n" | |||
echo -en "Content-Type: text/plain\r\n" | |||
echo -en "Connection: close\r\n" | |||
echo -en "Content-Length: 40\r\n" | |||
echo -en "\r\n" | |||
echo -en "Percona XtraDB Cluster Node is synced.\r\n" | |||
sleep 0.1 | |||
exit 0 | |||
else | |||
# Percona XtraDB Cluster node local state is not 'Synced' => return HTTP 503 | |||
# Shell return-code is 1 | |||
echo -en "HTTP/1.1 503 Service Unavailable\r\n" | |||
echo -en "Content-Type: text/plain\r\n" | |||
echo -en "Connection: close\r\n" | |||
echo -en "Content-Length: 44\r\n" | |||
echo -en "\r\n" | |||
echo -en "Percona XtraDB Cluster Node is not synced.\r\n" | |||
sleep 0.1 | |||
exit 1 | |||
fi |
@@ -0,0 +1,20 @@ | |||
# default: {{ default_state|default('on') }} | |||
# description: {{ name }} | |||
service {{ name }}: | |||
{ | |||
disable = {{ disable|default('no') }} | |||
{%- if flags is defined %} | |||
flags = {{ flags }} | |||
{%- endif %} | |||
socket_type = {{ socket_type|default('stream') }} | |||
port = {{ port }} | |||
wait = {{ wait|default('no') }} | |||
user = {{ user }} | |||
server = {{ server }} | |||
log_on_failure += {{ log_on_failure|default('USERID') }} | |||
only_from = {{ only_from|default('0.0.0.0/0') }} | |||
{%- if per_source is defined %} | |||
per_source = {{ per_source }} | |||
{%- endif %} | |||
} |
@@ -7,6 +7,9 @@ include: | |||
{%- if pillar.galera.slave is defined %} | |||
- galera.slave | |||
{%- endif %} | |||
{%- if pillar.galera.clustercheck is defined %} | |||
- galera.clustercheck | |||
{%- endif %} | |||
{%- if pillar.galera.monitor is defined %} | |||
- galera.monitor | |||
{%- endif %} |
@@ -55,3 +55,15 @@ | |||
'config': '/etc/mysql/my.cnf', | |||
}, | |||
}, grain='oscodename', merge=pillar.galera.get('slave', {}))) %} | |||
{% set clustercheck = salt['grains.filter_by']({ | |||
'default': { | |||
'clustercheck': { | |||
'enabled': True, | |||
'user': clustercheck, | |||
'password': clustercheck, | |||
'port': '9200' | |||
}, | |||
}, | |||
}, merge=pillar.galera.get('clustercheck', {})) %} | |||
@@ -115,6 +115,7 @@ galera_init_script: | |||
- defaults: | |||
service: {{ master|yaml }} | |||
- template: jinja | |||
- timeout: 1800 | |||
galera_bootstrap_script: | |||
file.managed: | |||
@@ -146,6 +147,7 @@ galera_init_start_service: | |||
- require: | |||
- file: galera_run_dir | |||
- file: galera_init_script | |||
- timeout: 1800 | |||
galera_bootstrap_set_root_password: | |||
cmd.run: |
@@ -1,20 +1,33 @@ | |||
{%- if pillar.get('mysql', {}).server is defined %} | |||
{%- from "mysql/map.jinja" import mysql_connection_args as connection with context %} | |||
{%- set server = pillar.mysql.server %} | |||
{%- for database_name, database in server.get('database', {}).iteritems() %} | |||
{%- if not grains.get('noservices', False) %} | |||
mysql_database_{{ database_name }}: | |||
mysql_database.present: | |||
- name: {{ database_name }} | |||
- character_set: {{ database.get('encoding', 'utf8') }} | |||
- connection_user: {{ connection.user }} | |||
- connection_pass: {{ connection.password }} | |||
- connection_charset: {{ connection.charset }} | |||
{%- endif %} | |||
{%- for user in database.users %} | |||
{%- if not grains.get('noservices', False) %} | |||
mysql_user_{{ user.name }}_{{ database_name }}_{{ user.host }}: | |||
mysql_user.present: | |||
- host: '{{ user.host }}' | |||
- name: '{{ user.name }}' | |||
{%- if user.password is defined %} | |||
- password: {{ user.password }} | |||
{%- else %} | |||
- allow_passwordless: true | |||
{%- endif %} | |||
- connection_user: {{ connection.user }} | |||
- connection_pass: {{ connection.password }} | |||
- connection_charset: {{ connection.charset }} | |||
mysql_grants_{{ user.name }}_{{ database_name }}_{{ user.host }}: | |||
mysql_grants.present: | |||
@@ -22,14 +35,16 @@ mysql_grants_{{ user.name }}_{{ database_name }}_{{ user.host }}: | |||
- database: '{{ database_name }}.*' | |||
- user: '{{ user.name }}' | |||
- host: '{{ user.host }}' | |||
- connection_user: {{ connection.user }} | |||
- connection_pass: {{ connection.password }} | |||
- connection_charset: {{ connection.charset }} | |||
- require: | |||
- mysql_user: mysql_user_{{ user.name }}_{{ database_name }}_{{ user.host }} | |||
- mysql_database: mysql_database_{{ database_name }} | |||
{%- endif %} | |||
{%- endfor %} | |||
{%- if database.initial_data is defined %} | |||
/root/mysql/scripts/restore_{{ database_name }}.sh: | |||
file.managed: | |||
- source: salt://mysql/conf/restore.sh | |||
@@ -49,24 +64,66 @@ restore_mysql_database_{{ database_name }}: | |||
- cwd: /root | |||
- require: | |||
- file: /root/mysql/scripts/restore_{{ database_name }}.sh | |||
{%- endif %} | |||
{%- endfor %} | |||
{%- if not grains.get('noservices', False) %} | |||
{%- for user in server.get('users', []) %} | |||
mysql_user_{{ user.name }}_{{ user.host }}: | |||
{%- for host in user.get('hosts', user.get('host', 'localhost'))|sequence %} | |||
{%- if not grains.get('noservices', False) %} | |||
mysql_user_{{ user.name }}_{{ host }}: | |||
mysql_user.present: | |||
- host: '{{ user.host }}' | |||
- host: '{{ host }}' | |||
- name: '{{ user.name }}' | |||
{%- if user.password is defined %} | |||
- password: {{ user.password }} | |||
{%- if user['password_hash'] is defined %} | |||
- password_hash: '{{ user.password_hash }}' | |||
{%- elif user['password'] is defined and user['password'] != None %} | |||
- password: '{{ user.password }}' | |||
{%- else %} | |||
- allow_passwordless: True | |||
{%- endif %} | |||
- connection_user: {{ connection.user }} | |||
- connection_pass: {{ connection.password }} | |||
- connection_charset: {{ connection.charset }} | |||
{%- if 'grants' in user %} | |||
mysql_user_{{ user.name }}_{{ host }}_grants: | |||
mysql_grants.present: | |||
- name: {{ user.name }} | |||
- grant: {{ user['grants']|sequence|join(",") }} | |||
- database: user.get('database','*.*') | |||
- grant_option: {{ user['grant_option'] | default(False) }} | |||
- user: {{ user.name }} | |||
- host: '{{ host }}' | |||
- connection_user: {{ connection.user }} | |||
- connection_pass: {{ connection.password }} | |||
- connection_charset: {{ connection.charset }} | |||
- require: | |||
- mysql_user_{{ user.name }}_{{ host }} | |||
{%- endif %} | |||
{%- if 'databases' in user %} | |||
{%- for db in user['databases'] %} | |||
mysql_user_{{ user.name }}_{{ host }}_grants_db_{{ db.database }}_{{ loop.index0 }}: | |||
mysql_grants.present: | |||
- name: {{ user.name ~ '_' ~ db['database'] ~ '_' ~ db['table'] | default('all') }} | |||
- grant: {{db['grants']|sequence|join(",")}} | |||
- database: '{{ db['database'] }}.{{ db['table'] | default('*') }}' | |||
- grant_option: {{ db['grant_option'] | default(False) }} | |||
- user: {{ user.name }} | |||
- host: '{{ host }}' | |||
- connection_user: {{ connection.user }} | |||
- connection_pass: {{ connection.password }} | |||
- connection_charset: {{ connection.charset }} | |||
- require: | |||
- mysql_user_{{ user.name }}_{{ host }} | |||
- mysql_database_{{ db.database }} | |||
{%- endfor %} | |||
{%- endif %} | |||
{%- endif %} | |||
{%- endfor %} | |||
{%- endfor %} | |||
{%- endif %} | |||
{%- endif %} |
@@ -146,6 +146,7 @@ galera_init_start_service: | |||
- require: | |||
- file: galera_run_dir | |||
- file: galera_init_script | |||
- timeout: 1800 | |||
galera_bootstrap_set_root_password: | |||
cmd.run: | |||
@@ -186,6 +187,7 @@ galera_bootstrap_start_service_final: | |||
- require: | |||
- file: galera_bootstrap_init_config | |||
- file: galera_bootstrap_script | |||
- timeout: 1800 | |||
galera_bootstrap_finish_flag: | |||
file.touch: | |||
@@ -210,12 +212,12 @@ galera_config: | |||
{%- endif %} | |||
{%- if not grains.get('noservices', False) %} | |||
galera_service: | |||
service.running: | |||
- name: {{ slave.service }} | |||
- enable: true | |||
- reload: true | |||
{%- endif %} | |||
{%- endif %} |
@@ -0,0 +1,8 @@ | |||
describe file('/etc/xinetd.d/mysql_clustercheck.conf') do | |||
it('should exist') | |||
its('content') { should match /clustercheck clustercheck password 1 1/ } | |||
end | |||
describe file('/usr/local/bin/mysql_clustercheck') do | |||
it('should exist') | |||
end |
@@ -0,0 +1,8 @@ | |||
describe file('/etc/xinetd.d/mysql_clustercheck.conf') do | |||
it('should exist') | |||
its('content') { should match /clustercheck clustercheck password 1 1/ } | |||
end | |||
describe file('/usr/local/bin/mysql_clustercheck') do | |||
it('should exist') | |||
end |
@@ -7,7 +7,7 @@ | |||
port: 3306 | |||
maintenance_password: password | |||
admin: | |||
user: user | |||
user: root | |||
password: password | |||
members: | |||
- host: 127.0.0.1 | |||
@@ -16,6 +16,13 @@ | |||
port: 4567 | |||
- host: 127.0.0.1 | |||
port: 4567 | |||
clustercheck: | |||
enabled: True | |||
user: clustercheck | |||
password: password | |||
available_when_donor: 1 | |||
available_when_readonly: 1 | |||
port: 9200 | |||
mysql: | |||
server: | |||
users: | |||
@@ -25,3 +32,17 @@ | |||
host: '%' | |||
- name: haproxy | |||
host: 127.0.0.1 | |||
- name: clustercheck | |||
#host: localhost | |||
password: password | |||
database: '*.*' | |||
grants: PROCESS | |||
- name: inspector | |||
host: 127.0.0.1 | |||
password: password | |||
databases: | |||
- database: mydb | |||
table: mytable | |||
grant_option: True | |||
grants: | |||
- all privileges |
@@ -7,7 +7,7 @@ | |||
port: 3306 | |||
maintenance_password: password | |||
admin: | |||
user: user | |||
user: root | |||
password: password | |||
members: | |||
- host: 127.0.0.1 | |||
@@ -16,6 +16,13 @@ | |||
port: 4567 | |||
- host: 127.0.0.1 | |||
port: 4567 | |||
clustercheck: | |||
enabled: True | |||
user: clustercheck | |||
password: password | |||
available_when_donor: 1 | |||
available_when_readonly: 1 | |||
port: 9200 | |||
mysql: | |||
server: | |||
users: | |||
@@ -24,4 +31,9 @@ | |||
- name: haproxy | |||
host: '%' | |||
- name: haproxy | |||
host: 127.0.0.1 | |||
host: 127.0.0.1 | |||
- name: clustercheck | |||
#host: localhost | |||
password: password | |||
database: '*.*' | |||
grants: PROCESS |
@@ -126,6 +126,7 @@ prepare() { | |||
run() { | |||
for pillar in ${PILLARDIR}/*.sls; do | |||
state_name=$(basename ${pillar%.sls}) | |||
salt_run grains.set 'noservices' False force=True | |||
salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1) | |||
done | |||
} |