Browse Source

add clustercheck script (avoid splitbrain)

pr/27
Petr Michalec 7 years ago
parent
commit
89c8c3fc5b
No account linked to committer's email address
16 changed files with 345 additions and 45 deletions
  1. +7
    -8
      .kitchen.yml
  2. +1
    -1
      .travis.yml
  3. +16
    -3
      README.rst
  4. +45
    -0
      galera/clustercheck.sls
  5. +138
    -0
      galera/files/clustercheck.sh
  6. +24
    -0
      galera/files/xinet.d.conf
  7. +3
    -0
      galera/init.sls
  8. +16
    -0
      galera/map.jinja
  9. +2
    -0
      galera/master.sls
  10. +40
    -20
      galera/server.sls
  11. +4
    -2
      galera/slave.sls
  12. +10
    -0
      tests/integration/master_cluster/checks_clustercheck_spec.rb
  13. +10
    -0
      tests/integration/slave_cluster/checks_clustercheck_spec.rb
  14. +14
    -9
      tests/pillar/master_cluster.sls
  15. +14
    -2
      tests/pillar/slave_cluster.sls
  16. +1
    -0
      tests/run_tests.sh

+ 7
- 8
.kitchen.yml View File

formula: galera formula: galera
grains: grains:
noservices: True noservices: True
dependencies:
- name: mysql
repo: git
source: https://github.com/salt-formulas/salt-formula-mysql.git
- name: linux
repo: git
source: https://github.com/salt-formulas/salt-formula-linux.git
state_top: state_top:
base: base:
"*": "*":
- galeracluster_debian_repo - galeracluster_debian_repo
pillars-from-files: pillars-from-files:
galeracluster_debian_repo.sls: tests/pillar/repo_galeracluster.sls galeracluster_debian_repo.sls: tests/pillar/repo_galeracluster.sls
dependencies:
- name: mysql
repo: git
source: https://github.com/salt-formulas/salt-formula-mysql.git
dependencies:
- name: linux
repo: git
source: https://github.com/salt-formulas/salt-formula-linux.git


verifier: verifier:
name: inspec name: inspec

+ 1
- 1
.travis.yml View File

- make test | tail - make test | tail


script: script:
- test ! -e .kitchen.yml || bundle exec kitchen test -t tests/integration
- bundle exec kitchen test -t tests/integration


notifications: notifications:
webhooks: webhooks:

+ 16
- 3
README.rst View File

password: clustercheck password: clustercheck
database: '*.*' database: '*.*'
grants: PROCESS grants: PROCESS
grant_option: False
- name: inspector - name: inspector
host: 127.0.0.1 host: 127.0.0.1
password: password password: password
mydb: mydb:
- database: mydb - database: mydb
- table: mytable - table: mytable
- grant_option: False
- grant_option: True
- grants: - grants:
- all privileges - all privileges


Additional check params:

.. code-block:: yaml

galera:
clustercheck:
- enabled: True
- user: clustercheck
- password: clustercheck
- available_when_donor: 0
- available_when_readonly: 1
- port: 9200


Usage Usage
===== =====


MySQL Galera check scripts MySQL Galera check scripts


.. code-block:: bash .. code-block:: bash
mysql> SHOW STATUS LIKE 'wsrep%'; mysql> SHOW STATUS LIKE 'wsrep%';


mysql> SHOW STATUS LIKE 'wsrep_cluster_size' ;" mysql> SHOW STATUS LIKE 'wsrep_cluster_size' ;"

+ 45
- 0
galera/clustercheck.sls View File

{%- from "galera/map.jinja" import clustercheck with context %}

{%- if clustercheck.get('enabled', False) %}

{#- Clustercheck script itself; served by xinetd so HAProxy can health-check
    Galera nodes over HTTP (default port 9200). #}
/usr/local/bin/mysql_clustercheck:
  file.managed:
  - source: salt://galera/files/clustercheck.sh
  - user: root
  - group: root
  - mode: 755
  - dir_mode: 755
  - makedirs: True

/etc/xinetd.d/mysql_clustercheck:
  file.managed:
  - source: salt://galera/files/xinet.d.conf
  - template: jinja
  - makedirs: True
  - defaults:
      name: mysqlchk
      user: nobody
      server: '/usr/local/bin/mysql_clustercheck'
      server_args: '{{ clustercheck.get('user', 'clustercheck') }} {{ clustercheck.get('password', 'clustercheck') }} available_when_donor={{ clustercheck.get('available_when_donor', 0) }} /dev/null available_when_readonly={{ clustercheck.get('available_when_readonly', 0) }} {{ clustercheck.config }}'
      port: {{ clustercheck.get('port', 9200) }}
      flags: REUSE
      per_source: UNLIMITED
  - require:
    - file: /usr/local/bin/mysql_clustercheck
{%- if not grains.get('noservices', False) %}
  {#- Requisites must be single-key "module: id" dicts; a bare state ID
      ("- galera_xinetd_service") is rejected by Salt. #}
  - watch_in:
    - service: galera_xinetd_service
{%- endif %}

galera_xinetd_package:
  pkg.installed:
  - name: xinetd

{%- if not grains.get('noservices', False) %}
galera_xinetd_service:
  service.running:
  - name: xinetd
  - require:
    {#- Reference the state ID rather than relying on name-matching. #}
    - pkg: galera_xinetd_package
{%- endif %}
{%- endif %}

+ 138
- 0
galera/files/clustercheck.sh View File

#!/bin/bash
#
# Script to make a proxy (ie HAProxy) capable of monitoring MySQL Cluster nodes properly
#
# Author: Olaf van Zandwijk <olaf.vanzandwijk@nedap.com>
# Author: Raghavendra Prabhu <raghavendra.prabhu@percona.com>
# Author: Petr Michalec <pmichalec@mirantis.com>
#
# Documentation and download: https://github.com/epcim/percona-clustercheck
#
# Based on the original script from Unai Rodriguez
#

# Write a minimal HTTP/1.1 response to stdout (xinetd wires stdout to the
# client socket).  $1 = HTTP status code, $2 = response body text.
function httpReply(){
    local status="${1}"
    local body="${2}"

    # Brief pause so slow peers do not drop the connection mid-reply, see
    # https://serverfault.com/questions/504756/curl-failure-when-receiving-data-from-peer-using-percona-xtradb-cluster-check
    sleep 0.1

    case "${status}" in
        503) echo -en "HTTP/1.1 503 Service Unavailable\r\n" ;;
        404) echo -en "HTTP/1.1 404 Not Found\r\n" ;;
        401) echo -en "HTTP/1.1 401 Unauthorized\r\n" ;;
        200) echo -en "HTTP/1.1 200 OK\r\n" ;;
        *)   echo -en "HTTP/1.1 ${status}\r\n" ;;
    esac

    echo -en "Content-Type: text/plain\r\n"
    echo -en "Connection: close\r\n"
    echo -en "Content-Length: ${#body}\r\n"
    echo -en "\r\n"
    echo -en "${body}"
    echo -en "\r\n"
    sleep 0.1
}

if [[ $1 == '-h' || $1 == '--help' ]];then
    echo "Usage: $0 <user> <pass> <available_when_donor=0|1> <log_file> <available_when_readonly=0|1> <defaults_extra_file> <timeout>"
    exit
fi

# if the disabled file is present, return 503. This allows
# admins to manually remove a node from a cluster easily.
if [ -e "/var/tmp/clustercheck.disabled" ]; then
    # Shell return-code is 1
    httpReply "503" "MySQL Cluster Node is manually disabled.\r\n"
    exit 1
fi

MYSQL_USERNAME="${1-clustercheckuser}"
MYSQL_PASSWORD="${2-clustercheckpassword!}"
# Accept either a bare 0/1 or the self-describing "available_when_donor=1"
# form that galera/clustercheck.sls renders into the xinetd server_args;
# strip the "key=" prefix so the numeric comparisons below actually work.
AVAILABLE_WHEN_DONOR=${3:-0}
AVAILABLE_WHEN_DONOR=${AVAILABLE_WHEN_DONOR#available_when_donor=}
ERR_FILE="${4:-/dev/null}"
AVAILABLE_WHEN_READONLY=${5:-1}
AVAILABLE_WHEN_READONLY=${AVAILABLE_WHEN_READONLY#available_when_readonly=}
DEFAULTS_EXTRA_FILE=${6:-/etc/my.cnf}
# Timeout exists for instances where mysqld may be hung
# Default value considers the Galera timeouts
TIMEOUT=${7:-18}

EXTRA_ARGS=""
if [[ -n "$MYSQL_USERNAME" ]]; then
    EXTRA_ARGS="$EXTRA_ARGS --user=${MYSQL_USERNAME}"
fi
if [[ -n "$MYSQL_PASSWORD" ]]; then
    EXTRA_ARGS="$EXTRA_ARGS --password=${MYSQL_PASSWORD}"
fi
if [[ -r $DEFAULTS_EXTRA_FILE ]];then
    MYSQL_CMDLINE="mysql --defaults-extra-file=$DEFAULTS_EXTRA_FILE -nNE --connect-timeout=$TIMEOUT \
                   ${EXTRA_ARGS}"
else
    MYSQL_CMDLINE="mysql -nNE --connect-timeout=$TIMEOUT ${EXTRA_ARGS}"
fi
#
# Perform the query to check the wsrep_local_state
#
WSREP_STATUS=$($MYSQL_CMDLINE -e "SHOW STATUS LIKE 'wsrep_local_state';" \
    2>${ERR_FILE} | tail -1 2>>${ERR_FILE}; exit ${PIPESTATUS[0]})
mysql_ret=$?

if [[ $mysql_ret -eq 1 || $mysql_ret -eq 127 ]]; then
    # Distinguish "mysql client missing" from "login failed".
    # NOTE(review): the original tested `command -v "$MYSQL_CMD"` with an
    # undefined variable, which always failed and so misreported every auth
    # error as 404.  Also discard command -v's stdout (the resolved path)
    # so it cannot leak into the HTTP reply stream.
    if ! command -v mysql >/dev/null 2>&1; then
        # mysql program not found
        # => return HTTP 404
        # Shell return-code is 2
        httpReply "404" "Mysql command not found or service is not running.\r\n"
        exit 2
    fi

    # Failed mysql login
    # => return HTTP 401
    # Shell return-code is 2
    httpReply "401" "Access denied to database.\r\n"
    exit 2
fi

# wsrep_local_state 4 = Synced; 2 = Donor/Desynced (acceptable only when
# AVAILABLE_WHEN_DONOR=1).
if [[ "${WSREP_STATUS}" == "4" ]] || [[ "${WSREP_STATUS}" == "2" && ${AVAILABLE_WHEN_DONOR} == 1 ]]
then
    # Check only when set to 0 to avoid latency in response.
    if [[ $AVAILABLE_WHEN_READONLY -eq 0 ]];then
        READ_ONLY=$($MYSQL_CMDLINE -e "SHOW GLOBAL VARIABLES LIKE 'read_only';" \
            2>${ERR_FILE} | tail -1 2>>${ERR_FILE})

        if [[ "${READ_ONLY}" == "ON" ]];then
            # MySQL Cluster node local state is 'Synced', but it is in
            # read-only mode. The variable AVAILABLE_WHEN_READONLY is set to 0.
            # => return HTTP 503
            # Shell return-code is 1
            httpReply "503" "MySQL Cluster Node is read-only.\r\n"
            exit 1
        fi
    fi
    # MySQL Cluster node local state is 'Synced' => return HTTP 200
    # Shell return-code is 0
    httpReply "200" "MySQL Cluster Node is synced.\r\n"
    exit 0
else
    # MySQL Cluster node local state is not 'Synced' => return HTTP 503
    # Shell return-code is 1
    if [[ -z "${WSREP_STATUS}" ]]
    then
        httpReply "503" "Received empty reply from MySQL Cluster Node.\r\nMight be a permission issue, check the credentials used by ${0}\r\n"
    else
        httpReply "503" "MySQL Cluster Node is not synced.\r\n"
    fi
    exit 1
fi

+ 24
- 0
galera/files/xinet.d.conf View File

# default: {{ default_state|default('on') }}
# description: {{ name }}

{#- xinetd stanza syntax is "service <name>" followed by a braced block;
    there is no colon after the service name (xinetd.conf(5)). #}
service {{ name }}
{
    disable         = {{ disable|default('no') }}
{%- if flags is defined %}
    flags           = {{ flags }}
{%- endif %}
    socket_type     = {{ socket_type|default('stream') }}
    port            = {{ port }}
    wait            = {{ wait|default('no') }}
    user            = {{ user }}
    server          = {{ server }}
{%- if server_args is defined %}
    server_args     = {{ server_args }}
{%- endif %}
    log_on_failure  += {{ log_on_failure|default('USERID') }}
    only_from       = {{ only_from|default('0.0.0.0/0') }}
    type            = {{ type|default('UNLISTED') }}
{%- if per_source is defined %}
    per_source      = {{ per_source }}
{%- endif %}
}

+ 3
- 0
galera/init.sls View File

{%- if pillar.galera.slave is defined %} {%- if pillar.galera.slave is defined %}
- galera.slave - galera.slave
{%- endif %} {%- endif %}
{%- if pillar.galera.clustercheck is defined %}
- galera.clustercheck
{%- endif %}
{%- if pillar.galera.monitor is defined %} {%- if pillar.galera.monitor is defined %}
- galera.monitor - galera.monitor
{%- endif %} {%- endif %}

+ 16
- 0
galera/map.jinja View File

'config': '/etc/mysql/my.cnf', 'config': '/etc/mysql/my.cnf',
}, },
}, grain='oscodename', merge=pillar.galera.get('slave', {}))) %} }, grain='oscodename', merge=pillar.galera.get('slave', {}))) %}

{#- Clustercheck defaults, overridable via pillar galera:clustercheck.
    'user' and 'password' must be quoted string literals: the bare name
    `clustercheck` would reference the very variable this `set` is defining
    (undefined at this point) and render as nothing. #}
{% set clustercheck = salt['grains.filter_by']({
    'default': {
        'enabled': False,
        'user': 'clustercheck',
        'password': 'clustercheck',
        'port': '9200'
    },
    'Debian': {
        'config': '/etc/mysql/my.cnf',
    },
    'RedHat': {
        'config': '/etc/my.cnf',
    },
}, merge=pillar.galera.get('clustercheck', {})) %}


+ 2
- 0
galera/master.sls View File

- defaults: - defaults:
service: {{ master|yaml }} service: {{ master|yaml }}
- template: jinja - template: jinja
- timeout: 1800


galera_bootstrap_script: galera_bootstrap_script:
file.managed: file.managed:
- require: - require:
- file: galera_run_dir - file: galera_run_dir
- file: galera_init_script - file: galera_init_script
- timeout: 1800


galera_bootstrap_set_root_password: galera_bootstrap_set_root_password:
cmd.run: cmd.run:

+ 40
- 20
galera/server.sls View File

{%- if pillar.get('mysql', {}).server is defined %} {%- if pillar.get('mysql', {}).server is defined %}
{%- from "mysql/map.jinja" import mysql_connection_args as connection with context %}
{%- set server = pillar.mysql.server %} {%- set server = pillar.mysql.server %}


{%- for database_name, database in server.get('database', {}).iteritems() %} {%- for database_name, database in server.get('database', {}).iteritems() %}


{%- if not grains.get('noservices', False) %}
mysql_database_{{ database_name }}: mysql_database_{{ database_name }}:
mysql_database.present: mysql_database.present:
- name: {{ database_name }} - name: {{ database_name }}
- character_set: {{ database.get('encoding', 'utf8') }}
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}
{%- endif %}


{%- for user in database.users %} {%- for user in database.users %}

{%- if not grains.get('noservices', False) %}
mysql_user_{{ user.name }}_{{ database_name }}_{{ user.host }}: mysql_user_{{ user.name }}_{{ database_name }}_{{ user.host }}:
mysql_user.present: mysql_user.present:
- host: '{{ user.host }}' - host: '{{ user.host }}'
- name: '{{ user.name }}' - name: '{{ user.name }}'
{%- if user.password is defined %}
- password: {{ user.password }} - password: {{ user.password }}
{%- else %}
- allow_passwordless: true
{%- endif %}
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}


mysql_grants_{{ user.name }}_{{ database_name }}_{{ user.host }}: mysql_grants_{{ user.name }}_{{ database_name }}_{{ user.host }}:
mysql_grants.present: mysql_grants.present:
- database: '{{ database_name }}.*' - database: '{{ database_name }}.*'
- user: '{{ user.name }}' - user: '{{ user.name }}'
- host: '{{ user.host }}' - host: '{{ user.host }}'
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}
- require: - require:
- mysql_user: mysql_user_{{ user.name }}_{{ database_name }}_{{ user.host }} - mysql_user: mysql_user_{{ user.name }}_{{ database_name }}_{{ user.host }}
- mysql_database: mysql_database_{{ database_name }} - mysql_database: mysql_database_{{ database_name }}
{%- endif %}
{%- endfor %} {%- endfor %}


{%- if database.initial_data is defined %} {%- if database.initial_data is defined %}

/root/mysql/scripts/restore_{{ database_name }}.sh: /root/mysql/scripts/restore_{{ database_name }}.sh:
file.managed: file.managed:
- source: salt://mysql/conf/restore.sh - source: salt://mysql/conf/restore.sh
- cwd: /root - cwd: /root
- require: - require:
- file: /root/mysql/scripts/restore_{{ database_name }}.sh - file: /root/mysql/scripts/restore_{{ database_name }}.sh

{%- endif %} {%- endif %}


{%- endfor %} {%- endfor %}


{%- if not grains.get('noservices', False) %}
{%- for user in server.get('users', []) %} {%- for user in server.get('users', []) %}
{%- set user_hosts = user.get('hosts', user.get('host', 'localhost'))|sequence %}
{%- for host in user_hosts %}
{%- for host in user.get('hosts', user.get('host', 'localhost'))|sequence %}
{%- if not grains.get('noservices', False) %}
mysql_user_{{ user.name }}_{{ host }}: mysql_user_{{ user.name }}_{{ host }}:
mysql_user.present: mysql_user.present:
- host: '{{ user.host }}'
- host: '{{ host }}'
- name: '{{ user.name }}' - name: '{{ user.name }}'
{%- if user['password_hash'] is defined %} {%- if user['password_hash'] is defined %}
- password_hash: '{{ user.password_hash }}'
- password_hash: '{{ user.password_hash }}'
{%- elif user['password'] is defined and user['password'] != None %} {%- elif user['password'] is defined and user['password'] != None %}
- password: '{{ user.password }}'
- password: '{{ user.password }}'
{%- else %} {%- else %}
- allow_passwordless: True - allow_passwordless: True
{%- endif %} {%- endif %}
- connection_charset: utf8
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}


{%- if 'grants' in user %} {%- if 'grants' in user %}
mysql_user_{{ user.name }}_{{ host }}_grants: mysql_user_{{ user.name }}_{{ host }}_grants:
mysql_grants.present: mysql_grants.present:
- name: {{ user.name }} - name: {{ user.name }}
- grant: {{ user['grants']|sequence|join(",") }} - grant: {{ user['grants']|sequence|join(",") }}
- database: '*.*'
- database: '{{ user.get('database','*.*') }}'
- grant_option: {{ user['grant_option'] | default(False) }} - grant_option: {{ user['grant_option'] | default(False) }}
- user: {{ user.name }} - user: {{ user.name }}
- host: '{{ host }}' - host: '{{ host }}'
- connection_charset: utf8
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}
- require: - require:
- mysql_user_{{ user.name }}_{{ host }} - mysql_user_{{ user.name }}_{{ host }}
{%- endif %} {%- endif %}


{%- if 'databases' in user %} {%- if 'databases' in user %}
{% for db in user['databases'] %}
mysql_user_{{ user.name }}_{{ host }}_grants_db_{{ db }} ~ '_' ~ loop.index0:
{%- for db in user['databases'] %}
mysql_user_{{ user.name }}_{{ host }}_grants_db_{{ db.database }}_{{ loop.index0 }}:
mysql_grants.present: mysql_grants.present:
- name: {{ user.name ~ '_' ~ db['database'] ~ '_' ~ db['table'] | default('all') }} - name: {{ user.name ~ '_' ~ db['database'] ~ '_' ~ db['table'] | default('all') }}
- grant: {{db['grants']|sequence|join(",")}}
- grant: {{ db['grants']|sequence|join(",") }}
- database: '{{ db['database'] }}.{{ db['table'] | default('*') }}' - database: '{{ db['database'] }}.{{ db['table'] | default('*') }}'
- grant_option: {{ db['grant_option'] | default(False) }} - grant_option: {{ db['grant_option'] | default(False) }}
- user: {{ user.name }} - user: {{ user.name }}
- host: '{{ host }}' - host: '{{ host }}'
- connection_charset: utf8
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}
- require: - require:
- mysql_user_{{ user.name }}_{{ host }} - mysql_user_{{ user.name }}_{{ host }}
- mysql_database_{{ db }}
- mysql_database_{{ db.database }}
{%- endfor %} {%- endfor %}
{%- endif %} {%- endif %}


{%- endif %}
{%- endfor %} {%- endfor %}
{%- endfor %} {%- endfor %}


{%- endif %}
{%- endif %} {%- endif %}

+ 4
- 2
galera/slave.sls View File

- require: - require:
- file: galera_run_dir - file: galera_run_dir
- file: galera_init_script - file: galera_init_script
- timeout: 1800


galera_bootstrap_set_root_password: galera_bootstrap_set_root_password:
cmd.run: cmd.run:
- require: - require:
- file: galera_bootstrap_init_config - file: galera_bootstrap_init_config
- file: galera_bootstrap_script - file: galera_bootstrap_script
- timeout: 1800


galera_bootstrap_finish_flag: galera_bootstrap_finish_flag:
file.touch: file.touch:
{%- endif %} {%- endif %}


{%- if not grains.get('noservices', False) %} {%- if not grains.get('noservices', False) %}

galera_service: galera_service:
service.running: service.running:
- name: {{ slave.service }} - name: {{ slave.service }}
- enable: true - enable: true
- reload: true - reload: true

{%- endif %} {%- endif %}


{%- endif %} {%- endif %}

+ 10
- 0
tests/integration/master_cluster/checks_clustercheck_spec.rb View File

# Verify the rendered xinetd clustercheck definition (master_cluster pillar).
describe file('/etc/xinetd.d/mysql_clustercheck') do
  # NOTE: `it('should exist')` without a block is a *pending* RSpec example
  # and asserts nothing; the block form actually runs the matcher.
  it { should exist }
  its('content') { should match /server.*\/usr\/local\/bin\/mysql_clustercheck/ }
  its('content') { should match /server_args.*clustercheck password available_when_donor=1 \/dev\/null available_when_readonly=1/ }
end

describe file('/usr/local/bin/mysql_clustercheck') do
  it { should exist }
  it { should be_executable }
end

+ 10
- 0
tests/integration/slave_cluster/checks_clustercheck_spec.rb View File

# Verify the rendered xinetd clustercheck definition (slave_cluster pillar).
describe file('/etc/xinetd.d/mysql_clustercheck') do
  # NOTE: `it('should exist')` without a block is a *pending* RSpec example
  # and asserts nothing; the block form actually runs the matcher.
  it { should exist }
  its('content') { should match /server.*\/usr\/local\/bin\/mysql_clustercheck/ }
  its('content') { should match /server_args.*clustercheck password available_when_donor=1 \/dev\/null available_when_readonly=1/ }
end

describe file('/usr/local/bin/mysql_clustercheck') do
  it { should exist }
  it { should be_executable }
end

+ 14
- 9
tests/pillar/master_cluster.sls View File

port: 3306 port: 3306
maintenance_password: password maintenance_password: password
admin: admin:
user: user
user: root
password: password password: password
members: members:
- host: 127.0.0.1 - host: 127.0.0.1
port: 4567 port: 4567
- host: 127.0.0.1 - host: 127.0.0.1
port: 4567 port: 4567
clustercheck:
enabled: True
user: clustercheck
password: password
available_when_donor: 1
available_when_readonly: 1
port: 9200
mysql: mysql:
server: server:
users: users:
host: 127.0.0.1 host: 127.0.0.1
- name: clustercheck - name: clustercheck
#host: localhost #host: localhost
password: clustercheck
password: password
database: '*.*' database: '*.*'
grants: PROCESS grants: PROCESS
grant_option: False
- name: inspector - name: inspector
host: 127.0.0.1 host: 127.0.0.1
password: password password: password
databases: databases:
mydb:
- database: mydb
- table: mytable
- grant_option: False
- grants:
- all privileges
- database: mydb
table: mytable
grant_option: True
grants:
- all privileges

+ 14
- 2
tests/pillar/slave_cluster.sls View File

port: 3306 port: 3306
maintenance_password: password maintenance_password: password
admin: admin:
user: user
user: root
password: password password: password
members: members:
- host: 127.0.0.1 - host: 127.0.0.1
port: 4567 port: 4567
- host: 127.0.0.1 - host: 127.0.0.1
port: 4567 port: 4567
clustercheck:
enabled: True
user: clustercheck
password: password
available_when_donor: 1
available_when_readonly: 1
port: 9200
mysql: mysql:
server: server:
users: users:
- name: haproxy - name: haproxy
host: '%' host: '%'
- name: haproxy - name: haproxy
host: 127.0.0.1
host: 127.0.0.1
- name: clustercheck
#host: localhost
password: password
database: '*.*'
grants: PROCESS

+ 1
- 0
tests/run_tests.sh View File

run() { run() {
for pillar in ${PILLARDIR}/*.sls; do for pillar in ${PILLARDIR}/*.sls; do
state_name=$(basename ${pillar%.sls}) state_name=$(basename ${pillar%.sls})
salt_run grains.set 'noservices' False force=True
salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1) salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
done done
} }

Loading…
Cancel
Save