
Detect split-brain condition through clustercheck script in xinetd

pull/27/head
TomasHradecky, 7 years ago
commit 38898baef4
5 changed files with 258 additions and 0 deletions
  1. README.rst  +12 -0
  2. galera/files/clustercheck.sh  +104 -0
  3. galera/files/xinet.d.cfg  +20 -0
  4. galera/server.sls  +60 -0
  5. galera/slave.sls  +62 -0

README.rst  +12 -0

@@ -80,6 +80,18 @@ Additional mysql users:
      - grants:
        - all privileges

Additional check params:

    mysql:
      server:
        clustercheck:
          user: clustercheck
          password: clustercheck
          available_when_donor: 0
          available_when_readonly: 1
          enabled: True
          xinetd_port: 9200


Usage
=====


galera/files/clustercheck.sh  +104 -0

@@ -0,0 +1,104 @@
#!/bin/bash
#
# Script to make a proxy (ie HAProxy) capable of monitoring Percona XtraDB Cluster nodes properly
#
# Author: Olaf van Zandwijk <olaf.vanzandwijk@nedap.com>
# Author: Raghavendra Prabhu <raghavendra.prabhu@percona.com>
#
# Documentation and download: https://github.com/olafz/percona-clustercheck
#
# Based on the original script from Unai Rodriguez
#

if [[ $1 == '-h' || $1 == '--help' ]]; then
    echo "Usage: $0 <user> <pass> <available_when_donor=0|1> <log_file> <available_when_readonly=0|1> <defaults_extra_file>"
    exit
fi

# if the disabled file is present, return 503. This allows
# admins to manually remove a node from a cluster easily.
if [ -e "/var/tmp/clustercheck.disabled" ]; then
    # Shell return-code is 1
    echo -en "HTTP/1.1 503 Service Unavailable\r\n"
    echo -en "Content-Type: text/plain\r\n"
    echo -en "Connection: close\r\n"
    echo -en "Content-Length: 51\r\n"
    echo -en "\r\n"
    echo -en "Percona XtraDB Cluster Node is manually disabled.\r\n"
    sleep 0.1
    exit 1
fi

MYSQL_USERNAME="${1-clustercheckuser}"
MYSQL_PASSWORD="${2-clustercheckpassword!}"
AVAILABLE_WHEN_DONOR=${3:-0}
ERR_FILE="${4:-/dev/null}"
AVAILABLE_WHEN_READONLY=${5:-1}
DEFAULTS_EXTRA_FILE=${6:-/etc/my.cnf}

# Timeout exists for instances where mysqld may be hung
TIMEOUT=10

EXTRA_ARGS=""
if [[ -n "$MYSQL_USERNAME" ]]; then
    EXTRA_ARGS="$EXTRA_ARGS --user=${MYSQL_USERNAME}"
fi
if [[ -n "$MYSQL_PASSWORD" ]]; then
    EXTRA_ARGS="$EXTRA_ARGS --password=${MYSQL_PASSWORD}"
fi
if [[ -r $DEFAULTS_EXTRA_FILE ]]; then
    MYSQL_CMDLINE="mysql --defaults-extra-file=$DEFAULTS_EXTRA_FILE -nNE --connect-timeout=$TIMEOUT \
        ${EXTRA_ARGS}"
else
    MYSQL_CMDLINE="mysql -nNE --connect-timeout=$TIMEOUT ${EXTRA_ARGS}"
fi

#
# Perform the query to check the wsrep_local_state
#
WSREP_STATUS=$($MYSQL_CMDLINE -e "SHOW STATUS LIKE 'wsrep_local_state';" \
    2>${ERR_FILE} | tail -1 2>>${ERR_FILE})

if [[ "${WSREP_STATUS}" == "4" ]] || [[ "${WSREP_STATUS}" == "2" && ${AVAILABLE_WHEN_DONOR} == 1 ]]
then
    # Check only when set to 0 to avoid latency in response.
    if [[ $AVAILABLE_WHEN_READONLY -eq 0 ]]; then
        READ_ONLY=$($MYSQL_CMDLINE -e "SHOW GLOBAL VARIABLES LIKE 'read_only';" \
            2>${ERR_FILE} | tail -1 2>>${ERR_FILE})

        if [[ "${READ_ONLY}" == "ON" ]]; then
            # Percona XtraDB Cluster node local state is 'Synced', but it is in
            # read-only mode. The variable AVAILABLE_WHEN_READONLY is set to 0.
            # => return HTTP 503
            # Shell return-code is 1
            echo -en "HTTP/1.1 503 Service Unavailable\r\n"
            echo -en "Content-Type: text/plain\r\n"
            echo -en "Connection: close\r\n"
            echo -en "Content-Length: 43\r\n"
            echo -en "\r\n"
            echo -en "Percona XtraDB Cluster Node is read-only.\r\n"
            sleep 0.1
            exit 1
        fi
    fi
    # Percona XtraDB Cluster node local state is 'Synced' => return HTTP 200
    # Shell return-code is 0
    echo -en "HTTP/1.1 200 OK\r\n"
    echo -en "Content-Type: text/plain\r\n"
    echo -en "Connection: close\r\n"
    echo -en "Content-Length: 40\r\n"
    echo -en "\r\n"
    echo -en "Percona XtraDB Cluster Node is synced.\r\n"
    sleep 0.1
    exit 0
else
    # Percona XtraDB Cluster node local state is not 'Synced' => return HTTP 503
    # Shell return-code is 1
    echo -en "HTTP/1.1 503 Service Unavailable\r\n"
    echo -en "Content-Type: text/plain\r\n"
    echo -en "Connection: close\r\n"
    echo -en "Content-Length: 44\r\n"
    echo -en "\r\n"
    echo -en "Percona XtraDB Cluster Node is not synced.\r\n"
    sleep 0.1
    exit 1
fi
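
For context (not part of this commit): a load balancer polls this check over HTTP and routes traffic only to nodes answering 200. A minimal HAProxy sketch, assuming the check is exposed on port 9200 as configured by the states below (node names and addresses are illustrative):

    listen galera 0.0.0.0:3307
        mode tcp
        balance leastconn
        option httpchk
        server node1 192.168.0.1:3306 check port 9200 inter 12000 rise 3 fall 3
        server node2 192.168.0.2:3306 check port 9200 inter 12000 rise 3 fall 3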

galera/files/xinet.d.cfg  +20 -0

@@ -0,0 +1,20 @@
# default: {{ default_state|default('on') }}
# description: {{ name }}

service {{ name }}
{
    disable         = {{ disable|default('no') }}
{%- if flags is defined %}
    flags           = {{ flags }}
{%- endif %}
    socket_type     = {{ socket_type|default('stream') }}
    port            = {{ port }}
    wait            = {{ wait|default('no') }}
    user            = {{ user }}
    server          = {{ server }}
    log_on_failure += {{ log_on_failure|default('USERID') }}
    only_from       = {{ only_from|default('0.0.0.0/0') }}
{%- if per_source is defined %}
    per_source      = {{ per_source }}
{%- endif %}
}
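
Rendered with the defaults passed in from server.sls below (service name, port, and credentials here are the formula defaults; an actual file varies with pillar data), the template yields roughly:

    # default: on
    # description: clustercheck

    service clustercheck
    {
        disable         = no
        flags           = REUSE
        socket_type     = stream
        port            = 9200
        wait            = no
        user            = nobody
        server          = /usr/local/bin/mysql_clustercheck clustercheck clustercheck 0 /dev/null 0
        log_on_failure += USERID
        only_from       = 0.0.0.0/0
        per_source      = UNLIMITED
    }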

galera/server.sls  +60 -0

@@ -106,4 +106,64 @@ mysql_user_{{ user.name }}_{{ host }}_grants_db_{{ db }} ~ '_' ~ loop.index0:
{%- endfor %}

{%- endif %}

{%- set _galera_xinetd_srv = [] %}

{%- if server.get('clustercheck', {}).get('enabled', False) %}
{%- set _ccheck = server.clustercheck %}
{%- for bind in server.bind %}
{%- set index = '_{0}_{1}'.format(bind.address, bind.port) %}
{%- do _galera_xinetd_srv.append('clustercheck') %}

/etc/xinetd.d/mysql_clustercheck{{ index }}_{{ _ccheck.get('xinetd_port', 9200) }}:
  file.managed:
  - source: salt://galera/files/xinet.d.cfg
  - template: jinja
  - defaults:
      name: clustercheck
      user: nobody
      # FIXME: add options if check_attr host/port is defined etc.
      server: '/usr/local/bin/mysql_clustercheck {{ _ccheck.get('user', 'clustercheck') }} {{ _ccheck.get('password', 'clustercheck') }} {{ _ccheck.get('available_when_donor', 0) }} /dev/null {{ _ccheck.get('available_when_readonly', 0) }}'
      port: {{ _ccheck.get('xinetd_port', 9200) }}
      flags: REUSE
      per_source: UNLIMITED
  - require:
    - file: /usr/local/bin/mysql_clustercheck
  - watch_in:
    - service: galera_xinetd_service

{%- endfor %}
{%- endif %}

{%- if 'clustercheck' in _galera_xinetd_srv %}

clustercheck_dir:
  file.directory:
  - name: /usr/local/bin/
  - user: root
  - group: root
  - mode: 755

/usr/local/bin/mysql_clustercheck:
  file.managed:
  - source: salt://galera/files/clustercheck.sh
  - user: root
  - group: root
  - mode: 755
  - require:
    - file: clustercheck_dir

{%- endif %}

{%- if _galera_xinetd_srv|length > 0 %}

galera_xinetd_package:
  pkg.installed:
  - name: xinetd

galera_xinetd_service:
  service.running:
  - name: xinetd
  - require:
    - pkg: galera_xinetd_package

{%- endif %}


{%- endif %}
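
A quick way to verify the wiring after applying the state (hostname and port are illustrative; the body comes from clustercheck.sh above):

    $ curl -i http://db01:9200/
    HTTP/1.1 200 OK
    Content-Type: text/plain
    Connection: close
    Content-Length: 40

    Percona XtraDB Cluster Node is synced.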

galera/slave.sls  +62 -0

@@ -187,4 +187,66 @@ galera_service:
- reload: true

{%- endif %}



{%- set _galera_xinetd_srv = [] %}

{%- if slave.get('clustercheck', {}).get('enabled', False) %}
{%- set _ccheck = slave.clustercheck %}
{%- for bind in slave.bind %}
{%- set index = '_{0}_{1}'.format(bind.address, bind.port) %}
{%- do _galera_xinetd_srv.append('clustercheck') %}

/etc/xinetd.d/mysql_clustercheck{{ index }}_{{ _ccheck.get('xinetd_port', 9200) }}:
  file.managed:
  - source: salt://galera/files/xinet.d.cfg
  - template: jinja
  - defaults:
      name: clustercheck
      user: nobody
      # FIXME: add options if check_attr host/port is defined etc.
      server: '/usr/local/bin/mysql_clustercheck {{ _ccheck.get('user', 'clustercheck') }} {{ _ccheck.get('password', 'clustercheck') }} {{ _ccheck.get('available_when_donor', 0) }} /dev/null {{ _ccheck.get('available_when_readonly', 0) }}'
      port: {{ _ccheck.get('xinetd_port', 9200) }}
      flags: REUSE
      per_source: UNLIMITED
  - require:
    - file: /usr/local/bin/mysql_clustercheck
  - watch_in:
    - service: galera_xinetd_service

{%- endfor %}
{%- endif %}

{%- if 'clustercheck' in _galera_xinetd_srv %}

clustercheck_dir:
  file.directory:
  - name: /usr/local/bin/
  - user: root
  - group: root
  - mode: 755

/usr/local/bin/mysql_clustercheck:
  file.managed:
  - source: salt://galera/files/clustercheck.sh
  - user: root
  - group: root
  - mode: 755
  - require:
    - file: clustercheck_dir

{%- endif %}

{%- if _galera_xinetd_srv|length > 0 %}

galera_xinetd_package:
  pkg.installed:
  - name: xinetd

galera_xinetd_service:
  service.running:
  - name: xinetd
  - require:
    - pkg: galera_xinetd_package

{%- endif %}


{%- endif %}
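
Both state files key off the pillar block documented in the README. A minimal pillar to enable the check for the server role (the slave state reads the same keys from its own pillar section; values shown are the defaults assumed above):

    mysql:
      server:
        clustercheck:
          enabled: True
          user: clustercheck
          password: clustercheck
          available_when_donor: 0
          available_when_readonly: 1
          xinetd_port: 9200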
