Просмотр исходного кода

Merge pull request #30 from salt-formulas/pr/27

Add clustercheck (replaces PR #27)
pull/37/head
Petr Michalec 7 лет назад
Родитель
Сommit
e3adc7bfd9
14 измененных файлов: 416 добавлений и 25 удалений
  1. +7
    -8
      .kitchen.yml
  2. +38
    -2
      README.rst
  3. +45
    -0
      galera/clustercheck.sls
  4. +138
    -0
      galera/files/clustercheck.sh
  5. +24
    -0
      galera/files/xinet.d.conf
  6. +3
    -0
      galera/init.sls
  7. +17
    -0
      galera/map.jinja
  8. +2
    -0
      galera/master.sls
  9. +80
    -12
      galera/server.sls
  10. +3
    -0
      galera/slave.sls
  11. +10
    -0
      tests/integration/master_cluster/checks_clustercheck_spec.rb
  12. +10
    -0
      tests/integration/slave_cluster/checks_clustercheck_spec.rb
  13. +25
    -1
      tests/pillar/master_cluster.sls
  14. +14
    -2
      tests/pillar/slave_cluster.sls

+ 7
- 8
.kitchen.yml Просмотреть файл

@@ -14,6 +14,13 @@ provisioner:
formula: galera
grains:
noservices: True
dependencies:
- name: mysql
repo: git
source: https://github.com/salt-formulas/salt-formula-mysql.git
- name: linux
repo: git
source: https://github.com/salt-formulas/salt-formula-linux.git
state_top:
base:
"*":
@@ -27,14 +34,6 @@ provisioner:
- galeracluster_debian_repo
pillars-from-files:
galeracluster_debian_repo.sls: tests/pillar/repo_galeracluster.sls
dependencies:
- name: mysql
repo: git
source: https://github.com/salt-formulas/salt-formula-mysql.git
dependencies:
- name: linux
repo: git
source: https://github.com/salt-formulas/salt-formula-linux.git

verifier:
name: inspec

+ 38
- 2
README.rst Просмотреть файл

@@ -56,7 +56,6 @@ Galera cluster slave node
user: root
password: pass


Enable TLS support:

.. code-block:: yaml
@@ -77,20 +76,57 @@ Enable TLS support:
cacert_chain: << body of ca certs chain >>


Additional mysql users:

.. code-block:: yaml

mysql:
server:
users:
- name: clustercheck
password: clustercheck
database: '*.*'
grants: PROCESS
- name: inspector
host: 127.0.0.1
password: password
databases:
mydb:
- database: mydb
- table: mytable
- grant_option: True
- grants:
- all privileges

Additional check params:
========================

.. code-block:: yaml

galera:
clustercheck:
- enabled: True
- user: clustercheck
- password: clustercheck
- available_when_donor: 0
- available_when_readonly: 1
- port: 9200

Configurable soft parameters
============================


- **galera_innodb_buffer_pool_size** - the default value is 3138M
- **galera_max_connections** - the default value is 20000

Usage:

.. code-block:: yaml

_param:
galera_innodb_buffer_pool_size: 1024M
galera_max_connections: 200


Usage
=====


+ 45
- 0
galera/clustercheck.sls Просмотреть файл

@@ -0,0 +1,45 @@
{%- from "galera/map.jinja" import clustercheck with context %}

{#- Deploy the percona clustercheck script and expose it over xinetd so a
    load balancer (e.g. HAProxy) can probe Galera node health via HTTP.
    NOTE(review): server_args passes "available_when_donor=<n>" as the 3rd
    positional argument while clustercheck.sh compares $3 against the bare
    value "1" - verify the argument format against the script. #}
{%- if clustercheck.get('enabled', False) %}
/usr/local/bin/mysql_clustercheck:
  file.managed:
  - source: salt://galera/files/clustercheck.sh
  - user: root
  - group: root
  - mode: 755
  - dir_mode: 755
  - makedirs: True

/etc/xinetd.d/mysql_clustercheck:
  file.managed:
  - source: salt://galera/files/xinet.d.conf
  - template: jinja
  - makedirs: True
  - defaults:
      name: mysqlchk
      user: nobody
      server: '/usr/local/bin/mysql_clustercheck'
      server_args: '{{ clustercheck.get('user', 'clustercheck') }} {{ clustercheck.get('password', 'clustercheck') }} available_when_donor={{ clustercheck.get('available_when_donor', 0) }} /dev/null available_when_readonly={{ clustercheck.get('available_when_readonly', 0) }} {{ clustercheck.config }}'
      port: {{ clustercheck.get('port', 9200) }}
      flags: REUSE
      per_source: UNLIMITED
  - require:
    - file: /usr/local/bin/mysql_clustercheck
{%- if not grains.get('noservices', False) %}
  {#- Classic requisite syntax needs the state module prefix. #}
  - watch_in:
    - service: galera_xinetd_service
{%- endif %}

galera_xinetd_package:
  pkg.installed:
  - name: xinetd

{%- if not grains.get('noservices', False) %}
galera_xinetd_service:
  service.running:
  - name: xinetd
  - require:
    - pkg: xinetd
{%- endif %}
{%- endif %}


+ 138
- 0
galera/files/clustercheck.sh Просмотреть файл

@@ -0,0 +1,138 @@
#!/bin/bash
#
# Script to make a proxy (ie HAProxy) capable of monitoring MySQL Cluster nodes properly
#
# Author: Olaf van Zandwijk <olaf.vanzandwijk@nedap.com>
# Author: Raghavendra Prabhu <raghavendra.prabhu@percona.com>
# Author: Petr Michalec <pmichalec@mirantis.com>
#
# Documentation and download: https://github.com/epcim/percona-clustercheck
#
# Based on the original script from Unai Rodriguez
#

# Emit a minimal HTTP/1.1 response on stdout (consumed directly by xinetd).
#   $1 - numeric HTTP status code
#   $2 - plain-text response body
function httpReply(){
    local status="${1}"
    local body="${2}"

    # https://serverfault.com/questions/504756/curl-failure-when-receiving-data-from-peer-using-percona-xtradb-cluster-check
    sleep 0.1
    case "${status}" in
        503) echo -en "HTTP/1.1 503 Service Unavailable\r\n" ;;
        404) echo -en "HTTP/1.1 404 Not Found\r\n" ;;
        401) echo -en "HTTP/1.1 401 Unauthorized\r\n" ;;
        200) echo -en "HTTP/1.1 200 OK\r\n" ;;
        # Pass any other status straight through without a reason phrase.
        *)   echo -en "HTTP/1.1 ${status}\r\n" ;;
    esac

    echo -en "Content-Type: text/plain\r\n"
    echo -en "Connection: close\r\n"
    echo -en "Content-Length: ${#body}\r\n"
    echo -en "\r\n"
    echo -en "${body}"
    echo -en "\r\n"
    sleep 0.1
}

# Print usage and exit when help is requested.
if [[ $1 == '-h' || $1 == '--help' ]]; then
    echo "Usage: $0 <user> <pass> <available_when_donor=0|1> <log_file> <available_when_readonly=0|1> <defaults_extra_file> <timeout>"
    exit
fi

# if the disabled file is present, return 503. This allows
# admins to manually remove a node from a cluster easily.
if [ -e "/var/tmp/clustercheck.disabled" ]; then
    # Shell return-code is 1
    httpReply "503" "MySQL Cluster Node is manually disabled.\r\n"
    exit 1
fi

# Positional configuration, all optional (see usage above).
MYSQL_USERNAME="${1-clustercheckuser}"
MYSQL_PASSWORD="${2-clustercheckpassword!}"
AVAILABLE_WHEN_DONOR=${3:-0}
ERR_FILE="${4:-/dev/null}"
AVAILABLE_WHEN_READONLY=${5:-1}
DEFAULTS_EXTRA_FILE=${6:-/etc/my.cnf}
# Timeout exists for instances where mysqld may be hung
# Default value considers the Galera timeouts
TIMEOUT=${7:-18}

EXTRA_ARGS=""
if [[ -n "$MYSQL_USERNAME" ]]; then
    EXTRA_ARGS="$EXTRA_ARGS --user=${MYSQL_USERNAME}"
fi
if [[ -n "$MYSQL_PASSWORD" ]]; then
    EXTRA_ARGS="$EXTRA_ARGS --password=${MYSQL_PASSWORD}"
fi
# Prefer the defaults-extra-file (e.g. /etc/my.cnf) when it is readable.
if [[ -r $DEFAULTS_EXTRA_FILE ]]; then
    MYSQL_CMDLINE="mysql --defaults-extra-file=$DEFAULTS_EXTRA_FILE -nNE --connect-timeout=$TIMEOUT \
        ${EXTRA_ARGS}"
else
    MYSQL_CMDLINE="mysql -nNE --connect-timeout=$TIMEOUT ${EXTRA_ARGS}"
fi

#
# Perform the query to check the wsrep_local_state
#
WSREP_STATUS=$($MYSQL_CMDLINE -e "SHOW STATUS LIKE 'wsrep_local_state';" \
    2>${ERR_FILE} | tail -1 2>>${ERR_FILE}; exit ${PIPESTATUS[0]})
mysql_ret=$?

if [[ $mysql_ret -eq 1 || $mysql_ret -eq 127 ]]; then
    # hash or command can be used here, but command is POSIX
    # FIX: probe the mysql binary itself; the previous revision tested the
    # undefined variable $MYSQL_CMD, so every login failure was misreported
    # as "command not found".  Redirect stdout so the resolved path cannot
    # leak into the HTTP response served through xinetd.
    command -v mysql >/dev/null; mysql_ret=$?
    if [[ $mysql_ret -ne 0 ]]; then
        # mysql program not found
        # => return HTTP 404
        # Shell return-code is 2
        httpReply "404" "Mysql command not found or service is not running.\r\n"
        exit 2
    fi

    # Failed mysql login
    # => return HTTP 401
    # Shell return-code is 2
    httpReply "401" "Access denied to database.\r\n"
    exit 2
fi

# wsrep_local_state 4 == Synced, 2 == Donor/Desynced.
if [[ "${WSREP_STATUS}" == "4" ]] || [[ "${WSREP_STATUS}" == "2" && ${AVAILABLE_WHEN_DONOR} == 1 ]]
then
    # Check only when set to 0 to avoid latency in response.
    if [[ $AVAILABLE_WHEN_READONLY -eq 0 ]]; then
        READ_ONLY=$($MYSQL_CMDLINE -e "SHOW GLOBAL VARIABLES LIKE 'read_only';" \
            2>${ERR_FILE} | tail -1 2>>${ERR_FILE})

        if [[ "${READ_ONLY}" == "ON" ]]; then
            # MySQL Cluster node local state is 'Synced', but it is in
            # read-only mode. The variable AVAILABLE_WHEN_READONLY is set to 0.
            # => return HTTP 503
            # Shell return-code is 1
            httpReply "503" "MySQL Cluster Node is read-only.\r\n"
            exit 1
        fi
    fi
    # MySQL Cluster node local state is 'Synced' => return HTTP 200
    # Shell return-code is 0
    httpReply "200" "MySQL Cluster Node is synced.\r\n"
    exit 0
else
    # MySQL Cluster node local state is not 'Synced' => return HTTP 503
    # Shell return-code is 1
    if [[ -z "${WSREP_STATUS}" ]]; then
        httpReply "503" "Received empty reply from MySQL Cluster Node.\r\nMight be a permission issue, check the credentials used by ${0}\r\n"
    else
        httpReply "503" "MySQL Cluster Node is not synced.\r\n"
    fi
    exit 1
fi

+ 24
- 0
galera/files/xinet.d.conf Просмотреть файл

@@ -0,0 +1,24 @@
{#- Jinja template for an xinetd service stanza, rendered by
    galera/clustercheck.sls.  Context variables:
      name, port, user, server                    - required
      server_args, flags, per_source              - optional, emitted only when set
      default_state, disable, socket_type, wait,
      log_on_failure, only_from, type             - optional, defaults below
    Jinja comments are stripped at render time, so the generated
    /etc/xinetd.d file is unchanged by this header. #}
# default: {{ default_state|default('on') }}
# description: {{ name }}

service {{ name }}:
{
disable = {{ disable|default('no') }}
{%- if flags is defined %}
flags = {{ flags }}
{%- endif %}
socket_type = {{ socket_type|default('stream') }}
port = {{ port }}
wait = {{ wait|default('no') }}
user = {{ user }}
server = {{ server }}
{%- if server_args is defined %}
server_args = {{ server_args }}
{%- endif %}
log_on_failure += {{ log_on_failure|default('USERID') }}
only_from = {{ only_from|default('0.0.0.0/0') }}
type = {{ type|default('UNLISTED') }}
{%- if per_source is defined %}
per_source = {{ per_source }}
{%- endif %}
}

+ 3
- 0
galera/init.sls Просмотреть файл

@@ -8,6 +8,9 @@ include:
{%- if pillar.galera.slave is defined %}
- galera.slave
{%- endif %}
{%- if pillar.galera.clustercheck is defined %}
- galera.clustercheck
{%- endif %}
{%- if pillar.galera.monitor is defined %}
- galera.monitor
{%- endif %}

+ 17
- 0
galera/map.jinja Просмотреть файл

@@ -63,3 +63,20 @@
'config': '/etc/mysql/my.cnf',
},
}, grain='oscodename', merge=pillar.galera.get('slave', {})), base='default') %}

{#- Per-OS-family defaults for the clustercheck state; values under the
    galera:clustercheck pillar key override them.
    FIX: 'user'/'password' previously referenced the bare Jinja name
    `clustercheck` - undefined at this point (it is the variable being
    assigned) - instead of the string literal 'clustercheck'. #}
{% set clustercheck = salt['grains.filter_by']({
  'Debian': {
    'config': '/etc/mysql/my.cnf',
    'enabled': False,
    'user': 'clustercheck',
    'password': 'clustercheck',
    'port': '9200'
  },
  'RedHat': {
    'config': '/etc/my.cnf',
    'enabled': False,
    'user': 'clustercheck',
    'password': 'clustercheck',
    'port': '9200'
  },
}, merge=pillar.galera.get('clustercheck', {})) %}

+ 2
- 0
galera/master.sls Просмотреть файл

@@ -115,6 +115,7 @@ galera_init_script:
- defaults:
service: {{ master|yaml }}
- template: jinja
- timeout: 1800

galera_bootstrap_script:
file.managed:
@@ -147,6 +148,7 @@ galera_init_start_service:
- require:
- file: galera_run_dir
- file: galera_init_script
- timeout: 1800

galera_bootstrap_set_root_password:
cmd.run:

+ 80
- 12
galera/server.sls Просмотреть файл

@@ -1,5 +1,5 @@
{%- if pillar.get('mysql', {}).server is defined %}
{%- from "mysql/map.jinja" import mysql_connection_args as connection with context %}
{%- set server = pillar.mysql.server %}

{%- for database_name, database in server.get('database', {}).iteritems() %}
@@ -7,14 +7,30 @@
mysql_database_{{ database_name }}:
mysql_database.present:
- name: {{ database_name }}
- character_set: {{ database.get('encoding', 'utf8') }}
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}

{%- for user in database.users %}

{%- for user in database.get('users', {}) %}
mysql_user_{{ user.name }}_{{ database_name }}_{{ user.host }}:
mysql_user.present:
- host: '{{ user.host }}'
- name: '{{ user.name }}'
{%- if user.password is defined %}
- password: {{ user.password }}
{%- else %}
- allow_passwordless: true
{%- endif %}
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}

mysql_grants_{{ user.name }}_{{ database_name }}_{{ user.host }}:
mysql_grants.present:
@@ -22,14 +38,18 @@ mysql_grants_{{ user.name }}_{{ database_name }}_{{ user.host }}:
- database: '{{ database_name }}.*'
- user: '{{ user.name }}'
- host: '{{ user.host }}'
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}
- require:
- mysql_user: mysql_user_{{ user.name }}_{{ database_name }}_{{ user.host }}
- mysql_database: mysql_database_{{ database_name }}

{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
{%- endfor %}

{%- if database.initial_data is defined %}

/root/mysql/scripts/restore_{{ database_name }}.sh:
file.managed:
- source: salt://mysql/conf/restore.sh
@@ -49,25 +69,73 @@ restore_mysql_database_{{ database_name }}:
- cwd: /root
- require:
- file: /root/mysql/scripts/restore_{{ database_name }}.sh

{%- endif %}

{%- endfor %}

{%- for user in server.get('users', []) %}
mysql_user_{{ user.name }}_{{ user.host }}:
{%- for host in user.get('hosts', user.get('host', 'localhost'))|sequence %}
mysql_user_{{ user.name }}_{{ host }}:
mysql_user.present:
- host: '{{ user.host }}'
- host: '{{ host }}'
- name: '{{ user.name }}'
{%- if user.password is defined %}
- password: {{ user.password }}
{%- if user['password_hash'] is defined %}
- password_hash: '{{ user.password_hash }}'
{%- elif user['password'] is defined and user['password'] != None %}
- password: '{{ user.password }}'
{%- else %}
- allow_passwordless: True
{%- endif %}
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
{%- if 'grants' in user %}
mysql_user_{{ user.name }}_{{ host }}_grants:
mysql_grants.present:
- name: {{ user.name }}
- grant: {{ user['grants']|sequence|join(",") }}
- database: '{{ user.get('database','*.*') }}'
- grant_option: {{ user['grant_option'] | default(False) }}
- user: {{ user.name }}
- host: '{{ host }}'
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}
- require:
- mysql_user_{{ user.name }}_{{ host }}
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
{%- endif %}

{%- if 'databases' in user %}
{%- for db in user['databases'] %}
mysql_user_{{ user.name }}_{{ host }}_grants_db_{{ db.database }}_{{ loop.index0 }}:
mysql_grants.present:
- name: {{ user.name ~ '_' ~ db['database'] ~ '_' ~ db['table'] | default('all') }}
- grant: {{ db['grants']|sequence|join(",") }}
- database: '{{ db['database'] }}.{{ db['table'] | default('*') }}'
- grant_option: {{ db['grant_option'] | default(False) }}
- user: {{ user.name }}
- host: '{{ host }}'
#- connection_user: {{ connection.user }}
#- connection_pass: {{ connection.password }}
#- connection_charset: {{ connection.charset }}
- require:
- mysql_user_{{ user.name }}_{{ host }}
# the following line is not mandatory as database might not be managed by salt formula
#- mysql_database_{{ db.database }}
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
{%- endfor %}
{%- endif %}

{%- endfor %}
{%- endif %}
{%- endfor %}

{%- endif %}

+ 3
- 0
galera/slave.sls Просмотреть файл

@@ -147,6 +147,7 @@ galera_init_start_service:
- require:
- file: galera_run_dir
- file: galera_init_script
- timeout: 1800

galera_bootstrap_set_root_password:
cmd.run:
@@ -194,6 +195,7 @@ galera_bootstrap_start_service_final:
- require:
- file: galera_bootstrap_init_config
- file: galera_bootstrap_script
- timeout: 1800

galera_bootstrap_finish_flag:
file.touch:
@@ -224,3 +226,4 @@ galera_service:
{%- endif %}

{%- endif %}


+ 10
- 0
tests/integration/master_cluster/checks_clustercheck_spec.rb Просмотреть файл

@@ -0,0 +1,10 @@
# Verify the xinetd stanza and clustercheck script deployed by
# galera.clustercheck with tests/pillar/master_cluster.sls values.
# FIX: `it('should exist')` only declares a *pending* example (no block),
# so the matcher never ran; InSpec/RSpec one-liner syntax is `it { should ... }`.
describe file('/etc/xinetd.d/mysql_clustercheck') do
  it { should exist }
  its('content') { should match /server.*\/usr\/local\/bin\/mysql_clustercheck/ }
  its('content') { should match /server_args.*clustercheck password available_when_donor=1 \/dev\/null available_when_readonly=1/ }
end

describe file('/usr/local/bin/mysql_clustercheck') do
  it { should exist }
  it { should be_executable }
end

+ 10
- 0
tests/integration/slave_cluster/checks_clustercheck_spec.rb Просмотреть файл

@@ -0,0 +1,10 @@
# Verify the xinetd stanza and clustercheck script deployed by
# galera.clustercheck with tests/pillar/slave_cluster.sls values.
# FIX: `it('should exist')` only declares a *pending* example (no block),
# so the matcher never ran; InSpec/RSpec one-liner syntax is `it { should ... }`.
describe file('/etc/xinetd.d/mysql_clustercheck') do
  it { should exist }
  its('content') { should match /server.*\/usr\/local\/bin\/mysql_clustercheck/ }
  its('content') { should match /server_args.*clustercheck password available_when_donor=1 \/dev\/null available_when_readonly=1/ }
end

describe file('/usr/local/bin/mysql_clustercheck') do
  it { should exist }
  it { should be_executable }
end

+ 25
- 1
tests/pillar/master_cluster.sls Просмотреть файл

@@ -7,7 +7,7 @@
port: 3306
maintenance_password: password
admin:
user: user
user: root
password: password
members:
- host: 127.0.0.1
@@ -16,10 +16,20 @@
port: 4567
- host: 127.0.0.1
port: 4567
clustercheck:
enabled: True
user: clustercheck
password: password
available_when_donor: 1
available_when_readonly: 1
port: 9200
max_connections: 20000
innodb_buffer_pool_size: 3138M
mysql:
server:
database:
mydb:
encoding: 'utf8'
users:
- name: haproxy
host: localhost
@@ -27,3 +37,17 @@
host: '%'
- name: haproxy
host: 127.0.0.1
- name: clustercheck
#host: localhost
password: password
database: '*.*'
grants: PROCESS
- name: inspector
host: 127.0.0.1
password: password
databases:
- database: mydb
table: mytable
grant_option: True
grants:
- all privileges

+ 14
- 2
tests/pillar/slave_cluster.sls Просмотреть файл

@@ -7,7 +7,7 @@
port: 3306
maintenance_password: password
admin:
user: user
user: root
password: password
members:
- host: 127.0.0.1
@@ -16,6 +16,13 @@
port: 4567
- host: 127.0.0.1
port: 4567
clustercheck:
enabled: True
user: clustercheck
password: password
available_when_donor: 1
available_when_readonly: 1
port: 9200
max_connections: 20000
innodb_buffer_pool_size: 3138M
mysql:
@@ -26,4 +33,9 @@
- name: haproxy
host: '%'
- name: haproxy
host: 127.0.0.1
host: 127.0.0.1
- name: clustercheck
#host: localhost
password: password
database: '*.*'
grants: PROCESS

Загрузка…
Отмена
Сохранить