Pgpool-II + PostgreSQL one main source and two standby source compiling environment

Host IP Virtual IP server1 192.168.222.141 192.168.222.200 server2 192.168.222.142 server3 192.168.222.143 Item Value Detail PostgreSQL version 10.10...

Host IP Virtual IP server1 192.168.222.141 192.168.222.200 server2 192.168.222.142 server3 192.168.222.143 Item Value Detail PostgreSQL version 10.10 Installation path /opt/PG-10.10 port 5432 $PGDATA /opt/PG-10.10/data Archive mode on /opt/PG-10.10/archivedir Boot from boot Disable Pgpool-II Version 4.0.6 Installation path /opt/pgpool-406 port 9999 Pgpool connection port port 9898 PCP port port 9000 watchdog port port 9694 Watchdog heartbeat Master profile /opt/pgpool-406/etc/pgpool.conf Pgpool start user root Non root operation can be realized Operation mode streaming replication mode Watchdog on Boot from boot Disable I. configuration of all nodes Three nodes turn off the firewall (or open the required port): systemctl stop firewalld.service systemctl disable firewalld.service Three nodes create users: adduser postgres && passwd postgres

1. Compile and install database and pgpool

# pg ./configure --prefix=/opt/PG-10.10 --enable-debug make && make install && cd contrib && make && make install && cd .. # pgpool export PATH=/opt/PG-10.10/bin:$PATH ./configure --prefix=/opt/pgpool-406 make && make install cd pgpool-II-4.0.6/src/sql/pgpool-recovery make && make install
# /etc/profile configuration environment variable export PATH=/opt/pgpool-406/bin:/opt/PG-10.10/bin:$PATH source /etc/profile # Take effect. Note: `source` is a shell builtin, not an executable, so `sudo source` does not work — run `source /etc/profile` in the current shell (edit /etc/profile itself as root).

2. Three nodes mutual trust

# root ssh-keygen -t rsa ssh-copy-id -i .ssh/id_rsa.pub [email protected] ssh-copy-id -i .ssh/id_rsa.pub [email protected] ssh-copy-id -i .ssh/id_rsa.pub [email protected] ssh-copy-id -i .ssh/id_rsa.pub [email protected] ssh-copy-id -i .ssh/id_rsa.pub [email protected] ssh-copy-id -i .ssh/id_rsa.pub [email protected] # postgres ssh-keygen -t rsa ssh-copy-id -i .ssh/id_rsa.pub [email protected] ssh-copy-id -i .ssh/id_rsa.pub [email protected] ssh-copy-id -i .ssh/id_rsa.pub [email protected]

2. Create password file in the user's home directory to realize password free access

# postgres user su - postgres echo "192.168.222.141:5432:replication:repl:123456" >> ~/.pgpass echo "192.168.222.142:5432:replication:repl:123456" >> ~/.pgpass echo "192.168.222.143:5432:replication:repl:123456" >> ~/.pgpass chmod 600 ~/.pgpass scp /home/postgres/.pgpass [email protected]:/home/postgres/ scp /home/postgres/.pgpass [email protected]:/home/postgres/ # -------------------------------------------------------------- # root users echo 'localhost:9898:pgpool:pgpool' > ~/.pcppass chmod 600 ~/.pcppass scp /root/.pcppass [email protected]:/root/ scp /root/.pcppass [email protected]:/root/

3. Create related directories

# root chmod 777 /opt/PG-10.10/ mkdir /opt/pgpool-406/log/ && touch /opt/pgpool-406/log/pgpool.log ssh [email protected] "chmod 777 /opt/PG-10.10/ && mkdir /opt/pgpool-406/log/ && touch /opt/pgpool-406/log/pgpool.log" ssh [email protected] "chmod 777 /opt/PG-10.10/ && mkdir /opt/pgpool-406/log/ && touch /opt/pgpool-406/log/pgpool.log" # postgres su - postgres mkdir /opt/PG-10.10/archivedir ssh [email protected] "mkdir /opt/PG-10.10/archivedir" ssh [email protected] "mkdir /opt/PG-10.10/archivedir"
II. PRIMARY master node configuration 1. Database configuration

Initialization

su - postgres /opt/PG-10.10/bin/initdb /opt/PG-10.10/data

vim /opt/PG-10.10/data/postgresql.conf

listen_addresses = '*' archive_mode = on archive_command = 'cp "%p" "/opt/PG-10.10/archivedir"' max_wal_senders = 10 max_replication_slots = 10 wal_level = replica

Start database

su - postgres /opt/PG-10.10/bin/pg_ctl -D /opt/PG-10.10/data start

The main database modifies the password of postgres and creates a stream replication user repl

ALTER USER postgres WITH PASSWORD '123456'; CREATE ROLE pgpool WITH PASSWORD '123456' LOGIN; CREATE ROLE repl WITH PASSWORD '123456' REPLICATION LOGIN;

Create test table tb_pgpool

CREATE TABLE tb_pgpool (id serial,age bigint,insertTime timestamp default now()); insert into tb_pgpool (age) values (1);

/opt/PG-10.10/data/pg_hba.conf

echo "host all all 192.168.222.0/24 trust" >> /opt/PG-10.10/data/pg_hba.conf echo "host replication all 192.168.222.0/24 trust" >> /opt/PG-10.10/data/pg_hba.conf

Boot main library

pg_ctl -D /opt/PG-10.10/data restart #You can query the active and standby databases psql -U postgres -c 'select * from pg_is_in_recovery();'
2. pgpool configuration
cp /opt/pgpool-406/etc/pgpool.conf.sample-stream /opt/pgpool-406/etc/pgpool.conf
listen_addresses = '*' sr_check_user = 'pgpool' sr_check_password = '' health_check_period = 5 health_check_timeout = 30 health_check_user = 'pgpool' health_check_password = '' health_check_max_retries = 3 backend_hostname0 = '192.168.222.141' backend_port0 = 5432 backend_weight0 = 1 backend_data_directory0 = '/opt/PG-10.10/data' backend_flag0 = 'ALLOW_TO_FAILOVER' backend_hostname1 = '192.168.222.142' backend_port1 = 5432 backend_weight1 = 1 backend_data_directory1 = '/opt/PG-10.10/data' backend_flag1 = 'ALLOW_TO_FAILOVER' backend_hostname2 = '192.168.222.143' backend_port2 = 5432 backend_weight2 = 1 backend_data_directory2 = '/opt/PG-10.10/data' backend_flag2 = 'ALLOW_TO_FAILOVER' failover_command = '/opt/pgpool-406/etc/failover.sh %d %h %p %D %m %H %M %P %r %R' follow_master_command = '/opt/pgpool-406/etc/follow_master.sh %d %h %p %D %m %M %H %P %r %R' recovery_user = 'postgres' recovery_password = '' recovery_1st_stage_command = 'recovery_1st_stage' enable_pool_hba = on use_watchdog = on delegate_IP = '192.168.222.200' if_up_cmd = 'ip addr add $_IP_$/24 dev ens33 label ens33:0' if_down_cmd = 'ip addr del $_IP_$/24 dev ens33' arping_cmd = 'arping -U $_IP_$ -w 1 -I ens33' if_cmd_path = '/sbin' arping_path = '/usr/sbin' wd_hostname = '192.168.222.141' wd_port = 9000 other_pgpool_hostname0 = '192.168.222.142' other_pgpool_port0 = 9999 other_wd_port0 = 9000 other_pgpool_hostname1 = '192.168.222.143' other_pgpool_port1 = 9999 other_wd_port1 = 9000 heartbeat_destination0 = '192.168.222.142' heartbeat_destination_port0 = 9694 heartbeat_device0 = '' heartbeat_destination1 = '192.168.222.143' heartbeat_destination_port1 = 9694 heartbeat_device1 = '' log_destination = 'syslog' syslog_facility = 'LOCAL1' pid_file_name = '/opt/pgpool-406/pgpool.pid' memqcache_oiddir = '/opt/pgpool-406/log/pgpool/oiddir'

Create script (see Appendix for details)

# root vim /opt/pgpool-406/etc/failover.sh vim /opt/pgpool-406/etc/follow_master.sh chmod +x /opt/pgpool-406/etc/failover.sh /opt/pgpool-406/etc/follow_master.sh # postgres su - postgres vim /opt/PG-10.10/data/recovery_1st_stage vim /opt/PG-10.10/data/pgpool_remote_start chmod +x /opt/PG-10.10/data/recovery_1st_stage /opt/PG-10.10/data/pgpool_remote_start

PRIMARY primary node create extension

psql template1 -c "CREATE EXTENSION pgpool_recovery"

/opt/pgpool-406/etc/pool_hba.conf
(cp /opt/pgpool-406/etc/pool_hba.conf.sample /opt/pgpool-406/etc/pool_hba.conf)

host all pgpool 0.0.0.0/0 md5 host all postgres 0.0.0.0/0 md5

/opt/pgpool-406/etc/pool_passwd

# root pg_md5 -p -m -u postgres pool_passwd pg_md5 -p -m -u pgpool pool_passwd cat /opt/pgpool-406/etc/pool_passwd

/opt/pgpool-406/etc/pcp.conf
(cp /opt/pgpool-406/etc/pcp.conf.sample /opt/pgpool-406/etc/pcp.conf)

pg_md5 123456 # Generate encrypted text echo "postgres:e10adc3949ba59abbe56e057f20f883e" >> /opt/pgpool-406/etc/pcp.conf echo "pgpool:e10adc3949ba59abbe56e057f20f883e" >> /opt/pgpool-406/etc/pcp.conf

Each configuration file is sent to the standby node

cd /opt/pgpool-406/etc/ scp pcp.conf pgpool.conf pool_passwd pool_hba.conf [email protected]:/opt/pgpool-406/etc/ scp pcp.conf pgpool.conf pool_passwd pool_hba.conf [email protected]:/opt/pgpool-406/etc/
III. configuration of standby node

1. Configuration of pgpool - Ⅱ in standby01

Based on the configuration file sent by the master node, modify some parameters of pgpool.conf, as follows

wd_hostname = '192.168.222.142' #This machine wd_port = 9000 other_pgpool_hostname0 = '192.168.222.141' # Node 1 other_pgpool_port0 = 9999 other_wd_port0 = 9000 other_pgpool_hostname1 = '192.168.222.143' # Node 3 other_pgpool_port1 = 9999 other_wd_port1 = 9000 heartbeat_destination0 = '192.168.222.141' # Node 1 heartbeat_destination_port0 = 9694 heartbeat_device0 = '' heartbeat_destination1 = '192.168.222.143' # Node 3 heartbeat_destination_port1 = 9694 heartbeat_device1 = ''

2. Configuration of pgpool - Ⅱ in standby02

Based on the configuration file sent by the master node, modify some parameters of pgpool.conf, as follows

wd_hostname = '192.168.222.143' #This machine wd_port = 9000 other_pgpool_hostname0 = '192.168.222.141' # Node 1 other_pgpool_port0 = 9999 other_wd_port0 = 9000 other_pgpool_hostname1 = '192.168.222.142' # Node 2 other_pgpool_port1 = 9999 other_wd_port1 = 9000 heartbeat_destination0 = '192.168.222.141' # Node 1 heartbeat_destination_port0 = 9694 heartbeat_device0 = '' heartbeat_destination1 = '192.168.222.142' # Node 2 heartbeat_destination_port1 = 9694 heartbeat_device1 = ''
IV. Usage

1. Startup and shutdown

Startup order: 1. Start the PostgreSQL database services first. 2. Then start the Pgpool-II service: /opt/pgpool-406/bin/pgpool -n -d 3. Start the master node before the standby nodes. Shutdown order: 1. Stop the Pgpool-II service first: /opt/pgpool-406/bin/pgpool stop 2. Then stop the database services. 3. Stop the standby nodes before the master.

2. Set up the standby nodes (online recovery)

pcp_recovery_node -h 192.168.222.200 -p 9898 -U pgpool -n 1 pcp_recovery_node -h 192.168.222.200 -p 9898 -U pgpool -n 2

3. View nodes

psql -h 192.168.222.200 -p 9999 -U pgpool postgres -c "show pool_nodes"

4. View watchdog

pcp_watchdog_info -h 192.168.222.200 -p 9898 -U pgpool

5. Failover test

pg_ctl -D /opt/PG-10.10/data -m immediate stop # The primary node shuts down the database and simulates a failure psql -h 192.168.222.200 -p 9999 -U pgpool postgres -c "show pool_nodes" # View state

6. Node recovery

pcp_recovery_node -h 192.168.222.200 -p 9898 -U pgpool -n 0
Appendix: 1. /opt/pgpool-406/etc/pgpool.conf complete file
# ---------------------------- # pgPool-II configuration file # ---------------------------- # # This file consists of lines of the form: # # name = value # # Whitespace may be used. Comments are introduced with "#" anywhere on a line. # The complete list of parameter names and allowed values can be found in the # pgPool-II documentation. # # This file is read on server startup and when the server receives a SIGHUP # signal. If you edit the file on a running system, you have to SIGHUP the # server for the changes to take effect, or use "pgpool reload". Some # parameters, which are marked below, require a server shutdown and restart to # take effect. # #------------------------------------------------------------------------------ # CONNECTIONS #------------------------------------------------------------------------------ # - pgpool Connection Settings - listen_addresses = '*' # Host name or IP address to listen on: # '*' for all, '' for no TCP/IP connections # (change requires restart) port = 9999 # Port number # (change requires restart) socket_dir = '/tmp' # Unix domain socket path # The Debian package defaults to # /var/run/postgresql # (change requires restart) # - pgpool Communication Manager Connection Settings - pcp_listen_addresses = '*' # Host name or IP address for pcp process to listen on: # '*' for all, '' for no TCP/IP connections # (change requires restart) pcp_port = 9898 # Port number for pcp # (change requires restart) pcp_socket_dir = '/tmp' # Unix domain socket path for pcp # The Debian package defaults to # /var/run/postgresql # (change requires restart) listen_backlog_multiplier = 2 # Set the backlog parameter of listen(2) to # num_init_children * listen_backlog_multiplier. 
# (change requires restart) serialize_accept = off # whether to serialize accept() call to avoid thundering herd problem # (change requires restart) # - Backend Connection Settings - backend_hostname0 = '192.168.222.141' # Host name or IP address to connect to for backend 0 backend_port0 = 5432 # Port number for backend 0 backend_weight0 = 1 # Weight for backend 0 (only in load balancing mode) backend_data_directory0 = '/opt/PG-10.10/data' # Data directory for backend 0 backend_flag0 = 'ALLOW_TO_FAILOVER' # Controls various backend behavior # ALLOW_TO_FAILOVER, DISALLOW_TO_FAILOVER # or ALWAYS_MASTER backend_hostname1 = '192.168.222.142' backend_port1 = 5432 backend_weight1 = 1 backend_data_directory1 = '/opt/PG-10.10/data' backend_flag1 = 'ALLOW_TO_FAILOVER' backend_hostname2 = '192.168.222.143' backend_port2 = 5432 backend_weight2 = 1 backend_data_directory2 = '/opt/PG-10.10/data' backend_flag2 = 'ALLOW_TO_FAILOVER' # - Authentication - enable_pool_hba = on # Use pool_hba.conf for client authentication pool_passwd = 'pool_passwd' # File name of pool_passwd for md5 authentication. # "" disables pool_passwd. # (change requires restart) authentication_timeout = 60 # Delay in seconds to complete client authentication # 0 means no timeout. 
allow_clear_text_frontend_auth = off # Allow Pgpool-II to use clear text password authentication # with clients, when pool_passwd does not # contain the user password # - SSL Connections - ssl = off # Enable SSL support # (change requires restart) #ssl_key = './server.key' # Path to the SSL private key file # (change requires restart) #ssl_cert = './server.cert' # Path to the SSL public certificate file # (change requires restart) #ssl_ca_cert = '' # Path to a single PEM format file # containing CA root certificate(s) # (change requires restart) #ssl_ca_cert_dir = '' # Directory containing CA root certificate(s) # (change requires restart) ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # Allowed SSL ciphers # (change requires restart) ssl_prefer_server_ciphers = off # Use server's SSL cipher preferences, # rather than the client's # (change requires restart) #------------------------------------------------------------------------------ # POOLS #------------------------------------------------------------------------------ # - Concurrent session and pool size - num_init_children = 32 # Number of concurrent sessions allowed # (change requires restart) max_pool = 4 # Number of connection pool caches per connection # (change requires restart) # - Life time - child_life_time = 300 # Pool exits after being idle for this many seconds child_max_connections = 0 # Pool exits after receiving that many connections # 0 means no exit connection_life_time = 0 # Connection to backend closes after being idle for this many seconds # 0 means no close client_idle_limit = 0 # Client is disconnected after being idle for that many seconds # (even inside an explicit transactions!) # 0 means no disconnection #------------------------------------------------------------------------------ # LOGS #------------------------------------------------------------------------------ # - Where to log - log_destination = 'syslog' # Where to log # Valid values are combinations of stderr, # and syslog. 
Default to stderr. # - What to log - log_line_prefix = '%t: pid %p: ' # printf-style string to output at beginning of each log line. log_connections = off # Log connections log_hostname = off # Hostname will be shown in ps status # and in logs if connections are logged log_statement = off # Log all statements log_per_node_statement = off # Log all statements # with node and backend informations log_client_messages = off # Log any client messages log_standby_delay = 'if_over_threshold' # Log standby delay # Valid values are combinations of always, # if_over_threshold, none # - Syslog specific - syslog_facility = 'LOCAL1' # Syslog local facility. Default to LOCAL0 syslog_ident = 'pgpool' # Syslog program identification string # Default to 'pgpool' # - Debug - #log_error_verbosity = default # terse, default, or verbose messages #client_min_messages = notice # values in order of decreasing detail: # debug5 # debug4 # debug3 # debug2 # debug1 # log # notice # warning # error #log_min_messages = warning # values in order of decreasing detail: # debug5 # debug4 # debug3 # debug2 # debug1 # info # notice # warning # error # log # fatal # panic #------------------------------------------------------------------------------ # FILE LOCATIONS #------------------------------------------------------------------------------ pid_file_name = '/opt/pgpool-406/pgpool.pid' # PID file name # Can be specified as relative to the" # location of pgpool.conf file or # as an absolute path # (change requires restart) logdir = '/tmp' # Directory of pgPool status file # (change requires restart) #------------------------------------------------------------------------------ # CONNECTION POOLING #------------------------------------------------------------------------------ connection_cache = on # Activate connection pools # (change requires restart) # Semicolon separated list of queries # to be issued at the end of a session # The default is for 8.3 and later reset_query_list = 'ABORT; DISCARD 
ALL' # The following one is for 8.2 and before #reset_query_list = 'ABORT; RESET ALL; SET SESSION AUTHORIZATION DEFAULT' #------------------------------------------------------------------------------ # REPLICATION MODE #------------------------------------------------------------------------------ replication_mode = off # Activate replication mode # (change requires restart) replicate_select = off # Replicate SELECT statements # when in replication mode # replicate_select is higher priority than # load_balance_mode. insert_lock = off # Automatically locks a dummy row or a table # with INSERT statements to keep SERIAL data # consistency # Without SERIAL, no lock will be issued lobj_lock_table = '' # When rewriting lo_creat command in # replication mode, specify table name to # lock # - Degenerate handling - replication_stop_on_mismatch = off # On disagreement with the packet kind # sent from backend, degenerate the node # which is most likely "minority" # If off, just force to exit this session failover_if_affected_tuples_mismatch = off # On disagreement with the number of affected # tuples in UPDATE/DELETE queries, then # degenerate the node which is most likely # "minority". 
# If off, just abort the transaction to # keep the consistency #------------------------------------------------------------------------------ # LOAD BALANCING MODE #------------------------------------------------------------------------------ load_balance_mode = on # Activate load balancing mode # (change requires restart) ignore_leading_white_space = on # Ignore leading white spaces of each query white_function_list = '' # Comma separated list of function names # that don't write to database # Regexp are accepted black_function_list = 'currval,lastval,nextval,setval' # Comma separated list of function names # that write to database # Regexp are accepted black_query_pattern_list = '' # Semicolon separated list of query patterns # that should be sent to primary node # Regexp are accepted # valid for streaming replicaton mode only. database_redirect_preference_list = '' # comma separated list of pairs of database and node id. # example: postgres:primary,mydb[0-4]:1,mydb[5-9]:2' # valid for streaming replicaton mode only. app_name_redirect_preference_list = '' # comma separated list of pairs of app name and node id. # example: 'psql:primary,myapp[0-4]:1,myapp[5-9]:standby' # valid for streaming replicaton mode only. allow_sql_comments = off # if on, ignore SQL comments when judging if load balance or # query cache is possible. # If off, SQL comments effectively prevent the judgment # (pre 3.4 behavior). disable_load_balance_on_write = 'transaction' # Load balance behavior when write query is issued # in an explicit transaction. # Note that any query not in an explicit transaction # is not affected by the parameter. # 'transaction' (the default): if a write query is issued, # subsequent read queries will not be load balanced # until the transaction ends. # 'trans_transaction': if a write query is issued, # subsequent read queries in an explicit transaction # will not be load balanced until the session ends. 
# 'always': if a write query is issued, read queries will # not be load balanced until the session ends. #------------------------------------------------------------------------------ # MASTER/SLAVE MODE #------------------------------------------------------------------------------ master_slave_mode = on # Activate master/slave mode # (change requires restart) master_slave_sub_mode = 'stream' # Master/slave sub mode # Valid values are combinations stream, slony # or logical. Default is stream. # (change requires restart) # - Streaming - sr_check_period = 10 # Streaming replication check period # Disabled (0) by default sr_check_user = 'pgpool' # Streaming replication check user # This is neccessary even if you disable streaming # replication delay check by sr_check_period = 0 sr_check_password = '' # Password for streaming replication check user # Leaving it empty will make Pgpool-II to first look for the # Password in pool_passwd file before using the empty password sr_check_database = 'postgres' # Database name for streaming replication check delay_threshold = 10000000 # Threshold before not dispatching query to standby node # Unit is in bytes # Disabled (0) by default # - Special commands - follow_master_command = '/opt/pgpool-406/etc/follow_master.sh %d %h %p %D %m %M %H %P %r %R' # Executes this command after master failover # Special values: # %d = node id # %h = host name # %p = port number # %D = database cluster path # %m = new master node id # %H = hostname of the new master node # %M = old master node id # %P = old primary node id # %r = new master port number # %R = new master database cluster path # %% = '%' character #------------------------------------------------------------------------------ # HEALTH CHECK GLOBAL PARAMETERS #------------------------------------------------------------------------------ health_check_period = 5 # Health check period # Disabled (0) by default health_check_timeout = 30 # Health check timeout # 0 means no timeout 
health_check_user = 'pgpool' # Health check user health_check_password = '' # Password for health check user # Leaving it empty will make Pgpool-II to first look for the # Password in pool_passwd file before using the empty password health_check_database = '' # Database name for health check. If '', tries 'postgres' frist, health_check_max_retries = 3 # Maximum number of times to retry a failed health check before giving up. health_check_retry_delay = 1 # Amount of time to wait (in seconds) between retries. connect_timeout = 10000 # Timeout value in milliseconds before giving up to connect to backend. # Default is 10000 ms (10 second). Flaky network user may want to increase # the value. 0 means no timeout. # Note that this value is not only used for health check, # but also for ordinary conection to backend. #------------------------------------------------------------------------------ # HEALTH CHECK PER NODE PARAMETERS (OPTIONAL) #------------------------------------------------------------------------------ #health_check_period0 = 0 #health_check_timeout0 = 20 #health_check_user0 = 'nobody' #health_check_password0 = '' #health_check_database0 = '' #health_check_max_retries0 = 0 #health_check_retry_delay0 = 1 #connect_timeout0 = 10000 #------------------------------------------------------------------------------ # FAILOVER AND FAILBACK #------------------------------------------------------------------------------ failover_command = '/opt/pgpool-406/etc/failover.sh %d %h %p %D %m %H %M %P %r %R' # Executes this command at failover # Special values: # %d = node id # %h = host name # %p = port number # %D = database cluster path # %m = new master node id # %H = hostname of the new master node # %M = old master node id # %P = old primary node id # %r = new master port number # %R = new master database cluster path # %% = '%' character failback_command = '' # Executes this command at failback. 
# Special values: # %d = node id # %h = host name # %p = port number # %D = database cluster path # %m = new master node id # %H = hostname of the new master node # %M = old master node id # %P = old primary node id # %r = new master port number # %R = new master database cluster path # %% = '%' character failover_on_backend_error = on # Initiates failover when reading/writing to the # backend communication socket fails # If set to off, pgpool will report an # error and disconnect the session. detach_false_primary = off # Detach false primary if on. Only # valid in streaming replicaton # mode and with PostgreSQL 9.6 or # after. search_primary_node_timeout = 300 # Timeout in seconds to search for the # primary node when a failover occurs. # 0 means no timeout, keep searching # for a primary node forever. #------------------------------------------------------------------------------ # ONLINE RECOVERY #------------------------------------------------------------------------------ recovery_user = 'postgres' # Online recovery user recovery_password = '' # Online recovery password # Leaving it empty will make Pgpool-II to first look for the # Password in pool_passwd file before using the empty password recovery_1st_stage_command = 'recovery_1st_stage' # Executes a command in first stage recovery_2nd_stage_command = '' # Executes a command in second stage recovery_timeout = 90 # Timeout in seconds to wait for the # recovering node's postmaster to start up # 0 means no wait client_idle_limit_in_recovery = 0 # Client is disconnected after being idle # for that many seconds in the second stage # of online recovery # 0 means no disconnection # -1 means immediate disconnection #------------------------------------------------------------------------------ # WATCHDOG #------------------------------------------------------------------------------ # - Enabling - use_watchdog = on # Activates watchdog # (change requires restart) # -Connection to up stream servers - 
trusted_servers = '' # trusted server list which are used # to confirm network connection # (hostA,hostB,hostC,...) # (change requires restart) ping_path = '/bin' # ping command path # (change requires restart) # - Watchdog communication Settings - wd_hostname = '192.168.222.141' # Host name or IP address of this watchdog # (change requires restart) wd_port = 9000 # port number for watchdog service # (change requires restart) wd_priority = 1 # priority of this watchdog in leader election # (change requires restart) wd_authkey = '' # Authentication key for watchdog communication # (change requires restart) wd_ipc_socket_dir = '/tmp' # Unix domain socket path for watchdog IPC socket # The Debian package defaults to # /var/run/postgresql # (change requires restart) # - Virtual IP control Setting - delegate_IP = '192.168.222.200' # delegate IP address # If this is empty, virtual IP never bring up. # (change requires restart) if_cmd_path = '/sbin' # path to the directory where if_up/down_cmd exists # (change requires restart) if_up_cmd = 'ip addr add $_IP_$/24 dev ens33 label ens33:0' # startup delegate IP command # (change requires restart) if_down_cmd = 'ip addr del $_IP_$/24 dev ens33' # shutdown delegate IP command # (change requires restart) arping_path = '/usr/sbin' # arping command path # (change requires restart) arping_cmd = 'arping -U $_IP_$ -w 1 -I ens33' # arping command # (change requires restart) # - Behaivor on escalation Setting - clear_memqcache_on_escalation = on # Clear all the query cache on shared memory # when standby pgpool escalate to active pgpool # (= virtual IP holder). # This should be off if client connects to pgpool # not using virtual IP. # (change requires restart) wd_escalation_command = '' # Executes this command at escalation on new active pgpool. # (change requires restart) wd_de_escalation_command = '' # Executes this command when master pgpool resigns from being master. 
# (change requires restart) # - Watchdog consensus settings for failover - failover_when_quorum_exists = on # Only perform backend node failover # when the watchdog cluster holds the quorum # (change requires restart) failover_require_consensus = on # Perform failover when majority of Pgpool-II nodes # aggrees on the backend node status change # (change requires restart) allow_multiple_failover_requests_from_node = off # A Pgpool-II node can cast multiple votes # for building the consensus on failover # (change requires restart) # - Lifecheck Setting - # -- common -- wd_monitoring_interfaces_list = '' # Comma separated list of interfaces names to monitor. # if any interface from the list is active the watchdog will # consider the network is fine # 'any' to enable monitoring on all interfaces except loopback # '' to disable monitoring # (change requires restart) wd_lifecheck_method = 'heartbeat' # Method of watchdog lifecheck ('heartbeat' or 'query' or 'external') # (change requires restart) wd_interval = 10 # lifecheck interval (sec) > 0 # (change requires restart) # -- heartbeat mode -- wd_heartbeat_port = 9694 # Port number for receiving heartbeat signal # (change requires restart) wd_heartbeat_keepalive = 2 # Interval time of sending heartbeat signal (sec) # (change requires restart) wd_heartbeat_deadtime = 30 # Deadtime interval for heartbeat signal (sec) # (change requires restart) heartbeat_destination0 = '192.168.222.142' # Host name or IP address of destination 0 # for sending heartbeat signal. # (change requires restart) heartbeat_destination_port0 = 9694 # Port number of destination 0 for sending # heartbeat signal. Usually this is the # same as wd_heartbeat_port. # (change requires restart) heartbeat_device0 = '' # Name of NIC device (such like 'eth0') # used for sending/receiving heartbeat # signal to/from destination 0. # This works only when this is not empty # and pgpool has root privilege. 
# (change requires restart) heartbeat_destination1 = '192.168.222.143' heartbeat_destination_port1 = 9694 heartbeat_device1 = '' # -- query mode -- wd_life_point = 3 # lifecheck retry times # (change requires restart) wd_lifecheck_query = 'SELECT 1' # lifecheck query to pgpool from watchdog # (change requires restart) wd_lifecheck_dbname = 'template1' # Database name connected for lifecheck # (change requires restart) wd_lifecheck_user = 'nobody' # watchdog user monitoring pgpools in lifecheck # (change requires restart) wd_lifecheck_password = '' # Password for watchdog user in lifecheck # Leaving it empty will make Pgpool-II to first look for the # Password in pool_passwd file before using the empty password # (change requires restart) # - Other pgpool Connection Settings - other_pgpool_hostname0 = '192.168.222.142' # Host name or IP address to connect to for other pgpool 0 # (change requires restart) other_pgpool_port0 = 9999 # Port number for other pgpool 0 # (change requires restart) other_wd_port0 = 9000 # Port number for other watchdog 0 # (change requires restart) other_pgpool_hostname1 = '192.168.222.143' other_pgpool_port1 = 9999 other_wd_port1 = 9000 #------------------------------------------------------------------------------ # OTHERS #------------------------------------------------------------------------------ relcache_expire = 0 # Life time of relation cache in seconds. # 0 means no cache expiration(the default). # The relation cache is used for cache the # query result against PostgreSQL system # catalog to obtain various information # including table structures or if it's a # temporary table or not. The cache is # maintained in a pgpool child local memory # and being kept as long as it survives. # If someone modify the table by using # ALTER TABLE or some such, the relcache is # not consistent anymore. # For this purpose, cache_expiration # controls the life time of the cache. relcache_size = 256 # Number of relation cache # entry. 
If you see frequently: # "pool_search_relcache: cache replacement happend" # in the pgpool log, you might want to increate this number. check_temp_table = on # If on, enable temporary table check in SELECT statements. # This initiates queries against system catalog of primary/master # thus increases load of master. # If you are absolutely sure that your system never uses temporary tables # and you want to save access to primary/master, you could turn this off. # Default is on. check_unlogged_table = on # If on, enable unlogged table check in SELECT statements. # This initiates queries against system catalog of primary/master # thus increases load of master. # If you are absolutely sure that your system never uses unlogged tables # and you want to save access to primary/master, you could turn this off. # Default is on. #------------------------------------------------------------------------------ # IN MEMORY QUERY MEMORY CACHE #------------------------------------------------------------------------------ memory_cache_enabled = off # If on, use the memory cache functionality, off by default # (change requires restart) memqcache_method = 'shmem' # Cache storage method. either 'shmem'(shared memory) or # 'memcached'. 'shmem' by default # (change requires restart) memqcache_memcached_host = 'localhost' # Memcached host name or IP address. Mandatory if # memqcache_method = 'memcached'. # Defaults to localhost. # (change requires restart) memqcache_memcached_port = 11211 # Memcached port number. Mondatory if memqcache_method = 'memcached'. # Defaults to 11211. # (change requires restart) memqcache_total_size = 67108864 # Total memory size in bytes for storing memory cache. # Mandatory if memqcache_method = 'shmem'. # Defaults to 64MB. # (change requires restart) memqcache_max_num_cache = 1000000 # Total number of cache entries. Mandatory # if memqcache_method = 'shmem'. # Each cache entry consumes 48 bytes on shared memory. # Defaults to 1,000,000(45.8MB). 
# (change requires restart) memqcache_expire = 0 # Memory cache entry life time specified in seconds. # 0 means infinite life time. 0 by default. # (change requires restart) memqcache_auto_cache_invalidation = on # If on, invalidation of query cache is triggered by corresponding # DDL/DML/DCL(and memqcache_expire). If off, it is only triggered # by memqcache_expire. on by default. # (change requires restart) memqcache_maxcache = 409600 # Maximum SELECT result size in bytes. # Must be smaller than memqcache_cache_block_size. Defaults to 400KB. # (change requires restart) memqcache_cache_block_size = 1048576 # Cache block size in bytes. Mandatory if memqcache_method = 'shmem'. # Defaults to 1MB. # (change requires restart) memqcache_oiddir = '/opt/pgpool-406/log/pgpool/oiddir' # Temporary work directory to record table oids # (change requires restart) white_memqcache_table_list = '' # Comma separated list of table names to memcache # that don't write to database # Regexp are accepted black_memqcache_table_list = '' # Comma separated list of table names not to memcache # that don't write to database # Regexp are accepted
2. /opt/pgpool-406/etc/failover.sh
#!/bin/bash
# failover.sh — run by Pgpool-II's failover_command when a backend node is
# detached. If the failed node was the primary, promote the new master
# (a standby) to primary over SSH; if only a standby failed, do nothing.
#
# NOTE(review): the scraped copy of this article had the text inside every
# ${...} expansion stripped out (bare "$" tokens). The variable names below
# are restored from the Pgpool-II 4.0.6 example-cluster documentation that
# this article follows.

set -o xtrace
exec > >(logger -i -p local1.info) 2>&1

# Special values substituted by Pgpool-II into failover_command:
#   %d = node id
#   %h = host name
#   %p = port number
#   %D = database cluster path
#   %m = new master node id
#   %H = hostname of the new master node
#   %M = old master node id
#   %P = old primary node id
#   %r = new master port number
#   %R = new master database cluster path
#   %% = '%' character

FAILED_NODE_ID="$1"
FAILED_NODE_HOST="$2"
FAILED_NODE_PORT="$3"
FAILED_NODE_PGDATA="$4"
NEW_MASTER_NODE_ID="$5"
NEW_MASTER_NODE_HOST="$6"
OLD_MASTER_NODE_ID="$7"
OLD_PRIMARY_NODE_ID="$8"
NEW_MASTER_NODE_PORT="$9"
NEW_MASTER_NODE_PGDATA="${10}"   # tenth positional parameter needs braces

PGHOME=/opt/PG-10.10

logger -i -p local1.info failover.sh: start: failed_node_id=$FAILED_NODE_ID old_primary_node_id=$OLD_PRIMARY_NODE_ID \
failed_host=$FAILED_NODE_HOST new_master_host=$NEW_MASTER_NODE_HOST

## Test passwrodless SSH
ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MASTER_NODE_HOST} -i ~/.ssh/id_rsa ls /tmp > /dev/null
if [ $? -ne 0 ]; then
    logger -i -p local1.error failover.sh: passwrodless SSH to postgres@${NEW_MASTER_NODE_HOST} failed. Please setup passwrodless SSH.
    exit 1
fi

# If a standby node (not the old primary) is the one that failed, skip failover.
if [ $FAILED_NODE_ID -ne $OLD_PRIMARY_NODE_ID ]; then
    logger -i -p local1.info failover.sh: Standby node is down. Skipping failover.
    exit 0
fi

# Promote standby node.
logger -i -p local1.info failover.sh: Primary node is down, promote standby node PostgreSQL@${NEW_MASTER_NODE_HOST}.

ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
    postgres@${NEW_MASTER_NODE_HOST} -i ~/.ssh/id_rsa ${PGHOME}/bin/pg_ctl -D ${NEW_MASTER_NODE_PGDATA} -w promote

if [ $? -ne 0 ]; then
    logger -i -p local1.error failover.sh: new_master_host=${NEW_MASTER_NODE_HOST} promote failed
    exit 1
fi

logger -i -p local1.info failover.sh: end: new_master_node_id=$NEW_MASTER_NODE_ID started as the primary node
exit 0
3. /opt/pgpool-406/etc/follow_master.sh
#!/bin/bash
# follow_master.sh — run by Pgpool-II's follow_master_command after failover
# to re-synchronize each remaining standby with the new primary:
# stop it, re-clone it with pg_basebackup, restart it, and re-attach it to
# Pgpool-II with pcp_attach_node.
#
# NOTE(review): the scraped copy of this article had the text inside every
# ${...} expansion (and the awk program) stripped out. The names below are
# restored from the Pgpool-II 4.0.6 example-cluster documentation that this
# article follows, keeping the article's local paths and settings.

set -o xtrace
exec > >(logger -i -p local1.info) 2>&1

# Special values substituted by Pgpool-II into follow_master_command:
#   %d = node id
#   %h = host name
#   %p = port number
#   %D = database cluster path
#   %m = new master node id
#   %M = old master node id
#   %H = new master node host name
#   %P = old primary node id
#   %R = new master database cluster path
#   %r = new master port number
#   %% = '%' character

FAILED_NODE_ID="$1"
FAILED_NODE_HOST="$2"
FAILED_NODE_PORT="$3"
FAILED_NODE_PGDATA="$4"
NEW_MASTER_NODE_ID="$5"
OLD_MASTER_NODE_ID="$6"
NEW_MASTER_NODE_HOST="$7"
OLD_PRIMARY_NODE_ID="$8"
NEW_MASTER_NODE_PORT="$9"
NEW_MASTER_NODE_PGDATA="${10}"   # tenth positional parameter needs braces

PGHOME=/opt/PG-10.10
ARCHIVEDIR=/opt/PG-10.10/archivedir
REPL_USER=repl
PCP_USER=pgpool
PGPOOL_PATH=/opt/pgpool-406/bin
PCP_PORT=9898

# Recovery the slave from the new primary
logger -i -p local1.info follow_master.sh: start: synchronize the Standby node PostgreSQL@${FAILED_NODE_HOST} with the new Primary node PostgreSQL@${NEW_MASTER_NODE_HOST}

## Test passwrodless SSH
ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MASTER_NODE_HOST} -i ~/.ssh/id_rsa ls /tmp > /dev/null
if [ $? -ne 0 ]; then
    logger -i -p local1.error follow_master.sh: passwrodless SSH to postgres@${NEW_MASTER_NODE_HOST} failed. Please setup passwrodless SSH.
    exit 1
fi

## Get PostgreSQL major version (e.g. "10" from "initdb (PostgreSQL) 10.10")
PGVERSION=`${PGHOME}/bin/initdb -V | awk '{print $3}' | sed 's/\..*//' | sed 's/\([0-9]*\)[a-zA-Z].*/\1/'`
if [ $PGVERSION -ge 12 ]; then
    # PG 12+ has no recovery.conf; use an included conf file + standby.signal
    RECOVERYCONF=${FAILED_NODE_PGDATA}/myrecovery.conf
else
    RECOVERYCONF=${FAILED_NODE_PGDATA}/recovery.conf
fi

# Check the status of the (detached) standby
ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
    postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa ${PGHOME}/bin/pg_ctl -w -D ${FAILED_NODE_PGDATA} status

## If Standby is running, re-clone it from the new primary with pg_basebackup.
if [ $? -eq 0 ]; then

    # Execute pg_basebackup on the standby host. Note: the $(...) command
    # substitutions inside this quoted string are expanded locally (on
    # purpose) before the command is sent over SSH.
    ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa "

        set -o errexit

        ${PGHOME}/bin/pg_ctl -w -m f -D ${FAILED_NODE_PGDATA} stop

        rm -rf ${FAILED_NODE_PGDATA}
        rm -rf ${ARCHIVEDIR}/*

        ${PGHOME}/bin/pg_basebackup -h ${NEW_MASTER_NODE_HOST} -U ${REPL_USER} -p ${NEW_MASTER_NODE_PORT} -D ${FAILED_NODE_PGDATA} -X stream

        if [ ${PGVERSION} -ge 12 ]; then
            sed -i -e \"\\\$ainclude_if_exists = '$(echo ${RECOVERYCONF} | sed -e 's/\//\\\//g')'\" \
                   -e \"/^include_if_exists = '$(echo ${RECOVERYCONF} | sed -e 's/\//\\\//g')'/d\" ${FAILED_NODE_PGDATA}/postgresql.conf
        fi

        cat > ${RECOVERYCONF} << EOT
primary_conninfo = 'host=${NEW_MASTER_NODE_HOST} port=${NEW_MASTER_NODE_PORT} user=${REPL_USER} passfile=''/home/postgres/.pgpass'''
recovery_target_timeline = 'latest'
restore_command = 'scp ${NEW_MASTER_NODE_HOST}:${ARCHIVEDIR}/%f %p'
EOT

        if [ ${PGVERSION} -ge 12 ]; then
            touch ${FAILED_NODE_PGDATA}/standby.signal
        else
            echo \"standby_mode = 'on'\" >> ${RECOVERYCONF}
        fi
    "

    if [ $? -ne 0 ]; then
        logger -i -p local1.error follow_master.sh: end: pg_basebackup failed
        exit 1
    fi

    # start Standby node on ${FAILED_NODE_HOST}
    ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
        postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa ${PGHOME}/bin/pg_ctl -l /dev/null -w -D ${FAILED_NODE_PGDATA} start

    # If the Standby started successfully, attach this node
    if [ $? -eq 0 ]; then

        # Run pcp_attach_node to attach Standby node to Pgpool-II.
        ${PGPOOL_PATH}/pcp_attach_node -w -h localhost -U ${PCP_USER} -p ${PCP_PORT} -n ${FAILED_NODE_ID}

        if [ $? -ne 0 ]; then
            logger -i -p local1.error follow_master.sh: end: pcp_attach_node failed
            exit 1
        fi

    else
        logger -i -p local1.error follow_master.sh: end: follow master command failed
        exit 1
    fi

else
    logger -i -p local1.info follow_master.sh: failed_nod_id=${FAILED_NODE_ID} is not running. skipping follow master command
    exit 0
fi

logger -i -p local1.info follow_master.sh: end: follow master command complete
exit 0
4. /opt/PG-10.10/data/recovery_1st_stage
#!/bin/bash
# recovery_1st_stage — executed by Pgpool-II online recovery (via the
# pgpool_recovery extension, so it runs on the primary from inside $PGDATA)
# to re-create a Standby node's data directory from the primary with
# pg_basebackup.
#
# NOTE(review): the scraped copy of this article had the text inside every
# ${...} expansion (and the awk programs) stripped out. The names below are
# restored from the Pgpool-II 4.0.6 example-cluster documentation, keeping
# the article's customizations (fixed DEST_NODE_PORT=5432, primary host
# detected from ens33, /var/lib/pgsql/.pgpass).

set -o xtrace
exec > >(logger -i -p local1.info) 2>&1

PRIMARY_NODE_PGDATA="$1"
DEST_NODE_HOST="$2"
DEST_NODE_PGDATA="$3"
PRIMARY_NODE_PORT="$4"
DEST_NODE_PORT=5432

source /etc/profile
# Primary host = this host's address on interface ens33
# (assumes interface name ens33 and "inet <addr>" ifconfig output — TODO
# confirm on each node; `hostname -i` is the documented alternative)
PRIMARY_NODE_HOST=`ifconfig ens33 | grep "inet " | awk '{print $2}'`
PGHOME=/opt/PG-10.10
ARCHIVEDIR=/opt/PG-10.10/archivedir
REPL_USER=repl

logger -i -p local1.info recovery_1st_stage: start: pg_basebackup for Standby node PostgreSQL@{$DEST_NODE_HOST}

## Test passwrodless SSH
ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${DEST_NODE_HOST} -i ~/.ssh/id_rsa ls /tmp > /dev/null
if [ $? -ne 0 ]; then
    logger -i -p local1.error recovery_1st_stage: passwrodless SSH to postgres@${DEST_NODE_HOST} failed. Please setup passwrodless SSH.
    exit 1
fi

## Get PostgreSQL major version (e.g. "10" from "initdb (PostgreSQL) 10.10")
PGVERSION=`${PGHOME}/bin/initdb -V | awk '{print $3}' | sed 's/\..*//' | sed 's/\([0-9]*\)[a-zA-Z].*/\1/'`
if [ $PGVERSION -ge 12 ]; then
    # PG 12+ has no recovery.conf; use an included conf file + standby.signal
    RECOVERYCONF=${DEST_NODE_PGDATA}/myrecovery.conf
else
    RECOVERYCONF=${DEST_NODE_PGDATA}/recovery.conf
fi

## Execute pg_basebackup on the destination host to recover the Standby node.
## The $(...) substitutions inside this quoted string expand locally (on
## purpose) before the command is sent over SSH.
ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@$DEST_NODE_HOST -i ~/.ssh/id_rsa "

    set -o errexit

    rm -rf $DEST_NODE_PGDATA
    rm -rf $ARCHIVEDIR/*

    ${PGHOME}/bin/pg_basebackup -h ${PRIMARY_NODE_HOST} -U ${REPL_USER} -p ${PRIMARY_NODE_PORT} -D ${DEST_NODE_PGDATA} -X stream

    if [ ${PGVERSION} -ge 12 ]; then
        sed -i -e \"\\\$ainclude_if_exists = '$(echo ${RECOVERYCONF} | sed -e 's/\//\\\//g')'\" \
               -e \"/^include_if_exists = '$(echo ${RECOVERYCONF} | sed -e 's/\//\\\//g')'/d\" ${DEST_NODE_PGDATA}/postgresql.conf
    fi

    cat > ${RECOVERYCONF} << EOT
primary_conninfo = 'host=${PRIMARY_NODE_HOST} port=${PRIMARY_NODE_PORT} user=${REPL_USER} passfile=''/var/lib/pgsql/.pgpass'''
recovery_target_timeline = 'latest'
restore_command = 'scp ${PRIMARY_NODE_HOST}:${ARCHIVEDIR}/%f %p'
EOT

    if [ ${PGVERSION} -ge 12 ]; then
        touch ${DEST_NODE_PGDATA}/standby.signal
    else
        echo \"standby_mode = 'on'\" >> ${RECOVERYCONF}
    fi

    sed -i \"s/#*port = .*/port = ${DEST_NODE_PORT}/\" ${DEST_NODE_PGDATA}/postgresql.conf
"

if [ $? -ne 0 ]; then
    logger -i -p local1.error recovery_1st_stage: end: pg_basebackup failed. online recovery failed
    exit 1
fi

logger -i -p local1.info recovery_1st_stage: end: recovery_1st_stage complete
exit 0
5. /opt/PG-10.10/data/pgpool_remote_start
#!/bin/bash
# pgpool_remote_start — run by Pgpool-II after recovery_1st_stage to start
# the freshly recovered Standby node's PostgreSQL instance over SSH.
#
# NOTE(review): the scraped copy of this article had the text inside two
# ${...} expansions stripped out (the SSH-connectivity check); restored from
# the Pgpool-II 4.0.6 example-cluster documentation.

set -o xtrace
exec > >(logger -i -p local1.info) 2>&1

PGHOME=/opt/PG-10.10
DEST_NODE_HOST="$1"
DEST_NODE_PGDATA="$2"

logger -i -p local1.info pgpool_remote_start: start: remote start Standby node PostgreSQL@$DEST_NODE_HOST

## Test passwrodless SSH
ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${DEST_NODE_HOST} -i ~/.ssh/id_rsa ls /tmp > /dev/null
if [ $? -ne 0 ]; then
    logger -i -p local1.error pgpool_remote_start: passwrodless SSH to postgres@${DEST_NODE_HOST} failed. Please setup passwrodless SSH.
    exit 1
fi

## Start Standby node
ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@$DEST_NODE_HOST -i ~/.ssh/id_rsa "
    $PGHOME/bin/pg_ctl -l /dev/null -w -D $DEST_NODE_PGDATA start
"
if [ $? -ne 0 ]; then
    logger -i -p local1.error pgpool_remote_start: PostgreSQL@$DEST_NODE_HOST start failed.
    exit 1
fi

logger -i -p local1.info pgpool_remote_start: end: PostgreSQL@$DEST_NODE_HOST started successfully.
exit 0

Reference resources:
https://www.pgpool.net/docs/pgpool-II-4.0.6/en/html/example-cluster.html

17 October 2019, 12:23 | Views: 3933

Add new comment

For adding a comment, please log in
or create account

0 comments