tk
apnh9****@gmail*****
Wed Nov 4 10:28:27 JST 2015
Hello, my name is Sasa.

We need to change the hostnames in the Pacemaker environment described below, so we first tried the change in an evaluation environment.

・OS
  CentOS 6.5
・SW
  pacemaker-1.1.12-1.el6.x86_64
  corosync-2.3.4-1.el6.x86_64
  drbd84-utils-8.9.2-1.el6.elrepo.x86_64
  kmod-drbd84-8.4.5-1.el6.elrepo.x86_64

The procedure was as follows (sketched as concrete commands below):

① Stop the Pacemaker service
② Change the hostname
③ Update the hostname in the "on" parameters of drbd.res
④ Reboot the OS
⑤ Start Pacemaker

After carrying out these steps, we confirmed that Pacemaker, Corosync, and DRBD all start normally, and that the hostnames in the node entries shown by crm configure show have been updated to the new names (the check commands are sketched at the end of this post).
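For concreteness, the steps above correspond roughly to the following commands. This is only a sketch: the new hostname "test-node1-new" is a placeholder, the resource file path /etc/drbd.d/drbd.res is an assumption, and the service names assume the stock init scripts on CentOS 6.

  # ① stop Pacemaker (and the Corosync layer underneath it)
  service pacemaker stop
  service corosync stop

  # ② change the hostname, both for the running system and persistently
  hostname test-node1-new
  vi /etc/sysconfig/network          # set HOSTNAME=test-node1-new

  # ③ rename the matching "on test-node1 { ... }" sections in drbd.res
  vi /etc/drbd.d/drbd.res            # path assumed

  # ④ reboot the OS
  reboot

  # ⑤ after the reboot, start the cluster stack again
  service corosync start
  service pacemaker start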
When changing hostnames in a Pacemaker environment, are there any other parameters or configuration files that should be changed, or any other points to be careful about? Any advice would be much appreciated.

For reference, the crm configuration, corosync.conf, and drbd.res are shown below.

●crm
node 1: test-node1
node 2: test-node2
primitive r0_drbd ocf:linbit:drbd \
    meta migration-threshold=3 \
    params drbd_resource=r0 \
    op start interval=0s timeout=240s on-fail=restart \
    op stop interval=0s timeout=240s on-fail=block \
    op monitor interval=45s role=Master timeout=240s on-fail=restart \
    op monitor interval=46s role=Slave timeout=240s on-fail=restart
primitive r0_fs Filesystem \
    meta migration-threshold=2 \
    params device="/dev/drbd0" directory="/opt/test_vol1" fstype=ext4 \
    op start interval=0s timeout=60s on-fail=restart \
    op stop interval=0s timeout=60s on-fail=block \
    op monitor interval=45s timeout=60s on-fail=restart
primitive ping ocf:pacemaker:ping \
    meta migration-threshold=2 \
    params host_list=10.18.49.60 multiplier=100 dampen=0 \
    op start interval=0s timeout=60s on-fail=restart \
    op monitor interval=45s timeout=60s on-fail=restart \
    op stop interval=0s timeout=60s on-fail=block
primitive ldap_drbd ocf:linbit:drbd \
    meta migration-threshold=2 \
    params drbd_resource=r1 \
    op start interval=0s timeout=240s on-fail=restart \
    op stop interval=0s timeout=240s on-fail=block \
    op monitor interval=45s role=Master timeout=240s on-fail=restart \
    op monitor interval=46s role=Slave timeout=240s on-fail=restart
primitive r1_fs Filesystem \
    meta migration-threshold=2 \
    params device="/dev/drbd1" directory="/opt/test_vol2" fstype=ext4 \
    op start interval=0s timeout=60s on-fail=restart \
    op stop interval=0s timeout=60s on-fail=block \
    op monitor interval=45s timeout=60s on-fail=restart
primitive vip IPaddr2 \
    meta migration-threshold=2 \
    params ip=10.18.49.38 nic=eth0 cidr_netmask=26 iflabel=0 \
    op start interval=0s timeout=60s on-fail=restart \
    op stop interval=0s timeout=60s on-fail=block \
    op monitor interval=45s timeout=60s on-fail=restart
primitive vip_check VIPcheck \
    meta migration-threshold=2 \
    params target_ip=10.18.49.38 count=3 wait=5 \
    op start interval=0s timeout=60s on-fail=restart \
    op stop interval=0s timeout=60s on-fail=ignore \
    op monitor interval=45s timeout=60s on-fail=restart
group test_grp vip_check r0_fs r1_fs vip
ms r0_ms r0_drbd \
    meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
ms r1_ms ldap_drbd \
    meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
clone ping_cln ping \
    meta clone-max=2 clone-node-max=1 target-role=Started
colocation col_drbd1 inf: test_grp r0_ms:Master
colocation col_drbd2 inf: test_grp r1_ms:Master
colocation col_ping inf: test_grp ping_cln
order drbd_odr inf: r0_ms:promote r1_ms:start
order mail_odr inf: r1_ms:promote test_grp:start
property cib-bootstrap-options: \
    dc-version=1.1.12-561c4cf \
    cluster-infrastructure=corosync \
    stonith-enabled=false \
    no-quorum-policy=ignore \
    pe-input-series-max=100 \
    pe-error-series-max=100 \
    pe-warn-series-max=100 \
    maintenance-mode=false
rsc_defaults rsc-options: \
    resource-stickiness=INFINITY

●corosync.conf
# Please read the corosync.conf.5 manual page
totem {
    version: 2
    token: 1000
    crypto_cipher: none
    crypto_hash: none
    rrp_mode: active
    interface {
        ringnumber: 0
        bindnetaddr: 10.18.49.0
        mcastport: 5405
        ttl: 1
    }
    interface {
        ringnumber: 1
        bindnetaddr: 192.168.10.0
        mcastport: 5407
        ttl: 1
    }
    transport: udpu
}
logging {
    fileline: off
    to_logfile: yes
    to_syslog: no
    logfile: /var/log/cluster/corosync.log
    debug: off
    timestamp: on
    logger_subsys {
        subsys: QUORUM
        debug: off
    }
}
nodelist {
    node {
        ring0_addr: 10.18.49.36
        ring1_addr: 192.168.10.21
        nodeid: 1
    }
    node {
        ring0_addr: 10.18.49.37
        ring1_addr: 192.168.10.22
        nodeid: 2
    }
}
quorum {
    # Enable and configure quorum subsystem (default: off)
    # see also corosync.conf.5 and votequorum.5
    provider: corosync_votequorum
    expected_votes: 2
    two_node: 1
}

●drbd.res
resource r0 {
    protocol C;
    disk {
        on-io-error detach;
    }
    syncer {
        rate 30M;
        verify-alg sha1;
    }
    on test-node1 {
        device    /dev/drbd0;
        disk      /dev/sdc;
        address   172.21.50.10:7789;
        meta-disk internal;
    }
    on test-node2 {
        device    /dev/drbd0;
        disk      /dev/sdc;
        address   172.21.50.11:7789;
        meta-disk internal;
    }
}
resource r1 {
    protocol C;
    disk {
        on-io-error detach;
    }
    syncer {
        rate 30M;
        verify-alg sha1;
    }
    on test-node1 {
        device    /dev/drbd1;
        disk      /dev/sdd;
        address   172.21.50.10:7790;
        meta-disk internal;
    }
    on test-node2 {
        device    /dev/drbd1;
        disk      /dev/sdd;
        address   172.21.50.11:7790;
        meta-disk internal;
    }
}
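The post-change checks mentioned above were along the following lines (again only a sketch; the exact output differs between versions):

  crm_mon -1             # both nodes should be Online under their new names
  crm configure show     # node entries should carry the new hostnames
  cat /proc/drbd         # cs:Connected, ds:UpToDate/UpToDate expected
  drbdadm role all       # Primary/Secondary roles as expected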