corosync(pacemaker)+drbd+web(apache)

Environment:
    vm1-hong:172.16.3.2/16
    vm2-ning:172.16.3.10/16
    VIP:172.16.3.100/16
I. DRBD installation:
Scenario: configure a primary/secondary DRBD device. (With a plain primary/secondary pair in a high-availability cluster, failover between the nodes is relatively slow.)

Prerequisites:
    1. The clocks of the two nodes must be synchronized, and the nodes must be able to reach each other by hostname (see the sketch below).
    2. The disk devices prepared on the two nodes must be the same size.
    3. The system architecture must be the same on both nodes.
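    A minimal sketch of meeting prerequisite 1 on both nodes (the NTP server address 172.16.0.1 is an assumption; substitute whatever time source your environment uses):
        # cat >> /etc/hosts << EOF
        172.16.3.2    hong
        172.16.3.10   ning
        EOF
        # ntpdate 172.16.0.1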
Packages:
drbd-8.4.3-33.el6.x86_64.rpm                   
drbd-kmdl-2.6.32-431.el6-8.4.3-33.el6.x86_64.rpm    

1. Install DRBD (install it on both nodes; the packages have no other dependencies, so either method works)
    (1) # yum install drbd*
    (2) # rpm -ivh drbd*

Example configuration files
2. Define the global configuration file
    # cat /etc/drbd.d/global_common.conf
    global {
            usage-count no;
            # minor-count dialog-refresh disable-ip-verification
    }

    common {
            protocol C;             

            handlers {
                    # pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
                    # pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
                    # local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
                    # fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
                    # split-brain "/usr/lib/drbd/notify-split-brain.sh root";
                    # out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";
                    # before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";
                    # after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;
            }

            startup {
                    #wfc-timeout 120;
                    #degr-wfc-timeout 120;
            }

            disk {
                    on-io-error detach;                  
                    #fencing resource-only;
            }

            net {
                    cram-hmac-alg "sha1";                  
                    shared-secret "mydrbdlab";            
            }

            syncer {
                    rate 1000M;                        
            }
    }

3. Resource-specific configuration

    (1) Add a disk partition of the same size on each of the two nodes
        # fdisk /dev/sda
        n
        p
        3
        +10G
        w
        # kpartx -af /dev/sda
        # partx -a /dev/sda
        # cat /proc/partitions
            major minor  #blocks  name

               8        0  125829120 sda
               8        1     204800 sda1
               8        2   62914560 sda2
               8        3   10489446 sda3
             253        0   20971520 dm-0
             253        1    2097152 dm-1
             253        2   10485760 dm-2
             253        3   20971520 dm-3
    (2) Add the resource configuration file under /etc/drbd.d/ (this must be done on both nodes; see the copy sketch after the file)
        [root@ning drbd.d]# cat web.res    (you create this file yourself)
        resource web {                    # the resource name is web
          protocol C;
          on ning {                       # section for node ning (hostname)
          device   /dev/drbd0;
          disk     /dev/sda3;
          address 172.16.3.10:7789;       # its IP address and port
          meta-disk internal;
            }
          on hong {                       # section for node hong (hostname)
          device   /dev/drbd0;
          disk     /dev/sda3;
          address 172.16.3.2:7789;        # its IP address and port
          meta-disk internal;
            }
        }
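    Since the configuration must be identical on both nodes, one way to keep them in sync (a sketch; assumes SSH access between the nodes) is to copy the files from the node where you edited them:
        [root@ning drbd.d]# scp /etc/drbd.d/global_common.conf /etc/drbd.d/web.res hong:/etc/drbd.d/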
4. Initialize the defined resource on both nodes and start the service:

    1) Initialize the resource; run on both Node1 and Node2:
    # drbdadm create-md web    (web is the resource name; this initializes the DRBD metadata)

    2) Start the service; run on both Node1 and Node2:
    /etc/init.d/drbd start

5. Start DRBD on each node and check its status:
    # service drbd start
    3) Check the startup status:
    # cat /proc/drbd
    version: 8.4.3 (api:1/proto:86-101)
    GIT-hash: 89a294209144b68adb3ee85a73221f964d3ee515 build by gardner@, 2013-11-29 12:28:00
     0: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r-----    (the status shows that both nodes are currently Secondary)
        ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:10489084

    You can also check with the drbd-overview command:
    # drbd-overview
        0:web/0  Connected Secondary/Secondary Inconsistent/Inconsistent C r-----    (this shows the DRBD resource name and the state of both nodes)
6. How do you promote a Secondary node to Primary?
    The output above shows that both nodes are currently in the Secondary state.
    So the next step is to set one of the nodes to Primary.
    On the node that is to become Primary, run:
      # drbdadm primary --force <resource>    (just give the resource name)

      Note: you can also make a node Primary by running the following command on it:
         # drbdadm -- --overwrite-data-of-peer primary web
     [root@ning drbd.d]# drbdadm primary --force web     (force-promotes node ning to Primary)
     [root@ning drbd.d]# cat /proc/drbd  
        version: 8.4.3 (api:1/proto:86-101)
        GIT-hash: 89a294209144b68adb3ee85a73221f964d3ee515 build by gardner@, 2013-11-29 12:28:00
         0: cs:SyncSource ro:Primary/Secondary ds:UpToDate/Inconsistent C r---n-    (here you can see the state of both nodes)
            ns:429840 nr:0 dw:0 dr:437920 al:0 bm:26 lo:1 pe:2 ua:8 ap:0 ep:1 wo:f oos:10061052
            [>....................] sync'ed:  4.1% (9824/10240)M    (the device is being synchronized block by block; how fast this goes depends on network bandwidth)
            finish: 0:03:07 speed: 53,504 (53,504) K/sec
        You can run # watch -n1 'cat /proc/drbd' to watch the synchronization progress in real time. (The device cannot be mounted and used until synchronization completes.)
    [root@ning drbd.d]# drbd-overview
    0:web/0  Connected Primary/Secondary UpToDate/UpToDate C r-----    (checking again, both sides now show UpToDate/UpToDate)
7. Create the filesystem (the DRBD device can only be mounted on the Primary node)
        [root@ning drbd.d]# cat /proc/partitions    (shows the block devices currently known to the system)
        major minor  #blocks  name

           8        0  125829120 sda
           8        1     204800 sda1
           8        2   62914560 sda2
           8        3   10489446 sda3
         253        0   20971520 dm-0
         253        1    2097152 dm-1
         253        2   10485760 dm-2
         253        3   20971520 dm-3
         147        0   10489084 drbd0
    Format the DRBD device
        [root@ning drbd.d]# mke2fs -t ext4 /dev/drbd0    (format the DRBD device; run this on the Primary node only, the Secondary is formatted through replication)
    Mount the filesystem
        [root@ning drbd.d]# mount /dev/drbd0 /mnt
    Test whether data is replicated to the Secondary node:
        [root@ning mnt]# cp /etc/fstab /mnt    (copy a file as test data)
        [root@ning mnt]# ls /mnt    (check the copied file)
        fstab  lost+found
8. How do you demote the Primary node that is currently in use to Secondary?
        [root@ning /]# umount /mnt              (first unmount the device)
        [root@ning /]# drbdadm secondary web    (demote the current node to Secondary)
        [root@ning /]# drbd-overview            (check the state of both nodes)
          0:web/0  Connected Secondary/Secondary UpToDate/UpToDate C r-----    (both nodes are now Secondary)
    How do you promote the Secondary node to Primary?
        [root@hong drbd.d]# drbdadm primary web    (promote this node to Primary)
        [root@hong drbd.d]# drbd-overview          (check the state of both nodes)
          0:web/0  Connected Primary/Secondary UpToDate/UpToDate C r-----    (this host is now Primary)
        [root@hong drbd.d]# mount /dev/drbd0 /mnt    (mount the drbd0 device)
        [root@hong drbd.d]# ls /mnt                  (check whether the file created on node ning is present)
        fstab  lost+found
*********************************************************************************************************************

Configuration example: highly available web and DRBD services with corosync+pacemaker+drbd:

DRBD high-availability configuration example. (Run # chkconfig drbd off so the resource does not start at boot; a cluster-managed resource must never auto-start. See the sketch below.)
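    A minimal sketch of disabling autostart on both nodes (including httpd here is an assumption: any service pacemaker is going to manage should also be kept out of the boot sequence):
        # chkconfig drbd off
        # chkconfig httpd off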

1. Install corosync (done with ansible). Run this from node liang (the corosync packages are included with the ansible playbook material).
        # yum install ansible
        [root@liang corosync]# vim /etc/ansible/hosts        
        [hbhosts]
        172.16.3.2 ansible_ssh_pass=mageedu
        172.16.3.10 ansible_ssh_pass=mageedu
        [root@liang corosync]# ansible-playbook ha.yml
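        The ha.yml playbook itself is not shown here; before running it, a quick sanity check that ansible can reach both nodes (a sketch using ansible's built-in ping module):
        [root@liang corosync]# ansible hbhosts -m ping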

2. Check the corosync status on nodes hong and ning.
    [root@hong drbd.d]# crm status    (after installation, check whether both nodes are online)
    Last updated: Sun Aug 17 17:51:43 2014
    Last change: Sun Aug 17 17:46:54 2014 via crmd on hong
    Stack: classic openais (with plugin)
    Current DC: hong - partition with quorum
    Version: 1.1.10-14.el6-368c726
    2 Nodes configured, 2 expected votes
    0 Resources configured

    Online: [ hong ning ]   (both nodes are shown online)
3. Define clone resources

    Any master/slave resource is first of all a clone.
        Some of the relevant parameters (a generic clone example follows this list):
        clone-max: at most how many copies of the resource are cloned across all nodes (typically bounded by the number of nodes)
        clone-node-max: how many clone copies may run on a single node
        notify: whether the peers are notified when a clone copy is started or stopped (you normally want this); default is true
        globally-unique: whether each clone copy is unique; default is true
        ordered: whether the clone copies are started one after another or all in parallel; default is true (sequential)
        interleave: usually left at its default

        master-max: how many copies may be promoted to master; default is 1
        master-node-max: at most how many master copies are allowed on a single node
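    A minimal sketch of a plain (non-master) clone definition using the parameters above; the names cl_dummy and mydummy are hypothetical placeholders for an already defined primitive:
        crm(live)configure# clone cl_dummy mydummy meta clone-max=2 clone-node-max=1 notify=true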
    [root@hong drbd.d]# crm
    crm(live)# ra
    crm(live)ra# classes
        lsb
        ocf / heartbeat linbit pacemaker
        service
        stonith      
    crm(live)ra# list ocf linbit    (linbit is the company behind DRBD)
        drbd            (the drbd resource agent)
    crm(live)ra# list ocf
        CTDB           ClusterMon     Dummy          Filesystem     HealthCPU      HealthSMART
        IPaddr         IPaddr2        IPsrcaddr      LVM            MailTo         Route
        SendArp        Squid          Stateful       SysInfo        SystemHealth   VirtualDomain
        Xinetd         apache         conntrackd     controld       dhcpd          drbd    (drbd is listed here)
        ethmonitor     exportfs       mysql          mysql-proxy    named          nfsserver
        nginx          pgsql          ping           pingd          postfix        remote
        rsyncd         rsyslog        slapd          symlink        tomcat 

    crm(live)ra# meta ocf:linbit:drbd    (shows the metadata of the drbd resource agent)

(1) Define the primitive resource (for the master/slave set)
    crm(live)configure# property stonith-enabled=false    (on a freshly installed cluster these two global properties need to be set)
    crm(live)configure# property no-quorum-policy=ignore
    crm(live)configure# primitive webdrbd ocf:linbit:drbd params drbd_resource=web op monitor role=Master interval=30s timeout=20s op monitor role=Slave interval=40s timeout=20s op start timeout=240s op stop timeout=100s
    crm(live)configure# verify
        Command explanation:
            webdrbd: the name of the resource being defined
            ocf:linbit:drbd params: the drbd resource agent, followed by its parameters
            drbd_resource=web: the DRBD resource named web
            op monitor role=Master interval=30s timeout=20s: monitor the Master role every 30 seconds with a 20-second timeout
            op monitor role=Slave interval=40s timeout=20s: monitor the Slave role every 40 seconds with a 20-second timeout
            op start timeout=240s op stop timeout=100s: allow 240s for DRBD to start and 100s for it to stop

(2) Define the master/slave clone resource
    crm(live)configure# master ms_webdrbd webdrbd meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
    crm(live)configure# verify

        Command explanation:
            ms_webdrbd: the name of the master/slave (cloned) resource
            webdrbd: the primitive resource being cloned into ms_webdrbd
            meta: introduces the meta attributes
            master-max=1: how many copies may be promoted to master
            master-node-max=1: how many master copies may run on each node
            clone-max=2: total number of clone copies
            clone-node-max=1: how many clone copies may run on each node
            notify=true: whether to send notifications (this is the default, so it can be omitted)
    crm(live)configure# commit    (commit the configuration)
    crm(live)configure# show      (review the configuration)
        node hong
        node ning
        primitive webdrbd ocf:linbit:drbd \
            params drbd_resource="web" \
            op monitor role="Master" interval="30s" timeout="20s" \
            op monitor role="Slave" interval="40s" timeout="20s" \
            op start timeout="240s" interval="0" \
            op stop timeout="100s" interval="0"
        ms ms_webdrbd webdrbd \
            meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
        property $id="cib-bootstrap-options" \
            dc-version="1.1.10-14.el6-368c726" \
            cluster-infrastructure="classic openais (with plugin)" \
            expected-quorum-votes="2" \
            stonith-enabled="false" \
            no-quorum-policy="ignore"       

    crm(live)configure# cd
    crm(live)# status        (check the status)
        Last updated: Sun Aug 17 18:55:26 2014
        Last change: Sun Aug 17 18:54:21 2014 via cibadmin on hong
        Stack: classic openais (with plugin)
        Current DC: hong - partition with quorum
        Version: 1.1.10-14.el6-368c726
        2 Nodes configured, 2 expected votes
        2 Resources configured

        Online: [ hong ning ]

         Master/Slave Set: ms_webdrbd [webdrbd]
             Masters: [ hong ]    (node hong is the master)
             Slaves: [ ning ]     (node ning is the slave)

4. Test whether the corosync+drbd setup works
    Test: check whether node hong has been made the Primary node
        [root@hong ~]# drbd-overview   (verify on node hong that it is the Primary, as configured)
          0:web/0  Connected Primary/Secondary UpToDate/UpToDate C r-----
    Put one node into standby:
        crm(live)# node standby    (put node hong into standby)
        crm(live)# status
        Last updated: Sun Aug 17 19:01:07 2014
        Last change: Sun Aug 17 19:00:41 2014 via crm_attribute on hong
        Stack: classic openais (with plugin)
        Current DC: hong - partition with quorum
        Version: 1.1.10-14.el6-368c726
        2 Nodes configured, 2 expected votes
        2 Resources configured

        Node hong: standby
        Online: [ ning ]    (only node ning is online now)

         Master/Slave Set: ms_webdrbd [webdrbd]
             Masters: [ ning ]    (node ning automatically became the master)
             Stopped: [ hong ]    (node hong has been stopped)

        crm(live)# node online    (bring node hong back online)
        crm(live)# status
        Last updated: Sun Aug 17 19:03:38 2014
        Last change: Sun Aug 17 19:03:37 2014 via crm_attribute on hong
        Stack: classic openais (with plugin)
        Current DC: hong - partition with quorum
        Version: 1.1.10-14.el6-368c726
        2 Nodes configured, 2 expected votes
        2 Resources configured

        Online: [ hong ning ]    (both nodes are online again)

         Master/Slave Set: ms_webdrbd [webdrbd]
             Masters: [ ning ]    (node ning is still the master)
             Slaves: [ hong ]     (node hong is the slave)
        Check the DRBD state of both nodes on node ning:
        [root@ning yum.repos.d]# drbd-overview
            0:web/0  Connected Primary/Secondary UpToDate/UpToDate C r-----

5. Create a cluster service that automatically mounts the web filesystem on the Primary node. (A handy command for removing a defined resource: crm(live)configure# delete webstore)
    (1) Define the filesystem resource
        crm(live)configure# primitive webstore ocf:heartbeat:Filesystem params device="/dev/drbd0" directory="/var/www/html" fstype="ext4" op monitor interval=60s timeout=40s op start timeout=60s op stop timeout=60s
            Command explanation
             webstore: the name of the filesystem resource
             ocf:heartbeat:Filesystem params: the Filesystem resource agent, followed by its parameters
             device="/dev/drbd0": the DRBD device to mount
             directory="/var/www/html": the mount point
             fstype="ext4": the filesystem type
             op monitor interval=60s timeout=40s: monitor every 60 seconds with a 40-second timeout
             op start timeout=60s: start timeout of 60 seconds
             op stop timeout=60s: stop timeout of 60 seconds
     (2) Define constraints for the filesystem resource
        Colocate webstore with the master of ms_webdrbd:
            crm(live)configure# colocation webstore_with_ms_webdrbd_master inf: webstore ms_webdrbd:Master
            crm(live)configure# verify
        Define the start order between webstore and ms_webdrbd (promote the DRBD master first, then start webstore):
            crm(live)configure# order webstore_after_ms_webdrbd_master mandatory: ms_webdrbd:promote webstore:start
            crm(live)configure# verify
            crm(live)configure# commit
            crm(live)configure# show
            node hong \
                attributes standby="off"
            node ning
            primitive webdrbd ocf:linbit:drbd \
                params drbd_resource="web" \
                op monitor role="Master" interval="30s" timeout="20s" \
                op monitor role="Slave" interval="40s" timeout="20s" \
                op start timeout="240s" interval="0" \
                op stop timeout="100s" interval="0"
            primitive webstore ocf:heartbeat:Filesystem \
                params device="/dev/drbd0" directory="/var/www/html" fstype="ext4" \
                op monitor interval="60s" timeout="40s" \
                op start timeout="60s" interval="0" \
                op stop timeout="60s" interval="0"
            ms ms_webdrbd webdrbd \
                meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
            colocation webstore_with_ms_webdrbd_master inf: webstore ms_webdrbd:Master
            order webstore_after_ms_webdrbd_master inf: ms_webdrbd:promote webstore:start
            property $id="cib-bootstrap-options" \
                dc-version="1.1.10-14.el6-368c726" \
                cluster-infrastructure="classic openais (with plugin)" \
                expected-quorum-votes="2" \
                stonith-enabled="false" \
                no-quorum-policy="ignore"
            crm(live)configure# cd
            crm(live)# status    (check the node status)
            Last updated: Sun Aug 17 19:29:54 2014
            Last change: Sun Aug 17 19:29:43 2014 via cibadmin on hong
            Stack: classic openais (with plugin)
            Current DC: hong - partition with quorum
            Version: 1.1.10-14.el6-368c726
            2 Nodes configured, 2 expected votes
            3 Resources configured

            Online: [ hong ning ]

             Master/Slave Set: ms_webdrbd [webdrbd]
                 Masters: [ ning ]
                 Slaves: [ hong ]
             webstore    (ocf::heartbeat:Filesystem):    Started ning    (ning is currently the master, so webstore runs on node ning)

6. Test the filesystem
    Create a file under the mount directory /var/www/html on node ning, put node ning into standby, and check whether the file is visible on node hong.
        [root@ning yum.repos.d]# echo "drbd" > /var/www/html/index.html
        [root@ning yum.repos.d]# crm node standby    (put the node into standby)
        [root@ning yum.repos.d]# crm status
        Last updated: Tue Sep 30 21:39:15 2014
        Last change: Sun Aug 17 19:35:32 2014 via crm_attribute on ning
        Stack: classic openais (with plugin)
        Current DC: hong - partition with quorum
        Version: 1.1.10-14.el6-368c726
        2 Nodes configured, 2 expected votes
        3 Resources configured

        Node ning: standby
        Online: [ hong ]

         Master/Slave Set: ms_webdrbd [webdrbd]
             Masters: [ hong ]    (node hong automatically became the master)
             Stopped: [ ning ]    (node ning is now in standby)
         webstore    (ocf::heartbeat:Filesystem):    Started hong    (the filesystem has failed over to node hong)
    Check whether the file created on node ning is present on node hong:
        [root@hong ~]# cat /var/www/html/index.html    (the content matches what was written above)
            drbd

        [root@hong ~]# crm status
        Last updated: Sun Aug 17 19:39:56 2014
        Last change: Sun Aug 17 19:35:32 2014 via crm_attribute on ning
        Stack: classic openais (with plugin)
        Current DC: hong - partition with quorum
        Version: 1.1.10-14.el6-368c726
        2 Nodes configured, 2 expected votes
        3 Resources configured

        Node ning: standby
        Online: [ hong ]

         Master/Slave Set: ms_webdrbd [webdrbd]
             Masters: [ hong ]
             Stopped: [ ning ]
         webstore    (ocf::heartbeat:Filesystem):    Started hong
    Bring node ning back online:
        [root@ning yum.repos.d]# crm node online
        [root@ning yum.repos.d]# crm status
        Last updated: Tue Sep 30 21:45:26 2014
        Last change: Sun Aug 17 19:41:48 2014 via crm_attribute on ning
        Stack: classic openais (with plugin)
        Current DC: hong - partition with quorum
        Version: 1.1.10-14.el6-368c726
        2 Nodes configured, 2 expected votes
        3 Resources configured

        Online: [ hong ning ]

         Master/Slave Set: ms_webdrbd [webdrbd]
             Masters: [ hong ]
             Slaves: [ ning ]
         webstore    (ocf::heartbeat:Filesystem):    Started hong

7. Define the httpd-related resources
    Define the IP resource:
    crm(live)configure# primitive webip ocf:heartbeat:IPaddr params ip=172.16.3.100 op monitor interval=30s timeout=20s
    crm(live)configure# verify
    Define the httpd resource:
    crm(live)configure# primitive webserver lsb:httpd op monitor interval=30s timeout=20s
    crm(live)configure# verify
    Define a resource group named webservice; webip, webstore and webserver go into one group:
    crm(live)configure# group webservice webip webstore webserver
    INFO: resource references in colocation:webstore_with_ms_webdrbd_master updated
    INFO: resource references in order:webstore_after_ms_webdrbd_master updated
    Colocate webip with the master of ms_webdrbd (if you had not defined the group above, you could use this instead):
    crm(live)configure# colocation webip_with_mswebdrbd_master inf: webip ms_webdrbd:Master
    crm(live)configure# verify
    Define the start order between the webstore filesystem and webserver:
    crm(live)configure# order webserver_after_webstore mandatory: webstore webserver
    crm(live)configure# verify
    crm(live)configure# commit
    crm(live)# status    (check the node status)
    Last updated: Sun Aug 17 20:21:47 2014
    Last change: Sun Aug 17 19:50:08 2014 via cibadmin on hong
    Stack: classic openais (with plugin)
    Current DC: ning - partition with quorum
    Version: 1.1.10-14.el6-368c726
    2 Nodes configured, 2 expected votes
    5 Resources configured

    Online: [ hong ning ]

     Master/Slave Set: ms_webdrbd [webdrbd]
         Masters: [ ning ]    (the master is node ning)
         Slaves: [ hong ]
     Resource Group: webservice
         webip    (ocf::heartbeat:IPaddr):    Started ning
         webstore    (ocf::heartbeat:Filesystem):    Started ning 
         webserver    (lsb:httpd):    Started ning
The complete corosync configuration for the web service:
    node hong \
            attributes standby="off"
    node ning \
            attributes standby="off"
    primitive webdrbd ocf:linbit:drbd \
            params drbd_resource="web" \
            op monitor role="Master" interval="30s" timeout="20s" \
            op monitor role="Slave" interval="40s" timeout="20s" \
            op start timeout="240s" interval="0" \
            op stop timeout="100s" interval="0"
    primitive webip ocf:heartbeat:IPaddr \
            params ip="172.16.3.100" \
            op monitor interval="30s" timeout="20s"
    primitive webserver lsb:httpd \
            op monitor interval="30s" timeout="20s"
    primitive webstore ocf:heartbeat:Filesystem \
            params device="/dev/drbd0" directory="/var/www/html" fstype="ext4" \
            op monitor interval="60s" timeout="40s" \
            op start timeout="60s" interval="0" \
            op stop timeout="60s" interval="0"
    group webservice webip webstore webserver
    ms ms_webdrbd webdrbd \
            meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
    colocation webip_with_mswebdrbd_master inf: webip ms_webdrbd:Master
    colocation webstore_with_ms_webdrbd_master inf: webservice ms_webdrbd:Master
    order webserver_after_webstore inf: webstore webserver
    order webstore_after_ms_webdrbd_master inf: ms_webdrbd:promote webservice:start
    property $id="cib-bootstrap-options" \
            dc-version="1.1.10-14.el6-368c726" \
            cluster-infrastructure="classic openais (with plugin)" \
            expected-quorum-votes="2" \
            stonith-enabled="false" \
            no-quorum-policy="ignore"
8. Test the highly available web and DRBD services under corosync+pacemaker+drbd
    Check the current node status:
    [root@hong ~]# crm status
    Last updated: Sat Aug 16 15:11:19 2014
    Last change: Fri Sep 19 07:35:10 2014 via cibadmin on hong
    Stack: classic openais (with plugin)
    Current DC: ning - partition with quorum
    Version: 1.1.10-14.el6-368c726
    2 Nodes configured, 2 expected votes
    5 Resources configured

    Online: [ hong ning ]

     Master/Slave Set: ms_webdrbd [webdrbd]
         Masters: [ ning ]
         Slaves: [ hong ]
     Resource Group: webservice
         webip    (ocf::heartbeat:IPaddr):    Started ning
         webstore    (ocf::heartbeat:Filesystem):    Started ning
         webserver    (lsb:httpd):    Started ning

    (1) Put node ning into standby and check whether the resources fail over to node hong
        [root@ning ~]# crm node standby
        [root@ning ~]# crm status;
        Last updated: Fri Sep 19 07:44:16 2014
        Last change: Fri Sep 19 07:43:51 2014 via crm_attribute on ning
        Stack: classic openais (with plugin)
        Current DC: ning - partition with quorum
        Version: 1.1.10-14.el6-368c726
        2 Nodes configured, 2 expected votes
        5 Resources configured

        Node ning: standby    (node ning is shown in standby)
        Online: [ hong ]      (only node hong is online)

         Master/Slave Set: ms_webdrbd [webdrbd]
             Masters: [ hong ]    (the resources have moved to node hong)
             Stopped: [ ning ]
         Resource Group: webservice
             webip    (ocf::heartbeat:IPaddr):    Started hong    
             webstore    (ocf::heartbeat:Filesystem):    Started hong
             webserver    (lsb:httpd):    Started hong
    (2) Bring node ning back online; the resources do not move back to ning
        [root@ning ~]# crm node online
        [root@ning ~]# crm status;
        Last updated: Fri Sep 19 07:47:47 2014
        Last change: Fri Sep 19 07:47:44 2014 via crm_attribute on ning
        Stack: classic openais (with plugin)
        Current DC: ning - partition with quorum
        Version: 1.1.10-14.el6-368c726
        2 Nodes configured, 2 expected votes
        5 Resources configured

        Online: [ hong ning ]    (node ning is online again)

         Master/Slave Set: ms_webdrbd [webdrbd]
             Masters: [ hong ]
             Slaves: [ ning ]
         Resource Group: webservice
             webip    (ocf::heartbeat:IPaddr):    Started hong
             webstore    (ocf::heartbeat:Filesystem):    Started hong
             webserver    (lsb:httpd):    Started hong

    (3) Test manually demoting the DRBD hong node to Secondary and see whether the resources move to node ning.
        [root@hong ~]# umount /dev/drbd0
        [root@hong ~]# drbdadm secondary web 
        [root@hong ~]# crm status
        Last updated: Sat Aug 16 15:20:43 2014
        Last change: Fri Sep 19 07:47:44 2014 via crm_attribute on ning
        Stack: classic openais (with plugin)
        Current DC: ning - partition with quorum
        Version: 1.1.10-14.el6-368c726
        2 Nodes configured, 2 expected votes
        5 Resources configured

        Online: [ hong ning ]

         Master/Slave Set: ms_webdrbd [webdrbd]
             Masters: [ ning ]    (the resources were indeed moved to node ning)
             Slaves: [ hong ]
         Resource Group: webservice
             webip    (ocf::heartbeat:IPaddr):    Started ning
             webstore    (ocf::heartbeat:Filesystem):    Started ning
             webserver    (lsb:httpd):    Started ning

        Failed actions:
            webdrbd_monitor_30000 on hong 'ok' (0): call=63, status=complete, last-rc-change='Sat Aug 16 15:20:37 2014', queued=0ms, exec=0ms    (a notice appears under Failed actions because the cluster detected an unexpected monitor result for the webdrbd resource)

    (4) Test stopping httpd manually on node ning; it is restarted automatically within about 30 seconds (see the sketch below).
            The ss -tnl command lets you watch port 80 disappear and come back.
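        A minimal sketch of this test (stopping httpd behind the cluster's back is for testing only; the restart delay is bounded by the 30s monitor interval defined for webserver):
            [root@ning ~]# service httpd stop
            [root@ning ~]# ss -tnl | grep :80      (port 80 is gone)
            ... wait up to ~30 seconds for the monitor to notice ...
            [root@ning ~]# ss -tnl | grep :80      (port 80 is back once pacemaker restarts httpd)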
        [root@ning ~]# ip addr show    (checking the IP addresses is another way to see which node holds the resources)
        1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
            link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
            inet 127.0.0.1/8 scope host lo
            inet6 ::1/128 scope host
               valid_lft forever preferred_lft forever
        2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
            link/ether 00:0c:29:7d:9a:22 brd ff:ff:ff:ff:ff:ff
            inet 172.16.3.1/16 brd 172.16.255.255 scope global eth0
            inet 172.16.3.100/16 brd 172.16.255.255 scope global secondary eth0    (the virtual IP is 172.16.3.100/16)
            inet6 fe80::20c:29ff:fe7d:9a22/64 scope link
               valid_lft forever preferred_lft forever
     (You can also test by pointing a browser at http://172.16.3.100)
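     For a quick command-line check from any host that can reach the VIP (a sketch; assumes the index.html created earlier is still in place):
        # curl http://172.16.3.100
        drbd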
