**********************集群规划***************************************
--配置主机,共需要8台主机,其中6台做grid集群,1台作为存储服务器,一台作为dns服务器,分配网段在60~80之间
>存储服务器:gridcluster-datastore
ip:192.168.100.75
磁盘:data01
主机名:dtstore
为了统一管理,要配置ocr和vf的磁盘组、oracle数据文件磁盘组、acfs磁盘组,制作虚拟磁盘过程略。
>dns服务器:主dns服务器:dns-server
解析的IP地址:192.168.100.60
192.168.100.70
192.168.100.80
dns服务器ip:192.168.100.73
磁盘:data02
主机名:dns
备用dns服务器:dnsbak-server
解析的IP地址:192.168.100.60
192.168.100.70
192.168.100.80
dns服务器ip:192.168.100.71
磁盘:data03
主机名:dns
dns服务器主要用来解析scan ip地址及其他公司内部域名管理。
##################################附加:redhat/CentOS/OEL 5 DNS服务器配置方法########################################
1)安装dns包
[root@dns mnt]# ll
total 1112
-rw-r--r-- 1 root root 1012559 Jan 22 11:11 bind-9.3.6-20.P1.el5.x86_64.rpm
-rw-r--r-- 1 root root 47941 Jan 22 11:14 bind-chroot-9.3.6-20.P1.el5.x86_64.rpm
-rw-r--r-- 1 root root 64667 Jan 22 11:11 caching-nameserver-9.3.6-20.P1.el5.x86_64.rpm
[root@dns mnt]# rpm -ivh bind-9.3.6-20.P1.el5.x86_64.rpm
[root@dns mnt]# rpm -ivh bind-chroot-9.3.6-20.P1.el5.x86_64.rpm
[root@dns mnt]# rpm -ivh caching-nameserver-9.3.6-20.P1.el5.x86_64.rpm
2)配置named.conf文件
>生成named.conf文件:
[root@dns mnt]# cd /var/named/chroot/etc/
[root@dns etc]# ll
total 24
-rw-r--r-- 1 root root 3519 Feb 27 2006 localtime
-rw-r----- 1 root named 1230 Dec 2 2011 named.caching-nameserver.conf
-rw-r----- 1 root named 955 Dec 2 2011 named.rfc1912.zones
-rw-r----- 1 root named 113 Jan 22 10:51 rndc.key
[root@dns etc]# cp -p named.caching-nameserver.conf named.conf
[root@dns etc]# ll
total 28
-rw-r--r-- 1 root root 3519 Feb 27 2006 localtime
-rw-r----- 1 root named 1230 Dec 2 2011 named.caching-nameserver.conf
-rw-r----- 1 root named 1230 Dec 2 2011 named.conf
-rw-r----- 1 root named 955 Dec 2 2011 named.rfc1912.zones
-rw-r----- 1 root named 113 Jan 22 10:51 rndc.key
>配置named.conf文件:
[root@dns etc]# vim named.conf
将127.0.0.1和localhost全部改成any。
3)配置named.rfc1912.zones文件
[root@dns etc]# vim named.rfc1912.zones --在最后添加下面内容
zone "100.168.192.in-addr.arpa" IN {
type master;
file "100.168.192.in-addr.arpa";
allow-update { none; };
};
4)生成正反向解析数据库文件
[root@dns named]# pwd
/var/named/chroot/var/named
[root@dns named]# ll
total 44
drwxrwx--- 2 named named 4096 Aug 26 2004 data
-rw-r----- 1 root named 198 Dec 2 2011 localdomain.zone
-rw-r----- 1 root named 195 Dec 2 2011 localhost.zone
-rw-r----- 1 root named 427 Dec 2 2011 named.broadcast
-rw-r----- 1 root named 1892 Dec 2 2011 named.ca
-rw-r----- 1 root named 424 Dec 2 2011 named.ip6.local
-rw-r----- 1 root named 426 Dec 2 2011 named.local
-rw-r----- 1 root named 427 Dec 2 2011 named.zero
drwxrwx--- 2 named named 4096 Jul 27 2004 slaves
[root@dns named]# cp -p named.local 100.168.192.in-addr.arpa
[root@dns named]# ll
total 48
-rw-r----- 1 root named 426 Dec 2 2011 100.168.192.in-addr.arpa
drwxrwx--- 2 named named 4096 Aug 26 2004 data
-rw-r----- 1 root named 198 Dec 2 2011 localdomain.zone
-rw-r----- 1 root named 195 Dec 2 2011 localhost.zone
-rw-r----- 1 root named 427 Dec 2 2011 named.broadcast
-rw-r----- 1 root named 1892 Dec 2 2011 named.ca
-rw-r----- 1 root named 424 Dec 2 2011 named.ip6.local
-rw-r----- 1 root named 426 Dec 2 2011 named.local
-rw-r----- 1 root named 427 Dec 2 2011 named.zero
drwxrwx--- 2 named named 4096 Jul 27 2004 slaves
5)配置正向解析数据库文件
[root@dns named]# cat localdomain.zone
$TTL 86400
@ IN SOA localhost root (
42 ; serial (d. adams)
3H ; refresh
15M ; retry
1W ; expiry
1D ) ; minimum
IN NS localhost
localhost IN A 127.0.0.1
lt-cluster IN A 192.168.100.60
lt-cluster IN A 192.168.100.70
lt-cluster IN A 192.168.100.80
6)配置反向解析数据库文件
[root@dns named]# cat 100.168.192.in-addr.arpa
$TTL 86400
@ IN SOA localhost. root.localhost. (
1997022700 ; Serial
28800 ; Refresh
14400 ; Retry
3600000 ; Expire
86400 ) ; Minimum
IN NS localhost.
1 IN PTR localhost.
60 IN PTR lt-cluster.
70 IN PTR lt-cluster.
80 IN PTR lt-cluster.
7)服务器上启动DNS服务器
[root@dns named]# /etc/init.d/named status
rndc: connect failed: 127.0.0.1#953: connection refused
named is stopped
[root@dns named]# /etc/init.d/named start
Starting named: [ OK ]
[root@dns named]#chkconfig named on
8)配置/etc/resolv.conf文件(集群各个节点执行)
[root@node1 ~]# cat /etc/resolv.conf
search localdomain
nameserver 192.168.100.73
9)nslookup检查(集群各个节点均要检查)
[root@node1 ~]# nslookup 192.168.100.60
Server: 192.168.100.73
Address: 192.168.100.73#53
60.100.168.192.in-addr.arpa name = lt-cluster.
[root@node1 ~]# nslookup 192.168.100.70
Server: 192.168.100.73
Address: 192.168.100.73#53
70.100.168.192.in-addr.arpa name = lt-cluster.
[root@node1 ~]# nslookup 192.168.100.80
Server: 192.168.100.73
Address: 192.168.100.73#53
80.100.168.192.in-addr.arpa name = lt-cluster.
[root@node1 ~]# nslookup lt-cluster
Server: 192.168.100.73
Address: 192.168.100.73#53
Name: lt-cluster.localdomain
Address: 192.168.100.60
Name: lt-cluster.localdomain
Address: 192.168.100.70
Name: lt-cluster.localdomain
Address: 192.168.100.80
#####################################################################################################################
>cluster服务器:共6台,规划如下
服务器名称 主机名 public-ip vip priv-ip 磁盘
11ggrid-node1 node1 192.168.100.61 192.168.100.81 172.168.5.61 data01
11ggrid-node2 node2 192.168.100.62 192.168.100.82 172.168.5.62 data02
11ggrid-node3 node3 192.168.100.63 192.168.100.83 172.168.5.63 data03
11ggrid-node4 node4 192.168.100.64 192.168.100.84 172.168.5.64 data04
11ggrid-node5 node5 192.168.100.65 192.168.100.85 172.168.5.65 data01
11ggrid-node6 node6 192.168.100.66 192.168.100.86 172.168.5.66 data02
**************************集群配置****************************
--配置/etc/hosts文件(各个节点均需要配置)
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1 localhost
::1 localhost6.localdomain6 localhost6
#node1
192.168.100.61 node1
172.168.5.61 node1-priv
192.168.100.81 node1-vip
#node2
192.168.100.62 node2
172.168.5.62 node2-priv
192.168.100.82 node2-vip
#node3
192.168.100.63 node3
172.168.5.63 node3-priv
192.168.100.83 node3-vip
#node4
192.168.100.64 node4
172.168.5.64 node4-priv
192.168.100.84 node4-vip
#node5
192.168.100.65 node5
172.168.5.65 node5-priv
192.168.100.85 node5-vip
#node6
192.168.100.66 node6
172.168.5.66 node6-priv
192.168.100.86 node6-vip
--执行以下脚本,完成cluster安装前期配置工作(各个节点均需要配置)
#!/bin/bash
# 1preusers.sh -- Oracle 11g R2 Grid Infrastructure pre-install setup.
# Creates the grid/oracle OS users and groups, writes their shell
# environments, prepares the /u01 directory tree, and applies the
# Oracle-required PAM limits and kernel parameters.
# Usage: log on as the superuser ('root'), then run: ./1preusers.sh
# NOTE(review): not idempotent -- rerunning appends duplicate lines to
# limits.conf / pam.d/login / profile / sysctl.conf.
set -u

# The script creates users and edits files under /etc; refuse to run unprivileged.
if [ "$(id -u)" -ne 0 ]; then
  echo "This script must be run as root." >&2
  exit 1
fi

echo -n "Please input ASM_SID:"
read -r asmsid
#echo -n "Please input ORACLE_HOSTNAME:"
#read -r orahostname
#echo -n "Please input ORACLE_SID:"
#read -r orasid
#echo -n "Please input ORACLE_UNQNAME:"
#read -r oraunq

# OS groups required by Grid Infrastructure and the database (fixed GIDs
# so they match across all cluster nodes).
groupadd -g 1000 oinstall
groupadd -g 1200 asmadmin
groupadd -g 1201 asmdba
groupadd -g 1202 asmoper
groupadd -g 1300 dba
groupadd -g 1301 oper

# grid: Grid Infrastructure (ASM/clusterware) software owner.
useradd -u 1100 -g oinstall -G asmadmin,asmdba,asmoper,dba -d /home/grid -s /bin/bash -c "grid Infrastructure Owner" grid
echo "grid" | passwd --stdin grid

# grid user environment. Unquoted heredoc: $asmsid expands now, while the
# escaped \$VARs stay literal so they expand at login time.
cat >> /home/grid/.bash_profile <<EOF
export TMP=/tmp
export TMPDIR=\$TMP
export ORACLE_SID=$asmsid
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0.4/grid
export ORACLE_TERM=xterm
export NLS_DATE_FORMAT='yyyy/mm/dd hh24:mi:ss'
export TNS_ADMIN=\$ORACLE_HOME/network/admin
export PATH=/usr/sbin:\$PATH
export PATH=\$ORACLE_HOME/bin:\$PATH
export LD_LIBRARY_PATH=\$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=\$ORACLE_HOME/JRE:\$ORACLE_HOME/jlib:\$ORACLE_HOME/rdbms/jlib
export LANG=en_US
export NLS_LANG=american_america.AL32UTF8
EOF

# oracle: database software owner.
useradd -u 1101 -g oinstall -G dba,oper,asmdba -d /home/oracle -s /bin/bash -c "Oracle Software Owner" oracle
echo "oracle" | passwd --stdin oracle

# oracle user environment (ORACLE_SID/UNQNAME intentionally left unset;
# matching prompts above are commented out).
cat >> /home/oracle/.bash_profile <<'EOF'
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0.4/dbhome_1
export TNS_ADMIN=$ORACLE_HOME/network/admin
export ORACLE_TERM=xterm
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
export LANG=en_US
export NLS_LANG=american_america.AL32UTF8
export NLS_DATE_FORMAT='yyyy/mm/dd hh24:mi:ss'
EOF

echo "The Groups and users has been created"
echo "The Environment for grid,oracle also has been set successfully"

# Oracle directory layout: /u01 owned by oracle, grid-specific trees re-owned by grid.
mkdir -p /u01/app/grid
mkdir -p /u01/app/11.2.0.4/grid
mkdir -p /u01/app/oracle
chown -R oracle:oinstall /u01
chown -R grid:oinstall /u01/app/grid
chown -R grid:oinstall /u01/app/11.2.0.4
chmod -R 775 /u01

# Shell resource limits required by the Oracle installer.
cp /etc/security/limits.conf /etc/security/limits.conf.bak
cat >> /etc/security/limits.conf <<'EOF'
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
EOF

# Enable pam_limits on login (both full-path and bare-module forms, so it
# resolves on 32- and 64-bit systems).
cp /etc/pam.d/login /etc/pam.d/login.bak
cat >> /etc/pam.d/login <<'EOF'
session required /lib/security/pam_limits.so
session required pam_limits.so
EOF

# Raise per-session ulimits for the grid/oracle users at login.
cp /etc/profile /etc/profile.bak
cat >> /etc/profile <<'EOF'
if [ $USER = "oracle" ]||[ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
fi
EOF

# Kernel parameters from the Oracle 11.2 installation guide.
cp /etc/sysctl.conf /etc/sysctl.conf.bak
cat >> /etc/sysctl.conf <<'EOF'
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 4294967295
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048586
net.ipv4.tcp_wmem = 262144 262144 262144
net.ipv4.tcp_rmem = 4194304 4194304 4194304
EOF
sysctl -p
--停止ntp服务脚本(各个节点均需要配置)
# Stop and permanently disable ntpd, then move its config aside so the
# Oracle installer detects no NTP and lets ctssd handle cluster time sync.
# Run on every cluster node.
service ntpd stop
chkconfig ntpd off
# Dated backup of the config; guard so a rerun does not error when the
# file has already been moved.
if [ -f /etc/ntp.conf ]; then
  mv /etc/ntp.conf "/etc/ntp.conf.$(date +%Y%m%d)"
fi
echo "down."
--配置asm服务(各个节点均需要配置,另外如果用存储服务器挂载磁盘,该存储服务器也要进行相关配置)
>安装asm相关软件包(各个节点均需要配置)
[root@node1 mnt]# rpm -ivh oracleasm-support-2.1.8-1.el5.x86_64.rpm
warning: oracleasm-support-2.1.8-1.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing... ########################################### [100%]
1:oracleasm-support ########################################### [100%]
[root@node1 mnt]# rpm -ivh oracleasm-2.6.18-308.el5-2.0.5-1.el5.x86_64.rpm
warning: oracleasm-2.6.18-308.el5-2.0.5-1.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing... ########################################### [100%]
1:oracleasm-2.6.18-308.el########################################### [100%]
[root@node1 mnt]# rpm -ivh oracleasmlib-2.0.4-1.el5.x86_64.rpm
warning: oracleasmlib-2.0.4-1.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing... ########################################### [100%]
1:oracleasmlib ########################################### [100%]
[root@node1 mnt]# rpm -qa|grep oracleasm
oracleasm-2.6.18-308.el5-2.0.5-1.el5
oracleasm-support-2.1.8-1.el5
oracleasmlib-2.0.4-1.el5
>配置asm服务(各个节点均需要配置)
[root@node4 mnt]# oracleasm status
Checking if ASM is loaded: no
Checking if /dev/oracleasm is mounted: no
[root@node4 mnt]# oracleasm configure -i
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
[root@node4 mnt]# oracleasm status
Checking if ASM is loaded: no
Checking if /dev/oracleasm is mounted: no
[root@node4 mnt]# oracleasm init
Creating /dev/oracleasm mount point: /dev/oracleasm
Loading module "oracleasm": oracleasm
Mounting ASMlib driver filesystem: /dev/oracleasm
[root@node4 mnt]# oracleasm status
Checking if ASM is loaded: yes
Checking if /dev/oracleasm is mounted: yes
>创建asm磁盘(此处在存储服务器上执行,集群服务器直接scandisks即可)
[root@dtstore mnt]# oracleasm createdisk GRIDVOL1 /dev/sdb1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm createdisk GRIDVOL2 /dev/sdc1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm createdisk ORAVOL1 /dev/sdd1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm createdisk ORAVOL2 /dev/sde1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm createdisk ORAVOL3 /dev/sdf1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm createdisk ORAVOL4 /dev/sdg1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm createdisk ORAVOL5 /dev/sdh1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm createdisk ORAVOL6 /dev/sdi1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm createdisk BAKVOL1 /dev/sdj1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm createdisk LTADVM1 /dev/sdk1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm createdisk LTADVM2 /dev/sdl1
Writing disk header: done
Instantiating disk: done
[root@dtstore mnt]# oracleasm listdisks
BAKVOL1
GRIDVOL1
GRIDVOL2
LTADVM1
LTADVM2
ORAVOL1
ORAVOL2
ORAVOL3
ORAVOL4
ORAVOL5
ORAVOL6
集群服务器扫描磁盘(各个节点均需要配置)
[root@node2 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "GRIDVOL1"
Instantiating disk "GRIDVOL2"
Instantiating disk "ORAVOL1"
Instantiating disk "ORAVOL2"
Instantiating disk "ORAVOL3"
Instantiating disk "ORAVOL4"
Instantiating disk "ORAVOL5"
Instantiating disk "ORAVOL6"
Instantiating disk "BAKVOL1"
Instantiating disk "LTADVM1"
Instantiating disk "LTADVM2"
[root@node2 ~]# oracleasm listdisks
BAKVOL1
GRIDVOL1
GRIDVOL2
LTADVM1
LTADVM2
ORAVOL1
ORAVOL2
ORAVOL3
ORAVOL4
ORAVOL5
ORAVOL6
--图形化界面配置SSH对等性-略
--安装grid前cvu先决条件检查
./runcluvfy.sh stage -pre crsinst -n node1,node2,node3,node4,node5,node6 -fixup -verbose
--图形化界面安装grid集群、图形化界面安装oracle软件、asmca创建磁盘组、dbca创建数据库等-略
--安装完成后-环境检查结果如下:
[grid@node1 ~]$ olsnodes -n -t
node1 1 Unpinned
node2 2 Unpinned
node3 3 Unpinned
node4 4 Unpinned
node5 5 Unpinned
node6 6 Unpinned
[grid@node1 ~]$ crsctl status resource -t
--------------------------------------------------------------------------------
NAME TARGET STATE SERVER STATE_DETAILS
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.FLASH.dg
ONLINE ONLINE node1
ONLINE ONLINE node2
ONLINE ONLINE node3
ONLINE ONLINE node4
ONLINE ONLINE node5
ONLINE ONLINE node6
ora.GRIDDG.dg
ONLINE ONLINE node1
ONLINE ONLINE node2
ONLINE ONLINE node3
ONLINE ONLINE node4
ONLINE ONLINE node5
ONLINE ONLINE node6
ora.LISTENER.lsnr
ONLINE ONLINE node1
ONLINE ONLINE node2
ONLINE ONLINE node3
ONLINE ONLINE node4
ONLINE ONLINE node5
ONLINE ONLINE node6
ora.ORADG.dg
ONLINE ONLINE node1
ONLINE ONLINE node2
ONLINE ONLINE node3
ONLINE ONLINE node4
ONLINE ONLINE node5
ONLINE ONLINE node6
ora.asm
ONLINE ONLINE node1 Started
ONLINE ONLINE node2 Started
ONLINE ONLINE node3 Started
ONLINE ONLINE node4 Started
ONLINE ONLINE node5 Started
ONLINE ONLINE node6 Started
ora.gsd
OFFLINE OFFLINE node1
OFFLINE OFFLINE node2
OFFLINE OFFLINE node3
OFFLINE OFFLINE node4
OFFLINE OFFLINE node5
OFFLINE OFFLINE node6
ora.net1.network
ONLINE ONLINE node1
ONLINE ONLINE node2
ONLINE ONLINE node3
ONLINE ONLINE node4
ONLINE ONLINE node5
ONLINE ONLINE node6
ora.ons
ONLINE ONLINE node1
ONLINE ONLINE node2
ONLINE ONLINE node3
ONLINE ONLINE node4
ONLINE ONLINE node5
ONLINE ONLINE node6
ora.registry.acfs
ONLINE ONLINE node1
ONLINE ONLINE node2
ONLINE ONLINE node3
ONLINE ONLINE node4
ONLINE ONLINE node5
ONLINE ONLINE node6
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE node2
ora.LISTENER_SCAN2.lsnr
1 ONLINE ONLINE node3
ora.LISTENER_SCAN3.lsnr
1 ONLINE ONLINE node1
ora.cvu
1 ONLINE ONLINE node1
ora.ltdb.db
1 ONLINE ONLINE node1 Open
2 ONLINE ONLINE node2 Open
ora.node1.vip
1 ONLINE ONLINE node1
ora.node2.vip
1 ONLINE ONLINE node2
ora.node3.vip
1 ONLINE ONLINE node3
ora.node4.vip
1 ONLINE ONLINE node4
ora.node5.vip
1 ONLINE ONLINE node5
ora.node6.vip
1 ONLINE ONLINE node6
ora.oc4j
1 ONLINE ONLINE node1
ora.scan1.vip
1 ONLINE ONLINE node2
ora.scan2.vip
1 ONLINE ONLINE node3
ora.scan3.vip
1 ONLINE ONLINE node1
[grid@node1 ~]$ crsctl status resource -t -init
--------------------------------------------------------------------------------
NAME TARGET STATE SERVER STATE_DETAILS
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.asm
1 ONLINE ONLINE node1 Started
ora.cluster_interconnect.haip
1 ONLINE ONLINE node1
ora.crf
1 ONLINE ONLINE node1
ora.crsd
1 ONLINE ONLINE node1
ora.cssd
1 ONLINE ONLINE node1
ora.cssdmonitor
1 ONLINE ONLINE node1
ora.ctssd
1 ONLINE ONLINE node1 ACTIVE:0
ora.diskmon
1 OFFLINE OFFLINE
ora.drivers.acfs
1 ONLINE ONLINE node1
ora.evmd
1 ONLINE ONLINE node1
ora.gipcd
1 ONLINE ONLINE node1
ora.gpnpd
1 ONLINE ONLINE node1
ora.mdnsd
1 ONLINE ONLINE node1
--dbca继续创建其他类型数据库-略