This is a detailed record of adding a new node to an existing two-node RAC cluster.
I. Environment and versions
Host environment: VMware ESXi 5.5
Operating system: Red Hat Enterprise Linux 5.8 x86_64
Database version: Oracle 11g Release 2 (11.2.0.1)
II. Install the operating system
1. Prepare a brand-new operating system installation.
III. Mount the shared storage
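The original notes record nothing for this step. The essential requirement is that dbrac3 sees exactly the same shared LUNs as dbrac1 and dbrac2, with the same ownership and permissions. A minimal check on the new node might look like the following sketch, which assumes ASMLib is used to label the shared disks (adjust accordingly if the cluster uses udev rules or raw devices instead):

fdisk -l                            # the shared LUNs should be visible
/etc/init.d/oracleasm scandisks     # pick up the existing ASMLib labels (only if ASMLib is in use)
/etc/init.d/oracleasm listdisks     # should list the same disks as on dbrac1/dbrac2
ls -l /dev/oracleasm/disks/         # ownership is typically grid:asmadmin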
IV. Pre-installation preparation
1. Configure the relevant parameter files
/etc/hosts
--- identical on all three nodes
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1 dbrac1 localhost.localdomain localhost
#::1 localhost6.localdomain6 localhost6

##Public Network - (eth0)
192.168.1.70 dbrac1
192.168.1.71 dbrac2
192.168.1.73 dbrac3

##Private Interconnect - (eth1)
192.168.2.80 dbrac1-priv
192.168.2.81 dbrac2-priv
192.168.2.82 dbrac3-priv

##Public Virtual IP (VIP) addresses - (eth0)
192.168.1.180 dbrac1-vip
192.168.1.183 dbrac2-vip
192.168.1.184 dbrac3-vip
192.168.1.188 dbrac-scan
192.168.1.189 dbrac-scan
192.168.1.190 dbrac-scan
/etc/security/limits.conf --- configuration on the new node
#ORACLE SETTING
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
/etc/pam.d/login
session required pam_limits.so
/etc/sysctl.conf
#ORACLE SETTING
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 536870912
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
[root@dbrac3 ~]# sysctl -p
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
kernel.sysrq = 0
kernel.core_uses_pid = 1
net.ipv4.tcp_syncookies = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 536870912
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
[root@db11 ~]# vi /etc/fstab
tmpfs /dev/shm tmpfs defaults,size=2048M 0 0
[root@db11 ~]# mount -o remount /dev/shm
[root@db11 ~]# df -h
Filesystem                       Size  Used Avail Use% Mounted on
/dev/mapper/VolGroup00-LogVol00   69G  6.0G   59G  10% /
/dev/mapper/VolGroup00-LogVol02   19G  417M   18G   3% /tmp
/dev/sda1                         99M   13M   81M  14% /boot
tmpfs                            2.0G     0  2.0G   0% /dev/shm
Create the required groups and users (keep the same GIDs and UIDs as on the existing nodes):
groupadd -g 501 oinstall
groupadd -g 502 dba
groupadd -g 503 oper
groupadd -g 504 asmadmin
groupadd -g 505 asmoper
groupadd -g 506 asmdba
useradd -g oinstall -G dba,asmdba,oper oracle
useradd -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid
Create the required directories (the paths must be exactly the same as on the existing two nodes):
mkdir -p /u01/app/oracle
mkdir -p /u01/app/grid
chown -R oracle:oinstall /u01/app/oracle/
chown -R grid:oinstall /u01/app/grid/
chmod -R 775 /u01/app/grid/
chmod -R 775 /u01/app/oracle/
The Oracle inventory directory must also be created manually:
mkdir -p /u01/app/oraInventory
chown -R grid:oinstall /u01/app/oraInventory
Set the passwords
[root@db11 ~]# passwd oracle
Changing password for user oracle.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@db11 ~]# passwd grid
Changing password for user grid.
New UNIX password:
BAD PASSWORD: it is too short
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
Environment variables
[root@dbrac3 ~]# su - grid
+ASM3@dbrac3 /home/grid$ cat .bash_profile
# .bash_profile

# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi

# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH

##################################################
# User specific environment and startup programs
##################################################
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export ORACLE_PATH=$ORACLE_BASE/common/oracle/sql:.:$ORACLE_HOME/rdbms/admin
##################################################
# Each RAC node must have a unique ORACLE_SID. (i.e. orcl1,orcl2,...)
##################################################
export ORACLE_SID=+ASM3
export PATH=$ORA_CRS_HOME/bin:.:${JAVA_HOME}/bin:${PATH}:$HOME/bin:$ORACLE_HOME/bin
export PATH=${PATH}:/usr/bin:/bin:/usr/bin/X11:/usr/local/bin
export PATH=${PATH}:$ORACLE_BASE/common/oracle/bin:$ORA_CRS_HOME/bin:/sbin
export ORACLE_TERM=xterm
#export TNS_ADMIN=$ORACLE_HOME/network/admin
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib
export CLASSPATH=$ORACLE_HOME/JRE
export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/jlib
export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/rdbms/jlib
export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/network/jlib
export THREADS_FLAG=native
export TEMP=/tmp
export TMPDIR=/tmp
##################################################
# set NLS_LANG to resolve messy code in SQLPLUS
##################################################
export NLS_LANG=AMERICAN_AMERICA.WE8ISO8859P1
##################################################
# Shell setting.
##################################################
umask 022
set -o vi
export PS1="\${ORACLE_SID}@`hostname` \${PWD}$ "
##################################################
# Oracle Alias
##################################################
alias ls="ls -FA"
alias vi=vim
alias base='cd $ORACLE_BASE'
alias home='cd $ORACLE_HOME'
alias alert='tail -200f $ORACLE_BASE/admin/RACDB/bdump/alert_$ORACLE_SID.log'
alias tnsnames='vi $ORACLE_HOME/network/admin/tnsnames.ora'
alias listener='vi $ORACLE_HOME/network/admin/listener.ora'
[root@dbrac3 ~]# su - oracle
DB113@dbrac3 /home/oracle$ cat .bash_profile
# .bash_profile

# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi

# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH

##################################################
# User specific environment and startup programs
##################################################
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0
export ORACLE_PATH=$ORACLE_BASE/common/oracle/sql:.:$ORACLE_HOME/rdbms/admin
##################################################
# Each RAC node must have a unique ORACLE_SID. (i.e. orcl1,orcl2,...)
##################################################
export ORACLE_SID=DB113
export PATH=$ORA_CRS_HOME/bin:.:${JAVA_HOME}/bin:${PATH}:$HOME/bin:$ORACLE_HOME/bin
export PATH=${PATH}:/usr/bin:/bin:/usr/bin/X11:/usr/local/bin
export PATH=${PATH}:$ORACLE_BASE/common/oracle/bin:$ORA_CRS_HOME/bin:/sbin
export ORACLE_TERM=xterm
#export TNS_ADMIN=$ORACLE_HOME/network/admin
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib
export CLASSPATH=$ORACLE_HOME/JRE
export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/jlib
export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/rdbms/jlib
export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/network/jlib
export THREADS_FLAG=native
export TEMP=/tmp
export TMPDIR=/tmp
##################################################
# set NLS_LANG to resolve messy code in SQLPLUS
##################################################
export NLS_LANG=AMERICAN_AMERICA.WE8ISO8859P1
##################################################
# Shell setting.
##################################################
umask 022
set -o vi
export PS1="\${ORACLE_SID}@`hostname` \${PWD}$ "
##################################################
# Oracle Alias
##################################################
alias ls="ls -FA"
alias vi=vim
alias base='cd $ORACLE_BASE'
alias home='cd $ORACLE_HOME'
alias alert='tail -200f $ORACLE_BASE/admin/RACDB/bdump/alert_$ORACLE_SID.log'
alias tnsnames='vi $ORACLE_HOME/network/admin/tnsnames.ora'
alias listener='vi $ORACLE_HOME/network/admin/listener.ora'
2. Install the required RPM packages
(1) Configure a local yum repository:

1. mount /dev/cdrom /media/cdrom
2. cat >> /etc/yum.repos.d/dvd.repo <<EOF
[dvd]
name=install dvd
baseurl=file:///media/cdrom/Server
enabled=1
gpgcheck=0
EOF
3. yum clean all
   yum list

Note: yum list initially failed with the following error:
Error: Cannot retrieve repository metadata (repomd.xml) for repository: dvd.
See the earlier notes on configuring a yum repository on Red Hat 6 for the fix.

(2) Install the required dependency packages with yum.

The packages listed in the official documentation are:
binutils-2.17.50.0.6
compat-libstdc++-33-3.2.3
compat-libstdc++-33-3.2.3 (32 bit)
elfutils-libelf-0.125
elfutils-libelf-devel-0.125
gcc-4.1.2
gcc-c++-4.1.2
glibc-2.5-24
glibc-2.5-24 (32 bit)
glibc-common-2.5
glibc-devel-2.5
glibc-devel-2.5 (32 bit)
glibc-headers-2.5
ksh-20060214
libaio-0.3.106
libaio-0.3.106 (32 bit)
libaio-devel-0.3.106
libaio-devel-0.3.106 (32 bit)
libgcc-4.1.2
libgcc-4.1.2 (32 bit)
libstdc++-4.1.2
libstdc++-4.1.2 (32 bit)
libstdc++-devel 4.1.2
make-3.81
sysstat-7.0.2

Installation:
yum install libaio-devel -y
yum install gcc -y
yum install elfutils-libelf -y
yum install glibc -y
yum install libstdc++ -y
yum install sysstat -y
yum install unixODBC -y
yum install unixODBC-devel -y
yum install gcc-c++ -y
yum install glibc-devel -y
yum install elfutils-libelf-devel -y
3. Configure SSH user equivalence
Steps:
1. Configure SSH.

On dbrac3:
su - oracle
mkdir ~/.ssh
ssh-keygen -t rsa
ssh-keygen -t dsa

On dbrac1 and dbrac2:
su - oracle
ssh dbrac3 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
ssh dbrac3 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys dbrac3:~/.ssh/authorized_keys

On dbrac3:
su - grid
mkdir ~/.ssh
ssh-keygen -t rsa
ssh-keygen -t dsa

On dbrac1 and dbrac2:
su - grid
ssh dbrac3 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
ssh dbrac3 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys dbrac3:~/.ssh/authorized_keys
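If the equivalence tests below still prompt for a password, the usual cause is wrong permissions on the .ssh directory of the new node; a commonly used fix (not part of the original notes) is:

chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys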
Verification
Run the following as both the oracle and grid users on every node (accept the host-key prompt the first time; subsequent runs must not prompt for a password):
ssh dbrac1 date
ssh dbrac2 date
ssh dbrac3 date
ssh dbrac1-priv date
ssh dbrac2-priv date
ssh dbrac3-priv date
ssh dbrac1
V. The installation
(1) First install the cvuqdisk package on dbrac3; it is shipped in the rpm directory of the Grid Infrastructure installation media.
[root@dbrac3 grid]# cd rpm/
[root@dbrac3 rpm]# ll
total 8
-rw-rw-r-- 1 grid oinstall 8173 Jul 15 2009 cvuqdisk-1.0.7-1.rpm
[root@dbrac3 rpm]# rpm -ivh cvuqdisk-1.0.7-1.rpm
Preparing...                ########################################### [100%]
Using default group oinstall to install package
   1:cvuqdisk               ########################################### [100%]
(2) Check whether the cluster meets the prerequisites for adding a node
Run this on node 1 or node 2:
[grid@dbrac1 ~]$ cluvfy stage -pre nodeadd -n dbrac3

Performing pre-checks for node addition

Checking node reachability...
Node reachability check passed from node "dbrac1"

Checking user equivalence...
User equivalence check passed for user "grid"

Checking node connectivity...
Checking hosts config file...
Verification of the hosts config file successful

Check: Node connectivity for interface "eth0"
Node connectivity passed for interface "eth0"
Node connectivity check passed

Checking CRS integrity...
CRS integrity check passed

Checking shared resources...
Shared resources check for node addition failed
Check failed on nodes: dbrac3

Checking node connectivity...
Checking hosts config file...
Verification of the hosts config file successful

Node connectivity passed for subnet "192.168.1.0" with node(s) dbrac1,dbrac2,dbrac3
TCP connectivity check passed for subnet "192.168.1.0"
Node connectivity passed for subnet "192.168.2.0" with node(s) dbrac1,dbrac2,dbrac3
TCP connectivity check passed for subnet "192.168.2.0"

Interfaces found on subnet "192.168.2.0" that are likely candidates for a private interconnect are:
dbrac1 eth1:192.168.2.80
dbrac2 eth1:192.168.2.81
dbrac3 eth3:192.168.2.82
WARNING: Could not find a suitable set of interfaces for VIPs
WARNING: Could not find a suitable set of interfaces with the same name for the private interconnect
Node connectivity check passed

Total memory check passed
Available memory check passed
Swap space check passed
Free disk space check passed for "dbrac3:/tmp"
User existence check passed for "grid"
Run level check passed
Hard limits check passed for "maximum open file descriptors"
Soft limits check passed for "maximum open file descriptors"
Hard limits check passed for "maximum user processes"
Soft limits check passed for "maximum user processes"
System architecture check passed
Kernel version check passed
Kernel parameter check passed for "semmsl"
Kernel parameter check passed for "semmns"
Kernel parameter check passed for "semopm"
Kernel parameter check passed for "semmni"
Kernel parameter check passed for "shmmax"
Kernel parameter check passed for "shmmni"
Kernel parameter check passed for "shmall"
Kernel parameter check passed for "file-max"
Kernel parameter check passed for "ip_local_port_range"
Kernel parameter check passed for "rmem_default"
Kernel parameter check passed for "rmem_max"
Kernel parameter check passed for "wmem_default"
Kernel parameter check passed for "wmem_max"
Kernel parameter check passed for "aio-max-nr"
Package existence check passed for "make-3.81"
Package existence check passed for "binutils-2.17.50.0.6"
Package existence check failed for "gcc-4.1"
Check failed on nodes: dbrac3
Package existence check passed for "libaio-0.3.106 (i386)"
Package existence check passed for "libaio-0.3.106 (x86_64)"
Package existence check passed for "glibc-2.5-24 (i686)"
Package existence check passed for "glibc-2.5-24 (x86_64)"
Package existence check passed for "compat-libstdc++-33-3.2.3 (i386)"
Package existence check passed for "compat-libstdc++-33-3.2.3 (x86_64)"
Package existence check passed for "elfutils-libelf-0.125 (x86_64)"
Package existence check failed for "elfutils-libelf-devel-0.125"
Check failed on nodes: dbrac3
Package existence check passed for "glibc-common-2.5"
Package existence check failed for "glibc-devel-2.5 (i386)"
Check failed on nodes: dbrac3
Package existence check failed for "glibc-devel-2.5 (x86_64)"
Check failed on nodes: dbrac3
Package existence check failed for "glibc-headers-2.5"
Check failed on nodes: dbrac3
Package existence check failed for "gcc-c++-4.1.2"
Check failed on nodes: dbrac3
Package existence check failed for "libaio-devel-0.3.106 (i386)"
Check failed on nodes: dbrac3
Package existence check failed for "libaio-devel-0.3.106 (x86_64)"
Check failed on nodes: dbrac3
Package existence check passed for "libgcc-4.1.2 (i386)"
Package existence check passed for "libgcc-4.1.2 (x86_64)"
Package existence check passed for "libstdc++-4.1.2 (i386)"
Package existence check passed for "libstdc++-4.1.2 (x86_64)"
Package existence check failed for "libstdc++-devel-4.1.2 (x86_64)"
Check failed on nodes: dbrac3
Package existence check failed for "sysstat-7.0.2"
Check failed on nodes: dbrac3
Package existence check failed for "unixODBC-2.2.11 (i386)"
Check failed on nodes: dbrac3
Package existence check failed for "unixODBC-2.2.11 (x86_64)"
Check failed on nodes: dbrac3
Package existence check failed for "unixODBC-devel-2.2.11 (i386)"
Check failed on nodes: dbrac3
Package existence check failed for "unixODBC-devel-2.2.11 (x86_64)"
Check failed on nodes: dbrac3
Package existence check passed for "ksh-20060214"
Check for multiple users with UID value 0 passed
User "grid" is not part of "root" group. Check passed

Starting Clock synchronization checks using Network Time Protocol(NTP)...
NTP Configuration file check started...
Network Time Protocol(NTP) configuration file not found on any of the nodes. Oracle Cluster Time Synchronization Service(CTSS) can be used instead of NTP for time synchronization on the cluster nodes
Clock synchronization check using Network Time Protocol(NTP) passed

Pre-check for node addition was unsuccessful on all the nodes.
Some of the failed items above can be skipped.
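Alternatively, instead of skipping them, the missing packages reported above can be installed on dbrac3 from the local yum repository configured earlier, for example:

yum install -y gcc gcc-c++ glibc-devel glibc-headers elfutils-libelf-devel libaio-devel libstdc++-devel sysstat unixODBC unixODBC-devel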
Then choose one of the existing nodes and run the add-node script.
(3) Extend the Grid Infrastructure home (this takes roughly an hour)
+ASM2@dbrac2 /u01/app/11.2.0/grid/oui/bin$ ./addNode.sh "CLUSTER_NEW_NODES={dbrac3}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={dbrac3-vip}"
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB. Actual 10111 MB Passed
Checking monitor: must be configured to display at least 256 colors
>>> Could not execute auto check for display colors using command /usr/bin/xdpyinfo. Check if the DISPLAY variable is set. Failed <<<<

Some requirement checks failed. You must fulfill these requirements before continuing with the installation, Continue? (y/n) [n] y

>>> Ignoring required pre-requisite failures. Continuing...

Oracle Universal Installer, Version 11.2.0.1.0 Production
Copyright (C) 1999, 2009, Oracle. All rights reserved.

Performing tests to see whether nodes dbrac1,dbrac3 are available
............................................................... 100% Done.

-----------------------------------------------------------------------------
Cluster Node Addition Summary
Global Settings
  Source: /u01/app/11.2.0/grid
  New Nodes
Space Requirements
  New Nodes
    dbrac3
      /: Required 8.01GB : Available 6.83GB
Installed Products
  Product Names
    Oracle Grid Infrastructure 11.2.0.1.0 Sun JDK 1.5.0.17.0 Installer SDK Component 11.2.0.1.0
    Oracle One-Off Patch Installer 11.2.0.0.2 Oracle Universal Installer 11.2.0.1.0
    Oracle Configuration Manager Deconfiguration 10.3.1.0.0 Enterprise Manager Common Core Files 10.2.0.4.2
    Oracle DBCA Deconfiguration 11.2.0.1.0 Oracle RAC Deconfiguration 11.2.0.1.0
    Oracle Quality of Service Management (Server) 11.2.0.1.0 Installation Plugin Files 11.2.0.1.0
    Universal Storage Manager Files 11.2.0.1.0 Oracle Text Required Support Files 11.2.0.1.0
    Automatic Storage Management Assistant 11.2.0.1.0 Oracle Database 11g Multimedia Files 11.2.0.1.0
    Oracle Multimedia Java Advanced Imaging 11.2.0.1.0 Oracle Globalization Support 11.2.0.1.0
    Oracle Multimedia Locator RDBMS Files 11.2.0.1.0 Oracle Core Required Support Files 11.2.0.1.0
    Bali Share 1.1.18.0.0 Oracle Database Deconfiguration 11.2.0.1.0
    Oracle Quality of Service Management (Client) 11.2.0.1.0 Expat libraries 2.0.1.0.1
    Oracle Containers for Java 11.2.0.1.0 Perl Modules 5.10.0.0.1 Secure Socket Layer 11.2.0.1.0
    Oracle JDBC/OCI Instant Client 11.2.0.1.0 Oracle Multimedia Client Option 11.2.0.1.0
    LDAP Required Support Files 11.2.0.1.0 Character Set Migration Utility 11.2.0.1.0
    Perl Interpreter 5.10.0.0.1 PL/SQL Embedded Gateway 11.2.0.1.0 OLAP SQL Scripts 11.2.0.1.0
    Database SQL Scripts 11.2.0.1.0 Oracle Extended Windowing Toolkit 3.4.47.0.0
    SSL Required Support Files for InstantClient 11.2.0.1.0 SQL*Plus Files for Instant Client 11.2.0.1.0
    Oracle Net Required Support Files 11.2.0.1.0 Oracle Database User Interface 2.2.13.0.0
    RDBMS Required Support Files for Instant Client 11.2.0.1.0 Enterprise Manager Minimal Integration 11.2.0.1.0
    XML Parser for Java 11.2.0.1.0 Oracle Security Developer Tools 11.2.0.1.0 Oracle Wallet Manager 11.2.0.1.0
    Enterprise Manager plugin Common Files 11.2.0.1.0 Platform Required Support Files 11.2.0.1.0
    Oracle JFC Extended Windowing Toolkit 4.2.36.0.0 RDBMS Required Support Files 11.2.0.1.0
    Oracle Ice Browser 5.2.3.6.0 Oracle Help For Java 4.2.9.0.0 Enterprise Manager Common Files 10.2.0.4.2
    Deinstallation Tool 11.2.0.1.0 Oracle Java Client 11.2.0.1.0 Cluster Verification Utility Files 11.2.0.1.0
    Oracle Notification Service (eONS) 11.2.0.1.0 Oracle LDAP administration 11.2.0.1.0
    Cluster Verification Utility Common Files 11.2.0.1.0 Oracle Clusterware RDBMS Files 11.2.0.1.0
    Oracle Locale Builder 11.2.0.1.0 Oracle Globalization Support 11.2.0.1.0 Buildtools Common Files 11.2.0.1.0
    Oracle RAC Required Support Files-HAS 11.2.0.1.0 SQL*Plus Required Support Files 11.2.0.1.0
    XDK Required Support Files 11.2.0.1.0 Agent Required Support Files 10.2.0.4.2
    Parser Generator Required Support Files 11.2.0.1.0 Precompiler Required Support Files 11.2.0.1.0
    Installation Common Files 11.2.0.1.0 Required Support Files 11.2.0.1.0
    Oracle JDBC/THIN Interfaces 11.2.0.1.0 Oracle Multimedia Locator 11.2.0.1.0 Oracle Multimedia 11.2.0.1.0
    HAS Common Files 11.2.0.1.0 Assistant Common Files 11.2.0.1.0 PL/SQL 11.2.0.1.0 HAS Files for DB 11.2.0.1.0
    Oracle Recovery Manager 11.2.0.1.0 Oracle Database Utilities 11.2.0.1.0 Oracle Notification Service 11.2.0.0.0
    SQL*Plus 11.2.0.1.0 Oracle Netca Client 11.2.0.1.0 Oracle Net 11.2.0.1.0 Oracle JVM 11.2.0.1.0
    Oracle Internet Directory Client 11.2.0.1.0 Oracle Net Listener 11.2.0.1.0
    Cluster Ready Services Files 11.2.0.1.0 Oracle Database 11g 11.2.0.1.0
-----------------------------------------------------------------------------
Instantiating scripts for add node (Friday, March 21, 2014 10:20:59 AM CST)
.                                                                 1% Done.
Instantiation of add node scripts complete

Copying to remote nodes (Friday, March 21, 2014 10:21:02 AM CST)
...............................................................................................                                 96% Done.
Home copied to new nodes

Saving inventory on nodes (Friday, March 21, 2014 11:21:01 AM CST)
SEVERE:Remote 'UpdateNodeList' failed on nodes: 'dbrac1'. Refer to '/u01/app/oraInventory/logs/addNodeActions2014-03-21_10-20-33AM.log' for details. You can manually re-run the following command on the failed nodes after the installation:
/u01/app/11.2.0/grid/oui/bin/runInstaller -updateNodeList -noClusterEnabled ORACLE_HOME=/u01/app/11.2.0/grid CLUSTER_NODES=dbrac1,dbrac2,dbrac3 CRS=true "INVENTORY_LOCATION=/u01/app/oraInventory" -invPtrLoc "/u01/app/11.2.0/grid/oraInst.loc" LOCAL_NODE=<node on which command is to be run>.
Please refer 'UpdateNodeList' logs under central inventory of remote nodes where failure occurred for more details.
.                                                                 100% Done.
Save inventory complete
WARNING:A new inventory has been created on one or more nodes in this session. However, it has not yet been registered as the central inventory of this system.
To register the new inventory please run the script at '/u01/app/oraInventory/orainstRoot.sh' with root privileges on nodes 'dbrac3'.
If you do not register the inventory, you may not be able to update or patch the products you installed.
The following configuration scripts need to be executed as the "root" user in each cluster node.
/u01/app/oraInventory/orainstRoot.sh #On nodes dbrac3
/u01/app/11.2.0/grid/root.sh #On nodes dbrac3
To execute the configuration scripts:
    1. Open a terminal window
    2. Log in as "root"
    3. Run the scripts in each cluster node

The Cluster Node Addition of /u01/app/11.2.0/grid was successful.
Please check '/tmp/silentInstall.log' for more details.
One step failed; the installer suggests re-running the command manually:
## On dbrac1, as the grid user
[grid@dbrac1 ~]$ /u01/app/11.2.0/grid/oui/bin/runInstaller -updateNodeList -noClusterEnabled ORACLE_HOME=/u01/app/11.2.0/grid CLUSTER_NODES=dbrac1,dbrac2,dbrac3 CRS=true "INVENTORY_LOCATION=/u01/app/oraInventory" -invPtrLoc "/u01/app/11.2.0/grid/oraInst.loc" LOCAL_NODE=dbrac1
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB. Actual 9991 MB Passed
The inventory pointer is located at /u01/app/11.2.0/grid/oraInst.loc
The inventory is located at /u01/app/oraInventory
Run the two root scripts on dbrac3:
[root@dbrac3 /]# /u01/app/oraInventory/orainstRoot.sh
Creating the Oracle inventory pointer file (/etc/oraInst.loc)
Changing permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.

[root@dbrac3 /]# /u01/app/11.2.0/grid/root.sh
Running Oracle 11g root.sh script...
The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME= /u01/app/11.2.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.
2014-03-21 11:31:12: Parsing the host name
2014-03-21 11:31:12: Checking for super user privileges
2014-03-21 11:31:12: User has super user privileges
Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params
Creating trace directory
LOCAL ADD MODE
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
Adding daemon to inittab
CRS-4123: Oracle High Availability Services has been started.
ohasd is starting
CRS-4402: The CSS daemon was started in exclusive mode but found an active CSS daemon on node dbrac1, number 1, and is terminating
An active cluster was found during exclusive startup, restarting to join the cluster
CRS-2672: Attempting to start 'ora.mdnsd' on 'dbrac3'
CRS-2676: Start of 'ora.mdnsd' on 'dbrac3' succeeded
CRS-2672: Attempting to start 'ora.gipcd' on 'dbrac3'
CRS-2676: Start of 'ora.gipcd' on 'dbrac3' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'dbrac3'
CRS-2676: Start of 'ora.gpnpd' on 'dbrac3' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'dbrac3'
CRS-2676: Start of 'ora.cssdmonitor' on 'dbrac3' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'dbrac3'
CRS-2672: Attempting to start 'ora.diskmon' on 'dbrac3'
CRS-2676: Start of 'ora.diskmon' on 'dbrac3' succeeded
CRS-2676: Start of 'ora.cssd' on 'dbrac3' succeeded
CRS-2672: Attempting to start 'ora.ctssd' on 'dbrac3'
CRS-2676: Start of 'ora.ctssd' on 'dbrac3' succeeded
CRS-2672: Attempting to start 'ora.drivers.acfs' on 'dbrac3'
CRS-2676: Start of 'ora.drivers.acfs' on 'dbrac3' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'dbrac3'
CRS-2676: Start of 'ora.asm' on 'dbrac3' succeeded
CRS-2672: Attempting to start 'ora.crsd' on 'dbrac3'
CRS-2676: Start of 'ora.crsd' on 'dbrac3' succeeded
CRS-2672: Attempting to start 'ora.evmd' on 'dbrac3'
CRS-2676: Start of 'ora.evmd' on 'dbrac3' succeeded
Timed out waiting for the CRS stack to start.
Verify:
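The verification commands were not recorded in the original notes. Because root.sh above ended with "Timed out waiting for the CRS stack to start", it is worth confirming that the stack really did come up on dbrac3; a typical set of checks (a sketch, not taken from the original run) would be:

[root@dbrac3 ~]# /u01/app/11.2.0/grid/bin/crsctl check crs      # CRS, CSS and EVM should all be online
[grid@dbrac1 ~]$ olsnodes -n -s                                 # dbrac3 should be listed and Active
[grid@dbrac1 ~]$ crsctl stat res -t                             # ASM, listener and VIP resources should be ONLINE on dbrac3
[grid@dbrac1 ~]$ cluvfy stage -post nodeadd -n dbrac3 -verbose  # post-addnode verification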
(4) Extend the database software (Oracle home)
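The commands for this step were not recorded either. The standard 11.2 procedure, sketched here on the assumption that the database home is /u01/app/oracle/product/11.2.0 (as set in the oracle user's profile above), is to run addNode.sh from an existing node as the oracle user and then run root.sh on the new node:

## On dbrac1 or dbrac2, as oracle
cd $ORACLE_HOME/oui/bin
./addNode.sh -silent "CLUSTER_NEW_NODES={dbrac3}"

## Then on dbrac3, as root
/u01/app/oracle/product/11.2.0/root.sh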
Verify:
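A simple check (not in the original notes) is to confirm that the new home exists on dbrac3 and that the central inventory now lists dbrac3 for the database home:

ssh dbrac3 ls -d /u01/app/oracle/product/11.2.0/bin/oracle
grep -i dbrac3 /u01/app/oraInventory/ContentsXML/inventory.xml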
(5) Extend the database with a new instance
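The commands were not recorded. A typical way to add the third instance is DBCA in silent mode from an existing node; the database name DB11 below is an assumption inferred from the instance name DB113 used in this environment:

## On dbrac1 or dbrac2, as oracle
dbca -silent -addInstance -nodeList dbrac3 -gdbName DB11 -instanceName DB113 -sysDBAUserName sys -sysDBAPassword <sys_password>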
Verify:
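Typical checks (a sketch; the database name DB11 is again an assumption):

srvctl status database -d DB11
srvctl status instance -d DB11 -i DB113

SQL> select inst_id, instance_name, status from gv$instance;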
VI. Problem summary
Problem 1: On repeated executions, the following command failed with the error below:
+ASM2@dbrac2 /u01/app/11.2.0/grid/oui/bin$ ./addNode.sh "CLUSTER_NEW_NODES={dbrac3}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={dbrac3-vip}"
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB. Actual 10111 MB Passed
Checking monitor: must be configured to display at least 256 colors
>>> Could not execute auto check for display colors using command /usr/bin/xdpyinfo. Check if the DISPLAY variable is set. Failed <<<<   ### can be ignored
Some requirement checks failed. You must fulfill these requirements before continuing with the installation, Continue? (y/n) [n] y

>>> Ignoring required pre-requisite failures. Continuing...

Oracle Universal Installer, Version 11.2.0.1.0 Production
Copyright (C) 1999, 2009, Oracle. All rights reserved.

Performing tests to see whether nodes dbrac1,dbrac3 are available
............................................................... 100% Done.

-----------------------------------------------------------------------------
Cluster Node Addition Summary
Global Settings
  Source: /u01/app/11.2.0/grid
  New Nodes
Space Requirements
  New Nodes
    dbrac3
      /: Required 7.11GB : Available 7.23GB
Installed Products
  Product Names
    Oracle Grid Infrastructure 11.2.0.1.0 Sun JDK 1.5.0.17.0 Installer SDK Component 11.2.0.1.0
    Oracle One-Off Patch Installer 11.2.0.0.2 Oracle Universal Installer 11.2.0.1.0
    Oracle Configuration Manager Deconfiguration 10.3.1.0.0 Enterprise Manager Common Core Files 10.2.0.4.2
    Oracle DBCA Deconfiguration 11.2.0.1.0 Oracle RAC Deconfiguration 11.2.0.1.0
    Oracle Quality of Service Management (Server) 11.2.0.1.0 Installation Plugin Files 11.2.0.1.0
    Universal Storage Manager Files 11.2.0.1.0 Oracle Text Required Support Files 11.2.0.1.0
    Automatic Storage Management Assistant 11.2.0.1.0 Oracle Database 11g Multimedia Files 11.2.0.1.0
    Oracle Multimedia Java Advanced Imaging 11.2.0.1.0 Oracle Globalization Support 11.2.0.1.0
    Oracle Multimedia Locator RDBMS Files 11.2.0.1.0 Oracle Core Required Support Files 11.2.0.1.0
    Bali Share 1.1.18.0.0 Oracle Database Deconfiguration 11.2.0.1.0
    Oracle Quality of Service Management (Client) 11.2.0.1.0 Expat libraries 2.0.1.0.1
    Oracle Containers for Java 11.2.0.1.0 Perl Modules 5.10.0.0.1 Secure Socket Layer 11.2.0.1.0
    Oracle JDBC/OCI Instant Client 11.2.0.1.0 Oracle Multimedia Client Option 11.2.0.1.0
    LDAP Required Support Files 11.2.0.1.0 Character Set Migration Utility 11.2.0.1.0
    Perl Interpreter 5.10.0.0.1 PL/SQL Embedded Gateway 11.2.0.1.0 OLAP SQL Scripts 11.2.0.1.0
    Database SQL Scripts 11.2.0.1.0 Oracle Extended Windowing Toolkit 3.4.47.0.0
    SSL Required Support Files for InstantClient 11.2.0.1.0 SQL*Plus Files for Instant Client 11.2.0.1.0
    Oracle Net Required Support Files 11.2.0.1.0 Oracle Database User Interface 2.2.13.0.0
    RDBMS Required Support Files for Instant Client 11.2.0.1.0 Enterprise Manager Minimal Integration 11.2.0.1.0
    XML Parser for Java 11.2.0.1.0 Oracle Security Developer Tools 11.2.0.1.0 Oracle Wallet Manager 11.2.0.1.0
    Enterprise Manager plugin Common Files 11.2.0.1.0 Platform Required Support Files 11.2.0.1.0
    Oracle JFC Extended Windowing Toolkit 4.2.36.0.0 RDBMS Required Support Files 11.2.0.1.0
    Oracle Ice Browser 5.2.3.6.0 Oracle Help For Java 4.2.9.0.0 Enterprise Manager Common Files 10.2.0.4.2
    Deinstallation Tool 11.2.0.1.0 Oracle Java Client 11.2.0.1.0 Cluster Verification Utility Files 11.2.0.1.0
    Oracle Notification Service (eONS) 11.2.0.1.0 Oracle LDAP administration 11.2.0.1.0
    Cluster Verification Utility Common Files 11.2.0.1.0 Oracle Clusterware RDBMS Files 11.2.0.1.0
    Oracle Locale Builder 11.2.0.1.0 Oracle Globalization Support 11.2.0.1.0 Buildtools Common Files 11.2.0.1.0
    Oracle RAC Required Support Files-HAS 11.2.0.1.0 SQL*Plus Required Support Files 11.2.0.1.0
    XDK Required Support Files 11.2.0.1.0 Agent Required Support Files 10.2.0.4.2
    Parser Generator Required Support Files 11.2.0.1.0 Precompiler Required Support Files 11.2.0.1.0
    Installation Common Files 11.2.0.1.0 Required Support Files 11.2.0.1.0
    Oracle JDBC/THIN Interfaces 11.2.0.1.0 Oracle Multimedia Locator 11.2.0.1.0 Oracle Multimedia 11.2.0.1.0
    HAS Common Files 11.2.0.1.0 Assistant Common Files 11.2.0.1.0 PL/SQL 11.2.0.1.0 HAS Files for DB 11.2.0.1.0
    Oracle Recovery Manager 11.2.0.1.0 Oracle Database Utilities 11.2.0.1.0 Oracle Notification Service 11.2.0.0.0
    SQL*Plus 11.2.0.1.0 Oracle Netca Client 11.2.0.1.0 Oracle Net 11.2.0.1.0 Oracle JVM 11.2.0.1.0
    Oracle Internet Directory Client 11.2.0.1.0 Oracle Net Listener 11.2.0.1.0
    Cluster Ready Services Files 11.2.0.1.0 Oracle Database 11g 11.2.0.1.0
-----------------------------------------------------------------------------
Instantiating scripts for add node (Thursday, March 20, 2014 5:22:11 PM CST)
.                                                                 1% Done.
Instantiation of add node scripts complete

Copying to remote nodes (Thursday, March 20, 2014 5:22:13 PM CST)
...SEVERE:Abnormal program termination. An internal error has occured. Please provide the following files to Oracle Support :
"/u01/app/oraInventory/logs/addNodeActions2014-03-20_05-21-54PM.log"
"/u01/app/oraInventory/logs/oraInstall2014-03-20_05-21-54PM.err"
"/u01/app/oraInventory/logs/oraInstall2014-03-20_05-21-54PM.out"

+ASM2@dbrac2 /u01/app/11.2.0/grid/oui/bin$ more /u01/app/oraInventory/logs/oraInstall2014-03-20_05-21-54PM.err
Exception in thread "Thread-57" java.lang.OutOfMemoryError: Java heap space
        at java.nio.CharBuffer.wrap(CharBuffer.java:350)
        at sun.nio.cs.StreamEncoder$CharsetSE.implWrite(StreamEncoder.java:378)
        at sun.nio.cs.StreamEncoder.write(StreamEncoder.java:136)
        at java.io.OutputStreamWriter.write(OutputStreamWriter.java:191)
        at java.io.BufferedWriter.flushBuffer(BufferedWriter.java:111)
        at java.io.PrintStream.write(PrintStream.java:458)
        at java.io.PrintStream.print(PrintStream.java:602)
        at oracle.sysman.oii.oiic.OiicSilentInstallPhaseProgressListener.displayProgress(OiicSilentInstallPhaseProgressListener.java:372)
        at oracle.sysman.oii.oiic.OiicSilentInstallPhaseProgressListener.addBytes(OiicSilentInstallPhaseProgressListener.java:341)
        at oracle.sysman.oii.oiic.OiicSilentInstallPhaseProgressListener.addPercentage(OiicSilentInstallPhaseProgressListener.java:555)
        at oracle.sysman.oii.oiix.OiixProgressUpdator$OiixProgressUpdatingWorker.run(OiixProgressUpdator.java:199)
        at java.lang.Thread.run(Thread.java:595)
Exception java.lang.OutOfMemoryError: Java heap space occurred..
java.lang.OutOfMemoryError: Java heap space
        at java.lang.StringBuilder.toString(StringBuilder.java:431)
        at java.io.UnixFileSystem.resolve(UnixFileSystem.java:93)
        at java.io.File.<init>(File.java:179)
        at java.io.File.listFiles(File.java:1020)
        at oracle.cluster.deployment.ractrans.DirectoryMap.processDir(DirectoryMap.java:225)
        at oracle.cluster.deployment.ractrans.DirectoryMap.processDir(DirectoryMap.java:281)
        at oracle.cluster.deployment.ractrans.DirectoryMap.processDir(DirectoryMap.java:281)
        at oracle.cluster.deployment.ractrans.DirectoryMap.<init>(DirectoryMap.java:166)
        at oracle.cluster.deployment.ractrans.DirListing.<init>(DirListing.java:283)
        at oracle.cluster.deployment.ractrans.DirListing.<init>(DirListing.java:159)
        at oracle.cluster.deployment.ractrans.RACTransferCore.createDirListing(RACTransferCore.java:209)
        at oracle.cluster.deployment.ractrans.RACTransfer.createDirListing(RACTransfer.java:1838)
        at oracle.cluster.deployment.ractrans.RACTransfer.transferDirStructureToNodes(RACTransfer.java:608)
        at oracle.cluster.deployment.ractrans.RACTransfer.transferDirToNodes(RACTransfer.java:252)
        at oracle.ops.mgmt.cluster.ClusterCmd.transferDirToNodes(ClusterCmd.java:3103)
        at oracle.ops.mgmt.cluster.ClusterCmd.transferDirToNodes(ClusterCmd.java:3022)
        at oracle.sysman.oii.oiip.oiipg.OiipgClusterOps.transferDirToNodes(OiipgClusterOps.java:947)
        at oracle.sysman.oii.oiif.oiifw.OiifwClusterCopyWCCE.doOperation(OiifwClusterCopyWCCE.java:544)
        at oracle.sysman.oii.oiif.oiifb.OiifbCondIterator.iterate(OiifbCondIterator.java:171)
        at oracle.sysman.oii.oiif.oiifw.OiifwAddNodePhaseWCDE.doOperation(OiifwAddNodePhaseWCDE.java:313)
        at oracle.sysman.oii.oiif.oiifb.OiifbCondIterator.iterate(OiifbCondIterator.java:171)
        at oracle.sysman.oii.oiic.OiicPullSession.doOperation(OiicPullSession.java:1371)
        at oracle.sysman.oii.oiic.OiicSessionWrapper.doOperation(OiicSessionWrapper.java:294)
        at oracle.sysman.oii.oiic.OiicInstaller.run(OiicInstaller.java:579)
        at oracle.sysman.oii.oiic.OiicInstaller.runInstaller(OiicInstaller.java:969)
        at oracle.sysman.oii.oiic.OiicInstaller.main(OiicInstaller.java:906)
Solution:
+ASM2@dbrac2 /u01/app/11.2.0/grid/oui/bin$ find /u01/app -name oraparam.ini
/u01/app/11.2.0/grid/inventory/Templates/oui/oraparam.ini
/u01/app/11.2.0/grid/oui/oraparam.ini
+ASM2@dbrac2 /u01/app/11.2.0/grid/oui/bin$ vi /u01/app/11.2.0/grid/oui/oraparam.ini
[Oracle]
DISTRIBUTION=FALSE
SOURCE=
LICENSE_LOCATION=
JRE_LOCATION=../../jdk/jre
OUI_LOCATION=..
OUI_CORE_LOCATION=..
OPATCH_LOCATION=..
DEFAULT_HOME_LOCATION=
DEFAULT_HOME_NAME=
NLS_ENABLED=TRUE
JRE_MEMORY_OPTIONS=" -mx2000m"
#### change the original value to -mx2000m (a larger Java heap for the installer)
NO_BROWSE=/net
BOOTSTRAP=TRUE
CLUSTERWARE={"oracle.crs","10.1.0.2.0"}
VENDORCLUSTERWARE=TRUE
#THIN_JDBC_FILENAME is optional and defaults to classes12.jar
#The value specified for this should be packaged with OUI, and should
#be relative to <OUI expanded stagedir>/jlib/
THIN_JDBC_FILENAME=classes12.jar
#JRE_OSDPARAM is to set OS dependent param for JRE ( mainly for native VM in 1.3.1)
#JRE_OSDPARAM is optional and should be set to -native for the JRE's
#that support native VM ( mainly for Unix platforms ), in JRE 1.3.1
#For JRE 1.4.1 this should be set to empty or the type of VM that is
#supported client/server. The default value is -native in UNIX platforms
#that supports native VM
#Unix supporting native - JRE_OSDPARAM="-native"
#Unix NOT supporting native and 1.4.1 - JRE_OSDPARAM=""
#Windows : DO NOT SET or JRE_OSDPARAM=""
OUI_VERSION=11.2.0.1.0
#RUN_OUICA specifies the batch script name that needs to be run
#The script is ouica.bat for win32, and ouica.sh for solaris.
#If the value is not specified, then the OUICA script is not run
RUN_OUICA=%OUICA_SCRIPT%
#SHOW_HOSTNAME=ALWAYS_SHOW shows the hostname panel always
#SHOW_HOSTNAME=NEVER_SHOW does not the hostname panel
Problem 2: After the contents of the grid user's $ORACLE_HOME on node 3 had been deleted directly, re-running the addNode.sh script on node 2 failed with the following error:
+ASM2@dbrac2 /u01/app/11.2.0/grid/oui/bin$ ./addNode.sh -silent "CLUSTER_NEW_NODES={dbrac3}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={dbrac3-vip}" "CLUSTER_NEW_PRIVATE_NODE_NAMES={dbrac3-priv}"
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB. Actual 10111 MB Passed

Oracle Universal Installer, Version 11.2.0.1.0 Production
Copyright (C) 1999, 2009, Oracle. All rights reserved.

Performing tests to see whether nodes dbrac1,dbrac3,dbrac3 are available
............................................................... 100% Done.

Error ocurred while retrieving node numbers of the existing nodes. Please check if clusterware home is properly configured.
SEVERE:Error ocurred while retrieving node numbers of the existing nodes. Please check if clusterware home is properly configured.
Solution:
+ASM2@dbrac2 /u01/app/11.2.0/grid/oui/bin$ ./runInstaller -silent -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={dbrac1,dbrac2}"
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB. Actual 10111 MB Passed
The inventory pointer is located at /etc/oraInst.loc
The inventory is located at /u01/app/oraInventory
SEVERE:Remote 'UpdateNodeList' failed on nodes: 'dbrac1'. Refer to '/u01/app/oraInventory/logs/UpdateNodeList2014-03-21_10-18-20AM.log' for details. You can manually re-run the following command on the failed nodes after the installation:
/u01/app/11.2.0/grid/oui/bin/runInstaller -updateNodeList -noClusterEnabled ORACLE_HOME=/u01/app/11.2.0/grid CLUSTER_NODES=dbrac1,dbrac2 CRS=true "INVENTORY_LOCATION=/u01/app/oraInventory" LOCAL_NODE=<node on which command is to be run>.
Please refer 'UpdateNodeList' logs under central inventory of remote nodes where failure occurred for more details.
The output again suggests re-running the command manually. On dbrac2:
+ASM2@dbrac2 /u01/app/11.2.0/grid/oui/bin$ /u01/app/11.2.0/grid/oui/bin/runInstaller -updateNodeList -noClusterEnabled ORACLE_HOME=/u01/app/11.2.0/grid CLUSTER_NODES=dbrac1,dbrac2 CRS=true "INVENTORY_LOCATION=/u01/app/oraInventory" LOCAL_NODE=dbrac2
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB. Actual 10111 MB Passed
The inventory pointer is located at /etc/oraInst.loc
The inventory is located at /u01/app/oraInventory
'UpdateNodeList' was successful.
On dbrac1:
[grid@dbrac1 ~]$ /u01/app/11.2.0/grid/oui/bin/runInstaller -updateNodeList -noClusterEnabled ORACLE_HOME=/u01/app/11.2.0/grid CLUSTER_NODES=dbrac1,dbrac2 CRS=true "INVENTORY_LOCATION=/u01/app/oraInventory" LOCAL_NODE=dbrac1
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB. Actual 9991 MB Passed
The inventory pointer is located at /etc/oraInst.loc
The inventory is located at /u01/app/oraInventory
Then re-run the addNode.sh procedure to add the node.