
Steps to Add and Remove Nodes in 11gR2 RAC

Reposted, Author: lhrbest, Time: 2018-07-03 15:16:38
 

Category: Oracle

2016-06-22 14:35:18


1 Configure /etc/hosts

Add node node3. The hosts file on all three nodes should be configured as follows:

127.0.0.1   localhost

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

#node1

192.168.8.221 rac1 rac1.oracle.com

192.168.8.222 rac1-vip

172.168.1.18  rac1-priv

#node2

192.168.8.223 rac2 rac2.oracle.com

192.168.8.224 rac2-vip

172.168.1.19  rac2-priv

#node3

192.168.8.227 rac3 rac3.oracle.com

192.168.8.228 rac3-vip

172.168.1.20  rac3-priv

#scan-ip

192.168.8.225 rac-cluster rac-cluster-scan
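Since the SCAN name is resolved through /etc/hosts here rather than DNS, a quick name-resolution check after updating the file on each node might look like this (a minimal sketch using the host names above):

for h in rac1 rac2 rac3 rac1-priv rac2-priv rac3-priv rac-cluster-scan; do ping -c 1 $h; done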

2 Disable the firewall

service iptables stop

chkconfig iptables off

3 Disable SELinux

vim /etc/selinux/config

SELINUX=disabled
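The setting in /etc/selinux/config only takes effect after a reboot; to switch a running system out of enforcing mode immediately, you can also run:

setenforce 0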

4 Create users and groups

--Create the groups:

groupadd -g 1000 oinstall

groupadd -g 1200 asmadmin

groupadd -g 1201 asmdba

groupadd -g 1202 asmoper

groupadd -g 1300 dba

groupadd -g 1301 oper

--Create the users:

useradd -u 1100 -g oinstall -G asmadmin,asmdba,asmoper,dba grid

useradd -u 1101 -g oinstall -G dba,oper,asmdba oracle

--Set the passwords:

passwd grid

passwd oracle
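The UIDs, GIDs and group memberships on the new node must match the existing nodes; a quick way to confirm:

id grid

id oracle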

5 Configure user environment variables

--grid user:

export PATH

export TMP=/tmp

export TMPDIR=$TMP

export ORACLE_SID=+ASM3

export ORACLE_BASE=/u01/app/grid

export ORACLE_HOME=/u01/app/11.2.0/grid

export NLS_DATE_FORMAT='yyyy/mm/dd hh24:mi:ss'

export TNS_ADMIN=$ORACLE_HOME/network/admin

export PATH=/usr/sbin:$PATH

export PATH=$ORACLE_HOME/bin:$PATH

export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib

export LANG=en_US

export NLS_LANG=AMERICAN_AMERICA.ZHS16GBK

umask 022

--oracle user:

export PATH

export TMP=/tmp

export TMPDIR=$TMP

export ORACLE_HOSTNAME=rac3

export ORACLE_SID=orcl3

export ORACLE_BASE=/u01/app/oracle

export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1

export ORACLE_UNQNAME=orcl

export TNS_ADMIN=$ORACLE_HOME/network/admin

#export ORACLE_TERM=xterm

export PATH=/usr/sbin:$PATH

export PATH=$ORACLE_HOME/bin:$PATH

export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib

export LANG=en_US

export NLS_LANG=AMERICAN_AMERICA.ZHS16GBK

export NLS_DATE_FORMAT='yyyy/mm/dd hh24:mi:ss'

umask 022
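These variables are typically appended to each user's ~/.bash_profile on rac3 (the grid block for the grid user, the oracle block for the oracle user) and then reloaded, for example:

source ~/.bash_profile

env | grep -E 'ORACLE_(SID|BASE|HOME)'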

6 Create the required directories

mkdir -p /u01/app/11.2.0/grid

mkdir -p /u01/app/grid

mkdir -p /u01/app/oracle

chown -R grid:oinstall /u01

chown -R grid:oinstall /u01/app/grid

chown -R grid:oinstall /u01/app/11.2.0/grid

chown -R oracle:oinstall /u01/app/oracle

chmod -R 775 /u01

7 Configure limits.conf, adding the following entries

vim /etc/security/limits.conf

oracle           soft   nofile           1024

oracle           hard   nofile           65536

oracle           soft   nproc            2047

oracle           hard   nproc            16384

grid             soft   nofile           1024

grid             hard   nofile           65536

grid             soft   nproc            2047

grid             hard   nproc            16384
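After logging in again as each user, the limits can be verified with ulimit, for example (run as root):

su - grid -c 'ulimit -Sn; ulimit -Hn; ulimit -Su; ulimit -Hu'

su - oracle -c 'ulimit -Sn; ulimit -Hn; ulimit -Su; ulimit -Hu'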

8 Modify kernel parameters

--Note: comment out any existing shmall and shmmax entries first

vim /etc/sysctl.conf

fs.aio-max-nr = 1048576

fs.file-max = 6815744

kernel.shmall = 1073741824

kernel.shmmax = 4398046511104

kernel.shmmni = 4096

kernel.sem = 250 32000 100 128

net.ipv4.ip_local_port_range = 9000 65500

net.core.rmem_default = 262144

net.core.rmem_max = 4194304

net.core.wmem_default = 262144

net.core.wmem_max = 1048576

--Apply the sysctl settings:

sysctl -p
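The new values can be spot-checked afterwards, for example:

sysctl kernel.shmmax kernel.shmall kernel.sem fs.file-max fs.aio-max-nr net.ipv4.ip_local_port_range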

9 Stop the NTP service

service ntpd stop

chkconfig ntpd off

mv /etc/ntp.conf /etc/ntp.conf.bak
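With ntpd stopped and /etc/ntp.conf moved aside, the Oracle Cluster Time Synchronization Service (CTSS) runs in active mode. Once Clusterware is up on the new node, this can be confirmed with:

crsctl check ctss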

10 Install the required dependency packages

yum install gcc compat-libstdc++-33 elfutils-libelf-devel glibc-devel glibc-headers gcc-c++ libaio-devel libstdc++-devel pdksh compat-libcap1-*
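A quick way to confirm the packages are present (pdksh is often missing from the stock RHEL 6 repositories and, as the later cluvfy output shows, is optional):

rpm -q gcc gcc-c++ glibc-devel glibc-headers libaio-devel libstdc++-devel elfutils-libelf-devel compat-libstdc++-33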

11 Configure shared storage
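Before generating the udev rules, it is worth confirming that each shared disk returns a stable SCSI ID; the same call used in the loop below can be run by hand, for example against /dev/sdb:

/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sdb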

for i in b c d e f g h;

do

echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\""      >> /etc/udev/rules.d/99-oracle-asmdevices.rules

done

--Apply the udev rules:

/sbin/start_udev

[root@rac3 ~]# ll /dev/asm*

brw-rw---- 1 grid asmadmin 8,  16 Jun 14 05:42 /dev/asm-diskb

brw-rw---- 1 grid asmadmin 8,  32 Jun 14 05:42 /dev/asm-diskc

brw-rw---- 1 grid asmadmin 8,  48 Jun 14 05:42 /dev/asm-diskd

brw-rw---- 1 grid asmadmin 8,  64 Jun 14 05:42 /dev/asm-diske

brw-rw---- 1 grid asmadmin 8,  80 Jun 14 05:42 /dev/asm-diskf

brw-rw---- 1 grid asmadmin 8,  96 Jun 14 05:42 /dev/asm-diskg

brw-rw---- 1 grid asmadmin 8, 112 Jun 14 05:42 /dev/asm-diskh

All of the steps above must be configured on the new node exactly as they are on the existing two nodes.

12 Configure SSH user equivalence for the oracle and grid users

Execute on node 1:

[oracle@rac1 ~]$ $ORACLE_HOME/oui/bin/runSSHSetup.sh -user oracle -hosts 'rac1 rac2 rac3' -advanced -exverify

[grid@rac1 ~]$ $ORACLE_HOME/oui/bin/runSSHSetup.sh -user grid -hosts 'rac1 rac2 rac3' -advanced -exverify
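Once the script finishes, passwordless SSH can be spot-checked from node 1 for both users, for example:

su - grid -c 'ssh rac2 date; ssh rac3 date'

su - oracle -c 'ssh rac2 date; ssh rac3 date'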

13 Verify user equivalence

[grid@rac1 ~]$ cluvfy comp nodecon -n rac1,rac2,rac3

Verifying node connectivity

Checking node connectivity...

Checking hosts config file...

Verification of the hosts config file successful

Node connectivity passed for subnet "192.168.8.0" with node(s) rac2,rac1,rac3

TCP connectivity check passed for subnet "192.168.8.0"

Node connectivity passed for subnet "172.168.0.0" with node(s) rac2,rac1,rac3

TCP connectivity check passed for subnet "172.168.0.0"

Node connectivity passed for subnet "169.254.0.0" with node(s) rac2,rac1

TCP connectivity check passed for subnet "169.254.0.0"

Interfaces found on subnet "192.168.8.0" that are likely candidates for VIP are:

rac2 eth0:192.168.8.223 eth0:192.168.8.224

rac1 eth0:192.168.8.221 eth0:192.168.8.222 eth0:192.168.8.225

rac3 eth0:192.168.8.227

Interfaces found on subnet "172.168.0.0" that are likely candidates for VIP are:

rac2 eth1:172.168.1.19

rac1 eth1:172.168.1.18

rac3 eth1:172.168.1.20

WARNING:

Could not find a suitable set of interfaces for the private interconnect

Checking subnet mask consistency...

Subnet mask consistency check passed for subnet "192.168.8.0".

Subnet mask consistency check passed for subnet "172.168.0.0".

Subnet mask consistency check passed for subnet "169.254.0.0".

Subnet mask consistency check passed.

Node connectivity check passed

Verification of node connectivity was successful.

14 Back up the OCR

[root@rac1 tmp]# ocrconfig -manualbackup

rac1     2016/06/14 05:47:56     /u01/app/11.2.0/grid/cdata/rac-cluster/backup_20160614_054756.ocr

[root@rac1 tmp]# ocrconfig -showbackup manual

rac1     2016/06/14 05:47:56     /u01/app/11.2.0/grid/cdata/rac-cluster/backup_20160614_054756.ocr
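The OCR itself can also be verified at this point (as root):

ocrcheck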

15 Install Clusterware on the new node

[grid@rac1 ~]$ cluvfy stage -post hwos -n rac3      

Performing post-checks for hardware and operating system setup

Checking node reachability...

Node reachability check passed from node "rac1"

Checking user equivalence...

User equivalence check passed for user "grid"

Checking node connectivity...

Checking hosts config file...

Verification of the hosts config file successful

Check: Node connectivity for interface "eth0"

Node connectivity passed for interface "eth0"

TCP connectivity check passed for subnet "192.168.8.0"

Check: Node connectivity for interface "eth1"

Node connectivity passed for interface "eth1"

ERROR:                                        /* This failure is caused by a bug; the network and user equivalence were checked and are fine, so the error is ignored here. */

PRVF-7617 : Node connectivity between "rac1 : 192.168.8.221" and "rac3 : 172.168.1.20" failed

TCP connectivity check failed for subnet "172.168.0.0"

Node connectivity check failed

Checking multicast communication...

Checking subnet "192.168.8.0" for multicast communication with multicast group "230.0.1.0"...

Check of subnet "192.168.8.0" for multicast communication with multicast group "230.0.1.0" passed.

Checking subnet "172.168.0.0" for multicast communication with multicast group "230.0.1.0"...

Check of subnet "172.168.0.0" for multicast communication with multicast group "230.0.1.0" passed.

Check of multicast communication passed.

Check for multiple users with UID value 0 passed

Time zone consistency check passed

Checking shared storage accessibility...

  Disk                                  Sharing Nodes (1 in count)

  ------------------------------------  ------------------------

  /dev/sda                              rac3                   

  Disk                                  Sharing Nodes (1 in count)

  ------------------------------------  ------------------------

  /dev/sdb                              rac3                    

  /dev/sdc                              rac3                   

  /dev/sdd                              rac3                   

  /dev/sde                              rac3                   

  /dev/sdf                              rac3                    

  /dev/sdg                              rac3                   

  /dev/sdh                              rac3                   

Shared storage check was successful on nodes "rac3"

Checking integrity of name service switch configuration file "/etc/nsswitch.conf" ...

Check for integrity of name service switch configuration file "/etc/nsswitch.conf" passed

Post-check for hardware and operating system setup was unsuccessful on all the nodes.

[grid@rac1 ~]$ cluvfy stage -pre crsinst -n rac3

Performing pre-checks for cluster services setup

Checking node reachability...

Node reachability check passed from node "rac1"

Checking user equivalence...

User equivalence check passed for user "grid"

Checking node connectivity...

Checking hosts config file...

Verification of the hosts config file successful

Check: Node connectivity for interface "eth0"

Node connectivity passed for interface "eth0"

TCP connectivity check passed for subnet "192.168.8.0"

Check: Node connectivity for interface "eth1"

Node connectivity passed for interface "eth1"

ERROR:

PRVF-7617 : Node connectivity between "rac1 : 192.168.8.221" and "rac3 : 172.168.1.20" failed

TCP connectivity check failed for subnet "172.168.0.0"

Node connectivity check failed

Checking multicast communication...

Checking subnet "192.168.8.0" for multicast communication with multicast group "230.0.1.0"...

Check of subnet "192.168.8.0" for multicast communication with multicast group "230.0.1.0" passed.

Checking subnet "172.168.0.0" for multicast communication with multicast group "230.0.1.0"...

Check of subnet "172.168.0.0" for multicast communication with multicast group "230.0.1.0" passed.

Check of multicast communication passed.

Checking ASMLib configuration.

Check for ASMLib configuration passed.

Total memory check passed

Available memory check passed

Swap space check passed

Free disk space check passed for "rac3:/u01/app/11.2.0/grid,rac3:/tmp"

Check for multiple users with UID value 1100 passed

User existence check passed for "grid"

Group existence check passed for "oinstall"

Group existence check passed for "dba"

Membership check for user "grid" in group "oinstall" [as Primary] passed

Membership check for user "grid" in group "dba" passed

Run level check passed

Hard limits check passed for "maximum open file descriptors"

Soft limits check passed for "maximum open file descriptors"

Hard limits check passed for "maximum user processes"

Soft limits check passed for "maximum user processes"

System architecture check passed

Kernel version check passed

Kernel parameter check passed for "semmsl"

Kernel parameter check passed for "semmns"

Kernel parameter check passed for "semopm"

Kernel parameter check passed for "semmni"

Kernel parameter check passed for "shmmax"

Kernel parameter check passed for "shmmni"

Kernel parameter check passed for "shmall"

Kernel parameter check passed for "file-max"

Kernel parameter check passed for "ip_local_port_range"

Kernel parameter check passed for "rmem_default"

Kernel parameter check passed for "rmem_max"

Kernel parameter check passed for "wmem_default"

Kernel parameter check passed for "wmem_max"

Kernel parameter check passed for "aio-max-nr"

Package existence check passed for "make"

Package existence check passed for "binutils"

Package existence check passed for "gcc(x86_64)"

Package existence check passed for "libaio(x86_64)"

Package existence check passed for "glibc(x86_64)"

Package existence check passed for "compat-libstdc++-33(x86_64)"

Package existence check passed for "elfutils-libelf(x86_64)"

Package existence check passed for "elfutils-libelf-devel"

Package existence check passed for "glibc-common"

Package existence check passed for "glibc-devel(x86_64)"

Package existence check passed for "glibc-headers"

Package existence check passed for "gcc-c++(x86_64)"

Package existence check passed for "libaio-devel(x86_64)"

Package existence check passed for "libgcc(x86_64)"

Package existence check passed for "libstdc++(x86_64)"

Package existence check passed for "libstdc++-devel(x86_64)"

Package existence check passed for "sysstat"

Package existence check failed for "pdksh"                       /* pdksh is not installed on node 3; the package is optional */

Check failed on nodes:

    rac3

Package existence check passed for "expat(x86_64)"

Check for multiple users with UID value 0 passed

Current group ID check passed

Starting check for consistency of primary group of root user

Check for consistency of root user's primary group passed

Starting Clock synchronization checks using Network Time Protocol(NTP)...

NTP Configuration file check started...

No NTP Daemons or Services were found to be running

Clock synchronization check using Network Time Protocol(NTP) passed

Core file name pattern consistency check passed.

User "grid" is not part of "root" group. Check passed

Default user file creation mask check passed

Checking consistency of file "/etc/resolv.conf" across nodes

File "/etc/resolv.conf" does not have both domain and search entries defined

domain entry in file "/etc/resolv.conf" is consistent across nodes

search entry in file "/etc/resolv.conf" is consistent across nodes

The DNS response time for an unreachable node is within acceptable limit on all nodes

File "/etc/resolv.conf" is consistent across nodes

Time zone consistency check passed

Pre-check for cluster services setup was unsuccessful on all the nodes.

[grid@rac1 ~]$ cluvfy stage -pre nodeadd -n rac3 -fixup -verbose

Performing pre-checks for node addition

Checking node reachability...

Check: Node reachability from node "rac1"

  Destination Node                      Reachable?             

  ------------------------------------  ------------------------

  rac3                                  yes                    

Result: Node reachability check passed from node "rac1"

Checking user equivalence...

Check: User equivalence for user "grid"

  Node Name                             Status                 

  ------------------------------------  ------------------------

  rac3                                  passed                 

Result: User equivalence check passed for user "grid"

Checking CRS integrity...

Clusterware version consistency passed

The Oracle Clusterware is healthy on node "rac1"

The Oracle Clusterware is healthy on node "rac2"

CRS integrity check passed

Checking shared resources...

Checking CRS home location...

"/u01/app/11.2.0/grid" is shared

Result: Shared resources check for node addition passed

Checking node connectivity...

Checking hosts config file...

  Node Name                             Status                 

  ------------------------------------  ------------------------

  rac1                                  passed                 

  rac2                                  passed                  

  rac3                                  passed                 

Verification of the hosts config file successful

Interface information for node "rac1"

 Name   IP Address      Subnet          Gateway         Def. Gateway    HW Address        MTU  

 ------ --------------- --------------- --------------- --------------- ----------------- ------

 eth0   192.168.8.221   192.168.8.0     0.0.0.0         192.168.8.1     08:00:27:A7:60:61 1500 

 eth0   192.168.8.222   192.168.8.0     0.0.0.0         192.168.8.1     08:00:27:A7:60:61 1500 

 eth0   192.168.8.225   192.168.8.0     0.0.0.0         192.168.8.1     08:00:27:A7:60:61 1500 

 eth1   172.168.1.18    172.168.0.0     0.0.0.0         192.168.8.1     08:00:27:4A:6A:15 1500 

 eth1   169.254.93.171  169.254.0.0     0.0.0.0         192.168.8.1     08:00:27:4A:6A:15 1500 

Interface information for node "rac2"

 Name   IP Address      Subnet          Gateway         Def. Gateway    HW Address        MTU  

 ------ --------------- --------------- --------------- --------------- ----------------- ------

 eth0   192.168.8.223   192.168.8.0     0.0.0.0         192.168.8.1     08:00:27:41:AC:86 1500 

 eth0   192.168.8.224   192.168.8.0     0.0.0.0         192.168.8.1     08:00:27:41:AC:86 1500 

 eth1   172.168.1.19    172.168.0.0     0.0.0.0         192.168.8.1     08:00:27:E0:B4:FA 1500 

 eth1   169.254.205.237 169.254.0.0     0.0.0.0         192.168.8.1     08:00:27:E0:B4:FA 1500 

Interface information for node "rac3"

 Name   IP Address      Subnet          Gateway         Def. Gateway    HW Address        MTU  

 ------ --------------- --------------- --------------- --------------- ----------------- ------

 eth0   192.168.8.227   192.168.8.0     0.0.0.0         192.168.8.1     08:00:27:39:2C:07 1500 

 eth1   172.168.1.20    172.168.0.0     0.0.0.0         192.168.8.1     08:00:27:31:D4:B0 1500 

Check: Node connectivity for interface "eth0"

  Source                          Destination                     Connected?     

  ------------------------------  ------------------------------  ----------------

  rac1[192.168.8.221]             rac1[192.168.8.222]             yes            

  rac1[192.168.8.221]             rac1[192.168.8.225]             yes            

  rac1[192.168.8.221]             rac2[192.168.8.223]             yes            

  rac1[192.168.8.221]             rac2[192.168.8.224]             yes            

  rac1[192.168.8.221]             rac3[192.168.8.227]             yes            

  rac1[192.168.8.222]             rac1[192.168.8.225]             yes            

  rac1[192.168.8.222]             rac2[192.168.8.223]             yes            

  rac1[192.168.8.222]             rac2[192.168.8.224]             yes            

  rac1[192.168.8.222]             rac3[192.168.8.227]             yes            

  rac1[192.168.8.225]             rac2[192.168.8.223]             yes            

  rac1[192.168.8.225]             rac2[192.168.8.224]             yes            

  rac1[192.168.8.225]             rac3[192.168.8.227]             yes            

  rac2[192.168.8.223]             rac2[192.168.8.224]             yes            

  rac2[192.168.8.223]             rac3[192.168.8.227]             yes            

  rac2[192.168.8.224]             rac3[192.168.8.227]             yes            

Result: Node connectivity passed for interface "eth0"

Check: TCP connectivity of subnet "192.168.8.0"

  Source                          Destination                     Connected?     

  ------------------------------  ------------------------------  ----------------

  rac1:192.168.8.221              rac1:192.168.8.222              passed         

  rac1:192.168.8.221              rac1:192.168.8.225              passed         

  rac1:192.168.8.221              rac2:192.168.8.223              passed         

  rac1:192.168.8.221              rac2:192.168.8.224              passed         

  rac1:192.168.8.221              rac3:192.168.8.227              passed         

Result: TCP connectivity check passed for subnet "192.168.8.0"

Check: Node connectivity for interface "eth1"

  Source                          Destination                     Connected?     

  ------------------------------  ------------------------------  ----------------

  rac1[172.168.1.18]              rac2[172.168.1.19]              yes            

  rac1[172.168.1.18]              rac3[172.168.1.20]              yes            

  rac2[172.168.1.19]              rac3[172.168.1.20]              yes            

Result: Node connectivity passed for interface "eth1"

Check: TCP connectivity of subnet "172.168.0.0"

  Source                          Destination                     Connected?     

  ------------------------------  ------------------------------  ----------------

  rac1:172.168.1.18               rac2:172.168.1.19               passed         

  rac1:172.168.1.18               rac3:172.168.1.20               passed         

Result: TCP connectivity check passed for subnet "172.168.0.0"

Checking subnet mask consistency...

Subnet mask consistency check passed for subnet "192.168.8.0".

Subnet mask consistency check passed for subnet "172.168.0.0".

Subnet mask consistency check passed.

Result: Node connectivity check passed

Checking multicast communication...

Checking subnet "192.168.8.0" for multicast communication with multicast group "230.0.1.0"...

Check of subnet "192.168.8.0" for multicast communication with multicast group "230.0.1.0" passed.

Checking subnet "172.168.0.0" for multicast communication with multicast group "230.0.1.0"...

Check of subnet "172.168.0.0" for multicast communication with multicast group "230.0.1.0" passed.

Check of multicast communication passed.

Check: Total memory

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          1.8334GB (1922488.0KB)    1.5GB (1572864.0KB)       passed   

  rac3          1.8334GB (1922488.0KB)    1.5GB (1572864.0KB)       passed   

Result: Total memory check passed

Check: Available memory

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          611.7266MB (626408.0KB)   50MB (51200.0KB)          passed   

  rac3          1.7658GB (1851556.0KB)    50MB (51200.0KB)          passed   

Result: Available memory check passed

Check: Swap space

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          4GB (4194296.0KB)         2.7501GB (2883732.0KB)    passed   

  rac3          4GB (4194296.0KB)         2.7501GB (2883732.0KB)    passed   

Result: Swap space check passed

Check: Free disk space for "rac1:/u01/app/11.2.0/grid,rac1:/tmp"

  Path              Node Name     Mount point   Available     Required      Status     

  ----------------  ------------  ------------  ------------  ------------  ------------

  /u01/app/11.2.0/grid  rac1          /             90.5684GB     7.5GB         passed     

  /tmp              rac1          /             90.5684GB     7.5GB         passed     

Result: Free disk space check passed for "rac1:/u01/app/11.2.0/grid,rac1:/tmp"

Check: Free disk space for "rac3:/u01/app/11.2.0/grid,rac3:/tmp"

  Path              Node Name     Mount point   Available     Required      Status     

  ----------------  ------------  ------------  ------------  ------------  ------------

  /u01/app/11.2.0/grid  rac3          /             111.8486GB    7.5GB         passed     

  /tmp              rac3          /             111.8486GB    7.5GB         passed     

Result: Free disk space check passed for "rac3:/u01/app/11.2.0/grid,rac3:/tmp"

Check: User existence for "grid"

  Node Name     Status                    Comment                 

  ------------  ------------------------  ------------------------

  rac1          passed                    exists(1100)           

  rac3          passed                    exists(1100)           

Checking for multiple users with UID value 1100

Result: Check for multiple users with UID value 1100 passed

Result: User existence check passed for "grid"

Check: Run level

  Node Name     run level                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          3                         3,5                       passed   

  rac3          3                         3,5                       passed   

Result: Run level check passed

Check: Hard limits for "maximum open file descriptors"

  Node Name         Type          Available     Required      Status         

  ----------------  ------------  ------------  ------------  ----------------

  rac1              hard          65536         65536         passed         

  rac3              hard          65536         65536         passed         

Result: Hard limits check passed for "maximum open file descriptors"

Check: Soft limits for "maximum open file descriptors"

  Node Name         Type          Available     Required      Status         

  ----------------  ------------  ------------  ------------  ----------------

  rac1              soft          1024          1024          passed          

  rac3              soft          1024          1024          passed         

Result: Soft limits check passed for "maximum open file descriptors"

Check: Hard limits for "maximum user processes"

  Node Name         Type          Available     Required      Status         

  ----------------  ------------  ------------  ------------  ----------------

  rac1              hard          16384         16384         passed         

  rac3              hard          16384         16384         passed         

Result: Hard limits check passed for "maximum user processes"

Check: Soft limits for "maximum user processes"

  Node Name         Type          Available     Required      Status         

  ----------------  ------------  ------------  ------------  ----------------

  rac1              soft          2047          2047          passed         

  rac3              soft          2047          2047          passed         

Result: Soft limits check passed for "maximum user processes"

Check: System architecture

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          x86_64                    x86_64                    passed   

  rac3          x86_64                    x86_64                    passed   

Result: System architecture check passed

Check: Kernel version

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          2.6.32-431.el6.x86_64     2.6.9                     passed   

  rac3          2.6.32-431.el6.x86_64     2.6.9                     passed   

Result: Kernel version check passed

Check: Kernel parameter for "semmsl"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              250           250           250           passed         

  rac3              250           250           250           passed         

Result: Kernel parameter check passed for "semmsl"

Check: Kernel parameter for "semmns"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              32000         32000         32000         passed         

  rac3              32000         32000         32000         passed         

Result: Kernel parameter check passed for "semmns"

Check: Kernel parameter for "semopm"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              100           100           100           passed         

  rac3              100           100           100           passed         

Result: Kernel parameter check passed for "semopm"

Check: Kernel parameter for "semmni"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              128           128           128           passed         

  rac3              128           128           128           passed         

Result: Kernel parameter check passed for "semmni"

Check: Kernel parameter for "shmmax"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              4398046511104  4398046511104  984313856     passed         

  rac3              4398046511104  4398046511104  984313856     passed         

Result: Kernel parameter check passed for "shmmax"

Check: Kernel parameter for "shmmni"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              4096          4096          4096          passed         

  rac3              4096          4096          4096          passed         

Result: Kernel parameter check passed for "shmmni"

Check: Kernel parameter for "shmall"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              1073741824    1073741824    2097152       passed         

  rac3              1073741824    1073741824    2097152       passed         

Result: Kernel parameter check passed for "shmall"

Check: Kernel parameter for "file-max"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              6815744       6815744       6815744       passed         

  rac3              6815744       6815744       6815744       passed         

Result: Kernel parameter check passed for "file-max"

Check: Kernel parameter for "ip_local_port_range"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              between 9000.0 & 65500.0  between 9000.0 & 65500.0  between 9000.0 & 65500.0  passed         

  rac3              between 9000.0 & 65500.0  between 9000.0 & 65500.0  between 9000.0 & 65500.0  passed         

Result: Kernel parameter check passed for "ip_local_port_range"

Check: Kernel parameter for "rmem_default"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              262144        262144        262144        passed         

  rac3              262144        262144        262144        passed         

Result: Kernel parameter check passed for "rmem_default"

Check: Kernel parameter for "rmem_max"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              4194304       4194304       4194304       passed         

  rac3              4194304       4194304       4194304       passed         

Result: Kernel parameter check passed for "rmem_max"

Check: Kernel parameter for "wmem_default"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              262144        262144        262144        passed         

  rac3              262144        262144        262144        passed         

Result: Kernel parameter check passed for "wmem_default"

Check: Kernel parameter for "wmem_max"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              1048576       1048576       1048576       passed         

  rac3              1048576       1048576       1048576       passed         

Result: Kernel parameter check passed for "wmem_max"

Check: Kernel parameter for "aio-max-nr"

  Node Name         Current       Configured    Required      Status        Comment    

  ----------------  ------------  ------------  ------------  ------------  ------------

  rac1              1048576       1048576       1048576       passed         

  rac3              1048576       1048576       1048576       passed         

Result: Kernel parameter check passed for "aio-max-nr"

Check: Package existence for "make"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          make-3.81-20.el6          make-3.80                 passed   

  rac3          make-3.81-20.el6          make-3.80                 passed   

Result: Package existence check passed for "make"

Check: Package existence for "binutils"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          binutils-2.20.51.0.2-5.36.el6  binutils-2.15.92.0.2      passed   

  rac3          binutils-2.20.51.0.2-5.36.el6  binutils-2.15.92.0.2      passed   

Result: Package existence check passed for "binutils"

Check: Package existence for "gcc(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          gcc(x86_64)-4.4.7-17.el6  gcc(x86_64)-3.4.6         passed   

  rac3          gcc(x86_64)-4.4.7-17.el6  gcc(x86_64)-3.4.6         passed   

Result: Package existence check passed for "gcc(x86_64)"

Check: Package existence for "libaio(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          libaio(x86_64)-0.3.107-10.el6  libaio(x86_64)-0.3.105    passed   

  rac3          libaio(x86_64)-0.3.107-10.el6  libaio(x86_64)-0.3.105    passed   

Result: Package existence check passed for "libaio(x86_64)"

Check: Package existence for "glibc(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          glibc(x86_64)-2.12-1.192.el6  glibc(x86_64)-2.3.4-2.41  passed   

  rac3          glibc(x86_64)-2.12-1.192.el6  glibc(x86_64)-2.3.4-2.41  passed   

Result: Package existence check passed for "glibc(x86_64)"

Check: Package existence for "compat-libstdc++-33(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          compat-libstdc++-33(x86_64)-3.2.3-69.el6  compat-libstdc++-33(x86_64)-3.2.3  passed   

  rac3          compat-libstdc++-33(x86_64)-3.2.3-69.el6  compat-libstdc++-33(x86_64)-3.2.3  passed   

Result: Package existence check passed for "compat-libstdc++-33(x86_64)"

Check: Package existence for "elfutils-libelf(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          elfutils-libelf(x86_64)-0.164-2.el6  elfutils-libelf(x86_64)-0.97  passed   

  rac3          elfutils-libelf(x86_64)-0.164-2.el6  elfutils-libelf(x86_64)-0.97  passed   

Result: Package existence check passed for "elfutils-libelf(x86_64)"

Check: Package existence for "elfutils-libelf-devel"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          elfutils-libelf-devel-0.164-2.el6  elfutils-libelf-devel-0.97  passed   

  rac3          elfutils-libelf-devel-0.164-2.el6  elfutils-libelf-devel-0.97  passed   

Result: Package existence check passed for "elfutils-libelf-devel"

Check: Package existence for "glibc-common"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          glibc-common-2.12-1.192.el6  glibc-common-2.3.4        passed   

  rac3          glibc-common-2.12-1.192.el6  glibc-common-2.3.4        passed   

Result: Package existence check passed for "glibc-common"

Check: Package existence for "glibc-devel(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          glibc-devel(x86_64)-2.12-1.192.el6  glibc-devel(x86_64)-2.3.4  passed   

  rac3          glibc-devel(x86_64)-2.12-1.192.el6  glibc-devel(x86_64)-2.3.4  passed   

Result: Package existence check passed for "glibc-devel(x86_64)"

Check: Package existence for "glibc-headers"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          glibc-headers-2.12-1.192.el6  glibc-headers-2.3.4       passed   

  rac3          glibc-headers-2.12-1.192.el6  glibc-headers-2.3.4       passed   

Result: Package existence check passed for "glibc-headers"

Check: Package existence for "gcc-c++(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          gcc-c++(x86_64)-4.4.7-17.el6  gcc-c++(x86_64)-3.4.6     passed   

  rac3          gcc-c++(x86_64)-4.4.7-17.el6  gcc-c++(x86_64)-3.4.6     passed   

Result: Package existence check passed for "gcc-c++(x86_64)"

Check: Package existence for "libaio-devel(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          libaio-devel(x86_64)-0.3.107-10.el6  libaio-devel(x86_64)-0.3.105  passed   

  rac3          libaio-devel(x86_64)-0.3.107-10.el6  libaio-devel(x86_64)-0.3.105  passed   

Result: Package existence check passed for "libaio-devel(x86_64)"

Check: Package existence for "libgcc(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          libgcc(x86_64)-4.4.7-17.el6  libgcc(x86_64)-3.4.6      passed   

  rac3          libgcc(x86_64)-4.4.7-17.el6  libgcc(x86_64)-3.4.6      passed   

Result: Package existence check passed for "libgcc(x86_64)"

Check: Package existence for "libstdc++(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          libstdc++(x86_64)-4.4.7-17.el6  libstdc++(x86_64)-3.4.6   passed   

  rac3          libstdc++(x86_64)-4.4.7-17.el6  libstdc++(x86_64)-3.4.6   passed   

Result: Package existence check passed for "libstdc++(x86_64)"

Check: Package existence for "libstdc++-devel(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          libstdc++-devel(x86_64)-4.4.7-17.el6  libstdc++-devel(x86_64)-3.4.6  passed   

  rac3          libstdc++-devel(x86_64)-4.4.7-17.el6  libstdc++-devel(x86_64)-3.4.6  passed   

Result: Package existence check passed for "libstdc++-devel(x86_64)"

Check: Package existence for "sysstat"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          sysstat-9.0.4-22.el6      sysstat-5.0.5             passed   

  rac3          sysstat-9.0.4-22.el6      sysstat-5.0.5             passed   

Result: Package existence check passed for "sysstat"

Check: Package existence for "pdksh"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          pdksh-5.2.14-30           pdksh-5.2.14              passed   

  rac3          missing                   pdksh-5.2.14              failed   

Result: Package existence check failed for "pdksh"

Check: Package existence for "expat(x86_64)"

  Node Name     Available                 Required                  Status   

  ------------  ------------------------  ------------------------  ----------

  rac1          expat(x86_64)-2.0.1-11.el6_2  expat(x86_64)-1.95.7      passed   

  rac3          expat(x86_64)-2.0.1-11.el6_2  expat(x86_64)-1.95.7      passed   

Result: Package existence check passed for "expat(x86_64)"

Checking for multiple users with UID value 0

Result: Check for multiple users with UID value 0 passed

Check: Current group ID

Result: Current group ID check passed

Starting check for consistency of primary group of root user

  Node Name                             Status                 

  ------------------------------------  ------------------------

  rac1                                  passed                 

  rac3                                  passed                 

Check for consistency of root user's primary group passed

Checking OCR integrity...

OCR integrity check passed

Checking Oracle Cluster Voting Disk configuration...

Oracle Cluster Voting Disk configuration check passed

Check: Time zone consistency

Result: Time zone consistency check passed

Starting Clock synchronization checks using Network Time Protocol(NTP)...

NTP Configuration file check started...

Network Time Protocol(NTP) configuration file not found on any of the nodes. Oracle Cluster Time Synchronization Service(CTSS) can be used instead of NTP for time synchronization on the cluster nodes

No NTP Daemons or Services were found to be running

Result: Clock synchronization check using Network Time Protocol(NTP) passed

Checking to make sure user "grid" is not in "root" group

  Node Name     Status                    Comment                

  ------------  ------------------------  ------------------------

  rac1          passed                    does not exist         

  rac3          passed                    does not exist         

Result: User "grid" is not part of "root" group. Check passed

Checking consistency of file "/etc/resolv.conf" across nodes

Checking the file "/etc/resolv.conf" to make sure only one of domain and search entries is defined

File "/etc/resolv.conf" does not have both domain and search entries defined

Checking if domain entry in file "/etc/resolv.conf" is consistent across the nodes...

domain entry in file "/etc/resolv.conf" is consistent across nodes

Checking if search entry in file "/etc/resolv.conf" is consistent across the nodes...

search entry in file "/etc/resolv.conf" is consistent across nodes

Checking DNS response time for an unreachable node

  Node Name                             Status                 

  ------------------------------------  ------------------------

  rac1                                  passed                 

  rac3                                  passed                 

The DNS response time for an unreachable node is within acceptable limit on all nodes

File "/etc/resolv.conf" is consistent across nodes

Checking integrity of name service switch configuration file "/etc/nsswitch.conf" ...

Checking if "hosts" entry in file "/etc/nsswitch.conf" is consistent across nodes...

Checking file "/etc/nsswitch.conf" to make sure that only one "hosts" entry is defined

More than one "hosts" entry does not exist in any "/etc/nsswitch.conf" file

Check for integrity of name service switch configuration file "/etc/nsswitch.conf" passed

Pre-check for node addition was unsuccessful on all the nodes.

16 Run addNode.sh as the grid (GI) user

Before actually adding the node, addNode.sh also invokes the cluvfy utility to verify that the new node meets all prerequisites; if it does not, the script refuses to continue.

Because addNode.sh relies on cluvfy for this verification and DNS is not configured in this environment, running it directly would certainly fail.

Therefore, before running addNode.sh, set an environment variable that skips the node-addition pre-check. The variable name comes straight from the addNode.sh script itself:

export IGNORE_PREADDNODE_CHECKS=Y

[grid@rac1 ~]$ export IGNORE_PREADDNODE_CHECKS=Y

[grid@rac1 ~]$ cd $ORACLE_HOME/oui/bin

[grid@rac1 bin]$ ./addNode.sh "CLUSTER_NEW_NODES={rac3}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={rac3-vip}" "CLUSTER_NEW_PRIVATE_NODE_NAMES={rac3-priv}" > /u01/app/grid/add_node.log 2>&1

[root@rac1 ~]# tail -f /u01/app/grid/add_node.log

Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 3646 MB    Passed

Oracle Universal Installer, Version 11.2.0.4.0 Production

Copyright (C) 1999, 2013, Oracle. All rights reserved.

Performing tests to see whether nodes rac2,rac3 are available

............................................................... 100% Done.

.

-----------------------------------------------------------------------------

Cluster Node Addition Summary

Global Settings

   Source: /u01/app/11.2.0/grid

   New Nodes

Space Requirements

   New Nodes

      rac3

         /: Required 4.45GB : Available 104.16GB

Installed Products

   Product Names

      Oracle Grid Infrastructure 11g 11.2.0.4.0

      Java Development Kit 1.5.0.51.10

      Installer SDK Component 11.2.0.4.0

      Oracle One-Off Patch Installer 11.2.0.3.4

      Oracle Universal Installer 11.2.0.4.0

      Oracle RAC Required Support Files-HAS 11.2.0.4.0

      Oracle USM Deconfiguration 11.2.0.4.0

      Oracle Configuration Manager Deconfiguration 10.3.1.0.0

      Enterprise Manager Common Core Files 10.2.0.4.5

      Oracle DBCA Deconfiguration 11.2.0.4.0

      Oracle RAC Deconfiguration 11.2.0.4.0

      Oracle Quality of Service Management (Server) 11.2.0.4.0

      Installation Plugin Files 11.2.0.4.0

      Universal Storage Manager Files 11.2.0.4.0

      Oracle Text Required Support Files 11.2.0.4.0

      Automatic Storage Management Assistant 11.2.0.4.0

      Oracle Database 11g Multimedia Files 11.2.0.4.0

      Oracle Multimedia Java Advanced Imaging 11.2.0.4.0

      Oracle Globalization Support 11.2.0.4.0

      Oracle Multimedia Locator RDBMS Files 11.2.0.4.0

      Oracle Core Required Support Files 11.2.0.4.0

      Bali Share 1.1.18.0.0

      Oracle Database Deconfiguration 11.2.0.4.0

      Oracle Quality of Service Management (Client) 11.2.0.4.0

      Expat libraries 2.0.1.0.1

      Oracle Containers for Java 11.2.0.4.0

      Perl Modules 5.10.0.0.1

      Secure Socket Layer 11.2.0.4.0

      Oracle JDBC/OCI Instant Client 11.2.0.4.0

      Oracle Multimedia Client Option 11.2.0.4.0

      LDAP Required Support Files 11.2.0.4.0

      Character Set Migration Utility 11.2.0.4.0

      Perl Interpreter 5.10.0.0.2

      PL/SQL Embedded Gateway 11.2.0.4.0

      OLAP SQL Scripts 11.2.0.4.0

      Database SQL Scripts 11.2.0.4.0

      Oracle Extended Windowing Toolkit 3.4.47.0.0

      SSL Required Support Files for InstantClient 11.2.0.4.0

      SQL*Plus Files for Instant Client 11.2.0.4.0

      Oracle Net Required Support Files 11.2.0.4.0

      Oracle Database User Interface 2.2.13.0.0

      RDBMS Required Support Files for Instant Client 11.2.0.4.0

      RDBMS Required Support Files Runtime 11.2.0.4.0

      XML Parser for Java 11.2.0.4.0

      Oracle Security Developer Tools 11.2.0.4.0

      Oracle Wallet Manager 11.2.0.4.0

      Enterprise Manager plugin Common Files 11.2.0.4.0

      Platform Required Support Files 11.2.0.4.0

      Oracle JFC Extended Windowing Toolkit 4.2.36.0.0

      RDBMS Required Support Files 11.2.0.4.0

      Oracle Ice Browser 5.2.3.6.0

      Oracle Help For Java 4.2.9.0.0

      Enterprise Manager Common Files 10.2.0.4.5

      Deinstallation Tool 11.2.0.4.0

      Oracle Java Client 11.2.0.4.0

      Cluster Verification Utility Files 11.2.0.4.0

      Oracle Notification Service (eONS) 11.2.0.4.0

      Oracle LDAP administration 11.2.0.4.0

      Cluster Verification Utility Common Files 11.2.0.4.0

      Oracle Clusterware RDBMS Files 11.2.0.4.0

      Oracle Locale Builder 11.2.0.4.0

      Oracle Globalization Support 11.2.0.4.0

      Buildtools Common Files 11.2.0.4.0

      HAS Common Files 11.2.0.4.0

      SQL*Plus Required Support Files 11.2.0.4.0

      XDK Required Support Files 11.2.0.4.0

      Agent Required Support Files 10.2.0.4.5

      Parser Generator Required Support Files 11.2.0.4.0

      Precompiler Required Support Files 11.2.0.4.0

      Installation Common Files 11.2.0.4.0

      Required Support Files 11.2.0.4.0

      Oracle JDBC/THIN Interfaces 11.2.0.4.0

      Oracle Multimedia Locator 11.2.0.4.0

      Oracle Multimedia 11.2.0.4.0

      Assistant Common Files 11.2.0.4.0

      Oracle Net 11.2.0.4.0

      PL/SQL 11.2.0.4.0

      HAS Files for DB 11.2.0.4.0

      Oracle Recovery Manager 11.2.0.4.0

      Oracle Database Utilities 11.2.0.4.0

      Oracle Notification Service 11.2.0.3.0

      SQL*Plus 11.2.0.4.0

      Oracle Netca Client 11.2.0.4.0

      Oracle Advanced Security 11.2.0.4.0

      Oracle JVM 11.2.0.4.0

      Oracle Internet Directory Client 11.2.0.4.0

      Oracle Net Listener 11.2.0.4.0

      Cluster Ready Services Files 11.2.0.4.0

      Oracle Database 11g 11.2.0.4.0

-----------------------------------------------------------------------------

Instantiating scripts for add node (Tuesday, June 14, 2016 6:16:50 AM CST)

.                                                                 1% Done.

Instantiation of add node scripts complete

Copying to remote nodes (Tuesday, June 14, 2016 6:16:53 AM CST)

...............................................................................................                                 96% Done.

Home copied to new nodes

Saving inventory on nodes (Tuesday, June 14, 2016 6:27:28 AM CST)

.                                                               100% Done.

Save inventory complete

WARNING:A new inventory has been created on one or more nodes in this session. However, it has not yet been registered as the central inventory of this system.

To register the new inventory please run the script at '/u01/app/oraInventory/orainstRoot.sh' with root privileges on nodes 'rac3'.

If you do not register the inventory, you may not be able to update or patch the products you installed.

The following configuration scripts need to be executed as the "root" user in each new cluster node. Each script in the list below is followed by a list of nodes.

/u01/app/oraInventory/orainstRoot.sh #On nodes rac3

/u01/app/11.2.0/grid/root.sh #On nodes rac3

To execute the configuration scripts:

    1. Open a terminal window

    2. Log in as "root"

    3. Run the scripts in each cluster node

   

The Cluster Node Addition of /u01/app/11.2.0/grid was successful.

Please check '/tmp/silentInstall.log' for more details.

[root@rac3 ~]# /u01/app/oraInventory/orainstRoot.sh

Creating the Oracle inventory pointer file (/etc/oraInst.loc)

Changing permissions of /u01/app/oraInventory.

Adding read,write permissions for group.

Removing read,write,execute permissions for world.

Changing groupname of /u01/app/oraInventory to oinstall.

The execution of the script is complete.

[root@rac3 ~]# /u01/app/11.2.0/grid/root.sh

Performing root user operation for Oracle 11g

The following environment variables are set as:

    ORACLE_OWNER= grid

    ORACLE_HOME=  /u01/app/11.2.0/grid

Enter the full pathname of the local bin directory: [/usr/local/bin]:

   Copying dbhome to /usr/local/bin ...

   Copying oraenv to /usr/local/bin ...

   Copying coraenv to /usr/local/bin ...

Creating /etc/oratab file...

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params

Creating trace directory

User ignored Prerequisites during installation

Installing Trace File Analyzer

OLR initialization - successful

Adding Clusterware entries to upstart

CRS-4402: The CSS daemon was started in exclusive mode but found an active CSS daemon on node rac1, number 1, and is terminating

An active cluster was found during exclusive startup, restarting to join the cluster

clscfg: EXISTING configuration version 5 detected.

clscfg: version 5 is 11g Release 2.

Successfully accumulated necessary OCR keys.

Creating OCR keys for user 'root', privgrp 'root'..

Operation successful.

Configure Oracle Grid Infrastructure for a Cluster ... succeeded
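Before extending the database home, it is worth confirming that rac3 has joined the cluster, for example as the grid user on any node:

olsnodes -n -s -t

crsctl stat res -t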

[root@rac1 ~]# su - oracle

[oracle@rac1 ~]$ cd $ORACLE_HOME/oui/bin

[oracle@rac1 bin]$ export IGNORE_PREADDNODE_CHECKS=Y

[oracle@rac1 bin]$ ./addNode.sh "CLUSTER_NEW_NODES={rac3}" > /u01/app/oracle/add_node.log 2>&1

[root@rac1 ~]# tail -f /u01/app/oracle/add_node.log

Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 1916 MB    Passed

Oracle Universal Installer, Version 11.2.0.4.0 Production

Copyright (C) 1999, 2013, Oracle. All rights reserved.

Performing tests to see whether nodes rac2,rac3 are available

............................................................... 100% Done.

....

-----------------------------------------------------------------------------

Cluster Node Addition Summary

Global Settings

   Source: /u01/app/oracle/product/11.2.0/db_1

   New Nodes

Space Requirements

   New Nodes

      rac3

         /: Required 4.26GB : Available 101.88GB

Installed Products

   Product Names

      Oracle Database 11g 11.2.0.4.0

      Java Development Kit 1.5.0.51.10

      Installer SDK Component 11.2.0.4.0

      Oracle One-Off Patch Installer 11.2.0.3.4

      Oracle Universal Installer 11.2.0.4.0

      Oracle USM Deconfiguration 11.2.0.4.0

      Oracle Configuration Manager Deconfiguration 10.3.1.0.0

      Oracle DBCA Deconfiguration 11.2.0.4.0

      Oracle RAC Deconfiguration 11.2.0.4.0

      Oracle Database Deconfiguration 11.2.0.4.0

      Oracle Configuration Manager Client 10.3.2.1.0

      Oracle Configuration Manager 10.3.8.1.0

      Oracle ODBC Driverfor Instant Client 11.2.0.4.0

      LDAP Required Support Files 11.2.0.4.0

      SSL Required Support Files for InstantClient 11.2.0.4.0

      Bali Share 1.1.18.0.0

      Oracle Extended Windowing Toolkit 3.4.47.0.0

      Oracle JFC Extended Windowing Toolkit 4.2.36.0.0

      Oracle Real Application Testing 11.2.0.4.0

      Oracle Database Vault J2EE Application 11.2.0.4.0

      Oracle Label Security 11.2.0.4.0

      Oracle Data Mining RDBMS Files 11.2.0.4.0

      Oracle OLAP RDBMS Files 11.2.0.4.0

      Oracle OLAP API 11.2.0.4.0

      Platform Required Support Files 11.2.0.4.0

      Oracle Database Vault option 11.2.0.4.0

      Oracle RAC Required Support Files-HAS 11.2.0.4.0

      SQL*Plus Required Support Files 11.2.0.4.0

      Oracle Display Fonts 9.0.2.0.0

      Oracle Ice Browser 5.2.3.6.0

      Oracle JDBC Server Support Package 11.2.0.4.0

      Oracle SQL Developer 11.2.0.4.0

      Oracle Application Express 11.2.0.4.0

      XDK Required Support Files 11.2.0.4.0

      RDBMS Required Support Files for Instant Client 11.2.0.4.0

      SQLJ Runtime 11.2.0.4.0

      Database Workspace Manager 11.2.0.4.0

      RDBMS Required Support Files Runtime 11.2.0.4.0

      Oracle Globalization Support 11.2.0.4.0

      Exadata Storage Server 11.2.0.1.0

      Provisioning Advisor Framework 10.2.0.4.3

      Enterprise Manager Database Plugin -- Repository Support 11.2.0.4.0

      Enterprise Manager Repository Core Files 10.2.0.4.5

      Enterprise Manager Database Plugin -- Agent Support 11.2.0.4.0

      Enterprise Manager Grid Control Core Files 10.2.0.4.5

      Enterprise Manager Common Core Files 10.2.0.4.5

      Enterprise Manager Agent Core Files 10.2.0.4.5

      RDBMS Required Support Files 11.2.0.4.0

      regexp 2.1.9.0.0

      Agent Required Support Files 10.2.0.4.5

      Oracle 11g Warehouse Builder Required Files 11.2.0.4.0

      Oracle Notification Service (eONS) 11.2.0.4.0

      Oracle Text Required Support Files 11.2.0.4.0

      Parser Generator Required Support Files 11.2.0.4.0

      Oracle Database 11g Multimedia Files 11.2.0.4.0

      Oracle Multimedia Java Advanced Imaging 11.2.0.4.0

      Oracle Multimedia Annotator 11.2.0.4.0

      Oracle JDBC/OCI Instant Client 11.2.0.4.0

      Oracle Multimedia Locator RDBMS Files 11.2.0.4.0

      Precompiler Required Support Files 11.2.0.4.0

      Oracle Core Required Support Files 11.2.0.4.0

      Sample Schema Data 11.2.0.4.0

      Oracle Starter Database 11.2.0.4.0

      Oracle Message Gateway Common Files 11.2.0.4.0

      Oracle XML Query 11.2.0.4.0

      XML Parser for Oracle JVM 11.2.0.4.0

      Oracle Help For Java 4.2.9.0.0

      Installation Plugin Files 11.2.0.4.0

      Enterprise Manager Common Files 10.2.0.4.5

      Expat libraries 2.0.1.0.1

      Deinstallation Tool 11.2.0.4.0

      Oracle Quality of Service Management (Client) 11.2.0.4.0

      Perl Modules 5.10.0.0.1

      JAccelerator (COMPANION) 11.2.0.4.0

      Oracle Containers for Java 11.2.0.4.0

      Perl Interpreter 5.10.0.0.2

      Oracle Net Required Support Files 11.2.0.4.0

      Secure Socket Layer 11.2.0.4.0

      Oracle Universal Connection Pool 11.2.0.4.0

      Oracle JDBC/THIN Interfaces 11.2.0.4.0

      Oracle Multimedia Client Option 11.2.0.4.0

      Oracle Java Client 11.2.0.4.0

      Character Set Migration Utility 11.2.0.4.0

      Oracle Code Editor 1.2.1.0.0I

      PL/SQL Embedded Gateway 11.2.0.4.0

      OLAP SQL Scripts 11.2.0.4.0

      Database SQL Scripts 11.2.0.4.0

      Oracle Locale Builder 11.2.0.4.0

      Oracle Globalization Support 11.2.0.4.0

      SQL*Plus Files for Instant Client 11.2.0.4.0

      Required Support Files 11.2.0.4.0

      Oracle Database User Interface 2.2.13.0.0

      Oracle ODBC Driver 11.2.0.4.0

      Oracle Notification Service 11.2.0.3.0

      XML Parser for Java 11.2.0.4.0

      Oracle Security Developer Tools 11.2.0.4.0

      Oracle Wallet Manager 11.2.0.4.0

      Cluster Verification Utility Common Files 11.2.0.4.0

      Oracle Clusterware RDBMS Files 11.2.0.4.0

      Oracle UIX 2.2.24.6.0

      Enterprise Manager plugin Common Files 11.2.0.4.0

      HAS Common Files 11.2.0.4.0

      Precompiler Common Files 11.2.0.4.0

      Installation Common Files 11.2.0.4.0

      Oracle Help for the  Web 2.0.14.0.0

      Oracle LDAP administration 11.2.0.4.0

      Buildtools Common Files 11.2.0.4.0

      Assistant Common Files 11.2.0.4.0

      Oracle Recovery Manager 11.2.0.4.0

      PL/SQL 11.2.0.4.0

      Generic Connectivity Common Files 11.2.0.4.0

      Oracle Database Gateway for ODBC 11.2.0.4.0

      Oracle Programmer 11.2.0.4.0

      Oracle Database Utilities 11.2.0.4.0

      Enterprise Manager Agent 10.2.0.4.5

      SQL*Plus 11.2.0.4.0

      Oracle Netca Client 11.2.0.4.0

      Oracle Multimedia Locator 11.2.0.4.0

      Oracle Call Interface (OCI) 11.2.0.4.0

      Oracle Multimedia 11.2.0.4.0

      Oracle Net 11.2.0.4.0

      Oracle XML Development Kit 11.2.0.4.0

      Oracle Internet Directory Client 11.2.0.4.0

      Database Configuration and Upgrade Assistants 11.2.0.4.0

      Oracle JVM 11.2.0.4.0

      Oracle Advanced Security 11.2.0.4.0

      Oracle Net Listener 11.2.0.4.0

      Oracle Enterprise Manager Console DB 11.2.0.4.0

      HAS Files for DB 11.2.0.4.0

      Oracle Text 11.2.0.4.0

      Oracle Net Services 11.2.0.4.0

      Oracle Database 11g 11.2.0.4.0

      Oracle OLAP 11.2.0.4.0

      Oracle Spatial 11.2.0.4.0

      Oracle Partitioning 11.2.0.4.0

      Enterprise Edition Options 11.2.0.4.0

-----------------------------------------------------------------------------

Instantiating scripts for add node (Thursday, June 9, 2016 8:58:09 AM CST)

.                                                                 1% Done.

Instantiation of add node scripts complete

Copying to remote nodes (Thursday, June 9, 2016 8:58:17 AM CST)

...............................................................................................                                 96% Done.

Home copied to new nodes

Saving inventory on nodes (Thursday, June 9, 2016 9:12:28 AM CST)

.                                                               100% Done.

Save inventory complete

WARNING:A new inventory has been created on one or more nodes in this session. However, it has not yet been registered as the central inventory of this system.

To register the new inventory please run the script at '/u01/app/oraInventory/orainstRoot.sh' with root privileges on nodes 'rac3'.

If you do not register the inventory, you may not be able to update or patch the products you installed.

The following configuration scripts need to be executed as the "root" user in each new cluster node. Each script in the list below is followed by a list of nodes.

/u01/app/oraInventory/orainstRoot.sh #On nodes rac3

/u01/app/oracle/product/11.2.0/db_1/root.sh #On nodes rac3

To execute the configuration scripts:

    1. Open a terminal window

    2. Log in as "root"

    3. Run the scripts in each cluster node

   

The Cluster Node Addition of /u01/app/oracle/product/11.2.0/db_1 was successful.

Please check '/tmp/silentInstall.log' for more details.

[root@rac3 ~]# /u01/app/oraInventory/orainstRoot.sh

Creating the Oracle inventory pointer file (/etc/oraInst.loc)

Changing permissions of /u01/app/oraInventory.

Adding read,write permissions for group.

Removing read,write,execute permissions for world.

Changing groupname of /u01/app/oraInventory to oinstall.

The execution of the script is complete.

[root@rac3 ~]# /u01/app/oracle/product/11.2.0/db_1/root.sh

Performing root user operation for Oracle 11g

The following environment variables are set as:

    ORACLE_OWNER= oracle

    ORACLE_HOME=  /u01/app/oracle/product/11.2.0/db_1

Enter the full pathname of the local bin directory: [/usr/local/bin]:

The contents of "dbhome" have not changed. No need to overwrite.

The contents of "oraenv" have not changed. No need to overwrite.

The contents of "coraenv" have not changed. No need to overwrite.

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

Finished product-specific root actions.

[root@rac3 ~]# ps -ef|grep ora

root      2410     1  0 08:43 ?        00:00:04 /u01/app/11.2.0/grid/bin/orarootagent.bin

grid      5593     1  0 08:51 ?        00:00:00 oracle+ASM3 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))

grid      5612     1  0 08:51 ?        00:00:00 oracle+ASM3_ocr (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))

grid      5637     1  0 08:51 ?        00:00:00 oracle+ASM3_asmb_+asm3 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))

grid      5742     1  0 08:51 ?        00:00:02 /u01/app/11.2.0/grid/bin/oraagent.bin

root      5746     1  0 08:51 ?        00:00:02 /u01/app/11.2.0/grid/bin/orarootagent.bin

grid      5779     1  0 08:51 ?        00:00:00 oracle+ASM3 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))

grid      5784     1  0 08:51 ?        00:00:00 oracle+ASM3 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))

root      9530 10313  0 09:16 pts/0    00:00:00 grep ora

root     10615     1  0 08:36 ?        00:00:12 /u01/app/11.2.0/grid/jdk/jre/bin/java -Xms64m -Xmx256m -classpath /u01/app/11.2.0/grid/tfa/rac3/tfa_home/jar/RATFA.jar:/u01/app/11.2.0/grid/tfa/rac3/tfa_home/jar/je-4.0.103.jar:/u01/app/11.2.0/grid/tfa/rac3/tfa_home/jar/ojdbc6.jar oracle.rat.tfa.TFAMain /u01/app/11.2.0/grid/tfa/rac3/tfa_home

grid     24396     1  0 08:43 ?        00:00:02 /u01/app/11.2.0/grid/bin/oraagent.bin

On node 1, use dbca to add an Oracle instance for the new node to the database.

dbca -> Instance Management -> Add an instance -> select the database and enter the sys user and password -> choose the instance name and node -> Finish.

Instead of the dbca GUI, dbca can also be run silently:

[oracle@rac1 bin]$ dbca -silent -addInstance -nodeList rac3 -gdbName orcl -instanceName orcl3 -sysDBAUserName sys -sysDBAPassword oracle

Adding instance

1% complete

2% complete

6% complete

13% complete

20% complete

26% complete

33% complete

40% complete

46% complete

53% complete

66% complete

Completing instance management.

76% complete

100% complete

Look at the log file "/u01/app/oracle/cfgtoollogs/dbca/orcl/orcl.log" for further details.

4.2 Register the instance with the CRS resources as the oracle user

Note: run these commands as the oracle user, and check the oracle user's group membership first.
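A quick way to check (the expected groups come from the user and group setup at the beginning of this article):

# the oracle user should have oinstall as its primary group, plus dba, oper and asmdba
id oracle
id grid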

[oracle@rac1 bin]$ srvctl config database -d orcl

Database unique name: orcl

Database name: orcl

Oracle home: /u01/app/oracle/product/11.2.0/db_1

Oracle user: oracle

Spfile: +DATADG/orcl/spfileorcl.ora

Domain:

Start options: open

Stop options: immediate

Database role: PRIMARY

Management policy: AUTOMATIC

Server pools: orcl

Database instances: orcl1,orcl2,orcl3

Disk Groups: DATADG,SYSTEMDG

Mount point paths:

Services: orcl_taf

Type: RAC

Database is administrator managed

The orcl3 instance is already associated with the database. If it were not, it could be registered with:

[oracle@rac1 bin]$ srvctl add instance -d orcl -i orcl3 -n rac3

5. Modify the Client-Side TAF Configuration

5.1 Modify the tnsnames.ora file under the oracle user

On every node, modify the tnsnames.ora file under the oracle user as follows (a client-side TAF connect descriptor example follows the listener aliases below):

NODE1_LOCAL=(ADDRESS = (PROTOCOL = TCP)(HOST= rac1-vip)(PORT = 1521))

NODE2_LOCAL=(ADDRESS = (PROTOCOL = TCP)(HOST =rac2-vip)(PORT = 1521))

NODE3_LOCAL=(ADDRESS = (PROTOCOL = TCP)(HOST =rac3-vip)(PORT = 1521))

ORCL_REMOTE =

 (DESCRIPTION =

  (ADDRESS_LIST =

    (ADDRESS = (PROTOCOL = TCP)(HOST=rac1-vip)(PORT = 1521))

    (ADDRESS = (PROTOCOL = TCP)(HOST=rac2-vip)(PORT = 1521))

    (ADDRESS = (PROTOCOL = TCP)(HOST=rac3-vip)(PORT = 1521))

   )

 )
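For reference, a client-side TAF connect descriptor also carries a FAILOVER_MODE clause. The alias below is only an illustration (the alias name ORCL_TAF is made up; the service name orcl_taf is the one used in this article):

ORCL_TAF =
 (DESCRIPTION =
  (ADDRESS_LIST =
    (LOAD_BALANCE = yes)
    (ADDRESS = (PROTOCOL = TCP)(HOST = rac1-vip)(PORT = 1521))
    (ADDRESS = (PROTOCOL = TCP)(HOST = rac2-vip)(PORT = 1521))
    (ADDRESS = (PROTOCOL = TCP)(HOST = rac3-vip)(PORT = 1521))
  )
  (CONNECT_DATA =
    (SERVICE_NAME = orcl_taf)
    (FAILOVER_MODE = (TYPE = SELECT)(METHOD = BASIC)(RETRIES = 180)(DELAY = 5))
  )
 )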

 

5.2 Modify the LOCAL_LISTENER and REMOTE_LISTENER parameters

Run the following (a quick verification sketch follows the commands):

alter system set LOCAL_LISTENER='NODE1_LOCAL' scope=both sid='orcl1';

alter system set LOCAL_LISTENER='NODE2_LOCAL' scope=both sid='orcl2';

alter system set LOCAL_LISTENER='NODE3_LOCAL' scope=both sid='orcl3';

alter system set REMOTE_LISTENER='ORCL_REMOTE' scope=both sid='*';
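A minimal check that the settings are in effect (run on any node as the oracle user; it assumes the tnsnames.ora entries above are in place on that node):

sqlplus -S / as sysdba <<EOF
show parameter local_listener
show parameter remote_listener
EOF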

6. Modify the Service-Side TAF Configuration

[oracle@rac1 admin]$ srvctl status service -d orcl

Service orcl_taf is running on instance(s) orcl1

[oracle@rac1 admin]$ srvctl config service -d orcl

Service name: orcl_taf

Service is enabled

Server pool: orcl_orcl_taf

Cardinality: 1

Disconnect: false

Service role: PRIMARY

Management policy: AUTOMATIC

DTP transaction: false

AQ HA notifications: false

Failover type: SELECT

Failover method: BASIC

TAF failover retries: 180

TAF failover delay: 5

Connection Load Balancing Goal: LONG

Runtime Load Balancing Goal: NONE

TAF policy specification: BASIC

Edition:

Preferred instances: orcl1

Available instances: orcl2

--Modify the existing service so that it includes the node 3 instance orcl3

[oracle@rac1 admin]$ srvctl modify service -d orcl -s orcl_taf -n -i orcl1,orcl2,orcl3

[oracle@rac1 admin]$ srvctl config service -d orcl

Service name: orcl_taf

Service is enabled

Server pool: orcl_orcl_taf

Cardinality: 3

Disconnect: false

Service role: PRIMARY

Management policy: AUTOMATIC

DTP transaction: false

AQ HA notifications: false

Failover type: SELECT

Failover method: BASIC

TAF failover retries: 180

TAF failover delay: 5

Connection Load Balancing Goal: LONG

Runtime Load Balancing Goal: NONE

TAF policy specification: BASIC

Edition:

Preferred instances: orcl1,orcl2,orcl3

Available instances:

[oracle@rac1 admin]$ srvctl start service -d orcl -s orcl_taf -i orcl3

[oracle@rac1 admin]$ srvctl status service -d orcl

Service orcl_taf is running on instance(s) orcl1,orcl3

#orcl2 was not running the service before, so start it here as well

[oracle@rac1 admin]$ srvctl start service -d orcl -s orcl_taf -i orcl2

[oracle@rac1 admin]$ srvctl status service -d orcl

Service orcl_taf is running on instance(s) orcl1,orcl2,orcl3

7. Verification

[grid@rac3 ~]$ olsnodes -s

rac1   Active

rac2   Active

rac3   Active

[grid@rac3 ~]$ olsnodes -n

rac1   1

rac2   2

rac3   3

[grid@rac1 ~]$ crsctl stat res -t

--------------------------------------------------------------------------------

NAME           TARGET  STATE        SERVER                   STATE_DETAILS      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.DATADG.dg

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

               ONLINE  ONLINE       rac3                                        

ora.LISTENER.lsnr

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                         

               ONLINE  ONLINE       rac3                                        

ora.SYSTEMDG.dg

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                         

               ONLINE  ONLINE       rac3                                        

ora.asm

               ONLINE  ONLINE       rac1                     Started            

               ONLINE  ONLINE       rac2                     Started             

               ONLINE  ONLINE       rac3                     Started            

ora.gsd

               OFFLINE OFFLINE      rac1                                        

               OFFLINE OFFLINE      rac2                                         

               OFFLINE OFFLINE      rac3                                        

ora.net1.network

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                         

               ONLINE  ONLINE       rac3                                        

ora.ons

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                         

               ONLINE  ONLINE       rac3                                        

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       rac2                                        

ora.cvu

      1        ONLINE  ONLINE       rac2                                         

ora.oc4j

      1        ONLINE  ONLINE       rac2                                        

ora.orcl.db

      1        ONLINE  ONLINE       rac1                     Open               

      2        ONLINE  ONLINE       rac2                     Open               

      3        ONLINE  ONLINE       rac3                     Open               

ora.orcl.orcl_taf.svc

      1        ONLINE  ONLINE       rac1                                        

      2        ONLINE  ONLINE       rac3                                        

      3        ONLINE  ONLINE       rac2                                        

ora.rac1.vip

      1        ONLINE  ONLINE       rac1                                        

ora.rac2.vip

      1        ONLINE  ONLINE       rac2                                        

ora.rac3.vip

      1        ONLINE  ONLINE       rac3                                        

ora.scan1.vip

      1        ONLINE  ONLINE       rac2          
                              

[oracle@rac1 ~]$ sqlplus / as sysdba

SQL*Plus: Release 11.2.0.4.0 Production on Thu Jun 9 10:09:31 2016

Copyright (c) 1982, 2013, Oracle.  All rights reserved.

Connected to:

Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production

With the Partitioning, Real Application Clusters, Automatic Storage Management, OLAP,

Data Mining and Real Application Testing options

SQL> col host_name for a20

SQL> select inst_id,host_name,instance_name,status from gv$instance;

   INST_ID HOST_NAME     INSTANCE_NAME STATUS

---------- -------------------- ---------------- ------------

     1 rac1       orcl1      OPEN

     3 rac3       orcl3      OPEN

     2 rac2       orcl2      OPEN

[root@rac1 ~]# ./crs_stat.sh

Name                             Target     State      Host     

------------------------       ---------- ---------  -------  

ora.DATADG.dg                  ONLINE     ONLINE     rac1     

ora.LISTENER.lsnr              ONLINE     ONLINE     rac1     

ora.LISTENER_SCAN1.lsnr        ONLINE     ONLINE     rac2     

ora.SYSTEMDG.dg                ONLINE     ONLINE     rac1     

ora.asm                        ONLINE     ONLINE     rac1     

ora.cvu                        ONLINE     ONLINE     rac2     

ora.gsd                        OFFLINE    OFFLINE             

ora.net1.network               ONLINE     ONLINE     rac1     

ora.oc4j                       ONLINE     ONLINE     rac2     

ora.ons                        ONLINE     ONLINE     rac1     

ora.orcl.db                    ONLINE     ONLINE     rac1     

ora.orcl.orcl_taf.svc          ONLINE     ONLINE     rac1     

ora.rac1.ASM1.asm              ONLINE     ONLINE     rac1     

ora.rac1.LISTENER_RAC1.lsnr    ONLINE     ONLINE     rac1     

ora.rac1.gsd                   OFFLINE    OFFLINE             

ora.rac1.ons                   ONLINE     ONLINE     rac1     

ora.rac1.vip                   ONLINE     ONLINE     rac1     

ora.rac2.ASM2.asm              ONLINE     ONLINE     rac2     

ora.rac2.LISTENER_RAC2.lsnr    ONLINE     ONLINE     rac2     

ora.rac2.gsd                   OFFLINE    OFFLINE             

ora.rac2.ons                   ONLINE     ONLINE     rac2     

ora.rac2.vip                   ONLINE     ONLINE     rac2     

ora.rac3.ASM3.asm              ONLINE     ONLINE     rac3     

ora.rac3.LISTENER_RAC3.lsnr    ONLINE     ONLINE     rac3     

ora.rac3.gsd                   OFFLINE    OFFLINE             

ora.rac3.ons                   ONLINE     ONLINE     rac3     

ora.rac3.vip                   ONLINE     ONLINE     rac3     

ora.scan1.vip                  ONLINE     ONLINE     rac2     


8. Summary: Adding and Removing Nodes in 11gR2

Adding a node to an 11gR2 RAC cluster has three phases:

1) Phase one copies the GRID HOME to the new node, configures and starts GRID, and updates the OCR and the inventory.

2) Phase two copies the RDBMS HOME to the new node and updates the inventory.

3) Phase three creates the new database instance with DBCA (undo tablespace, redo logs, initialization parameters, and so on) and updates the OCR, including registering the new instance.

Removing a node in 11gR2 goes through the same three phases in reverse order.

During node addition or removal the existing nodes stay online the whole time; no downtime is needed and client workloads are unaffected. The ORACLE_BASE and ORACLE_HOME paths on the new node are created automatically during the addition, so there is no need to create them by hand.

Notes:

1) Before adding or removing a node, take a manual backup of the OCR; if the operation fails, restoring the original OCR can recover the cluster configuration (see the sketch after this list).

2) A normal 11.2 GRID installation offers SSH setup in the OUI, but the addNode.sh script does not, so SSH user equivalence for the oracle and grid users must be configured manually (see the sketch after this list).
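Minimal sketches for both notes, using the paths and node names from this article (adjust them to your own environment; the restore file name is only the example produced later in this article):

# 1) Manual OCR backup, run as root on an existing node; a restore requires the
#    clusterware stack to be down on all nodes
ocrconfig -manualbackup
ocrconfig -showbackup manual
# ocrconfig -restore /u01/app/11.2.0/grid/cdata/rac-cluster/backup_20160613_124049.ocr

# 2) SSH user equivalence from an existing node to the new node rac3 for the grid
#    user (repeat the same steps as the oracle user); the sshUserSetup.sh script
#    shipped with the installation media can also be used instead
su - grid
ssh-keygen -t rsa            # accept the defaults
ssh-copy-id grid@rac3        # enter the grid password on rac3 once
ssh rac3 date                # should return the date without a password prompt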




 



1. Environment Overview

The existing RAC environment is a three-node 11.2.0.4 cluster. This part demonstrates removing one node, rac3; all removal operations are performed while the environment is running normally.

Removing a RAC node is exactly the reverse of adding one.

[root@rac1 ~]# cat /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

#node1

192.168.8.221   rac1 rac1.oracle.com

192.168.8.242   rac1-vip 

172.168.0.18    rac1-priv

#node2

192.168.8.223   rac2 rac2.oracle.com

192.168.8.244   rac2-vip

172.168.0.19    rac2-priv

#node3

192.168.8.228   rac3 rac3.oracle.com

192.168.8.247   rac3-vip

172.168.0.15    rac3-priv

#scan-ip

192.168.8.245   rac-cluster rac-cluster-scan

Current information about the RAC environment:

[root@rac1 ~]# olsnodes -s

rac1   Active

rac2   Active

rac3   Active

[root@rac1 ~]# olsnodes -i

rac1   rac1-vip

rac2   rac2-vip

rac3   rac3-vip

[root@rac1 ~]# crsctl stat res -t

--------------------------------------------------------------------------------

NAME           TARGET  STATE        SERVER                   STATE_DETAILS      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.DATADG.dg

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

               ONLINE  ONLINE       rac3                                        

ora.LISTENER.lsnr

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

               ONLINE  ONLINE       rac3                                        

ora.SYSTEMDG.dg

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

               ONLINE  ONLINE       rac3                                        

ora.asm

               ONLINE  ONLINE       rac1                     Started            

               ONLINE  ONLINE       rac2                     Started            

               ONLINE  ONLINE       rac3                     Started            

ora.gsd

               OFFLINE OFFLINE      rac1                                        

               OFFLINE OFFLINE      rac2                                        

               OFFLINE OFFLINE      rac3                                        

ora.net1.network

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

               ONLINE  ONLINE       rac3                                        

ora.ons

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

               ONLINE  ONLINE       rac3                                        

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       rac2                                         

ora.cvu

      1        ONLINE  ONLINE       rac2                                        

ora.oc4j

      1        ONLINE  ONLINE       rac2                                        

ora.orcl.db

      1        ONLINE  ONLINE       rac1                     Open               

      2        ONLINE  ONLINE       rac2                     Open               

      3        ONLINE  ONLINE       rac3                     Open               

ora.orcl.orcl_taf.svc

      1        ONLINE  ONLINE       rac1                                        

      2        ONLINE  ONLINE       rac3                                        

      3        ONLINE  ONLINE       rac2                                        

ora.rac1.vip

      1        ONLINE  ONLINE       rac1                                        

ora.rac2.vip

      1        ONLINE  ONLINE       rac2                                        

ora.rac3.vip

      1        ONLINE  ONLINE       rac3                                         

ora.scan1.vip

      1        ONLINE  ONLINE       rac2   
                                     

[root@rac1 ~]# su - oracle

[oracle@rac1 ~]$ sqlplus / as sysdba

SQL> col host_name for a20

SQL> select inst_id,host_name,instance_name,status from gv$instance;

   INST_ID HOST_NAME     INSTANCE_NAME STATUS

---------- -------------------- ---------------- ------------

     1 rac1       orcl1      OPEN

     3 rac3       orcl3      OPEN

     2 rac2       orcl2      OPEN

[root@rac1 ~]# ./crs_stat.sh

Name                             Target     State      Host     

------------------------       ---------- ---------  -------  

ora.DATADG.dg                  ONLINE     ONLINE     rac1     

ora.LISTENER.lsnr              ONLINE     ONLINE     rac1     

ora.LISTENER_SCAN1.lsnr        ONLINE     ONLINE     rac2     

ora.SYSTEMDG.dg                ONLINE     ONLINE     rac1     

ora.asm                        ONLINE     ONLINE     rac1     

ora.cvu                        ONLINE     ONLINE     rac2     

ora.gsd                        OFFLINE    OFFLINE             

ora.net1.network               ONLINE     ONLINE     rac1     

ora.oc4j                       ONLINE     ONLINE     rac2     

ora.ons                        ONLINE     ONLINE     rac1     

ora.orcl.db                    ONLINE     ONLINE     rac1     

ora.orcl.orcl_taf.svc          ONLINE     ONLINE     rac1     

ora.rac1.ASM1.asm              ONLINE     ONLINE     rac1     

ora.rac1.LISTENER_RAC1.lsnr    ONLINE     ONLINE     rac1     

ora.rac1.gsd                   OFFLINE    OFFLINE             

ora.rac1.ons                   ONLINE     ONLINE     rac1     

ora.rac1.vip                   ONLINE     ONLINE     rac1     

ora.rac2.ASM2.asm              ONLINE     ONLINE     rac2     

ora.rac2.LISTENER_RAC2.lsnr    ONLINE     ONLINE     rac2     

ora.rac2.gsd                   OFFLINE    OFFLINE             

ora.rac2.ons                   ONLINE     ONLINE     rac2     

ora.rac2.vip                   ONLINE     ONLINE     rac2     

ora.rac3.ASM3.asm              ONLINE     ONLINE     rac3     

ora.rac3.LISTENER_RAC3.lsnr    ONLINE     ONLINE     rac3     

ora.rac3.gsd                   OFFLINE    OFFLINE             

ora.rac3.ons                   ONLINE     ONLINE     rac3     

ora.rac3.vip                   ONLINE     ONLINE     rac3     

ora.scan1.vip                  ONLINE     ONLINE     rac2  

2. Back Up the OCR

Before removing the node, take a manual OCR backup (GRID also backs up the OCR automatically every 4 hours), so that the OCR can be restored if anything goes wrong.

The backup is taken here on node 1.

Run as the root user:

--Take a manual backup of the OCR

[root@rac1 ~]# ocrconfig -manualbackup

rac3     2016/06/13 12:40:49     /u01/app/11.2.0/grid/cdata/rac-cluster/backup_20160613_124049.ocr

rac2     2016/06/01 05:41:52     /u01/app/11.2.0/grid/cdata/rac-cluster/backup_20160601_054152.ocr

--List the manual OCR backups:

[root@rac1 ~]# ocrconfig -showbackup manual

rac3     2016/06/13 12:40:49     /u01/app/11.2.0/grid/cdata/rac-cluster/backup_20160613_124049.ocr

rac2     2016/06/01 05:41:52     /u01/app/11.2.0/grid/cdata/rac-cluster/backup_20160601_054152.ocr

3. Delete the Database Instance with DBCA

3.1 Adjust the service configuration

If the RAC environment has Service-Side TAF configured and the node to be removed is a preferred instance for the service, the connections on that node must be moved to another node before the removal, using relocate service.

When a preferred instance becomes unavailable, the service automatically relocates to an available instance; the same thing can be done manually with the following command:

Syntax: srvctl relocate service -d dbname -s servicename -i instancename -t newinstancename [-f]

[root@rac1 ~]# srvctl status service -d orcl

Service orcl_taf is running on instance(s) orcl1,orcl2,orcl3

[root@rac1 ~]# srvctl config service -d orcl

Service name: orcl_taf

Service is enabled

Server pool: orcl_orcl_taf

Cardinality: 3

Disconnect: false

Service role: PRIMARY

Management policy: AUTOMATIC

DTP transaction: false

AQ HA notifications: false

Failover type: SELECT

Failover method: BASIC

TAF failover retries: 180

TAF failover delay: 5

Connection Load Balancing Goal: LONG

Runtime Load Balancing Goal: NONE

TAF policy specification: BASIC

Edition:

Preferred instances: orcl1,orcl2,orcl3

Available instances:

--Relocate the service from node 3 to another node; run as the oracle user:

[root@rac1 ~]# su - oracle

[oracle@rac1 ~]$ srvctl relocate service -s orcl_taf -d orcl -i orcl3 -t orcl1

PRCR-1106 : Failed to relocate resource ora.orcl.orcl_taf.svc from node rac3 to node rac1

PRCR-1089 : Failed to relocate resource ora.orcl.orcl_taf.svc.

CRS-5702: Resource 'ora.orcl.orcl_taf.svc' is already running on 'rac1'

Because all three instances are configured as preferred for this service, the relocation cannot be performed: relocate only moves a service from a preferred instance to an available one.
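For comparison, a hypothetical service for which relocate would work is one created with separate preferred and available lists (the service name orcl_taf2 below is made up for illustration):

# preferred on orcl1/orcl2, available on orcl3; srvctl relocate service can then
# move the service between a preferred instance and the available one
srvctl add service -d orcl -s orcl_taf2 -r orcl1,orcl2 -a orcl3 -P BASIC -e SELECT -m BASIC
srvctl start service -d orcl -s orcl_taf2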

--Reconfigure the service instead, removing the node 3 instance from it

[oracle@rac1 ~]$ srvctl stop service -d orcl -s orcl_taf -i orcl3

[oracle@rac1 ~]$ srvctl status service -d orcl

Service orcl_taf is running on instance(s) orcl1,orcl2

[oracle@rac1 ~]$ srvctl modify service -d orcl -s orcl_taf -n -i orcl1,orcl2 -f

[oracle@rac1 ~]$ srvctl status service -d orcl

Service orcl_taf is running on instance(s) orcl1,orcl2

[oracle@rac1 ~]$ srvctl config service -d orcl

Service name: orcl_taf

Service is enabled

Server pool: orcl_orcl_taf

Cardinality: 2

Disconnect: true

Service role: PRIMARY

Management policy: AUTOMATIC

DTP transaction: false

AQ HA notifications: false

Failover type: SELECT

Failover method: BASIC

TAF failover retries: 180

TAF failover delay: 5

Connection Load Balancing Goal: LONG

Runtime Load Balancing Goal: NONE

TAF policy specification: BASIC

Edition:

Preferred instances: orcl1,orcl2

Available instances:

3.2 Delete the instance with DBCA

On node 1, run dbca as the oracle user. The deletion can be done through the GUI:

dbca -> RAC database -> Instance Management -> Delete Instance -> select the database and enter the sys user and password -> choose the instance to be deleted

Or use dbca in silent mode:

dbca -silent -deleteInstance [-nodeList node_name] -gdbName gdb_name -instanceName instance_name -sysDBAUserName sysdba -sysDBAPassword password

Run as the oracle user on node 1:

[oracle@rac1 ~]$ dbca -silent -deleteInstance -nodeList rac3 -gdbName orcl -instanceName orcl3 -sysDBAUserName sys -sysDBAPassword oracle

Deleting instance

1% complete

2% complete

6% complete

13% complete

20% complete

26% complete

33% complete

40% complete

46% complete

53% complete

60% complete

66% complete

Completing instance management.

100% complete

Look at the log file "/u01/app/oracle/cfgtoollogs/dbca/orcl.log" for further details.

3.3 Confirm that orcl3 has been removed from CRS

Note: run this as the oracle user, and again pay attention to the oracle user's group membership.

[oracle@rac1 ~]$ srvctl config database -d orcl

Database unique name: orcl

Database name: orcl

Oracle home: /u01/app/oracle/product/11.2.0/db_1

Oracle user: oracle

Spfile: +DATADG/orcl/spfileorcl.ora

Domain:

Start options: open

Stop options: immediate

Database role: PRIMARY

Management policy: AUTOMATIC

Server pools: orcl

Database instances: orcl1,orcl2

Disk Groups: DATADG,SYSTEMDG

Mount point paths:

Services: orcl_taf

Type: RAC

Database is administrator managed

The orcl3 instance is no longer listed for the database.

4. Remove the Node at the RAC Level (Oracle Software)

The operations in this section are performed as the oracle user (except the listener step in 4.1, which uses the grid user).

4.1 Stop the listener on node 3

Run as the grid user:

[root@rac1 ~]# su - grid

[grid@rac1 ~]$ lsnrctl status

LSNRCTL for Linux: Version 11.2.0.4.0 - Production on 13-JUN-2016 12:54:51

Copyright (c) 1991, 2013, Oracle.  All rights reserved.

Connecting to (DESCRIPTION=(ADDRESS=(PROTOCOL=IPC)(KEY=LISTENER)))

STATUS of the LISTENER

------------------------

Alias                     LISTENER

Version                   TNSLSNR for Linux: Version 11.2.0.4.0 - Production

Start Date                13-JUN-2016 12:32:26

Uptime                    0 days 0 hr. 22 min. 24 sec

Trace Level               off

Security                  ON: Local OS Authentication

SNMP                      OFF

Listener Parameter File   /u01/app/11.2.0/grid/network/admin/listener.ora

Listener Log File         /u01/app/grid/diag/tnslsnr/rac1/listener/alert/log.xml

Listening Endpoints Summary...

  (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=LISTENER)))

  (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.8.221)(PORT=1521)))

  (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.8.242)(PORT=1521)))

Services Summary...

Service "+ASM" has 1 instance(s).

  Instance "+ASM1", status READY, has 1 handler(s) for this service...

Service "orcl" has 1 instance(s).

  Instance "orcl1", status READY, has 1 handler(s) for this service...

Service "orclXDB" has 1 instance(s).

  Instance "orcl1", status READY, has 1 handler(s) for this service...

Service "orcl_taf" has 1 instance(s).

  Instance "orcl1", status READY, has 1 handler(s) for this service...

The command completed successfully

[grid@rac1 ~]# ./crs_stat.sh

Name                             Target     State      Host     

------------------------       ---------- ---------  -------  

ora.DATADG.dg                  ONLINE     ONLINE     rac1     

ora.LISTENER.lsnr              ONLINE     ONLINE     rac1     

ora.LISTENER_SCAN1.lsnr        ONLINE     ONLINE     rac2     

ora.SYSTEMDG.dg                ONLINE     ONLINE     rac1     

ora.asm                        ONLINE     ONLINE     rac1     

ora.cvu                        ONLINE     ONLINE     rac2     

ora.gsd                        OFFLINE    OFFLINE             

ora.net1.network               ONLINE     ONLINE     rac1     

ora.oc4j                       ONLINE     ONLINE     rac2     

ora.ons                        ONLINE     ONLINE     rac1     

ora.orcl.db                    ONLINE     ONLINE     rac1     

ora.orcl.orcl_taf.svc          ONLINE     ONLINE     rac1     

ora.rac1.ASM1.asm              ONLINE     ONLINE     rac1     

ora.rac1.LISTENER_RAC1.lsnr    ONLINE     ONLINE     rac1     

ora.rac1.gsd                   OFFLINE    OFFLINE             

ora.rac1.ons                   ONLINE     ONLINE     rac1     

ora.rac1.vip                   ONLINE     ONLINE     rac1     

ora.rac2.ASM2.asm              ONLINE     ONLINE     rac2     

ora.rac2.LISTENER_RAC2.lsnr    ONLINE     ONLINE     rac2     

ora.rac2.gsd                   OFFLINE    OFFLINE             

ora.rac2.ons                   ONLINE     ONLINE     rac2     

ora.rac2.vip                   ONLINE     ONLINE     rac2     

ora.rac3.ASM3.asm              ONLINE     ONLINE     rac3     

ora.rac3.LISTENER_RAC3.lsnr    ONLINE     ONLINE     rac3     

ora.rac3.gsd                   OFFLINE    OFFLINE             

ora.rac3.ons                   ONLINE     ONLINE     rac3     

ora.rac3.vip                   ONLINE     ONLINE     rac3     

ora.scan1.vip                  ONLINE     ONLINE     rac2

[grid@rac1 ~]# srvctl disable listener -l LISTENER -n rac3

[grid@rac1 ~]# srvctl stop listener -l LISTENER -n rac3

[grid@rac1 ~]# srvctl status listener -l listener

Listener LISTENER is enabled

Listener LISTENER is running on node(s): rac2,rac1

[root@rac1 ~]# ./crs_stat.sh

Name                             Target     State      Host     

------------------------       ---------- ---------  -------  

ora.DATADG.dg                  ONLINE     ONLINE     rac1     

ora.LISTENER.lsnr              ONLINE     ONLINE     rac1     

ora.LISTENER_SCAN1.lsnr        ONLINE     ONLINE     rac2     

ora.SYSTEMDG.dg                ONLINE     ONLINE     rac1     

ora.asm                        ONLINE     ONLINE     rac1     

ora.cvu                        ONLINE     ONLINE     rac2     

ora.gsd                        OFFLINE    OFFLINE             

ora.net1.network               ONLINE     ONLINE     rac1     

ora.oc4j                       ONLINE     ONLINE     rac2     

ora.ons                        ONLINE     ONLINE     rac1     

ora.orcl.db                    ONLINE     ONLINE     rac1     

ora.orcl.orcl_taf.svc          ONLINE     ONLINE     rac1     

ora.rac1.ASM1.asm              ONLINE     ONLINE     rac1     

ora.rac1.LISTENER_RAC1.lsnr    ONLINE     ONLINE     rac1     

ora.rac1.gsd                   OFFLINE    OFFLINE             

ora.rac1.ons                   ONLINE     ONLINE     rac1     

ora.rac1.vip                   ONLINE     ONLINE     rac1     

ora.rac2.ASM2.asm              ONLINE     ONLINE     rac2     

ora.rac2.LISTENER_RAC2.lsnr    ONLINE     ONLINE     rac2     

ora.rac2.gsd                   OFFLINE    OFFLINE             

ora.rac2.ons                   ONLINE     ONLINE     rac2     

ora.rac2.vip                   ONLINE     ONLINE     rac2     

ora.rac3.ASM3.asm              ONLINE     ONLINE     rac3     

ora.rac3.LISTENER_RAC3.lsnr    OFFLINE    OFFLINE             

ora.rac3.gsd                   OFFLINE    OFFLINE             

ora.rac3.ons                   ONLINE     ONLINE     rac3     

ora.rac3.vip                   ONLINE     ONLINE     rac3     

ora.scan1.vip                  ONLINE     ONLINE     rac2 

4.2 Update the inventory on node 3 as the oracle user

[root@rac3 ~]# su - oracle

[oracle@rac3 ~]$ cd $ORACLE_HOME/oui/bin

[oracle@rac3 bin]$ ls

addLangs.sh  attachHome.sh  filesList.bat         filesList.sh  resource      runInstaller     runSSHSetup.sh

addNode.sh   detachHome.sh  filesList.properties  lsnodes       runConfig.sh  runInstaller.sh

[oracle@rac3 bin]$ ./runInstaller -updateNodeList ORACLE_HOME=/u01/app/oracle/product/11.2.0/db_1 "CLUSTER_NODES=rac3"

Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 2047 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.

If this step fails with an error such as:

[oracle@rac3 bin]$ ./runInstaller -updateNodeList ORACLE_HOME=/u01/app/oracle/product/11.2.0/db_1 "CLUSTER_NODES=rac3"

Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 2047 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' failed

Here is how to resolve it.

The installer log contains:

INFO: Setting variable 'INVENTORY_LOCATION' to '/u01/app/oraInventory'. Received the value from a code block.

INFO: Created OiicStandardInventorySession.

INFO: Checkpoint:getting indexSession from checkpoint factory

INFO: Checkpoint:Index file :/u01/app/oracle/11.2.0/db_1/install/checkpoints/oui/index.xml not found.

INFO: Checkpoint:Initializing checkpoint session in oiicUpdateNodeList.

INFO: Checkpoint:Location is- /u01/app/oracle/11.2.0/db_1/install

INFO: Checkpoint:Initializing checkpoint session in oiicUpdateNodeList.

INFO: Checkpoint:Index session object added to oiicexitops.

INFO: Checkpoint:Initializing checkpoint session for UpdateNodeList.

INFO: Checkpoint:checkpointfile :/u01/app/oracle/11.2.0/db_1/install/checkpoints/oui/checkpoint_null.xml not found,creating one for this session

INFO: Checkpoint:constructing checkpoint with name:oracle.installer.updatenodelist in checkpoint factory

SEVERE: oracle.sysman.oii.oiix.OiixException: The Oracle home '/u01/app/oracle/11.2.0/db_1' could not be updated as it does not exist.

at oracle.sysman.oii.oiic.OiicBaseInventoryApp.getOracleHomeInfo(OiicBaseInventoryApp.java:738)

at oracle.sysman.oii.oiic.OiicUpdateNodeList.doOperation(OiicUpdateNodeList.java:206)

at oracle.sysman.oii.oiic.OiicBaseInventoryApp.main_helper(OiicBaseInventoryApp.java:890)

at oracle.sysman.oii.oiic.OiicUpdateNodeList.main(OiicUpdateNodeList.java:399)

--Check /etc/oraInst.loc on node 3:

[root@rac3 logs]# cat /etc/oraInst.loc

inventory_loc=/u01/app/oraInventory

inst_group=oinstall

[root@rac3 logs]#

--This node was added from node 1, so check node 1's oraInst.loc file:

[oracle@rac1 ~]$ cat /etc/oraInst.loc

inventory_loc=/u01/oraInventory

inst_group=oinstall

--Modify node 3's oraInst.loc so that it matches node 1:

[root@rac3 logs]# cat /etc/oraInst.loc

inventory_loc=/u01/oraInventory

inst_group=oinstall

--Update the node list again; this time it succeeds:

[oracle@rac3 bin]$ ./runInstaller -updateNodeList ORACLE_HOME=/u01/app/oracle/11.2.0/db_1 "CLUSTER_NODES=rac3"

Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB. Actual 2925 MB Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/oraInventory

'UpdateNodeList' was successful.

4.3 Remove node 3's ORACLE_HOME: run the deinstall command as the oracle user

[oracle@rac3 bin]$ $ORACLE_HOME/deinstall/deinstall -local

Checking for required files and bootstrapping ...

Please wait ...

Location of logs /u01/app/oraInventory/logs/

############ ORACLE DEINSTALL & DECONFIG TOOL START ############

######################### CHECK OPERATION START #########################

## [START] Install check configuration ##

Checking for existence of the Oracle home location /u01/app/oracle/product/11.2.0/db_1

Oracle Home type selected for deinstall is: Oracle Real Application Cluster Database

Oracle Base selected for deinstall is: /u01/app/oracle

Checking for existence of central inventory location /u01/app/oraInventory

Checking for existence of the Oracle Grid Infrastructure home /u01/app/11.2.0/grid

The following nodes are part of this cluster: rac3

Checking for sufficient temp space availability on node(s) : 'rac3'

## [END] Install check configuration ##

Network Configuration check config START

Network de-configuration trace file location: /u01/app/oraInventory/logs/netdc_check2016-06-13_01-13-53-PM.log

Network Configuration check config END

Database Check Configuration START

Database de-configuration trace file location: /u01/app/oraInventory/logs/databasedc_check2016-06-13_01-13-56-PM.log

Database Check Configuration END

Enterprise Manager Configuration Assistant START

EMCA de-configuration trace file location: /u01/app/oraInventory/logs/emcadc_check2016-06-13_01-13-59-PM.log

Enterprise Manager Configuration Assistant END

Oracle Configuration Manager check START

OCM check log file location : /u01/app/oraInventory/logs//ocm_check3898.log

Oracle Configuration Manager check END

######################### CHECK OPERATION END #########################

####################### CHECK OPERATION SUMMARY #######################

Oracle Grid Infrastructure Home is: /u01/app/11.2.0/grid

The cluster node(s) on which the Oracle home deinstallation will be performed are:rac3

Since -local option has been specified, the Oracle home will be deinstalled only on the local node, 'rac3', and the global configuration will be removed.

Oracle Home selected for deinstall is: /u01/app/oracle/product/11.2.0/db_1

Inventory Location where the Oracle home registered is: /u01/app/oraInventory

The option -local will not modify any database configuration for this Oracle home.

No Enterprise Manager configuration to be updated for any database(s)

No Enterprise Manager ASM targets to update

No Enterprise Manager listener targets to migrate

Checking the config status for CCR

Oracle Home exists with CCR directory, but CCR is not configured

CCR check is finished

Do you want to continue (y - yes, n - no)? [n]: y

A log of this session will be written to: '/u01/app/oraInventory/logs/deinstall_deconfig2016-06-13_01-13-46-PM.out'

Any error messages from this session will be written to: '/u01/app/oraInventory/logs/deinstall_deconfig2016-06-13_01-13-46-PM.err'

######################## CLEAN OPERATION START ########################

Enterprise Manager Configuration Assistant START

EMCA de-configuration trace file location: /u01/app/oraInventory/logs/emcadc_clean2016-06-13_01-13-59-PM.log

Updating Enterprise Manager ASM targets (if any)

Updating Enterprise Manager listener targets (if any)

Enterprise Manager Configuration Assistant END

Database de-configuration trace file location: /u01/app/oraInventory/logs/databasedc_clean2016-06-13_01-15-56-PM.log

Network Configuration clean config START

Network de-configuration trace file location: /u01/app/oraInventory/logs/netdc_clean2016-06-13_01-15-56-PM.log

De-configuring Local Net Service Names configuration file...

Local Net Service Names configuration file de-configured successfully.

De-configuring backup files...

Backup files de-configured successfully.

The network configuration has been cleaned up successfully.

Network Configuration clean config END

Oracle Configuration Manager clean START

OCM clean log file location : /u01/app/oraInventory/logs//ocm_clean3898.log

Oracle Configuration Manager clean END

Setting the force flag to false

Setting the force flag to cleanup the Oracle Base

Oracle Universal Installer clean START

Detach Oracle home '/u01/app/oracle/product/11.2.0/db_1' from the central inventory on the local node : Done

Delete directory '/u01/app/oracle/product/11.2.0/db_1' on the local node : Done

Failed to delete the directory '/u01/app/oracle'. The directory is in use.

Delete directory '/u01/app/oracle' on the local node : Failed <<<<

Oracle Universal Installer cleanup completed with errors.

Oracle Universal Installer clean END

## [START] Oracle install clean ##

Clean install operation removing temporary directory '/tmp/deinstall2016-06-13_01-13-12PM' on node 'rac3'

## [END] Oracle install clean ##

######################### CLEAN OPERATION END #########################

####################### CLEAN OPERATION SUMMARY #######################

Cleaning the config for CCR

As CCR is not configured, so skipping the cleaning of CCR configuration

CCR clean is finished

Successfully detached Oracle home '/u01/app/oracle/product/11.2.0/db_1' from the central inventory on the local node.

Successfully deleted directory '/u01/app/oracle/product/11.2.0/db_1' on the local node.

Failed to delete directory '/u01/app/oracle' on the local node.

Oracle Universal Installer cleanup completed with errors.

Oracle deinstall tool successfully cleaned up temporary directories.

#######################################################################

############# ORACLE DEINSTALL & DECONFIG TOOL END #############

4.4 Update the inventory on node 1 as the oracle user

[root@rac1 ~]# su - oracle

[oracle@rac1 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=/u01/app/oracle/product/11.2.0/db_1 "CLUSTER_NODES=rac1,rac2"

Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 1868 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.

5. Remove the Node at the GRID Level (Clusterware)

The operations in this section are performed as the grid user or the root user.

5.1 Verify that all nodes are unpinned

[root@rac1 ~]# olsnodes -s -t

rac1   Active Unpinned

rac2   Active Unpinned

rac3   Active Unpinned
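All three nodes are Unpinned, which is what the removal procedure expects. If rac3 were shown as Pinned, it could be unpinned first (run as root on one of the remaining nodes):

crsctl unpin css -n rac3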

5.2 Run the deconfig on node 3 as the root user

[root@rac3 ~]# /u01/app/11.2.0/grid/crs/install/rootcrs.pl -deconfig -deinstall -force

Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params

Network exists: 1/192.168.8.0/255.255.255.0/eth0, type static

VIP exists: /192.168.8.242/192.168.8.242/192.168.8.0/255.255.255.0/eth0, hosting node rac1

VIP exists: /192.168.8.244/192.168.8.244/192.168.8.0/255.255.255.0/eth0, hosting node rac2

VIP exists: /rac3-vip/192.168.8.247/192.168.8.0/255.255.255.0/eth0, hosting node rac3

GSD exists

ONS exists: Local port 6100, remote port 6200, EM port 2016

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'rac3'

CRS-2673: Attempting to stop 'ora.crsd' on 'rac3'

CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on 'rac3'

CRS-2673: Attempting to stop 'ora.DATADG.dg' on 'rac3'

CRS-2673: Attempting to stop 'ora.SYSTEMDG.dg' on 'rac3'

CRS-2677: Stop of 'ora.DATADG.dg' on 'rac3' succeeded

CRS-2677: Stop of 'ora.SYSTEMDG.dg' on 'rac3' succeeded

CRS-2673: Attempting to stop 'ora.asm' on 'rac3'

CRS-2677: Stop of 'ora.asm' on 'rac3' succeeded

CRS-2792: Shutdown of Cluster Ready Services-managed resources on 'rac3' has completed

CRS-2677: Stop of 'ora.crsd' on 'rac3' succeeded

CRS-2673: Attempting to stop 'ora.mdnsd' on 'rac3'

CRS-2673: Attempting to stop 'ora.ctssd' on 'rac3'

CRS-2673: Attempting to stop 'ora.evmd' on 'rac3'

CRS-2673: Attempting to stop 'ora.asm' on 'rac3'

CRS-2677: Stop of 'ora.evmd' on 'rac3' succeeded

CRS-2677: Stop of 'ora.mdnsd' on 'rac3' succeeded

CRS-2677: Stop of 'ora.ctssd' on 'rac3' succeeded

CRS-2677: Stop of 'ora.asm' on 'rac3' succeeded

CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'rac3'

CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'rac3' succeeded

CRS-2673: Attempting to stop 'ora.cssd' on 'rac3'

CRS-2677: Stop of 'ora.cssd' on 'rac3' succeeded

CRS-2673: Attempting to stop 'ora.crf' on 'rac3'

CRS-2677: Stop of 'ora.crf' on 'rac3' succeeded

CRS-2673: Attempting to stop 'ora.gipcd' on 'rac3'

CRS-2677: Stop of 'ora.gipcd' on 'rac3' succeeded

CRS-2673: Attempting to stop 'ora.gpnpd' on 'rac3'

CRS-2677: Stop of 'ora.gpnpd' on 'rac3' succeeded

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'rac3' has completed

CRS-4133: Oracle High Availability Services has been stopped.

Removing Trace File Analyzer

Successfully deconfigured Oracle clusterware stack on this node

--Verify:

[root@rac1 ~]# olsnodes -s

rac1   Active

rac2   Active

rac3   Inactive

5.3 Delete the node (run on node 1)

Run as the root user:

[root@rac1 ~]# crsctl delete node -n rac3

CRS-4661: Node rac3 successfully deleted.

5.4 Update the inventory (run on node 3)

[root@rac3 ~]# su - grid

[grid@rac3 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=/u01/app/11.2.0/grid "CLUSTER_NODES=rac3" -silent -local

Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 2047 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.

5.5 Remove the GRID HOME: run deinstall on node 3

Run as the grid user:

[grid@rac3 ~]$ $ORACLE_HOME/deinstall/deinstall -local

Checking for required files and bootstrapping ...

Please wait ...

Location of logs /tmp/deinstall2016-06-13_01-38-44PM/logs/

############ ORACLE DEINSTALL & DECONFIG TOOL START ############

######################### CHECK OPERATION START #########################

## [START] Install check configuration ##

Checking for existence of the Oracle home location /u01/app/11.2.0/grid

Oracle Home type selected for deinstall is: Oracle Grid Infrastructure for a Cluster

Oracle Base selected for deinstall is: /u01/app/grid

Checking for existence of central inventory location /u01/app/oraInventory

Checking for existence of the Oracle Grid Infrastructure home

The following nodes are part of this cluster: rac3

Checking for sufficient temp space availability on node(s) : 'rac3'

## [END] Install check configuration ##

Traces log file: /tmp/deinstall2016-06-13_01-38-44PM/logs//crsdc.log

Enter an address or the name of the virtual IP used on node "rac3"[rac3-vip]

 >

The following information can be collected by running "/sbin/ifconfig -a" on node "rac3"

Enter the IP netmask of Virtual IP "192.168.8.247" on node "rac3"[255.255.255.0]

 >

Enter the network interface name on which the virtual IP address "192.168.8.247" is active

 >

Enter an address or the name of the virtual IP[]

 >

Network Configuration check config START

Network de-configuration trace file location: /tmp/deinstall2016-06-13_01-38-44PM/logs/netdc_check2016-06-13_01-39-35-PM.log

Specify all RAC listeners (do not include SCAN listener) that are to be de-configured [LISTENER]:

Network Configuration check config END

Asm Check Configuration START

ASM de-configuration trace file location: /tmp/deinstall2016-06-13_01-38-44PM/logs/asmcadc_check2016-06-13_01-39-38-PM.log

######################### CHECK OPERATION END #########################

####################### CHECK OPERATION SUMMARY #######################

Oracle Grid Infrastructure Home is:

The cluster node(s) on which the Oracle home deinstallation will be performed are:rac3

Since -local option has been specified, the Oracle home will be deinstalled only on the local node, 'rac3', and the global configuration will be removed.

Oracle Home selected for deinstall is: /u01/app/11.2.0/grid

Inventory Location where the Oracle home registered is: /u01/app/oraInventory

Following RAC listener(s) will be de-configured: LISTENER

Option -local will not modify any ASM configuration.

Do you want to continue (y - yes, n - no)? [n]: y

A log of this session will be written to: '/tmp/deinstall2016-06-13_01-38-44PM/logs/deinstall_deconfig2016-06-13_01-39-02-PM.out'

Any error messages from this session will be written to: '/tmp/deinstall2016-06-13_01-38-44PM/logs/deinstall_deconfig2016-06-13_01-39-02-PM.err'

######################## CLEAN OPERATION START ########################

ASM de-configuration trace file location: /tmp/deinstall2016-06-13_01-38-44PM/logs/asmcadc_clean2016-06-13_01-39-41-PM.log

ASM Clean Configuration END

Network Configuration clean config START

Network de-configuration trace file location: /tmp/deinstall2016-06-13_01-38-44PM/logs/netdc_clean2016-06-13_01-39-41-PM.log

De-configuring RAC listener(s): LISTENER

De-configuring listener: LISTENER

    Stopping listener on node "rac3": LISTENER

    Warning: Failed to stop listener. Listener may not be running.

Listener de-configured successfully.

De-configuring Naming Methods configuration file...

Naming Methods configuration file de-configured successfully.

De-configuring backup files...

Backup files de-configured successfully.

The network configuration has been cleaned up successfully.

Network Configuration clean config END

---------------------------------------->

The deconfig command below can be executed in parallel on all the remote nodes. Execute the command on  the local node after the execution completes on all the remote nodes.

Run the following command as the root user or the administrator on node "rac3".

/tmp/deinstall2016-06-13_01-38-44PM/perl/bin/perl -I/tmp/deinstall2016-06-13_01-38-44PM/perl/lib -I/tmp/deinstall2016-06-13_01-38-44PM/crs/install /tmp/deinstall2016-06-13_01-38-44PM/crs/install/rootcrs.pl -force  -deconfig -paramfile "/tmp/deinstall2016-06-13_01-38-44PM/response/deinstall_Ora11g_gridinfrahome1.rsp"

Press Enter after you finish running the above commands

<----------------------------------------

---------------------------------------->

When this prompt appears, open a new terminal window and run the command it prints:

[root@rac3 ~]# /tmp/deinstall2016-06-13_01-38-44PM/perl/bin/perl -I/tmp/deinstall2016-06-13_01-38-44PM/perl/lib -I/tmp/deinstall2016-06-13_01-38-44PM/crs/install /tmp/deinstall2016-06-13_01-38-44PM/crs/install/rootcrs.pl -force  -deconfig -paramfile "/tmp/deinstall2016-06-13_01-38-44PM/response/deinstall_Ora11g_gridinfrahome1.rsp"

Using configuration parameter file: /tmp/deinstall2016-06-13_01-38-44PM/response/deinstall_Ora11g_gridinfrahome1.rsp

****Unable to retrieve Oracle Clusterware home.

Start Oracle Clusterware stack and try again.

CRS-4047: No Oracle Clusterware components configured.

CRS-4000: Command Stop failed, or completed with errors.

################################################################

# You must kill processes or reboot the system to properly #

# cleanup the processes started by Oracle clusterware          #

################################################################

Either /etc/oracle/olr.loc does not exist or is not readable

Make sure the file exists and it has read and execute access

Either /etc/oracle/olr.loc does not exist or is not readable

Make sure the file exists and it has read and execute access

Failure in execution (rc=-1, 256, No such file or directory) for command /etc/init.d/ohasd deinstall

error: package cvuqdisk is not installed

Successfully deconfigured Oracle clusterware stack on this node

Once that completes, go back to the previous window and press Enter; the session then continues as follows:

The deconfig command below can be executed in parallel on all the remote nodes. Execute the command on  the local node after the execution completes on all the remote nodes.

Press Enter after you finish running the above commands

<----------------------------------------

Remove the directory: /tmp/deinstall2016-06-13_01-38-44PM on node:

Setting the force flag to false

Setting the force flag to cleanup the Oracle Base

Oracle Universal Installer clean START

Detach Oracle home '/u01/app/11.2.0/grid' from the central inventory on the local node : Done

Delete directory '/u01/app/11.2.0/grid' on the local node : Done

Delete directory '/u01/app/oraInventory' on the local node : Done

Delete directory '/u01/app/grid' on the local node : Done

Oracle Universal Installer cleanup was successful.

Oracle Universal Installer clean END

## [START] Oracle install clean ##

Clean install operation removing temporary directory '/tmp/deinstall2016-06-13_01-38-44PM' on node 'rac3'

## [END] Oracle install clean ##

######################### CLEAN OPERATION END #########################

####################### CLEAN OPERATION SUMMARY #######################

Following RAC listener(s) were de-configured successfully: LISTENER

Oracle Clusterware is stopped and successfully de-configured on node "rac3"

Oracle Clusterware is stopped and de-configured successfully.

Successfully detached Oracle home '/u01/app/11.2.0/grid' from the central inventory on the local node.

Successfully deleted directory '/u01/app/11.2.0/grid' on the local node.

Successfully deleted directory '/u01/app/oraInventory' on the local node.

Successfully deleted directory '/u01/app/grid' on the local node.

Oracle Universal Installer cleanup was successful.

Run 'rm -rf /etc/oraInst.loc' as root on node(s) 'rac3' at the end of the session.

Run 'rm -rf /opt/ORCLfmap' as root on node(s) 'rac3' at the end of the session.

Run 'rm -rf /etc/oratab' as root on node(s) 'rac3' at the end of the session.

Oracle deinstall tool successfully cleaned up temporary directories.

#######################################################################

############# ORACLE DEINSTALL & DECONFIG TOOL END #############
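The tool asks for a few files to be removed manually; running its suggested commands as root on rac3 looks like this:

rm -rf /etc/oraInst.loc
rm -rf /opt/ORCLfmap
rm -rf /etc/oratab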

5.6 Update the inventory (run on the remaining nodes)

Run on node 1 as the grid user.

[grid@rac1 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=/u01/app/11.2.0/grid "CLUSTER_NODES=rac1,rac2" -silent

Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 1848 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.

5.7 Check the node removal with CVU

Run as the grid user on node 1:

[grid@rac1 ~]$ cluvfy stage -post nodedel -n rac3 -verbose

Performing post-checks for node removal

Checking CRS integrity...

Clusterware version consistency passed

The Oracle Clusterware is healthy on node "rac2"

The Oracle Clusterware is healthy on node "rac1"

CRS integrity check passed

Result:

Node removal check passed

Post-check for node removal was successful.

6. Verification

6.1 Check the cluster

[grid@rac1 ~]$ olsnodes -s

rac1   Active

rac2   Active

[grid@rac1 ~]$ olsnodes -n

rac1   1

rac2   2

[grid@rac1 ~]$ crsctl stat res -t

--------------------------------------------------------------------------------

NAME           TARGET  STATE        SERVER                   STATE_DETAILS      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.DATADG.dg

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                         

ora.LISTENER.lsnr

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

ora.SYSTEMDG.dg

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

ora.asm

               ONLINE  ONLINE       rac1                     Started            

               ONLINE  ONLINE       rac2                     Started            

ora.gsd

               OFFLINE OFFLINE      rac1                                        

               OFFLINE OFFLINE      rac2                                        

ora.net1.network

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

ora.ons

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       rac2                                        

ora.cvu

      1        ONLINE  ONLINE       rac2                                        

ora.oc4j

      1        ONLINE  ONLINE       rac2                                         

ora.orcl.db

      1        ONLINE  ONLINE       rac1                     Open               

      2        ONLINE  ONLINE       rac2                     Open               

ora.orcl.orcl_taf.svc

      1        ONLINE  ONLINE       rac1                                        

      3        ONLINE  ONLINE       rac2                                        

ora.rac1.vip

      1        ONLINE  ONLINE       rac1                                        

ora.rac2.vip

      1        ONLINE  ONLINE       rac2                                        

ora.scan1.vip

      1        ONLINE  ONLINE       rac2       

     

[root@rac1 ~]# ./crs_stat.sh

Name                             Target     State      Host     

------------------------       ---------- ---------  -------  

ora.DATADG.dg                  ONLINE     ONLINE     rac1     

ora.LISTENER.lsnr              ONLINE     ONLINE     rac1     

ora.LISTENER_SCAN1.lsnr        ONLINE     ONLINE     rac2     

ora.SYSTEMDG.dg                ONLINE     ONLINE     rac1     

ora.asm                        ONLINE     ONLINE     rac1     

ora.cvu                        ONLINE     ONLINE     rac2     

ora.gsd                        OFFLINE    OFFLINE             

ora.net1.network               ONLINE     ONLINE     rac1     

ora.oc4j                       ONLINE     ONLINE     rac2     

ora.ons                        ONLINE     ONLINE     rac1     

ora.orcl.db                    ONLINE     ONLINE     rac1     

ora.orcl.orcl_taf.svc          ONLINE     ONLINE     rac1     

ora.rac1.ASM1.asm              ONLINE     ONLINE     rac1     

ora.rac1.LISTENER_RAC1.lsnr    ONLINE     ONLINE     rac1     

ora.rac1.gsd                   OFFLINE    OFFLINE             

ora.rac1.ons                   ONLINE     ONLINE     rac1     

ora.rac1.vip                   ONLINE     ONLINE     rac1     

ora.rac2.ASM2.asm              ONLINE     ONLINE     rac2     

ora.rac2.LISTENER_RAC2.lsnr    ONLINE     ONLINE     rac2     

ora.rac2.gsd                   OFFLINE    OFFLINE             

ora.rac2.ons                   ONLINE     ONLINE     rac2     

ora.rac2.vip                   ONLINE     ONLINE     rac2     

ora.scan1.vip                  ONLINE     ONLINE     rac2  
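The crs_stat.sh used above is not an Oracle-supplied tool but a small helper script that reformats the output of the deprecated crs_stat utility into a table; a minimal sketch of such a script is shown below (the column widths are arbitrary, and in this variant the State column also carries the hosting node exactly as crs_stat reports it):

#!/bin/bash
# crs_stat.sh - tabulate Name/Target/State for every CRS resource
# crs_stat is deprecated in 11gR2 but is still shipped under the Grid home
CRS_STAT=/u01/app/11.2.0/grid/bin/crs_stat
printf "%-30s %-10s %-20s\n" "Name" "Target" "State"
printf "%-30s %-10s %-20s\n" "------------------------" "----------" "--------------------"
$CRS_STAT | awk -F= '
    /^NAME=/   { name   = $2 }
    /^TARGET=/ { target = $2 }
    /^STATE=/  { state  = $2
                 printf "%-30s %-10s %-20s\n", name, target, state }'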

6.2 Remove leftover files

Some directories and files may still remain on node 3; they can be removed with the following commands.

Remove the home directories:

rm -rf /u01/app/grid_home

rm -rf /home/oracle

Remove the related files:

rm -rf /tmp/.oracle

rm -rf /var/tmp/.oracle

rm -rf /etc/init/oracle-ohasd.conf

rm -rf /etc/init.d/ohasd

rm -rf /etc/init.d/init.ohasd

rm -rf /etc/oraInst.loc

rm -rf /etc/oratab

rm -rf /etc/oracle
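As a final check on node 3, you can confirm that no Grid Infrastructure daemons are still running before the server is repurposed; an optional quick check:

# Empty output means no clusterware processes remain on this node
ps -ef | grep -E 'ohasd|crsd|ocssd|evmd' | grep -v grep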

7. Summary of adding and removing nodes in 11gR2

Adding a node to an 11gR2 RAC cluster is done in three phases (see the command sketch after this list):

1) Phase one copies the GRID home to the new node, configures and starts Grid Infrastructure on it, and updates the OCR and the inventory.

2) Phase two copies the RDBMS home to the new node and updates the inventory.

3) Phase three uses DBCA to create the new database instance (undo tablespace, redo logs, initialization parameters, and so on) and updates the OCR, including registering the new instance.
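For reference, the commands behind these three phases typically look like the sketch below; the node, VIP, and instance names come from this article, the SYS password is a placeholder, and this is a recap sketch rather than the exact invocations used during the actual add.

# Phase 1 - as grid on an existing node: extend the Grid home to rac3
$ORACLE_HOME/oui/bin/addNode.sh -silent "CLUSTER_NEW_NODES={rac3}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={rac3-vip}"
# then, as root on rac3: run /u01/app/oraInventory/orainstRoot.sh and /u01/app/11.2.0/grid/root.sh

# Phase 2 - as oracle on an existing node: extend the RDBMS home to rac3
$ORACLE_HOME/oui/bin/addNode.sh -silent "CLUSTER_NEW_NODES={rac3}"
# then, as root on rac3: run /u01/app/oracle/11.2.0/db_1/root.sh

# Phase 3 - as oracle: create the orcl3 instance on rac3 with DBCA
dbca -silent -addInstance -nodeList rac3 -gdbName orcl -instanceName orcl3 \
     -sysDBAUserName sys -sysDBAPassword <sys_password>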

Removing a node in 11gR2 simply reverses these steps, again in three phases.

Throughout the add/remove process the existing nodes stay online, so no downtime is required and client workloads are not affected. The ORACLE_BASE and ORACLE_HOME directories on the new node are created automatically during the add; there is no need to create them by hand.

Notes:

1) Before adding or removing a node, take a manual backup of the OCR; if the operation fails, the cluster configuration can be restored from that backup (see the examples after this list).

2) During a normal 11.2 GRID installation the OUI GUI can configure SSH for you, but the addNode.sh script has no such option, so SSH user equivalence for the oracle and grid users must be set up manually.
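A manual OCR backup can be taken with ocrconfig, run as root on one of the existing nodes; a minimal example using this article's Grid home path:

# 1) Manual OCR backup before the add/remove operation (as root on an existing node)
/u01/app/11.2.0/grid/bin/ocrconfig -manualbackup
/u01/app/11.2.0/grid/bin/ocrconfig -showbackup manual

For SSH user equivalence, a minimal manual setup looks like the sketch below; repeat it for both the grid and oracle users, and test in both directions between rac3 and the existing nodes:

# 2) SSH user equivalence (example for the grid user, run on rac3)
ssh-keygen -t rsa                 # accept the defaults
ssh-copy-id grid@rac1
ssh-copy-id grid@rac2
ssh rac1 date                     # must return the date without prompting for a password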







