root@dbf0t00lpaora:$ su - grid
grid@dbf0t00lpaora:$ . oraenv
ORACLE_SID = [grid] ? +ASM1
The Oracle base has been set to /u01/app/grid
grid@dbf0t00lpaora:$ olsnodes
dbf0t00lpaora
dbf0t01lpbora
grid@dbf0t00lpaora:$ crsctl get cluster mode status
Cluster is running in "standard" mode
grid@dbf0t00lpaora:$ srvctl config gns
PRKF-1110 : Neither GNS server nor GNS client is configured on this cluster
grid@dbf0t00lpaora:$ oifcfg getif
bond0 172.27.110.0 global public
bond1 172.27.117.0 global cluster_interconnect
grid@dbf0t00lpaora:$ crsctl get node role config
Node 'dbf0t00lpaora' configured role is 'hub'
grid@dbf0t00lpaora:$ asmcmd showclustermode
ASM cluster : Flex mode disabled
grid@dbf0t00lpaora:$ asmcmd showclusterstate
Normal
grid@dbf0t00lpaora:$ srvctl status asm -detail
ASM is running on dbf0t00lpaora,dbf0t01lpbora
ASM is enabled.
grid@dbf0t00lpaora:$ crsctl get node role config -all
Node 'dbf0t00lpaora' configured role is 'hub'
Node 'dbf0t01lpbora' configured role is 'hub'
grid@dbf0t00lpaora:$ crsctl get node role status -all
Node 'dbf0t00lpaora' active role is 'hub'
Node 'dbf0t01lpbora' active role is 'hub'
grid@dbf0t00lpaora:$ crsctl status res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.DATA.dg
ONLINE ONLINE dbf0t00lpaora STABLE
ONLINE ONLINE dbf0t01lpbora STABLE
ora.FRA.dg
ONLINE ONLINE dbf0t00lpaora STABLE
ONLINE ONLINE dbf0t01lpbora STABLE
ora.LISTENER.lsnr
ONLINE ONLINE dbf0t00lpaora STABLE
ONLINE ONLINE dbf0t01lpbora STABLE
ora.OCR_VD.dg
ONLINE ONLINE dbf0t00lpaora STABLE
ONLINE ONLINE dbf0t01lpbora STABLE
ora.asm
ONLINE ONLINE dbf0t00lpaora Started,STABLE
ONLINE ONLINE dbf0t01lpbora Started,STABLE
ora.net1.network
ONLINE ONLINE dbf0t00lpaora STABLE
ONLINE ONLINE dbf0t01lpbora STABLE
ora.ons
ONLINE ONLINE dbf0t00lpaora STABLE
ONLINE ONLINE dbf0t01lpbora STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE dbf0t01lpbora STABLE
ora.LISTENER_SCAN2.lsnr
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.LISTENER_SCAN3.lsnr
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.MGMTLSNR
1 ONLINE ONLINE dbf0t00lpaora 169.254.28.162 172.2
7.117.125,STABLE
ora.cvu
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.dbf0t00lpaora.vip
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.dbf0t01lpbora.vip
1 ONLINE ONLINE dbf0t01lpbora STABLE
ora.m.db
1 ONLINE ONLINE dbf0t00lpaora Open,STABLE
2 ONLINE ONLINE dbf0t01lpbora Open,STABLE
ora.mgmtdb
1 ONLINE ONLINE dbf0t00lpaora Open,STABLE
ora.oc4j
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.scan1.vip
1 ONLINE ONLINE dbf0t01lpbora STABLE
ora.scan2.vip
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.scan3.vip
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.test.db
1 ONLINE ONLINE dbf0t00lpaora Open,STABLE
2 ONLINE ONLINE dbf0t01lpbora Open,STABLE
root@dbf0t00lpaora:$ /u01/app/12.1.0/grid/bin/olsnodes -s
dbf0t00lpaora Active
dbf0t01lpbora Active
root@dbf0t00lpaora:$ /u01/app/12.1.0/grid/bin/crsctl query css votedisk
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 10e7c885235c4f64bf28ba4117dbed61 (ORCL:OCR_VD) [OCR_VD]
Located 1 voting disk(s).
root@dbf0t00lpaora:$ /u01/app/12.1.0/grid/bin/ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 4
Total space (kbytes) : 409568
Used space (kbytes) : 1608
Available space (kbytes) : 407960
ID : 793966683
Device/File Name : +OCR_VD
Device/File integrity check succeeded
Device/File not configured
Device/File not configured
Device/File not configured
Device/File not configured
Cluster registry integrity check succeeded
Logical corruption check succeeded
root@dbf0t00lpaora:$ /u01/app/12.1.0/grid/bin/srvctl status database -d TEST
Instance TEST1 is running on node dbf0t00lpaora
Instance TEST2 is running on node dbf0t01lpbora
root@dbf0t00lpaora:$ /u01/app/12.1.0/grid/bin/srvctl config service -d TEST
root@dbf0t00lpaora:$ /u01/app/12.1.0/grid/bin/srvctl status service -d TEST
2. A Removing an Oracle database instance:
root@dbf0t00lpaora:$ su - oracle
oracle@dbf0t00lpaora:$ /u02/app/oracle/product/12.1.0/dbhome_1/bin/dbca -silent -deleteInstance -nodeList dbf0t01lpbora -gdbName TEST -instanceName TEST2 -sysDBAUserName sys -sysDBAPassword abc123
Deleting instance
1% complete
2% complete
6% complete
13% complete
20% complete
26% complete
33% complete
40% complete
46% complete
53% complete
60% complete
66% complete
Completing instance management.
100% complete
Look at the log file "/u02/app/oracle/cfgtoollogs/dbca/TEST.log" for further details.
oracle@dbf0t00lpaora:$ srvctl status database -d TEST
Instance TEST1 is running on node dbf0t00lpaora
oracle@dbf0t00lpaora:$ srvctl config database -d TEST -v
Database unique name: TEST
Database name: TEST
Oracle home: /u02/app/oracle/product/12.1.0/dbhome_1
Oracle user: oracle
Spfile: +DATA/TEST/PARAMETERFILE/spfile.269.900859867
Password file: +DATA/TEST/PASSWORD/pwdtest.256.900859377
Domain:
Start options: open
Stop options: immediate
Database role: PRIMARY
Management policy: AUTOMATIC
Server pools:
Disk Groups: DATA
Mount point paths:
Services:
Type: RAC
Start concurrency:
Stop concurrency:
OSDBA group: dba
OSOPER group: dba
Database instances: TEST1
Configured nodes: dbf0t00lpaora
Database is administrator managed
oracle@dbf0t00lpaora:$ sqlplus '/as sysdba'
SQL*Plus: Release 12.1.0.2.0 Production on Thu Jan 14 10:35:23 2016
Copyright (c) 1982, 2014, Oracle. All rights reserved.
Connected to:
Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
With the Partitioning, Real Application Clusters, Automatic Storage Management, OLAP,
Advanced Analytics and Real Application Testing options
SQL> select inst_id, instance_name, status, to_char(startup_time,'DD-MON-YYYY HH24:MI:SS') as "START_TIME" from gv$instance order by inst_id;
INST_ID INSTANCE_NAME STATUS START_TIME
---------- ---------------- ------------ --------------------
1 TEST1 OPEN 14-JAN-2016 10:05:35
Check whether the redo log thread and the UNDO tablespace for the deleted instance have been removed (in this example they were removed successfully). If not, remove them manually.
SQL> select thread# from v$thread where instance='TEST';
no rows selected
SQL> select thread# from v$thread where upper(instance) = upper('TEST');
no rows selected
SQL> select group# from v$log where thread# =2;
no rows selected
SQL> select member from v$logfile ;
MEMBER
--------------------------------------------------------------------------------
+DATA/TEST/ONLINELOG/group_2.263.900859605
+DATA/TEST/ONLINELOG/group_1.262.900859603
SQL> exit
Disconnected from Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
With the Partitioning, Real Application Clusters, Automatic Storage Management, OLAP,
Advanced Analytics and Real Application Testing options
oracle@dbf0t00lpaora:$ srvctl config listener -a
Name: LISTENER
Type: Database Listener
Network: 1, Owner: grid
Home: <CRS home>
/u01/app/12.1.0/grid on node(s) dbf0t01lpbora,dbf0t00lpaora
End points: TCP:1521
Listener is enabled.
Listener is individually enabled on nodes:
Listener is individually disabled on nodes:
oracle@dbf0t00lpaora:$ ssh dbf0t01lpbora
dbf0t01lpbora:~ # exit
logout
Connection to dbf0t01lpbora closed.
If you find any redo log and undo references of the deleted instance in the cluster, use the following commands to remove those references.
alter database disable thread 2;
alter database drop logfile group 3;
alter database drop logfile group 4;
drop tablespace undotbs2 including contents and datafiles;
alter system reset undo_tablespace scope=spfile sid = 'TEST2';
alter system reset instance_number scope=spfile sid = 'TEST2';
2. B Removing RDBMS software:
On the node which is to be deleted from the cluster, run the following command...
root@dbf0t01lpbora:$ su - oracle
oracle@dbf0t01lpbora:$
oracle@dbf0t01lpbora:$ echo $ORACLE_HOME
/u02/app/oracle/product/12.1.0/dbhome_1
oracle@dbf0t01lpbora:$ cd $ORACLE_HOME/oui/bin
oracle@dbf0t01lpbora:$ pwd
/u02/app/oracle/product/12.1.0/dbhome_1/oui/bin
oracle@dbf0t01lpbora:$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={dbf0t01lpbora}" -local
Starting Oracle Universal Installer...
Checking swap space: must be greater than 500 MB. Actual 16079 MB Passed
The inventory pointer is located at /etc/oraInst.loc
'UpdateNodeList' was successful.
Now run the following command on node 2 to deinstall the Oracle home from this node.
oracle@dbf0t01lpbora:$ cd $ORACLE_HOME/deinstall
oracle@dbf0t01lpbora:$ pwd
/u02/app/oracle/product/12.1.0/dbhome_1/deinstall
oracle@dbf0t01lpbora:$ ./deinstall -local
On any cluster node that remains in the cluster , run the following command ....
oracle@dbf0t00lpaora:$ cd $ORACLE_HOME/oui/bin
oracle@dbf0t00lpaora:$ pwd
/u02/app/oracle/product/12.1.0/dbhome_1/oui/bin
oracle@dbf0t00lpaora:$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={dbf0t00lpaora}"
Starting Oracle Universal Installer...
Checking swap space: must be greater than 500 MB. Actual 16070 MB Passed
The inventory pointer is located at /etc/oraInst.loc
'UpdateNodeList' was successful.
Now verify the inventory and make sure that the database on node2 is completely removed.
oracle@dbf0t00lpaora:$ cd /u01/app/oraInventory/ContentsXML/
oracle@dbf0t00lpaora:$ ls
comps.xml inventory.xml libs.xml
oracle@dbf0t00lpaora:$ cat inventory.xml
<?xml version="1.0" standalone="yes" ?>
<!-- Copyright (c) 1999, 2014, Oracle and/or its affiliates.
All rights reserved. -->
<!-- Do not modify the contents of this file by hand. -->
<INVENTORY>
<VERSION_INFO>
<SAVED_WITH>12.1.0.2.0</SAVED_WITH>
<MINIMUM_VER>2.1.0.6.0</MINIMUM_VER>
</VERSION_INFO>
<HOME_LIST>
<HOME NAME="OraGI12Home1" LOC="/u01/app/12.1.0/grid" TYPE="O" IDX="4" CRS="true">
<NODE_LIST>
<NODE NAME="dbf0t00lpaora"/>
<NODE NAME="dbf0t01lpbora"/>
</NODE_LIST>
</HOME>
<HOME NAME="OraDB12Home1" LOC="/u02/app/oracle/product/12.1.0/dbhome_1" TYPE="O" IDX="5">
<NODE_LIST>
<NODE NAME="dbf0t00lpaora"/>
</NODE_LIST>
</HOME>
<HOME NAME="Ora11g_gridinfrahome2" LOC="/u01/app/11.2.0.4/grid" TYPE="O" IDX="2" REMOVED="T"/>
<HOME NAME="Ora11g_gridinfrahome1" LOC="/u01/app/11.2.0/grid" TYPE="O" IDX="1" REMOVED="T"/>
<HOME NAME="OraDb11g_home1" LOC="/u01/app/oracle/product/11.2.0.4/dbhome_1" TYPE="O" IDX="3" REMOVED="T"/>
</HOME_LIST>
<COMPOSITEHOME_LIST>
</COMPOSITEHOME_LIST>
</INVENTORY>
2. C Removing Node from Cluster:
Run the following command as root to determine whether the node you want to delete is active and whether it is pinned.
root@dbf0t01lpbora:$ export ORACLE_HOME=/u02/app/oracle/product/12.1.0/dbhome_1
root@dbf0t01lpbora:$ export GRID_HOME=/u01/app/12.1.0/grid
root@dbf0t01lpbora:$ $GRID_HOME/bin/olsnodes -s -t
dbf0t00lpaora Active Unpinned
dbf0t01lpbora Active Unpinned
Disable the Oracle Clusterware applications and daemons running on the node to be deleted from the cluster. Run the rootcrs.pl script as root from the Grid_home/crs/install directory on the node to be deleted.
root@dbf0t01lpbora:$ ./rootcrs.pl -deconfig -force
Using configuration parameter file: ./crsconfig_params
Network 1 exists
Subnet IPv4: 172.27.110.0/255.255.255.0/bond0, static
Subnet IPv6:
Ping Targets:
Network is enabled
Network is individually enabled on nodes:
Network is individually disabled on nodes:
VIP exists: network number 1, hosting node dbf0t00lpaora
VIP Name: dbf0t00lpaora-vip.ch.cadhlt.org
VIP IPv4 Address: 172.27.110.95
VIP IPv6 Address:
VIP is enabled.
VIP is individually enabled on nodes:
VIP is individually disabled on nodes:
VIP exists: network number 1, hosting node dbf0t01lpbora
VIP Name: dbf0t01lpbora-vip.ch.cadhlt.org
VIP IPv4 Address: 172.27.110.97
VIP IPv6 Address:
VIP is enabled.
VIP is individually enabled on nodes:
VIP is individually disabled on nodes:
ONS exists: Local port 6100, remote port 6200, EM port 2016, Uses SSL false
ONS is enabled
ONS is individually enabled on nodes:
ONS is individually disabled on nodes:
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'dbf0t01lpbora'
CRS-2673: Attempting to stop 'ora.crsd' on 'dbf0t01lpbora'
CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on 'dbf0t01lpbora'
CRS-2673: Attempting to stop 'ora.FRA.dg' on 'dbf0t01lpbora'
CRS-2673: Attempting to stop 'ora.OCR_VD.dg' on 'dbf0t01lpbora'
CRS-2677: Stop of 'ora.OCR_VD.dg' on 'dbf0t01lpbora' succeeded
CRS-2677: Stop of 'ora.FRA.dg' on 'dbf0t01lpbora' succeeded
CRS-2673: Attempting to stop 'ora.DATA.dg' on 'dbf0t01lpbora'
CRS-2677: Stop of 'ora.DATA.dg' on 'dbf0t01lpbora' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'dbf0t01lpbora'
CRS-2677: Stop of 'ora.asm' on 'dbf0t01lpbora' succeeded
CRS-2792: Shutdown of Cluster Ready Services-managed resources on 'dbf0t01lpbora' has completed
CRS-2677: Stop of 'ora.crsd' on 'dbf0t01lpbora' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'dbf0t01lpbora'
CRS-2673: Attempting to stop 'ora.evmd' on 'dbf0t01lpbora'
CRS-2673: Attempting to stop 'ora.storage' on 'dbf0t01lpbora'
CRS-2673: Attempting to stop 'ora.crf' on 'dbf0t01lpbora'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'dbf0t01lpbora'
CRS-2673: Attempting to stop 'ora.gpnpd' on 'dbf0t01lpbora'
CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'dbf0t01lpbora'
CRS-2677: Stop of 'ora.storage' on 'dbf0t01lpbora' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'dbf0t01lpbora'
CRS-2677: Stop of 'ora.drivers.acfs' on 'dbf0t01lpbora' succeeded
CRS-2677: Stop of 'ora.crf' on 'dbf0t01lpbora' succeeded
CRS-2677: Stop of 'ora.ctssd' on 'dbf0t01lpbora' succeeded
CRS-2677: Stop of 'ora.evmd' on 'dbf0t01lpbora' succeeded
CRS-2677: Stop of 'ora.mdnsd' on 'dbf0t01lpbora' succeeded
CRS-2677: Stop of 'ora.gpnpd' on 'dbf0t01lpbora' succeeded
CRS-2677: Stop of 'ora.asm' on 'dbf0t01lpbora' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'dbf0t01lpbora'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'dbf0t01lpbora' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'dbf0t01lpbora'
CRS-2677: Stop of 'ora.cssd' on 'dbf0t01lpbora' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'dbf0t01lpbora'
CRS-2677: Stop of 'ora.gipcd' on 'dbf0t01lpbora' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'dbf0t01lpbora' has completed
CRS-4133: Oracle High Availability Services has been stopped.
2016/01/14 11:07:08 CLSRSC-4006: Removing Oracle Trace File Analyzer (TFA) Collector.
2016/01/14 11:07:31 CLSRSC-4007: Successfully removed Oracle Trace File Analyzer (TFA) Collector.
From a node that is to remain a member of the Oracle RAC, run the following command from the Grid_home/bin directory as root to update the Clusterware configuration to delete the node from the cluster.
root@dbf0t00lpaora:$ $GRID_HOME/bin/crsctl delete node -n dbf0t01lpbora
CRS-4661: Node dbf0t01lpbora successfully deleted.
root@dbf0t00lpaora:$ $GRID_HOME/bin/olsnodes -s -t
dbf0t00lpaora Active Unpinned
As the Grid Infrastructure owner, execute runInstaller from Grid_home/oui/bin on the node being removed to update the inventory.
grid@dbf0t01lpbora:$ cd /u01/app/12.1.0/grid/oui/bin/
grid@dbf0t01lpbora:$ pwd
/u01/app/12.1.0/grid/oui/bin
root@dbf0t01lpbora:$ su - grid
dbf0t01lpbora:~ # PS1="\u@\h:$ "
grid@dbf0t01lpbora:$
grid@dbf0t01lpbora:$ ./runInstaller -updateNodeList ORACLE_HOME=/u01/app/12.1.0/grid "CLUSTER_NODES={dbf0t01lpbora}" CRS=TRUE -silent -local
Starting Oracle Universal Installer...
Checking swap space: must be greater than 500 MB. Actual 16079 MB Passed
The inventory pointer is located at /etc/oraInst.loc
'UpdateNodeList' was successful.
Run deinstall as the Grid Infrastructure software owner from the node to be removed in order to delete the Oracle Grid Infrastructure software.
Pay extra attention when responding to the prompts. When supplying listener values, give only the local listener name; do not specify the SCAN listener for deletion.
grid@dbf0t01lpbora:$ cd /u01/app/12.1.0/grid/deinstall/
grid@dbf0t01lpbora:$ ./deinstall -local
Checking for required files and bootstrapping ...
Please wait ...
On Node1
grid@dbf0t00lpaora:$ cd /u01/app/12.1.0/grid/oui/bin
grid@dbf0t00lpaora:$ ./runInstaller -updateNodeList ORACLE_HOME=/u01/app/12.1.0/grid "CLUSTER_NODES={dbf0t00lpaora}" CRS=TRUE -silent
Starting Oracle Universal Installer...
Checking swap space: must be greater than 500 MB. Actual 16070 MB Passed
The inventory pointer is located at /etc/oraInst.loc
'UpdateNodeList' was successful.
2. D Verification:
grid@dbf0t00lpaora:$ cd /u01/app/oraInventory/ContentsXML/
grid@dbf0t00lpaora:$ ls
comps.xml inventory.xml libs.xml
grid@dbf0t00lpaora:$ cat inventory.xml
<?xml version="1.0" standalone="yes" ?>
<!-- Copyright (c) 1999, 2014, Oracle and/or its affiliates.
All rights reserved. -->
<!-- Do not modify the contents of this file by hand. -->
<INVENTORY>
<VERSION_INFO>
<SAVED_WITH>12.1.0.2.0</SAVED_WITH>
<MINIMUM_VER>2.1.0.6.0</MINIMUM_VER>
</VERSION_INFO>
<HOME_LIST>
<HOME NAME="OraGI12Home1" LOC="/u01/app/12.1.0/grid" TYPE="O" IDX="4" CRS="true">
<NODE_LIST>
<NODE NAME="dbf0t00lpaora"/>
</NODE_LIST>
</HOME>
<HOME NAME="OraDB12Home1" LOC="/u02/app/oracle/product/12.1.0/dbhome_1" TYPE="O" IDX="5">
<NODE_LIST>
<NODE NAME="dbf0t00lpaora"/>
</NODE_LIST>
</HOME>
<HOME NAME="Ora11g_gridinfrahome2" LOC="/u01/app/11.2.0.4/grid" TYPE="O" IDX="2" REMOVED="T"/>
<HOME NAME="Ora11g_gridinfrahome1" LOC="/u01/app/11.2.0/grid" TYPE="O" IDX="1" REMOVED="T"/>
<HOME NAME="OraDb11g_home1" LOC="/u01/app/oracle/product/11.2.0.4/dbhome_1" TYPE="O" IDX="3" REMOVED="T"/>
</HOME_LIST>
<COMPOSITEHOME_LIST>
</COMPOSITEHOME_LIST>
</INVENTORY>
grid@dbf0t00lpaora:$ /u01/app/12.1.0/grid/bin/cluvfy stage -post nodedel -n dbf0t01lpbora -verbose
Performing post-checks for node removal
Checking CRS integrity...
The Oracle Clusterware is healthy on node "dbf0t00lpaora"
CRS integrity check passed
Clusterware version consistency passed.
Result:
Node removal check passed
Post-check for node removal was successful.
grid@dbf0t00lpaora:$ olsnodes -s -t
dbf0t00lpaora Active Unpinned
grid@dbf0t00lpaora:$ crsctl status res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.DATA.dg
ONLINE ONLINE dbf0t00lpaora STABLE
ora.FRA.dg
ONLINE ONLINE dbf0t00lpaora STABLE
ora.LISTENER.lsnr
ONLINE ONLINE dbf0t00lpaora STABLE
ora.OCR_VD.dg
ONLINE ONLINE dbf0t00lpaora STABLE
ora.asm
ONLINE ONLINE dbf0t00lpaora Started,STABLE
ora.net1.network
ONLINE ONLINE dbf0t00lpaora STABLE
ora.ons
ONLINE ONLINE dbf0t00lpaora STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.LISTENER_SCAN2.lsnr
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.LISTENER_SCAN3.lsnr
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.MGMTLSNR
1 ONLINE ONLINE dbf0t00lpaora 169.254.28.162 172.2
7.117.125,STABLE
ora.cvu
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.dbf0t00lpaora.vip
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.m.db
1 ONLINE ONLINE dbf0t00lpaora Open,STABLE
ora.mgmtdb
1 ONLINE ONLINE dbf0t00lpaora Open,STABLE
ora.oc4j
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.scan1.vip
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.scan2.vip
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.scan3.vip
1 ONLINE ONLINE dbf0t00lpaora STABLE
ora.test.db
1 ONLINE ONLINE dbf0t00lpaora Open,STABLE
grid@dbf0t00lpaora:$ crsctl status res -t | grep -i dbf0t01lpbora
2. E Removing remaining components:
Remove asmlib if you are using asmlib for ASM storage
Remove udev rules, if you are using udev rules for ASM storage
Remove oracle and grid users and also corresponding groups.
groups