Thursday, October 31, 2013

Oracle GOLDEN GATE

                                                        GOLDEN GATE



#################################################################
Commands
#################################################################

send EAUSFLD3 status
info EAUSFLD2 , showch
send EAUSFLD2 , showtrans
info EAUSFLD3 , detail

stats EAUSFLD3


--  Use the HISTORY command to display a list of previously executed commands.
--  Use the ! command to execute a previous command again without editing it.
--  Use the FC command to edit a previous command and then execute it again.


echo $GGBASE  -- $ORACLE_HOME/ggsci
connect /
dblogin userid gguser , password gguser


purgeoldextracts  -- PURGEOLDEXTRACTS in the parameter file
list table *


info all
info mgr
info manager
info extract
info e01123 , showch  -- Show checkpoints
info extract e_cust
info extract e_cust, detail
info extract e_cust, showch
info extract *, tasks
info extract *, allprocesses
info extract ext1, detail

info e01123 , showch  -- Show checkpoints
info extract e_cust
info extract e_cust, detail
info extract e_cust, showch
info extract *, tasks
info extract *, allprocesses
info exttrail *
info trandata SCOTT.*


GGSCI (server) 6> shell pwd


KILL EXTRACT group name
LAG EXTRACT
lag extract ext*


view param mgr
edit param mgr

start mgr
start <group>
start *
stop er *
info er *
GGSCI> START EXTRACT <extract name>, DETAIL


add trandata amer.*
info trandata amer.*

-info all
–info mgr
–info <group>
–send <group>, status
–info <group>, showch# checkpoint info, with required arch log #

send <group>, status
send extract ext1 status
send extract ext2, getlag
send manager childstatus
SEND MANAGER GETPURGEOLDEXTRACTS


status manager
status extract ext1
stats extract ext2 reportrate hr

VIEW GGSEVT
view report ext1
view report rep1
view report mgr

SHOWTRANS 

–lag <group>
•Use LAG EXTRACT to determine a true lag time between Extract and the data source. Lag time is the difference, in seconds, between the time a record was processed by Extract and the timestamp of that record in the data source.

•Use LAG REPLICAT to determine a true lag time between Replicat and the trail. Lag time is the difference, in seconds, between the time that the last record was processed by Replicat and the timestamp of the record in the trail.
–view params<group> (to view the parameter file)
–alter <group>, etrollover(to create/increment a new trail file)
–alter <group>, extseqno<#>, extrba<rba> (To designate a begin point in trail file)
–alter <group>, extseqno<#>, extrba1024 (To start from new archive or online logfiles. Oracle reserves 1024 bytes for file header info)
–alter <group>, begin now (Begins processing at the current time)
–alter <group>, begin 2003-12-25 (Begins processing to the specific datetime)
–refresh mgr(Enables to change the parameter without stopping and starting the Manager process. The exception is changing the port number)
–send manager, childstatus (Retrieves status information about processes started by Manager.)

–stats <group> (to display statistics for one or more groups.)




#################################################################
GG VIEWS
#################################################################



-- SQL*Plus report: supplemental log groups per table, showing whether each
-- group logs its columns unconditionally (ALWAYS) or only conditionally.
COLUMN log_group_name HEADING 'Log Group'                    FORMAT A20
COLUMN table_name     HEADING 'Table'                        FORMAT A15
COLUMN always         HEADING 'Conditional or|Unconditional' FORMAT A14
COLUMN log_group_type HEADING 'Type of Log Group'            FORMAT A20

SELECT log_group_name,
       table_name,
       DECODE(always, 'ALWAYS',      'Unconditional',
                      'CONDITIONAL', 'Conditional') AS always,
       log_group_type
  FROM dba_log_groups;


select SUPPLEMENTAL_LOG_DATA_MIN from v$database;

                            
-- List open (active) transactions across all RAC instances with their session
-- status, oldest first — useful for spotting long-running transactions before
-- stopping Extract.
SELECT s.inst_id,
       s.sid,
       s.serial#,
       t.start_time,
       s.status
  FROM gv$session     s,
       gv$transaction t,
       gv$rollstat    r
 WHERE s.saddr   = t.ses_addr
   AND t.xidusn  = r.usn
   AND s.inst_id = t.inst_id
   AND t.inst_id = r.inst_id
 ORDER BY t.start_time;



/*
 Conn / as sysdba
 Col used format a8
 Prompt current_rba will show where redo is being written to

 Select  le.leseq  log_sequence#, cp.cpodr_bno * le.lebsz current_rba,
                         le.lesiz * le.lebsz log_size,
             substr(to_char(100 * cp.cpodr_bno / le.lesiz, '999.00'), 2) || '%'  used
     from    sys.x$kcccp  cp,
             sys.x$kccle  le
     where   le.inst_id = userenv('Instance') and
             cp.inst_id = userenv('Instance') and
             le.leseq = cp.cpodr_seq and le.leseq > 0
 */


#################################################################
Troubleshoot
#################################################################

main issues  :
gap , uncommitted transaction , gg abended ,  rep errors --  unique key , no transaction ,
checkpoint table corrupted , missing archives ,

--> the info all command was not showing lag after extract trail rollover (etrollover); have to use "send extract <group>, status" instead



troubleshoot gap in case of missing archive log :
-------------------------------------------------------
-- stop replication
-- export/import > refresh
-- start replication from that csn/time




FOR ERROR CHECK : ggserr.log , tcperrs , view GGSEVT , logdump
Discard file , report file ,  sqlnet file ,  check if rba number is changing ,

Go to arch directory : fuser

replicate side : reperror 1401 discard , reperror 0001 discard ,

lsof -p <the process ID for one of the replicats you have running>  -- to check which process is writing to which file



-- ggserr.log: GG reports global level INFO, WARNING and ERROR messages in this file at GGBASE directory.

-- The Report file stores process level messages at dirrpt/<PROCESS_NAME>.rpt. Each process should have its own rpt file. ex. E0079.rpt.     New report file will be created each time the process starts and old one will be rolled-over. GG maintains 10 rolled-over files.

-- Discard file stores the vital RBA information, location of the problematic record within a transaction and record details. It also       saves all the discarded records, if discard flag (REPERROR) turn on. Normally discard file is located at          dirout/<PROCESS_NAME_PDB_RDB>.dsc

-- Always communicate with user during troubleshooting particularly when you planning to skip transaction. User has to approve before you     skip the transaction or a record.

-- The REPERROR flag skips all the subsequent records that encountered specific error until turned off. No way to skip only one specific     record using this flag.




Replication issues :

-- Most of the troubleshooting techniques due to data inconsistency between primary and target are similar to primary key issue that   covered in Alert 1. You can use the same tactics to resolve Alert 2 errors..
-- REPERROR flag discards records that encountered specific Oracle error and saves discarded details in discard file.
-- REPERROR skips only a problematic record/row; not entire transaction.

-- Most common REPERROR flags:
–REPERROR 0001 discard: Unique Constraint violation. Insert or update statement fails.
–REPERROR 1403 discard: No data found; update or delete statement fails.
–REPERROR 2292 discard: Integrity constraint .violated -child record found. Update or delete fails.
–REPERROR 28117 discard: Integrity constraint violated -parent record not found. Insert or update fails.


Main issues on replicate :
Alert 1 –Primary Key Issue
Alert 2 –Data Inconsistency Errors
Alert 3 –Uncommitted transaction
Alert 4 –Lag Checkpoint Exceeded Limit
Alert 5 –Data not moving
Alert 6 –Incompatible Record Format     -- fixed from gg 8 – scanforheader



How to sync GG when out of sync :
-- stop gg
-- insert using dblink/export import
-- start gg -- with skip errors ; remove skip errors when gg is in sync
or
-- export and start replicate .. ( good if started from csn number )



Consider MAXCOMMITPROPAGATIONDELAY parameter in extract, if extract abends on SCN error.
THREADOPTIONS MAXCOMMITPROPAGATIONDELAY 30000, IOLATENCY 3000



#################################################################
Skip  Transaction 
#################################################################



replicat paramfile dirprm/rtwflxd3.prm  skiptransaction

send extract oow_ex1, showtrans

send extract oow_ex1, skiptrans 5.28.1323 FORCE


send E_CBDS showtrans 688.6.2315775  file tran_688.6.2315775.dmp detail
SEND EXTRACT E_CBDS , SKIPTRANS 688.6.2315775 THREAD 1




#################################################################



#################################################################
Tracing
#################################################################

ggsci> send <rep_name> trace2 ./dirrpt/rep_name.trc
wait for 3-4 minutes
ggsci> send <rep_name> trace2 off --->> get the trace file in rep_name.trc


"send Extract , trace off" for Extract, "send Replicat , trace off" for Replicat
send extract EXT1 trace /tmp/trace_me.trc



TRACE | TRACE2

Valid For

Extract and Replicat

Description

Use the TRACE and TRACE2 parameters to capture Extract or Replicat processing information to help reveal processing bottlenecks. Both support the tracing of DML and DDL.

Tracing also can be turned on and off by using the SEND EXTRACT or SEND REPLICAT command in GGSCI. See "SEND EXTRACT" or "SEND REPLICAT".

Contact Oracle Support for assistance if the trace reveals significant processing bottlenecks.

Default

No tracing

Syntax

TRACE | TRACE2
[, DDL[INCLUDE] | DDLONLY]
[, [FILE] file_name]
[, THREADS (threadID[, threadID][, ...][, thread_range[, thread_range][, ...])]
TRACE
Provides step-by-step processing information.

TRACE2
Identifies the code segments on which Extract or Replicat is spending the most time.

DDL[INCLUDE] | DDLONLY
(Replicat only) Enables DDL tracing and specifies how DDL tracing is included in the trace report.

DDL[INCLUDE]
Traces DDL and also traces transactional data processing. This is the default. Either DDL or DDLINCLUDE is valid.

DDLONLY
Traces DDL but does not trace transactional data.

[FILE] file_name
The relative or fully qualified name of a file to which Oracle GoldenGate logs the trace information. The FILE keyword is optional, but must be used if other parameter options will follow the file name, for example:

TRACE FILE file_name DDLINCLUDE
If no other options will follow the file name, the FILE keyword can be omitted, for example:

TRACE DDLINCLUDE file_name
THREADS (threadID[, threadID][, ...][, thread_range[, thread_range][, ...])
Enables tracing only for the specified thread or threads of a coordinated Replicat. Tracing is only performed for threads that are active at runtime.

threadID[, threadID][, ...]
Specifies a thread ID or a comma-delimited list of threads in the format of threadID, threadID, threadID.

[, thread_range[, thread_range][, ...]
Specifies a range of threads in the form of threadIDlow-threadIDhigh or a comma-delimted list of ranges in the format of threadIDlow-threadIDhigh, threadIDlow-threadIDhigh.

A combination of these formats is permitted, such as threadID, threadID, threadIDlow-threadIDhigh.

If the Replicat is in coordinated mode and TRACE is used with a THREADS list or range, a trace file is created for each currently active thread. Each file name is appended with its associated thread ID. This method of identifying trace files by thread ID does not apply when SEND REPLICAT is issued by groupname with threadID (as in SEND REPLICAT fin003 TRACE...) or when only one thread is specified with THREADS.

Contact Oracle Support for assistance if the trace reveals significant processing bottlenecks.

Examples

Example 1  
The following traces to a file named trace.trc. If this is a coordinated Replicat group, the tracing applies to all active threads.

TRACE /home/ggs/dirrpt/trace.trc
Example 2  
The following enables tracing for only thread 1. In this case, because only one thread is being traced, the trace file will not have a threadID extension. The file name is trace.trc.

TRACE THREADS(1) FILE ./dirrpt/trace.trc
Example 3  
The following enables tracing for threads 1,2, and 3. Assuming all threads are active, the tracing produces files trace001, trace002, and trace003.

TRACE THREADS(1-3) FILE ./dirrpt/trace.trc





#################################################################
Stopping process forcefully 

#################################################################

stop  extract  ehahk  !
stop  extract  ehahk    force 



#################################################################
Bounded Recovery 
#################################################################





Bounded Recovery is a component of Oracle GoldenGate’s Extract process checkpointing facility. It guarantees an efficient recovery after Extract stops for any reason, planned or unplanned, no matter how many open (uncommitted) transactions there were at the time that Extract stopped, nor how old they were. Bounded Recovery sets an upper boundary for the maximum amount of time that it would take for Extract to recover to the point where it stopped and then resume normal processing.


  • BR Begin Recovery Checkpoint:
    This is similar to Standard recovery checkpoint.
    This is the first file that would be required for recovery.
    Whole or parts of transactions are restored by BR from BR files.
    Manually deleting the BR files is not recommended.
  • BR End Recovery Checkpoint:
    The end of bounded recovery is where the extract will begin to process records normally from redo or archive logs.
    This is similar to standard current checkpoint


Bounded Recovery is new feature in OGG 11.1, this is how it works:

A transaction qualifies as long-running if it has been open longer than one Bounded Recovery interval, which is specified with the BRINTERVAL option of the BR parameter.

For example, if the Bounded Recovery interval is four hours, a long-running open transaction is any transaction that started more than four hours ago.

At each Bounded Recovery interval, Extract makes a Bounded Recovery checkpoint, which persists the current state and data of Extract to disk, including the state and data (if any) of long-running transactions. If Extract stops after a Bounded Recovery checkpoint, it will recover from a position within the previous Bounded Recovery interval or at the last Bounded Recovery checkpoint, instead of processing from the log position where the oldest open long-running transaction first appeared, which could be several trail files ago.

 The BR checkpoint information is shown in the SHOWCH output starting with OGG v11.1.1.1



Bounded Recovery is enabled by default for Extract processes and has a 4 hour BR interval. To adjust the BR interval to say 24 hours, use the following syntax in your Extract parameter file:

BR BRINTERVAL 24, BRDIR BR



The default location for BR checkpoint files is the GoldenGate home directory. This can be altered by including a full path:

BR BRINTERVAL 24, BRDIR /ggsdata/brcheckpoint



Manually create  checkpoint : recommended before stopping extract 

send extsha1, br brcheckpoint immediate






#################################################################
Golden Gate  in case of  Dataguard   switchover / Failover 
#################################################################


We only need to ensure there is no lag and a checkpoint is done before stopping.
Move trail files to the DR site. Get more information from the Oracle docs below.

1323670.1    Best Practice - Oracle GoldenGate and Oracle Data Guard - Switchover/Fail-over                                   Operations

1322547.1    Best Practice - Oracle GoldenGate and Oracle Data Guard - Switchover/Fail-over                                     Operations  for GoldenGate


Controlled DR Tests Using Dataguard with Goldengate in the Mix (Doc ID 1672938.1)






#################################################################
Golden Gate  in case of  Flashback 
#################################################################




If the database is flashed back, we need to reset the GG capture process accordingly. Refer to Doc ID 1626736.1 for more information.


SQL> select min (OLDEST_FLASHBACK_TIME) from v$flashback_database_log;
MIN(OLDEST_FLASHB
-----------------
26-FEB-2017 07:25


ggsci
stop replicat CSCURPCH;


SHUTDOWN IMMEDIATE;
STARTUP MOUNT;
flashback database to timestamp TO_TIMESTAMP('2017-02-26 08:45:00','YYYY-MM-DD HH24:MI:SS');
ALTER DATABASE OPEN READ ONLY;
SHUTDOWN IMMEDIATE
STARTUP MOUNT
ALTER DATABASE OPEN RESETLOGS;

ggsci
alter replicat CSCURPCH, begin 2017-02-26 08:45:00
start replicat CSCURPCH




#################################################################
Tables without  primary Key  --> KEYCOLS 
#################################################################


Ideally we cannot add  tables without having primary key or primary key is disabled . Also if some wanted columns are  not part of primary key  .  In this scenario we can use  KEYCOLS  



TABLE  OWNER.table_name ,KEYCOLS (column1,column2);


Refer Doc ID 1271578.1   for more detail 





#################################################################
Exception and Error Handling 
#################################################################



=>   Defining    reperror

REPERROR (DEFAULT, EXCEPTION)
REPERROR (DEFAULT2, ABEND)
REPERROR (-1, EXCEPTION)




=>  Creating exception table

Doc ID 1382092.1)

GoldenGate does not provide a standard exceptions handler. By default, a Replicat process will abend should any operational failure occur, and will rollback the transaction to the last known checkpoint. This may not be ideal in a production environment.
The HANDLECOLLISIONS and NOHANDLECOLLISIONS parameters can be used to control whether or not Replicat tries to resolve duplicate-record and missing-record errors, but should these errors be ignored?
To determine what error has occurred, by which Replicat, caused by what data, create an Exceptions handler that will trap and log the specified Oracle error(s), but allow the Replicat to continue to process data.

Here is an example
drop table ggddlusr.exceptions
/
create table ggddlusr.exceptions
(rep_name varchar2(8),
table_name varchar2(61),
errno number,
dberrmsg varchar2(4000),
optype varchar2(20),
errtype varchar2(20),
logrba number,
logposition number,
committimestamp timestamp)
/

replicat r1
SETENV (ORACLE_HOME="/oracle/software/rdbms/11.2.0.2")
SETENV (ORACLE_SID="test")
SETENV (NLS_LANG = "AMERICAN_AMERICA.UTF8")
userid ggddlusr,password test

--Start of the Macro
MACRO #exception_handler
BEGIN
, TARGET ggddlusr.exceptions
, colmap ( rep_name = "R1"
, table_name = @GETENV ("GGHEADER", "TABLENAME")
, errno = @GETENV ("LASTERR", "DBERRNUM")
, dberrmsg = @GETENV ( "LASTERR", "DBERRMSG")
, optype = @GETENV ( "LASTERR", "OPTYPE")
, errtype = @GETENV ( "LASTERR", "ERRTYPE")
, logrba = @GETENV ( "GGHEADER", "LOGRBA")
, logposition = @GETENV ( "GGHEADER", "LOGPOSITION")
, committimestamp = @GETENV ( "GGHEADER", "COMMITTIMESTAMP") )
, INSERTALLRECORDS
, EXCEPTIONSONLY;
END;
--End of the Macro

Reportcount every 30 Minutes, Rate
Report at 01:00
ReportRollover at 01:15
discardfile /oracle/software/goldengate/11.1.1.1/dirrpt/r1.dsc, megabytes 50, append
DDL INCLUDE MAPPED &
EXCLUDE OBJNAME "GGDDLUSR.EXCEPTIONS"
AssumeTargetDefs
REPERROR (DEFAULT, EXCEPTION)
REPERROR (DEFAULT2, ABEND)
Map pubs.*, target pubs.*;

MAP pubs.* #exception_handler()




#################################################################
Golden Gate  in Rac Environment
#################################################################

Apart from the normal GG installation, a RAC GG installation has two main additional tasks: deciding on a cluster filesystem, and adding GG to the cluster as an application resource.

Also in Rac  configuration we need to move  dirpcs  to shared  drive / distributed file system environment   like dirprm , dirdat ,  dirchk  ,  dirtmp  , dirrpt .     Goldengate instance will be active on one node and in case of a node reboot or node eviction or node is down for a scheduled maintenance, the Goldengate process should automatically fail over to the other surviving nodes. 


>> Extract can only run against one instance

>>  If instance fails,
Manager must be stopped on failed node:
Manager and extract must be started on a surviving node

>> Failover can be configured in Oracle Grid  Infrastructure



Not covering ACFS creation in this blog

Adding GG  in  Cluster as application resource :

For adding GG  resource to cluster we need to first  create application vip resource .

Update the below vip in the /etc/hosts file on both the nodes , ( the vip should be on the same 
subnet of the public ip).


--> add Vip

cd /optware/grid/11.2.0.4/bin
./appvipcfg create -network=1 -ip=<x.x.x.x> -vipname=pggs-vip -user=root
./crsctl setperm resource pggs-vip -u user:oracle:r-x

 cd /optware/grid/11.2.0.4/bin
./crsctl status resource pggs-vip -t
./crsctl start resource pggs-vip -c  hostname  
./crsctl status resource pggs-vip -t




--> deploy agent script 

Oracle Clusterware runs resource-specific commands through an entity called an agent.
The agent script must be able to accept 5 parameter values: start, stop, check, clean and abort (optional).


Now we will create a script and place it in a shared location; here we have placed the script
under the GG home, which can be accessed from both nodes. (This is the sample script provided by Oracle; we can also use a customized script as per our requirements.)


GG_HOME
mkdir crs_gg_script
cd crs_gg_script
vi gg_action.scr
chmod 750 gg_action.scr

cd /optware/grid/11.2.0.4/bin/
ls -ltr  /ggdata/csgg/crs_gg_script/gg_action.scr


#!/bin/sh
#goldengate_action.scr
#
# Oracle Clusterware agent script for the Oracle GoldenGate Manager resource.
# Clusterware invokes it with a single argument: start | stop | check | clean
# | abort. Exit status 0 means success / manager running; non-zero otherwise.
. ~oracle/.bash_profile
# Require exactly one action argument.
[ -z "$1" ]&& echo "ERROR!! Usage $0 {start|stop|check|clean|abort}"&& exit 99
GGS_HOME=/golden_gate
#specify delay after start before checking for successful start
start_delay_secs=5
#Include the Oracle GoldenGate home in the library path to start GGSCI
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${GGS_HOME}
#set the oracle home to the database to ensure Oracle GoldenGate will get
#the right environment settings to be able to connect to the database
export ORACLE_HOME=/u01/app/oracle/product/11.2/db
export CRS_HOME=/grid/11.2
#Set NLS_LANG otherwise it will default to US7ASCII
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
logfile=/tmp/crs_gg_start.log
###########################
function log
###########################
# Write a timestamped message to stdout and append it to $logfile.
{
DATETIME=`date +%d/%m/%y-%H:%M:%S`
echo $DATETIME "goldengate_action.scr>>" $1
echo $DATETIME "goldengate_action.scr>>" $1 >> $logfile
}
#check_process validates that a manager process is running at the PID
#that Oracle GoldenGate records in dirpcs/MGR.pcm.
check_process () {
dt=`date +%d/%m/%y-%H:%M:%S`
if ( [ -f "${GGS_HOME}/dirpcs/MGR.pcm" ] )
then
pid=`cut -f8 "${GGS_HOME}/dirpcs/MGR.pcm"`
# Guard against an empty/garbled PID file and quote both sides: the original
# unquoted test was a shell error (or a false positive) when $pid was empty.
if [ -n "${pid}" ] && [ "${pid}" = "$(ps -e | grep "${pid}" | grep mgr | awk '{ print $1 }')" ]
then
#manager process is running on the PID . exit success
echo $dt "manager process is running on the PID . exit success">> /tmp/check.out
exit 0
else
#manager process is not running on the PID
echo $dt "manager process is not running on the PID" >> /tmp/check.out
exit 1
fi
else
#manager is not running because there is no PID file
#(fixed: original "echo $ dt" printed a literal "$" and mangled the message)
echo $dt "manager is not running because there is no PID file" >> /tmp/check.out
exit 1
fi
}
#call_ggsci is a generic routine that executes a ggsci command
#passed as $1 via a here-document, logging the output.
call_ggsci () {
log "entering call_ggsci"
ggsci_command=$1
#log "about to execute $ggsci_command"
log "id= $USER"
cd ${GGS_HOME}
ggsci_output=`${GGS_HOME}/ggsci << EOF
${ggsci_command}
exit
EOF`
log "got output of : $ggsci_output"
}
case $1 in
'start')
#Updated by Sourav B (02/10/2011)
# During failover if the "MGR.pcm" file is not deleted at the node crash
# then Oracle clusterware won't start the manager on the new node assuming the
# manager process is still running on the failed node. To get around this issue
# we will delete the "MGR.pcm" file before starting up the manager on the new
# node. We will also delete the other process files with pc* extension and to
# avoid any file locking issue we will first backup the checkpoint files and then
# delete them from the dirchk directory. After that we will restore the checkpoint
# files from backup to the original location (dirchk directory).
log "removing *.pc* files from dirpcs directory..."
cd $GGS_HOME/dirpcs
rm -f *.pc*
log "creating tmp directory to backup checkpoint file...."
cd $GGS_HOME/dirchk
# -p: tolerate a tmp directory left over from a previous failed start
mkdir -p tmp
log "backing up checkpoint files..."
cp *.cp* $GGS_HOME/dirchk/tmp
log "Deleting checkpoint files under dirchk......"
rm -f *.cp*
log "Restore checkpoint files from backup to dirchk directory...."
cp $GGS_HOME/dirchk/tmp/*.cp* $GGS_HOME/dirchk
log "Deleting tmp directory...."
rm -rf tmp
log "starting manager"
call_ggsci 'start manager'
#there is a small delay between issuing the start manager command
#and the process being spawned on the OS . wait before checking
log "sleeping for start_delay_secs"
sleep ${start_delay_secs}
#check whether manager is running and exit accordingly
check_process
;;
'stop')
#attempt a clean stop for all non-manager processes
call_ggsci 'stop er *'
#ensure everything is stopped
call_ggsci 'stop er *!'
#stop manager without (y/n) confirmation
call_ggsci 'stop manager!'
#exit success
exit 0
;;
'check')
check_process
exit 0
;;
'clean')
#attempt a clean stop for all non-manager processes
call_ggsci 'stop er *'
#ensure everything is stopped
call_ggsci 'stop er *!'
#in case there are lingering processes
call_ggsci 'kill er *'
#stop manager without (y/n) confirmation
call_ggsci 'stop manager!'
#exit success
exit 0
;;
'abort')
#ensure everything is stopped
call_ggsci 'stop er *!'
#in case there are lingering processes
call_ggsci 'kill er *'
#stop manager without (y/n) confirmation
call_ggsci 'stop manager!'
#exit success
exit 0
;;
esac





--> add  gg  to cluster

./crsctl add resource ggateapp \
-type cluster_resource \
-attr "ACTION_SCRIPT= /ggdata/csgg/crs_gg_script/gg_action.scr, \
CHECK_INTERVAL=30, \
START_DEPENDENCIES='hard(pggs-vip,ora.asm) pullup(pggs-vip)',  \
STOP_DEPENDENCIES='hard(pggs-vip)'"

./crsctl status resource ggateapp -t
./crsctl start resource ggateapp -c  hostname
./crsctl status resource ggateapp -t



-->Testing

cd /optware/grid/11.2.0.4/bin
./crsctl relocate resource ggateapp -f
./crsctl status resource ggateapp -t
./crsctl status resource pggs-vip -t


cd /optware/grid/11.2.0.4/bin/
./crs_relocate -f ggateapp -f
./crsctl status resource ggateapp -t
./crsctl status resource pggs-vip -t






References :

https://all-database-soultions.blogspot.com/2022/11/install-and-configure-gg-monitoring.html




List of 11g New features

                                                     List of 11g New features




######################################################################################
############### others  ##################
##########################################################################################


-- flashcache ****
-- rolling upgrade features -- ASM
-- Hot patching
-- ACL
-- adrci : :automatic diagnostic repository
-- 11g self managed database
-- datapump : compression / compatible / encryption /  reuse_dumpfile , remap_table
-- secure file / LOB compression : http://www.oracle-base.com/articles/11g/SecureFiles_11gR1.php
-- compression : compression can be enabled at table, tablespace or partition level or both direct/conventional loads
-- enterprise manager support workbench
-- online shrink  of temp tablespace/ tempfile   : (keep option)
-- tablespace can be encrypted .
-- oracle database file system .
-- Optimal flexible architecture .--- OFA
-- 11g afresh for auditing .
-- LDAP_DIRECTORY_SYSAUTH and Oracle Internet Directory .
-- Sqlnet.ora parameter oci_result_cache_max_size
-- X$DBGALERTEXT  -- for reading alert logs  || alert log in html and txt format
-- Password is case sensitive .
-- compress archives :  alter system set  log_archive_dest_1 = 'SERVICE=DBA11GDR COMPRESSION=ENABLE';
-- password protected roles are no longer enabled by default
-- $ORACLE_HOME/network/admin/endpoints_listener.ora
-- flashback : we can enable flashback at open stage .
-- sql_exec_start column in v$sql
-- 11g datapump compression : COMPRESSION=ALL
-- advance compression
-- Multiple Shared Memory Segments Created by Default on 11.2.0.3 : (1399908.1) 15566.1 , 731658.1
-- DEINSTALL OF ORACLE HOME IN 11.2.0.2 .   : In 11.2.0.2 , there is a new directory created when you install the binaries called deinstall



ADR :automatic diagnostic repository
-- default location is oracle base .
-- Default setting is of 1 year . after which incident metadata is purged from ADR and files are retained for one month .




####################################################################
############### rac ################################################
####################################################################


Oracle 11g R1 RAC :
-- ADDM for RAC
-- ADR command-line tool - Oracle Automatic Diagnostic repository (ADR) has a new command-line interface named ADRCI, ADR Command Interface.
   ADRCI can be used to access the 11g alert log:
-- Optimized RAC cache fusion protocols
-- Oracle 11g RAC Grid provisioning .



Oracle 11g R2 RAC :
-- Oracle 11g Release 2 (11.2.0.2) Reboot less Node Fencing
-- Raw devices are not supported any longer in 11gR2, but they can be used
-- ASMCA
-- scan
-- gns
-- global awr
-- server pooling
-- default, LOAD_BALANCE is ON.
-- GSD, gsdctl introduced.
-- RAC OneNode
-- HAS , oracle restart
-- 11gR2 Grid Infrastructure Redundant Interconnect and ora.cluster_interconnect.haip [ID 1210883.1]
-- olr -- for has
-- grid Plug and play
-- out of place patching
-- ASM : fast start mirror resych and disk repair time
-- ASM :  Disk check : “alter diskgroup diskgroup_name check .”
-- ASM : Diskgroup can be  mounted as restricted . -- fast rebalance
-- ASM fasT rebalance
-- ASM preferred mirror read
-- ASM : Can use force option to mount / drop disk group.
-- ASM : mount by doing offline to unavailable disk if quorum exists : offline/online  after 20m
-- Intelligent Data Placement
-- asm template  : diskgroup attibutes
-- sysasm
-- can keep ocr and voting disk in asm
-- sysasm role
-- asm variable extent size ( increasing as per use ) , explicitly allocate au to diskgroup
-- md_backup/md_restore  -- asm metadata backup
-- raw device concept is  obsolete ,
-- can rename a diskgroup
-- hot patching
-- Oracle 11g RAC parallel upgrades ,
-- Oracle 11g have rolling upgrade features:
-- SRVM_TRACE enabled by default in 11.2 -- $ORACLE_BASE/grid/cv/log
   With the 11g upgrade, you had the option of applying the patchset to an individual CRS_HOME at a time,
-- ADDM for RAC
-- Passwordless Automatic SSH Connectivity : can configure ssh at installation
-- Clusterware and ASM share the same Oracle Home :
   The clusterware and ASM share the same home thus it is known as the Grid Infrastructure home (prior to
   11gR2, ASM and RDBMS could be installed either in the same Oracle home or in separate Oracle homes).
-- Hangchecktimer and oprocd are replaced :
   Oracle Clusterware 11g release 2 (11.2) replaces the oprocd and Hangcheck processes with the cluster
   synchronization service daemon Agent and Monitor to provide more accurate recognition of hangs and to
   avoid false termination.
-- HAIP :
   In 11.2.0.2 the new HAIP (redundant Interconnect) facility is active and multiple interface selection will
   support load balancing and failover. You can select more than 4 interfaces for private interconnect at install
   time or add them dynamically using oifcfg.
-- local_listener and remote_listener parameters
-- ASM does not need to be bounced after dropping a disk . -- bug
-- sector_size for diskgroup
-- Time Synchronization is a new feature in Oracle 11g R2 that automatically synchronizes the timestamps
   of all of the nodes in the cluster. In previous releases, third party tools were generally used.
   Time Synchronization can be used in observer mode (if a Network Time Protocol is already in place) or active mode where one node is designated as the master node and all of the others are synchronized to it.
   wsx-prod-73 [58]:crsctl check ctss
   CRS-4700: The Cluster Time Synchronization Service is in Observer mode.
-- metadata backup : md_backup/md_restore
-- 11gR2 Universal Collection is expanded diagcollection.pl to collect GI, ASM and database (RAC) diagnostics (logfile, trace file etc), the goal is to reduce back and forth information requests between Oracle Support and customers.
11gR2 GI/ASM/RAC Universal Collection Guide [ID 1485042.1]
-- Backup of Voting disk using “dd” command not supported
-- Voting disk and OCR can be keep in same disk-group or different disk-group
-- Voting disk and OCR automatic backup kept together in a single file.
-- we can use multiple private interconnect ( installation time or oifcfg setif) : link aggregation
-- adding node made easy
   cluvfy stage -pre nodeadd -n serverC -verbose
   addNode.sh -silent "CLUSTER_NEW_NODES={ServerC}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={ServerC-vip}"
   cluvfy stage -post nodeadd -n serverC -verbose
-- removing node made easy
   olsnodes -s -t
   crsctl unpin css -n <ServerC>
   $GI/crs/install/rootcrs.pl -deconfig -force  ( on node to be deleted)
   crsctl delete node -n <ServerC>    
-- new commands :  cluster wide
   crsctl (check / start / stop ) cluster -all
-- Oracle Direct NFS






>>>>>>>  Oracle 11g R1 RAC :


Oracle 11g RAC parallel upgrades - Oracle 11g have rolling upgrade features whereby RAC database can be upgraded without any downtime.

Hot patching - Zero downtime patch application.

Oracle RAC load balancing advisor - Starting from 10g R2 we have RAC load balancing advisor utility. 11g RAC load balancing advisor is only available with clients who use .NET, ODBC, or the Oracle Call Interface (OCI).

ADDM for RAC - Oracle has incorporated RAC into the automatic database diagnostic monitor, for cross-node advisories. The script addmrpt.sql run give report for single instance, will not report all instances in RAC, this is known as instance ADDM. But using the new package DBMS_ADDM, we can generate report for all instances of RAC, this known as database ADDM.

ADR command-line tool - Oracle Automatic Diagnostic repository (ADR) has a new command-line interface named ADRCI, ADR Command Interface. ADRCI can be used to access the 11g alert log:
$adrci
adrci> show alert
Optimized RAC cache fusion protocols - moves on from the general cache fusion protocols in 10g to deal with specific scenarios where the protocols could be further optimized.
Oracle 11g RAC Grid provisioning - The Oracle grid control provisioning pack allows us to "blow-out" a RAC node without the time-consuming install, using a pre-installed "footprint".

Data Guard - Standby snapshot - The new standby snapshot feature allows us to encapsulate a snapshot for regression testing. We can collect a standby snapshot and move it into our QA database, ensuring that our regression test uses real production data.
Quick Fault Resolution - Automatic capture of diagnostics (dumps) for a fault.


>>>>>>>>>>>>>>>>>>>>  Oracle 11g R2 RAC :


We can store everything on the ASM. We can store OCR & voting files also on the ASM.

ASMCA

Single Client Access Name (SCAN) - eliminates the need to change tns entry when nodes are added to or removed from the Cluster. RAC instances register to SCAN listeners as remote listeners. SCAN is fully qualified name. Oracle recommends assigning 3 addresses to SCAN, which create three SCAN listeners.

AWR is consolidated for the database.

11g Release 2 Real Application Cluster (RAC) has server pooling technologies so it’s easier to provision and manage database grids. This update is geared toward dynamically adjusting servers as corporations manage the ebb and flow between data requirements for datawarehousing and applications.
By default, LOAD_BALANCE is ON.

GSD, gsdctl introduced.

RAC OneNode is a new option that makes it easier to consolidate databases that aren’t mission critical, but need redundancy.

rac to rac data guard

11gR2 Grid Infrastructure Redundant Interconnect and ora.cluster_interconnect.haip [ID 1210883.1]


With Oracle Clusterware 11g release 2 and later, Database Configuration Agent (D
BCA) no longer sets the LOCAL_LISTENER parameter. The Oracle Clusterware agent t
hat starts the database sets the LOCAL_LISTENER parameter dynamically, and it se
ts it to the actual value, not an alias. So listener_alias entries are no longer
needed in the tnsnames.ora file. For the REMOTE_LISTENER parameter, Oracle Clus
terware uses the EZ connect syntax scanname:scanport, so no entries are needed f
or the REMOTE_LISTENER parameter in the tnsnames.ora file


Intelligent Data Placement can be managed with the ALTER DISKGROUP ADD or MODIFY TEMPLATE SQL and the ALTER DISKGROUP MODIFY FILE SQL statement, which include a disk region clause for setting hot/mirrorhot or cold/mirrorcold regions in a template:
ALTER DISKGROUP adndata1 ADD TEMPLATE datafile_hot
  ATTRIBUTE (  HOT   MIRRORHOT);
The ALTER DISKGROUP ... MODIFY FILE SQL statement that sets disk region attributes for hot/mirrorhot or cold/mirrorcold regions:
ALTER DISKGROUP adndata1 MODIFY FILE '+data/adn3/datafile/tools.255.765689507'
  ATTRIBUTE (   HOT    MIRRORHOT);



######################################################################################
###########  data guard ################
##########################################################################################




-- active dataguard
-- snapshot standby
-- rac to rac data guard
-- creation of dataguard using grid
-- block change tracking enabled on dataguard
-- Role Based Services ( need dataguard enabled )
-- Fast Start Failover Improvements
-- CONFIGURE ARCHIVELOG DELETION POLICY TO SHIPPED TO STANDBY;
-- Using DBMS_SCHEDULER to Run Jobs on a Logical Standby Database (11g R1)
-- log transport : Redo Compression ,
-- DB_LOST_WRITE_PROTECT
-- Automatic Gap Resolution (  to cover network issues if  not Maximum Protection. ) http://hongwang.wordpress.com/2011/12/29/data-guard-11gs-automatic-gap-resolution-and-ora-16401-error

-- LNS process
-- alter session set STANDBY_MAX_DATA_DELAY=1; ||  alter session sync with primary; ( while db open for read only ) ()
-- Heterogeneous DataGuard.,
-- In Oracle Database 11g, you can temporarily convert the physical standby database to a logical standby to perform a rolling upgrade
-- RMAN Understands Data Guard Configurations : db_unique_name
-- compress archives :  alter system set  log_archive_dest_1 = 'SERVICE=DBA11GDR COMPRESSION=ENABLE';
-- Database Rolling Upgrade Using Transient Logical Standby:







#############################################################
###########  rman ################
#############################################################



-- import  recovery catalog of 10g  in schema of 11g recovery catalog -- Merging Catalogs
-- Data recovery advisor .
-- Enable block change tracking file on standby . : fast incremental backup enhancement .
-- Intra-File Parallel Backup :   Same datafile in parallel
-- Virtual private catalog
-- vALIDATE DATABASE ------enhancement--  Proactive Health Checks
-- VALIDATE DATAFILE 1 BLOCK 10;
-- Undo blocks that are not needed for transaction recovery are not backed up ;
-- Compress backupset enhancement (ZLIB algorithm)
-- Flashback Logs to Rescue - using flashback  for block recovery
-- Set NEWNAME Flexibility (Release 2 Only) ( for tablespace and database )
-- Auto Block Repair (Release 2 Only) (from active data guard  )  -- AUTO BMR( Block Media Recovery)
-- TO DESTINATION Clause (Release 2 Only)
-- More Compression Choices (Release 2 Only)
-- Backup to the Cloud (Release 2 Only)
-- CONFIGURE ARCHIVELOG DELETION POLICY TO SHIPPED TO STANDBY;
-- snapshot controlfile ( shared location , no enqueue required )
-- RMAN 11.2 duplicate now RESTORES the backup controlfile to the auxiliary host rather than CREATE it.
   So you must ensure that there is a controlfile backup available to meet the duplicate time.






Import catalog from 10g database :
-- Merging catalog .
    RMAN> connect catalog rco11/password@catdb
    RMAN> import catalog rcat10/oracle@inst1 (no unregister);
-- They deregister all database registered in RCAT10 catalog .
-- They import metadata for all registered database in RCAT10 database .
-- They register all RCAT10 catalog registered database in RCO11 catalog .



Data recovery advisor .
-- list  failure ( list failure reported in ADR . )
-- advice failure: { detects new failures recorded in the Automatic Diagnostic Repository (ADR) since the last LIST FAILURE. }
-- repair failure ( preview )  .
-- Change failure : It is used to explicitly close the open failures. ,
   It is used to change failure priority only for HIGH or LOW priorities



Virtual private catalog
-- access  to subset of catalog .
-- SQL> GRANT recovery_catalog_owner TO vpc1;
-- RMAN> CONNECT CATALOG catowner/password@catdb;
-- .RMAN> GRANT CATALOG FOR DATABASE prod1 TO vpc1
-- RMAN> CONNECT CATALOG vpc1/password@catdb;
-- RMAN> CREATE VIRTUAL CATALOG;



VALIDATE DATABASE ------enhancement .
-- We can directly use validate database command  instead of using “backup validate “
-- We can validate blocks using “validate datafile 1 block 2”
-- records live corruption in v$database_block_corruption .
-- check database  for intra block corruption and not inter block.
-- Failure is logged in ADR.


Compress backupset enhancement .
-- uses new ZLIB algorithm instead of old BZIP2 algorithm . ZLIB is 40% faster .
-- “configure compression algorithm to ZLIB”



More Compression Choices (Release 2 Only) :
-- Compression in RMAN is not new; it has been around for some time. Here is how you can create a compressed backupset of the tablespace ABCD_DATA.
RMAN> backup as compressed backupset
2> format '/u01/oraback/%U.rmb'
3> tablespace abcd_data
4> ;
-- In Oracle Database 11g Release 1 we saw the introduction of a new compression algorithm called ZLIB that is quite fast (and consumes less CPU) but with a reduced compression ratio. In the current version there are several options for compression.
-- The default compression is called BASIC, which does not require any extra cost option. Using the Advanced Compression Option, you now have the ability to specify different types of compression levels: LOW, MEDIUM and HIGH – with compression ratios from least to highest and CPU consumption (and conversely RMAN throughput) from least to highest. Here is how you configure the compression option to high:
rman> configure compression algorithm 'high';
-- In a test, I got a compressed backupset using HIGH as 118947840 compared to 1048952832 uncompressed – almost 9X improvement. Of course it will vary from database to database. A high setting for the compression option creates smaller backupsets, which are great for slow networks but consume CPU cycles.




Set NEWNAME Flexibility (Release 2 Only)  :
-- run
{
 set newname for tablespace examples to '/u02/examples%b.dbf';
 … }
-- run
{
set newname for database to '/u02/oradata/%b';
}






######################################################################################
#######################  installation , migration . upgrade ,#############################
##########################################################################################




-- Upgrade option while software installation ( out of box patching )
-- diagnostic_dest
-- utlu111i.sql  :  on database to be upgraded .
-- utlu111s.sql / utlu1112s.sql   :  to see upgrade report .
-- utlrp.sql does parallel compilation
-- Startup upgrade
-- We can  re-run catupgrd.sql if needed .
-- Automatic upgrade from 32bit to 64bit while upgrade . 62290.1
-- create spfile=’/u01/app/spfileorcl.bak’ from memory;
-- after upgrade we need to run catuppst.sql .
-- after upgrade we need to run utlpwdmg.sql – TO create new password verification function .
-- tablespace can be encrypted .
-- hot patching




Startup upgrade
--> it allows only sysdba connections
--> it sets system initialization parameters to specific values that are required to enable database upgrade scripts to be run.
--> disables job queues
--> disable system triggers .



Hot patching :
-- consume more memory ,
-- can be applied online
-- detect conflict between two online patches .
-- fast .
-- to check if patch can be installed as hot patch :
Opatch query -is_online_patch <patch_location> or
Opatch query <patch_location> -all




######################################################################################
###########  cloning  ################
##########################################################################################



-- active database duplication if no files transferred -- DUPLICATE TARGET DATABASE FOR STANDBY FROM ACTIVE DATABASE
-- if files transferred no need to connect to target database -- Duplicate Database from Backup (Release 2 Only)
--




DUPLICATE TARGET DATABASE FOR STANDBY FROM ACTIVE DATABASE
SPFILE
PARAMETER_VALUE_CONVERT 'primdb', 'stbydb'
SET DB_UNIQUE_NAME='stbydb'
SET DB_FILE_NAME_CONVERT='/u01/primdb/','/u02/stbydb/'
SET LOG_FILE_NAME_CONVERT='/u01/primdb/','/u01/stbydb/'
SET CONTROL_FILES='/u01/primdb/controlfile1.ctl'
SET LOG_ARCHIVE_MAX_PROCESSES='7'
SET FAL_CLIENT='stbydb'
SET FAL_SERVER='primdb'
SET STANDBY_FILE_MANAGEMENT='auto'
SET LOG_ARCHIVE_CONFIG='DG_CONFIG=(primdb,stbydb)'
SET LOG_ARCHIVE_DEST_2='service=primdb SYNC valid_for=(ONLINE_LOGFILE,PRIMARY_ROLE) db_unique_name=primdb'







######################################################################################
###########  objects    ################
##########################################################################################




-- read only tables
-- After online redefinition of tables all objects except triggers remains valid .
-- invisible indexes
-- enhancements in partitioning : http://www.oracle-base.com/articles/11g/PartitioningEnhancements_11gR1.php

-- Virtual columns  : http://www.oracle-base.com/articles/11g/VirtualColumns_11gR1.php
-- compress table for all operation  : only data henceforth is compressed
-- flashback data archive : no need for log minner , no snapshot tool old : http://gavinsoorma.com/2009/11/11g-flashback-data-archive-part-one/
--





######################################################################################
###########  tuning   ################
##########################################################################################



--
-- memory_target/memory max_target   AMM
-- sql plan baselines
-- sql tuning sets
-- sql monitoring -- V$SQL_MONITOR
-- 11g Compression
-- Automatic Memory Tuning  (AMM)
-- sql profile
-- automatic sql tuning
-- Automatic Plan Capture
-- segment advisor , sql repair advisor , SQL Performance Analyzer
-- awr sql report  / awr compare report / awr global report
-- work load replay
-- pending stats
-- increment stats for partition
-- invisible index
-- awr compare report
-- can remove particular sql statement from Shared Pool without flushing the entire Shared Pool!! -- dbms_shared_pool.purge -- purge sql id
-- health check
-- Adaptive Cursor Sharing : http://www.oracle-base.com/articles/11g/AdaptiveCursorSharing_11gR1.php
-- Query result cache
-- I/O calibration
-- Automated maintenance task .
-- RESULT_CACHE
-- temp_space_allocated to the v$session and V$active_session_history
-- Auto DOP is enabled by setting parallel_degree_policy = AUTO or LIMITED.
-- PARALLEL_FORCE_LOCAL_RAC
-- Statistics Have Improved from Oracle 10gR2 to 11gR2
--


  ,




Sql Plan Baselines :
-- Plan can be manually loaded in sql plan baselines .
-- Plan in sql plan baselines are verified and accepted plan .
-- For sql plan baselines to be accessible to optimizer , SYSAUX tablespace must be  online .



Sql access advisor :
-- partitioning of tables and indexes
-- creation of bitmap , function-based, btree indexes
-- optimizing of materialized view for maximum query usage and fast refresh .


Sql tuning sets , sql management base
-- Every sql statements in STS is considered only once for execution .
-- execution plan and execution statistics are computed for each statement in STS.


Automatic SQL Tuning
-- part of auto task framework
-- Based on AWR top sql identification
http://www.oracle-base.com/articles/11g/AutomaticSqlTuning_11gR1.php



Performance Analyzer
– This is the codification of Oracle’s “holistic” approach, an empirical technique whereby SQL is tuned in a real-world environment.  Designed to testing silver bullet impact of global changes (init.ora parameters, new indexes and materialized views, &c), SPA provides real-world evidence  of the performance impact of major changes.


11g Compression
– with a late start out of the gate (other DBMS tools have had compression for decades), Oracle’s compression promises to improve the speed of full-scans operations (important to batch jobs and data warehouses).


SQL optimization improvements
– The cost-based optimizer (CBO) is continuously evolving, and we now see bind peeking fixed and extended optimizer statistics.


Automatic Memory Tuning
- Automatic PGA tuning was introduced in Oracle 9i. Automatic SGA tuning was introduced in Oracle 10g.
  In 11g, all memory can be tuned automatically by setting one parameter. You literally tell Oracle how much memory it has and it determines how much to use   for PGA, SGA and OS  Processes.


AWR Baselines
- The AWR baselines of 10g have been extended to allow automatic creation of baselines for use in other features.


Adaptive Metric Baselines
- Notification thresholds in 10g were based on a fixed point. In 11g, notification thresholds can be associated with a baseline, so the notification thresholds vary throughout the day in line with the baseline.



I/O calibration :
-- only one can be run at a time .
-- time_statistics must be set to true
-- can be used to estimate maximum number of I/O and maximum latency time for the system
-- need to set parameter filesystemio_options




Automated maintenance task .
-- database resource manager is automatically enabled in maintenance window that runs automated maintenance task
   to prevent consuming excessive amount of memory.
-- Runs : segment advisor , automatic sql tuning advisor , optimizer statistics gathering .




Query result cache
-- can be set at system, session or table level
-- can store results from normal as well as flashback queries . result_cache_mode parameter and hints result_cache and no_result_cache
-- size is allocated from shared pool but is not flushed when we flush shared pool .
-- use  dbms_result_cache to  manage query result  cache
http://www.oracle-developer.net/display.php?id=503
-- optimizer by default decides
select * from v$sgastat where pool='shared pool' and name like 'Result%';
select * from v$latchname where name like 'Result Cache%';



sql performance analyzer
-- it detects change in sql plan
-- it produces results that can be used to create sql plan baselines
-- it generates recommendations to run sql tuning advisor to tune regressed sql.



######################################################################################
###########  Parameter :    ################
##########################################################################################





1) DDL_LOCK_TIMEOUT --  for deadlock .
2) sec_protocol_error_further_action
3) Statistics_level to typical enables baselines by default in 11g
5) distributed_lock_timeout
6)  PRE_PAGE_SGA
7) memory_target   ( dynamic )
8) memory_max_target
9) db_securefile
10) resumable_timeout
11) enable_ddl_logging
12) optimizer_use_plan_baselines
13) sec_max_failed_login_attempts
14) optimizer_use_pending_statistics
15) LDAP_DIRECTORY_SYSAUTH
16) Diagnostic_dest
17) db_ultra_safe: --  block corruption
--  check for logical self-consistency of data block when modified in memory .
--  checksum is calculated before and after the block change .
--  checks are performed for lost writes to physical standby database .
1) db_lost_write_protect . --  block corruption
2)





######################################################################################
###########  deprecated  parameters   :    ################
##########################################################################################





1. BACKGROUND_DUMP_DEST (replaced by DIAGNOSTIC_DEST)
2. COMMIT_WRITE
3. CORE_DUMP_DEST (replaced by DIAGNOSTIC_DEST)
4. INSTANCE_GROUPS
5. LOG_ARCHIVE_LOCAL_FIRST
6. PLSQL_DEBUG (replaced by PLSQL_OPTIMIZE_LEVEL)
7. PLSQL_V2_COMPATIBILITY
8. REMOTE_OS_AUTHENT
9. STANDBY_ARCHIVE_DEST
10. TRANSACTION_LAG (attribute of the CQ_NOTIFICATION$_REG_INFO object)
11. USER_DUMP_DEST (replaced by DIAGNOSTIC_DEST)



######################################################################################
###########  Views :     ################
##########################################################################################





dba_temp_free_space
dba_sql_plan_baselines
DBA_USERS_WITH_DEFPWD --  to check user with default password .
V$DIAG_INFO
Dba_autotask_client
Dba_flashback_archive
v$memory_dynamic_components /  v$memory_target_advisor  / v$memory_resize_ops
DIRECT NFS VIEWS :
  --  V$DNFS_FILES
  --  V$DNFS_SERVERS
  --  V$DNFS_STATS
  --  V$DNFS_CHANNELS