Monday, November 18, 2013

World of Oracle statistics



                          World of Oracle statistics 



è Difference between analyze and dbms_stats


The difference is described in the Note:114671.1


-- Analyze cannot process virtual columns
-- ANALYZE can't retain old statistics, dbms_stats can.
-- analyze doesn't collect global statistics for partitioned tables
-- use ANALYZE to LIST CHAINED ROWS


-- DBMS_STATS is able to parallelize its workload
-- DBMS_STATS allows you transfer stats between servers






è stats history :


select * from DBA_OPTSTAT_OPERATIONS  where lower(operation) like '%system%'

select * from DBA_OPTSTAT_OPERATIONS where START_TIME like '%MAR-12%'
AND TARGET IN ( 'ADVCLM.TRX_BATCH','ADVCLM.TRX','ADVCLM.TRX_BATCH_HANDLER','ADVCLM.TRX_BATCH_GROUP','ADVCLM.IXML_STATUS','ADVCLM.IXML_CLAIM')
 order by START_TIME ;


connect / as sysdba
pause see the opstat operations
col start_time for a32
col end_time for a32
col operation for a30
col target for a10
select *
from  dba_optstat_operations;
pause see stats history
col owner for a14
col table_name for a16
col partition_name for a10
col subpartition_name for a10
break on owner skip 1
select *
from dba_tab_stats_history;
clear break


############


DBA_OPTSTAT_OPERATIONS  --> see all gather stats run using dbms_stats
select * from DBA_OPTSTAT_OPERATIONS  where lower(operation) like '%system%'


SQL> select dbms_stats.get_stats_history_retention from dual;
GET_STATS_HISTORY_RETENTION
---------------------------
                         31


SQL> select dbms_stats.get_stats_history_availability from dual;
GET_STATS_HISTORY_AVAILABILITY
---------------------------------------------------------------------------
26-NOV-10 10.23.55.988228000 PM +04:00



SQL> select occupant_desc, space_usage_kbytes from v$sysaux_occupants where occupant_name='SM/OPTSTAT';
OCCUPANT_DESC                                                    SPACE_USAGE_KBYTES
---------------------------------------------------------------- ------------------
Server Manageability - Optimizer Statistics History                          192576



SQL> exec dbms_stats.alter_stats_history_retention(40);
PL/SQL procedure successfully completed.





DBMS_STATS.GET_STATS_HISTORY_RETENTION
 RETURN NUMBER;


DBMS_STATS.FLUSH_SCHEMA_MONITORING_INFO (
   ownname VARCHAR2 DEFAULT NULL);



DBMS_STATS.ALTER_STATS_HISTORY_RETENTION
This procedure changes the statistics history retention value. Statistics history retention is used by both the automatic purge and PURGE_STATS Procedure.

Syntax

DBMS_STATS.ALTER_STATS_HISTORY_RETENTION (
   retention       IN     NUMBER);
Parameter        Description
retention         
The retention time in days. The statistics history will be retained for at least this many days. The valid range is [1,365000]. Also you can use the following values for special purposes:

0 - old statistics are never saved. The automatic purge will delete all statistics history
-1 - statistics history is never purged by automatic purge.
NULL - change statistics history retention to default value
Usage Notes
To run this procedure, you must have the SYSDBA or both ANALYZE ANY DICTIONARY and ANALYZE ANY system privilege.



Table Stats History

 -- All stats history for user objects
 -- Sort: most recent at top
 -- NOTE(review): WRI$_OPTSTAT_TAB_HISTORY is an undocumented SYS base table;
 -- the supported interface is the DBA_TAB_STATS_HISTORY view. SAVTIME is the
 -- time the previous statistics were saved (i.e. just before replacement).
 SELECT do.owner,
        do.object_name,
        oth.*
   FROM SYS.WRI$_OPTSTAT_TAB_HISTORY oth join dba_objects do on oth.obj#=do.object_id
  WHERE OWNER NOT IN ('SYS','SYSTEM')
  ORDER BY savtime desc;
-- Get historical Statistics info for 1+ Tables
-- Replace the IN-list placeholder ('TABLE_1','TABLE_2',...) with real table
-- names before running; the literal "..." is not valid SQL.
WITH TableList as (select object_id, object_name
               from dba_objects
              where object_type='TABLE'
                and object_name IN ('TABLE_1','TABLE_2',...))
select OBJECT_NAME as TABLE_NAME,
       TO_CHAR(ANALYZETIME,'MM/DD/YYYY HH24:MI') as AnalyzeTime_Formatted,
       OTH.*
  From SYS.WRI$_OPTSTAT_TAB_HISTORY OTH, TableList
 where OTH.OBJ# = TableList.OBJECT_ID
 ORDER BY table_name, analyzetime;






è default database statistics collection using  statistics_level:

Ø  statistics_level :
Ÿ basic :
No advisory stats data is collected. We can still set individual parameters like timed_statistics, db_cache_advice, etc. explicitly.
Ÿ typical  :
This is the default value. Data is collected for segment-level statistics, timed statistics and all advisories.
The values of all other statistics collection parameters are overridden.
Ÿ all :
All typical-level statistics plus timed operating system statistics and row source execution statistics are collected. Values of the other statistics collection parameters are overridden.

Select statistics_name, activation_level from v$statistics_level order by 2 ;

conn / as sysdba
pause
col activation_level for a10
col statistics_name for a38
col statistics_view_name for a30
desc v$statistics_level
pause
select activation_level, statistics_name, statistics_view_name,
session_status,session_settable,system_status
from v$statistics_level
order by 1;

EXEC DBMS_SCHEDULER.ENABLE('GATHER_STATS_JOB');
exec dbms_stats.set_param('METHOD_OPT','FOR ALL INDEXED COLUMNS SIZE 254');
SELECT dbms_stats.get_param('METHOD_OPT') FROM dual;
exit
END




è table stats :


-- Prompted single-table / single-partition gather. 'SIZE REPEAT' re-creates
-- only histograms that already exist; NO_INVALIDATE=>FALSE invalidates
-- dependent cursors immediately so they are re-parsed with the new stats.
EXEC DBMS_STATS.GATHER_TABLE_STATS( OWNNAME=>'&OWNER',TABNAME=>'&TAB', partname => '&PART',DEGREE=>24, ESTIMATE_PERCENT=>DBMS_STATS.AUTO_SAMPLE_SIZE,METHOD_OPT=>'FOR ALL COLUMNS SIZE REPEAT',NO_INVALIDATE=>FALSE);


-- Simple table-level gather: automatic sample size, automatic histograms.
execute dbms_stats.gather_table_stats(ownname => 'RELODS', tabname =>'WEBTOKEN', estimate_percent =>DBMS_STATS.AUTO_SAMPLE_SIZE, method_opt => 'FOR ALL COLUMNS SIZE AUTO')

-- Fully spelled-out parameter list ('for all columns size 1' = no histograms).
-- NOTE(review): as written this is a bare procedure call; wrap it in
-- BEGIN ... END; or prefix with EXEC for it to actually run.
dbms_stats.gather_table_stats(ownname=>'VISION',tabname=>'PATIENT_PAYMENT',partname=>null,estimate_percent=>DBMS_STATS.AUTO_SAMPLE_SIZE,block_sample=>false,method_opt=>'for all columns size 1',
degree=>1,granularity=>'DEFAULT',cascade=>true,stattab=>null,statid=>null,statown=>null,no_invalidate=>false);

set pagesize 0
set heading off
set feedback off
spool c:\temp\alyz.sql
select
'ANALYZE TABLE '||OWNER||'.'||TABLE_NAME||' ESTIMATE STATISTICS;'
from dba_tables
--where owner not in ('SYS', 'SYSTEM')
where owner = 'BMR'
order by owner;
spool off
set feedback on
set heading on
@@ C:\temp\alyz
@@ d:\boban\scr\validate.sql

set pages 140
set lines 180
select 'exec dbms_stats.gather_table_stats('''||TABLE_OWNER||''','''||TABLE_NAME||''',method_opt=>''for columns status size 75'',cascade=>true)'
from dba_ind_columns where column_name='STATUS' and column_position=1
/

select 'exec dbms_stats.gather_table_stats(ownname=>'||chr(39)||OWNER||chr(39)||',tabname=>'||chr(39)||table_name||chr(39)||',estimate_percent=>'||100||',degree=>'||4||',cascade=>TRUE );'
from dba_tables where owner=UPPER ('AMS');

-- Generate an ANALYZE statement for every table owned by HR.
-- (Fixed: the original had mismatched quotes -- ''||owner||'' -- and a single
-- pipe before table_name, so it emitted a broken literal instead of SQL.)
select 'analyze table '||owner||'.'||table_name||' compute statistics;' from dba_tables where owner='HR';

-- ANALYZE examples. The sampling keyword is PERCENT (not "percentage"), and
-- the FOR ... COLUMNS clause belongs to ANALYZE TABLE only -- ANALYZE INDEX
-- simply computes/estimates index statistics.
analyze table abcd estimate statistics sample 5 percent for all indexed columns ;
analyze index abcd estimate statistics sample 5 percent ;

-- Compute (exact) statistics, building histograms on indexed columns only.
analyze table emp compute statistics for all indexed columns ;

-- Remove all optimizer statistics for the table.
analyze table abcd delete statistics ;




è 11g set stats preference:

exec dbms_stats.set_table_prefs('SH','SALES','STALE_PERCENT','13');
SELECT dbms_stats.get_prefs('STALE_PERCENT','HAFEEZ','SECURE')FROM dual;
exec dbms_stats.set_table_prefs('SH','CUSTOMERS','PUBLISH','false');

select * from DBA_TAB_STAT_PREFS ;


PREFERENCES ::
ESTIMATE_PERCENT
NO_INVALIDATE
METHOD_OPT
GRANULARITY
INCREMENTAL
PUBLISH
STALE_PERCENT


scope :
DATABASE LEVEL
SCHEMA LEVEL
TABLE LEVEL
STATEMENT LEVEL
GLOBAL LEVEL




Below can be used to set global preference 

SQL> column sname format a32
SQL> column spare4 format a32
SQL> select sname,spare4 from OPTSTAT_HIST_CONTROL$;

SNAME                 SPARE4
-------------------------------- --------------------------------
SKIP_TIME
STATS_RETENTION
TRACE                 0
DEBUG                 0
SYS_FLAGS             1
APPROXIMATE_NDV          TRUE
CASCADE              DBMS_STATS.AUTO_CASCADE
ESTIMATE_PERCENT         DBMS_STATS.AUTO_SAMPLE_SIZE
DEGREE                 2
METHOD_OPT             FOR TABLE FOR ALL INDEXED COLUMN
                 S SIZE 254

NO_INVALIDATE             DBMS_STATS.AUTO_INVALIDATE
GRANULARITY             AUTO
PUBLISH              TRUE
STALE_PERCENT             10
INCREMENTAL             FALSE
INCREMENTAL_INTERNAL_CONTROL     TRUE
AUTOSTATS_TARGET         AUTO
CONCURRENT             TRUE

18 rows selected


select dbms_stats.get_prefs('method_opt') a from dual;
exec dbms_stats.set_global_prefs('method_opt','FOR ALL COLUMNS SIZE 1')
select dbms_stats.get_prefs('method_opt') a from dual;







è 11g pending stats :



SQL> SELECT DBMS_STATS.GET_PREFS('PUBLISH') PUBLISH FROM DUAL;
SQL> show parameter pending


SQL> exec DBMS_STATS.GATHER_TABLE_STATS('SAPSR3','MYTEST');

SQL> select TABLE_NAME, BLOCKS FROM DBA_TAB_STATISTICS where table_name = 'MYTEST';
SQL> select INDEX_NAME, LEAF_BLOCKS from DBA_IND_STATISTICS where table_name = 'MYTEST';
SQL> select TABLE_NAME, BLOCKS FROM DBA_TAB_PENDING_STATS where table_name = 'MYTEST';
SQL> select INDEX_NAME, LEAF_BLOCKS from DBA_IND_PENDING_STATS where table_name = 'MYTEST';


SQL> exec dbms_stats.set_table_prefs('SAPSR3','MYTEST','PUBLISH', 'false');

SQL> select dbms_stats.get_prefs('PUBLISH', 'SH', 'SALES' ) FROM DUAL;


SQL> exec DBMS_STATS.SET_TABLE_STATS('SAPSR3','MYTEST',numblks=>'5000');
SQL> exec DBMS_STATS.SET_INDEX_STATS('SAPSR3','MYTEST_I',numlblks=> '5000');
EXEC DBMS_STATS.GATHER_TABLE_STATS('SH','SALES',METHOD_OPT => 'FOR ALL COLUMNS SIZE AUTO');



SQL> select TABLE_NAME, BLOCKS FROM DBA_TAB_STATISTICS where table_name = 'MYTEST';
SQL> select INDEX_NAME, LEAF_BLOCKS from DBA_IND_STATISTICS where table_name = 'MYTEST';
SQL> select TABLE_NAME, BLOCKS FROM DBA_TAB_PENDING_STATS where table_name = 'MYTEST';
SQL> select INDEX_NAME, LEAF_BLOCKS from DBA_IND_PENDING_STATS where table_name = 'MYTEST';


alter session set optimizer_use_pending_statistics = true;

Execute/ test / check plan   your workload from the same session.


exec dbms_stats.publish_pending_stats('SH','CUSTOMERS');
SELECT LAST_ANALYZED FROM DBA_TABLES WHERE TABLE_NAME='SALES';
SELECT COUNT(*) FROM DBA_TAB_PENDING_STATS;
alter session set optimizer_use_pending_statistics=FALSE;


SQL> exec dbms_stats.delete_pending_stats('ARUP','RES');


SQL> select TABLE_NAME, BLOCKS FROM DBA_TAB_STATISTICS where table_name = 'MYTEST';
SQL> select INDEX_NAME, LEAF_BLOCKS from DBA_IND_STATISTICS where table_name = 'MYTEST';
SQL> select TABLE_NAME, BLOCKS FROM DBA_TAB_PENDING_STATS where table_name = 'MYTEST';
SQL>  select INDEX_NAME, LEAF_BLOCKS from DBA_IND_PENDING_STATS where table_name = 'MYTEST';


Note: To analyze the differences between the pending statistics and the current ones, you could
export the pending statistics to your own statistics table and then use the new
DBMS_STATS.DIFF_TABLE_STATS function.






è compare stats :


1)
dbms_stats.diff_table_stats_in_stattab:  Here is an example of the diff_table_stats procedure for statistics that have been saved into a table:

set longchunksize 99999;
-- Compare two sets of statistics previously exported into user stat tables.
-- (Fixed: the package is DBMS_STATS -- "dbms_stat" does not exist.)
select * from table(
   dbms_stats.diff_table_stats_in_stattab( 'SCOTT', 'EMP', 'STAT_TAB_OLD', 'STAT_TAB_NEW'));



2)
dbms_stats.diff_table_stats_in_history:  Here is an example of the diff_table_stats procedure for statistics that were collected at a specific date in history:

set longchunksize 99999;
select *
from table(
   dbms_stats.diff_table_stats_in_history(
      ownname => 'SCOTT',
      tabname => 'EMP',
      time1 => systimestamp,
      time2 => to_timestamp(to_date('2012-04-12','yyyy-mm-dd')),
      pctthreshold => 10));


3)
dbms_stats.diff_table_stats_in_pending:   Here is an example of the diff_table_stats procedure for statistics that are current (or older with the time_stamp argument), compared with statistics that are in a pending state:

set longchunksize 99999;

select *
from table(
   dbms_stats.diff_table_stats_in_pending(
      ownname => 'SCOTT',
      tabname => 'EMP',
      time_stamp => NULL,
      pctthreshold => 10));

Note that the pctthreshold parameter is used to filter-out report data, showing only data that exceeds the threshold value in change, as a percentage.  For example, pctthreshold=10 will only report on statistics that are greater then 10% changed from the baseline statistics.





Compare the current object statistics with the previous ones.
SQL> SELECT *
  2  FROM table(dbms_stats.diff_table_stats_in_history(
  3               ownname => user,
  4               tabname => 't',
  5               time1 => localtimestamp,
  6               time2 => localtimestamp-to_dsinterval('0 00:00:15'),
  7               pctthreshold => 0
  8             ));

STATISTICS DIFFERENCE REPORT FOR:
.................................

TABLE         : T
OWNER         : OPS$CHA
SOURCE A      : Statistics as of 21-SEP-08 06.39.37.597595 PM +02:00
SOURCE B      : Statistics as of 21-SEP-08 06.39.22.597595 PM +02:00
PCTTHRESHOLD  : 0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

TABLE / (SUB)PARTITION STATISTICS DIFFERENCE:
.............................................

OBJECTNAME                  TYP SRC ROWS       BLOCKS     ROWLEN     SAMPSIZE
...............................................................................

T                           T   A   1500000    40091      183        500000
                                B   500000     8614       116        500000
T_FEB_2008                  P   A   1000000    31477      216        1000000
                                B   NO_STATS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

COLUMN STATISTICS DIFFERENCE:
.............................

COLUMN_NAME     SRC NDV     DENSITY    HIST NULLS   LEN  MIN   MAX   SAMPSIZ
...............................................................................

D               A   31      .032258064 NO   0       8    786C0 786C0 1500000
                B   31      .032258064 NO   0       8    786C0 786C0 500000
ID              A   500000  .000002    NO   0       6    C4020 C404  1500000
                B   500000  .000002    NO   0       6    C4020 C4023 500000
N               A   11      .000001998 YES  749500  2    80    C10C  750500
                B   11      .000001998 YES  249750  2    80    C10B  250250
PAD             A   1       1          NO   0       168  2A2A2 2A2A2 1500000
                B   1       1          NO   0       101  2A2A2 2A2A2 500000

                              PARTITION: T_FEB_2008
                              .....................

D               A   29      .034482758 NO   0       8    786C0 786C0 1000000
                B   NO_STATS
ID              A   1000000 .000001    NO   0       6    C4030 C404  1000000
                B   NO_STATS
N               A   12      .000000999 YES  499750  2    80    C10C  500250
                B   NO_STATS
PAD             A   1       1          NO   0       201  2A2A2 2A2A2 1000000
                B   NO_STATS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

INDEX / (SUB)PARTITION STATISTICS DIFFERENCE:
.............................................

OBJECTNAME      TYP SRC ROWS    LEAFBLK DISTKEY LF/KY DB/KY CLF     LVL SAMPSIZ
...............................................................................

                                    INDEX: I
                                    ........

I               I   A   750500  1221    12      101   3332  39991   2   750500
                    B   250250  407     11      37    778   8565    1   250250
T_FEB_2008      P   A   500250  814     12      67    2618  31426   2   500250
                    B   NO_STATS





è stale statistics :


-->  statistics are considered to be stale when there is a 10% change
--> Following a data-modification, there may be a few minutes delay while Oracle propagates the information to this view. Use     the DBMS_STATS. FLUSH_DATABASE_MONITORING_INFO procedure to immediately reflect the outstanding monitored information kept     in the memory.

SQL> EXEC DBMS_STATS.FLUSH_DATABASE_MONITORING_INFO();

exec dbms_stats.flush_database_monitoring_info;
select * from sys.dba_tab_modifications where table_name = 'T_STAT';
select stale_stats from sys.dba_tab_statistics where table_name = 'T_STAT';


-- Summarise DML activity (inserts/updates/deletes) recorded against a table
-- since it was last analyzed; rows only show up after the monitoring info has
-- been flushed (DBMS_STATS.FLUSH_DATABASE_MONITORING_INFO).
-- Replace <your table name> with a quoted table name before running.
SELECT t.table_name,
           last_analyzed,
           SUM(inserts),
           SUM(updates),
           SUM(deletes)
      FROM user_tables t,
           user_tab_modifications m
     WHERE t.table_name = m.table_name
       AND timestamp > last_analyzed
       AND t.table_name = <your table name>
  GROUP BY t.table_name,
           last_analyzed
  /






è lock table and partition  stats :

exec dbms_stats.lock_partition_stats('OKC997','LOCATION_DETAILS','M201202');


lock/unlock stats :
begin
                DBMS_STATS.unlock_table_stats('INFO10','JRL_AG_DIAGRAMME');
                DBMS_STATS.GATHER_TABLE_STATS(
                OwnName        => 'INFO10'
,TabName        => 'JRL_AG_DIAGRAMME'
,Estimate_Percent  => SYS.DBMS_STATS.AUTO_SAMPLE_SIZE
,Degree            => NULL
,Cascade           => TRUE
,No_Invalidate     => FALSE);
                DBMS_STATS.lock_table_stats('INFO10','JRL_AG_DIAGRAMME');
END;
/

exec dbms_stats.lock_partition_stats('OKC997','LOCATION_DETAILS','M201202');




è copy partition stats

exec dbms_stats.copy_table_stats(ownname => 'OKC997',tabname =>'ISA_DETAILS',srcpartname =>'M201201',dstpartname =>'M201202');

exec dbms_stats.copy_table_stats(ownname => 'OKC997',tabname =>'ISA_DETAILS',srcpartname =>'M201201',dstpartname =>'M201202');




è set table stats :

exec dbms_stats.set_table_stats( user, 'MAP', numrows => 100000, numblks => 10000 );

SQL> exec DBMS_STATS.SET_TABLE_STATS('SAPSR3','MYTEST',numblks=>'5000');
SQL> exec DBMS_STATS.SET_INDEX_STATS('SAPSR3','MYTEST_I',numlblks=> '5000');


SQL> exec dbms_stats.set_table_stats('ADAM','SALES',numrows=>100,numblks=>1)
SQL> select num_rows,blocks from user_tables;





è gather partition stats :

exec DBMS_STATS.GATHER_TABLE_STATS('CLM','CLM_FIXED_FORMAT_INFO',estimate_percent=>10,cascade=>TRUE,degree=>12, granularity=>'PARTITION',method_opt=>'for all columns size 1',  partname=>'P201201');

exec DBMS_STATS.GATHER_TABLE_STATS('CLM','CLM_HOME_HEALTH',estimate_percent=>10,cascade=>TRUE,degree=>12, granularity=>'PARTITION',method_opt=>'for all columns size 1',  partname=>'P201201');

exec DBMS_STATS.GATHER_TABLE_STATS('CLM','AFILE',estimate_percent=>10,cascade=>TRUE,degree=>12, granularity=>'PARTITION',method_opt=>'for all columns size 1',  partname=>'P201112' );             





è gather index stats :

Execute DBMS_STATS.GATHER_INDEX_STATS ('HR','LOC_COUNTRY_IX');

Alter index hr.loc_country_ix rebuild compute statistics;

execute dbms_stats.gather_index_stats(ownname=>user, indname=>'i', granularity=>'global')


dbms_output.put_line('Gathering Index Statistics for '||ind.object_name||'.....');
dbms_stats.gather_index_stats('SHEMA1', ind.object_name , estimate_percent=>100);
dbms_output.put_line('Gathering Index Statistics for '||ind.object_name||' is Complete!');
end loop;
end;
/




è table stats backup

select 'exec dbms_stats.create_stat_table(ownname => ' || '''PAYVAR''' || ', STATTAB  =>' || '''PAYVAR_'  || to_char(sysdate,'dd_MON_yyyy') ||  ''');' from dual;

EXEC DBMS_STATS.EXPORT_TABLE_STATS(ownname =>'RDB',tabname => 'D_PAYER', stattab => 'STAT_TAB',cascade => true,statown=>'RDB',statid=>'Feb18');
Statid : use the current date for easy identification


select 'exec dbms_stats.export_schema_stats(ownname => ' || '''PAYVAR''' || ', STATTAB =>' || '''PAYVAR_'  || to_char(sysdate,'dd_MON_yyyy') || ''');' from dual;

EXEC DBMS_STATS.IMPORT_TABLE_STATS(ownname =>'RDB',tabname => 'D_PAYER', stattab => 'STAT_TAB',cascade => true,statid=>'Feb18', no_invalidate => false);

exec dbms_stats.import_schema_stats ( ownname => 'hr' , stattab => 'hr' , statid => 'name_21nov09' , statown => 'own' );




è dictionary and fixed table stats :


EXEC DBMS_STATS.gather_dictionary_stats;


Identify if data dictionary statistics have been collected:
If there are performance problems due to large data dictionary access, it is advisable to check if data dictionary statistics have been collected. It can be easily displayed by querying data dictionary tables belonging to sys.

select table_name, last_analyzed
from dba_tables where owner='SYS'
and table_name='FET$';

This other query is useful to know how many sys tables have statistics:
select distinct trunc(last_analyzed), count(*)
from dba_tables
where owner='SYS'
group by trunc(last_analyzed)
order by 1 ;



Fixed object stats :

EXEC DBMS_STATS.gather_fixed_objects_stats;
execute  dbms_stats.gather_fixed_objects_stats();

It is important to make certain that the fixed object statistics are collected.  If the statistics are not collected, there is a strong chance that some queries of Oracle Database views will be slow, or even result in the crashing of the user’s session




to check if fixed object stats is not gathered :
-------------------------------------------------
select * from tab_stats$;






è database stats :

Exec dbms_stats.gather_database_stats(dbms_stats.auto_sample_size,method_opt => 'FOR ALL INDEXED COLUMNS SIZE AUTO',cascade=>TRUE);


sqlplus -s /nolog << EOF
   connect system/$PWD@$TNS
   set feedback on
   set timing on
   set echo on

   BEGIN
     DBMS_STATS.GATHER_DATABASE_STATS(
       ESTIMATE_PERCENT=>DBMS_STATS.AUTO_SAMPLE_SIZE,
       OPTIONS=>'GATHER STALE',
       METHOD_OPT=>'FOR ALL COLUMNS SIZE REPEAT',
       DEGREE=>4
    );
   END;
/
   BEGIN
     DBMS_STATS.GATHER_DATABASE_STATS(
       ESTIMATE_PERCENT=>DBMS_STATS.AUTO_SAMPLE_SIZE,
       OPTIONS=>'GATHER EMPTY',
       METHOD_OPT=>'FOR ALL COLUMNS SIZE REPEAT',
       DEGREE=>4
    );
   END;
/



To gather the dictionary stats, execute one of following:-
SQL> EXEC DBMS_STATS.GATHER_SCHEMA_STATS ('SYS');
SQL> exec DBMS_STATS.GATHER_DATABASE_STATS (gather_sys=>TRUE);
SQL> EXEC DBMS_STATS.GATHER_DICTIONARY_STATS





è system stats ( workload / non-workload ) :

System stats value :
exec   dbms_stats.gather_system_stats('START')
exec   dbms_stats.gather_system_stats('STOP')


select sname,pname,  pval1 , pval2 from   sys.aux_stats$  where  sname like 'SYSSTATS%' ;


-- Estimate multiblock read throughput from workload system statistics:
-- MBRC (blocks per multiblock read) * db_block_size (bytes) = bytes per
-- multiblock read; divided by MREADTIM (ms per multiblock read) and by 1024
-- gives an approximate transfer rate (KB per ms, i.e. roughly MB/s).
-- Returns NULL unless workload stats (MBRC/MREADTIM) have been gathered.
select (select pval1
         from   sys.aux_stats$
          where  sname = 'SYSSTATS_MAIN' and
                 pname = 'MBRC') *
       (select to_number(value) block_size
         from   v$parameter
         where  name = 'db_block_size') /
      (select pval1
        from   sys.aux_stats$
         where  sname = 'SYSSTATS_MAIN' and
                pname = 'MREADTIM')
         /1024 sys_stat_mread_speed
from dual
  /



  
#########################  workload stats ###################

Gathering Workload Statistics   :::::::::::::::::::

To gather workload statistics, either:

Run the dbms_stats.gather_system_stats('start') procedure at the beginning of the workload window, then the dbms_stats.gather_system_stats('stop') procedure at the end of the workload window.
Run dbms_stats.gather_system_stats('interval', interval=>N) where N is the number of minutes when statistics gathering will be stopped automatically.
To delete system statistics, run dbms_stats.delete_system_stats(). Workload statistics will be deleted and reset to the default noworkload statistics.



Workload statistics, this type of statistic takes into account the actual load on the system at a certain time interval. To collect this type of statistics gathering  mode INTERVAL  or  START and STOP is used by procedure DBMS_STATS.GATHER_SYSTEM_STATS.

SQL> begin
  2  sys.dbms_stats.gather_system_stats(
  3                                     gathering_mode=>'INTERVAL',
  4                                     interval=>720
  5                                    );
  6  end;
  7  /


SQL> exec sys.dbms_stats.gather_system_stats('START');
-- After some time
SQL> exec sys.dbms_stats.gather_system_stats('STOP');


mbrc
mreadtim
sreadtim
cpuspeed
maxthr
slavethr



###################  noworkload stats #################



Optimizer system statistics parameters computed by Noworkload collection are

cpuspeedNW
ioseektim
iotfrspeed


SQL> exec sys.dbms_stats.gather_system_stats();




è upgrade stats table :

exec dbms_stats.upgrade_stat_table('SYS','STAT_TABLE');

GIS_ADMIN
GIS_CC
SYS



SQL> BEGIN dbms_stats.export_schema_stats('GIS_ADMIN','STAT_TABLE'); EN
D;

*
ERROR at line 1:
ORA-20002: Version of statistics table GIS_ADMIN.STAT_TABLE is too old.  Please
try upgrading it with dbms_stats.upgrade_stat_table
ORA-06512: at "SYS.DBMS_STATS", line 11271
ORA-06512: at "SYS.DBMS_STATS", line 11288
ORA-06512: at "SYS.DBMS_STATS", line 12222
ORA-06512: at line 1


Elapsed: 00:00:06.09
06:00:08 SQL> BEGIN dbms_stats.export_schema_stats('GIS_CC','STAT_TABLE'); END;

*
ERROR at line 1:
ORA-20002: Version of statistics table GIS_CC.STAT_TABLE is too old.  Please
try upgrading it with dbms_stats.upgrade_stat_table
ORA-06512: at "SYS.DBMS_STATS", line 11271
ORA-06512: at "SYS.DBMS_STATS", line 11288
ORA-06512: at "SYS.DBMS_STATS", line 12222
ORA-06512: at line 1


Elapsed: 00:00:02.20
06:00:10 SQL> BEGIN dbms_stats.export_dictionary_stats('STAT_TABLE'); END;

*
ERROR at line 1:
ORA-20002: Version of statistics table SYS.STAT_TABLE is too old.  Please try
upgrading it with dbms_stats.upgrade_stat_table
ORA-06512: at "SYS.DBMS_STATS", line 11271
ORA-06512: at "SYS.DBMS_STATS", line 11288
ORA-06512: at "SYS.DBMS_STATS", line 26704
ORA-06512: at line 1


Elapsed: 00:00:01.93

  

è schema stats :

EXECUTE SYS.DBMS_STATS.GATHER_SCHEMA_STATS (OWNNAME=>'COND_DBA',ESTIMATE_PERCENT=>100, DEGREE=>4, CASCADE=>TRUE);

exec dbms_stats.gather_schema_stats
(ownname =>'DATA_FR',estimate_percent =>100,method_opt=>'for all indexed columns size 1',CASCADE=>TRUE,degree=>4);

EXEC DBMS_UTILITY.analyze_schema('SCOTT','ESTIMATE', estimate_percent => 15);
exec dbms_utility.analyze_schema('PIDB','COMPUTE');


exec dbms_stats.gather_schema_stats('MUREXSBY',estimate_percent=>100,method_opt=>'FOR ALL COLUMNS size 1',options=>'GATHER');
EXEC DBMS_STATS.gather_schema_stats('CP',DBMS_STATS.AUTO_SAMPLE_SIZE,degree=>8 ,cascade=>TRUE,granularity =>'ALL');
EXEC DBMS_STATS.delete_schema_stats('SCOTT');

set lines 160
set pages 0
set echo off
set feedback off
spool gather_schema_stat_tn.sql
select 'EXEC DBMS_STATS.GATHER_SCHEMA_STATS('''||USERNAME||''',ESTIMATE_PERCENT=>10,METHOD_OPT=>''FOR ALL COLUMNS SIZE REPEAT'',CASCADE=>TRUE)'
from dba_users where username not in ('SYS','SYSTEM','SNAPXIUS','OUTLN','DBSNMP','WMSYS','ORDSYS','ORDPLUGINS','MDSYS','CTXSYS','XDB','ANONYMOUS','WKSYS','WKPROXY','ODM','ODM_MTR','OLAPSYS')
/
spool off
set echo on
set pages 100
set timing on
set time on
set feedback on
@gather_schema_stat_tn.sql
--exit


exec dbms_utility.analyze_schema(schema=>'&schema_owner',method=>'&method', estimate_rows=>&est_rows, estimate_percent=>&est_pct);

  


 è Restoring old stats gathered by Oracle from 10g


Old versions of stats are saved automatically in 10g before gathering new stats,
so we can restore old stats using the steps below. Use DBA_OPTSTAT_OPERATIONS and DBA_TAB_STATS_HISTORY to see stats history.

exec dbms_stats.restore_table_stats({owner} ,{table name}, {timestamp});


alter session set NLS_TIMESTAMP_TZ_FORMAT='yyyy-mm-dd:hh24:mi:ss';
select table_name,stats_update_time from user_tab_stats_history;

begin
dbms_stats.restore_table_stats('ADAM','SALES',
to_timestamp('2010-05-18:09:47:38','yyyy-mm-dd:hh24:mi:ss'));
end;
/

alter system flush shared_pool;

Lets get more into Sql Plans



                                        Lets get more into Sql Plans 





Ÿ Re-analyze the schema statistics (done automatically, starting in 10g) – auto stats
Ÿ  object not analyzed
Ÿ cursor got invalidated
Ÿ  bind sensitive
Ÿ database parameter change: changing an optimizer parameter (optimizer_mode, optimizer_index_cost_adj)
Ÿ shared pool flush ::: new plan generation
Index got invalidated

Enabling dynamic sampling
Enabling parallelism






>>>>>>>>>>>>>>>>>>>>>>>>>>>> session tracing

•If you only know the package, enable trace 10046 before execute the package
1.Example:
2.SQLPLUS>alter session set events '10046 trace name context forever, level 12';
3.SQLPLUS>exec PKG_BL_RCR_REV_FACT.SP_BL_RCR_REV
4.After done, go to the "user_dump_dest" and get the trace file.
5.OS> cd /u01/app/oracle/diag/rdbms/dwdb/DWDB1/trace
6.OS> tkprof tracefile_name outputfile_name explain=hwdw/password waits=y



>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Using hints


/*+ gather_plan_statistics */

SELECT /*+ gather_plan_statistics */ COUNT(*)   FROM   x;

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>  cursor


Set long 20000000
Set pagesize 0
Set linesize 200
set linesize 121

select sql_id from v$session where sid=112;
SELECT * FROM TABLE(dbms_xplan.display_cursor('sql_id',1));
select * from table(dbms_xplan.display_cursor);  -- own session where query was run
SELECT * FROM TABLE(dbms_xplan.display_cursor('gpv3kb4n2f2q1'));
SELECT * FROM TABLE(dbms_xplan.display_cursor(NULL, NULL, 'last allstats'));


SQL> set linesize 150
SQL> set pagesize 2000
SQL> select * from TABLE(dbms_xplan.display_cursor('&SQL_ID', &CHILD_number,'ALL'));

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>  v$sql_plan


col PLAN FOR a150
 SELECT
     RPAD('(' || p.plan_line_ID || ' ' || NVL(p.plan_parent_id,'0') || ')',8) || '|' ||
     RPAD(LPAD (' ', 2*p.plan_DEPTH) || p.plan_operation || ' ' || p.plan_options,60,'.') ||
     NVL2(p.plan_object_owner||p.plan_object_name, '(' || p.plan_object_owner|| '.' || p.plan_object_name || ') ', '') ||
     NVL2(p.plan_COST,'Cost:' || p.plan_COST,'') || ' ' ||
     NVL2(p.plan_bytes||p.plan_CARDINALITY,'(' || p.plan_bytes || ' bytes, ' || p.plan_CARDINALITY || ' rows)','') || ' ' ||
     NVL2(p.plan_partition_start || p.plan_partition_stop,' PStart:' ||  p.plan_partition_start || ' PStop:' || p.plan_partition_stop,'') ||
     NVL2(p.plan_time, p.plan_time || '(s)','') AS PLAN
     FROM v$sql_plan_monitor p
     WHERE sql_id = '81c1xvnmsyr64'
     AND sql_exec_id = 16777270
     AND sql_exec_start=TO_DATE('11-jan-2012 05:59:27','dd-mon-yyyy hh24:mi:ss')
     ORDER BY p.plan_line_id, p.plan_parent_id;



>>>>>>>>>>>>>>>>>>>>>> explain plan for



EXPLAIN PLAN   
1) Explain plan displays execution plan(sequence of operation) chosen by oracle optimizer
To run statement of select , insert , update ,delete.


2)
  CREATING EXPLAIN TABLE  - UTLXPLAN.SQL 
  HIDE PARALLEL QUERY INFORMATION – UTLXPLS.SQL
     (TO QUERY THE PLAN_TABLE)
  SHOW PARALLEL QUERY INFORMATION – UTLXPLP.SQL
      (TO QUERY THE PLAN_TABLE)

3)
 EXPLAIN Plan ****INTO PLAN_TABLE  SET STATEMENT_ID = '    ' ******** FOR     SELECT ……………


NOTE : THE EXECUTION PLAN FOR EVERY SQL STATEMEN IS GENERATED AND STORED BY DEFAULT IN   V$SQL_PLAN. BUT THE EXECUTION   PLAN IS STORED ONLY TILL THE STATEMENT IS IN LIBRARY CACHE.


select * from table(dbms_xplan.Display);
select plan_table_output from table(dbms_xplan.display()) ;

select plan_table_output
   from TABLE( dbms_xplan.display
               ( 'PLAN_TABLE',
                (select rawtohex(address)||'_'||child_number x
                 from v$sql
 where sql_text='select * from t t1 where object_id > 32000' ),
                 'serial' ) );


>>>>>>>>>>>>>>>>>>>>>>>>>>>> awr ( 11g.. will list all sql plans . historical )

Ÿ check from dba_hist_sqltext if sqlid  has been AWR. If its not loaded create a manual snapshot


Ø  exec dbms_workload_repository.modify_snapshot_settings(topnsql=>'MAXIMUM');
Ø  exec dbms_workload_repository.create_snapshot();
Ø  exec dbms_workload_repository.modify_snapshot_settings(topnsql=>'DEFAULT');
Ø  select sql_id,sql_text from dba_hist_sqltext where sql_id='232xssd22232';


 select * from TABLE(DBMS_XPLAN.DISPLAY_AWR('&sqlid'));
 select * from TABLE(DBMS_XPLAN.DISPLAY_AWR('&sqlid','plan_hash_value'));


>>>>>>>>>>>>>>>>>>>>>>>>>>>> set auto trace


SQL*PLUS AUTOTRACE / Explain plan
1)         RUN SQLPLUS / ADMIN / PLUSTRCE.SQL  -- creates the PLUSTRACE role
2)         GRANT PLUSTRACE TO SCOTT
3)         SET AUTOTRACE         ON / OFF / TRACEONLY
                              ON EXPLAIN / ON STATISTICS

Set autotrace traceonly explain statistics

Note    :  TO SET AUTO TRACE WE MUST HAVE PLAN_TABLE AND PLUSTRACE ROLE.







1)
select SQL_TEXT, hash_value from V$sqltext where ADDRESS='0700000B0BDC7B60' order by piece;


pause this query shows columns useful in Parallel Query or distributed query
col id for 99
col Par for 99
col pos for 99
col "Table Queue" for a10
col Operation format a24
col Options   format a12
col Object    format a12
col "Row Source" for a10
col "Inter Step SQL" for a16
col "Parallel Oper Type" for a20
col "Row Dist" for a11
set long 500
pause Position has total cost in row when ID=0 if CBO is used
-- Indented plan dump from V$SQL_PLAN for one cursor, identified by its
-- ADDRESS (&addr). OTHER_TAG / DISTRIBUTION / OTHER expose parallel-query
-- and distributed-query steps; POSITION on the ID=0 row carries the total
-- CBO cost (see the pause message above).
select id "Id",
       parent_id "Par",
       position "Pos",
       lpad(' ', 2*LEVEL) || OPERATION ||
        decode(ID, 0, ' Cost='||POSITION) "Operation",
       OPTIONS "Options",
       OBJECT_NAME "Object",
       OBJECT_NODE "Table Queue",
       other_tag "Parallel Oper Type",
       distribution "Row Dist",
       other "Inter Step SQL"
from v$sql_plan
       where address = '&addr';




-- Text-mode optimizer-plan report driven by &hash_value: each v$sql_plan
-- row is rendered as one fixed-width line
-- (operation | PHV/object name | rows | bytes | cost) between dashed rules.
-- Rows/bytes/cost are abbreviated with K/M/G suffixes via nested DECODEs.
-- NOTE(review): the ORDER BY sits inside the inline view so the framing
-- dashed rules stay first/last in the UNION ALL output; confirm the
-- /*+ RULE */ hint is still wanted on current database versions.
2)select '--------------------------------------------------------------------------------' from dual
union all
select '| Operation                      | PHV/Object Name     |  Rows | Bytes|   Cost |'  as "Optimizer Plan:"
from dual
union all
select '--------------------------------------------------------------------------------' from dual
union all
select /*+ RULE */ *
  from (select
       rpad('|'||substr(lpad(' ',1*(depth-1))||operation||
            decode(options, null,'',' '||options), 1, 32), 33, ' ')||'|'||
       rpad(decode(id, 0, '----- '||to_char(hash_value)||' -----'
                     , substr(decode(substr(object_name, 1, 7), 'SYS_LE_', null, object_name)
                       ||' ',1, 20)), 21, ' ')||'|'||
       lpad(decode(cardinality,null,'  ',
                decode(sign(cardinality-1000), -1, cardinality||' ',
                decode(sign(cardinality-1000000), -1, trunc(cardinality/1000)||'K',
                decode(sign(cardinality-1000000000), -1, trunc(cardinality/1000000)||'M',
                       trunc(cardinality/1000000000)||'G')))), 7, ' ') || '|' ||
       lpad(decode(bytes,null,' ',
                decode(sign(bytes-1024), -1, bytes||' ',
                decode(sign(bytes-1048576), -1, trunc(bytes/1024)||'K',
                decode(sign(bytes-1073741824), -1, trunc(bytes/1048576)||'M',
                       trunc(bytes/1073741824)||'G')))), 6, ' ') || '|' ||
       lpad(decode(cost,null,' ',
                decode(sign(cost-10000000), -1, cost||' ',
                decode(sign(cost-1000000000), -1, trunc(cost/1000000)||'M',
                       trunc(cost/1000000000)||'G'))), 8, ' ') || '|' as "Explain plan"
          from v$sql_plan
         where hash_value= nvl('&hash_value','0')
         order by id
)
union all
select '--------------------------------------------------------------------------------' from dual;
set verify on
set heading on
undefine hash_value
-- NOTE(review): duplicate "set heading on" kept as-is (harmless repeat).
set heading on;




è Historical Sql plan  :

-- Historical statistics from AWR (last 60 days) for one sql_id + plan:
-- joins DBA_HIST_SQLSTAT to DBA_HIST_SQLTEXT and DBA_HIST_SNAPSHOT on dbid,
-- instance_number and snap_id so each delta lines up with its interval.
-- FIX: the original compared UPPER(STAT.SQL_ID) = '&SQL_ID'; sql_id values
-- are lowercase strings (e.g. 'bpkgrf2awmhfc'), so a pasted sql_id could
-- never match. The user input is lowercased instead.
SELECT STAT.SQL_ID,
       SQL_TEXT,
       PLAN_HASH_VALUE,
       PARSING_SCHEMA_NAME,
       ELAPSED_TIME_DELTA,
       STAT.SNAP_ID,
       SS.END_INTERVAL_TIME
FROM   DBA_HIST_SQLSTAT STAT,
       DBA_HIST_SQLTEXT TXT,
       DBA_HIST_SNAPSHOT SS
WHERE  STAT.SQL_ID = TXT.SQL_ID
AND    STAT.DBID = TXT.DBID
AND    SS.DBID = STAT.DBID
AND    SS.INSTANCE_NUMBER = STAT.INSTANCE_NUMBER
AND    STAT.SNAP_ID = SS.SNAP_ID
AND    SS.BEGIN_INTERVAL_TIME >= sysdate - 60
AND    STAT.SQL_ID = LOWER('&SQL_ID')
AND    PLAN_HASH_VALUE = &PLAN_HASH
ORDER BY ELAPSED_TIME_DELTA DESC;


-- Snapshot-by-snapshot plan/optimizer info for one sql_id over the
-- last 8 days, newest snapshot first.
select snap.begin_interval_time,
       snap.end_interval_time,
       stat.snap_id,
       stat.dbid,
       stat.sql_id,
       stat.plan_hash_value,
       stat.optimizer_cost,
       stat.optimizer_mode
  from dba_hist_sqlstat  stat,
       dba_hist_snapshot snap
 where stat.sql_id = 'bpkgrf2awmhfc'
   and stat.snap_id = snap.snap_id
   and snap.begin_interval_time between sysdate - 8 and sysdate
 order by snap.snap_id desc;






select sql_id, sql_text from dba_hist_sqltext where upper(sql_text) like '%CUSTOMERS%';


-- Show every AWR-captured plan (format 'ALL') for the sql_id passed as
-- script parameter 1.
-- FIX: the substitution variable had been HTML-escaped to '&amp;1', which
-- SQL*Plus would not substitute; also added the missing terminator.
SELECT tf.*
FROM   DBA_HIST_SQLTEXT ht,
       TABLE(DBMS_XPLAN.DISPLAY_AWR(ht.sql_id, NULL, NULL, 'ALL')) tf
WHERE  ht.sql_id = '&1';





1) select DBID, SQL_ID, SQL_TEXT from dba_hist_sqltext where sql_text like '%your query%';

-->> or see all available sql

-- Bind the two AWR snapshot ids to compare.
variable snap_id_1 number;
variable snap_id_2 number;
exec :snap_id_1:=789;
exec :snap_id_2:=786;
-- Self-join of DBA_HIST_SQLSTAT at the two snapshots: lists every sql_id
-- whose plan_hash_value differs between them (i.e. the plan changed).
select lhs.sql_id,
       lhs.plan_hash_value snap_id_1_plan,
       rhs.plan_hash_value snap_id_2_plan
  from dba_hist_sqlstat lhs,
       dba_hist_sqlstat rhs
 where lhs.snap_id = :snap_id_1
   and rhs.snap_id = :snap_id_2
   and lhs.sql_id = rhs.sql_id
   and lhs.plan_hash_value != rhs.plan_hash_value
 order by lhs.sql_id;


select * from V$SQL_SHARED_CURSOR where sql_id='32927hsaftay0';


--> see sql whose plan has changed

-- For every sql_id with more than one distinct plan_hash_value in AWR,
-- show each plan together with the first snapshot (and its begin time)
-- in which that plan appeared.
--   innermost : analytic COUNT of distinct plans per sql_id -> a
--   middle    : FIRST_VALUE(snap_id) per (sql_id, plan)     -> f
--   outermost : keep sql_ids with a > 1, resolve f to a date
-- NOTE(review): the scalar subquery assumes snap_id alone is unique in
-- dba_hist_snapshot; with multiple instances/dbids it may return more
-- than one row -- verify before relying on it in RAC.
select distinct sql_id, plan_hash_value, f snapshot,
                (select begin_interval_time
                   from dba_hist_snapshot
                  where snap_id = f) snapdate
           from (select sql_id, plan_hash_value,
                        first_value (snap_id) over (partition by sql_id, plan_hash_value
order by snap_id) f
                   from (select   sql_id, plan_hash_value, snap_id,
                                  count (distinct plan_hash_value) over (partition by
sql_id) a
                             from dba_hist_sqlstat
                            where plan_hash_value > 0
                         order by sql_id)
                  where a > 1)
       order by sql_id, f;


-- List sql_ids that have more than one distinct (non-zero) plan in AWR,
-- with the number of distinct plans for each.
-- FIX: normalized the mixed Title-Case keywords and added the missing
-- statement terminator so the script runs unattended.
select sql_id, count(*)
  from (select distinct sql_id, plan_hash_value
          from dba_hist_sqlstat
         where plan_hash_value <> 0)
 group by sql_id
having count(*) > 1;


2)  get plan hash value :

   select   snap_id, plan_hash_value
    from dba_hist_sqlstat
    where snap_id in (789, 786) and sql_id = 'dcxm4kr1urg93'
    order by snap_id desc;

or

-- Same snapshot-by-snapshot listing, filtered by dbid and sql_id
-- placeholders (replace both before running).
-- FIX: the sql_id literal used curly "smart" quotes, which do not parse
-- in SQL*Plus; replaced with straight single quotes.
select s.begin_interval_time, s.end_interval_time, q.snap_id, q.dbid, q.sql_id,
       q.plan_hash_value, q.optimizer_cost, q.optimizer_mode
  from dba_hist_sqlstat q, dba_hist_snapshot s
 where q.dbid = 1234567890
   and q.sql_id = '12345xxxxxxxx'
   and q.snap_id = s.snap_id
   and s.begin_interval_time between sysdate - 2 and sysdate
 order by s.snap_id desc;


3) compare explain plan of different hash value using below query :

-- Compare the plan steps of one captured plan; run once per plan hash.
-- FIX: curly quotes replaced with straight quotes, the raw
-- <plan_hash_value1> placeholder turned into a substitution variable, and
-- the missing statement terminator added.
select id, operation, options, object_name, cost
  from dba_hist_sql_plan
 where dbid = 1234567890
   and sql_id = '12345xxxxxxxx'
   and plan_hash_value = &plan_hash_value1;


select * from table(dbms_xplan.display_awr('32927hsaftay0',714092912));
select * from table(dbms_xplan.display_awr('dcxm4kr1urg93',1582437882));




set lines 155
col execs for 999,999,999
col avg_etime for 999,999.999
col avg_lio for 999,999,999.9
col begin_interval_time for a30
col node for 99999
-- NOTE(review): startup_time is not selected below, so that part of the
-- break is inert -- confirm whether a gv$instance join was dropped when
-- this script was copied.
break on plan_hash_value on startup_time skip 1
-- Per-snapshot execution history for one sql_id (default b55nnb6ykd2tr):
-- executions, average elapsed seconds and average logical I/O per exec.
-- FIX: the avg_lio divide-by-zero guard was applied to buffer_gets_delta
-- instead of executions_delta (the actual divisor); it now matches the
-- avg_etime expression two lines above.
select ss.snap_id, ss.instance_number node, begin_interval_time, sql_id, plan_hash_value,
nvl(executions_delta,0) execs,
(elapsed_time_delta/decode(nvl(executions_delta,0),0,1,executions_delta))/1000000
avg_etime,
(buffer_gets_delta/decode(nvl(executions_delta,0),0,1,executions_delta)) avg_lio
from DBA_HIST_SQLSTAT S, DBA_HIST_SNAPSHOT SS
where sql_id = nvl('&sql_id','b55nnb6ykd2tr')
and ss.snap_id = S.snap_id
and ss.instance_number = S.instance_number
and executions_delta > 0
order by 1, 2, 3
/









SPOOL coe_gen_sql_profile.log;
REM NOTE(review): the SET command below appears wrapped mid-command
REM ("LONGC" / "800000 TRIMS ..."); this looks like a copy/paste line
REM break -- confirm against the original script from MOS note 215187.1.
SET DEF ON TERM OFF ECHO ON FEED OFF VER OFF HEA ON LIN 2000 PAGES 100 LONG 8000000 LONGC
800000 TRIMS ON TI OFF TIMI OFF SERVEROUT ON SIZE 1000000 NUMF "" SQLP SQL>;
SET SERVEROUT ON SIZE UNL;
REM
REM $Header: 215187.1 coe_gen_sql_profile.sql 11.4.3.4 2011/07/20 carlos.sierra $
REM
REM Copyright (c) 2000-2011, Oracle Corporation. All rights reserved.
REM
REM AUTHOR
REM carlos.sierra@oracle.com
REM
REM SCRIPT
REM coe_gen_sql_profile.sql
REM
REM DESCRIPTION
REM Inputs one SQL_ID and outputs a set of scripts generated by
REM coe_xfr_sql_profile.sql. One for each known plan of given
REM SQL_ID.
REM
REM PRE-REQUISITES
REM 1. Oracle Tuning Pack license.
REM
REM PARAMETERS
REM 1. SQL_ID (required)
REM
REM EXECUTION
REM 1. Connect into SQL*Plus as SYSDBA or user with access to
REM data dictionary.
REM 2. Execute script coe_gen_sql_profile.sql passing SQL_ID
REM (parameter can be passed inline or until requested).
REM
REM EXAMPLE
REM # sqlplus system
REM SQL> START coe_gen_sql_profile.sql [SQL_ID];
REM SQL> START coe_gen_sql_profile.sql gnjy0mn4y9pbm;
REM SQL> START coe_gen_sql_profile.sql;
REM
REM NOTES
REM 1. For possible errors see coe_gen_sql_profile.log
REM 2. Be aware that using DBMS_SQLTUNE requires a license for
REM Oracle Tuning Pack.
REM
SET TERM ON ECHO OFF;
PRO
PRO Parameter 1:
PRO SQL_ID (required)
PRO
DEF sql_id0 = '&1';
PRO
SET TERM OFF;
PRO
PRO Plans from GV$SQL
PRO ~~~~~~~~~~~~~~~~~

REM Per-plan averages (per execution) for plans still in the cursor cache.
SELECT plan_hash_value,
ROUND(SUM(elapsed_time)/SUM(executions)/1e6, 3) avg_et_secs,
ROUND(SUM(cpu_time)/SUM(executions)/1e6, 3) avg_cpu_secs,
ROUND(SUM(user_io_wait_time)/SUM(executions)/1e6, 3) avg_io_secs,
ROUND(SUM(buffer_gets)/SUM(executions)) avg_buffer_gets,
ROUND(SUM(disk_reads)/SUM(executions)) avg_disk_reads,
ROUND(SUM(rows_processed)/SUM(executions)) avg_rows,
SUM(executions) executions
FROM gv$sql
WHERE sql_id = TRIM('&&sql_id0.')
AND executions > 0
GROUP BY
plan_hash_value
ORDER BY
2, 3;
PRO
PRO
PRO Plans from DBA_HIST_SQLSTAT
PRO ~~~~~~~~~~~~~~~~~~~~~~~~~~~

REM Same per-plan averages, from AWR history (the *_total columns).
SELECT plan_hash_value,
ROUND(SUM(elapsed_time_total)/SUM(executions_total)/1e6, 3) avg_et_secs,
ROUND(SUM(cpu_time_total)/SUM(executions_total)/1e6, 3) avg_cpu_secs,
ROUND(SUM(iowait_total)/SUM(executions_total)/1e6, 3) avg_io_secs,
ROUND(SUM(buffer_gets_total)/SUM(executions_total)) avg_buffer_gets,
ROUND(SUM(disk_reads_total)/SUM(executions_total)) avg_disk_reads,
ROUND(SUM(rows_processed_total)/SUM(executions_total)) avg_rows,
SUM(executions_total) executions
FROM dba_hist_sqlstat
WHERE sql_id = TRIM('&&sql_id0.')
AND executions_total > 0
GROUP BY
plan_hash_value
ORDER BY
2, 3;
PRO
PRO
PRO Plans from GV$SQL and/or DBA_HIST_SQLSTAT
PRO ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

REM For every known plan of the sql_id (cursor cache or AWR) that carries
REM an outline in other_xml, print one coe_xfr_sql_profile.sql call; the
REM printed calls are spooled into coe_gen_sql_profile_<sql_id>.sql below
REM and then executed.
BEGIN
FOR i IN (WITH
-- p: distinct plans that have a reproducible outline (other_xml)
p AS (
SELECT plan_hash_value
FROM gv$sql_plan
WHERE sql_id = TRIM('&&sql_id0.')
AND other_xml IS NOT NULL
UNION
SELECT plan_hash_value
FROM dba_hist_sql_plan
WHERE sql_id = TRIM('&&sql_id0.')
AND other_xml IS NOT NULL ),
-- m: average elapsed time per execution from the cursor cache
m AS (
SELECT plan_hash_value,
SUM(elapsed_time)/SUM(executions) avg_et_secs
FROM gv$sql
WHERE sql_id = TRIM('&&sql_id0.')
AND executions > 0
GROUP BY
plan_hash_value ),
-- a: average elapsed time per execution from AWR
a AS (
SELECT plan_hash_value,
SUM(elapsed_time_total)/SUM(executions_total) avg_et_secs
FROM dba_hist_sqlstat
WHERE sql_id = TRIM('&&sql_id0.')
AND executions_total > 0
GROUP BY
plan_hash_value )
SELECT p.plan_hash_value,
ROUND(NVL(m.avg_et_secs, a.avg_et_secs)/1e6, 3) avg_et_secs
FROM p, m, a
WHERE p.plan_hash_value = m.plan_hash_value(+)
AND p.plan_hash_value = a.plan_hash_value(+)
ORDER BY
avg_et_secs NULLS LAST)
LOOP
DBMS_OUTPUT.PUT_LINE('-- phv:'||i.plan_hash_value||' avg_et_secs:'||i.avg_et_secs);
DBMS_OUTPUT.PUT_LINE('@@coe_xfr_sql_profile.sql &&sql_id0. '||i.plan_hash_value||';');
END LOOP;
END;
/
SPO coe_gen_sql_profile_&&sql_id0..sql;
/
SPO OFF;
@coe_gen_sql_profile_&&sql_id0..sql;
REM NOTE(review): the HOS zip command below also appears wrapped across
REM two lines -- it must be a single OS command line to work; confirm
REM against the original 215187.1 script.
HOS zip -mT coe_gen_sql_profile_&&sql_id0 coe_gen_sql_profile.log coe_xfr_sql_profile.log
coe_gen_sql_profile_&&sql_id0..sql coe_xfr_sql_profile_&&sql_id0._*.sql
HOS zip -d coe_gen_sql_profile_&&sql_id0 coe_xfr_sql_profile.log
SET TERM ON;
HOS unzip -l coe_gen_sql_profile_&&sql_id0
UNDEFINE 1 sql_id0
CL COL
PRO
PRO coe_gen_sql_profile completed.






-------------------------------------------------------------------------------------------
Cost =  (#SRds * sreadtim +
         #MRds * mreadtim +
         #CPUCycles / cpuspeed ) / sreadtim

where:
#SRDs is the number of single block reads
#MRDs is the number of multi block reads
#CPUCycles is the number of CPU Cycles *)
sreadtim is the single block read time
mreadtim is the multi block read time
cpuspeed is the CPU cycles per second

CPUCycles includes CPU cost of query processing (pure CPU cost) and CPU cost of data retrieval (CPU cost of the buffer cache get).


assume that our average disk I/O delay is 15 milliseconds. 
A “cost” of 4 would mean that the optimizer expects the query to take roughly
60 milliseconds to complete.


-------------------------------------------------------------------
| Id  | Operation                    | Name    | Rows  | Bytes | Cost (%CPU)| Time     |
--------------------------------------------------------------------
|   0 | SELECT STATEMENT             |         |     1 |    57 |     4   (0)| 00:00:01 |
|   1 |  NESTED LOOPS                |         |     1 |    57 |     4   (0)| 00:00:01 |
|*  2 |   TABLE ACCESS FULL          | EMP     |     1 |    37 |     3   (0)| 00:00:01 |
|   3 |   TABLE ACCESS BY INDEX ROWID| DEPT    |     1 |    20 |     1   (0)| 00:00:01 |
|*  4 |    INDEX UNIQUE SCAN         | PK_DEPT |     1 |       |     0   (0)| 00:00:01 |
--------------------------------------------------------------------


cost = number of single block reads
card = number of rows






##########################  11g   Automatic Plan Capture  #############################

When automatic plan capture is enabled, the system automatically creates and maintains the plan history for SQL statements using information provided by the optimizer. The plan history will include relevant information used by the optimizer to reproduce an execution plan, such as the SQL text, outline, bind variables, and compilation environment.

The initial plan generated for a SQL statement is marked as accepted for use by the optimizer, and represents both the plan history and  the SQL plan baseline. All subsequent plans will be included in the plan history, and those plans that are verified not to cause  performance regressions will be added to the SQL plan baseline during the SQL plan baseline evolution phase.

To enable automatic plan capture, set the initialization parameter OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES to TRUE (but see the caution below on why it is usually left FALSE).


There’s lot more to it than just the parameter. There’s something called optimizer_capture_sql_plan_baselines which is FALSE by default  and should stay FALSE, else we have no control over the plans. Also, If its’ set to TRUE, any repeating SQL would have its plan saved as a base lines.
Secondly, we have to load the proven plans manually using any of the following methods.

From cursor cache (using sqlid in the sqlarea)
creating a SQL tuning set and loading baselines from it. This gives the flexibility of adding and removing the SQLs from the tuning set.

From AWR as well.  You can use DBMS_SQLPLAN & DBMS_SPM packages. You can go through tuning guide or SQL pl/sql package reference for examples as to how to
use the procedures inside the packages.

You can also use OEM. Anything you do, basic thing is that the PLAN has to be the best one and that you’d want to lock in which turns out  to be a baseline that can be seen in the views like dba_sql_plan_baselines as fixed or enabled or whatever

Automatic plan capture will not occur for a statement if a stored outline exists for it and is enabled and the parameter use_stored_outlines is TRUE. In this case, turn on incremental capture of plans into an STS using the function capture_cursor_cache_sqlset() in the DBMS_SQLTUNE package. After you have collected the plans for your workload into the STS, manually create SQL plan baselines using the method described earlier. Then, disable the stored outlines or set use_stored_outlines to FALSE. From now on, SPM will manage your workload and stored outlines will not be used for those statements.

OPTIMIZER_USE_SQL_PLAN_BASELINES - must be TRUE


Oracle Performance Monitoring Basic


                                Oracle Performance Monitoring  Basic 


Lately few people have been requesting me to share something on Tuning Concepts .  Basically we might know 100 of  things but we must smartly know when to use them . Though i am still to explore more.. but it will surely help mid level dba's


I always prefer to OEM for everything .. It can be used for lot things we even think of .

If we talk about tuning . we can use OEM for  checking concurrency ,  generating reports ,  checking fragmentation , ruing segment advisory, checking session details and its wait  , historical performance etc.

Also, it is just last year that I realized the importance of using past performance data.

If i have time i will share some of my important sql to share how to check, but in this topic let me share what needs to be checked and what should be your  target areas


Its very crucial for us have all our scripts really handy . Have seen many Dba dying to search proper
script to monitor the very last moment .
Here are area where one should looks for as first steps. It depends whether you are  targeting to
diagnose one session or whole database  performance  :

-- wait event , blocking , multiple plan hash value, check alert logs, check for recent generated  trace files in     dump area, last analyzed , stale stats ,
    fragmentation of table and index ,  check sql plan for fts   ,  check if table was initially analyzed that caused    performance change ,   ofcource from O/S check  resource utilization , O/S error logs, V$resource_limit ,
  check if any backups are running hampering performance ,  check if any smon recovery / rollback is       happening  ,  check for hit ratios, check if session i/o is changing , session last et call ,  check for parallel
operations and long opps ,  check database temp and undo utilization - check highest consumer ,
check archive generation rate ,  ask application guys if any changes went in ,  check listener log ,
check defunct processes on o/s , check the sql_id's historical execution : many times application teams insist a sql was running fine before, but it never did.. so check historical sql performance using dba_hist_sqlstat , try generating a session trace

-- check for database report...  there are lot of reports which 90% dba  dont even know of ..
 ideally people just check awr report... but i prefer  ash report.. specially ashrpti.sql  .. to concentrate  on     one session or sqlid .  some of reports that can help you are,,  ash , ashrpti ,,  awr  sql report, awr compare report, addm ,   awr/statspack ,  sqldiag report-- check on metalink ,



When it comes for recommendation and fixes 

-- if case of multiple plan:  try creating profiles and baselines ,  gathering stats , creating missing index ,
sga recommendation based on hit ratios,   flushing shared pool, purging sqlid ,   restoring old stats -- from 10g ,  try invalidating cursor ,





 In case of Rac : 

For checks : 

-- check resource status , check cluster alert log, daemon logs ,  ocr and voting disk status ,  service status ,  check daemon status ,  check asm logs , o/s logs , 


1) CRS HOME :
   Ÿ  CRS ALERT LOG :  log/<hostname>/alert<nodename>.log
   Ÿ  CRS logs :  log/<hostname>/crsd/  --- > archived every 10mb
   Ÿ  CSS logs : log/<hostname>/cssd/  -- > archived every 20mb
   Ÿ  EVM logs : log/<hostname>/evmd+evm/log
   Ÿ  OPMN logs – opmn/logs
   Ÿ  resource specific logs – log/<hostname>/racg/
   Ÿ  cluster communication logs :  log
2) ORACLE_home :
  Ÿ  resource specific logs :  log/<hostname>/racg
  Ÿ  srvm logs --  log/<hostname>/client
  Ÿ  alert and other trace files : bdump/cdump/udump
  Ÿ  AWR/ statspack/ ash/ addm  of each node
  Ÿ  listener logs .
3)  ASM_logs : 
  Ÿ   alert logs and other trace files  : oracle_home/rdbms/log/ bdump/udump/cdump
4)   /etc/oracle/oprocd/  (node eviction)

*** Oprocd log locations:
In /etc/oracle/oprocd or /var/opt/oracle/oprocd depending on version/platform.
Note that oprocd only runs when no vendor clusterware is running or on Linux > 10.2.0.4




1) SRVM tracing : We enable srvm tracing by setting " export SRVM_TRACE=TRUE " . By doing so we enable tracing for srvctl, cvu, VIPCA and gsdctl .

srvctl config srvpool

2) Debugging the resource as root user :
crsctl debug log module_name component:debugging_level
 module_name = crs , css , evm
level = 1 to 5
component :  “crsctl lsmodules module_name”

3) Resource debugging : crsctl debug log res  "ora.node1.vip:1" 

4) ./crsctl get log css "all"
./crsctl set log css CSSD:4

5) To change amount  of  ocr logging, edit the file CRS_Home/srvm/admin/ocrlog.ini.




cluvfy -h

cluvfy stage -pre crsinst -n node_list
cluvfy stage -post crsinst -n node_list [-verbose]
cluvfy comp clu


cluvfy comp scan -verbose
Cluvfy comp crs –n all –verbose
cluvfy comp ocr –n all            (to verify ocr )
cluvfy comp ocr -n all -verbose

setenv MYNODES node1,node3,node5  ||  cluvfy comp nodecon -n $MYNODES [-verbose]  || 
cluvfy comp nodecon -n node1,node2,node3 –i eth0 -verbose
cluvfy comp nodecon -n all -verbose
cluvfy comp nodecon -n rac1

STORAGE ::
cluvfy comp ssa  -n all -verbose    ( to confirm if storage is shared)
cluvfy comp ssa  -n all -s /dev/sda
cluvfy comp space  -n all -l  /home/dbadmin/products  –z 2G -verbose    (to confirm free space . does not support block or raw device)
cluvfy comp cfs -f /oradbshare –n all -verbose    ( check intigrity of cluster filesystem )



è  Commands useful in day to day  :

.  ./setSID_k +asm1
crs_stat | awk -F= '/NAME=/{n=$2}/TYPE=/{t=$2}/TARGET=/{g=$2}/STATE=/{s=$2; printf("%-45s%-15s%-10s%-30s\n", n,t,g,s)}'

Ÿ crs_stat | awk -F= '/NAME=/{n=$2}/TYPE=/{t=$2}/TARGET=/{g=$2}/STATE=/{s=$2; printf("%-45s%-15s%-10s%-30s\n", n,t,g,s)}'
Ÿ crs_stat -t –v
Ÿ . $CRS_HOME/bin/crsstat
Ÿ ./crs_stat –t                      (status of all resources ) ( as oracle user)
Ÿ . $CRS_HOME/bin/crsctl check crs   ( to deamon  status ) ( as oracle user)
Ÿ ps –ef | grep d.bin
Ÿ  ./crsctl stop crs                  ( to stop crs ) (with root user )
Ÿ  ./crsctl start  crs                ( to start  crs) ( with root user )
Ÿ crsctl start resource –all
Ÿ crsctl stop has
Ÿ crsctl start has
Ÿ crsctl stop crs –f
Ÿ crsctl enable  crs  -->  enable crs  on next startup
Ÿ crsctl disable crs 
It will disable crs from coming up after reboot (These functions update the file /etc/oracle/scls_scr/node_name/root/crsstart  that           contain the string enable or disable)
Ÿ crsctl query crs softwareversion
Ÿ crsctl query crs activeversion
Ÿ  crsctl check daemon .              (see infor of each deamon )
Ÿ  crsctl stop  cluster -all
Ÿ  crsctl check cluster –all
Ÿ crs_start ora.q-faykan.ons

Ÿ crsctl stop resource ora.QGISC.INS_WFC.QGISC2.srv       
Ÿ crsctl stop resource ora.QGISC.QGISC_INS_FGEA.QGISC2.srv
Ÿ crsctl stop resource ora.QGISC.QGISC_INS_WFC.QGISC2.srv 
Ÿ crsctl stop resource ora.QRDBC.DBR_SRVC.QRDBC2.srv      
Ÿ crsctl start resource ora.QGISC.INS_WFC.QGISC2.srv       
Ÿ crsctl start resource ora.QGISC.QGISC_INS_FGEA.QGISC2.srv
Ÿ crsctl start resource ora.QGISC.QGISC_INS_WFC.QGISC2.srv 
Ÿ crsctl start resource ora.QRDBC.DBR_SRVC.QRDBC2.srv      


Ÿ  export ORACLE_SID=orcl1
Ÿ ons debug                                   (ons ip debuging)
Ÿ olsnodes -s -t  / olsnodes -n      ( node information )
Ÿ  /runcluvfy.sh stage -post crsinst -n node1,node2 -verbose
Ÿ $ date; ssh q-feyd "date";ssh q-faykan "date"
Ÿ ps –fu oracle     -->  to check resource started by oracle
Ÿ srvctl getenv database -d orcl
Ÿ srvctl getenv instance -d orcl -i orcl1
Ÿ Crs_getper
Ÿ Crs_setperm
Ÿ  crs_profile –print                 ( to see crs profile .)
Ÿ  crsctl start res -all
Ÿ  crsctl stop resource ora.QRDBC.DBR_SRVC.QRDBC2.srv    
Ÿ  crsctl stat res -t -init
Ÿ  crsctl start res ora.evmd -init
Ÿ  crsctl stop res ora.evmd -init -f
Ÿ  crsctl modify resource "ora.cssd" -attr "AUTO_START=1"
Ÿ crsctl delete serverpool msinqprd -f
+QUEUE_IN_DATA/msinqprd/spfilemsinqprd.ora -n msinqprd -a QUEUE_IN_DATA,QUEUE_IN_FLASHBACK

  crsctl (check / start / stop ) cluster –all  -- 11g


Ÿ srvctl config network
Ÿ srvctl config nodeapps
Ÿ srvctl modify database –dbname –y automatic/manual  (automatic start/stop resource )
Ÿ srvctl status database -d wwprd
Ÿ srvctl status nodeapps -n wwprod3
Ÿ srvctl status asm –n node_name
Ÿ srvctl start nodeapps -n green -4  ( starts  ons, listener , gsd, vip  )
Ÿ srvctl start asm -n green –i asm1
Ÿ srvctl start instance –d orcl –i orcl1
Ÿ srvctl remove database -d databasename ( -f)
Ÿ srvctl start database -d dctmdb
Ÿ srvctl start listener -n node_name
Ÿ srvctl stop database -d dctmdb  -o immediate
Ÿ srvctl start database -d dctmdb –i dctmdb1 –o mount
Ÿ srvctl stop instance –d orcl –i orcl1
Ÿ srvctl getenv database -d PBOKCC
Ÿ srvctl setenv database -d PBOKCC -t
Ÿ srvctl stop  asm -n green –i asm1 –o immediate
Ÿ srvctl stop nodeapps -n green -4
Ÿ srvctl remove instance -d DBM -i DBM8
Ÿ srvctl remove database -d DBM

srvctl add database -d msinqprd -o /oracle/product/11.2.0/db -p
srvctl add database -d BOSTON –o /opt/oracle/product/10g_db_rac
srvctl add instance -d BOSTON -i BOSTON1 -n boston_host1
srvctl add instance -d BOSTON -i BOSTON2 -n boston_host2
srvctl add database -d tststby -o
srvctl add instance -d tststby -i tststby2 -n raclinux2
srvctl add instance -d tststby -i tststby1 -n raclinux1
srvctl add service -d tststby -s tststby_s -r tststby2 -a tststby1
srvctl modify database -d tststby -p ’+DGDUP/spfiletststby.ora’
srvctl config database -d tststby
srvctl start instance -d orcl -i orcl1

srvctl status database –d ITDBSS
srvctl status service –d ITDBSS
srvctl status listener
srvctl status scan_listener
srvctl status scan
srvctl status nodeapps

srvctl remove/enable/disable/ asm -n node_name -i instance_name
srvctl start asm -n node_nmae -i instance_name

srvctl stop diskgroup -g DATA
srvctl [*] diskgroup -n'


srvctl add database -d JIRAP2_sldn0833por -n JIRAP2 -o /app/ora/local/product/11.2.0.2/db_1
srvctl disable database -d JIRAP2_sldn0833por
srvctl add listener -l LISTENER_JIRAP2 -o /app/ora/local/product/11.2.0.2/db_1 -p TCP:1552 -s
srvctl disable listener -l LISTENER_JIRAP2