Category Archives: oracleDBA

Monitor_undo_tablespace

-- Fraction of undo tablespace space by extent status.
-- ACTIVE = held by running transactions; UNEXPIRED = within undo_retention;
-- EXPIRED = reusable.  (Curly quotes from the blog paste replaced with ASCII
-- quotes -- the originals raise ORA-00911.)
select sum(decode(status, 'EXPIRED',   bytes, 0)) / sum(bytes) EXPIRED_PERCENT,
       sum(decode(status, 'UNEXPIRED', bytes, 0)) / sum(bytes) UNEXPIRED_PERCENT,
       sum(decode(status, 'ACTIVE',    bytes, 0)) / sum(bytes) ACTIVE_PERCENT
from dba_undo_extents;

 

-- Active transactions with owning session and the undo (rollback) segment in
-- use.  used_ublk = undo blocks consumed by the transaction.
-- Rewritten with explicit ANSI joins instead of comma joins.
select t.start_time,
       t.used_ublk,
       s.username,
       r.segment_name
from   v$transaction t
       join v$session s         on s.saddr = t.ses_addr
       join dba_rollback_segs r on r.segment_id = t.xidusn;

calculate optimum undo retention:

-- Compare the actual UNDO tablespace size against the size needed to honor
-- the current undo_retention, using the peak undo generation rate from
-- v$undostat.  needed = retention_secs * block_size * peak_blocks_per_sec.
SELECT d.undo_size / (1024 * 1024)            "ACTUAL UNDO SIZE [MByte]",
       SUBSTR(e.value, 1, 25)                 "UNDO RETENTION [Sec]",
       (TO_NUMBER(e.value) * TO_NUMBER(f.value) *
        g.undo_block_per_sec) / (1024 * 1024) "NEEDED UNDO SIZE [MByte]"
FROM (
       -- total bytes of datafiles belonging to ONLINE undo tablespaces
       SELECT SUM(a.bytes) undo_size
       FROM v$datafile a,
            v$tablespace b,
            dba_tablespaces c
       WHERE c.contents = 'UNDO'
       AND c.status = 'ONLINE'
       AND b.name = c.tablespace_name
       AND a.ts# = b.ts#
     ) d,
     v$parameter e,
     v$parameter f,
     (
       -- peak undo blocks generated per second across sampled intervals
       SELECT MAX(undoblks / ((end_time - begin_time) * 3600 * 24))
              undo_block_per_sec
       FROM v$undostat
     ) g
WHERE e.name = 'undo_retention'
AND f.name = 'db_block_size';

–Undo advisor

-- Undo Advisor: create an advisor task over AWR snapshots 1..2 and run it.
-- Adjust START_SNAPSHOT/END_SNAPSHOT to the analysis window of interest.
DECLARE
  tid   NUMBER;        -- task id (OUT)
  tname VARCHAR2(30);  -- generated task name (OUT)
  oid   NUMBER;        -- advisor object id (OUT)
BEGIN
  DBMS_ADVISOR.CREATE_TASK('Undo Advisor', tid, tname, 'Undo Advisor Task');
  DBMS_ADVISOR.CREATE_OBJECT(tname, 'UNDO_TBS', null, null, null, 'null', oid);
  DBMS_ADVISOR.SET_TASK_PARAMETER(tname, 'TARGET_OBJECTS', oid);
  DBMS_ADVISOR.SET_TASK_PARAMETER(tname, 'START_SNAPSHOT', 1);
  DBMS_ADVISOR.SET_TASK_PARAMETER(tname, 'END_SNAPSHOT', 2);
  DBMS_ADVISOR.EXECUTE_TASK(tname);
END;
/

–Monitor Undo of a user session–

-- Undo usage per user session; used_ublk = undo blocks held by the
-- session's transaction.  (Original had a stray comma before FROM,
-- which is a syntax error.)
select ses.username,
       substr(ses.program, 1, 19) command,
       tra.used_ublk
from   v$session ses,
       v$transaction tra
where  ses.saddr = tra.ses_addr;

Unix Commands

Search and find for a string in a directory:

# Recursively list files under the current directory that contain the string.
# (Curly quotes replaced with ASCII quotes so the shell treats it as one word.)
find . -type f | xargs grep -l "shawn.o.farley@wv.gov"

 

Search for files with a specific name: find -name

find . -name "whatYouAreLookingFor" -print

Schedule AWR Report Generation

-- Generate a daily AWR report between the 09:00 and 18:00 snapshots by
-- spooling the DEFINE commands that awrrpti.sql expects.
-- (SET takes one option per command; the jammed one-liner was split.)
set echo off
set head off
set feed off

spool /orasys/home/oracle/Kishi/Logs/Awr_report_daily.lst

-- Begin snapshot: today's snapshot whose interval began at 09:00.
select 'Define begin_snap= ' || snap_id
from dba_hist_snapshot
where EXTRACT(HOUR FROM BEGIN_INTERVAL_TIME) = 9
and EXTRACT(year FROM BEGIN_INTERVAL_TIME) = TO_CHAR(sysdate, 'YYYY')
and EXTRACT(month FROM BEGIN_INTERVAL_TIME) = TO_CHAR(sysdate, 'MM')
and EXTRACT(day FROM BEGIN_INTERVAL_TIME) = TO_CHAR(sysdate, 'DD');

-- End snapshot: today's snapshot whose interval began at 18:00.
SELECT 'Define end_snap= ' || snap_id
from dba_hist_snapshot
where EXTRACT(HOUR FROM BEGIN_INTERVAL_TIME) = 18
and EXTRACT(year FROM BEGIN_INTERVAL_TIME) = TO_CHAR(sysdate, 'YYYY')
and EXTRACT(month FROM BEGIN_INTERVAL_TIME) = TO_CHAR(sysdate, 'MM')
and EXTRACT(day FROM BEGIN_INTERVAL_TIME) = TO_CHAR(sysdate, 'DD');

-- Remaining defines consumed by awrrpti.sql, then invoke the report.
select 'define  report_type  = ' || '''html''' from dual;
select 'define  report_name  = /orasys/home/oracle/Kishi/Logs/AWR_REPORT_Daily.html' from dual;
select 'define  inst_name    = ' || INSTANCE_NAME from v$instance;
select 'define  db_name    = ' || name from v$database;
select 'define  dbid = ' || dbid from v$database;
select 'define inst_num = ' || INSTANCE_NUMBER from v$instance;
select 'define  num_days     = 3' from dual;
select '@$ORACLE_HOME/rdbms/admin/awrrpti.sql' from dual;
spool off;

Database_Space_Scripts

–Free/used at tablespace level

rem -----------------------------------------------------------------------
rem Filename: tsspace.sql
rem Purpose:  Show Used/free space in Meg by tablespace name
rem Author:   Kishore
rem -----------------------------------------------------------------------

tti "Space Usage for Database in Meg"

SELECT Total.name "Tablespace Name",
       nvl(Free_space, 0) Free_space,
       nvl(total_space - Free_space, 0) Used_space,
       total_space
FROM
  (select tablespace_name, sum(bytes / 1024 / 1024) Free_Space
   from sys.dba_free_space
   group by tablespace_name
  ) Free,
  (select b.name, sum(bytes / 1024 / 1024) TOTAL_SPACE
   from sys.v_$datafile a, sys.v_$tablespace B
   where a.ts# = b.ts#
   group by b.name
  ) Total
-- Outer join: tablespaces with no free extents still show (0 Meg free).
WHERE Free.Tablespace_name(+) = Total.name
ORDER BY Total.name
/

tti off

–Free/used space at datafile level

REM ------------------------------------------------------------------------------
REM Filename: dffree.sql
REM Purpose:  Shows current size, used & freespace within the datafiles.
REM Author:   Kishore
REM ------------------------------------------------------------------------------

TTI "Allocated, Used & Free space within datafiles"

COLUMN free_space_mb format 999999.90
COLUMN allocated_mb format 999999.90
COLUMN used_mb format 999999.90

-- used_mb = allocated minus what dba_free_space reports as free.
-- (The subtraction used an en-dash in the paste -- invalid SQL; fixed.)
SELECT SUBSTR (df.NAME, 1, 40) file_name, df.bytes / 1024 / 1024 allocated_mb,
       ((df.bytes / 1024 / 1024) - NVL (SUM (dfs.bytes) / 1024 / 1024, 0))
           used_mb,
       NVL (SUM (dfs.bytes) / 1024 / 1024, 0) free_space_mb
FROM v$datafile df, dba_free_space dfs
-- Outer join: fully-used datafiles (no free extents) still appear.
WHERE df.file# = dfs.file_id(+)
GROUP BY dfs.file_id, df.NAME, df.file#, df.bytes
ORDER BY file_name;

TTI off

–Used space at segment level

rem -----------------------------------------------------------------------
rem Filename: usedsp.sql
rem Purpose:  Used space in Meg by segment type
rem Author:   Frank Naude, Oracle FAQ
rem -----------------------------------------------------------------------

set pagesize 50000
set line 80

col "Total Used Meg" format 999,999,990
col "Data part" format 999,999,990
col "Index part" format 999,999,990
col "LOB part" format 999,999,990
col "RBS part" format 999,999,990

tti 'Used space in Meg by segment type'

-- Bucket segment space by type prefix: TABLE%, INDEX%, LOB%, plus
-- rollback and temporary segments.
select sum(bytes)/1024/1024 "Total Used",
       sum( decode( substr(segment_type,1,5), 'TABLE', bytes/1024/1024, 0))
           "Data part",
       sum( decode( substr(segment_type,1,5), 'INDEX', bytes/1024/1024, 0))
           "Index part",
       sum( decode( substr(segment_type,1,3), 'LOB', bytes/1024/1024, 0))
           "LOB part",
       sum( decode(segment_type, 'ROLLBACK', bytes/1024/1024, 0))
           "RBS part",
       sum( decode(segment_type, 'TEMPORARY', bytes/1024/1024, 0))
           "TEMP part"
from sys.dba_segments
/
tti off

tti "Total database size"

select sum(bytes)/1024/1024 "Total DB size in Meg"
from sys.v_$datafile
/
tti off

–Summary of database space history over a period of time

rem -----------------------------------------------------------------------
rem Filename: spacehist.sql
rem Purpose:  Save summary of database space history over time
rem Notes:    Set JOB_QUEUE_PROCESSES to a value > 0 or schedule from
rem           an external scheduler (cron, at...)
rem Date:     15-May-2002
rem Author:   Frank Naude, Oracle FAQ
rem -----------------------------------------------------------------------

---------------------------------------------------------------------------
-- Create history table...
---------------------------------------------------------------------------
drop table db_space_hist;
create table db_space_hist (
  timestamp    date,          -- sample time
  total_space  number(8),     -- total datafile space, Meg
  used_space   number(8),     -- Meg in use
  free_space   number(8),     -- Meg free
  pct_inuse    number(5,2),   -- percent used
  num_db_files number(5)      -- datafile count
);

---------------------------------------------------------------------------
-- Stored proc to populate table...
---------------------------------------------------------------------------
create or replace procedure db_space_hist_proc as
begin
  -- Purge samples older than about one year.  (Original predicate
  -- "timestamp > SYSDATE + 364" matched only future rows, i.e. never.)
  delete from db_space_hist where timestamp < SYSDATE - 364;
  -- Insert current utilization values...
  insert into db_space_hist
  select sysdate,
         total_space,
         total_space - nvl(free_space, 0) used_space,
         nvl(free_space, 0) free_space,
         ((total_space - nvl(free_space, 0)) / total_space) * 100 pct_inuse,
         num_db_files
  from ( select sum(bytes)/1024/1024 free_space
         from sys.DBA_FREE_SPACE ) FREE,
       -- alias renamed from FULL: FULL is an Oracle reserved word
       ( select sum(bytes)/1024/1024 total_space,
                count(*) num_db_files
         from sys.DBA_DATA_FILES ) TOT;
  commit;
end;
/
show errors

---------------------------------------------------------------------------
-- Schedule the job using the DB Job System. This section can be removed if
-- the job is scheduled via an external scheduler.
---------------------------------------------------------------------------
declare
  v_job number;
begin
  -- If the job already exists, drop and re-submit it...
  select job into v_job from user_jobs where what like 'db_space_hist_proc%';
  dbms_job.remove(v_job);
  dbms_job.submit(v_job, 'db_space_hist_proc;', sysdate,
                  'sysdate+7'); -- Run every 7 days
  dbms_job.run(v_job);
  dbms_output.put_line('Job ' || v_job || ' re-submitted.');
exception
  when NO_DATA_FOUND then
    -- ...otherwise submit it for the first time.
    dbms_job.submit(v_job, 'db_space_hist_proc;', sysdate,
                    'sysdate+7'); -- Run every 7 days
    dbms_job.run(v_job);
    dbms_output.put_line('Job ' || v_job || ' submitted.');
end;
/

---------------------------------------------------------------------------
-- Generate a space history report...
---------------------------------------------------------------------------
select to_char(timestamp, 'DD Mon RRRR HH24:MI') "Timestamp",
       total_space  "DBSize (Meg)",
       used_space   "Used (Meg)",  -- headings were swapped in the original
       free_space   "Free (Meg)",  -- (used labelled Free and vice versa)
       pct_inuse    "% Used",
       num_db_files "Num DB Files"
from db_space_hist
order by timestamp;

--List segments that can not extend

rem -----------------------------------------------------------------------
rem Filename: exterror.sql
rem Purpose:  Segments that will cause errors when they try to extend!!!
rem Author:   Frank Naude, Oracle FAQ
rem -----------------------------------------------------------------------

prompt Objects that cannot extend (no space in TS)

column Sname form a40 heading 'Object Name'
column Stype form a15 heading 'Type'
column Size form 9,999 heading 'Size'
column Next form 99,999 heading 'Next'
column Tname form a15 heading 'TsName'

-- A segment cannot extend when its next extent is larger than the biggest
-- contiguous free chunk in its tablespace.
select a.owner||'.'||a.segment_name "Sname",
       a.segment_type "Stype",
       a.bytes/1024/1024 "Size",
       a.next_extent/1024/1024 "Next",
       a.tablespace_name "TName"
from sys.dba_segments a
where a.tablespace_name not like 'T%MP%' -- Exclude TEMP tablespaces
and next_extent * 1 > ( -- Cannot extend 1x, can change to 2x...
      select max(b.bytes)
      from dba_free_space b
      where a.tablespace_name = b.tablespace_name)
order by 3 desc
/

-- THIS QUERY GIVES THE SAME RESULTS, BUT IS WAY TOO SLOW
-- (Kept for reference; em-dashes from the paste were not valid SQL
-- comment markers, so each line would have been a syntax error.)

-- select a.owner, a.segment_name, b.tablespace_name,
--        decode(ext.extents, 1, b.next_extent,
--               a.bytes * (1 + b.pct_increase / 100)) nextext,
--        freesp.largest
-- from dba_extents a,
--      dba_segments b,
--      (select owner, segment_name, max(extent_id) extent_id,
--              count(*) extents
--       from dba_extents
--       group by owner, segment_name
--      ) ext,
--      (select tablespace_name, max(bytes) largest
--       from dba_free_space
--       group by tablespace_name
--      ) freesp
-- where a.owner = b.owner
-- and a.segment_name = b.segment_name
-- and a.owner = ext.owner
-- and a.segment_name = ext.segment_name
-- and a.extent_id = ext.extent_id
-- and b.tablespace_name = freesp.tablespace_name
-- and decode(ext.extents, 1, b.next_extent, a.bytes * (1 + b.pct_increase / 100)) > freesp.largest
-- /

--Some segment sizing recommendations

rem -----------------------------------------------------------------------
rem Filename: sizing.sql
rem Purpose:  Give some segment sizing recommendations
rem Date:     04-Jul-1999
rem Author:   Frank Naude, Oracle FAQ
rem -----------------------------------------------------------------------

prompt Database block size:

select to_number(value) "Block size in bytes"
from sys.v_$parameter
where name = 'db_block_size'
/

prompt Max number of possible extents (if not set to UNLIMITED)
prompt is db_block_size/16-7

select to_number(value)/16-7 "MaxExtents"
from sys.v_$parameter
where name = 'db_block_size'
/

prompt The recommended min extent size is a multiple of
prompt db_block_size * db_file_multiblock_read_count. This gives
prompt the chunks Oracle ask from the OS when doing read-ahead
prompt with full table scans.

select to_number(a.value) * to_number(b.value) / 1024 "Min extent size in K"
from sys.v_$parameter a, sys.v_$parameter b
where a.name = 'db_block_size'
and b.name = 'db_file_multiblock_read_count'
/

--Show database growth in Meg per month for the last year

rem -----------------------------------------------------------------------
rem Filename: dbgrowth.sql
rem Purpose:  Show database growth in Meg per month for the last year
rem DB Version: 8.0 or above
rem Note:     File extending is not factored in as it's not available
rem           in the dictionary.
rem Date:     19-Mar-2000
rem Author:   Frank Naude, Oracle FAQ
rem -----------------------------------------------------------------------

set pagesize 50000
tti "Database growth per month for last year"

-- Growth is approximated by datafile creation time; resized files are
-- not accounted for (see Note above).
select to_char(creation_time, 'RRRR Month') "Month",
       sum(bytes)/1024/1024 "Growth in Meg"
from sys.v_$datafile
where creation_time > SYSDATE-365
group by to_char(creation_time, 'RRRR Month')
/

tti off

--Tables whose high water mark differs from the no# of used blocks

REM ########################################################################
REM ## Author : Sunil Kumar
REM ## Senior DBA
REM ## Email : sunilagarwal@hotmail.com
REM ##         sunil.kumar@thehartford.com
REM ##
REM ## Script to determine highwater mark of tables. It is essential
REM ## to run ANALYZE_SCHEMA utility or ANALYZE TABLE commands before
REM ## running this script for accurate statistics.
REM ##
REM ## It displays all the tables whose high water mark differs from the
REM ## no# of used blocks.
REM ##
REM ## How To Run:
REM ## SQL>@highwtr
REM ## It will ask for the owner (press enter for all owners) and
REM ## the table name (press enter for all tables).
REM ##
REM ########################################################################

set verify off
column owner format a10
column alcblks heading 'Allocated|Blocks' just c
column usdblks heading 'Used|Blocks' just c
column hgwtr heading 'High|Water' just c
break on owner skip page

select
  a.owner,
  a.table_name,
  b.blocks alcblks,
  a.blocks usdblks,
  (b.blocks - a.empty_blocks - 1) hgwtr
from
  dba_tables a,
  dba_segments b
where
  a.table_name = b.segment_name
and a.owner = b.owner
and a.owner not in ('SYS', 'SYSTEM')
-- NOTE(review): the comparison operator was stripped in the source paste;
-- "<>" (used blocks differ from high water mark) matches the stated purpose
-- of the script -- confirm against the original.
and a.blocks <> (b.blocks - a.empty_blocks - 1)
and a.owner like upper('&owner') || '%'
and a.table_name like upper('&table_name') || '%'
order by 1, 2
/

set verify on
clear columns
clear breaks

--Check whether a TS is fragmented and the type of fragmentation

REM ------------------------------------------------------------------------------
REM Filename: tsfrag.sql
REM Purpose:  Shows whether the TBS is fragmented and the type of fragmentation.
REM Author:   Kam Muhamad Sirajdin [mdsirajdin@yahoo.com]
REM ------------------------------------------------------------------------------

TTI "Tablespace Fragmentation Details"

-- 100% coalesced + one free extent   => 'No Frag'
-- 100% coalesced + many free extents => 'Bubble Frag'
-- not fully coalesced                => 'Possible Honey Comb Frag'
SELECT dfsc.tablespace_name tablespace_name,
       DECODE (
         dfsc.percent_extents_coalesced,
         100,
         (DECODE (
            GREATEST ((SELECT COUNT (1)
                       FROM dba_free_space dfs
                       WHERE dfs.tablespace_name = dfsc.tablespace_name), 1),
            1,
            'No Frag',
            'Bubble Frag'
          )
         ),
         'Possible Honey Comb Frag'
       )
       fragmentation_status
FROM dba_free_space_coalesced dfsc
ORDER BY dfsc.tablespace_name;

TTI off

--Index fragmentation status for a schema

/* ************************************************************* */
/* Index Fragmentation Status (idxfrag.sql):                     */
/*                                                               */
/* This script will report the index fragmentation status        */
/* for a schema.                                                 */
/*                                                               */
/* Note: - Do not run this script during peak processing hours!! */
/*       - This script will fail for locked tables.              */
/*                                                               */
/* ************************************************************* */

prompt -- Drop and create temporary table to hold stats...
drop table my_index_stats
/
create table my_index_stats (
  index_name           varchar2(30),
  height               number(8),
  del_lf_rows          number(8),
  distinct_keys        number(8),
  rows_per_key         number(10,2),
  blks_gets_per_access number(10,2)
)
/

prompt -- Save script which we will later use to populate the above table...
-- INDEX_STATS holds results of the most recent VALIDATE INDEX; this INSERT
-- is deliberately left unterminated so SAVE captures it as a runnable script.
insert into my_index_stats
select NAME, HEIGHT, DEL_LF_ROWS, DISTINCT_KEYS, ROWS_PER_KEY,
       BLKS_GETS_PER_ACCESS
from INDEX_STATS
-- Note this open line...

save /tmp/save_index_stats.sql replace

prompt
prompt -- Spool listing with validate commands...
col line1 newline
col line2 newline
col line3 newline
set pagesize 0
set echo off
set termout off
set trimspool on
set feed off
set linesize 200
spool /tmp/validate_indexes.sql
select 'prompt Process table ' || owner || '.' || table_name ||
       ', index ' || index_name || '...' line1,
       'validate index ' || owner || '.' || index_name || ';' line2,
       '@/tmp/save_index_stats.sql' line3
from sys.dba_indexes where owner = 'SCOTT'
order by table_name, index_name
/
spool off
set termout on
set feed on

prompt
prompt -- Run script to validate indexes...
@/tmp/validate_indexes.sql

prompt -- Print nice report...
set pagesize 50000
set trimspool on
col height format 99999
col del_rows format 9999999
col rows/key format 999999.9
spool idxfrag.lst
select INDEX_NAME, HEIGHT, DEL_LF_ROWS "DEL_ROWS", DISTINCT_KEYS "DIST KEYS",
       ROWS_PER_KEY "ROWS/KEY",
       BLKS_GETS_PER_ACCESS "BLKS/ACCESS"
from MY_INDEX_STATS
/
spool off

-- Cleanup
drop table my_index_stats
/
! rm /tmp/validate_indexes.sql
! rm /tmp/save_index_stats.sql

prompt
prompt Report is in idxfrag.lst
prompt Done!!!

Hidden Parameters

Hidden Parameters in Oracle. How to Change Hidden Parameter

The hidden parameters start with an "_". They cannot be seen in the output of SHOW PARAMETER
or by querying v$parameter unless and until they are set explicitly in init.ora.
However if you want to view all the hidden parameters and their default values the following query
could be of help,

-- Hidden (underscore) parameters with session and instance values.
-- Requires SYS access to the x$ fixed tables.
SELECT
  a.ksppinm "Parameter", b.ksppstvl "Session Value", c.ksppstvl "Instance Value"
FROM x$ksppi a, x$ksppcv b, x$ksppsv c
WHERE
-- "/" escapes the underscore so LIKE matches a literal leading "_"
a.indx = b.indx AND a.indx = c.indx AND a.ksppinm LIKE '/_%' escape '/'
/

In order to see the listing of all hidden parameters query,

-- List every hidden parameter name (those starting with "_").
select *
from SYS.X$KSPPI
where substr(KSPPINM, 1, 1) = '_';

Change Hidden Parameters in Oracle
—————————————
It is never recommended to modify these hidden parameters without the assistance of Oracle Support. Changing these parameters may lead to severe performance degradation and other problems in the database.
In order to change hidden parameter,
1)If you use pfile then in your initSID.ora you can entry of the hidden parameter and start the database.

2)If you want to use for the current session you can use ALTER SESSION SET ….

3)To set it permanently if you use spfile then use, ALTER SYSTEM SET …… SCOPE=SPFILE. Since hidden parameter starts with underscore(_) to access it you have to specify within double quotes. If you use SCOPE=SPFILE then in order to take effect you need to restart database. You can use SCOPE=BOTH if parameter can be set in the session also.

For hidden parameter or the values that start with underscore(_) must be specified within double quotes(“) as below.

SQL> alter system set "_offline_rollback_segments"="_SYSSMU1$" SCOPE=spfile;

System altered.

General_Health_Check

Oracle Database Health check scripts
Hi All,
DBAs are often asked to check the health of the database. The health of the database can be checked in various ways. It includes:

SL No Monitoring Scope Current Status OS Level
1 Physical memory / Load :Load normal, Load averages: 0.35, 0.37, 0.36
2 OS Space threshold ( archive, ora_dump etc.. ) :Sufficient Space available.
3 Top 10 process consuming memory:No process using exceptional high memory
4 Free volumes available :Sufficient disk space is available on the mount points
5 Filesystem space Under normal threshold
Database level.
6 Check extents / Pro active Space addition:Space is being regularly added.
7 Check alert log for ORA- and warn messages.
8 Major wait events (latch/enqueue/Lib cache pin) No major wait events
9 Max Sessions
10 Long running Jobs 6 inactive sessions running for more than 8 hrs
11 Invalid objects 185
12 Analyze Jobs ( once in a week ) Done on 20-JAN-2008 Time 06:00:06
13 Temp usage / Rollback segment usage Normal
14 Nologging Indexes
15 Hotbackup/Coldbackup Gone fine
16 Redo generation normal
17 PQ proceses Normal
18 I/O Generation Under normal threshold
19 2 PC Pending transactions 0
DR / backup
1 Sync arch Normal
2 Purge arch Normal
3 Recovery status Normal
20)DATABASE HEALTH CHECK SCRIPT: Showing locks and Archive generation details

In Detail DATABASE Health check:
OPERATING SYSTEM:

1)Physical memory/ Load:
1) Free:free command displays amount of total, free and used physical memory (RAM) in the system as well as showing information on shared memory, buffers, cached memory and swap space used by the Linux kernel.
Usage:
$ free -m

2) vmstat:vmstat reports report virtual memory statistics, which has information about processes, swap, free, buffer and cache memory, paging space, disk IO activity, traps, interrupts, context switches and CPU activity
Usage:
$vmstat 5

3) top:top command displays dynamic real-time view of the running tasks managed by kernel and in Linux system. The memory usage stats by top command include real-time live total, used and free physical memory and swap memory with their buffers and cached memory size respectively
Usage:
$top
4) ps :ps command reports a snapshot on information of the current active processes. ps will show the percentage of memory resource that is used by each process or task running in the system. With this command, top memory hogging processes can be identified.
Usage:
$ps aux

2) OS Space threshold ( archive, ora_dump etc.. ):
Checking the OS space is available in all filesystems,specially the location which is having archive logs ,oracle Database files.We can use the below OS commands:
$df –h
$du –csh *
3) Top 10 process consuming memory:
We can Displaying top 10 memory consuming processes as follows:

ps aux|head -1;ps aux|sort -m

We can use the top command, and press M which orders the process list by memory usage.

4) Free volumes available:

We have to make sure Sufficient disk space is available on the mount points on each OS servers where the Database is up and running.

$df –h

5)Filesystem space:

Under normal threshold.Check the filesystem in the OS side whether the sufficient space is available at all mount points.

DATABASE :

6)Check extents / Pro active Space addition:
Check each of the Data,Index and temporary tablespaces for extend and blocks
Allocation details.

-- Extent and block allocation per segment; second query narrows to one
-- tablespace (replace STAR01D with the tablespace of interest).
SET LINES 1000
SELECT SEGMENT_NAME,TABLESPACE_NAME,EXTENTS,BLOCKS
FROM DBA_SEGMENTS;

SELECT SEGMENT_NAME,TABLESPACE_NAME,EXTENTS,BLOCKS
FROM DBA_SEGMENTS WHERE TABLESPACE_NAME = 'STAR01D';

7) Check alert log for ORA- and warn messages:

Checking the alert log file regularly is a vital task. In the alert log files we have to look for the following things:

1) Look for any of the oracle related errors.
Open the alert log file with less or more command and search for ORA-
This will give you the error details and time of occurrence.

2) Look for the Database level or Tablespace level changes
Monitor the alert log file and search the file for each Day activities happening
In the Database either whether it is bouncing of Database.Increase in the size of the tablespaces,Increase in the size of the Database parameters.In the 11g Database we can look for TNS errors in the alert log file.

8) Major wait events (latch/enqueue/Lib cache pin):

We can check the wait events details with the help of below queries:

-- Full v$session detail for ACTIVE foreground sessions, excluding the
-- SYSTEM os user and background processes.
-- NOTE(review): the "<>" operators on osuser and TYPE were stripped by the
-- blog paste; restored -- confirm against the original script.
SELECT s.saddr, s.SID, s.serial#, s.audsid, s.paddr, s.user#, s.username,
       s.command, s.ownerid, s.taddr, s.lockwait, s.status, s.server,
       s.schema#, s.schemaname, s.osuser, s.process, s.machine, s.terminal,
       UPPER (s.program) program, s.TYPE, s.sql_address, s.sql_hash_value,
       s.sql_id, s.sql_child_number, s.sql_exec_start, s.sql_exec_id,
       s.prev_sql_addr, s.prev_hash_value, s.prev_sql_id,
       s.prev_child_number, s.prev_exec_start, s.prev_exec_id,
       s.plsql_entry_object_id, s.plsql_entry_subprogram_id,
       s.plsql_object_id, s.plsql_subprogram_id, s.module, s.module_hash,
       s.action, s.action_hash, s.client_info, s.fixed_table_sequence,
       s.row_wait_obj#, s.row_wait_file#, s.row_wait_block#,
       s.row_wait_row#, s.logon_time, s.last_call_et, s.pdml_enabled,
       s.failover_type, s.failover_method, s.failed_over,
       s.resource_consumer_group, s.pdml_status, s.pddl_status, s.pq_status,
       s.current_queue_duration, s.client_identifier,
       s.blocking_session_status, s.blocking_instance, s.blocking_session,
       s.seq#, s.event#, s.event, s.p1text, s.p1, s.p1raw, s.p2text, s.p2,
       s.p2raw, s.p3text, s.p3, s.p3raw, s.wait_class_id, s.wait_class#,
       s.wait_class, s.wait_time, s.seconds_in_wait, s.state,
       s.wait_time_micro, s.time_remaining_micro,
       s.time_since_last_wait_micro, s.service_name, s.sql_trace,
       s.sql_trace_waits, s.sql_trace_binds, s.sql_trace_plan_stats,
       s.session_edition_id, s.creator_addr, s.creator_serial#
FROM v$session s
WHERE ( (s.username IS NOT NULL)
        AND (NVL (s.osuser, 'x') <> 'SYSTEM')
        AND (s.TYPE <> 'BACKGROUND') AND STATUS = 'ACTIVE'
      )
ORDER BY "PROGRAM";

The following query provides clues about whether Oracle has been waiting for library cache activities:

-- Sessions currently waiting on 'library cache pin'.
Select sid, event, p1raw, seconds_in_wait, wait_time
From v$session_wait
Where event = 'library cache pin'
And state = 'WAITING';

The below Query gives details of Users sessions wait time and state:

-- Wait time and state per session, longest waiters first; background
-- (no-username) sessions are shown as '(oracle)'.
SELECT NVL (s.username, '(oracle)') AS username, s.SID, s.serial#, sw.event,
       sw.wait_time, sw.seconds_in_wait, sw.state
FROM v$session_wait sw, v$session s
WHERE s.SID = sw.SID
ORDER BY sw.seconds_in_wait DESC;

9) Max Sessions:
There should not be more than 6 inactive sessions running for more than 8 hours in a Database in order to minimize the consumption of CPU and I/O resources.

a)Users and Sessions CPU consumption can be obtained by below query:

Set lines 1000
-- CPU seconds consumed per active user session
-- (statistic 'CPU used by this session' is in centiseconds, hence /100).
select ss.username, se.SID, VALUE/100 cpu_usage_seconds
from v$session ss, v$sesstat se, v$statname sn
where se.STATISTIC# = sn.STATISTIC#
and NAME like '%CPU used by this session%'
and se.SID = ss.SID and ss.status = 'ACTIVE'
and ss.username is not null
order by VALUE desc;

b) Users and Sessions CPU and I/O consumption can be obtained by below query:

-- Shows day-wise, user-wise, server-process-wise CPU and I/O consumption.
set linesize 140
col spid for a6
col program for a35 trunc
select p.spid SPID,
       to_char(s.LOGON_TIME, 'DDMonYY HH24:MI') date_login,
       s.username,
       decode(nvl(p.background, 0), 1, bg.description, s.program) program,
       ss.value/100 CPU,
       physical_reads disk_io,
       (trunc(sysdate, 'J') - trunc(logon_time, 'J')) days,
       round((ss.value/100) /
             (decode((trunc(sysdate, 'J') - trunc(logon_time, 'J')),
                     0, 1,
                     (trunc(sysdate, 'J') - trunc(logon_time, 'J')))), 2) cpu_per_day
from V$PROCESS p, V$SESSION s, V$SESSTAT ss, V$SESS_IO si, V$BGPROCESS bg
where s.paddr = p.addr and ss.sid = s.sid
-- NOTE(review): statistic# 12 is assumed to be 'CPU used by this session';
-- this id varies between versions -- joining v$statname by name is safer.
and ss.statistic# = 12 and si.sid = s.sid
and bg.paddr(+) = p.addr
and round((ss.value/100), 0) > 10
order by 8;

10) Long running Jobs:

We can find out long running jobs with the help of the below query:

col username for a20
col message for a50
col remaining for 9999
-- Long-running operations still in progress.
-- NOTE(review): the comparison operator appears stripped in the source
-- ("= 0" would list only finished operations, contradicting the purpose
-- and the descending sort); restored to "> 0" -- confirm against original.
select username, to_char(start_time, 'hh24:mi:ss dd/mm/yy') started,
       time_remaining remaining, message
from v$session_longops
where time_remaining > 0
order by time_remaining desc;

11) Invalid objects:

We can check the invalid objects with the help of the below query:

-- Invalid objects: owner, name, creation date and status. select owner||' '||object_name||' '||created||' '||status from dba_objects where status = 'INVALID';

12) Analyze Jobs ( once in a week ):

We need to analyze the jobs that are running once in a week as a golden rule.
The below steps can be considered for analyzing jobs.

Analyzing a Running Job
The status of a job or a task changes several times during its life cycle. A job can have the following as its status:
Scheduled: The job is created and will run at the specified time.
Running: The job is being executed and is in progress.
Initialization Error: The job or step could not be run successfully. If a step in a job fails initialization, the job status is Initialization Error.
Failed: The job was executed but failed.
Succeeded: The job was executed completely.
Stopped: The user canceled the job.
Stop Pending: The user has stopped the job. The already running steps are completing execution.
Suspended: This indicates that the execution of the job is deferred.
Inactive: This status indicates that the target has been deleted.
Reassigned: The owner of the job has changed.
Skipped: The job was not executed at the specified time and has been omitted.
The running jobs can be found out by the help of below query:

-- Currently executing DBMS_JOB jobs. select sid, job, instance from dba_jobs_running;

We can find out the failed jobs and Broken jobs details with the help of the Below query:

-- Job overview: broken flag, failure count, job text, last run date/time. select job||' '||schema_user||' '||Broken||' '||failures||' '||what||' '||last_date||' '||last_sec from dba_jobs;

13) Temp usage / Rollback segment/PGA usage:

We can get information of temporary tablespace usage details with the help of below query:
Set lines 1000
-- Temporary (sort) tablespace usage per session, in Meg.
SELECT b.tablespace,
       ROUND(((b.blocks * p.value) / 1024 / 1024), 2) || 'M' "SIZE",
       a.sid || ',' || a.serial# SID_SERIAL,
       a.username,
       a.program
FROM sys.v_$session a,
     sys.v_$sort_usage b,
     sys.v_$parameter p
WHERE p.name = 'db_block_size'
AND a.saddr = b.session_addr
ORDER BY b.tablespace, b.blocks;

We can get information of Undo tablespace usage details with the help of the below query:
set lines 1000
-- Undo usage per transaction: session, undo segment and KB of undo held
-- (used_ublk blocks * db_block_size, converted to KB).
SELECT TO_CHAR(s.sid) || ',' || TO_CHAR(s.serial#) sid_serial,
       NVL(s.username, 'None') orauser,
       s.program,
       r.name undoseg,
       t.used_ublk * TO_NUMBER(x.value) / 1024 || 'K' "Undo"
FROM sys.v_$rollname r,
     sys.v_$session s,
     sys.v_$transaction t,
     sys.v_$parameter x
WHERE s.taddr = t.addr
AND r.usn = t.xidusn(+)
AND x.name = 'db_block_size';

We can get the PGA usage details with the help of the below query:
-- PGA-related statistics per session for sessions of a given user
-- (&user substitution variable); values shown in whole GB (rounded up).
select st.sid "SID", sn.name "TYPE",
       ceil(st.value / 1024 / 1024 / 1024) "GB"
from v$sesstat st, v$statname sn
where st.statistic# = sn.statistic#
and sid in
    (select sid from v$session where username like UPPER('&user'))
and upper(sn.name) like '%PGA%'
order by st.sid, st.value desc;
Enter value for user: STARTXNAPP
14)Validating the Backup:

We have to verify the Hotbackup/Coldbackup(or any physical or logical backup) of all the Production and non-production Databases went fine.Make sure you are having a valid backups of all the Databases.Check the Backup locations to make sure the Backup completed on time with the required Backup data.

14)Hotbackup/Coldbackup:
Validating the backup of Database.It should complete on time with the required data for restoring and recovery purpose if required.

15) Redo generation/Archive logs generation details:
We should make sure there should not be frequent log switch happening in a Database.If there are frequent log switches than archive logs might generate more which may decrease the performance of the Database however in a production Database log switches could vary depending upon the Server configuration between 5 to 20.

We can check the log switch details with the help of the below query:

Redolog switch Datewise and hourwise:
——————————-
set lines 120;
set pages 999;
-- Redo log switches: one row per day, one column per hour (00..23).
select to_char(first_time,'DD-MON-RR') "Date",
to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'99') " 00",
to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'99') " 01",
to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'99') " 02",
to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'99') " 03",
to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'99') " 04",
to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'99') " 05",
to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'99') " 06",
to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'99') " 07",
to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'99') " 08",
to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'99') " 09",
to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'99') " 10",
to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'99') " 11",
to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'99') " 12",
to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'99') " 13",
to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'99') " 14",
to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'99') " 15",
to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'99') " 16",
to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'99') " 17",
to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'99') " 18",
to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'99') " 19",
to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'99') " 20",
to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'99') " 21",
to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'99') " 22",
to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'99') " 23"
from v$log_history
group by to_char(first_time,'DD-MON-RR')
order by 1
/
Archive logs generations is directly proportional to the number of log switches happening in a Database. If there are frequent log switches than archive logs might generate more which can affect the performance of Database.

We can use the below queries for archive logs generation details:

a)Archive logs by dates:
set lines 1000
-- Archive generation (log switches) per hour: one row per day,
-- one column per hour (00..23).
select to_char(first_time,'DD-MON-RR') "Date",
to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'99') " 00",
to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'99') " 01",
to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'99') " 02",
to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'99') " 03",
to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'99') " 04",
to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'99') " 05",
to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'99') " 06",
to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'99') " 07",
to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'99') " 08",
to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'99') " 09",
to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'99') " 10",
to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'99') " 11",
to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'99') " 12",
to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'99') " 13",
to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'99') " 14",
to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'99') " 15",
to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'99') " 16",
to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'99') " 17",
to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'99') " 18",
to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'99') " 19",
to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'99') " 20",
to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'99') " 21",
to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'99') " 22",
to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'99') " 23"
from v$log_history
group by to_char(first_time,'DD-MON-RR')
order by 1
/
b)Archive log generation details Day-wise :

-- Count of archived logs completed per day (quotes restored to ASCII).
select to_char(COMPLETION_TIME,'DD-MON-YYYY'),count(*)
from v$archived_log group by to_char(COMPLETION_TIME,'DD-MON-YYYY')
order by to_char(COMPLETION_TIME,'DD-MON-YYYY');

c) Archive log count of the day:

-- Number of archived logs produced so far today
SELECT COUNT(*)
FROM v$archived_log
WHERE TRUNC(completion_time) = TRUNC(SYSDATE);

count of archived logs generated today on hourly basis:
——————————————————-
-- Hourly count of archived logs generated today (quotes restored to ASCII).
select to_char(first_time,'DD-MON-RR') "Date",
to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'99') " 00",
to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'99') " 01",
to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'99') " 02",
to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'99') " 03",
to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'99') " 04",
to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'99') " 05",
to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'99') " 06",
to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'99') " 07",
to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'99') " 08",
to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'99') " 09",
to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'99') " 10",
to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'99') " 11",
to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'99') " 12",
to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'99') " 13",
to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'99') " 14",
to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'99') " 15",
to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'99') " 16",
to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'99') " 17",
to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'99') " 18",
to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'99') " 19",
to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'99') " 20",
to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'99') " 21",
to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'99') " 22",
to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'99') " 23"
from v$log_history
-- derive today's date from sysdate instead of the hard-coded example literal '16-AUG-10',
-- since the script is meant to report "today" on every run
where to_char(first_time,'DD-MON-RR') = to_char(sysdate,'DD-MON-RR')
group by to_char(first_time,'DD-MON-RR')
order by 1
/

16)I/O Generation:
We can find out CPU and I/O generation details for all the users in the Database with the help of the below query:
-- Show IO per session, CPU in seconds, session IO.
set linesize 140
col spid for a6
col program for a35 trunc
select p.spid SPID,
       to_char(s.LOGON_TIME,'DDMonYY HH24:MI') date_login,
       s.username,
       decode(nvl(p.background,0),1,bg.description, s.program ) program,
       ss.value/100 CPU,
       physical_reads disk_io,
       (trunc(sysdate,'J')-trunc(logon_time,'J')) days,
       round((ss.value/100)/(decode((trunc(sysdate,'J')-trunc(logon_time,'J')),0,1,(trunc(sysdate,'J')-trunc(logon_time,'J')))),2) cpu_per_day
from V$PROCESS p, V$SESSION s, V$SESSTAT ss, V$SESS_IO si, V$BGPROCESS bg, V$STATNAME sn
where s.paddr = p.addr
  and ss.sid = s.sid
  -- join V$STATNAME instead of hard-coding statistic#=12: the statistic number
  -- for 'CPU used by this session' varies between Oracle versions
  and ss.statistic# = sn.statistic#
  and sn.name = 'CPU used by this session'
  and si.sid = s.sid
  and bg.paddr(+) = p.addr
  and round((ss.value/100),0) > 10
order by 8;
To know what the session is doing and what kind of sql it is using:

-- Full SQL text (all pieces, in order) for the session identified by &sid
set lines 9999
set pages 9999

SELECT s.sid,
       q.sql_text
FROM v$sqltext q,
     v$session s
WHERE q.address = s.sql_address
  AND s.sid = &sid
ORDER BY piece;

eg: sid=1853

17)Sync arch:
In a Dataguard environment we have to check primary is in sync with the secondary Database.This we can check as follows:
The V$ MANAGED_STANDBY view on the standby database site shows you the activities performed by
both redo transport and Redo Apply processes in a Data Guard environment
SELECT PROCESS, CLIENT_PROCESS, SEQUENCE#, STATUS FROM V$MANAGED_STANDBY;
In some situations, automatic gap recovery may not take place and you will need to perform gap recovery manually. For example, you will need to perform gap recovery manually if you are using logical standby databases and the primary database is not available.
The following sections describe how to query the appropriate views to determine which log files are missing and perform manual recovery.
On a physical standby database
To determine if there is an archive gap on your physical standby database, query the V$ARCHIVE_GAP view as shown in the following example:
SQL> SELECT * FROM V$ARCHIVE_GAP;

If it displays no rows, the primary database is in sync with the standby database. If it returns any rows, we have to apply the missing archive logs to the standby manually.

After you identify the gap, issue the following SQL statement on the primary database to locate the archived redo log files on your primary database (assuming the local archive destination on the primary database is LOG_ARCHIVE_DEST_1):
Eg:
SELECT NAME FROM V$ARCHIVED_LOG WHERE THREAD#=1 AND DEST_ID=1 AND SEQUENCE# BETWEEN 7 AND 10;
Copy these log files to your physical standby database and register them using the ALTER DATABASE REGISTER LOGFILE statement on your physical standby database. For example:
SQL> ALTER DATABASE REGISTER LOGFILE
‘/physical_standby1/thread1_dest/arcr_1_7.arc’;
SQL> ALTER DATABASE REGISTER LOGFILE
‘/physical_standby1/thread1_dest/arcr_1_8.arc’;

After you register these log files on the physical standby database, you can restart Redo Apply. The V$ARCHIVE_GAP fixed view on a physical standby database only returns the next gap that is currently blocking Redo Apply from continuing. After resolving the gap and starting Redo Apply, query the V$ARCHIVE_GAP fixed view again on the physical standby database to determine the next gap sequence, if there is one. Repeat this process until there are no more gaps.

On a logical standby database:
To determine if there is an archive gap, query the DBA_LOGSTDBY_LOG view on the logical standby database. For example, the following query indicates there is a gap in the sequence of archived redo log files because it displays two files for THREAD 1 on the logical standby database. (If there are no gaps, the query will show only one file for each thread.) The output shows that the highest registered file is sequence number 10, but there is a gap at the file shown as sequence number 6:
SQL> COLUMN FILE_NAME FORMAT a55
SQL> SELECT THREAD#, SEQUENCE#, FILE_NAME FROM DBA_LOGSTDBY_LOG L
2> WHERE NEXT_CHANGE# NOT IN
3> (SELECT FIRST_CHANGE# FROM DBA_LOGSTDBY_LOG WHERE L.THREAD# = THREAD#)
4> ORDER BY THREAD#,SEQUENCE#;

THREAD# SEQUENCE# FILE_NAME
———- ———- ———————————————–
1 6 /disk1/oracle/dbs/log-1292880008_6.arc
1 10 /disk1/oracle/dbs/log-1292880008_10.arc

Copy the missing log files, with sequence numbers 7, 8, and 9, to the logical standby system and register them using the ALTER DATABASE REGISTER LOGICAL LOGFILE statement on your logical standby database. For example:
SQL> ALTER DATABASE REGISTER LOGICAL LOGFILE ‘/disk1/oracle/dbs/log-1292880008_10.arc’;

After you register these log files on the logical standby database, you can restart SQL Apply.

The DBA_LOGSTDBY_LOG view on a logical standby database only returns the next gap that is currently blocking SQL Apply from continuing. After resolving the identified gap and starting SQL Apply, query the DBA_LOGSTDBY_LOG view again on the logical standby database to determine the next gap sequence, if there is one. Repeat this process until there are no more gaps.
Monitoring Log File Archival Information:
Step 1 Determine the current archived redo log file sequence numbers.
Enter the following query on the primary database to determine the current archived redo log file sequence numbers:
SQL> SELECT THREAD#, SEQUENCE#, ARCHIVED, STATUS FROM V$LOG
WHERE STATUS=’CURRENT’;
Step 2 Determine the most recent archived redo log file.
Enter the following query at the primary database to determine which archived redo log file contains the most recently transmitted redo data:
SQL> SELECT MAX(SEQUENCE#), THREAD# FROM V$ARCHIVED_LOG GROUP BY THREAD#;
Step 3 Determine the most recent archived redo log file at each destination.
Enter the following query at the primary database to determine which archived redo log file was most recently transmitted to each of the archiving destinations:
SQL> SELECT DESTINATION, STATUS, ARCHIVED_THREAD#, ARCHIVED_SEQ#
2> FROM V$ARCHIVE_DEST_STATUS
3> WHERE STATUS <> 'DEFERRED' AND STATUS <> 'INACTIVE';

DESTINATION STATUS ARCHIVED_THREAD# ARCHIVED_SEQ#
—————— —— —————- ————-
/private1/prmy/lad VALID 1 947
standby1 VALID 1 947
The most recently written archived redo log file should be the same for each archive destination listed. If it is not, a status other than VALID might identify an error encountered during the archival operation to that destination.
Step 4 Find out if archived redo log files have been received.
You can issue a query at the primary database to find out if an archived redo log file was not received at a particular site. Each destination has an ID number associated with it. You can query the DEST_ID column of the V$ARCHIVE_DEST fixed view on the primary database to identify each destination’s ID number.
Assume the current local destination is 1, and one of the remote standby destination IDs is 2. To identify which log files are missing at the standby destination, issue the following query:
SQL> SELECT LOCAL.THREAD#, LOCAL.SEQUENCE# FROM
2> (SELECT THREAD#, SEQUENCE# FROM V$ARCHIVED_LOG WHERE DEST_ID=1)
3> LOCAL WHERE
4> LOCAL.SEQUENCE# NOT IN
5> (SELECT SEQUENCE# FROM V$ARCHIVED_LOG WHERE DEST_ID=2 AND
6> THREAD# = LOCAL.THREAD#);

THREAD# SEQUENCE#
——— ———
1 12
1 13
1 14

18)Purge arch:
We have to make sure the archive logs files are purged safely or move to Tape drive or any other location in order to make space for new archive logs files in the Archive logs destination locations.

19)Recovery status:
In order to do recover make sure you are having latest archive logs,so that you can restore and do the recovery if required.

20) MY DATABASE HEALTH CHECK SCRIPT:
/* SCRIPT FOR MONITORING AND CHECKING HEALTH OF DATABASE-USEFUL FOR PRODUCTION DATABASES */

-- SHOWS RUNNING JOBS (quotes restored to ASCII)
select 'RUNNING JOBS', sid, job, instance from dba_jobs_running;
set lines 1000
-- SHOWS ARCHIVE LOG GENERATION DETAILS ON AN HOURLY AND DATE-WISE BASIS
select 'ARCHIVE LOG REPORT',to_char(first_time,'DD-MON-RR') "Date",
to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'99') " 00",
to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'99') " 01",
to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'99') " 02",
to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'99') " 03",
to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'99') " 04",
to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'99') " 05",
to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'99') " 06",
to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'99') " 07",
to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'99') " 08",
to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'99') " 09",
to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'99') " 10",
to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'99') " 11",
to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'99') " 12",
to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'99') " 13",
to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'99') " 14",
to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'99') " 15",
to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'99') " 16",
to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'99') " 17",
to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'99') " 18",
to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'99') " 19",
to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'99') " 20",
to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'99') " 21",
to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'99') " 22",
to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'99') " 23"
from v$log_history
group by to_char(first_time,'DD-MON-RR')
-- order by 2: column 1 is a constant literal here, so ordering by it left rows unsorted
order by 2
/
-- SESSIONS CURRENTLY BLOCKED (blocking_session is populated only while a session waits)
select 'SESSIONS BLOCKED',process,sid, blocking_session from v$session where blocking_session is not null;
-- WHICH SESSION IS BLOCKING WHICH SESSION
set lines 9999
set pages 9999
select s1.username || '@' || s1.machine
    || ' ( SID=' || s1.sid || ' ) is blocking '
    || s2.username || '@' || s2.machine || ' ( SID=' || s2.sid || ' ) ' AS blocking_status
from v$lock l1, v$session s1, v$lock l2, v$session s2
where s1.sid = l1.sid and s2.sid = l2.sid
  and l1.BLOCK = 1 and l2.request > 0
  and l1.id1 = l2.id1
  -- bug fix: original compared l2.id2 = l2.id2 (always true), which can pair
  -- unrelated locks; the holder and waiter must match on BOTH lock identifiers
  and l1.id2 = l2.id2 ;
-- Sessions with heavy block-change activity (> 10000 changes), busiest first
SELECT io.sid,
       se.username,
       io.block_gets,
       io.consistent_gets,
       io.physical_reads,
       io.block_changes
FROM v$sess_io io,
     v$session se
WHERE io.sid = se.sid
  AND io.block_changes > 10000
ORDER BY io.block_changes DESC;
-- show IO per session / CPU in seconds. sessionIOS.sql
set linesize 140
col spid for a6
col program for a35 trunc
select p.spid SPID,
       to_char(s.LOGON_TIME,'DDMonYY HH24:MI') date_login,
       s.username,
       decode(nvl(p.background,0),1,bg.description, s.program ) program,
       ss.value/100 CPU,
       physical_reads disk_io,
       (trunc(sysdate,'J')-trunc(logon_time,'J')) days,
       round((ss.value/100)/(decode((trunc(sysdate,'J')-trunc(logon_time,'J')),0,1,(trunc(sysdate,'J')-trunc(logon_time,'J')))),2) cpu_per_day
from V$PROCESS p, V$SESSION s, V$SESSTAT ss, V$SESS_IO si, V$BGPROCESS bg, V$STATNAME sn
where s.paddr = p.addr
  and ss.sid = s.sid
  -- join V$STATNAME rather than hard-coding statistic#=12; the statistic number
  -- for 'CPU used by this session' is version-dependent
  and ss.statistic# = sn.statistic#
  and sn.name = 'CPU used by this session'
  and si.sid = s.sid
  and bg.paddr(+) = p.addr
  and round((ss.value/100),0) > 10
order by 8;
-- SCRIPT TO IDENTIFY LONG RUNNING STATEMENTS
rem LONGOPS.SQL
rem Long Running Statements
rem Helmut Pfau, Oracle Deutschland GmbH
set linesize 120
col opname format a20
col target format a15
col units format a10
col time_remaining format 99990 heading Remaining[s]
col bps format 9990.99 heading [Units/s]
col fertig format 90.99 heading "complete[%]"
select sid,
       opname,
       target,
       sofar,
       totalwork,
       units,
       (totalwork-sofar)/time_remaining bps,
       time_remaining,
       sofar/totalwork*100 fertig
from v$session_longops
where time_remaining > 0
/
-- ACTIVE SESSIONS IN DATABASE
-- bug fix: username != 'NULL' compared against the literal string 'NULL' (and a
-- NULL username makes the predicate UNKNOWN anyway); use IS NOT NULL to exclude
-- background/unauthenticated sessions
select 'ACTIVE SESSION', sid, serial#, machine, osuser, username, status
from v$session
where username is not null and status = 'ACTIVE';
-- WHAT SQL A SESSION IS USING: all pieces of the current statement for &sid
set lines 9999
set pages 9999
SELECT s.sid,
       q.sql_text
FROM v$sqltext q,
     v$session s
WHERE q.address = s.sql_address
  AND s.sid = &sid
ORDER BY piece;

eg:SID=1844
I would like to add one more script which will tell me details regarding the Size of the Database used,occupied and available and Tablespace usage
details along with the hit ratios of various SGA components, which can be very helpful
to monitor the performance of the Databases.

Database_monitor.sql:

ttitle "1. :============== Tablespace Usage Information ==================:" skip 2
set linesize 140
col Total format 99999.99 heading "Total space(MB)"
col Used format 99999.99 heading "Used space(MB)"
col Free format 99999.99 heading "Free space(MB)"
break on report
-- COMPUTE must reference the column (alias) name, not the heading text,
-- otherwise SQL*Plus silently computes nothing
compute sum of Total on report
compute sum of Used on report
compute sum of Free on report
select a.tablespace_name, round(a.bytes/1024/1024,2) Total,
round( nvl( b.bytes,0)/1024/1024,2) Used,
round(nvl(c.bytes, 0)/1024/1024,2) Free ,
round(nvl(b.bytes,0)*100/nvl(a.bytes,0),2) "% Used"
from sys.sm$ts_avail a, sys.sm$ts_used b, sys.sm$ts_free c
where a.tablespace_name=b.tablespace_name(+)
and b.tablespace_name=c.tablespace_name(+);

ttitle "2. :============== Hit Ratio Information ==================:" skip 2
set linesize 80
clear columns
clear breaks
set pagesize 60 heading off termout off echo off verify off
REM
-- Library cache hit ratio: reloads relative to pins
col val1 new_val lib noprint
select 100*(1-(SUM(Reloads)/SUM(Pins))) val1
from V$LIBRARYCACHE;

ttitle off
-- Dictionary cache hit ratio: getmisses relative to gets
col val2 new_val dict noprint
select 100*(1-(SUM(Getmisses)/SUM(Gets))) val2
from V$ROWCACHE;

ttitle off
col val3 new_val phys_reads noprint
select Value val3
from V$SYSSTAT
where Name = 'physical reads';

ttitle off
col val4 new_val log1_reads noprint
select Value val4
from V$SYSSTAT
where Name = 'db block gets';

ttitle off
col val5 new_val log2_reads noprint
select Value val5
from V$SYSSTAT
where Name = 'consistent gets';

ttitle off
-- Buffer cache hit ratio computed from the three counters captured above
col val6 new_val chr noprint
select 100*(1-(&phys_reads / (&log1_reads + &log2_reads))) val6
from DUAL;

ttitle off
col val7 new_val avg_users_cursor noprint
col val8 new_val avg_stmts_exe noprint
select SUM(Users_Opening)/COUNT(*) val7,
SUM(Executions)/COUNT(*) val8
from V$SQLAREA;
ttitle off
set termout on
set heading off
ttitle -
center 'SGA Cache Hit Ratios' skip 2

select 'Data Block Buffer Hit Ratio : '||&chr db_hit_ratio,
' Shared SQL Pool ',
' Dictionary Hit Ratio : '||&dict dict_hit,
' Shared SQL Buffers (Library Cache) ',
' Cache Hit Ratio : '||&lib lib_hit,
' Avg. Users/Stmt : '||
&avg_users_cursor||' ',
' Avg. Executes/Stmt : '||
&avg_stmts_exe||' '
from DUAL;

ttitle "3. :============== Sort Information ==================:" skip 2

-- Disk vs memory sorts; DECODE guards against divide-by-zero when both are 0
select A.Value Disk_Sorts,
       B.Value Memory_Sorts,
       ROUND(100*A.Value/
       DECODE((A.Value+B.Value),0,1,(A.Value+B.Value)),2)
       Pct_Disk_Sorts
from V$SYSSTAT A, V$SYSSTAT B
where A.Name = 'sorts (disk)'
  and B.Name = 'sorts (memory)';

ttitle "4. :============== Database Size Information ==================:" skip 2

-- Three rows, in GB: total available, used, and free across all tablespaces
select sum(bytes/1024/1024/1024) Avail from sm$ts_avail
union all
select sum(bytes/1024/1024/1024) Used from sm$ts_used
union all
select sum(bytes/1024/1024/1024) Free from sm$ts_free;

Rman Transportable Tablespace and Plugin into RAC and ASM

Rman Transportable Tablespace and Plugin into RAC and ASM
By AVargas on Apr 02, 2007

This post describes the Rman Transportable Tablespace Command, how to use it to plug a set of tablespaces from a single instance into a RAC database based on ASM.

The purpose of the Rman ‘Transportable Tablespace’ command is to generate a Transportable Tablespace Set (TTS), in addition it generates a Metadata export and an Import Metadata Script. They are to be used lately to plug in the TTS on another database.

The Rman 'Transportable Tablespace' command has the advantage that tablespaces to be transported do not need to be in read-only mode on the source database; instead the datafiles are restored from a backupset into an auxiliary instance to create the TTS.

The exercise described on this post includes:

Create the Transportable Tablespace Set with Rman
Plug it into a RAC database and
Moving these tablespaces datafiles into ASM

The steps required to complete the task are:

1. Create an Rman Backup of the source database
2. Prepare an auxiliary directory for storing the Rman generated export and import files and a data directory for the resulting TTS
3. Prepare an Rman script to generate the TTS
4. Execute the Rman Script
5. Plug the TTS on the target database
6. Convert the transported tablespaces to ASM
7. Offline the transported tablespaces
8. Rename the datafiles to it’s new ASM destination
9. Online the transported tablespaces

On this example I’m using a small database named ‘whiteowl’ that has three test tablespaces that I will transport to a RAC database within ASM.

SQL> select tablespace_name from dba_tablespaces where tablespace_name like ‘TEST%’;

TABLESPACE_NAME
——————————
TEST2
TEST3
TEST4

SQL> select tablespace_name||’ ‘||
2 segment_name
3 from dba_segments
4 where tablespace_name like ‘TEST%’;

TABLESPACE_NAME||”||SEGMENT_NAME
——————————————————————————–
TEST2 TEST1
TEST3 TEST2
TEST4 TEST3

Execution Steps:

1. Create an Rman Backup of the source database

This is the backup script: Rman Single Instance Backup Script

2. Prepare an
auxiliary directory for storing the Rman generated export and import
files and a data directory for the resulting TTS

mkdir /asmtest/whiteowl/auxiliar/

This directory will be used to create on it the TTS, the export data pump and the import data pump scripts.

3. Prepare an Rman script to generate the TTS

Look at the time of your last backup to setup the until time clause.

run {
  # Build a transportable tablespace set from backups (tablespaces stay online
  # on the source); quotes restored to ASCII
  TRANSPORT TABLESPACE 'TEST2','TEST3','TEST4'
  AUXILIARY DESTINATION '/asmtest/whiteowl/auxiliar'
  DUMP FILE 'whiteowl_exp.dmp'
  EXPORT LOG 'whiteowl_tts.log'
  IMPORT SCRIPT 'whiteowl_imp.sql'
  TABLESPACE DESTINATION '/asmtest/whiteowl/auxiliar'
  UNTIL TIME "to_date('29-03-2007:16:57:00', 'dd-mm-yyyy:hh24:mi:ss')";
}

4. Execute the Script

{oracle} /oradisk/app01/oracle/scripts/av/rman [ractest1] > rman target / @rman_tts

While the script run it creates an auxiliar instance, to restore and recover the TTS:

On the auxiliar destination we got a directory containing the TTS and the import script:

{oracle} /asmtest/whiteowl/auxiliar [ractest1] > ls -ltr
total 12
drwxr-x— 4 oracle dba 4096 Apr 1 15:08 TSPITR_WHITEOWL_FQWD
-rw-r–r– 1 oracle dba 1428 Apr 1 15:12 whiteowl_tts.log
-rw-r–r– 1 oracle dba 2473 Apr 1 15:12 whiteowl_imp.sql

{oracle} /asmtest/whiteowl/auxiliar [ractest1] > ls -l TSPITR_WHITEOWL_FQWD/datafile/
total 307536
-rw-r—– 1 oracle dba 104865792 Apr 1 15:11 o1_mf_test2_30z87qw6_.dbf
-rw-r—– 1 oracle dba 104865792 Apr 1 15:11 o1_mf_test3_30z86w4w_.dbf
-rw-r—– 1 oracle dba 104865792 Apr 1 15:11 o1_mf_test4_30z87r52_.dbf

5. Plug the TTS on the target database

The final steps are to plug the tablespaces into the target database and move them to ASM, to plug the tablespaces in this case I did use the generated import data pump script:

{oracle} /vmasmtest/whiteowl/auxiliar [ractest1] > ./expscr
setenv ORACLE_HOME /oradisk/app01/oracle/product/10gDB
setenv PATH /oradisk/app01/oracle/product/10gDB/bin:/oradisk/app01/oracle/scripts/general:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/X11R6/bin:/root/bin:/oradisk/app01/oracle/scripts:/usr/local/maint/oracle:/crmdb/app01/oracle/product/db_scripts/RAC:/crmdb/app01/oracle/product/db_scripts
setenv ORACLE_SID racdbtst1
impdp avargas/avargas directory=imp_dp dumpfile= whiteowl_exp.dmp transport_datafiles= /vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test2_3119t5fs_.dbf, /vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test3_3119sn54_.dbf, /vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test4_3119t5p7_.dbf

Import: Release 10.2.0.1.0 – Production on Monday, 02 April, 2007 18:08:47
Copyright (c) 2003, 2005, Oracle. All rights reserved.

Connected to: Oracle Database 10g Enterprise Edition Release 10.2.0.1.0 – Production
With the Partitioning, Real Application Clusters, OLAP and Data Mining options
Master table “AVARGAS”.”SYS_IMPORT_TRANSPORTABLE_01″ successfully loaded/unloaded
Starting “AVARGAS”.”SYS_IMPORT_TRANSPORTABLE_01″: avargas/******** directory=imp_dp dumpfile= whiteowl_exp.dmp transport_datafiles= /vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test2_3119t5fs_.dbf, /vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test3_3119sn54_.dbf, /vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test4_3119t5p7_.dbf
Processing object type TRANSPORTABLE_EXPORT/PLUGTS_BLK
Processing object type TRANSPORTABLE_EXPORT/TABLE
Processing object type TRANSPORTABLE_EXPORT/POST_INSTANCE/PLUGTS_BLK
Job “AVARGAS”.”SYS_IMPORT_TRANSPORTABLE_01″ successfully completed at 18:09:14

6. Convert the plugged tablespaces to ASM

{oracle} /oradisk/app01/oracle/scripts/av/rman [ractest1] > rman target / nocatalog
Recovery Manager: Release 10.2.0.1.0 – Production on Mon Apr 2 18:56:40 2007
Copyright (c) 1982, 2005, Oracle. All rights reserved.
connected to target database: RACDBTST (DBID=519338572)
using target database control file instead of recovery catalog

RMAN> convert datafile ‘/vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test4_3119t5p7_.dbf’
2> format ‘+DATADG/racdbtst/datafile/o1_mf_test4_3119t5p7_.dbf’;
Starting backup at 02/04/2007 18:57:57
allocated channel: ORA_DISK_1
channel ORA_DISK_1: sid=131 instance=racdbtst1 devtype=DISK
allocated channel: ORA_DISK_2
channel ORA_DISK_2: sid=130 instance=racdbtst1 devtype=DISK
channel ORA_DISK_1: starting datafile conversion
input filename=/vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test4_3119t5p7_.dbf
converted datafile=+DATADG/racdbtst/datafile/o1_mf_test4_3119t5p7_.dbf
channel ORA_DISK_1: datafile conversion complete, elapsed time: 00:00:16
Finished backup at 02/04/2007 18:58:16

RMAN> convert datafile ‘/vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test3_3119sn54_.dbf’
2> format ‘+DATADG/racdbtst/datafile/o1_mf_test3_3119sn54_.dbf’;
Starting backup at 02/04/2007 19:01:46
using channel ORA_DISK_1
using channel ORA_DISK_2
channel ORA_DISK_1: starting datafile conversion
input filename=/vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test3_3119sn54_.dbf
converted datafile=+DATADG/racdbtst/datafile/o1_mf_test3_3119sn54_.dbf
channel ORA_DISK_1: datafile conversion complete, elapsed time: 00:00:16
Finished backup at 02/04/2007 19:02:02

RMAN> convert datafile ‘/vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test2_3119t5fs_.dbf’
2> format ‘+DATADG/racdbtst/datafile/o1_mf_test2_3119t5fs_.dbf’;
Starting backup at 02/04/2007 19:03:50
using channel ORA_DISK_1
using channel ORA_DISK_2
channel ORA_DISK_1: starting datafile conversion
input filename=/vmasmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test2_3119t5fs_.dbf
converted datafile=+DATADG/racdbtst/datafile/o1_mf_test2_3119t5fs_.dbf
channel ORA_DISK_1: datafile conversion complete, elapsed time: 00:00:08
Finished backup at 02/04/2007 19:03:58
RMAN>

After the convert command we can see the datafiles inside ASM:

{oracle} /home/oracle [ractest1] > 10asm
{oracle} /home/oracle [ractest1] > sts +ASM1
{oracle} /home/oracle [ractest1] > asmcmd
ASMCMD> cd datadg/racdbtst/datafile
ASMCMD> ls
ON_ASM.274.617123537
ON_ASM.280.617292277
ON_ASM.283.617125585
RECOP1.273.617292261
RECOP1.276.617125571
RECOP1.279.617123521
SYSAUX.261.606653697
SYSTEM.259.606653665
TBS_AUTO.275.617292277
TBS_AUTO.287.617123539
TBS_AUTO.288.617125577
TBS_MANUAL.272.617125575
TBS_MANUAL.284.617123519
TBS_MANUAL.286.617292261
TEST2.282.618865431
TEST3.264.618865309
TEST4.289.618865083
UNDOTBS1.260.606653693
UNDOTBS2.263.606653713
USERS.278.617292245
USERS.281.617125569
USERS.285.617123513
o1_mf_test2_3119t5fs_.dbf
o1_mf_test3_3119sn54_.dbf
o1_mf_test4_3119t5p7_.dbf
ASMCMD>

7. Offline the plugged tablespaces

Once the datafiles are converted to ASM using Rman we need to rename them to their new location; the first step for doing this is to take the tablespaces offline:

SQL> select file_name from dba_data_files;
FILE_NAME
———————————————————————
+DATADG/racdbtst/datafile/system.259.606653665
+DATADG/racdbtst/datafile/undotbs1.260.606653693
+DATADG/racdbtst/datafile/sysaux.261.606653697
+DATADG/racdbtst/datafile/undotbs2.263.606653713
+DATADG/racdbtst/datafile/users.278.617292245
+DATADG/racdbtst/datafile/recop1.273.617292261
+DATADG/racdbtst/datafile/tbs_manual.286.617292261
+DATADG/racdbtst/datafile/tbs_auto.275.617292277
+DATADG/racdbtst/datafile/on_asm.280.617292277
/asmtest/oradata/on_fs01.dbf
/asmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test4_3119t5p7_.dbf
/asmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test3_3119sn54_.dbf
/asmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test2_3119t5fs_.dbf
13 rows selected.

SQL> alter tablespace test2 offline;
Tablespace altered.

SQL> c/2/3/
1* alter tablespace test3 offline
SQL> /
Tablespace altered.

SQL> c/3/4/
1* alter tablespace test4 offline
SQL> /
Tablespace altered.

8. Rename the datafiles to it’s new ASM destination

SQL> alter database rename file ‘/asmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test4_3119t5p7_.dbf’
2 to ‘+DATADG/racdbtst/datafile/o1_mf_test4_3119t5p7_.dbf’;
Database altered.

SQL> alter database rename file ‘/asmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test3_3119sn54_.dbf’
2 to ‘+DATADG/racdbtst/datafile/o1_mf_test3_3119sn54_.dbf’;
Database altered.

SQL> alter database rename file '/asmtest/whiteowl/auxiliar/TSPITR_WHITEOWL_FNOU/datafile/o1_mf_test2_3119t5fs_.dbf'
2 to '+DATADG/racdbtst/datafile/o1_mf_test2_3119t5fs_.dbf';
Database altered.

9. Online the plugged tablespaces
SQL> alter tablespace test2 online;
Tablespace altered.
SQL> alter tablespace test3 online;

Tablespace altered.
SQL> alter tablespace test4 online;

Tablespace altered.

Now the tablespaces are available on the RAC Database.