All Scripts
All Scripts
----------------------------
-- Show the SQL currently associated with each logged-in session.
-- v$session must be joined to v$sqlarea on BOTH sql_address and
-- sql_hash_value to identify the shared-pool cursor uniquely.
-- Fixed: implicit comma join -> ANSI join; columns qualified.
SELECT ses.sid,
       ses.username,
       sq.optimizer_mode,
       sq.hash_value,
       sq.address,
       sq.cpu_time,
       sq.elapsed_time,
       sq.sql_text
FROM   v$session ses
       INNER JOIN v$sqlarea sq
               ON  ses.sql_hash_value = sq.hash_value
               AND ses.sql_address    = sq.address
WHERE  ses.username IS NOT NULL;
FOR RAC
kill sniped sessions in db ---- It will generate kill session statements for all
sessions whose v$session STATUS is 'SNIPED'
-------------------------------
-- Generate one ALTER SYSTEM KILL SESSION statement per sniped session;
-- spool/run the output to actually kill them.
SELECT 'alter system kill session ''' || sid || ',' || serial# || ''' immediate;'
FROM   v$session
WHERE  status = 'SNIPED';
dumpfile=test.dmp
logfile=EXPLOG:test.log
directory=SOURCE_DUMP
tables=dbatest.EMPTAB
exclude=statistics
SELECT * FROM
TABLE(DBMS_XPLAN.display_sql_plan_baseline(plan_name=>'SQL_PLAN_gbhrw1v44209a5b2f75
14'));
ADDRESS HASH_VALUE
--------------- ------------
C000007067F39FF0 4000666812
-- Enable Trace:
-- Top waiting SQL per user in the window [SYSDATE-1, SYSDATE-23/24]
-- (i.e. 24h ago to 23h ago). ASH wait_time + time_waited are in
-- microseconds, hence the /1000000 to seconds.
-- Fixed: implicit comma joins -> ANSI joins; positional ORDER BY 4 -> alias.
SELECT ash.user_id,
       du.username,
       sa.sql_text,
       SUM(ash.wait_time + ash.time_waited) / 1000000 AS ttl_wait_time_in_seconds
FROM   v$active_session_history ash
       INNER JOIN v$sqlarea sa
               ON ash.sql_id = sa.sql_id
       INNER JOIN dba_users du
               ON ash.user_id = du.user_id
WHERE  ash.sample_time BETWEEN SYSDATE - 1 AND SYSDATE - 23/24
       AND du.username NOT IN ('SYS','DBSNMP')
GROUP  BY ash.user_id, sa.sql_text, du.username
ORDER  BY ttl_wait_time_in_seconds DESC
/
set pagesize 50
set linesize 120
col sql_id format a15
col inst_id format '9'
col sql_text format a50
col module format a10
col blocker_ses format '999999'
col blocker_ser format '999999'
-- Blocking-session history from GV$ ASH over the last 24h..23h window.
-- Fixed: comma join -> ANSI join, and the gv$ views are now matched on
-- inst_id as well as sql_id (joining gv$ views on sql_id alone
-- cross-multiplies rows across RAC instances; DISTINCT was masking it).
SELECT DISTINCT
       a.sql_id,
       a.inst_id,
       a.blocking_session         AS blocker_ses,
       a.blocking_session_serial# AS blocker_ser,
       a.user_id,
       s.sql_text,
       a.module,
       a.sample_time
FROM   gv$active_session_history a
       INNER JOIN gv$sql s
               ON  a.sql_id  = s.sql_id
               AND a.inst_id = s.inst_id
WHERE  a.blocking_session IS NOT NULL
       AND a.user_id <> 0 -- exclude SYS user
       AND a.sample_time BETWEEN SYSDATE - 1 AND SYSDATE - 23/24
/
or
Generate report
SQL> @addmrpt.sql
-- Disable every trigger owned by the current schema.
BEGIN
  FOR trg IN (SELECT trigger_name FROM user_triggers) LOOP
    EXECUTE IMMEDIATE 'ALTER TRIGGER ' || trg.trigger_name || ' DISABLE';
  END LOOP;
END;
/
-- Verify again
-- List foreign-key constraints and whether each FK column is indexed;
-- '***No Index***' flags FK columns with no supporting index.
SELECT a.constraint_name                    AS cons_name,
       a.table_name                         AS tab_name,
       b.column_name                        AS cons_column,
       NVL(c.column_name, '***No Index***') AS ind_column
FROM   user_constraints a
       INNER JOIN user_cons_columns b
               ON a.constraint_name = b.constraint_name
       LEFT OUTER JOIN user_ind_columns c
               ON  b.column_name = c.column_name
               AND b.table_name  = c.table_name
WHERE  a.constraint_type = 'R'
ORDER  BY tab_name, cons_name;
or
asmcmd>mount DATA
or
asmcmd>umount DATA
cd $GRID_HOME/bin
cluvfy comp clocksync -n all
Observer means - Time sync between nodes are taken care by NTP
Active means - Time sync between nodes are taken care by CTSS
ASMCMD> lspwusr
Username sysdba sysoper sysasm
SYS TRUE TRUE TRUE
ASMSNMP TRUE FALSE FALSE -- >
$ asmcmd
$asmcmd
ASMCMD> spset +OCR_VOTING/CLUSTER/ASMPARAMETERFILE/spfileASM.ora
e.g
srvctl start database -d PRODB -o nomount
srvctl start database -d PRODB -o mount
srvctl start database -d PRODB -o open
Relocate a service
------------------------------
SYNTAX -
stop/start a service
-----------------------------------
SYNTAX:
---------
srvctl start service -d {DB_NAME} -s {SERVICE_NAME}
srvctl stop service -d {DB_NAME} -s {SERVICE_NAME}
EXAMPLE:
---------------
srvctl start service -d PREDB -s PRDB_SRV
srvctl stop service -d PREDB -s PRDB_SRV
Add/remove a service
---------------------------------------
ADDING A SERVICE:
--------------------
SYNTAX:
------------
srvctl add service -d {DB_NAME} -s {SERVICE_NAME} -r {"preferred_list"} -a
{"available_list"} [-P {BASIC | NONE | PRECONNECT}]
EXAMPLE:
---------------
srvctl add service -d PREDB -s PRDB_SRV -r "PREDB1,PREDB2" -a "PREDB2" -P BASIC
REMOVING A SERVICE:
------------------------------------------
SYNTAX:
-------------
srvctl remove service -d {DB_NAME} -s {SERVICE_NAME}
EXAMPLE:
--------
srvctl remove service -d PREDB -s PRDB_SRV
SRVM_TRACE=true
export SRVM_TRACE
or
$GRID_HOME/bin/olsnodes -c
$GRID_HOME/bin/ocrcheck
NAME IP_ADDRESS
--------------- ----------------
loypredbib0 172.16.3.193
loypredbib1 172.16.4.1
$GRID_HOME/bin/ocrconfig -showbackup
$GRID_HOME/bin/ocrconfig -manualbackup
-- Leaf or Hub
olsnodes -a
$GRID_HOME/bin/ocrcheck -local
--------------------------------------------------------------------------------
MEMBER CLUSTER INFORMATION
================================================================================
================================================================================
-- Load all cached plans for a sql_id into SQL plan baselines.
-- Fixed: the DECLARE keyword was missing, so the block was invalid.
DECLARE
  l_plans_loaded PLS_INTEGER;
BEGIN
  l_plans_loaded := DBMS_SPM.load_plans_from_cursor_cache(
                      sql_id => '&sql_id');
END;
/
-- Load one specific plan (sql_id + plan_hash_value) from the cursor
-- cache into a SQL plan baseline.
DECLARE
  plans_loaded PLS_INTEGER;
BEGIN
  plans_loaded := DBMS_SPM.load_plans_from_cursor_cache(
                    sql_id          => '&sql_id',
                    plan_hash_value => '&plan_hash_value');
END;
/
You can get the sql baseline from a sql_id from below command:
SELECT sql_handle, plan_name FROM dba_sql_plan_baselines WHERE signature IN
( SELECT exact_matching_signature FROM gv$sql WHERE sql_id='&SQL_ID');
select distinct
p.name sql_profile_name,
s.sql_id
from
dba_sql_profiles p,
DBA_HIST_SQLSTAT s
where
p.name=s.sql_profile and s.sql_id='&sql_id';
select distinct
p.name sql_profile_name,
s.sql_id
from
dba_sql_profiles p,
DBA_HIST_SQLSTAT s
where
p.name=s.sql_profile and s.sql_id='&sql_id';
-- Create a comprehensive SQL Tuning Advisor task for sql_id 12xca9smf3hfy
-- with a 500-second analysis budget; print the generated task id.
DECLARE
  l_task_id VARCHAR2(100);
BEGIN
  l_task_id := DBMS_SQLTUNE.create_tuning_task(
                 sql_id      => '12xca9smf3hfy',
                 scope       => DBMS_SQLTUNE.scope_comprehensive,
                 time_limit  => 500,
                 task_name   => '12xca9smf3hfy_tuning_task',
                 description => 'Tuning task1 for statement 12xca9smf3hfy');
  DBMS_OUTPUT.put_line('l_sql_tune_task_id: ' || l_task_id);
END;
/
-- Disable a SQL plan baseline (attribute ENABLED -> NO).
-- Fixed: ALTER_SQL_PLAN_BASELINE is a FUNCTION returning PLS_INTEGER;
-- calling it as a procedure raises PLS-00221, so capture the result.
DECLARE
  l_plans_altered PLS_INTEGER;
BEGIN
  l_plans_altered := DBMS_SPM.alter_sql_plan_baseline(
                       sql_handle      => 'SQL_SQL_5818768f40d7be2a',
                       plan_name       => 'SQL_PLAN_aaxsg8yktm4h100404251',
                       attribute_name  => 'enabled',
                       attribute_value => 'NO');
END;
/
-- Un-fix a SQL plan baseline (attribute FIXED -> NO).
-- Fixed: ALTER_SQL_PLAN_BASELINE is a FUNCTION returning PLS_INTEGER;
-- calling it as a procedure raises PLS-00221, so capture the result.
DECLARE
  l_plans_altered PLS_INTEGER;
BEGIN
  l_plans_altered := DBMS_SPM.alter_sql_plan_baseline(
                       sql_handle      => 'SQL_SQL_5818768f40d7be2a',
                       plan_name       => 'SQL_PLAN_aaxsg8yktm4h100404251',
                       attribute_name  => 'fixed',
                       attribute_value => 'NO');
END;
/
Truncate partitions
------------------------------
- SYNTAX : ALTER TABLE <SCHEMA_NAME>.<TABLE_NAME> TRUNCATE PARTITION <
PARTITION_NAME> < UPDATE GLOBAL INDEXES(optional)>;
--- NOTE: UPDATE GLOBAL INDEXES is required if GLOBAL INDEX is present
Table altered.
6 rows selected.
Table altered.
Rename a partition
------------------------------------
ALTER TABLE employee RENAME PARTITION TAB3 TO TAB4;
-- Generate ALTER TABLE ... MOVE PARTITION statements relocating every
-- partition of &TABLE_NAME into tablespace TS_USERS (parallel, nologging).
SELECT 'ALTER TABLE '||TABLE_OWNER ||'.'||table_name||' MOVE PARTITION '||
       partition_name||' TABLESPACE TS_USERS PARALLEL(DEGREE 4) NOLOGGING;'
FROM   dba_tab_partitions
WHERE  table_name  = '&TABLE_NAME'
  AND  table_owner = '&SCHEMA_NAME';
-- Gather stats for one partition, approximating global stats from
-- partition-level stats (GRANULARITY = APPROX_GLOBAL AND PARTITION).
-- Fixed: a comma was missing after the partname argument, which made
-- the call invalid; also corrected the "PARTITOIN" typo.
BEGIN
  DBMS_STATS.GATHER_TABLE_STATS (
    ownname     => 'SCOTT',
    tabname     => 'TEST',         --- TABLE NAME
    partname    => 'TEST_JAN2016', --- PARTITION NAME
    method_opt  => 'for all indexed columns size 1',
    granularity => 'APPROX_GLOBAL AND PARTITION',
    degree      => 8);
END;
/
Lock/unlock statistics
--------------------------------
--- Lock statistics
EXEC DBMS_STATS.lock_schema_stats('SCOTT');
EXEC DBMS_STATS.lock_table_stats('SCOTT', 'TEST');
EXEC DBMS_STATS.lock_partition_stats('SCOTT', 'TEST', 'TEST_JAN2016');
-- Unlock statistics
EXEC DBMS_STATS.unlock_schema_stats('SCOTT');
EXEC DBMS_STATS.unlock_table_stats('SCOTT', 'TEST');
EXEC DBMS_STATS.unlock_partition_stats('SCOTT', 'TEST', 'TEST_JAN2016');
-- Export stats
-- Import stats
FOR INDEX
set lines 200
col owner for a12
col table_name for a21
-- When were statistics last updated for &TABLE_NAME (from stats history)?
SELECT owner, table_name, stats_update_time
FROM   dba_tab_stats_history
WHERE  table_name = '&TABLE_NAME';
exec dbms_stats.publish_pending_stats('SCHEMA_NAME',null);
exec dbms_stats.set_table_prefs('SCOTT','EMP','PUBLISH','FALSE');
Check the publish preference status
exec dbms_stats.SET_SCHEMA_PREFS('DBATEST','PUBLISH','FALSE');
SET_INDEX_STATS
GET_INDEX_STATS
-- FOR DATABASE
SET_DATABASE_PREFS
exec DBMS_STATS.ALTER_STATS_HISTORY_RETENTION(60);
FALSE
Delete Statistics
--------------------------------
Delete statistics of the complete database
EXEC DBMS_STATS.delete_database_stats;
EXEC DBMS_STATS.delete_schema_stats('DBACLASS');
-- Delete dictionary statistics in db
EXEC DBMS_STATS.delete_dictionary_stats;
Upgrade statistics in DB
----------------------------------
-- If we are importing stats table from higher version to lower version,
then before importing in the database, we need to upgrade the stats table.
NAME TIME
--------------------------------
-----------------------------------------------
GRP_1490100093811 21-MAR-17 03.41.33.000000000 PM
shutdown immediate;
startup mount;
Flashback a procedure/package
-------------------------------------------
--- Like tables, if you have dropped or recreated a package/procedure, by using
flashback we can get the procedure code as it was before the drop.
OBJECT_ID
----------
2201943
Backup all archivelogs known to controlfile and delete them once backed up
Backup archivelogs known to controlfile, including the logs which haven't been
backed up yet
auxiliary destination – Location where all the related files for auxiliary instance
will be placed
datapump destination – Location where the export dump of the table will be placed
run
{
  # Restore archivelog sequences 7630-7640 to /dumparea/ through a
  # NetWorker (NSR) tape channel connected to CRM_DB.
  # Fixed: curly "smart" quotes replaced with straight quotes, and the
  # CONNECT keyword (split across lines by a paste artifact) rejoined.
  allocate channel t1 type SBT_TAPE parms
    'ENV=(NSR_SERVER=nwwerpw,NSR_CLIENT=tsc_test01,NSR_DATA_VOLUME_POOL=DD086A1)'
    connect sys/****@CRM_DB;
  set archivelog destination to '/dumparea/';
  restore archivelog from sequence 7630 until sequence 7640;
  release channel t1;
}
-- Check status:
$ rman checksyntax
Copyright (c) 1982, 2014, Oracle and/or its affiliates. All rights reserved.
Eg:
-To create a user which will prompt for a new password upon login:
Alter a user
--------------------------------
-- Change password of an user
-- Unlock/lock a user
-- Make sure account expiry, so upon login, it will ask for new one
USERNAME DEFAULT_TABLESPACE
----------------------- ------------------------------
SCOTT USERS
USERNAME DEFAULT_TABLESPACE
----------------------- ------------------------------
SCOTT DATATS
TABLESPACE_NAME UTILIZIED_SPACE
QUOTA_ALLOCATED
------------------------------ ---------------------------
--------------------------
USERS .0625
1024
Conn / as sysdba
SQL >alter user TEST2 grant connect through TEST1;
User altered.
Table created.
OWNER
------
TEST2
NOTE:
-- ALTER PROFILE:
no rows selected
Add/Drop/Alter datafile
-----------------------------------
-- Add a datafile to a tablespace
-- Resize a datafile
-- Drop a datafile:
Add/drop Tempfile
-----------------------------------
-- Add tempfile to temp tablespace:
-- Drop tempfile:
Rename/move a datafile
------------------------------------------
For oracle 12c, move or rename of datafile can be done online with one line:
For 11g, you have to follow the steps below: (It needs downtime for the datafile)
mv /home/oracle/app/oracle/oradata/cdb1/testin1.dbf /home/oracle/producing1.dbf
-- Rename at db level
CON_NAME
------------------------------
PDB1
SYS_CONTEXT('USERENV','CON_NAME')
-----------------------------------
PDB1
Session altered.
SQL> startup
Pluggable Database opened.
SQL> shutdown
Pluggable Database closed.
CON_NAME
------------------------
CDB$ROOT
Manage dbms_schedulerjobs
----------------------------------
Enable a job
EXECUTE DBMS_SCHEDULER.ENABLE('SCOTT.MONTHLYBILLING');
Disable a job
EXECUTE DBMS_SCHEDULER.DISABLE('SCOTT.MONTHLYBILLING');
EXECUTE DBMS_SCHEDULER.STOP_JOB('SCOTT.MONTHLYBILLING');
EXECUTE DBMS_SCHEDULER.DROP_JOB('SCOTT.MONTHLYBILLING');
EXECUTE DBMS_SCHEDULER.RUN_JOB('SCOTT.MONTHLYBILLING');
-- Create a named schedule firing daily at 11:30.
-- Fixed: added the '/' terminator so SQL*Plus executes the block
-- (consistent with the other PL/SQL blocks in this file).
BEGIN
  DBMS_SCHEDULER.CREATE_SCHEDULE (
    schedule_name   => 'DAILYBILLINGJOB',
    start_date      => SYSTIMESTAMP,
    repeat_interval => 'FREQ=DAILY;BYHOUR=11; BYMINUTE=30',
    comments        => 'DAILY BILLING JOB');
END;
/
-- Create a program
-- Create a scheduler program wrapping stored procedure DAILYJOB.BILLINGPROC.
-- Fixed: a comma was missing after the program_action argument, which
-- made the call invalid; also added the '/' terminator.
BEGIN
  DBMS_SCHEDULER.CREATE_PROGRAM (
    program_name        => 'DAILYBILLINGJOB',
    program_type        => 'STORED_PROCEDURE',
    program_action      => 'DAILYJOB.BILLINGPROC',
    number_of_arguments => 0,
    enabled             => TRUE,
    comments            => 'DAILY BILLING JOB');
END;
/
EXECUTE DBMS_SCHEDULER.ENABLE('DAILYBILLINGJOB_RUN');
Drop a schedule
-----------------------------
-- Drop a schedule; FORCE => TRUE disassociates dependent jobs first.
-- Fixed: added the '/' terminator so SQL*Plus executes the block.
BEGIN
  DBMS_SCHEDULER.DROP_SCHEDULE(
    schedule_name => 'DAILYBILLINGJOB_SCHED',
    force         => TRUE);
END;
/
-- Store an OS credential (user "oracle") for scheduler external jobs.
BEGIN
  DBMS_CREDENTIAL.create_credential (
    credential_name => 'ORACLEOSUSER',
    username        => 'oracle',
    password        => 'oracle@98765',
    database_role   => NULL,
    windows_domain  => NULL,
    comments        => 'Oracle OS User',
    enabled         => TRUE);
END;
/
-- Create an external-script job running ttest.2.sh every minute under OS
-- credential ORACLEOSUSER. Trailing '-' is the SQL*Plus line-continuation
-- character; do not insert comments between the continued lines.
exec dbms_scheduler.create_job(-
job_name=>'myscript4',-
job_type=>'external_script',-
job_action=>'/export/home/oracle/ttest.2.sh',-
enabled=>true,-
START_DATE=>sysdate,-
REPEAT_INTERVAL =>'FREQ=MINUTELY; byminute=1',-
auto_drop=>false,-
credential_name=>'ORACLEOSUSER');
exec dbms_scheduler.copy_job('SCOTT.MY_JOB_2','DBACLASS.MY_JOB_2');
743,DBATEST
--- connect to the owner , and get the definition of the job
set serveroutput on
-- Print the stored submission text of dbms_job number 743 via
-- DBMS_JOB.USER_EXPORT. Must be run while connected as the job owner
-- (USER_EXPORT only sees the current user's jobs).
SQL> DECLARE
callstr VARCHAR2(500);
BEGIN
dbms_job.user_export(743, callstr);
dbms_output.put_line(callstr);
END;
/
Enable/disable/drop a dbms_job
-----------------------------------
-- Get the job number from dba_jobs.
-- Disable a job
EXEC DBMS_IJOB.BROKEN(jobno,TRUE);
-- Enable a job
EXEC DBMS_IJOB.BROKEN(jobno,FALSE);
--REMOVE A DBMS_JOBS:
EXEC DBMS_IJOB.remove(jobno) ;
Check DB role(PRIMARY/STANDBY)
----------------------------------------
-- Report Data Guard role/mode details for the current database.
SELECT database_role,
       db_unique_name AS instance,
       open_mode,
       protection_mode,
       protection_level,
       switchover_status
FROM   v$database;
-- Drop column
-- Rename column
The output of this query returns the number of chained rows in that table.
Create public database link LINK_PUB connect to system identified by oracle using
'PRODB';
connect scott/tiger
connect scott/tiger
NOTE - Private database link can be dropped only by the owner of the database link
-- Total index segment size (GB) for a table's indexes.
-- Fixed: the original selected a bare "bytes" expression alongside
-- GROUP BY idx.table_name with no aggregate -> ORA-00979; wrap in SUM.
-- NOTE(review): consider also matching seg.owner = idx.owner to avoid
-- cross-schema segment-name collisions — confirm against your data.
SELECT idx.table_name,
       SUM(seg.bytes)/1024/1024/1024 AS index_size_gb
FROM   dba_segments seg
       INNER JOIN dba_indexes idx
               ON idx.index_name = seg.segment_name
WHERE  idx.table_name = '&TABLE_NAME'
GROUP  BY idx.table_name
ORDER  BY idx.table_name;
Create/drop synonyms
------------------------------
-- Create public synonym
-- Drop synonym
-- Estimate space needed for a proposed index without creating it,
-- via DBMS_SPACE.CREATE_INDEX_COST (uses the table's current stats).
SET SERVEROUTPUT ON
DECLARE
  used_bytes  NUMBER(10);
  alloc_bytes NUMBER(10);
BEGIN
  DBMS_SPACE.CREATE_INDEX_COST(
    'create index DBACLASS.INDEX1 on DBACLASS.EMP(EMPNO)',
    used_bytes,
    alloc_bytes);
  DBMS_OUTPUT.PUT_LINE('Used Bytes MB: ' || round(used_bytes/1024/1024));
  DBMS_OUTPUT.PUT_LINE('Allocated Bytes MB: ' || round(alloc_bytes/1024/1024));
END;
/
-- Compiling a package;
-- Compiling a procedure:
-- Compiling a view:
-- Compiling a function:
shutdown immediate;
startup;
shutdown immediate;
cd $ORACLE_HOME/rdbms/lib
make -f ins_rdbms.mk uniaud_on ioracle
startup
VALUE
-----------------
TRUE
-- Enable policy:
-- Relocate the standard audit trail (AUD$) into tablespace AUDIT_DATA.
BEGIN
  DBMS_AUDIT_MGMT.SET_AUDIT_TRAIL_LOCATION(
    audit_trail_type           => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD,
    audit_trail_location_value => 'AUDIT_DATA');
END;
/
-- Query to view new tablespace
-- Decrypt a column:
NOTE - This activity will take time, according to the table size and it might block
other session.
Better to take downtime before doing this activity.
-- Remove the data-redaction policy from SCOTT.CREDIT_CARD_TAB.
BEGIN
  DBMS_REDACT.drop_policy (
    object_schema => 'SCOTT',
    object_name   => 'CREDIT_CARD_TAB',
    policy_name   => 'CREDIT_HIDE_POLICY');
END;
/
-- Enable Trace:
Connecting to (DESCRIPTION=(ADDRESS=(PROTOCOL=IPC)(KEY=LISTENER_TEST)))
LISTENER_TEST parameter "trc_level" set to admin
The command completed successfully
Create public database link LINK_PUB connect to system identified by oracle using
'PRODB';
connect scott/tiger
connect scott/tiger
drop database link LINK_PRIV;
NOTE - Private database link can be dropped only by the owner of the database link
-- Restart scan_listenr
-- Reload listener
exec DBMS_NETWORK_ACL_ADMIN.ASSIGN_ACL('scott_utl_mail.xml','*',25);
exec DBMS_NETWORK_ACL_ADMIN.UNASSIGN_ACL('scott_utl_mail.xml','*',25);
-- Drop ACL:
Set local_listener in db
-------------------------------------------
-- Make sure a listener is already running on that port (i.e. 1524 here)
alter system set LOCAL_LISTENER='(ADDRESS = (PROTOCOL = TCP)(HOST = 162.20.217.15)
(PORT = 1524))' scope=both;
alter system register;
-- status of oms
cd $AGENT_HOME/bin
cd $OMS_HOME/bin
./emctl config oms -list_repos_details
cd $OMS_HOME/bin
./emctl status oms -details
cd $AGENT_HOME/bin
./emcli get_targets
./emcli get_target_types
./emcli list_plugins_on_server
./emcli list_plugins_on_agent
./emctl config oms -change_repos_pwd -use_sys_pwd -sys_pwd -new_pwd < new sysman
password>
-- Restart oms
cat parfile=compressed.par
dumpfile=schema.dmp
logfile=tables.log
directory=EXPDIR
FULL=Y
compression=ALL
cat parfile=parallel.par
dumpfile=parallel_%U.dmp
logfile=tables.log
directory=EXPDIR
schemas=PROD_DATA
parallel=4
expdp parfile=parallel.par
cat parfile=schema.par
dumpfile=schema.dmp
logfile=tables.log
directory=EXPDIR
schemas=PROD_DATA,
DEV_DATA
-- Run expdp
expdp parfile=schema.par
cat parfile=tables.par
dumpfile=tables.dmp
logfile=tables.log
directory=EXPDIR
tables=PROD_DATA.EMPLOYEE,
PROD_DATA.DEPT,
DEV_DATA.STAGING
expdp parfile=tables.par
cat expdp_query.par
dumpfile=test.dmp
logfile=test1.log
directory=TEST
tables=dbaclass.EMP_TAB
QUERY=dbaclass.EMP_TAB:"WHERE created > sysdate -40"
PARFILE SAMPLE:
dumpfile=test.dmp
logfile=test1.log
directory=TEST
tables=DBACLASS.DEP_TAB
sqlfile=emp_tab.sql
TABLE_EXISTS_ACTION
Action to take if imported object already exists.
Valid keywords are: APPEND, REPLACE, [SKIP] and TRUNCATE.
TABLE_EXISTS_ACTION=SKIP:
This is the default option with impdp, i.e. if the table exists, it will skip
that table.
TABLE_EXISTS_ACTION=APPEND:
while importing the table, if the table exists in the database, then it will append
the data on top the existing data in the table.
TABLE_EXISTS_ACTION=TRUNCATE:
While importing the table, if the table exists in database, it will truncate the
table and load the data.
While importing, if the table exists in database, then it will drop it and recreate
it from the dump
These two options can be used in both expdp or impdp to exclude or include,
particular objects or object_types:
dumpfile=test.dmp
logfile=test1.log
directory=TEST
exclude=TABLE:"IN ('EMP_TAB','DEPT')"
schemas=DBACLASS
dumpfile=test.dmp
logfile=test1.log
directory=TEST
EXCLUDE=SCHEMA:"IN ('WMSYS', 'OUTLN')"
dumpfile=FULL.dmp
logfile=full.log
directory=exp_dir
directory=DBATEST
INCLUDE=TABLE,INDEX
Directory created.
Directory created.
parfile content
dumpfile=DIR1:test_%U.dmp,
DIR2:test_%U.dmp
logfile=test.log
directory=DIR1
parallel=2
tables=raj.test
export parfile
dumpfile=test.dmp
logfile=EXPLOG:test.log
directory=SOURCE_DUMP
tables=dbatest.EMPTAB
exclude=statistics
parfile content:
dumpfile=asset_%U.dmp
logfile=asset.log
directory=VEN
parallel=32
cluster=N