Sunday, March 20, 2011

Basic DB commands

create spfile from pfile or vice versa

CREATE SPFILE [='SPFILE-NAME']FROM PFILE[='PFILE-NAME'];


----------------------------------------------------------

starting up with specific pfile or spfile

STARTUP PFILE/SPFILE=$ORACLE_HOME/dbs/initdb01.ora

---------------------------------------------------------
to change the state from nomount to mount

alter database mount;

---------------------------------------------------------

to open the database as read only

startup mount;


ALTER DATABASE db01 OPEN READ ONLY;

---------------------------------------------------------

to have the db in restricted mode

STARTUP RESTRICT;(if db is down)

or

ALTER SYSTEM ENABLE RESTRICTED SESSION; (if db is open)

---------------------------------------------------------

to kill the already connected user sessions

ALTER SYSTEM KILL SESSION 'integer1,integer2';

int1 is sid and int2 is serial# from v$session view

---------------------------------------------------------

enabling trace at session level

ALTER SESSION SET SQL_TRACE = TRUE (through the session itself)

exec dbms_system.SET_SQL_TRACE_IN_SESSION(sid, serial#, TRUE); (through some other session)

--------------------------------------------------------

enabling trace at instance level

SQL_TRACE = TRUE

--------------------------------------------------------

to create database command


SQL> create database db01
logfile
GROUP 1 ('/u01/oradata/db01/log_01_db01.rdo') SIZE 15M,
GROUP 2 ('/u01/oradata/db01/log_02_db01.rdo') SIZE 15M,
GROUP 3 ('/u01/oradata/db01/log_03_db01.rdo') SIZE 15M
datafile '/u01/oradata/db01/system_01_db01.dbf' SIZE 100M
undo tablespace UNDO
datafile '/u01/oradata/db01/undo_01_db01.dbf' SIZE 40M
default temporary tablespace TEMP
tempfile '/u01/oradata/db01/temp_01_db01.dbf' SIZE 20M
extent management local uniform size 128k
character set AL32UTF8
national character set AL16UTF16
set time_zone = 'America/New_York'
;


multiplexing control file using spfile

Alter the SPFILE

SQL> ALTER SYSTEM SET control_files =
'$HOME/ORADATA/u01/ctrl01.ctl',
'$HOME/ORADATA/u02/ctrl02.ctl' SCOPE=SPFILE;

Shutdown normal:

Create additional control files
cp $HOME/ORADATA/u01/ctrl01.ctl
$HOME/ORADATA/u02/ctrl02.ctl

startup



---------------------------------------------------------

multiplexing control file using pfile

shutdown normal

cp control01.ctl .../DISK3/control02.ctl

CONTROL_FILES = (/DISK1/control01.ctl,/DISK3/control02.ctl)

startup

---------------------------------------------------------

Checkpoints can be forced

Setting FAST_START_MTTR_TARGET parameter

ALTER SYSTEM CHECKPOINT;

---------------------------------------------------------

to force log switch

ALTER SYSTEM SWITCH LOGFILE;

---------------------------------------------------------

Adding Online Redo Log Groups

ALTER DATABASE ADD LOGFILE GROUP 3
('$HOME/ORADATA/u01/log3a.rdo',
'$HOME/ORADATA/u02/log3b.rdo')
SIZE 1M;
---------------------------------------------------------

Adding Online Redo Log Members

ALTER DATABASE ADD LOGFILE MEMBER
'$HOME/ORADATA/u04/log1c.rdo' TO GROUP 1,
'$HOME/ORADATA/u04/log2c.rdo' TO GROUP 2,
'$HOME/ORADATA/u04/log3c.rdo' TO GROUP 3;

---------------------------------------------------------

Dropping Online Redo Log Groups

ALTER DATABASE DROP LOGFILE GROUP 3;

--------------------------------------------------------

Dropping Online Redo Log Members

ALTER DATABASE DROP LOGFILE MEMBER '$HOME/ORADATA/u04/log3c.rdo';

--------------------------------------------------------

Clearing online redo log files:

ALTER DATABASE CLEAR [unarchived] LOGFILE '$HOME/ORADATA/u01/log2a.rdo';

---------------------------------------------------------

to see logfile groups

select * from v$log;

---------------------------------------------------------

to see logfile members

select * from v$logfile;

---------------------------------------------------------

to create tablespace

CREATE TABLESPACE userdata
DATAFILE '/u01/oradata/userdata01.dbf' SIZE 100M
AUTOEXTEND ON NEXT 5M MAXSIZE 200M;

--------------------------------------------------------

to create locally managed tablespace

CREATE TABLESPACE userdata
DATAFILE '/u01/oradata/userdata01.dbf' SIZE 500M
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 256K;

---------------------------------------------------------

to create dictionary managed tablespace

CREATE TABLESPACE userdata
DATAFILE '/u01/oradata/userdata01.dbf' SIZE 500M
EXTENT MANAGEMENT DICTIONARY
DEFAULT STORAGE ( initial 1M NEXT 1M );

---------------------------------------------------------

changing default storage setting

ALTER TABLESPACE userdata MINIMUM EXTENT 2M;

ALTER TABLESPACE userdata DEFAULT STORAGE (INITIAL 2M NEXT 2M MAXEXTENTS 999 );

---------------------------------------------------------

to create undo tablespace

CREATE UNDO TABLESPACE undo1 DATAFILE '/u01/oradata/undo101.dbf' SIZE 40M;

---------------------------------------------------------

to create temporary tablespace

CREATE TEMPORARY TABLESPACE temp TEMPFILE '/u01/oradata/temp01.dbf' SIZE 500M EXTENT MANAGEMENT LOCAL UNIFORM SIZE 10M;

----------------------------------------------------------

to change default temporary tablespace

alter database default temporary tablespace TEMP1;

-----------------------------------------------------------

to check which is the default temporary tablespace

select * from database_properties where
PROPERTY_NAME='DEFAULT_TEMP_TABLESPACE';

-----------------------------------------------------------

to drop temporary tablespace

drop tablespace TEMP1 including contents and datafiles;

-----------------------------------------------------------

To take a tablespace offline:

ALTER TABLESPACE userdata OFFLINE;

ALTER TABLESPACE userdata ONLINE;

-----------------------------------------------------------

to take a tablespace read only

ALTER TABLESPACE userdata READ ONLY;

----------------------------------------------------------

dropping tablespace

alter tablespace userdata offline;

and then

DROP TABLESPACE userdata INCLUDING CONTENTS AND DATAFILES [cascade constraints];

-----------------------------------------------------------

to enable autoextend

ALTER DATABASE DATAFILE '/u01/oradata/userdata02.dbf' SIZE 200M AUTOEXTEND ON NEXT 10M MAXSIZE 500M;

------------------------------------------------------------

to resize a datafile in a tablespace

ALTER DATABASE DATAFILE '/gp09/u01/oradata/arwsoap/users01.dbf' RESIZE 650M;

------------------------------------------------------------

adding a datafile to tablespace

ALTER TABLESPACE app_data ADD DATAFILE '/u01/oradata/userdata03.dbf' SIZE 200M;

-------------------------------------------------------------

moving a datafile while db is open

The tablespace must be offline.
The target data files must exist.
ALTER TABLESPACE userdata RENAME DATAFILE '/u01/oradata/userdata01.dbf' TO '/u02/oradata/userdata01.dbf';

-------------------------------------------------------------

moving a datafile when db is not open but mounted

The database must be mounted.
The target data file must exist.
ALTER DATABASE RENAME FILE '/u01/oradata/system01.dbf' TO '/u03/oradata/system01.dbf';


-------------------------------------------------------------
to see list of tablespaces
select * from v$tablespace;
select * from dba_tablespaces;

-------------------------------------------------------------
to see the list of datafiles

select * from v$datafile;
select * from dba_data_files;
--------------------------------------------------------------

to see list of temporary files

select * from dba_temp_files;
select * from v$tempfile;

---------------------------------------------------------------

enabling auto segment space management
CREATE TABLESPACE data02
DATAFILE '/u01/oradata/data02.dbf' SIZE 5M
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 64K
SEGMENT SPACE MANAGEMENT AUTO;


--------------------------------------------------------------
to see list of segments

select * from dba_segments;

--------------------------------------------------------------

to see list of extents

select * from dba_extents;

--------------------------------------------------------------

to get info about free space

select * from dba_free_space;

---------------------------------------------------------------


request read consistency for a read-only transaction

SET TRANSACTION READ ONLY;

----------------------------------------------------------------

request read consistency for a transaction performing DML

SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;

----------------------------------------------------------------

to set undo tablespace

ALTER SYSTEM SET undo_tablespace = UNDOTBS;

----------------------------------------------------------------


setting undo tablespace while creating db

CREATE DATABASE db01
. . .
UNDO TABLESPACE undo1 DATAFILE 'undo1db01.dbf'
SIZE 20M AUTOEXTEND ON

---------------------------------------------------------------
creating undo tablespace

CREATE UNDO TABLESPACE undo1
DATAFILE 'undo1db01.dbf' SIZE 20M;

----------------------------------------------------------------

dropping undo tablespace

DROP TABLESPACE UNDOTBS2;

----------------------------------------------------------------

creating table

CREATE TABLE hr.departments(
department_id NUMBER(4),
department_name VARCHAR2(30),
manager_id NUMBER(6),
location_id NUMBER(4))
STORAGE(INITIAL 200K NEXT 200K
PCTINCREASE 0 MINEXTENTS 1 MAXEXTENTS 5)
TABLESPACE data;


-------------------------------------------------------------------
create global temporary tables

CREATE GLOBAL TEMPORARY TABLE
hr.employees_temp
AS SELECT * FROM hr.employees;

• ON COMMIT DELETE ROWS to specify that rows are only visible within the
transaction
• ON COMMIT PRESERVE ROWS to specify that rows are visible for the entire session

-----------------------------------------------------------------

changing storage parameters of table

ALTER TABLE hr.employees
PCTFREE 30
PCTUSED 50
STORAGE(NEXT 500K
MINEXTENTS 2
MAXEXTENTS 100);

------------------------------------------------------------------

Manually Allocating Extents

ALTER TABLE hr.employees ALLOCATE EXTENT(SIZE 500K DATAFILE '/DISK3/DATA01.DBF');

-----------------------------------------------------------------

moving table from one tablespace to other

ALTER TABLE hr.employees MOVE TABLESPACE data1;

-------------------------------------------------------------------
ANALYZING TABLE TO POPULATE STATISTICS

analyze table tablename estimate statistics;

--------------------------------------------------------------------------------------------

truncating table

TRUNCATE TABLE hr.employees;

-------------------------------------------------------------------

drop a table

DROP TABLE hr.department CASCADE CONSTRAINTS;

------------------------------------------------------------------

Removing a column from a table:

ALTER TABLE hr.employees DROP COLUMN comments CASCADE CONSTRAINTS CHECKPOINT 1000;

Use the following statement to resume an interrupted drop operation:

ALTER TABLE hr.employees DROP COLUMNS CONTINUE;


--------------------------------------------------------------------

Mark a column as unused

ALTER TABLE hr.employees SET UNUSED COLUMN comments CASCADE CONSTRAINTS;

--------------------------------------------------------------------

Drop unused columns

ALTER TABLE hr.employees DROP UNUSED COLUMNS CHECKPOINT 1000;

--------------------------------------------------------------------

Continue to drop column operation

ALTER TABLE hr.employees DROP COLUMNS CONTINUE CHECKPOINT 1000;

----------------------------------------------------------------------

identifying tables with unused columns

SELECT * FROM dba_unused_col_tabs;

-----------------------------------------------------------------------

Creating Normal B-Tree Indexes

CREATE INDEX hr.employees_last_name_idx
ON hr.employees(last_name)
PCTFREE 30
STORAGE(INITIAL 200K NEXT 200K
PCTINCREASE 0 MAXEXTENTS 50)
TABLESPACE indx;


------------------------------------------------------------------------

Creating Bitmap Indexes

CREATE BITMAP INDEX orders_region_id_idx
ON orders(region_id)
PCTFREE 30
STORAGE(INITIAL 200K NEXT 200K
PCTINCREASE 0 MAXEXTENTS 50)
TABLESPACE indx;


------------------------------------------------------------------------


Changing Storage Parameters for Indexes


ALTER INDEX wms.WMS_SELECTION_CRITERIA_TXN_PK1 STORAGE(maxextents unlimited);

change the maxextents of the table itself if you get a primary key error

ORA-25176: storage specification not permitted for primary key
------------------------------------------------------------------------

Allocating and Deallocating Index Space

ALTER INDEX orders_region_id_idx
ALLOCATE EXTENT (SIZE 200K
DATAFILE '/DISK6/indx01.dbf');

ALTER INDEX orders_id_idx
DEALLOCATE UNUSED;

-------------------------------------------------------------------------

Rebuilding Indexes

ALTER INDEX orders_region_id_idx REBUILD
TABLESPACE indx02;

for minimal table locking

ALTER INDEX orders_id_idx REBUILD ONLINE;

--------------------------------------------------------------------------

Coalescing Indexes

ALTER INDEX orders_id_idx COALESCE;

-------------------------------------------------------------------------

Checking Index Validity

ANALYZE INDEX orders_region_id_idx
VALIDATE STRUCTURE;

-------------------------------------------------------------------------
Dropping Indexes

DROP INDEX hr.departments_name_idx;

-------------------------------------------------------------------------

Identifying Unused Indexes

ALTER INDEX summit.orders_id_idx
MONITORING USAGE;

ALTER INDEX summit.orders_id_idx
NOMONITORING USAGE;

--------------------------------------------------------------------------

information on the columns indexed

select * from DBA_IND_COLUMNS;
-------------------------------------------------------------------------

information on function based indexes

select * from DBA_IND_EXPRESSIONS;

--------------------------------------------------------------------------


to check object usage

select * from V$OBJECT_USAGE;

--------------------------------------------------------------------------

Defining Constraints While Creating a Table

CREATE TABLE hr.employee(
id NUMBER(7)
CONSTRAINT employee_id_pk PRIMARY KEY
DEFERRABLE
USING INDEX
STORAGE(INITIAL 100K NEXT 100K)
TABLESPACE indx,
last_name VARCHAR2(25)
CONSTRAINT employee_last_name_nn NOT NULL,
dept_id NUMBER(7))
TABLESPACE users;

--------------------------------------------------------------------------

Enabling Constraints

ALTER TABLE hr.departments
ENABLE VALIDATE/NOVALIDATE CONSTRAINT dept_pk;

--------------------------------------------------------------------------

TO GET INFO ABOUT CONSTRAINTS

select * from DBA_CONSTRAINTS;

select * from dba_cons_columns;

----------------------------------------------------------------------------

alter default profile

ALTER PROFILE DEFAULT LIMIT
PASSWORD_LIFE_TIME 60
PASSWORD_GRACE_TIME 10
PASSWORD_REUSE_TIME 1800
PASSWORD_REUSE_MAX UNLIMITED
FAILED_LOGIN_ATTEMPTS 3
PASSWORD_LOCK_TIME 1/1440
PASSWORD_VERIFY_FUNCTION verify_function;

----------------------------------------------------------------------------

creating profile

CREATE PROFILE grace_5 LIMIT
FAILED_LOGIN_ATTEMPTS 3
PASSWORD_LOCK_TIME UNLIMITED
PASSWORD_LIFE_TIME 30
PASSWORD_REUSE_TIME 30
PASSWORD_VERIFY_FUNCTION verify_function
PASSWORD_GRACE_TIME 5;

-----------------------------------------------------------------------------

dropping profile

DROP PROFILE developer_prof;

DROP PROFILE developer_prof CASCADE;

------------------------------------------------------------------------------


Enabling Resource Limits

ALTER SYSTEM SET RESOURCE_LIMIT=TRUE;

-----------------------------------------------------------------------------

to see profile

select * from dba_profiles;

-----------------------------------------------------------------------------

creating user

CREATE USER aaron
IDENTIFIED BY soccer
DEFAULT TABLESPACE data
TEMPORARY TABLESPACE temp
QUOTA 15m ON data
PASSWORD EXPIRE;

------------------------------------------------------------------------------

Changing User Quota on Tablespace

ALTER USER aaron
QUOTA 0 ON USERS;

--------------------------------------------------------------------------------

Dropping a User

DROP USER aaron;

DROP USER aaron CASCADE;(if user is connected)

--------------------------------------------------------------------------------

to see tablespace quotas

select * from DBA_TS_QUOTAS;

-------------------------------------------------------------------------------

Granting System Privileges

GRANT CREATE SESSION TO emi;

GRANT CREATE SESSION TO emi WITH ADMIN OPTION;

lists of system privs

INDEX CREATE ANY INDEX
ALTER ANY INDEX
DROP ANY INDEX
TABLE CREATE TABLE
CREATE ANY TABLE
ALTER ANY TABLE
DROP ANY TABLE
SELECT ANY TABLE
UPDATE ANY TABLE
DELETE ANY TABLE
SESSION CREATE SESSION
ALTER SESSION
RESTRICTED SESSION
TABLESPACE CREATE TABLESPACE
ALTER TABLESPACE
DROP TABLESPACE
UNLIMITED TABLESPACE
-------------------------------------------------------------------------------


Revoking System Privileges

REVOKE CREATE TABLE FROM emi;

-------------------------------------------------------------------------------

Granting Object Privileges

GRANT EXECUTE ON dbms_output TO jeff;

GRANT UPDATE ON emi.customers TO jeff WITH
GRANT OPTION;

---------------------------------------------------------------------------------

Revoking Object Privileges

REVOKE SELECT ON emi.orders FROM jeff [cascade constraints];

---------------------------------------------------------------------------------
lists system privileges granted to users and roles

select * from DBA_SYS_PRIVS;

----------------------------------------------------------------------------------

lists the privileges that are currently available to the user


select * from SESSION_PRIVS;
-----------------------------------------------------------------------------------

lists all grants on all objects in the database

select * from DBA_TAB_PRIVS;

----------------------------------------------------------------------------------

describes all object column grants in the database.

select * from DBA_COL_PRIVS;

-----------------------------------------------------------------------------------

Protecting the Audit Trail

AUDIT delete ON sys.aud$ BY ACCESS;

To protect the audit trail from unauthorized deletions, only the DBA should have the
DELETE_CATALOG_ROLE role.

-----------------------------------------------------------------------------------

enabling audit trail

AUDIT_TRAIL=true in init file

-------------------------------------------------------------------------------------

Statement auditing

AUDIT TABLE;

--------------------------------------------------------------------------------------

Privilege auditing

AUDIT create any trigger;

--------------------------------------------------------------------------------------

Schema object auditing

AUDIT SELECT ON emi.orders;

-------------------------------------------------------------------------------------

to see Default audit options

select * from ALL_DEF_AUDIT_OPTS;

---------------------------------------------------------------------------------------

to see Statement auditing options

select * from DBA_STMT_AUDIT_OPTS;

--------------------------------------------------------------------------------------

to see Privilege auditing options

select * from DBA_PRIV_AUDIT_OPTS;

----------------------------------------------------------------------------------------

to see Schema object auditing options

select * from DBA_OBJ_AUDIT_OPTS;


--------------------------------------------------------------------------------------

more audit views

DBA_AUDIT_TRAIL All audit trail entries
DBA_AUDIT_EXISTS Records for AUDIT EXISTS/NOT EXISTS
DBA_AUDIT_OBJECT Records concerning schema objects
DBA_AUDIT_SESSION All connect and disconnect entries
DBA_AUDIT_STATEMENT Statement auditing records

---------------------------------------------------------------------------------------

Creating Roles

CREATE ROLE oe_clerk;

CREATE ROLE hr_clerk
IDENTIFIED BY bonus;

CREATE ROLE hr_manager
IDENTIFIED EXTERNALLY;

-----------------------------------------------------------------------------------------

Predefined Roles

DELETE_CATALOG_ROLE DELETE privileges on
data dictionary tables

EXECUTE_CATALOG_ROLE EXECUTE privilege on
data dictionary packages

----------------------------------------------------------------------------------------

Assigning Roles

GRANT oe_clerk TO scott;

GRANT hr_clerk TO hr_manager;

GRANT hr_manager TO scott WITH ADMIN
OPTION;

-----------------------------------------------------------------------------------------

Establishing Default Roles

ALTER USER scott
DEFAULT ROLE hr_clerk, oe_clerk;

ALTER USER scott DEFAULT ROLE ALL;

ALTER USER scott DEFAULT ROLE ALL EXCEPT
hr_clerk;

ALTER USER scott DEFAULT ROLE NONE;

-------------------------------------------------------------------------------------------


Enabling and Disabling Roles

SET ROLE hr_clerk;

SET ROLE oe_clerk IDENTIFIED BY order;

SET ROLE ALL EXCEPT oe_clerk;

------------------------------------------------------------------------------------------

Removing Roles from Users

REVOKE oe_clerk FROM scott;

REVOKE hr_manager FROM PUBLIC;

-------------------------------------------------------------------------------------------
Removing Roles


DROP ROLE hr_manager;

-------------------------------------------------------------------------------------------

views related to roles


DBA_ROLES All roles that exist in the database
DBA_ROLE_PRIVS Roles granted to users and roles
ROLE_ROLE_PRIVS Roles that are granted to roles
DBA_SYS_PRIVS System privileges granted to users
and roles
ROLE_SYS_PRIVS System privileges granted to roles
ROLE_TAB_PRIVS Object privileges granted to roles
SESSION_ROLES Roles that the user currently has
enabled

--------------------------------------------------------------------------------------------
to make max extents unlimited

alter table
modify lob ()
(storage (maxextents unlimited) );


alter table ZPB.AW$ZPBDATA44 modify lob (AWLOB) storage(maxextents unlimited);


--------------------------------------------------------------------------------------------


enabling trace option 2 (through the Forms UI)


Enable SQL*Trace by choosing Help > Diagnostics > Trace > Trace with binds
Disable SQL*Trace by choosing Help > Diagnostics > Trace > Trace off


enabling trace option 3 (through a profile option)

a. Please deliver a complete trace file created in the following manner:

First enable trace for a specific user:
- Bring up the Core application - System Administrator responsibility
- Move to Profile/System
- Check off the USER box - and enter your username that you are using when getting the error
- Search on the following profile option - 'Initialization SQL Statement - Custom'

Please set this at the user level with the following string:
BEGIN FND_CTL.FND_SESS_CTL('','', '', 'TRUE','','ALTER SESSION SET
TRACEFILE_IDENTIFIER='||''''||'5447333.993' ||''''||' EVENTS ='||''''||' 10046 TRACE NAME CONTEXT
FOREVER, LEVEL 12 '||''''); END;
(Cut and paste the above sql statement against the initialization sql - Custom Profile at the
User Level )

This must be one complete line of text - so may be best to copy the string into notepad prior to
putting it into the profile.

b. Once the profile is set, please change responsibilities and directly go and reproduce the
problem.

To locate the trace file, use the following sql in sqlplus:
select name, value from v$parameter where name like 'user_dump_dest';
- The value is the location of the trace file on the database server
- Please move to this directory and search for the file having 5447333.993 in the filename
- This is the trace file created - please tkprof and upload BOTH the raw and tkprof trace file to
metalink.

c. Ensure that the profile option is unset before exiting the Application.
This is a very important step. If this is not unset, unnecessary trace files will be generated.

3) The customer should normally work with the instance till the issue happens again and then the
customer updates the SR. Once the issue is reproduced and the SR has been updated by the customer,
OnDemand Operations should provide the trace/audit info that allows support to identify which
process is performing the DDL on HZ_CUST_ACCT_SITES_ALL

--------------------------------------------------------------------------------------------


TO CHANGE SCHEDULE OF DBA_JOBS

exec dbms_job.change(job=>8687,WHAT => NULL,NEXT_DATE => TO_DATE('12-SEP-2006 18:00','DD-MON-YYYY HH24:MI'),INTERVAL => 'TRUNC(SYSDATE) + 18/24 + 1');

--------------------------------------------------------------------------------------------



to refresh materialized view

alter materialized view MWAC_AR_CUSTOMER_PROFILE_MV refresh complete;

--------------------------------------------------------------------------------------------


to know status of iAS services and version


bash-2.05$ opmnctl status

Processes in Instance: auohssgds56_TSGD2I.auohssgds56.oracleoutsourcing.com
-------------------+--------------------+---------+---------
ias-component | process-type | pid | status
-------------------+--------------------+---------+---------
LogLoader | logloaderd | N/A | Down
dcm-daemon | dcm-daemon | N/A | Down
WebCache | WebCache | 8127 | Alive
WebCache | WebCacheAdmin | 18888 | Alive
OC4J | home | 18889 | Alive
OC4J | OC4J_BI_Forms | 18890 | Alive
HTTP_Server | HTTP_Server | 18891 | Alive
Discoverer | ServicesStatus | 18893 | Alive
Discoverer | PreferenceServer | 18899 | Alive
DSA | DSA | N/A | Down

bash-2.05$


--------------------------------------------------------------------------------------------


TO PUT A TABLE OR INDEX IN A PARTICULAR POOL

create index indexname
storage (buffer_pool keep...);

alter index indexname
storage (buffer_pool recycle);

--------------------------------------------------------------------------------------------

EXPORT OPTIONS



exp system/manager full=y log=exp_full_091806.log file=/dev/null volsize=0

exp system/change_on_install log=C:\exp_system.log file=C:\export_system.dmp owner=system



---------------------------------------------------------------------------------------------
TO CHECK FORMS IS IN SERVLET OR SOCKET MODE

grep -i serverurl $FORMS60_WEB_CONFIG_FILE


IF THE OUTPUT IS EMPTY THEN SOCKET MODE
IF SERVLET URLS APPEAR THEN SERVLET MODE

-----------------------------------------------------------------------------------


to check size of allocated shared memory segments on OS level

ipcs -m -p -a

-----------------------------------------------------------------------------------

to check a port is being used

netstat -a | grep 10015



Oracle Application Object Library - Version: 11.5.8 to 11.5.10
Information in this document applies to any platform.
Goal: This is a guide to extract all the information you want to know in the following scenarios :


1. You have identified a process PID ( Process Identifier ) in Operating System which is consuming lot of CPU/Memory.
And you want to relate it to someone from FND_USER. Or you are interested in finding out exactly to which
Database SID (Session Identifier) this process is connected & exactly what is going on there.

2. You want to list all f60webmx process and want to know what they are doing, and/or which user it belongs to

since you suspect an identified PID to be a run away process.

3. You want to know whether a particular Database Session (SID) is from Forms or Self-Service

Web Applications(SSWA) or something else.

An Example of a Classical Problematic Situation
========================================
You have only 1 user logged into Application and can see 2 f60webmx process at the Operating System through the following command $ netstat -a|grep
It confirms the users PC name and the user is logged into the Application.

+ However, the port numbers in the netstat command do not correspond to any identifiable process by
$ ps -ef|grep f60webmx
+ Neither v$process nor v$session shows this process ID found at the O/S level.
+ In Enterprise Manager there is no Operating System PID with that value

The following is a cut&paste from the command prompt

List of Forms Server processes:
Users PID Parent stt StTime Terminal TimeOn Processes
oracle 14624 1 0 05:00:35 ? 0:00 f60srvm em=myhost port=9099 mode=socket exe=f60webmx

List of Thin Client processes:
oracle 23806 14624 25 08:52:17 ? 107:40 f60webmx webfile=5,9,myhost

In the above the parent is PID 14624, the child one is PID 23806, which has used over 107 minutes and most of a CPU.

Solution: First of all check whether the process is still consuming CPU, and if so, is it a runaway process? Is it hogging a whole CPU?
For example using `top` on a 4 processor box, 25% CPU utilization means it's a runaway process because the single threaded f60webmx process can only run on 1 CPU at a time. The following command will help you identify the culprit PIDs : $ps -aef | grep f60webmx | cut -c 10-20,48- | tee proc.txt

Now Start by using the following queries to try and identify the end user, find out what they are doing.

#1. select user_name, fnd.user_id from fnd_user fnd, icx_sessions icx
where icx.user_id = fnd.user_id and icx.last_connect > sysdate - 1/24 and icx.user_id != '-1';

#2. Check the last wait event from the SQL in the following example :
The first output shows that one user from machine 100.0.0.1 is connected to Apps and is using forms.
The 2ND one shows corresponding to the OS f60webmx PID 19721 what activities are going on.

$ ps -ef | grep f60webmx
applmgr 14940 1 0 Aug26 ? 00:00:00 f60srvm em myhost_9000_PROD port 9000 mode socket exe f60webmx
applmgr 14942 14940 0 Aug26 ? 00:00:00 f60webmx webfile=5,0,myhost_9000_PROD
applmgr 14968 1 0 Aug26 ? 00:00:00 d2lc60 myhost 9100 0 1 f60webmx
applmgr 19721 19720 0 07:24 ? 00:00:00 /oracle/prodappl/fnd/11.5.0/bin/f60webmx server webfile=HTTP-0,0,0,100.0.0.1
applmgr 20059 18060 0 07:27 pts/6 00:00:00 grep f60webmx

SQL> column userinfo heading "ORACLE/OS User" format a25
SQL> column terminal heading "Term" format a6
SQL> column process heading "Parent|Process ID" format a10
SQL> column "sid (audsid)" format a20
SQL> column spid heading "Shadow|Process ID" format a10
SQL> column event heading "Waiting" format a30
SQL> select s.username||' '||s.osuser||' ('||decode(s.terminal,'unknown','?','UNKNOWN','?',s.terminal) ||')' userinfo,
s.sid||','|| s.serial# ||' ('|| s.audsid ||')' "sid (audsid)", p.spid, w.event , w.p1 from
v$session s, v$process p ,v$session_wait w
where p.addr = s.paddr and s.sid = w.sid and s.process = '&form_process_id' ;

Enter value for form_process_id: 19721
old 5: and s.process = '&form_process_id'
new 5: and s.process = '19721'

Shadow
ORACLE/OS User sid (audsid) Process ID
------------------------- -------------------- ----------
Waiting P1
------------------------------ ----------
APPS applmgr () 22,21485 (615657) 27872
SQL*Net message from client 1952673792

Note down the value of "Shadow|Process ID" from the output.
Now Run
SQL> select user_name from fnd_user where user_id in ( select user_id
from fnd_logins where process_SPID='&shadow_process_id');

For repetitive purpose use the following stored procedure to create a table to store the os pid, the user_id & user_name
from fnd_user, and the shadow spid. with the following code.

Create Table:
===============
execute immediate 'create table OS_TRANSLATE_APPS_USERS
( MY_OS_PID varchar2(12), MY_USER_ID number(15), MY_USER_NAME varchar2(40), MY_SHADOW_PID varchar2(15))';

Load Table:
=============
insert into os_translate_apps_users
( select distinct s.process, fu.user_id, fu.user_name, p.spid
from v$session s, v$process p ,v$session_wait w, fnd_user fu
where p.addr = s.paddr and
s.sid = w.sid and
fu.user_id != 0 and
fu.user_id in
( select user_id from fnd_logins where process_SPID = p.spid));

#3. Check If the DB session is already present or the process may be just spinning.
#4. Also check what was the last sql executed by that session.

If you are having trouble tracing it back to the end user, and in a big global company it may not be easy,
you can try the following to get more diagnostic info -

1) Use `lsof` to get a list of open files for the f60webmx process. That will tell what .fmx's it has open.
If all spinning processes have the same form open, it narrows down the problem a bit.

2) Terminate the process with a core file using `kill -3 `.
To start with, keep the signal handler enabled - FORMS60_CATCHTERM=1 or undefined (the default).
Then you should get an f60webmx_dump file which will tell the current form, trigger etc, as well as the stack trace.
Repeat for several spinning processes until a pattern emerges.

3) Repeat step 2), but with the signal handler disabled, i.e. FORMS60_CATCHTERM=0.
Now you should just get a core file, rather than the f60webmx_dump file generated by the signal handler.
Follow Note 1812.1 to extract the stack trace. Sometimes the stack trace generated by the signal handler and written to f60webmx_dump is not accurate, so manually extracting from the core file will give a more reliable stack.

For 2) and 3), f60webmx must be relinked with link_debug=y, otherwise the debug symbols are stripped and the stack trace just gives up the memory address.

Some Preventive Measures
====================
It is possible that users are not shutting down the Forms Applications at the end of the day. In that case take advantage of
forms60_timeout parameter , in conjunction with heartbeat parameter.
Refer to Note:269884.1 on appropriate setting for your instance.

Note: When the f60webmx process is terminated, the client process is not notified, and therefore no message is displayed on the client side. For the user, it appears that their session is hung. The user must close all browser windows to restart the
application.
Refer to Note:16728.1 - Monitoring Session IDLE_TIMES.

Keep in mind that each f60webmx process may have multiple database sessions, but 1 database process.
The actual f60webmx process should consume very little CPU while waiting for a long running query when running in blocking mode. Apply the cancel query patches Patch:3407944 & Patch:2974236
- they have fixed all the issues at the last couple of sites.
With cancel query enabled (i.e. non-blocking mode) it will consume a bit more CPU because of the client polling mechanism, but you should still only be talking about a few seconds of CPU time for a query that takes several minutes. It was a lot worse prior to the fix for Internal Bug:2588453 in Forms 6.0.8.23, but that still would not account for anything like 107 minutes. If you really want to use cancel query you should be on a minimum of Forms 6.0.8.23.

Finally for the 3rd point described in the Goal section You can use
Note:233871.1 : Oracle Application Object Library Active Users Data Collection Test
As an alternative, if you do not want to run the whole script , you can pick out the individual
queries to list forms users , SSWA users , and concurrent users.

SSWA Users :
============
('SELECT (select user_name from fnd_user u where u.USER_ID=i.user_id) "User Name",
(select description from fnd_user u where u.USER_ID=i.user_id) "User Description",
decode(i.responsibility_id,NULL,''Navigator'',
(select responsibility_name from fnd_responsibility_tl r where r.responsibility_id=i.responsibility_id and
r.application_id=i.responsibility_application_id and r.language=USERENV(''LANG''))) "Responsibility",
org_id "Organization Id",
(select f.function_name from fnd_form_functions f where f.function_id=i.function_id) "Function Name",
to_char(first_connect,''DD-MON-YYYY HH24:MI'') "Login Time",
to_char(last_connect,''DD-MON-YYYY HH24:MI'') "Last Hit Time",
to_number(nvl(FND_PROFILE.VALUE (''ICX_SESSION_TIMEOUT''),limit_time*60)) "Session TimeOut(in Min)" ,
nls_date_language "NLS Date Language", nls_numeric_characters "NLS Numeric Char",
nls_sort "NLS Sort", nls_territory "NLS Territory", counter "No of Hits Made",limit_connects "No of Hits Allowed"
FROM icx_sessions i WHERE disabled_flag=''N'' AND ((last_connect + decode(FND_PROFILE.VALUE(''ICX_SESSION_TIMEOUT''),NULL,limit_time,
FND_PROFILE.VALUE(''ICX_SESSION_TIMEOUT'')/60)/24) > sysdate AND COUNTER < LIMIT_CONNECTS)
ORDER BY 1,last_connect', 'Active Self Service Web Application Users','Y','Y',NULL) ;

Forms Users :
=============
('SELECT usr.user_name "User Name", usr.description "User Description",
( select rtl.RESPONSIBILITY_NAME from applsys.fnd_responsibility_tl rtl
where rtl.responsibility_id=r.responsibility_id and rtl.application_id=r.resp_appl_id and
rtl.language=USERENV(''LANG'')) "Responsibility Name",
decode(vsess.module, NULL, ''FNDSCSGN-Navigator'',vsess.module || '' - '' ||
(select ft.user_form_name from applsys.fnd_login_resp_forms f, applsys.fnd_form_tl ft where f.audsid = vsess.audsid
and f.form_id=ft.form_id and f.FORM_APPL_ID=ft.application_id and ft.LANGUAGE=USERENV(''LANG'') and f.start_time=
(select max(start_time) from applsys.fnd_login_resp_forms where audsid=f.audsid))) "Form Name",
decode(vsess.module, NULL, ''Application Object Library'',
(select fa.application_name from applsys.fnd_application_tl fa, applsys.fnd_login_resp_forms f where
f.audsid = vsess.audsid and fa.application_id=f.FORM_APPL_ID and fa.language=USERENV(''LANG'') and
f.start_time=(select max(start_time) from applsys.fnd_login_resp_forms where audsid=f.audsid))) "Application Name",
to_char(l.start_time, ''DD-MON-YYYY HH24:MI'') "Start Time",
vsess.osuser "OS Username", vsess.process "F60WEBMX PID",
to_number(vproc.spid) "OS Oracle PID", vsess.sid "Oracle Session Id",
vsess.serial# "V$SESSION Serial #", vproc.pid "Oracle PID",
substr(vsess.machine,1,64) "Machine Name",
round(wait.seconds_in_wait/60,2) "Wait Minute", vsess.audsid AUDSID
FROM applsys.fnd_logins l, applsys.fnd_user usr, v$process vproc, v$session vsess,
V$SESSION_WAIT wait, applsys.fnd_login_responsibilities r
WHERE l.user_id = usr.user_id AND vproc.addr = vsess.paddr AND l.spid = vsess.process
AND wait.sid=vsess.sid AND l.pid = vproc.pid AND ( l.pid, l.start_time ) IN
( SELECT pid, MAX(start_time) FROM fnd_logins WHERE end_time IS NULL AND terminal_id !=''Concurrent'' GROUP BY pid ) AND r.login_id=l.login_id AND r.end_time is NULL
ORDER BY 1,vproc.pid','Users Logged Into Forms', 'Y','Y',null);

Concurrent Users :
================
'SELECT ( SELECT SUBSTR ( fu.user_name, 1, 20 ) FROM fnd_user fu WHERE fu.user_id = cr.requested_by )
"User_Name", TO_CHAR ( NVL ( cr.actual_start_date, cr.requested_start_date ),''DD-MON-YYYY HH24:MI'' ) "Start_Time", cr.request_id, cr.oracle_process_id spid, vp.pid pid, to_number(cr.os_process_id) "OS Process Id",
cr.oracle_session_id AUDSID, vs.sid sid, vs.serial# serial#,
( SELECT SUBSTR ( cp.concurrent_program_name || '' - '' || cpl.user_concurrent_program_name, 1, 40 )
FROM fnd_concurrent_programs cp, fnd_concurrent_programs_tl cpl
WHERE cp.application_id = cr.program_application_id
AND cp.concurrent_program_id = cr.concurrent_program_id
AND cpl.application_id = cr.program_application_id
AND cpl.concurrent_program_id = cr.concurrent_program_id
AND cpl.language= USERENV(''LANG'')) "Concurrent_Program"
FROM fnd_concurrent_requests cr, v$process vp, v$session vs
WHERE cr.phase_code NOT IN (''I'',''C'') AND cr.status_code NOT IN (''U'',''X'',''D'',''E'',''I'',''C'')
AND cr.oracle_process_id = vp.spid(+) AND cr.oracle_session_id = vs.audsid(+)
ORDER BY 1,2,3';




==========================================================================================

Ideally
the UNDO_RETENTION parameter setting should be around the value of the longest running query in
the database. This can be got
from the query:
SQL> select max(maxquerylen) from v$undostat;

If the output of this query is more or less equal to the value of UNDO_RETENTION parameter,
then no issues. If its too less then
the UNDO_RETENTION needs to be increased. Along with that increase we should note whether
sufficient space is available in the
UNDO TABLESPACE which can be derived from the formula :

-- Estimate the UNDO tablespace size (in MB) needed to honour UNDO_RETENTION:
--   UR  = undo_retention setting, in seconds (from v$parameter)
--   UPS = undo blocks generated per second, averaged over v$undostat history
--   DBS = db_block_size in bytes; the "+ (DBS * 24)" term adds block overhead
Select ((( UR * ( UPS * DBS)) + (DBS * 24))/1024)/1024 AS MB
FROM
(SELECT VALUE AS UR FROM V$PARAMETER WHERE NAME='undo_retention'),
(Select (sum(undoblks)/SUM(((end_time-begin_time)*86400))) as UPS from v$undostat),
(Select value as DBS from v$parameter where name='db_block_size');


==========================================================================================

flashback query

Flashback Query Feature in Oracle9i:

CHECK LIST:
- Database should be in Automatic Undo Management mode.
- Run the c:\oracle\ora92\rdbms\rdbmstran.sql script as the SYS user.
SQL>@c:\oracle\ora92\rdbms\rdbmstran.sql
- Grant the EXECUTE privilege on DBMS_FLASHBACK to the particular user
SQL>grant execute on dbms_flashback to bala;

Connect to the Particular User(bala):

8.00 pm SQL>conn bala/bala

Do Some DML Operations and Commit the Transaction:

8.01 pm SQL> delete from emp where ename='KING';

1 row deleted.

8.04 pm SQL> commit;

Commit complete.

8.05 pm SQL> select ename from emp;

ENAME
----------
SMITH
ALLEN
WARD
JONES
MARTIN
BLAKE
CLARK
SCOTT
TURNER
ADAMS
JAMES

ENAME
----------
FORD
MILLER

13 rows selected.

KING is not found in this select Query Result.

Now Execute the DBMS_FLASHBACK.enable_at_time Procedure for go to Previous state of Your Database:(In this Example the Database State will be going to 5 min before from the current system time.) i.e 8:02 pm. At 8:02 pm emp table has King Record. Now you can see the King's record Even though deleted.

8:07 pm SQL> exec sys.dbms_flashback.enable_at_time(sysdate - (5/(24*60)));

PL/SQL procedure successfully completed.

SQL> select ename from emp;

ENAME
----------
SMITH
ALLEN
WARD
JONES
MARTIN
BLAKE
CLARK
SCOTT
KING
TURNER
ADAMS

ENAME
----------
JAMES
FORD
MILLER

14 rows selected.

Now You can not do any DML Operation in the current session until you disable the DBMS_FLASHBACK.

SQL> insert into emp(ename) values('bala');
insert into emp(ename) values('bala')
*
ERROR at line 1:
ORA-08182: operation not supported while in Flashback mode.

So Disable it first.

SQL> exec dbms_flashback.disable;

PL/SQL procedure successfully completed.

SQL> insert into emp(ename) values('bala');
1 row inserted.

By,
Balakumar N,
balakumar@qsoftindia.com
http://www.balakumar.tk/
Write Your Comments in my GUEST BOOK.


==============================================================

PA checklist: change temp tables action

Change datafile properties make maxsize 1800M and Autoextend on for:
=======================================================================
SQL> select FILE_NAME,TABLESPACE_NAME,BYTES/(1024*1024)
"Size",MAXBYTES/(1024*1024) "Maxsize",AUTOEXTENSIBLE from dba_temp_files;

FILE_NAME
--------------------------------------------------------------------------------
TABLESPACE_NAME Size Maxsize AUT
------------------------------ ---------- ---------- ---
/pnexti/oradata/data02/temp01.dbf
TEMP 300 1800 YES

SQL> select TABLESPACE_NAME,INITIAL_EXTENT/(1024*1024)
"initial",NEXT_EXTENT/(1024*1024) "Next",MAX_EXTENTS "max",
PCT_INCREASE,STATUS,EXTENT_MANAGEMENT,SEGMENT_SPACE_MANAGEMENT from
dba_tablespaces where TABLESPACE_NAME='TEMP';

TABLESPACE_NAME initial Next max PCT_INCREASE
------------------------------ ---------- ---------- ---------- ------------
TEMP 2048 2048 0


SQL> select * from database_properties where
PROPERTY_NAME='DEFAULT_TEMP_TABLESPACE';

PROPERTY_NAME
------------------------------
PROPERTY_VALUE
--------------------------------------------------------------------------------
DESCRIPTION
--------------------------------------------------------------------------------
DEFAULT_TEMP_TABLESPACE
TEMP
ID of default temporary tablespace


SQL> create temporary tablespace TEMP1 tempfile
'/pnexti/oradata/data02/temp02.dbf' size 300M autoextend on maxsize
1800M extent management local uniform size 128K;

Tablespace created.

SQL> alter database default temporary tablespace TEMP1;

Database altered.

SQL> drop tablespace TEMP including contents and datafiles;

Tablespace dropped.

SQL> create temporary tablespace TEMP tempfile
'/pnexti/oradata/data02/temp01.dbf' size 300M reuse autoextend on
maxsize 1800M extent management local uniform size 128K;

Tablespace created.


alter tablespace TEMP add tempfile '/pcpusi/oradata/data02/temp39.dbf'
size 200M reuse autoextend on next 2M maxsize 1800M;



SQL> alter database default temporary tablespace TEMP;

Database altered.

SQL> drop tablespace TEMP1 including contents and datafiles;

Tablespace dropped.


=================================================================

TO CHANGE GUEST USERNAME/PASSWORD IN 11.5.10

java oracle.apps.fnd.security.AdminAppServer apps/XXXXX UPDATE GUEST_USER_PWD=GUEST/<password> DB_HOST=<host> DB_PORT=<port> DB_NAME=<sid>



================================================================
to check all the db patches applied to oracle home

opatch lsinventory -invPtrLoc $ORACLE_HOME/oraInst.loc


give this command in
$ORACLE_HOME/opatch



===========================================================

TO SCHEDULE STATSPACK AS PERFSTAT

-- Schedule an hourly STATSPACK snapshot via DBMS_JOB (run as PERFSTAT).
-- First run: top of the next hour plus 2 minutes; repeats every hour.
variable jobno number;
begin
  dbms_job.submit(:jobno,
                  'statspack.snap;',                      -- what to run
                  trunc(sysdate,'HH') + 1/24 + 2/1440,    -- next_date
                  'sysdate + 1/24',                       -- interval: hourly
                  TRUE);                                  -- no_parse
  commit;   -- DBMS_JOB changes are not visible to the job queue until commit
end;
/


to disable and enable an already existing schedule

exec dbms_job.broken(1648,TRUE);

to get status
col what format a30
select JOB,SCHEMA_USER,WHAT,BROKEN from dba_jobs where what like 'stats%';
[08:38] ppendurkar: select JOB,SCHEMA_USER,WHAT,BROKEN from dba_jobs where what like 'stats%';

---------------------------------------------

TO GET DB SESSION FROM OS PROCESS ID

-- Map an OS (server/shadow) process id to its database session details.
col username format a10
col machine format a10
col module format a10
set lines 120
-- Replace 'os process id' below with the actual numeric OS PID (v$process.spid).
select sid,spid,sql_address,a.username,a.status,machine,module,last_call_et,sysdate from v$session a,v$process b
where a.paddr=b.addr and spid='os process id';




IOT's and overflow

TIP: Click help for a detailed explanation of this page.
Bookmark Go to End

Subject: Index Organized Tables (IOTs)
Doc ID: Note:50874.1 Type: BULLETIN
Last Revision Date: 16-NOV-2001 Status: PUBLISHED


Contents
1. Overview
2. Syntax
3. Segments created
4. Storage
5. DML & DDL
6. Locking
7. Optimizer behaviour
8. Parallel considerations
9. Analyze
10. Data dictionary views
11. Loading data into an IOT
12. Application uses of IOTs
13. References & further reading


1. Overview

Index Organized Tables (IOTs) are tables that keep data sorted on the
primary key. They differ from other Oracle table structures in that
Oracle maintains the table rows in a B*tree index built on the primary
key. However, the index row contains both the primary key column values
and the associated non-key column values for the corresponding row.

IOTs are new in Oracle8.


2. Syntax

Rather than present the railroad diagram, the syntax is probably better
demonstrated using an example:

CREATE TABLE TESTTAB_IOT (1)
(PKCOL NUMBER PRIMARY KEY, (2)
C2 VARCHAR2(500), (3)
C3 NUMBER, (4)
C4 VARCHAR2(100)) (5)
ORGANIZATION INDEX TABLESPACE USERS (6)
PCTTHRESHOLD 10 (7)
OVERFLOW TABLESPACE USERS; (8)

Line#2: The IOT primary key must be defined, otherwise the CREATE
statement will fail:

ORA-25175: no PRIMARY KEY constraint found

If the primary key consists of multiple columns, table constraint
syntax must be used. The remainder of this article assumes a
single column primary key.

Line#6: The ORGANIZATION INDEX clause is used to specify that the table
is to be created as an IOT.

Line#7: The PCTTHRESHOLD keyword specifies a row size upper limit, as
a percentage of space in the block. 'Portions' of any row larger
than (PCTTHRESHOLD/100)*db_block_size will be stored in
the OVERFLOW table specified in the OVERFLOW TABLESPACE clause.


Line#8: The OVERFLOW TABLESPACE clause is optional. However, in Oracle 8.0
if you try to execute DML (INSERT or UPDATE) resulting in a row
larger than that specified by (PCTTHRESHOLD/100)*db_block_size, the
DML will fail with the following run-time error:

ORA-01429: Index-Organized Table: no data segment to store
overflow row-pieces

At this stage it is possible to add an overflow table as follows:

ALTER TABLE TESTTAB_IOT ADD OVERFLOW;

The manual says that you can specify a tablespace in the above
command. However there is a bug (563633, fixed in 8.0.5) that
prevents this.

In Oracle 8i, Oracle evaluates the maximum size of each column
to estimate the largest possible row. If an overflow segment is
needed (refer to Note 67989.1) but not specified, then the CREATE
TABLE will fail. This checking function guarantees that subsequent
DML operations on the IOT will not fail (no run-time errors).


3. Segments created

Creating an IOT generates the index structure, calling it
SYS_IOT_TOP_<object_id>. It also optionally creates an overflow
table called SYS_IOT_OVER_<object_id>. So in our example, and
assuming the IOT had been created in SCOTT's schema, a query of
DBA_OBJECTS (OWNER, OBJECT_ID, OBJECT_NAME, OBJECT_TYPE, GENERATED) would
show:

OWNER OBJECT_ID OBJECT_NAME OBJECT_TYPE G
---------- ---------- -------------------- --------------- -
SCOTT 1834 TESTTAB_IOT TABLE N
SCOTT 1835 SYS_IOT_OVER_1834 TABLE Y
SCOTT 1836 SYS_IOT_TOP_1834 INDEX Y

The TESTTAB_IOT table has NO space allocated to it. The SYS_IOT_OVER_1834
table and SYS_IOT_TOP_1834 index both have space allocated and are the
objects in which data is physically stored.

Note the G (GENERATED) column in DBA_TABLES - creating an IOT will
automatically generate the index and (optionally) the overflow table.
However, if during the creation of the IOT you name the constraint of
your primary key, the constraint name will be the index name (not a
SYS-generated name), for example:

CREATE TABLE IOT2
(A NUMBER CONSTRAINT C1 PRIMARY KEY)
ORGANIZATION INDEX;

The associated index can be found in DBA|USER_INDEXES as follows:

SQL> select table_name, index_name from dba_indexes
2 where table_name = 'IOT2';

TABLE_NAME INDEX_NAME
------------------------------ ------------------------------
IOT2 C1


4. Storage

Continuing with our example, the TESTTAB_IOT table has NO space allocated
to it, whilst the SYS_IOT_OVER_1834 table and SYS_IOT_TOP_1834 index both
have space allocated and are the objects in which data is physically
stored. This can be shown by selecting from DBA_SEGMENTS:

SELECT SEGMENT_NAME, HEADER_FILE, HEADER_BLOCK
FROM DBA_SEGMENTS
WHERE SEGMENT_NAME IN
('TESTTAB_IOT','SYS_IOT_OVER_1834','SYS_IOT_TOP_1834'

SEGMENT_NAME HEADER_FILE HEADER_BLOCK
-------------------- ----------- ------------
SYS_IOT_OVER_1834 4 2
SYS_IOT_TOP_1834 4 7

So, how is the data physically stored on disk? Again, this is best
demonstrated with an example, and comparing blockdumps with those from
a traditional heap table (with primary key). The example consists of our
previous IOT (TESTTAB_IOT) and a heap table created as follows:

CREATE TABLE TESTTAB_HEAP
(PKCOL NUMBER PRIMARY KEY,
C2 VARCHAR2(500),
C3 NUMBER,
C4 VARCHAR2(100))

4.1. Insert of a 'small' row

The following command inserts the same data into TESTTAB_IOT &
TESTTAB_HEAP. Note that the row is small enough NOT to cause it to be
inserted into the overflow table:

INSERT INTO TESTTAB_IOT VALUES (1,'Some data',100,'Some more data');
INSERT INTO TESTTAB_HEAP VALUES (1,'Some data',100,'Some more data');

Extract from relevant blockdump from TESTTAB_IOT (SYS_IOT_TOP_1834):

<>
row#0[1852] flag: K---, lock: 2
col 0; len 2; (2): c1 02 <----- Primary key
tl: 31 fb: --H-FL-- lb: 0x0 cc: 3
col 0: [ 9]
Dump of memory from FA9721 to FA972A
FA9720 09536F6D 65206461 746102C2
col 1: [ 2]
Dump of memory from FA972B to FA972D
FA9720 746102C2 020E536F
col 2: [14]
Dump of memory from FA972E to FA973C
FA9720 020E536F 6D65206D 6F726520 64617461
<>

Extract from relevant blockdump from TESTTAB_HEAP:

<>
block_row_dump:
tab 0, row 0, @0x796
tl: 34 fb: --H-FL-- lb: 0x1 cc: 4
col 0: [ 2] c1 02
col 1: [ 9] 53 6f 6d 65 20 64 61 74 61
col 2: [ 2] c2 02
col 3: [14] 53 6f 6d 65 20 6d 6f 72 65 20 64 61 74 61
end_of_block_dump
<>

Extract from relevant blockdump from TESTTAB_HEAP's primary key index:

<>
row#0[1877] flag: ----, lock: 2, data:(6): 01 00 00 17 00 00
col 0; len 2; (2): c1 02
<>

So, the IOT primary key is stored in the same way as it is stored in
a traditional btree index, but there is no ROWID.

The dump of the remaining row data (excluding the key) from the IOT
looks complex, but that is because of the way the data is dumped.
The columns are actually stored sequentially as <length, data> pairs, exactly as in the heap table. The only difference is that
the heap table is dumped in a more readable format.

4.2. Insert of a large row

A large row was inserted into the IOT, causing the data to be stored in
the overflow table.

Extract from relevant blockdump from TESTTAB_IOT (SYS_IOT_TOP_1834):

<>
row#0[1874] flag: K---, lock: 2
col 0; len 2; (2): c1 02 <----- Primary key
tl: 9 fb: --H-F--- lb: 0x0 cc: 0
nrid: 0x01000012.0
<>

The key is stored in the IOT, but the remainder of the row data is not.
There is a pointer (nrid: <rdba>.<slot>) to the row in the overflow
table and the flag byte is set to H (head piece) & F (first piece).
The row is effectively chained; any access to the row will increment
the 'table fetch continued row' statistic.

The portion of a 'long' row that is stored in the btree structure is
dependent upon the INCLUDING clause during IOT creation. This keyword
specifies a column where the long row is split; any columns after this
are stored in the OVERFLOW table.

Extract from relevant blockdump from overflow table (SYS_IOT_OVER_1834):

<>
bdba: 0x01000012
<>
block_row_dump:
tab 0, row 0, @0x6bd
tl: 251 fb: -----L-- lb: 0x1 cc: 3
col 0: [164]
53 6f 6d 65 20 64 61 74 61 2c 20 66 6f 6c 6c 6f 77 65 64 20 62 79 20
6c 6f 74 73 20 6f 66 20 73 70 61 63 65 73 20 20 20 20 20 20 20 20 20
20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20
20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20
20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20
20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20
20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20
20 20 20
col 1: [ 2] c2 02
col 2: [79]
53 6f 6d 65 20 6d 6f 72 65 20 64 61 74 65 2c 20 66 6f 6c 6c 6f 77 65
64 20 62 79 20 6c 6f 74 73 20 6f 66 20 73 70 61 63 65 73 20 20 20 20
20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20
20 20 20 20 20 20 20 20 20 20
end_of_block_dump
<>

The data is stored in the (heap) overflow table, in this case relative
file#1, block#18, slot#0 (0x01000012.0). The flag byte is set to L
(last piece).

4.3. General notes on blockdumps of IOTs

While we're talking about blockdumps, let's summarise the main
differences that we're likely to see between blockdumps of IOT blocks
and traditional b*tree index blocks:

Traditional PK index IOT index

-------------------------------------------------------------------------
ROWID Follows the lock byte None, all data stored in index

Rowdata Stored in indexed table Stored in the index. Each index
row starts with the primary key
value, and is followed by the
associated column values, dumped
as hex memory dumps

IOT flags Not set Set to 'I'

Row flag Not set Set to 'K' (cluster key)


5. DML & DDL

The syntax for DML and DDL issued against IOTs is identical to that for
traditional heap tables.

Like B*tree indexes, IOTs can become unbalanced. Applications that
consistently perform deletes of one extreme of the B*tree (key) and inserts
at the other extreme can cause the b*tree to become unbalanced, leading to
an increase in the b*tree level which will incur an extra I/O.
For traditional B*tree indexes the solution is to rebuild the index.
This might not be quite so straightforward for IOTs because they also store
the application data.

Updates to IOT row data that cause rows to grow larger than the overflow
threshold will cause the row data to 'chain' into the overflow table. Thus,
access to this data will incur more I/O than before it was chained.
Furthermore, if this row is updated again it will remain in the overflow
table, regardless of the new row size.

Non-monotonic inserts of IOT primary keys (which fit between existing
primary key values) will cause block-splits (as with traditional b*tree
indexes). This can lead to poor space utilisation of the b*tree. The same
thing would also occur if a primary key column is updated, as this
effectively deletes the existing key, and inserts the new value.

So, it is very important to consider potential DML activity against IOTs.


6. Locking

DML locking is identical to that for traditional tables i.e. a TM lock
is taken out on TESTTAB_IOT in the following modes:

INSERT: SHARE (mode 4)
UPDATE: ROW EXCLUSIVE (mode 3)
DELETE: ROW EXCLUSIVE (mode 3)

Note that locks are NOT taken out on either the index or overflow tables.
This is not necessary for the following reasons:

o. It is not possible to drop the index directly because it enforces the
primary key. An attempt to drop the index fails with:

ORA-02429: cannot drop index used for enforcement of unique/primary key

o. An attempt to drop the overflow table fails with:

ORA-25191: cannot reference overflow table of an index-organized table


7. Optimizer behaviour

Optimizer behaviour (plans chosen by the optimizer) for simple queries is
as it is for traditional B*tree indexes. Note that only Cost Based
Optimizer (CBO) can access IOTs. If you add an IOT to a query tuned using
RULE then CBO will be used automatically.
General access paths are:

o. Equality predicate specifies the primary key column. An index range
scan is performed.

o. Range predicate specifies the primary key column (and optionally other
columns). An index range scan is performed.

o. Predicates do not specify the primary key column, or there are no
predicates. An index full scan is performed. I am unable to get index
fast full scans working against IOTs.

o. Access to data in overflow tables is via a rowid lookup (the rowid is
stored in the IOT B*tree).

8. Parallel considerations

IOTs currently do NOT support parallel operations.


9. Analyze

IOTs can be analyzed to compute|estimate statistics, and to validate
structure. Because of the limited paths open to the CBO, the presence of
statistics is probably more valid in order to check the b*tree level and
space utilisation. Note that the B*tree statistics can be viewed via
DBA|ALL|USER_INDEXES (see section 10 - Data dictionary views).


10. Data dictionary views

No new dictionary tables have been added to support IOTs. However, the
b*tree structure (in our example SYS_IOT_TOP_1834) details may be viewed
via DBA|ALL|USER_INDEXES, whilst the IOT and overflow table can be viewed
via DBA|ALL|USER_TABLES. DBA|ALL|USER_INDEXES is especially useful for
viewing the index statistics (B*tree level, index space utilisation, etc.).


11. Loading data into an IOT

During brief testing, I was unable to force the IOT into DIRECT LOAD state
after a failed SQL*LOADER direct load which would have caused a traditional
b*tree to go into this state.


12. Application uses of IOTs

The Oracle8 Server Concepts manual discusses typical
applications that might benefit from the the use of IOTs.


=====================

change max extents of overflow segments

ALTER TABLE okc.AQ$_OKC_AQ_EV_TAB_G storage(maxextents unlimited) OVERFLOW storage (maxextents unlimited);


==========================================================================================

to find particular text in an rdf

strings -a MWAC_INVVAL.rdf | grep -i trace



===========================================================================================
ANALYZE SCHEMA

dbms_utility.analyze_schema('<SCHEMA_NAME>', 'COMPUTE');


====================================================================================

GATHER SCHEMA STATISTICS

dbms_stats.gather_schema_stats('<SCHEMA_NAME>');

====================================================================================

give select privileges on apps object to other schema
(09:49:01) sayareddy(ebso):

-- Generate (into rights.sql) the GRANT statements that give BOLINF access to
-- objects visible to the current (APPS) user; run the spooled script afterwards.
spool rights.sql

set echo off
set pagesize 0
set linesize 300
set feedback off
-- SELECT grants on every synonym and on APPS tables/views.
SELECT 'Grant select on '||synonym_name||' to BOLINF;'
FROM user_synonyms;
SELECT 'Grant select on APPS.'||object_name||' to BOLINF;' FROM user_objects
WHERE object_type IN ('TABLE','VIEW');
SELECT 'Grant execute on '||object_name||' to bolinf;'
FROM user_objects
WHERE object_type in ('PROCEDURE','FUNCTION','PACKAGE');
-- DML grants only on interface tables (open interfaces are written to by design).
SELECT 'Grant insert,update,delete on '||synonym_name||' to BOLINF;'
FROM user_synonyms
WHERE synonym_name like '%IFACE%';
SELECT 'Grant insert,update,delete on '||synonym_name||' to BOLINF;'
FROM user_synonyms
WHERE synonym_name like '%INTERFACE%';
spool off;

then you can run the spooled script rights.sql by logging in as apps

No comments:

Post a Comment