Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                

Cluster Setup

Download as txt, pdf, or txt
Download as txt, pdf, or txt
You are on page 1 of 4

Append the following line to /etc/system to limit the ZFS ARC cache size:

# /etc/system entry: cap the ZFS ARC at 8589934592 bytes (8 GiB) so the
# filesystem cache does not compete with the Oracle SGA for memory.
set zfs:zfs_arc_max=8589934592

# Switch the oracle-rg resource group onto node prdfdsdb01.
scswitch -z -g oracle-rg -h prdfdsdb01


# Shared-memory, semaphore, and file-descriptor limits for the "default"
# project (-s substitutes the attribute value in place).
projmod -s -K "project.max-shm-memory=(priv,25g,deny)" default
projmod -s -K "project.max-sem-ids=(priv,100,deny)" default
projmod -s -K "project.max-sem-nsems=(priv,256,deny)" default
projmod -s -K "project.max-shm-ids=(priv,100,deny)" default
# Fix: the original listed three conflicting max-file-descriptor settings —
# (basic,4096), (basic,1024) marked with "**", and one with the misspelled
# privilege level "prv" (valid levels are basic/priv/privileged). Kept the
# 4096 basic limit once; confirm that is the intended value.
projmod -s -K "project.max-file-descriptor=(basic,4096,deny)" default
Set up the NTP client:
#cd /etc/ntp/

#!/bin/sh
# /etc/init.d/ndd — raise the ephemeral (anonymous) port range to
# 9000-65500 so dynamically assigned ports do not collide with
# application listener ports below 9000.
/usr/sbin/ndd -set /dev/tcp tcp_smallest_anon_port 9000
/usr/sbin/ndd -set /dev/tcp tcp_largest_anon_port 65500
/usr/sbin/ndd -set /dev/udp udp_smallest_anon_port 9000
/usr/sbin/ndd -set /dev/udp udp_largest_anon_port 65500

# Install the script and hard-link it into rc2.d so it runs at boot:
# chmod 744 /etc/init.d/ndd


# chown root:sys /etc/init.d/ndd
# ln /etc/init.d/ndd /etc/rc2.d/S70ndd

Append the following line to the "/etc/system" file.

# /etc/system process/user limits (note: the comment character inside
# /etc/system itself is "*", not "#").
set maxusers=4096
set max_nprocs=30000
set maxuprc=16384

Pending database SID name

# Oracle Settings
# Scratch space used by the installer and runtime tools.
TMP=/tmp; export TMP
TMPDIR=$TMP; export TMPDIR

# Select the appropriate ORACLE_BASE for this host.


ORACLE_HOSTNAME=prdfdsdb02; export ORACLE_HOSTNAME
ORACLE_UNQNAME=db11g; export ORACLE_UNQNAME
# Fix: original read "ORACLE_BASE=/opt/oracle/app; ORACLE_BASE" — the
# "export" keyword was missing, so ORACLE_BASE was never exported and the
# bare word would have been executed as a (failing) command.
ORACLE_BASE=/opt/oracle/app; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/11.2.0/dbhome_1; export ORACLE_HOME
# NOTE(review): SID "db11G" (capital G) differs from ORACLE_UNQNAME "db11g";
# SIDs are case-sensitive — confirm which casing the database was created with.
ORACLE_SID=db11G; export ORACLE_SID
PATH=$ORACLE_HOME/bin:$PATH; export PATH

/opt/oracle/app/product/11.2.0/dbhome_1

$ cd Oracle-11g-R1-Database-Media-Directory
$ ./runInstaller

create shared zpool


# Create the shared ZFS pool on the two disks and one dataset per Oracle
# mount point, then hand ownership of the tree to the oracle user.
# zpool create datapool c1t0d0 c1t1d0
# chown oracle:oinstall /datapool
#zfs create -o mountpoint=/ORA01 datapool/ORA01
#zfs create -o mountpoint=/ORA02 datapool/ORA02
#zfs create -o mountpoint=/ORA03 datapool/ORA03
#zfs create -o mountpoint=/expbkp datapool/expbkp
#zfs create -o mountpoint=/archive datapool/archive
#chown -R oracle:oinstall /datapool

Set ZFS space reservations for each dataset:
# Guarantee 600 GB for each data volume and 300 GB for the export backup area.
#zfs set reservation=600g datapool/ORA01
#zfs set reservation=600g datapool/ORA02
#zfs set reservation=600g datapool/ORA03
#zfs set reservation=300g datapool/expbkp

# Run the Sun Cluster installer.
#/usr/cluster/bin/scinstall

on one node

# List the resource types already registered (transcript output follows),
# then register the types needed for HA Oracle: shared storage, the
# database server, and the listener.
# clrt list
SUNW.LogicalHostname:3
SUNW.SharedAddress:2
# clresourcetype register SUNW.HAStoragePlus
# clresourcetype register SUNW.oracle_server
# clresourcetype register SUNW.oracle_listener

On both nodes add prdfsdsdb-vip to /etc/hosts file


prdfdsdb01# grep prdfsdsdb-vip /etc/inet/hosts
10.20.62.36 prdfsdsdb-vip

prdfdsdb02# grep prdfsdsdb-vip /etc/inet/hosts


10.20.62.36 prdfsdsdb-vip

On one node
# On one node: create the failover group, the logical-hostname (VIP)
# resource, and the HAStoragePlus resource for the shared zpool, then
# bring the group online (enabled and managed).
#clresourcegroup create oracle-rg
# Fix: original passed "-h prdfdsdb-vip"; the /etc/inet/hosts entry verified
# on both nodes is "prdfsdsdb-vip" (and that is the name pinged below), so
# the logical hostname must use the hosts-file spelling to resolve.
#clreslogicalhostname create -h prdfsdsdb-vip -g oracle-rg prdfdsdb-vip-rs
#clresource create -t SUNW.HAStoragePlus -p zpools=datapool -g oracle-rg hasp-rs
# clresourcegroup online -eM oracle-rg
# ping prdfsdsdb-vip
prdfsdsdb-vip is alive
# clresourcegroup status oracle-rg

As the oracle user, create the listener.ora and sqlnet.ora files on both nodes.
You can create them using the netca command

As the oracle user, create the database using the Database Configuration Assistant
(dbca)

As the oracle user on the first node, edit the tnsnames.ora file to reference the
logical
hostname and to add an entry for LISTENER.

# vi /opt/oracle/app/product/12.2.0/dbhome_1/network/admin/listener.ora
LISTENER =
(DESCRIPTION_LIST =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = oracle-lh)(PORT = 1521))
)
)
# vi /opt/oracle/app/product/12.2.0/dbhome_1/network/admin/tnsnames.ora
LISTENER_ORCL =
(ADDRESS = (PROTOCOL = TCP)(HOST = oracle-lh)(PORT = 1521))
ORCL =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = oracle-lh)(PORT = 1521))
)

As the oracle user on the second node, copy over the server initialization file,
password
file, tnsnames.ora file, and the diag and admin directory structure for the “demo”
SID.

node2$ cd $ORACLE_HOME/dbs
node2$ scp -q oracle@node1:`pwd`/spfiledemo.ora .
node2$ scp -q oracle@node1:`pwd`/orapwdemo .

node2$ cd $ORACLE_HOME/network/admin
node2$ scp -q oracle@node1:`pwd`/tnsnames.ora .
node2$ scp -q oracle@node1:`pwd`/listener.ora .

node2$ cd /opt/oracle/app/diag/rdbms
node2$ scp -q -r oracle@node1:/opt/oracle/app/diag/rdbms/demo .

node2$ cd /u01/app/oracle
node2$ scp -r -q oracle@node1:`pwd`/admin .

As the oracle user on the second node, add an entry for the database to the oratab
file.
node2$ grep demo /var/opt/oracle/oratab
demo:/opt/oracle/app/product/11.2.0/db_1:N

Input required <LISTENER_NAME>

On one node, as the root user,


# Register the HA listener resource in oracle-rg; the dependency on hasp-rs
# ensures the listener only starts on the node where the zpool is mounted.
# clresource create -t SUNW.oracle_listener -g oracle-rg \
-p ORACLE_HOME=/opt/oracle/app/product/11.2.0/dbhome_1 \
-p LISTENER_NAME=LISTENER \
-p resource_dependencies=hasp-rs oracle-lsnr-rs

Input required= <SID_NAME>


# Register the HA Oracle server resource for SID AAOPDB.
# NOTE(review): connect_string embeds a plaintext password on the command
# line ("fdsclustr/mukmin2020") — it is exposed in ps output, shell history,
# and this document; rotate this credential and move it to a protected file.
# clresource create -t SUNW.oracle_server -g oracle-rg \
-p ORACLE_HOME=/opt/oracle/app/product/11.2.0/dbhome_1 \
-p ORACLE_SID=AAOPDB \
-p Alert_log_file=/opt/oracle/app/diag/rdbms/aaopdb/AAOPDB/trace/alert_AAOPDB.log \
-p resource_dependencies=hasp-rs \
-p connect_string=fdsclustr/mukmin2020 oracle-svr-rs

======
# Switch to the oracle user and start the instance manually.
su - oracle
sqlplus "/ as sysdba"
startup

# Data Guard role handling on the HA server resource: mark the resource
# IN_TRANSITION during a role change, then set the final role.
# clrs set -p Dataguard_role=IN_TRANSITION oracle-svr-rs


# clrs set -p Dataguard_role=STANDBY oracle-svr-rs
# clrs set -p Dataguard_role=PRIMARY oracle-svr-rs

DB Kernel Setup
-------------
# Kernel resource limits for the user.oracle project.
# NOTE(review): the first line uses projadd (creates the project) while the
# rest use projmod (modify an existing one) — projadd fails if the project
# already exists; confirm the intended creation order.
projadd -s -K "project.max-shm-memory=(privileged,25G,deny)" user.oracle
projmod -s -K "project.max-sem-nsems=(priv,256,deny)" user.oracle
projmod -s -K "project.max-shm-ids=(priv,512,deny)" user.oracle
projmod -s -K "project.max-file-descriptor=(basic,4096,deny)" user.oracle
projmod -s -K "project.max-sem-ids=(priv,256,deny)" user.oracle

# Display the resulting limits for verification.
prctl -i project user.oracle

You might also like