Install DR
ORACLE_SID=SULPRD1
ORACLE_BASE=/u01/app/oracle
ORACLE_HOSTNAME=sgtdracdb11prd.spms.local
ORACLE_TERM=xterm
ORACLE_HOME=/u01/app/oracle/product/11.2.0.4/db_1
Name resolution for the SCAN (/etc/hosts / DNS):
Name: sgtd01-prd-scan.spms.min-saude.pt
Address: 10.105.8.29
Name: sgtd01-prd-scan.spms.min-saude.pt
Address: 10.105.8.30
Name: sgtd01-prd-scan.spms.min-saude.pt
Address: 10.105.8.31
-----------------------------------------------------------------------------------------------------------------------------------
Disks
root@sgtd01-prd-bd01:~# oracleasm listdisks
DATA01
DATA02
FLASH01
FLASH02
MGMT01
MGMT02
OCR01
OCR02
OCR03
root@sgtd01-prd-bd01:~# rpm -qa | grep cvuqdisk
cvuqdisk-1.0.10-1.x86_64
root@sgtd01-prd-bd01:~# oracleasm configure
ORACLEASM_ENABLED=true
ORACLEASM_UID=grid
ORACLEASM_GID=asmadmin
ORACLEASM_SCANBOOT=true
ORACLEASM_SCANORDER=""
ORACLEASM_SCANEXCLUDE=""
ORACLEASM_SCAN_DIRECTORIES=""
ORACLEASM_USE_LOGICAL_BLOCK_SIZE="false"
root@sgtd01-prd-bd01:~# lsblk -f
bash_profile
vi .bash_profile
ORACLE_BASE=/app/grid
export ORACLE_BASE
ORACLE_HOME=/app/19.3.0/grid
export ORACLE_HOME
ORACLE_SID=+ASM1
export ORACLE_SID
CRS_HOME=/app/19.3.0/grid
export CRS_HOME
GRID_HOME=/app/19.3.0/grid
export GRID_HOME
PATH=$PATH:$HOME/.local/bin:$HOME/bin:$ORACLE_HOME/bin
export PATH
umask 022
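To confirm the grid user's environment after editing .bash_profile (a quick sanity check, not in the original notes):
. ~/.bash_profile
echo $ORACLE_HOME $ORACLE_SID   # expect /app/19.3.0/grid and +ASM1
which sqlplus                   # should resolve under /app/19.3.0/grid/bin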
CRS 19c (19.3.0) software installation
LINUX.X64_193000_grid_home.zip
----------------------------------------
Software Only
-- install using MobaXterm:
ssh grid@sgtd01-prd-bd01
W+|QUQk+@;
cd /app/19.3.0/grid
grid@ccm03-prd-bd01:grid$ ./gridSetup.sh
Launching Oracle Grid Infrastructure Setup Wizard...
grid@sgtd01-prd-bd02:grid$ pwd
/app/19.3.0/grid
The Grid home directory must be empty.
execute as root
/app/oraInventory/orainstRoot.sh
/app/19.3.0/grid/root.sh
root@sgtd01-prd-bd02:~# /app/oraInventory/orainstRoot.sh
Changing permissions of /app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
root@sgtd01-prd-bd02:~# /app/19.3.0/grid/root.sh
Performing root user operation.
To configure Grid Infrastructure for a Cluster execute the following command as grid user:
/app/19.3.0/grid/gridSetup.sh
This command launches the Grid Infrastructure Setup Wizard. The wizard also supports silent
operation, and the parameters can be passed through the response file that is available in the
installation media.
OK
Close
10. Disk group name: MGMT
Redundancy: Normal, allocation unit size 4 MB
Change the discovery path to /dev/oracleasm/disks/*
Select the 2 disks MGMT01, MGMT02
11. Password
sys ldtf45#45dsPa
asmsnmp ldtf45#45dsPaasm
12. Do not use IPMI
16. Summary
Run as root on 2 nodes
/app/19.3.0/grid/root.sh
root@sgtd01-prd-bd01:~# /app/19.3.0/grid/root.sh
Performing root user operation.
2023/01/23 17:16:12 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
root@sgtd01-prd-bd02:~# /app/19.3.0/grid/root.sh
Performing root user operation.
18. Close
-- p6880880_190000_Linux-x86-64.zip (OPatch update), on both nodes
PSU CRS GRID 19.0.0
Validate:
crsctl status res -t
srvctl config nodeapps
oifcfg getif
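A few additional standard Clusterware checks that fit at this point (assumed extras, not part of the original checklist):
crsctl check cluster -all    # CRS/CSS/EVM daemons on every node
olsnodes -n -s               # node list with number and state
srvctl status scan           # placement of the three SCAN VIPs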
p34773504_190000_Linux-x86-64.zip
------------ Patch 34762026
----- /app/stage/p34773504_190000/34773504/34762026_README.html
--2.1.1.1 OPatch Utility Information
/app/19.3.0/grid/OPatch/opatch version
OPatch Version: 12.2.0.1.36
--2.1.1.2 Validation of Oracle Inventory
/app/19.3.0/grid/OPatch/opatch lsinventory -detail -oh /app/19.3.0/grid/ > 2.1.1.2_Validation.txt
--2.1.1.4 Run OPatch Conflict Check
As the Grid home user:
...
Invoking prereq "checkconflictagainstohwithdetail"
Prereq "checkConflictAgainstOHWithDetail" passed.
OPatch succeeded.
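The conflict-check command elided above follows the standard form from the patch README; the sub-patch path below is one of this environment's five (the others are checked the same way):
/app/19.3.0/grid/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -ph /app/stage/p34773504_190000/34773504/34762026/34768559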
--2.1.5 OPatchAuto
-------- on node 1
The utility must be executed by an operating system (OS) user with root privileges, and it must be executed on each node in the cluster if the Grid home is in non-shared storage.
as ROOT:
export PATH=$PATH:/app/19.3.0/grid/OPatch:/app/19.3.0/grid/bin
opatchauto apply /app/stage/p34773504_190000/34773504/34762026 -analyze
==Following patches were SUCCESSFULLY analyzed to be applied:
Patch: /app/stage/p34773504_190000/34773504/34762026/34768559
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_14-44-28PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34768569
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_14-44-28PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/33575402
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_14-44-28PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34863894
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_14-44-28PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34765931
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_14-44-28PM_1.log
------------------------------
opatchauto apply /app/stage/p34773504_190000/34773504/34762026 -oh /app/19.3.0/grid
root@sgtd01-prd-bd01:34762026# opatchauto apply /app/stage/p34773504_190000/34773504/34762026 -oh /app/19.3.0/grid
Performing prepatch operations on CRS - bringing down CRS service on home /app/19.3.0/grid
Prepatch operation log file location: /app/grid/crsdata/sgtd01-prd-bd01/crsconfig/crs_prepatch_apply_inplace_sgtd01-prd-bd01_2023-01-24_02-47-49PM.log
CRS service brought down successfully on home /app/19.3.0/grid
OPatchAuto successful.
--------------------------------Summary--------------------------------
Patching is completed successfully. Please find the summary as follows:
Host:sgtd01-prd-bd01
CRS Home:/app/19.3.0/grid
Version:19.0.0.0.0
Summary:
Patch: /app/stage/p34773504_190000/34773504/34762026/33575402
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_14-51-15PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34765931
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_14-51-15PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34768559
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_14-51-15PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34768569
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_14-51-15PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34863894
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_14-51-15PM_1.log
-------- on node 2
export PATH=$PATH:/app/19.3.0/grid/OPatch:/app/19.3.0/grid/bin
opatchauto apply /app/stage/p34773504_190000/34773504/34762026 -analyze
opatchauto apply /app/stage/p34773504_190000/34773504/34762026 -oh /app/19.3.0/grid
--------------------------------Summary--------------------------------
Host:sgtd01-prd-bd02
CRS Home:/app/19.3.0/grid
Version:19.0.0.0.0
Summary:
Patch: /app/stage/p34773504_190000/34773504/34762026/33575402
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_15-35-50PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34765931
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_15-35-50PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34768559
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_15-35-50PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34768569
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_15-35-50PM_1.log
Patch: /app/stage/p34773504_190000/34773504/34762026/34863894
Log: /app/19.3.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2023-01-24_15-35-50PM_1.log
crsctl query crs activeversion
crsctl query crs activeversion -f
Oracle Clusterware active version on the cluster is [19.0.0.0.0]. The cluster upgrade state is [NORMAL]. The cluster active patch level is [3161362881].
-----------------------Patch 34786990
https://updates.oracle.com/Orion/Services/download?type=readme&aru=25032666
Patch 34786990 - Oracle JavaVM Component Release Update 19.18.0.0.230117
-------------- do not install on Grid Infrastructure; Database home only
https://mikedietrichde.com/2020/01/24/do-you-need-to-apply-ojvm-patches-to-grid-infrastructure/
grid@sgtd01-prd-bd02:34786990$ pwd
/app/stage/p34773504_190000/34773504/34786990
$ORACLE_HOME/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -ph ./
... Prereq "checkConflictAgainstOHWithDetail" passed.
$ORACLE_HOME/OPatch/opatch apply
OPatch failed with error code 73
-------------- do not install; Database home only
RDBMS 11.2.0.4 software installation
-- using MobaXterm
ssh oracle@sgtd01-prd-bd01
oracle / q:VEG+ltap
cd /app/stage/database
oracle@sgtd01-prd-bd01:database$ ./runInstaller
Starting Oracle Universal Installer...
Checking Temp space: must be greater than 120 MB. Actual 1940 MB Passed
Checking swap space: must be greater than 150 MB. Actual 4095 MB Passed
Checking monitor: must be configured to display at least 256 colors. Actual 16777216 Passed
Preparing to launch Oracle Universal Installer from /tmp/OraInstall2023-01-24_04-42-28PM.
Please wait ...
1. Do NOT select "I wish to receive security updates..."
6. Enterprise Edition
7. Specify installation location
base: /app/oracle
software: /app/oracle/product/11.2.0.4/dbhome_1/
--------------------------------------------------
CRS Integrity - This test checks the integrity of the Oracle Clusterware stack across the cluster nodes. Errors:
PRVF-4037 : CRS is not installed on any of the nodes
PRVF-7593 : CRS is not found to be installed on node
RAC RDBMS Installation fails with Error: "PRVF-4037 : CRS is not installed on any of the nodes" (Doc ID 2315020.1)
less /app/oraInventory/ContentsXML/inventory.xml
<HOME_LIST>
<HOME NAME="OraGI19Home1" LOC="/app/19.3.0/grid" TYPE="O" IDX="1" CRS="true"/>
</HOME_LIST>
--PRVF-7611 : Proper user file creation mask (umask) for user "oracle" is not found on node "ccm03-prd-bd02" [Expected = "0022" ; Found = "0027"]
Cause: The user's OS file creation mask (umask) was not the required setting.
Action: Set the appropriate user file creation mask. Modify the user's .profile or .cshrc or .bashrc to include the required umask.
Fix: add umask 0022 in .bash_profile, then Ignore the check.
---------------------------------------------------------------------------------
Swap Size - This is a prerequisite condition to test whether sufficient total swap space is available on the system.
Check Failed on Nodes: [sgtd01-prd-bd02, sgtd01-prd-bd01]
Verification result of failed node: sgtd01-prd-bd02
Expected Value : 15.6235GB (1.6382396E7KB)
Actual Value : 4GB (4194300.0KB)
Ignore
---------------------------------------------------------------------------------
Single Client Access Name (SCAN) - This test verifies the Single Client Access Name configuration. Errors:
PRVG-1101 : SCAN name "null" failed to resolve
Cause: An attempt to resolve the specified SCAN name to a list of IP addresses failed because the SCAN could not be resolved in DNS or GNS using 'nslookup'.
Action: Check whether the specified SCAN name is correct. If the SCAN name should be resolved in DNS, check the configuration of the SCAN name in DNS. If it should be resolved in GNS, make sure that the GNS resource is online.
PRVF-4657 : Name resolution setup check for "null" (IP address: 127.0.0.1) failed
Cause: Inconsistent IP address definitions found for the SCAN name identified using DNS and configured name resolution mechanism(s).
Action: Look up the SCAN name with nslookup, and make sure the returned IP addresses are consistent with those defined in NIS and /etc/hosts as configured in /etc/nsswitch.conf by reconfiguring the latter. Check the Name Service Cache Daemon (/usr/sbin/nscd) by clearing its cache and restarting it.
Bug 25409838 - DBCA fails with error PRVG-1101 if SCAN VIP uses IPv6 addresses (Doc ID 25409838.8)
grid@sgtd01-prd-bd01:~$ srvctl config scan
SCAN name: sgtd01-prd-scan.spms.min-saude.pt, Network: 1
Subnet IPv4: 10.105.8.0/255.255.255.0/ens224, static
Subnet IPv6:
SCAN 1 IPv4 VIP: 10.105.8.29
SCAN VIP is enabled.
SCAN 2 IPv4 VIP: 10.105.8.30
SCAN VIP is enabled.
SCAN 3 IPv4 VIP: 10.105.8.31
SCAN VIP is enabled.
grid@sgtd01-prd-bd01:~$ nslookup sgtd01-prd-scan.spms.min-saude.pt
Server: 127.0.0.1
Address: 127.0.0.1#53
Non-authoritative answer:
Name: sgtd01-prd-scan.spms.min-saude.pt
Address: 10.105.8.29
Name: sgtd01-prd-scan.spms.min-saude.pt
Address: 10.105.8.31
Name: sgtd01-prd-scan.spms.min-saude.pt
Address: 10.105.8.30
--Installing 11G : "File Not Found" Errors Running RunInstaller or Setup.exe (WFMLRSVCApp.ear, WFMGRApp.ear, WFALSNRSVCApp.ear) (Doc ID 468771.1)
unzip -K p13390677_112040_Linux-x86-64_2of7.zip
--------------------------------------------------------------------
Exception String: Error in invoking target 'agent nmhs' of makefile '/app/oracle/product/11.2.0.4/dbhome_1/sysman/lib/ins_emagent.mk'. See '/app/oraInventory/logs/installActions2023-01-24_11-39-17PM.log' for details.
--Error in invoking target 'agent nmhs' of make file ins_emagent.mk while installing Oracle 11.2.0.4 on Linux (Doc ID 2299494.1)
vi /app/oracle/product/11.2.0.4/dbhome_1/sysman/lib/ins_emagent.mk
Find the line $(MK_EMAGENT_NMECTL) and replace it with:
$(MK_EMAGENT_NMECTL) -lnnz11
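The same edit can be scripted; a minimal sketch of the Doc ID 2299494.1 fix (back up the makefile first):
cd /app/oracle/product/11.2.0.4/dbhome_1/sysman/lib
cp ins_emagent.mk ins_emagent.mk.bak
# append -lnnz11 to the bare $(MK_EMAGENT_NMECTL) line
sed -i 's/^\(\s*\$(MK_EMAGENT_NMECTL)\)\s*$/\1 -lnnz11/' ins_emagent.mk
# then Retry the failed link step in the installer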
----------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------
----------------
Update OPatch in the 11.2.0.4 Oracle home
unzip -K p6880880_112000_Linux-x86-64.zip -d p6880880_112000
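The usual sequence for refreshing OPatch inside a home is the sketch below (staging path assumed; the zip unpacks an OPatch/ directory directly into the home):
export ORACLE_HOME=/app/oracle/product/11.2.0.4/dbhome_1
mv $ORACLE_HOME/OPatch $ORACLE_HOME/OPatch.old          # keep the shipped copy
unzip -K p6880880_112000_Linux-x86-64.zip -d $ORACLE_HOME
$ORACLE_HOME/OPatch/opatch version                      # expect 11.2.0.3.29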
Repeat on server 2
-----------------------------------------------------------------------------------------------------------------------------------
----------------
Apply PSU + OJVM to RDBMS 11.2.0.4
unzip -K p31720776_112040_Linux-x86-64.zip
cd 31720776
drwxr-xr-x. 30 oracle oinstall 4096 Sep 24 2020 31537677
drwxr-xr-x. 4 oracle oinstall 67 Sep 8 2020 31668908
Patch 31537677
https://updates.oracle.com/Orion/Services/download?type=readme&aru=23856146
cd /app/stage/31720776/31537677
export ORACLE_BASE=/app/oracle
export ORACLE_HOME=/app/oracle/product/11.2.0.4/dbhome_1
export PATH=$PATH:/app/oracle/product/11.2.0.4/dbhome_1/bin:/app/oracle/product/11.2.0.4/dbhome_1/OPatch
opatch version
OPatch Version: 11.2.0.3.29
PREREQ session
OPatch succeeded.
-------------------------------------------------
cd /app/stage/31720776/31537677
opatch apply
oracle@ccm03-prd-bd01:31537677$ opatch apply
Oracle Interim Patch Installer version 11.2.0.3.29
Copyright (c) 2022, Oracle Corporation. All rights reserved.
--------------------------------
OPatch found the word "error" in the stderr of the make command.
Please look at this stderr. You can re-run this make command.
Stderr output:
chmod: changing permissions of '/app/oracle/product/11.2.0.4/dbhome_1/bin/extjobO': Operation not permitted
make: [iextjob] Error 1 (ignored)
--Oracle Database 12.2.0.1 Release Update & Release Update Revision October 2019 Known Issues (Doc ID 2568307.1)
--Applying Proactive Bundle / PSU Patch fails with Error: "chmod: changing permissions of `$ORACLE_HOME/bin/extjobO': Operation not permitted" (Doc ID 2265726.1)
oracle@ccm03-prd-bd01:31537677$ ll /app/oracle/product/11.2.0.4/dbhome_1/bin/extjobO
-rwsr-x---. 1 root oinstall 1248008 Jun 3 13:30 /app/oracle/product/11.2.0.4/dbhome_1/bin/extjobO
--ignore all
Composite patch 31537677 successfully applied.
OPatch Session completed with warnings.
Log file location: /app/oracle/product/11.2.0.4/dbhome_1/cfgtoollogs/opatch/opatch2023-01-25_10-00-12AM_1.log
Patch 31668908
Patch 31668908 - Oracle JavaVM Component 11.2.0.4.201020 Database PSU
https://updates.oracle.com/Orion/Services/download?type=readme&aru=23800881
export ORACLE_HOME=/app/oracle/product/11.2.0.4/dbhome_1
export PATH=$PATH:/app/oracle/product/11.2.0.4/dbhome_1/bin:/app/oracle/product/11.2.0.4/dbhome_1/OPatch
cd /app/stage/31720776/31668908
opatch prereq CheckConflictAgainstOHWithDetail -ph ./
opatch apply
Source:
[oracle@sgtdracdb11prd ~]$ asmcmd
ASMCMD> lsdg
State    Type    Rebal  Sector  Block  AU       Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
MOUNTED  EXTERN  N      512     4096   1048576  102399    101269   0                101269          0              N             DG_ARCH/
MOUNTED  EXTERN  N      512     4096   1048576  409598    157083   0                157083          0              N             DG_DATA/
DG_DATA --> External redundancy, disks: DATA01 + DATA02, allocation unit size 4 MB
DG_ARCH --> External redundancy, disks: FLASH01 + FLASH02, allocation unit size 4 MB
grid@sgtd01-prd-bd01:~$ asmcmd
ASMCMD> lsdg
State    Type    Rebal  Sector  Logical_Sector  Block  AU       Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
MOUNTED  NORMAL  N      512     512             4096   4194304  15348     14432    5116             4658            0              Y             ASM/
MOUNTED  EXTERN  N      512     512             4096   4194304  204792    204640   0                204640          0              N             DG_ARCH/
MOUNTED  EXTERN  N      512     512             4096   4194304  614392    614240   0                614240          0              N             DG_DATA/
MOUNTED  NORMAL  N      512     512             4096   4194304  102392    53936    0                26968           0              N             MGMT/
Using DBCA, create a test database:
ssh oracle@sgtd01-prd-bd01
oracle / q:VEG+ltap
grid / W+|QUQk+@;
root / QbTkmta$GGM
ORACLE_BASE=/app/oracle
export ORACLE_BASE
ORACLE_HOME=/app/oracle/product/11.2.0.4/dbhome_1
export ORACLE_HOME
ORACLE_SID=TEST
export ORACLE_SID
PATH=$ORACLE_HOME/bin:$ORACLE_HOME/lib:$PATH
export PATH
cd $ORACLE_HOME/bin
dbca
11. Password
Sys/system Omp118ilvvtp1P
dbsnmp Omp1$8ianMP
Create the Data Guard standby of database NORPRD
--------------------------------------------
-- duplicate from active database....
http://www.lamimdba.com.br/2014/12/duplicate-partir-de-um-active-dataguard.html
Step by Step Guide on Creating Physical Standby Using RMAN DUPLICATE...FROM ACTIVE DATABASE (Doc ID 1075908.1)
https://dbaclass.com/article/rman-active-cloning-from-rac-to-rac/
-- different db_unique_name values:
db_name string NORPRD
db_unique_name string NORPRD
standby db_unique_name : NORPROD
add to /etc/hosts:
10.105.8.110 sgtdracdb11prd.spms.local sgtdracdb11prd
10.105.8.120 sgtdracdb12prd.spms.local sgtdracdb12prd
alter database add standby logfile THREAD 2 group 201 ('+DG_DATA','+DG_ARCH') SIZE 52428800;
alter database add standby logfile THREAD 2 group 202 ('+DG_DATA','+DG_ARCH') SIZE 52428800;
alter database add standby logfile THREAD 2 group 203 ('+DG_DATA','+DG_ARCH') SIZE 52428800;
alter database add standby logfile THREAD 2 group 204 ('+DG_DATA','+DG_ARCH') SIZE 52428800;
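To confirm the standby redo logs came out with the expected size and count (the usual rule is one more group per thread than the online logs), a quick check:
-- on the database where the ADD STANDBY LOGFILE commands ran
SELECT thread#, group#, bytes/1024/1024 AS mb, status FROM v$standby_log ORDER BY thread#, group#;
SELECT thread#, COUNT(*) AS online_groups FROM v$log GROUP BY thread#;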
---------------------------
2. Ensure that SQL*Net connectivity is working.
Insert a static entry for NORPROD in the listener.ora file of the standby system.
/app/19.3.0/grid/network/admin/listener.ora
SID_LIST_LISTENER =
(SID_LIST =
(SID_DESC =
(GLOBAL_DBNAME = NORPROD_DGMGRL)
(ORACLE_HOME = /app/oracle/product/11.2.0.4/dbhome_1)
(SID_NAME = NORPROD1)
)
(SID_DESC =
(GLOBAL_DBNAME = CENPROD_DGMGRL)
(ORACLE_HOME = /app/oracle/product/11.2.0.4/dbhome_1)
(SID_NAME = CENPROD1)
)
(SID_DESC =
(GLOBAL_DBNAME = LVTPROD_DGMGRL)
(ORACLE_HOME = /app/oracle/product/11.2.0.4/dbhome_1)
(SID_NAME = LVTPROD1)
)
(SID_DESC =
(GLOBAL_DBNAME = SULPROD_DGMGRL)
(ORACLE_HOME = /app/oracle/product/11.2.0.4/dbhome_1)
(SID_NAME = SULPROD1)
)
)
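After saving listener.ora the listener must re-read it before the static services appear; a verification sketch (as the grid user on each standby node):
lsnrctl reload LISTENER
lsnrctl status LISTENER | grep -i dgmgrl   # the four *_DGMGRL services should be listed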
2.1 TNSNAMES.ORA for the Primary and Standby should have BOTH entries
-- FOR THE STANDBYS
NORPROD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtd01-prd-bd01-vip.spms.min-saude.pt)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = NORPROD1)
)
)
CENPROD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtd01-prd-bd01-vip.spms.min-saude.pt)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = CENPROD1)
)
)
LVTPROD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtd01-prd-bd01-vip.spms.min-saude.pt)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = LVTPROD1)
)
)
SULPROD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtd01-prd-bd01-vip.spms.min-saude.pt)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = SULPROD1)
)
)
## primary
NORPRD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtdracdb11prd-vip.spms.local)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = NORPRD1)
)
)
CENPRD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtdracdb11prd-vip.spms.local)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = CENPRD1)
)
)
LVTPRD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtdracdb11prd-vip.spms.local)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = LVTPRD1)
)
)
SULPRD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtdracdb11prd-vip.spms.local)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = SULPRD1)
)
)
## standby entries via SCAN
NORPROD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtd01-prd-scan.spms.min-saude.pt)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = NORPROD1)
)
)
CENPROD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtd01-prd-scan.spms.min-saude.pt)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = CENPROD1)
)
)
LVTPROD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtd01-prd-scan.spms.min-saude.pt)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = LVTPROD1)
)
)
SULPROD =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = sgtd01-prd-scan.spms.min-saude.pt)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SID = SULPROD1)
)
)
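With both tnsnames.ora files in place, connectivity can be verified from each side before running the duplicate (an assumed check, not in the original notes):
tnsping NORPRD       # from the standby host, reach the primary
tnsping NORPROD      # from the primary host, reach the standby
sqlplus sys@NORPRD as sysdba    # authenticates through the copied password file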
-----------
3. Create the standby database
a. Copy the password file from the primary $ORACLE_HOME/dbs and rename it to the standby database name.
[oracle@sgtdracdb11prd dbs]$ scp -p orapw* oracle@sgtd01-prd-bd01.spms.min-saude.pt:/app/oracle/product/11.2.0.4/dbhome_1/dbs/
gSypdd9QKjnTYbnRZczN
DB_NAME=NORPRD
DB_UNIQUE_NAME=NORPROD
DB_BLOCK_SIZE=8192
sga_target=4G
c. Create the necessary directories in the standby location to place database files and trace files ($ADR_HOME).
mkdir /app/oracle/diag/rdbms/norprod
mkdir /app/oracle/admin/NORPROD
mkdir -p /app/oracle/admin/NORPROD/adump/
d. Set the environment variable ORACLE_SID to the standby service and start the standby instance.
vi .profileNORPROD
ORACLE_BASE=/app/oracle
export ORACLE_BASE
ORACLE_HOME=/app/oracle/product/11.2.0.4/dbhome_1
export ORACLE_HOME
ORACLE_SID=NORPROD1
export ORACLE_SID
PATH=$ORACLE_HOME/bin:$ORACLE_HOME/lib:$PATH
export PATH
Database migration
sqlplus "/ as sysdba"
startup nomount pfile=/app/oracle/product/11.2.0.4/dbhome_1/dbs/initNORPROD1.ora
sqlplus sys/sxoso17@NORPRD as sysdba
sqlplus sys/sxoso17@NORPROD as sysdba
rman target sys/sxoso17@NORPRD auxiliary sys/sxoso17@NORPROD
spool log to duplicate_NORPROD1.log
run {
allocate channel prmy1 type disk;
allocate channel prmy2 type disk;
allocate channel prmy3 type disk;
allocate channel prmy4 type disk;
allocate auxiliary channel stby type disk;
RMAN-04014: startup failed: ORA-48108: invalid value given for the diagnostic_dest init.ora parameter
ORA-48140: the specified ADR Base directory does not exist [/u01/app/oracle]
ORA-48187: specified directory does not exist
Linux-x86_64 Error: 2: No such file or directory
-- fix: set diagnostic_dest='/app/oracle' in the standby init.ora
RMAN-03015: error occurred in stored script Memory Script
RMAN-04014: startup failed: ORA-00119: invalid specification for system parameter REMOTE_LISTENER
ORA-00132: syntax error or unresolved network name 'sgtddb1xracprd-scan:1521'
-- fix: add the primary SCAN to /etc/hosts on the new hosts:
10.105.8.130 sgtddb1xracprd-scan.spms.local sgtddb1xracprd-scan
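The body of the run block above is truncated; per Doc ID 1075908.1 the standby duplicate typically looks like this sketch (the SET clauses are assumptions for this environment, not a capture of what was run):
run {
allocate channel prmy1 type disk;
allocate channel prmy2 type disk;
allocate channel prmy3 type disk;
allocate channel prmy4 type disk;
allocate auxiliary channel stby type disk;
duplicate target database for standby from active database
  spfile
    set db_unique_name='NORPROD'
    set control_files='+DG_DATA','+DG_ARCH'
    set diagnostic_dest='/app/oracle'
    set cluster_database='false'
    set fal_server='NORPRD'
    set log_archive_dest_2='service=NORPRD async valid_for=(online_logfile,primary_role) db_unique_name=NORPRD'
  nofilenamecheck;
}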
--------------------------------------------------------------------------------------------------------------------------
set pagesize 900
set linesize 900
STARTUP NOMOUNT;
ALTER DATABASE MOUNT STANDBY DATABASE;
--ALTER DATABASE OPEN READ ONLY;
--ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION;
-- on primary- ALTER SYSTEM SET log_archive_dest_state_2=ENABLE;
-- add to cluster
srvctl add database -d NORPROD -o /app/oracle/product/11.2.0.4/dbhome_1 -s MOUNT
srvctl add instance -d NORPROD -i NORPROD1 -n sgtd01-prd-bd01
srvctl add instance -d NORPROD -i NORPROD2 -n sgtd01-prd-bd02
srvctl modify database -d NORPROD -r physical_standby -p '+DG_DATA/NORPROD/spfileNORPROD.ora'
srvctl status database -d NORPROD
srvctl start database -d NORPROD
--srvctl modify database -d NORPROD -s "MOUNT" -r PHYSICAL_STANDBY
srvctl config database -d NORPROD
-----------------------------------------------------------------------------------------------------------------------------------
---------- setup dg broker
https://www.oracle.com/br/technical-resources/articles/database-performance/dataguard-setup-broker.html
SQL> show parameter dg_
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
cell_offloadgroup_name string
dg_broker_config_file1 string +DG_DATA/NORPROD/dr1.dat
dg_broker_config_file2 string +DG_DATA/NORPROD/dr2.dat
dg_broker_start boolean FALSE
--- primary
ALTER SYSTEM SET DG_BROKER_CONFIG_FILE1='+DG_DATA/NORPRD/dr1.dat' SCOPE=BOTH;
ALTER SYSTEM SET DG_BROKER_CONFIG_FILE2='+DG_DATA/NORPRD/dr2.dat' SCOPE=BOTH;
-- standby
ALTER SYSTEM SET DG_BROKER_CONFIG_FILE1='+DG_DATA/NORPROD/dr1.dat' SCOPE=BOTH;
ALTER SYSTEM SET DG_BROKER_CONFIG_FILE2='+DG_DATA/NORPROD/dr2.dat' SCOPE=BOTH;
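dg_broker_start was FALSE in the parameter listing above; the broker process must be started on both sites before DGMGRL can create the configuration (a standard step, assumed here):
ALTER SYSTEM SET DG_BROKER_START=TRUE SCOPE=BOTH SID='*';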
-- primary and standby
dgmgrl /
DGMGRL>
CREATE CONFIGURATION dg_config AS PRIMARY DATABASE IS NORPRD CONNECT IDENTIFIER IS NORPRD;
ADD DATABASE NORPROD AS CONNECT IDENTIFIER IS NORPROD MAINTAINED AS PHYSICAL;
ENABLE CONFIGURATION;
SHOW CONFIGURATION;
SHOW DATABASE NORPRD;
SHOW DATABASE NORPROD;
Instance(s):
NORPRD1
NORPRD2
Error: ORA-16737: the redo transport service for standby database "norprod" has an error
md5sum orapwNORPRD2
3d9d4fa1519bb4b1a3be044b7294ef30 orapwNORPRD2
5d46f98131375f2e8424bf9cf68c0b46 orapwNORPRD1
5d46f98131375f2e8424bf9cf68c0b46 orapwNORPROD1
5d46f98131375f2e8424bf9cf68c0b46 orapwNORPROD2
5d46f98131375f2e8424bf9cf68c0b46 orapwNORPRD2
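orapwNORPRD2 on the primary's second node differs from every other copy, which is consistent with the ORA-16737 redo transport error. A likely fix is to push a good copy and re-check (a sketch; host and path taken from the notes above):
# on primary node 1, in $ORACLE_HOME/dbs
scp -p orapwNORPRD1 oracle@sgtdracdb12prd:/u01/app/oracle/product/11.2.0.4/db_1/dbs/orapwNORPRD2
md5sum orapw*    # all copies should now match; the broker will retry transport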