
    [Original] Oracle RAC Deployment: A Detailed Walkthrough

    Posted by mchdba on 2017-04-30 22:56:58

    1.  Install the Linux operating system:

    After the installation finishes, install the following packages:

    rpm -Uvh binutils-2.*

    rpm -Uvh elfutils-libelf-0.*

    rpm -Uvh glibc-2.*

    rpm -Uvh glibc-common-2.*

    rpm -Uvh libaio-0.*

    rpm -Uvh libgcc-4.*

    rpm -Uvh libstdc++-4.*

    rpm -Uvh make-3.*

    rpm -Uvh kernel-headers-2.6.18-274.el5.x86_64.rpm

    rpm -Uvh glibc-headers-2.*

    rpm -Uvh glibc-devel-2.*

    rpm -Uvh elfutils-libelf-0.137-3.el5.i386.rpm

    rpm -Uvh elfutils-libelf-devel*

    rpm -Uvh elfutils-libelf-devel-0.*

    rpm -Uvh gcc-4.*

    rpm -Uvh libstdc++-devel-4.*

    rpm -Uvh gcc-c++-4.*

    rpm -Uvh unixODBC-2.*

    rpm -Uvh compat-libstdc++-296*

    rpm -Uvh compat-libstdc++-33*

    rpm -Uvh libaio-devel-0.*

    rpm -Uvh libXp-1.*

    rpm -Uvh unixODBC-devel-2.*

    rpm -Uvh sysstat-7.*

    2.  Configure NTP and FTP on the Linux systems (a sketch follows):
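    The original lists no commands for this step. As a minimal sketch on EL5 (the clock.redhat.com server name is taken from the /etc/hosts file in the next step, and vsftpd is assumed as the FTP server), the services could be set up like this:

    # run as root on each node (sketch only; adjust server names and packages to your environment)
    echo "server clock.redhat.com" >> /etc/ntp.conf
    chkconfig ntpd on
    service ntpd start
    yum install -y vsftpd && chkconfig vsftpd on && service vsftpd start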

    3.  Configure the Linux network:

    1)  Edit the /etc/hosts file on both nodes:

    127.0.0.1              localhost.localdomain localhost

    #used by ntp service to synchronize time

    66.187.233.4    clock.redhat.com

     

    #local network for RAC

    192.168.0.131   ora10racn1      ora10racn1.ccz.com

    192.168.0.132   ora10racn2      ora10racn2.ccz.com

     

    192.168.0.151   ora10racn1-vip

    192.168.0.152   ora10racn2-vip

     

    192.168.2.131   ora10racn1-str

    192.168.2.132   ora10racn2-str

     

    10.10.0.131   ora10racn1-priv

    10.10.0.132   ora10racn2-priv

     

    192.168.0.110   openfiler

    192.168.2.110   openfiler-str

    2)  Make sure the node hostnames do not appear on the loopback line:

    127.0.0.1              localhost.localdomain localhost

    3)  Adjust the network settings in /etc/sysctl.conf:

    #+---------------------------------------------------------+

    # | Default setting in bytes of the socket "receive" buffer |

    # | which may be set by using the SO_RCVBUF socket option.  |

    #+---------------------------------------------------------+

    net.core.rmem_default=1048576

     

    #+---------------------------------------------------------+

    # | Maximum setting in bytes of the socket "receive" buffer |

    # | which may be set by using the SO_RCVBUF socket option.  |

    #+---------------------------------------------------------+

    net.core.rmem_max=1048576

     

    #+---------------------------------------------------------+

    # | Default setting in bytes of the socket "send" buffer    |

    # | which may be set by using the SO_SNDBUF socket option.  |

    #+---------------------------------------------------------+

    net.core.wmem_default=262144

     

    #+---------------------------------------------------------+

    # | Maximum setting in bytes of the socket "send" buffer    |

    # | which may be set by using the SO_SNDBUF socket option.  |

    #+---------------------------------------------------------+

    net.core.wmem_max=262144

    4)  Turn off UDP/ICMP rejection (the firewall) on all nodes:

    [root@ora10racn1 Server]# /etc/rc.d/init.d/iptables status
    Firewall is stopped.

    If it is running, stop it with:

    [root@ora10racn1 Server]# /etc/rc.d/init.d/iptables stop

    Disable iptables in all runlevels:

    [root@ora10racn1 Server]# chkconfig --list|grep iptables
    iptables        0:off  1:off   2:on    3:on   4:on    5:on    6:off
    [root@ora10racn1 Server]# chkconfig iptables off
    [root@ora10racn1 Server]# chkconfig --list|grep iptables
    iptables        0:off  1:off   2:off   3:off  4:off   5:off   6:off

    4.  Create the oracle user and make sure the nobody account exists:

    # groupadd -g 501 oinstall

    # groupadd -g 502 dba

    # groupadd -g 503 oper

    # useradd -m -u 501 -g oinstall -G dba,oper -d /home/oracle -s /bin/bash oracle

     

    # id oracle

    uid=501(oracle) gid=501(oinstall) groups=501(oinstall),502(dba),503(oper)
    # passwd oracle
    Changing password for user oracle.
    New UNIX password: xxxxxxxxxxx
    Retype new UNIX password: xxxxxxxxxxx
    passwd: all authentication tokens updated successfully.

    Modify the oracle account's .bash_profile:

    export JAVA_HOME=/usr/local/java

    # User specific environment and startup programs
    export ORACLE_BASE=/u01/app/oracle
    export ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
    export ORA_CRS_HOME=/u01/app/crs
    export ORACLE_PATH=$ORACLE_BASE/dba_scripts/common/sql:.:$ORACLE_HOME/rdbms/admin
    export CV_JDKHOME=/usr/local/java

    # Each RAC node must have a unique ORACLE_SID. (i.e. racdb1, racdb2,...)
    export ORACLE_SID=racdb1

    export PATH=.:${JAVA_HOME}/bin:${PATH}:$HOME/bin:$ORACLE_HOME/bin
    export PATH=${PATH}:/usr/bin:/bin:/usr/bin/X11:/usr/local/bin
    export PATH=${PATH}:$ORACLE_BASE/dba_scripts/common/bin
    export ORACLE_TERM=xterm
    export TNS_ADMIN=$ORACLE_HOME/network/admin
    export ORA_NLS10=$ORACLE_HOME/nls/data
    export NLS_DATE_FORMAT="DD-MON-YYYY HH24:MI:SS"
    export LD_LIBRARY_PATH=$ORACLE_HOME/lib
    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib
    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib
    export CLASSPATH=$ORACLE_HOME/JRE
    export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/jlib
    export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/rdbms/jlib
    export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/network/jlib
    export THREADS_FLAG=native
    export TEMP=/tmp
    export TMPDIR=/tmp

    Check the nobody account:

    # id nobody

    uid=99(nobody) gid=99(nobody)groups=99(nobody)

    If nobody does not exist, create it:

    # /usr/sbin/useradd nobody

    5.  Install Openfiler and create the required volumes (a quick sanity check follows):
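    The Openfiler volume setup itself is done in its web GUI and is not shown in the original. As a small hedged check (host name taken from the /etc/hosts entries above), confirm from each RAC node that the storage-network path to Openfiler is up before moving on to iSCSI discovery:

    # sketch only: verify reachability of the Openfiler storage interface from each node
    ping -c 2 openfiler-str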

    6.  Create the required directories on each node:

    # mkdir -p /u01/app/oracle
    # chown -R oracle:oinstall /u01/app/oracle
    # chmod -R 775 /u01/app/oracle

    # mkdir -p /u01/app/crs
    # chown -R oracle:oinstall /u01/app/crs
    # chmod -R 775 /u01/app/crs

    # mkdir -p /u02
    # chown -R oracle:oinstall /u02
    # chmod -R 775 /u02

    7.  Discover the iSCSI volumes on each node

    1)  Install the iSCSI initiator on each node:

    [root@ora10racn1 Server]# rpm -Uvh iscsi-initiator-utils-6.2.0.872-10.0.1.el5.x86_64.rpm
    warning: iscsi-initiator-utils-6.2.0.872-10.0.1.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
    Preparing...               ########################################### [100%]
            package iscsi-initiator-utils-6.2.0.872-10.0.1.el5.x86_64 is already installed

    2)  Start the iSCSI daemon on each node:

    [root@ora10racn1Server]# service iscsid start

    Starting iSCSI daemon:

    [  OK  ]

    3)  Discover the Openfiler volumes from each node:

    [root@ora10racn1 Server]# iscsiadm -m discovery -t sendtargets -p 192.168.2.110
    192.168.2.110:3260,1 iqn.2006-01.com.openfiler:racdb.asm2
    192.168.0.110:3260,1 iqn.2006-01.com.openfiler:racdb.asm2
    192.168.2.110:3260,1 iqn.2006-01.com.openfiler:racdb.asm1
    192.168.0.110:3260,1 iqn.2006-01.com.openfiler:racdb.asm1
    192.168.2.110:3260,1 iqn.2006-01.com.openfiler:racdb.crs1
    192.168.0.110:3260,1 iqn.2006-01.com.openfiler:racdb.crs1

    4)  On both nodes, log in to the volumes manually and configure automatic login at boot:

    [root@ora10racn1 Server]# iscsiadm -m node -T iqn.2006-01.com.openfiler:racdb.crs1 -p 192.168.2.110 -l
    Logging in to [iface: default, target: iqn.2006-01.com.openfiler:racdb.crs1, portal: 192.168.2.110,3260]
    Login to [iface: default, target: iqn.2006-01.com.openfiler:racdb.crs1, portal: 192.168.2.110,3260] successful.
    [root@ora10racn1 Server]# iscsiadm -m node -T iqn.2006-01.com.openfiler:racdb.asm1 -p 192.168.2.110 -l
    Logging in to [iface: default, target: iqn.2006-01.com.openfiler:racdb.asm1, portal: 192.168.2.110,3260]
    Login to [iface: default, target: iqn.2006-01.com.openfiler:racdb.asm1, portal: 192.168.2.110,3260] successful.
    [root@ora10racn1 Server]# iscsiadm -m node -T iqn.2006-01.com.openfiler:racdb.asm2 -p 192.168.2.110 -l
    Logging in to [iface: default, target: iqn.2006-01.com.openfiler:racdb.asm2, portal: 192.168.2.110,3260]
    Login to [iface: default, target: iqn.2006-01.com.openfiler:racdb.asm2, portal: 192.168.2.110,3260] successful.

    [root@ora10racn1 Server]# iscsiadm -m node -T iqn.2006-01.com.openfiler:racdb.crs1 -p 192.168.2.110 --op update -n node.startup -v automatic
    [root@ora10racn1 Server]# iscsiadm -m node -T iqn.2006-01.com.openfiler:racdb.asm1 -p 192.168.2.110 --op update -n node.startup -v automatic
    [root@ora10racn1 Server]# iscsiadm -m node -T iqn.2006-01.com.openfiler:racdb.asm2 -p 192.168.2.110 --op update -n node.startup -v automatic

    5)  Create persistent naming rules on each node so that device names do not depend on the login order:

    (cd /dev/disk/by-path; ls -l *openfiler* | awk '{FS=" "; print $9 " " $10 " " $11}')
    ip-192.168.2.110:3260-iscsi-iqn.2006-01.com.openfiler:racdb.asm1-lun-0 -> ../../sdc
    ip-192.168.2.110:3260-iscsi-iqn.2006-01.com.openfiler:racdb.asm2-lun-0 -> ../../sdd
    ip-192.168.2.110:3260-iscsi-iqn.2006-01.com.openfiler:racdb.crs1-lun-0 -> ../../sdb

    Create the udev rule:

    [root@ora10racn1 rules.d]# vi /etc/udev/rules.d/55-openiscsi.rules

    KERNEL=="sd*", BUS=="scsi", PROGRAM="/etc/udev/scripts/iscsidev.sh %b", SYMLINK+="iscsi/%c/part%n"

    Create the script used by the rule:

    [root@ora10racn2 Server]# mkdir -p /etc/udev/scripts

    [root@ora10racn2 scripts]# vi /etc/udev/scripts/iscsidev.sh

    #!/bin/sh

     

    # FILE: /etc/udev/scripts/iscsidev.sh

     

    BUS=${1}

    HOST=${BUS%%:*}

     

    [ -e /sys/class/iscsi_host ] || exit 1

     

    file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/targetname"

     

    target_name=$(cat ${file})

     

    # This is not an open-scsi drive

    if [ -z "${target_name}" ]; then

       exit 1

    fi

     

    # Check if QNAP drive

    check_qnap_target_name=${target_name%%:*}

    if [ $check_qnap_target_name = "iqn.2004-04.com.qnap" ]; then

        target_name=`echo "${target_name%.*}"`

    fi

     

    echo "${target_name##*.}"


    "iscsidev.sh" [New] 25L, 507C written

    [root@ora10racn2 scripts]#chmod 755 /etc/udev/scripts/iscsidev.sh

    Restart the iSCSI service:

    [root@ora10racn2 scripts]# service iscsi stop

    Logging out of session [sid: 1, target:iqn.2006-01.com.openfiler:racdb.crs1, portal: 192.168.2.110,3260]

    Logging out of session [sid: 2, target:iqn.2006-01.com.openfiler:racdb.asm1, portal: 192.168.2.110,3260]

    Logging out of session [sid: 3, target:iqn.2006-01.com.openfiler:racdb.asm2, portal: 192.168.2.110,3260]

    Logout of [sid: 1, target: iqn.2006-01.com.openfiler:racdb.crs1, portal:192.168.2.110,3260] successful.

    Logout of [sid: 2, target: iqn.2006-01.com.openfiler:racdb.asm1, portal:192.168.2.110,3260] successful.

    Logout of [sid: 3, target: iqn.2006-01.com.openfiler:racdb.asm2, portal:192.168.2.110,3260] successful.

    Stopping iSCSI daemon:

    [root@ora10racn2 scripts]# service iscsi start

    iscsid is stopped

    [  OK  ] iSCSI daemon: [  OK  ]

    [  OK  ]

    Setting up iSCSI targets: Logging in to [iface: default, target:iqn.2006-01.com.openfiler:racdb.asm1, portal: 192.168.2.110,3260]

    Logging in to [iface: default, target: iqn.2006-01.com.openfiler:racdb.asm1,portal: 192.168.0.110,3260]

    Logging in to [iface: default, target:iqn.2006-01.com.openfiler:racdb.asm2, portal: 192.168.2.110,3260]

    Logging in to [iface: default, target:iqn.2006-01.com.openfiler:racdb.asm2, portal: 192.168.0.110,3260]

    Logging in to [iface: default, target:iqn.2006-01.com.openfiler:racdb.crs1, portal: 192.168.2.110,3260]

    Logging in to [iface: default, target:iqn.2006-01.com.openfiler:racdb.crs1, portal: 192.168.0.110,3260]

    Login to [iface: default, target: iqn.2006-01.com.openfiler:racdb.asm1,portal: 192.168.2.110,3260] successful.

    iscsiadm: Could not login to [iface: default, target:iqn.2006-01.com.openfiler:racdb.asm1, portal: 192.168.0.110,3260].

    iscsiadm: initiator reported error (19 - encountered non-retryable iSCSI loginfailure)

    Login to [iface: default, target: iqn.2006-01.com.openfiler:racdb.asm2,portal: 192.168.2.110,3260] successful.

    iscsiadm: Could not login to [iface: default, target:iqn.2006-01.com.openfiler:racdb.asm2, portal: 192.168.0.110,3260].

    iscsiadm: initiator reported error (19 - encountered non-retryable iSCSIlogin failure)

    Login to [iface: default, target: iqn.2006-01.com.openfiler:racdb.crs1,portal: 192.168.2.110,3260] successful.

    iscsiadm: Could not login to [iface: default, target: iqn.2006-01.com.openfiler:racdb.crs1,portal: 192.168.0.110,3260].

    iscsiadm: initiator reported error (19 - encountered non-retryable iSCSIlogin failure)

    iscsiadm: Could not log into all portals

    [  OK  ]

    [root@ora10racn2 scripts]# ls -l /dev/iscsi/*

    /dev/iscsi/asm1:

    total 0

    lrwxrwxrwx 1 root root 9 May 16 15:16 part -> ../../sdc

     

    /dev/iscsi/asm2:

    total 0

    lrwxrwxrwx 1 root root 9 May 16 15:16 part -> ../../sdb

     

    /dev/iscsi/crs1:

    total 0

    lrwxrwxrwx 1 root root 9 May 16 15:16 part -> ../../sdd

    6)  Create partitions on the iSCSI volumes (note: run this on one node only):

    [root@ora10racn1 rules.d]# fdisk /dev/iscsi/crs1/part

    Device contains neither a valid DOS partition table, nor Sun, SGI or OSFdisklabel

    Building a new DOS disklabel. Changes will remain in memory only,

    until you decide to write them. After that, of course, the previous

    content won't be recoverable.

     

    Warning: invalid flag 0x0000 of partition table 4 will be corrected byw(rite)

     

    Command (m for help): n

    Command action

       e   extended

       p   primary partition (1-4)

    p

    Partition number (1-4): 1

    First cylinder (1-1009, default 1): 1

    Last cylinder or +size or +sizeM or +sizeK (1-1009, default 1009): 1009

     

    Command (m for help): p

     

    Disk /dev/iscsi/crs1/part: 2147 MB, 2147483648 bytes

    67 heads, 62 sectors/track, 1009 cylinders

    Units = cylinders of 4154 * 512 = 2126848 bytes

     

                   Device Boot      Start         End      Blocks  Id  System

    /dev/iscsi/crs1/part1              1        1009     2095662  83  Linux

     

    Command (m for help): w

    The partition table has been altered!

     

    Calling ioctl() to re-read partition table.

    Syncing disks.

    [root@ora10racn1 rules.d]#fdisk /dev/iscsi/asm1/part

    Device contains neither a valid DOS partition table, nor Sun, SGI or OSFdisklabel

    Building a new DOS disklabel. Changes will remain in memory only,

    until you decide to write them. After that, of course, the previous

    content won't be recoverable.

     

     

    The number of cylinders for this disk is set to 8192.

    There is nothing wrong with that, but this is larger than 1024,

    and could in certain setups cause problems with:

    1) software that runs at boot time (e.g., old versions of LILO)

    2) booting and partitioning software from other OSs

       (e.g., DOS FDISK, OS/2 FDISK)

    Warning: invalid flag 0x0000 of partition table 4 will be corrected byw(rite)

     

    Command (m for help): n

    Command action

       e   extended

       p   primary partition (1-4)

    p

    Partition number (1-4): 1

    First cylinder (1-8192, default 1): 1

    Last cylinder or +size or +sizeM or +sizeK (1-8192, default 8192): 8192

     

    Command (m for help): p

     

    Disk /dev/iscsi/asm1/part: 8589 MB, 8589934592 bytes

    64 heads, 32 sectors/track, 8192 cylinders

    Units = cylinders of 2048 * 512 = 1048576 bytes

     

                   Device Boot      Start         End      Blocks  Id  System

    /dev/iscsi/asm1/part1              1        8192     8388592  83  Linux

     

    Command (m for help): w

    The partition table has been altered!

     

    Calling ioctl() to re-read partition table.

     

    WARNING: Re-reading the partition table failed with error 16: Device orresource busy.

    The kernel still uses the old table.

    The new table will be used at the next reboot.

    Syncing disks.

    [root@ora10racn1 rules.d]# fdisk /dev/iscsi/asm2/part

    Device contains neither a valid DOS partition table, nor Sun, SGI or OSFdisklabel

    Building a new DOS disklabel. Changes will remain in memory only,

    until you decide to write them. After that, of course, the previous

    content won't be recoverable.

     

     

    The number of cylinders for this disk is set to 8192.

    There is nothing wrong with that, but this is larger than 1024,

    and could in certain setups cause problems with:

    1) software that runs at boot time (e.g., old versions of LILO)

    2) booting and partitioning software from other OSs

       (e.g., DOS FDISK, OS/2 FDISK)

    Warning: invalid flag 0x0000 of partition table 4 will be corrected byw(rite)

     

    Command (m for help): n

    Command action

       e   extended

       p   primary partition (1-4)

    p   

    Partition number (1-4): 1

    First cylinder (1-8192, default 1): 1

    Last cylinder or +size or +sizeM or +sizeK (1-8192, default 8192): 8192

     

    Command (m for help): p

     

    Disk /dev/iscsi/asm2/part: 8589 MB, 8589934592 bytes

    64 heads, 32 sectors/track, 8192 cylinders

    Units = cylinders of 2048 * 512 = 1048576 bytes

     

                   Device Boot      Start         End      Blocks  Id  System

    /dev/iscsi/asm2/part1              1        8192     8388592  83  Linux

     

    Command (m for help): w

    The partition table has been altered!

     

    Calling ioctl() to re-read partition table.

    Syncing disks.

    7)  Verify the partitions on each node:

    [root@ora10racn1 rules.d]# partprobe

    [root@ora10racn1 rules.d]# fdisk -l

     

    Disk /dev/sda: 53.6 GB, 53687091200 bytes

    255 heads, 63 sectors/track, 6527 cylinders

    Units = cylinders of 16065 * 512 = 8225280 bytes

     

       Device Boot      Start         End      Blocks  Id  System

    /dev/sda1   *           1          13      104391  83  Linux

    /dev/sda2              14        6527   52323705   8e  Linux LVM

     

    Disk /dev/dm-0: 49.3 GB, 49358569472 bytes

    255 heads, 63 sectors/track, 6000 cylinders

    Units = cylinders of 16065 * 512 = 8225280 bytes

     

    Disk /dev/dm-0 doesn't contain a valid partition table

     

    Disk /dev/dm-1: 4194 MB, 4194304000 bytes

    255 heads, 63 sectors/track, 509 cylinders

    Units = cylinders of 16065 * 512 = 8225280 bytes

     

    Disk /dev/dm-1 doesn't contain a valid partition table

     

    Disk /dev/sdb: 8589 MB, 8589934592 bytes

    64 heads, 32 sectors/track, 8192 cylinders

    Units = cylinders of 2048 * 512 = 1048576 bytes

     

       Device Boot      Start         End      Blocks  Id  System

    /dev/sdb1               1        8192    8388592   83  Linux

     

    Disk /dev/sdc: 8589 MB, 8589934592 bytes

    64 heads, 32 sectors/track, 8192 cylinders

    Units = cylinders of 2048 * 512 = 1048576 bytes

     

       Device Boot      Start         End      Blocks  Id  System

    /dev/sdc1               1        8192    8388592   83  Linux

     

    Disk /dev/sdd: 2147 MB, 2147483648 bytes

    67 heads, 62 sectors/track, 1009 cylinders

    Units = cylinders of 4154 * 512 = 2126848 bytes

     

       Device Boot      Start         End      Blocks  Id  System

    /dev/sdd1               1        1009    2095662   83  Linux

    [root@ora10racn1 rules.d]# (cd /dev/disk/by-path; ls -l *openfiler* | awk '{FS=" "; print $9 " " $10 " " $11}')
    ip-192.168.2.110:3260-iscsi-iqn.2006-01.com.openfiler:racdb.asm1-lun-0 -> ../../sdc
    ip-192.168.2.110:3260-iscsi-iqn.2006-01.com.openfiler:racdb.asm1-lun-0-part1 -> ../../sdc1
    ip-192.168.2.110:3260-iscsi-iqn.2006-01.com.openfiler:racdb.asm2-lun-0 -> ../../sdb
    ip-192.168.2.110:3260-iscsi-iqn.2006-01.com.openfiler:racdb.asm2-lun-0-part1 -> ../../sdb1
    ip-192.168.2.110:3260-iscsi-iqn.2006-01.com.openfiler:racdb.crs1-lun-0 -> ../../sdd
    ip-192.168.2.110:3260-iscsi-iqn.2006-01.com.openfiler:racdb.crs1-lun-0-part1 -> ../../sdd1

    8.  Configure the kernel parameters on each node:

    [root@ora10racn1 ~]# sysctl -p

    net.ipv4.ip_forward = 0

    net.ipv4.conf.default.rp_filter = 1

    net.ipv4.conf.default.accept_source_route = 0

    kernel.sysrq = 0

    kernel.core_uses_pid = 1

    net.ipv4.tcp_syncookies = 1

    kernel.msgmnb = 65536

    kernel.msgmax = 65536

    kernel.shmmax = 4294967295

    kernel.shmall = 268435456

    net.core.rmem_default = 1048576

    net.core.rmem_max = 1048576

    net.core.wmem_default = 262144

    net.core.wmem_max = 262144

    kernel.shmmni = 4096

    kernel.sem = 250 32000 100 128

    fs.file-max = 65536

    net.ipv4.ip_local_port_range = 1024 65000

     

    [root@ora10racn1 ~]#cat >>/etc/security/limits.conf <<EOF

    oracle soft nproc 2047

    oracle hard nproc 16384

    oracle soft nofile 1024

    oracle hard nofile 65536

    EOF

     

    [root@ora10racn1 ~]#cat >> /etc/pam.d/login<<EOF

    session required /lib/security/pam_limits.so

    EOF

    Append the following to the end of /etc/profile (the $ signs are escaped because the block is assumed to be appended via a heredoc, as sketched after it; drop the backslashes if you edit the file directly):

    if [ \$USER = "oracle" ]; then

        if [ \$SHELL = "/bin/ksh" ]; then

            ulimit -p 16384

            ulimit -n 65536

        else

            ulimit -u 16384 -n 65536

        fi

        umask 022

    fi
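    As a minimal sketch of that assumption (not shown in the original), the block can be appended with the same cat/heredoc pattern used for limits.conf above:

    [root@ora10racn1 ~]# cat >> /etc/profile <<EOF
    if [ \$USER = "oracle" ]; then
        if [ \$SHELL = "/bin/ksh" ]; then
            ulimit -p 16384
            ulimit -n 65536
        else
            ulimit -u 16384 -n 65536
        fi
        umask 022
    fi
    EOF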

    9.  Configure hangcheck-timer on each node:

    1)  Check whether the module is present:

    [root@ora10racn1 ~]# find /lib/modules -name "hangcheck-timer.ko"

    /lib/modules/2.6.18-274.el5/kernel/drivers/char/hangcheck-timer.ko

    /lib/modules/2.6.32-200.13.1.el5uek/kernel/drivers/char/hangcheck-timer.ko

    2)  Configure the two hangcheck-timer parameters:

    [root@ora10racn1 ~]# echo "options hangcheck-timer hangcheck_tick=30 hangcheck_margin=180" >> /etc/modprobe.conf

    3)  Manually load the hangcheck-timer kernel module:

    [root@ora10racn1 ~]# echo "/sbin/modprobe hangcheck-timer" >> /etc/rc.local
    [root@ora10racn1 ~]# modprobe hangcheck-timer
    [root@ora10racn1 ~]# grep Hangcheck /var/log/messages | tail -2
    May 16 17:10:22 ora10racn1 kernel: Hangcheck: starting hangcheck timer 0.9.0 (tick is 60 seconds, margin is 180 seconds).
    May 16 17:10:22 ora10racn1 kernel: Hangcheck: Using get_cycles().

    10.  Configure SSH user equivalence between the nodes:

    On node 1:

    [root@ora10racn2 scripts]# su - oracle

    [oracle@ora10racn2 ~]$ mkdir -p ~/.ssh

    [oracle@ora10racn2 ~]$ chmod 700 ~/.ssh

    [oracle@ora10racn2 ~]$ /usr/bin/ssh-keygen -t rsa

    Generating public/private rsa key pair.

    Enter file in which to save the key(/home/oracle/.ssh/id_rsa):

    Enter passphrase (empty for nopassphrase):

    Enter same passphrase again:                                    /* no passphrase is needed here

    Your identification has been saved in/home/oracle/.ssh/id_rsa.

    Your public key has been saved in/home/oracle/.ssh/id_rsa.pub.

    The key fingerprint is:

    47:d4:27:ab:64:82:24:97:d9:85:59:f0:87:49:7d:c7oracle@ora10racn1.ccz.com

    [oracle@ora10racn1 ~]$ touch ~/.ssh/authorized_keys

    [oracle@ora10racn1 ~]$ cd .ssh

    [oracle@ora10racn1 .ssh]$ ls -l *.pub

    -rw-r--r-- 1 oracle oinstall 407 May 1620:29 id_rsa.pub

     

    [oracle@ora10racn1 .ssh]$ ssh ora10racn1.ccz.com cat ~/.ssh/id_rsa.pub>>~/.ssh/authorized_keys

    The authenticity of host'ora10racn1.ccz.com (192.168.0.131)' can't be established.

    RSA key fingerprint isf9:5e:99:da:b1:00:9e:6b:a4:9e:a0:cd:ae:e3:4d:ca.

    Are you sure you want to continueconnecting (yes/no)? yes

    Warning: Permanently added'ora10racn1.ccz.com,192.168.0.131' (RSA) to the list of known hosts.

    oracle@ora10racn1.ccz.com's password: /* enter the oracle user's existing password here

    [oracle@ora10racn1 .ssh]$ ssh ora10racn2.ccz.com cat ~/.ssh/id_rsa.pub>>~/.ssh/authorized_keys

    The authenticity of host'ora10racn2.ccz.com (192.168.0.132)' can't be established.

    RSA key fingerprint isf9:5e:99:da:b1:00:9e:6b:a4:9e:a0:cd:ae:e3:4d:ca.

    Are you sure you want to continue connecting(yes/no)? yes

    Warning: Permanently added'ora10racn2.ccz.com,192.168.0.132' (RSA) to the list of known hosts.

    [oracle@ora10racn1 .ssh]$ scp ~/.ssh/authorized_keys ora10racn2:.ssh/authorized_keys

    The authenticity of host 'ora10racn2(192.168.0.132)' can't be established.

    RSA key fingerprint isf9:5e:99:da:b1:00:9e:6b:a4:9e:a0:cd:ae:e3:4d:ca.

    Are you sure you want to continueconnecting (yes/no)? yes

    Warning: Permanently added 'ora10racn2'(RSA) to the list of known hosts.

    authorized_keys                                                                                   100%1221     1.2KB/s   00:00   

    [oracle@ora10racn1 .ssh]$ chmod 600 ~/.ssh/authorized_keys

    [oracle@ora10racn1 .ssh]$ ssh ora10racn1.ccz.com date

    Wed May 16 20:38:33 CST 2012

    [oracle@ora10racn1 .ssh]$ ssh ora10racn2.ccz.com date

    Wed May 16 20:38:38 CST 2012

    [oracle@ora10racn1 .ssh]$

     

    [oracle@ora10racn1 .ssh]$ exec /usr/bin/ssh-agent $SHELL

    [oracle@ora10racn1 .ssh]$ /usr/bin/ssh-add

    Identity added:/home/oracle/.ssh/id_rsa (/home/oracle/.ssh/id_rsa)

    [oracle@ora10racn1 .ssh]$ ssh ora10racn1.ccz.com "date;hostname"

    Wed May 16 20:52:16 CST 2012

    ora10racn1.ccz.com

    [oracle@ora10racn1 .ssh]$ ssh ora10racn2.ccz.com "date;hostname"

    Wed May 16 20:52:21 CST 2012

    ora10racn2.ccz.com

    On node 2:

    [root@ora10racn2scripts]# su - oracle

    [oracle@ora10racn2 ~]$ mkdir -p ~/.ssh

    [oracle@ora10racn2 ~]$chmod 700 ~/.ssh

    [oracle@ora10racn2 ~]$/usr/bin/ssh-keygen -t rsa

    Generatingpublic/private rsa key pair.

    Enter file in which tosave the key (/home/oracle/.ssh/id_rsa):

    Enter passphrase(empty for no passphrase):

    Enter same passphraseagain:

    Your identificationhas been saved in /home/oracle/.ssh/id_rsa.

    Your public key hasbeen saved in /home/oracle/.ssh/id_rsa.pub.

    The key fingerprintis:

    73:ec:af:49:ad:f1:3d:af:96:3a:d5:1f:5c:a7:d0:eaoracle@ora10racn2.ccz.com

     

    [oracle@ora10racn2.ssh]$ chmod 600 ~/.ssh/authorized_keys

    [oracle@ora10racn2.ssh]$ ssh ora10racn1.ccz.com hostname

    ora10racn1.ccz.com

    [oracle@ora10racn2.ssh]$ ssh ora10racn1.ccz.com date

    Wed May 16 20:37:53CST 2012

    [oracle@ora10racn2.ssh]$ ssh ora10racn2.ccz.com date

    The authenticity ofhost 'ora10racn2.ccz.com (192.168.0.132)' can't be established.

    RSA key fingerprint isf9:5e:99:da:b1:00:9e:6b:a4:9e:a0:cd:ae:e3:4d:ca.

    Are you sure you wantto continue connecting (yes/no)? yes

    Warning: Permanentlyadded 'ora10racn2.ccz.com,192.168.0.132' (RSA) to the list of known hosts.

    Wed May 16 20:38:00CST 2012

    [oracle@ora10racn2.ssh]$ ssh ora10racn2.ccz.com hostname

    ora10racn2.ccz.com

    [oracle@ora10racn2.ssh]$ ssh ora10racn2.ccz.com date

    Wed May 16 20:38:11CST 2012

     

    [oracle@ora10racn2.ssh]$ exec /usr/bin/ssh-agent $SHELL

    [oracle@ora10racn2.ssh]$ /usr/bin/ssh-add

    Identity added:/home/oracle/.ssh/id_rsa (/home/oracle/.ssh/id_rsa)

    [oracle@ora10racn2.ssh]$ ssh ora10racn1.ccz.com "date;hostname"

    Wed May 16 20:52:36CST 2012

    ora10racn1.ccz.com

    [oracle@ora10racn2.ssh]$ ssh ora10racn2.ccz.com "date;hostname"

    Wed May 16 20:52:41CST 2012

    ora10racn2.ccz.com

    11.  Add the following to the oracle account's .bash_profile on each node to disable stty messages:

    if [ -t 0 ]; then

        stty intr ^C

    fi

    12.  Disable SELinux on each node from the graphical tool:

    # /usr/bin/system-config-securitylevel &
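    Alternatively (a hedged sketch, not from the original), SELinux can be disabled permanently by editing /etc/selinux/config and switched to permissive for the running system with setenforce:

    # sketch only: set SELINUX=disabled in the config file and go permissive now
    sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
    setenforce 0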

    13.  Install and configure OCFS2 on each node:

    1)  Install OCFS2

     [root@ora10racn2 Server]# pwd

    /media/OL5.7 x86_64 dvd 20110728/Server

    [root@ora10racn2Server]# ls -l ocfs2*

    -rw-r--r-- 1 rootroot  328649 Jul 26  2011ocfs2-2.6.18-274.el5-1.4.8-2.el5.x86_64.rpm

    -rw-r--r-- 1 rootroot  333910 Jul 26  2011ocfs2-2.6.18-274.el5debug-1.4.8-2.el5.x86_64.rpm

    -rw-r--r-- 1 rootroot  328371 Jul 26  2011ocfs2-2.6.18-274.el5xen-1.4.8-2.el5.x86_64.rpm

    -rw-r--r-- 1 rootroot  457984 Sep 17  2010 ocfs2console-1.6.3-2.el5.x86_64.rpm

    -rw-r--r-- 1 root root1825960 Sep 17  2010ocfs2-tools-1.6.3-2.el5.x86_64.rpm

    -rw-r--r-- 1 rootroot  180899 Sep 17  2010 ocfs2-tools-devel-1.6.3-2.el5.x86_64.rpm

    [root@ora10racn2 Server]# rpm -Uvh ocfs2-2.6.18-274.el5-1.4.8-2.el5.x86_64.rpm \
    > ocfs2console-1.6.3-2.el5.x86_64.rpm \
    > ocfs2-tools-1.6.3-2.el5.x86_64.rpm \
    > ocfs2-2.6.18-274.el5xen-1.4.8-2.el5.x86_64.rpm

    warning: ocfs2-2.6.18-274.el5-1.4.8-2.el5.x86_64.rpm:Header V3 DSA signature: NOKEY, key ID 1e5e0159

    Preparing...               ########################################### [100%]

            package ocfs2-tools-1.6.3-2.el5.x86_64is already installed

            package ocfs2console-1.6.3-2.el5.x86_64is already installed

    [root@ora10racn2Server]#

    2)  Disable SELinux

    [root@ora10racn2Server]# /usr/bin/system-config-securitylevel &

       

    3)  Configure OCFS2 on both nodes

    [root@ora10racn1Server]#ocfs2console &

    Go to Cluster --> Configure Nodes --> Add and add both node entries on every node. Note that although the IP used here is the private (heartbeat) IP, the node name must be the host name (the value returned by the hostname command).

    The resulting configuration is written to /etc/ocfs2/cluster.conf:

    node:

            ip_port = 7777

            ip_address = 10.10.0.131

            number = 0

            name = ora10racn1.ccz.com

            cluster = ocfs2

     

    node:

           ip_port = 7777

            ip_address = 10.10.0.132

            number = 1

            name = ora10racn2.ccz.com

            cluster = ocfs2

     

    cluster:

            node_count = 2

            name = ocfs2

    Note: if ocfs2console reports the following error while adding a node:

    o2cb_ctl: Unable to access cluster service while creating node
    Could not add node node1

    then rename /etc/ocfs2/cluster.conf first and add the nodes again with ocfs2console (see the sketch below):
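    A minimal sketch of that workaround (file name taken from the text above):

    # move the existing config out of the way, then re-add the nodes in ocfs2console
    mv /etc/ocfs2/cluster.conf /etc/ocfs2/cluster.conf.bak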

    At this point the o2cb service can be checked:

    [oracle@ora10racn1 ~]$ /etc/init.d/o2cb status
    Driver for "configfs": Loaded
    Filesystem "configfs": Mounted
    Stack glue driver: Loaded
    Stack plugin "o2cb": Loaded
    Driver for "ocfs2_dlmfs": Loaded
    Filesystem "ocfs2_dlmfs": Mounted
    Checking O2CB cluster ocfs2: Online
    Heartbeat dead threshold = 31
      Network idle timeout: 30000
      Network keepalive delay: 2000
      Network reconnect delay: 2000
    Checking O2CB heartbeat: Active

    4)  Modify the o2cb configuration (only needs to be run on one node), changing the heartbeat dead threshold from the default 31 to 61:

    [root@ora10racn1 ~]# /etc/init.d/o2cb offline ocfs2
    Stopping O2CB cluster ocfs2: OK
    [root@ora10racn1 ~]# /etc/init.d/o2cb unload
    Unmounting ocfs2_dlmfs filesystem: OK
    Unloading module "ocfs2_dlmfs": OK
    Unloading module "ocfs2_stack_o2cb": OK
    Unmounting configfs filesystem: OK
    Unloading module "configfs": OK
    [root@ora10racn1 ~]# /etc/init.d/o2cb configure
    Configuring the O2CB driver.

    This will configure the on-boot properties of the O2CB driver.
    The following questions will determine whether the driver is loaded on
    boot.  The current values will be shown in brackets ('[]').  Hitting
    <ENTER> without typing an answer will keep that current value. Ctrl-C
    will abort.

    Load O2CB driver on boot (y/n) [y]:
    Cluster stack backing O2CB [o2cb]:
    Cluster to start on boot (Enter "none" to clear) [ocfs2]:
    Specify heartbeat dead threshold (>=7) [31]: 61
    Specify network idle timeout in ms (>=5000) [30000]:
    Specify network keepalive delay in ms (>=1000) [2000]:
    Specify network reconnect delay in ms (>=2000) [2000]:
    Writing O2CB configuration: OK
    Loading filesystem "configfs": OK
    Mounting configfs filesystem at /sys/kernel/config: OK
    Loading stack plugin "o2cb": OK
    Loading filesystem "ocfs2_dlmfs": OK
    Mounting ocfs2_dlmfs filesystem at /dlm: OK
    Setting cluster stack "o2cb": OK
    Starting O2CB cluster ocfs2: OK

    Verify the change:

    [oracle@ora10racn1 ~]$ /etc/init.d/o2cb status
    Driver for "configfs": Loaded
    Filesystem "configfs": Mounted
    Stack glue driver: Loaded
    Stack plugin "o2cb": Loaded
    Driver for "ocfs2_dlmfs": Loaded
    Filesystem "ocfs2_dlmfs": Mounted
    Checking O2CB cluster ocfs2: Online
    Heartbeat dead threshold = 61
      Network idle timeout: 30000
      Network keepalive delay: 2000
      Network reconnect delay: 2000
    Checking O2CB heartbeat: Active

    5)  Format the OCFS2 file system (run on one node only):

    [root@ora10racn1 ~]# mkfs.ocfs2 -b 4k -C 32k -N 4 -L oracrsfiles /dev/iscsi/crs1/part1

    mkfs.ocfs2 1.6.3

    Cluster stack: classico2cb

    Overwriting existingocfs2 partition.

    Proceed (y/N): y

    Label: oracrsfiles

    Features: sparsebackup-super unwritten inline-data strict-journal-super

    Block size: 4096 (12bits)

    Cluster size: 32768(15 bits)

    Volume size:2145943552 (65489 clusters) (523912 blocks)

    Cluster groups: 3(tail covers 977 clusters, rest cover 32256 clusters)

    Extent allocator size:4194304 (1 groups)

    Journal size: 67108864

    Node slots: 4

    Creating bitmaps: done

    Initializingsuperblock: done

    Writing system files:done

    Writing superblock:done

    Writing backupsuperblock: 1 block(s)

    Formatting Journals:done

    Growing extentallocator: done

    Formatting slot map:done

    Formatting quotafiles: done

    Writing lost+found:done

    mkfs.ocfs2 successful

    6)  Mount the OCFS2 file system (run on both nodes):

    [root@ora10racn1 ~]# mount -t ocfs2 -o datavolume,nointr -L "oracrsfiles" /u02

    [root@ora10racn1 ~]# mount

    /dev/mapper/VolGroup00-LogVol00on / type ext3 (rw)

    proc on /proc typeproc (rw)

    sysfs on /sys typesysfs (rw)

    devpts on /dev/ptstype devpts (rw,gid=5,mode=620)

    /dev/sda1 on /boottype ext3 (rw)

    tmpfs on /dev/shm typetmpfs (rw)

    none on/proc/sys/fs/binfmt_misc type binfmt_misc (rw)

    sunrpc on/var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)

    192.168.2.110:/mnt/nfs4backup/nfs4backup/nfs4backupon /mnt/share type nfs(rw,hard,nointr,tcp,noac,nfsvers=3,timeo=600,rsize=32768,wsize=32768,addr=192.168.2.110)

    configfs on/sys/kernel/config type configfs (rw)

    ocfs2_dlmfs on /dlmtype ocfs2_dlmfs (rw)

    /dev/sdd1 on /u02 type ocfs2 (rw,_netdev,datavolume,nointr,heartbeat=local)

    7)  Edit /etc/fstab on both nodes so the OCFS2 file system is mounted automatically at boot:

    [root@ora10racn1 ~]# vi /etc/fstab

     

    /dev/VolGroup00/LogVol00 /                       ext3    defaults        1 1
    LABEL=/boot             /boot                   ext3    defaults        1 2
    tmpfs                   /dev/shm                tmpfs   defaults        0 0
    devpts                  /dev/pts                devpts  gid=5,mode=620  0 0
    sysfs                   /sys                    sysfs   defaults        0 0
    proc                    /proc                   proc    defaults        0 0
    /dev/VolGroup00/LogVol01 swap                   swap    defaults        0 0
    /home/swap              swap                    swap    defaults        0 0
    192.168.2.110:/mnt/nfs4backup/nfs4backup/nfs4backup     /mnt/share      nfs    rw,hard,nointr,tcp,noac,vers=3,timeo=600,rsize=32768,wsize=32768        0 0
    LABEL=oracrsfiles       /u02                    ocfs2   _netdev,datavolume,nointr     0 0

    8)  Check the o2cb runlevel settings on both nodes:

    [root@ora10racn1 ~]# chkconfig --list o2cb

    o2cb            0:off   1:off  2:on   3:on    4:on    5:on    6:off

    9)  On one node, check and fix the owner and permissions of the OCFS2 file system:

    [root@ora10racn1 /]# ls -ld /u02

    drwxr-xr-x 3 root root 3896 May 16 21:54 /u02

    [root@ora10racn1 /]# chown oracle:oinstall /u02

    [root@ora10racn1 /]# chmod 775 /u02

    [root@ora10racn1 /]# ls -ld /u02

    drwxr-xr-x 3 oracle oinstall 3896 May 16 21:54 /u02

    10)  Create the Oracle Clusterware directories (run on one node only):

    [root@ora10racn1 ~]# mkdir -p /u02/oradata/racdb

    [root@ora10racn1 ~]# chown -R oracle:oinstall /u02/oradata

    [root@ora10racn1 ~]# chmod -R 775 /u02/oradata

    [root@ora10racn1 ~]# ls -l /u02/oradata

    total 0

    drwxr-xr-x 2 oracleoinstall 3896 May 16 22:01 racdb

    14.  Install ASMLib on both nodes

    1)  Install the packages. The first two are on the OL5.7 media and can be installed directly; the third (oracleasmlib) has to be downloaded from the Oracle website:

    [root@ora10racn1 Server]# rpm -Uvh oracleasm-2.6.18-274.el5-2.0.5-1.el5.x86_64.rpm \
    oracleasm-support-2.1.7-1.el5.x86_64.rpm
    warning: oracleasm-2.6.18-274.el5-2.0.5-1.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
    Preparing...               ########################################### [100%]
       1:oracleasm-support     ########################################### [ 50%]
       2:oracleasm-2.6.18-274.el########################################### [100%]

     

    [root@ora10racn1Server]# cd /home/oracle

    [root@ora10racn1oracle]# ls

    database  Desktop oracleasmlib-2.0.4-1.el5.x86_64.rpm

    [root@ora10racn1oracle]# rpm -Uvh oracleasmlib-2.0.4-1.el5.x86_64.rpm

    warning:oracleasmlib-2.0.4-1.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159

    Preparing...               ########################################### [100%]

       1:oracleasmlib          ########################################### [100%]

    2)  Configure ASMLib on both nodes:

    [root@ora10racn1 oracle]# /etc/init.d/oracleasm configure
    Configuring the Oracle ASM library driver.

    This will configure the on-boot properties of the Oracle ASM library
    driver.  The following questions will determine whether the driver is
    loaded on boot and what permissions it will have.  The current values
    will be shown in brackets ('[]').  Hitting <ENTER> without typing an
    answer will keep that current value.  Ctrl-C will abort.

    Default user to own the driver interface []: oracle
    Default group to own the driver interface []: oinstall
    Start Oracle ASM library driver on boot (y/n) [n]: y
    Scan for Oracle ASM disks on boot (y/n) [y]: y
    Writing Oracle ASM library driver configuration: done
    Initializing the Oracle ASMLib driver: [  OK  ]
    Scanning the system for Oracle ASMLib disks: [  OK  ]

    3)  Create the ASM disks (create them on one node only; the other nodes just need to scan):

    Node 1:
    [root@ora10racn1 oracle]# /etc/init.d/oracleasm listdisks
    [root@ora10racn1 oracle]# /etc/init.d/oracleasm createdisk VOL1 /dev/iscsi/asm1/part1
    Marking disk "VOL1" as an ASM disk: [  OK  ]
    [root@ora10racn1 oracle]# /etc/init.d/oracleasm createdisk VOL2 /dev/iscsi/asm2/part1
    Marking disk "VOL2" as an ASM disk: [  OK  ]
    [root@ora10racn1 oracle]# /etc/init.d/oracleasm listdisks
    VOL1
    VOL2

    Node 2:

    [root@ora10racn2oracle]# /etc/init.d/oracleasm listdisks

    [root@ora10racn2oracle]# /etc/init.d/oracleasm scandisks

    Scanning the systemfor Oracle ASMLib disks: [  OK  ]

    [root@ora10racn2oracle]# /etc/init.d/oracleasm listdisks

    VOL1

    VOL2

    15.  Pre-checks before installing the Clusterware software:

    1)  Install cvuqdisk on both nodes (only needed on RHEL):

    [oracle@ora10racn1 ~]$su -

    Password:

    [root@ora10racn1 ~]# rpm -qa --queryformat "%{NAME}-%{VERSION}-%{RELEASE} (%{ARCH})\n"|grep cvuqdisk

    [root@ora10racn1 ~]# cd /home/oracle/clusterware/rpm

    [root@ora10racn1 rpm]#ls

    cvuqdisk-1.0.1-1.rpm

    [root@ora10racn1 rpm]# rpm -Uvh cvuqdisk-1.0.1-1.rpm

    Preparing...               ########################################### [100%]

       1:cvuqdisk              ########################################### [100%]

     

    [root@ora10racn1 rpm]#scp ./cvuqdisk-1.0.1-1.rpm ora10racn2.ccz.com:/home/oracle

    The authenticity ofhost 'ora10racn2.ccz.com (192.168.0.132)' can't be established.

    RSA key fingerprint isf9:5e:99:da:b1:00:9e:6b:a4:9e:a0:cd:ae:e3:4d:ca.

    Are you sure you wantto continue connecting (yes/no)? yes

    Warning: Permanentlyadded 'ora10racn2.ccz.com,192.168.0.132' (RSA) to the list of known hosts.

    root@ora10racn2.ccz.com'spassword:

    cvuqdisk-1.0.1-1.rpm  

     

    [root@ora10racn2oracle]# rpm -Uvh cvuqdisk-1.0.1-1.rpm

    Preparing...                ###########################################[100%]

       1:cvuqdisk              ########################################### [100%]

    2)  Run the pre-installation check (as the oracle user; only needs to run on one node):

    [oracle@ora10racn1 cluvfy]$ cd /home/oracle/clusterware/cluvfy
    [oracle@ora10racn1 cluvfy]$ mkdir -p jdk14
    [oracle@ora10racn1 cluvfy]$ unzip jrepack.zip -d jdk14
    [oracle@ora10racn1 cluvfy]$ export CV_HOME=/home/oracle/clusterware/cluvfy
    [oracle@ora10racn1 cluvfy]$ export CV_JDKHOME=/home/oracle/clusterware/cluvfy/jdk14
    [oracle@ora10racn1 cluvfy]$ ./runcluvfy.sh stage -pre crsinst -n ora10racn1,ora10racn2 -verbose

    The check reports:

    Check: User equivalence for user "oracle"
      Node Name                             Comment
      ------------------------------------  ------------------------
      ora10racn2                            passed
      ora10racn1                            failed
    Result: User equivalence check failed for user "oracle".

     

    WARNING:
    User equivalence is not set for nodes:
            ora10racn1
    Verification will proceed with nodes:
            ora10racn2

    This happens because, after user equivalence is first set up, the very first SSH connection still asks for host-key confirmation, and the check treats that prompt as a failure. Manually run a test from every node to every node (including itself) and accept the host keys first, so that subsequent connections need no confirmation:

    [oracle@ora10racn1 cluvfy]$ ssh ora10racn1 date

    The authenticity ofhost 'ora10racn1 (192.168.0.131)' can't be established.

    RSA key fingerprint isf9:5e:99:da:b1:00:9e:6b:a4:9e:a0:cd:ae:e3:4d:ca.

    Are you sure you wantto continue connecting (yes/no)? yes

    Warning: Permanentlyadded 'ora10racn1' (RSA) to the list of known hosts.

    Thu May 17 08:45:40CST 2012

    [oracle@ora10racn1 cluvfy]$ ssh ora10racn2 date

    Thu May 17 08:45:55CST 2012

    [oracle@ora10racn1 cluvfy]$ ssh ora10racn1 date

    Thu May 17 08:50:10CST 2012

    Running the check again produces this error:

    ERROR:
    Could not find a suitable set of interfaces for VIPs.

    Result: Node connectivity check failed.

    According to Oracle note 338924.1, this is a bug and can be ignored.

    3)  Use the CVU to check the hardware and operating system:

    [oracle@ora10racn1 cluvfy]$ ./runcluvfy.sh stage -post hwos -n ora10racn1,ora10racn2 -verbose

     

    Performing post-checks for hardware and operating system setup
    Checking node reachability...
    ......
    ERROR:
    Could not find a suitable set of interfaces for VIPs.
    Result: Node connectivity check failed.

     

    Checking shared storage accessibility...
    WARNING:
    Unable to determine the sharedness of /dev/sda on nodes:
            ora10racn2,ora10racn1
      Disk                                  Sharing Nodes (2 in count)
      ------------------------------------  ------------------------
      /dev/sdb                              ora10racn2 ora10racn1
      Disk                                  Sharing Nodes (2 in count)
      ------------------------------------  ------------------------
      /dev/sdc                              ora10racn2 ora10racn1
      Disk                                  Sharing Nodes (2 in count)
      ------------------------------------  ------------------------
      /dev/sdd                              ora10racn2 ora10racn1

    Shared storage check was successful on nodes "ora10racn2,ora10racn1".
    Post-check for hardware and operating system setup was unsuccessful on all the nodes.

    Apart from the VIP error (ignored), the result also contains one warning. That warning is raised because CVU calls the Linux smartctl utility, which cannot return a serial number for iSCSI devices; it does not affect the RAC installation or operation and can be ignored.

    16.  Install Clusterware (run the installer under the oracle account on one node only):

    Note: adjust the CRS home path here so that it does not overlap with the Oracle home.
    Add the nodes according to your environment.
    Adjust the network interface settings as needed; eth1 is the storage interface, so here I set it to "Do Not Use".
    Set the storage locations for the OCR and the voting disk.
    Note: before running root.sh, patch srvctl and vipca as shown below, otherwise they fail; after root.sh you also need to run vipca as root in a graphical session to configure the VIPs, otherwise the OUI still reports an error.
    Run the scripts as root on both nodes in the prescribed order; on the last node, the final root.sh run reports an error:

    CSS is active on all nodes.
    Waiting for the Oracle CRSD and EVMD to start
    Oracle CRS stack installed and running under init(1M)
    Running vipca(silent) for configuring nodeapps
    /u01/app/crs/jdk/jre//bin/java: error while loading shared libraries: libpthread.so.0: cannot open shared object file: No such file or directory

    This turns out to be an Oracle bug; a workaround is described at:

                                http://hi.baidu.com/heroofhero/blog/item/76747032361fc84dac4b5f09.html

    The fix:

    /u01/app/crs/bin/vipca:

    152 export LD_LIBRARY_PATH
    153 ;;
    154 esac
    155
    156 unset LD_ASSUME_KERNEL
    157
    158 ARGUMENTS=""

    159 NUMBER_OF_ARGUMENTS=$#

    160 if [ $NUMBER_OF_ARGUMENTS -gt 0 ]; then

    161 ARGUMENTS=$*

    162 fi

    /u01/app/crs/bin/srvctl:

    166 #Remove this workaround when the bug 3937317 is fixed

    167 LD_ASSUME_KERNEL=2.4.19

    168 export LD_ASSUME_KERNEL

    169 unset LD_ASSUME_KERNEL

    170

    171 # Run ops control utility

    After applying the fix, return to the OUI and click OK to continue.

    17.  Install the Oracle database software. Note: install the software only; do not create a database yet.

    18.  Patch $ORACLE_HOME/bin/srvctl on both nodes

    Just as with the fix applied to $CRS_HOME/bin/srvctl after the Clusterware installation, $ORACLE_HOME/bin/srvctl needs the same change (a scripted sketch follows the excerpt):

    LD_ASSUME_KERNEL=2.4.19
    export LD_ASSUME_KERNEL
    unset LD_ASSUME_KERNEL
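    A hedged sketch (not from the original) of applying that edit on both nodes with GNU sed, inserting the unset right after the existing export line:

    # run as oracle on each node; back up the file first
    cp $ORACLE_HOME/bin/srvctl $ORACLE_HOME/bin/srvctl.bak
    sed -i '/^export LD_ASSUME_KERNEL/a unset LD_ASSUME_KERNEL' $ORACLE_HOME/bin/srvctl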

    19.  Create the listeners and configure the naming method (run on one node only)

                               

    20.  Create the cluster database:

    1)  Pre-creation check:

    [oracle@ora10racn1 cluvfy]$ env|grep CV
    CV_JDKHOME=/home/oracle/clusterware/cluvfy/jdk14
    CV_HOME=/home/oracle/clusterware/cluvfy
    [oracle@ora10racn1 cluvfy]$ pwd
    /home/oracle/clusterware/cluvfy
    [oracle@ora10racn1 cluvfy]$ ./runcluvfy.sh stage -pre dbcfg -n ora10racn1,ora10racn2 -d ${ORACLE_HOME} -verbose

    Apart from the VIP issue seen earlier (which can be ignored), the check reports no other problems.

    2)  Create the database with dbca:

    Note that during creation the default location of the ASM spfile is not on shared storage; change it to a shared-storage location.

    While creating the ASM instance, this error appears: ORA-27125: unable to create shared memory segment

    The fix is to add the GID of the dba group to /proc/sys/vm/hugetlb_shm_group:

    [root@rac2 ~]# id oracle
    uid=500(oracle) gid=501(oinstall) groups=501(oinstall),502(dba),503(asmadmin),504(oper)
    [root@rac2 ~]# more /proc/sys/vm/hugetlb_shm_group
    0

    As root, run the following command to register the dba group with the kernel:

    [root@rac2 ~]# echo 502 > /proc/sys/vm/hugetlb_shm_group

    For details, see: http://blog.csdn.net/tianlesoftware/article/details/7309046
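    To make that setting survive a reboot (a hedged sketch, not part of the original; GID 502 is the dba group shown above), the same value can be added to /etc/sysctl.conf:

    # persist the hugetlb_shm_group setting across reboots
    echo "vm.hugetlb_shm_group = 502" >> /etc/sysctl.conf
    sysctl -p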

    3)  When creating the disk group, remember to change the disk discovery path:

    Using the default ORCL:VOL* discovery string here caused the machine to hang (see the sketch below for an alternative path).
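    The original does not show the replacement path. As an assumption (not stated by the author), the discovery string is typically pointed at the device files ASMLib created earlier:

    # hypothetical example: list the ASMLib-managed devices, then use this pattern
    # as the Disk Discovery Path in dbca, e.g. /dev/oracleasm/disks/VOL*
    ls -l /dev/oracleasm/disks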

    4)  The database is created successfully.

    21.  Create the TAF service:

    [oracle@ora10racn1 ~]$ dbca &

    Note: do not include the domain name in the new TAF service name, otherwise it raises an error.

    SQL> show parameter service
    NAME                                 TYPE        VALUE
    ------------------------------------ ----------- ------------------------------
    service_names                        string      racdb.ccz.com, racdb_taf

    [oracle@ora10racn2disks]$ crs_stat -t

    Name           Type           Target   State     Host       

    ------------------------------------------------------------

    ora....SM1.asmapplication    ONLINE    ONLINE   ora10racn1 

    ora....N1.lsnrapplication    ONLINE    ONLINE   ora10racn1 

    ora....cn1.gsdapplication    ONLINE    ONLINE   ora10racn1 

    ora....cn1.onsapplication    ONLINE    ONLINE   ora10racn1 

    ora....cn1.vipapplication    ONLINE    ONLINE   ora10racn1 

    ora....SM2.asmapplication    ONLINE    ONLINE   ora10racn2 

    ora....N2.lsnrapplication    ONLINE    ONLINE   ora10racn2 

    ora....cn2.gsdapplication    ONLINE    ONLINE   ora10racn2 

    ora....cn2.onsapplication    ONLINE    ONLINE   ora10racn2 

    ora....cn2.vipapplication    ONLINE    ONLINE   ora10racn2 

    ora.racdb.db   application    ONLINE   ONLINE    ora10racn1 

    ora....b1.instapplication    ONLINE    ONLINE   ora10racn1 

    ora....b2.instapplication    ONLINE    ONLINE   ora10racn2 

    ora...._taf.csapplication    ONLINE    ONLINE   ora10racn1 

    ora....db1.srvapplication    ONLINE    ONLINE   ora10racn1 

    ora....db2.srvapplication    ONLINE    ONLINE   ora10racn2 

    Enable the TAF service so it starts automatically:

    [oracle@ora10racn2 bash]$ srvctl enable service -d racdb -s racdb_taf

    Check the TAF service configuration:

    [oracle@ora10racn2 bash]$ srvctl config service -d racdb -s racdb_taf -a
    racdb_taf PREF: racdb1 racdb2 AVAIL:  TAF: basic
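    For reference, a hedged sketch of a client-side tnsnames.ora entry for this service (port 1521 and the SELECT/BASIC failover settings are assumptions, not taken from the original; the VIP host names come from the /etc/hosts file above):

    RACDB_TAF =
      (DESCRIPTION =
        (ADDRESS = (PROTOCOL = TCP)(HOST = ora10racn1-vip)(PORT = 1521))
        (ADDRESS = (PROTOCOL = TCP)(HOST = ora10racn2-vip)(PORT = 1521))
        (LOAD_BALANCE = yes)
        (CONNECT_DATA =
          (SERVER = DEDICATED)
          (SERVICE_NAME = racdb_taf)
          (FAILOVER_MODE = (TYPE = SELECT)(METHOD = BASIC)(RETRIES = 180)(DELAY = 5))
        )
      )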

    22.  Recompile the database objects:

    $ sqlplus / as sysdba

    SQL>@?/rdbms/admin/utlrp.sql

    23.  Enable archivelog mode:

    1)  On node 1, turn off the cluster setting:

    SYS@racdb1 SQL> show parameter cluster_database
    NAME                                 TYPE        VALUE
    ------------------------------------ ----------- ------------------------------
    cluster_database                     boolean     TRUE
    cluster_database_instances           integer     2
    SYS@racdb1 SQL> alter system set cluster_database=false scope=spfile sid='racdb1';

    2)  Shut down all instances of the RAC database:

    [oracle@ora10racn1admin]$ srvctl stop database -d racdb

    [oracle@ora10racn1admin]$ crs_stat -t

    Name           Type           Target    State    Host       

    ------------------------------------------------------------

    ora....SM1.asmapplication    ONLINE    ONLINE   ora10racn1 

    ora....N1.lsnr application    ONLINE   ONLINE    ora10racn1 

    ora....cn1.gsdapplication    ONLINE    ONLINE   ora10racn1 

    ora....cn1.onsapplication    ONLINE    ONLINE   ora10racn1 

    ora....cn1.vipapplication    ONLINE    ONLINE   ora10racn1 

    ora....SM2.asm application    ONLINE   ONLINE    ora10racn2 

    ora....N2.lsnrapplication    ONLINE    ONLINE   ora10racn2 

    ora....cn2.gsdapplication    ONLINE    ONLINE   ora10racn2 

    ora....cn2.onsapplication    ONLINE    ONLINE   ora10racn2 

    ora....cn2.vipapplication    ONLINE    ONLINE   ora10racn2 

    ora.racdb.db   application    OFFLINE  OFFLINE              

    ora....b1.instapplication    OFFLINE   OFFLINE              

    ora....b2.instapplication    OFFLINE   OFFLINE              

    ora...._taf.csapplication    OFFLINE  OFFLINE              

    ora....db1.srvapplication    ONLINE    OFFLINE              

    ora....db2.srvapplication    ONLINE    OFFLINE      

    3)  On node 1, start the instance in mount state:

    SYS@racdb1 SQL>startup mount;

    ORACLE instancestarted.

     

    Total System GlobalArea 1862270976 bytes

    Fixed Size                  2021600 bytes

    Variable Size             469763872 bytes

    Database Buffers         1375731712 bytes

    Redo Buffers               14753792 bytes

    Database mounted.

    SYS@racdb1 SQL> archive log list;
    Database log mode              No Archive Mode
    Automatic archival             Disabled
    Archive destination            USE_DB_RECOVERY_FILE_DEST
    Oldest online log sequence     33
    Current log sequence           34

    4)  On node 1, enable archivelog mode:

    SYS@racdb1SQL>alter database archivelog;

     

    Database altered.

    5)  On node 1, re-enable the cluster setting and shut down the instance:

    SYS@racdb1SQL>alter system set cluster_database=true scope=spfile sid='racdb1';

    System altered.

     

    SYS@racdb1SQL>shutdown immediate;

    ORA-01109: databasenot open

    Database dismounted.

    ORACLE instance shutdown.

    6)  Start all instances and verify:

    [oracle@ora10racn1admin]$ srvctl start database -d racdb

    [oracle@ora10racn1admin]$ sqlplus /nolog

    SQL*Plus: Release10.2.0.1.0 - Production on Fri May 18 17:44:53 2012

    Copyright (c) 1982,2005, Oracle.  All rights reserved.

     

    SQL> conn / as sysdba

    Connected.

    SYS@racdb1 SQL> archive log list;
    Database log mode              Archive Mode
    Automatic archival             Enabled
    Archive destination            USE_DB_RECOVERY_FILE_DEST
    Oldest online log sequence     33
    Next log sequence to archive   34
    Current log sequence           34

    24.  Create shared password files:

    1)  Move the ASM and RDBMS password files from one node onto the shared file system:

    [oracle@ora10racn2 dbs]$ cd $ORACLE_HOME/dbs
    [oracle@ora10racn2 dbs]$ ls
    ab_+ASM2.dat  hc_+ASM2.dat  hc_racdb2.dat  init+ASM2.ora  initdw.ora  init.ora  initracdb2.ora  orapw+ASM2  orapwracdb2
    [oracle@ora10racn2 dbs]$ mv ./orapw+ASM2 /u02/oradata/racdb/dbs/orapw+ASM
    [oracle@ora10racn2 dbs]$ mv ./orapwracdb2 /u02/oradata/racdb/dbs/orapwracdb

    2)  On each node, create symbolic links to the shared password files:

    [oracle@ora10racn2 dbs]$ pwd
    /u01/app/oracle/product/10.2.0/db_1/dbs
    [oracle@ora10racn2 dbs]$ ln -s /u02/oradata/racdb/dbs/orapw+ASM /u01/app/oracle/product/10.2.0/db_1/dbs/orapw+ASM2
    [oracle@ora10racn2 dbs]$ ln -s /u02/oradata/racdb/dbs/orapwracdb /u01/app/oracle/product/10.2.0/db_1/dbs/orapwracdb2

    [oracle@ora10racn2dbs]$ ls -l

    total 48

    -rw-rw---- 1 oracleoinstall  1571 May 18 15:00 ab_+ASM2.dat

    -rw-rw---- 1 oracleoinstall  1552 May 18 15:00 hc_+ASM2.dat

    -rw-rw---- 1 oracleoinstall  1552 May 18 17:44 hc_racdb2.dat

    -rw-r----- 1 oracleoinstall    47 May 18 14:59 init+ASM2.ora

    -rw-r----- 1 oracleoinstall 12920 May  3  2001 initdw.ora

    -rw-r----- 1 oracleoinstall  8385 Sep 11  1998 init.ora

    -rw-r----- 1 oracleoinstall    39 May 18 15:33initracdb2.ora

    lrwxrwxrwx 1 oracleoinstall    32 May 19 08:22 orapw+ASM2-> /u02/oradata/racdb/dbs/orapw+ASM

    lrwxrwxrwx 1 oracleoinstall    33 May 19 08:22 orapwracdb2-> /u02/oradata/racdb/dbs/orapwracdb

     

    [oracle@ora10racn1 dbs]$ rm -r orapw+ASM1
    [oracle@ora10racn1 dbs]$ rm -r orapwracdb1
    [oracle@ora10racn1 dbs]$ ln -s /u02/oradata/racdb/dbs/orapw+ASM /u01/app/oracle/product/10.2.0/db_1/dbs/orapw+ASM1
    [oracle@ora10racn1 dbs]$ ln -s /u02/oradata/racdb/dbs/orapwracdb /u01/app/oracle/product/10.2.0/db_1/dbs/orapwracdb1

    [oracle@ora10racn1dbs]$ ls -l

    total 48

    -rw-rw---- 1 oracleoinstall   796 May 18 14:59 ab_+ASM1.dat

    -rw-rw---- 1 oracleoinstall  1552 May 18 14:59 hc_+ASM1.dat

    -rw-rw---- 1 oracleoinstall  1552 May 18 17:43 hc_racdb1.dat

    -rw-r----- 1 oracleoinstall    47 May 18 14:59 init+ASM1.ora

    -rw-r----- 1 oracleoinstall 12920 May  3  2001 initdw.ora

    -rw-r----- 1 oracleoinstall  8385 Sep 11  1998 init.ora

    -rw-r----- 1 oracleoinstall    39 May 18 15:33initracdb1.ora

    lrwxrwxrwx 1 oracleoinstall    32 May 19 08:27 orapw+ASM1-> /u02/oradata/racdb/dbs/orapw+ASM

    lrwxrwxrwx 1 oracleoinstall    33 May 19 08:27 orapwracdb1-> /u02/oradata/racdb/dbs/orapwracdb

    25.  Resize the data files:

    NAME                                              BYTES/(1024*1024)

    -------------------------------------------------------------------

    +ORADAT/racdb/datafile/system.259.783615899                      480

    +ORADAT/racdb/datafile/undotbs1.260.783615911                    330

    +ORADAT/racdb/datafile/sysaux.261.783615915                      270

    +ORADAT/racdb/datafile/undotbs2.263.783615923                    200

    +ORADAT/racdb/datafile/users.264.783615929                        5

    SYS@racdb2 SQL> alter database datafile '+ORADAT/racdb/datafile/users.264.783615929' resize 1024M;

    Database altered.

     

    SYS@racdb2 SQL> alter tablespace users add datafile '+ORADAT' size 1024m autoextend on;

    Tablespace altered.

     

    SYS@racdb2 SQL> create tablespace indx datafile '+ORADAT' size 1024M
      2  autoextend on next 50M maxsize unlimited
      3  extent management local autoallocate segment space management auto;

     

    SYS@racdb2 SQL> alter database datafile '+ORADAT/racdb/datafile/system.259.783615899' resize 800M;

    Database altered.

     

    SYS@racdb2 SQL> alter database datafile '+ORADAT/racdb/datafile/sysaux.261.783615915' resize 500M;

    Database altered.

     

    SYS@racdb2 SQL> alter database datafile '+ORADAT/racdb/datafile/undotbs1.260.783615911' resize 1024m;

    Database altered.

     

    SYS@racdb2 SQL> alter database datafile '+ORADAT/racdb/datafile/undotbs2.263.783615923' resize 1024M;

    Database altered.

     

    SYS@racdb2SQL>select name,bytes/(1024*1024) from v$datafile;

    NAME                                               BYTES/(1024*1024)

    -------------------------------------------------------------------

    +ORADAT/racdb/datafile/system.259.783615899                      800

    +ORADAT/racdb/datafile/undotbs1.260.783615911                   1024

    +ORADAT/racdb/datafile/sysaux.261.783615915                      500

    +ORADAT/racdb/datafile/undotbs2.263.783615923                   1024

    +ORADAT/racdb/datafile/users.264.783615929                      1024

    +ORADAT/racdb/datafile/users.268.783679533                      1024

    +ORADAT/racdb/datafile/indx.269.783679683                       1024

     

    SYS@racdb2SQL>select name,bytes/(1024*1024) from v$tempfile;

    NAME                                              BYTES/(1024*1024)

    -------------------------------------------------------------------

    +ORADAT/racdb/tempfile/temp.262.783615919                         28

     

    SYS@racdb2 SQL> alter database tempfile '+ORADAT/racdb/tempfile/temp.262.783615919' resize 1024M;

    Database altered.

     


