#!/bin/ksh -p
# $Id: //depot/dev/proactive/explorer3/tools/cluster#20 $
#
# @(#)cluster.sh 1.2 01/02/06
#
#
# Cluster Explorer Script (ClusterSave) for Solstice 1.x and Sun Cluster 2.x
#
# Note:
# Some routines may generate console messages, these can be ignored
# ie. hareg -q - when dataservice is not registered
#
#
# Debug switches
#
#exec 2> /tmp/out
#set -x

# Source common functions (get_file, get_cmd, get_dir, touch_file,
# echo_file, logfile, require, which_gate_exit, ...)
. ${EXP_LIB}/exp_tools

# Run if specified to do so
script=`basename $0`
which_gate_exit $script default all

#
# Set global variables
#
# CL_VERSION will indicate which version of SunCluster is installed
# This value is only used for SunCluster 2.x and greater
#
CL_VERSION=0

#
# cluststate flag is used so that commands that require the node to
# be in the cluster don't get executed when the node is out of the cluster
#
cluststate="DOWN"

# =========== Function Definitions ============================================

#
# Generic Database routinues
#

#
# @(#)oracle_generic.shf 1.1 01/01/10
#
# Generic ORACLE data gathering routinues
#

#
# find_ora_bgdumpdest
# - Locate the background_dump_dest parameter in an init.ora file,
#   recursing into any ifile= includes when the parameter is not set
#   directly in the file passed in.
#
# Input parameters -
#	$1 -- path to an init${SID}.ora (or an included ifile)
#
find_ora_bgdumpdest ()
{
	# the grep pattern has one space and one tab
	BGDUMPDESTLINE=`/bin/grep -i '^[ 	]*background_dump_dest[ 	]*=' ${1}`
	# this param might be in the ifile
	if [ ! -z "${BGDUMPDESTLINE}" ]; then
		BGDUMPDEST=`/bin/echo ${BGDUMPDESTLINE} | \
		    /bin/sed 's/^[ 	]*background_dump_dest[ 	]*=//'`
		# NOTE(review): the echo below is commented out in the
		# shipped script, so callers capturing this function's
		# output always see an empty string and fall back to the
		# default alert-log location -- confirm whether intended.
#		echo ${BGDUMPDEST}
	else
		# check for ifile
		IFILE_LIST=`/bin/grep -i '^[ 	]*ifile[ 	]*=' ${1} | /bin/cut -d= -f 2`
		for IFILE in ${IFILE_LIST} ; do
			find_ora_bgdumpdest ${IFILE}
		done
	fi
}

#
# oracle_home_data
# - Collects data based on information from oratab
#
# Input parameters -
#	$1 -- ${ORAHOME}
#	$2 -- ${ORASID}
#
oracle_home_data()
{
	ORAHOME=$1
	ORASID=$2
	IDUMP="cluster/oracle/${ORASID}"

	[ -z "${ORAHOME}" -o -z "${ORASID}" ] && return

	# while we're here, get the dbms version as well
	get_file "${ORAHOME}/orainst/RELVER" ${IDUMP} || \
	    touch_file ${IDUMP}/RELVER.NOTFOUND

	get_file "${ORAHOME}/dbs/init${ORASID}.ora" ${IDUMP} || \
	    touch_file ${IDUMP}/init${ORASID}.ora.NOTFOUND

	#
	# Check for tnsnames, listener, and sqlnet
	# in the oracle network directory
	#
	ORANET="${ORAHOME}/network/admin"
	get_file "${ORANET}/listener.ora" ${IDUMP}
	get_file "${ORANET}/sqlnet.ora" ${IDUMP}
	get_file "${ORANET}/tnsnames.ora" ${IDUMP}
}

#
# oracle_base_data
# - Collect oracle data from ORACLE_BASE
#   (alert log, recent trace files, config${SID}.ora / ifile)
#
# Input parameters
#	$1 - ${ORASID}
#	$2 - ${INITORA}
#
# Note: also reads the caller's ${ORAHOME} global.
#
oracle_base_data()
{
	ORASID=$1
	INITORA=$2
	IDUMP="cluster/oracle/${ORASID}"

	[ -z "${ORASID}" -o -z "${ORAHOME}" ] && return

	if [ ! -z "${INITORA}" -a -f "${INITORA}" ]; then
		ERRLOGDIR=`find_ora_bgdumpdest ${INITORA}`
		if [ -z "${ERRLOGDIR}" ]; then
			#
			# Check default location if no other location is
			# found
			#
			TMP=`dirname ${INITORA}`
			ORAHOME=`dirname ${TMP}`
			if [ -f "${ORAHOME}/rdbms/log/alert_${ORASID}.log" ]; then
				ERRLOGDIR="${ORAHOME}/rdbms/log"
			fi
		fi

		# Make sure ERRLOGDIR is a directory
		[ ! -d "${ERRLOGDIR}" ] && return

		ERRLOGFILE=${ERRLOGDIR}/alert_${ORASID}.log
		get_file "${ERRLOGFILE}" ${IDUMP} || \
		    touch_file ${IDUMP}/alert_${ORASID}.log.NOTFOUND

		# NOTE(review): unreachable -- an empty ERRLOGDIR already
		# failed the -d test above and returned; kept for fidelity.
		if [ -z "${ERRLOGDIR}" ]; then
			break
		fi

		#
		# Grab any tracefiles that were created in
		# the last 14 days
		#
		find ${ERRLOGDIR} -depth -name \*.trc -ctime -14 -print | \
		while read line
		do
			get_file "${line}" ${IDUMP}/tracefiles
		done

		#
		# Copy the config${SID}.ora files
		#
		DBSDIR=`dirname ${INITORA}`
		IFILE=`grep -i '^[ 	]*ifile[ 	]*=' ${INITORA} \
		    | cut -d= -f 2`
		if [ ! -z "${IFILE}" ]; then
			get_file "${IFILE}" ${IDUMP}
		else
			if [ ! -z "${DBSDIR}" ]; then
				#
				# If the ifile does not exist then assume
				# that the config${SID}.ora file is in the
				# dbs directory
				#
				get_file "${DBSDIR}/config${ORASID}.ora" ${IDUMP}
			fi
		fi
	else
		#
		# FIX: this block was previously unconditional (no "else"),
		# so "not found" was logged even when the config file was
		# collected successfully.  Log only on the failure path,
		# matching the pattern used by the other collectors.
		#
#		logfile "Config file ${INITORA} for SID ${ORASID} not found"
		TEXT=`gettext "Config file %s for SID %s not found"`
		TEXT=`printf "${TEXT}" "${INITORA}" "${ORASID}"`
		logfile "${TEXT}"
	fi
}

#
# Collects Generic Oracle information
#
oracle_collector()
{
	HAORADIR="/var/opt/oracle"

	#
	# Check the permissioning on $ORACLE_HOME for each instance
	#
	if [ -f "${HAORADIR}/oratab" ]; then
		grep -v "^#" ${HAORADIR}/oratab | while read ORATABLINE ; do
			ORASID=`echo ${ORATABLINE} | cut -d : -f 1`
			ORAHOME=`echo ${ORATABLINE} | cut -d : -f 2`
			[ -z "${ORASID}" ] && continue
			#
			# Special case since OPS may have an "*"
			# in the oratab file so we attempt to determine
			# what the oracle SIDS are
			#
			if [ "${ORASID}" = "*" ]; then
				for SID in `ls ${ORAHOME}/dbs/init*.ora | \
				    sed -e 's/.*init\(.*\).ora/\1/'`
				do
					[ -z "${SID}" ] && continue
					oracle_home_data ${ORAHOME} ${SID}
					#
					# Collect the errorlogs and tracefiles
					#
					INITORA=${ORAHOME}/dbs/init${SID}.ora
					oracle_base_data ${SID} ${INITORA}
				done
			else
				#
				# There is a valid ORACLE_SID
				#
				oracle_home_data ${ORAHOME} ${ORASID}
				#
				# Collect the errorlogs and tracefiles
				# FIX: use ${ORASID} here -- ${SID} belongs to
				# the OPS branch above and was unset/stale,
				# producing a bogus init.ora path.
				#
				INITORA=${ORAHOME}/dbs/init${ORASID}.ora
				oracle_base_data ${ORASID} ${INITORA}
			fi
		done
	fi

	#
	# SunCluster 2.x specific
	#
	[ ${CL_VERSION} -ne 2 ] && return
	[ "$cluststate" = "DOWN" ] && return

	#
	# Another way to collect some of the data.
	# We only do this if HA-Oracle is installed
	# and the cluster is running
	#
	pkginfo -q SUNWscor
	[ $? -ne 0 ] && return

	#
	# For each instance:
	#	- get the alert file
	#
	${CLUSTERBIN}/haoracle list | while read HAORALIST; do
		SID=`echo ${HAORALIST} | cut -d : -f 2`
		[ -z "${SID}" ] && continue
		INITORA=`echo ${HAORALIST} | cut -d : -f 9`
		oracle_base_data ${SID} ${INITORA}
	done
}
#include oracle_generic

#
# @(#)sybase_generic.shf 1.1 01/01/10
#
# Generic Sybase collection routines
#
sybase_collector()
{
	IDUMP="cluster/sybase"
	HASYBDIR=/var/opt/sybase

	# IDUMP is relative to the current (explorer output) directory
	[ ! -d ${IDUMP} ] && return

	#
	# Get a directory listing of /var/opt/sybase
	#
	get_cmd "/bin/ls -lR ${HASYBDIR}" ${IDUMP}/ls-lR_var_opt_sybase

	#
	# Check the permissioning on $SYBASE_HOME for each instance
	#
	if [ -f ${HASYBDIR}/sybtab ]; then
		grep -v "^#" ${HASYBDIR}/sybtab | while read SYBTABLINE ; do
			SYBINST=`echo ${SYBTABLINE} | cut -d : -f 1`
			SYBHOME=`echo ${SYBTABLINE} | cut -d : -f 2`
			DSDUMP=${IDUMP}/${SYBINST}
			# Get the Sybase version
			get_file "${SYBHOME}/init/sqlsrv/version" ${DSDUMP} || \
			    touch_file ${DSDUMP}/version.NOTFOUND
			get_file "${SYBHOME}/interfaces" ${DSDUMP} || \
			    touch_file ${DSDUMP}/interfaces.NOTFOUND
		done
	else
#		logfile "Sybase installed but ${HASYBDIR}/sybtab does not exist"
		TEXT=`gettext "Sybase installed but %s/sybtab does not exist"`
		TEXT=`printf "${TEXT}" "${HASYBDIR}"`
		logfile "${TEXT}"
	fi

	#
	# SunCluster 2.x specific
	#
	[ ${CL_VERSION} -ne 2 ] && return
	[ "$cluststate" = "DOWN" ] && return

	#
	# For each instance, get the .log file
	#
	${CLUSTERBIN}/hasybase list | while read HASYBLIST; do
		SYBINST=`echo ${HASYBLIST} | cut -d : -f 2`
		RUNSYBINST=`echo ${HASYBLIST} | cut -d : -f 9`
		DSDUMP=${IDUMP}/${SYBINST}
		if [ ! -z "${RUNSYBINST}" -a -f "${RUNSYBINST}" ]; then
			# errorlog path is the argument of the "-e" flag
			# inside the RUN_<server> file
			LOGFILE=`cat ${RUNSYBINST} | grep "\-e" | cut -c3- | \
			    cut -d " " -f1`
			get_file "${LOGFILE}" ${DSDUMP}
			if [ $? -ne 0 ]; then
				lfile=`basename ${LOGFILE}`
				touch_file ${DSDUMP}/${lfile}.NOTFOUND
			fi
		else
#			logfile "SQL server file ${RUNSYBINST} not found"
			TEXT=`gettext "SQL server file %s not found"`
			TEXT=`printf "${TEXT}" "${RUNSYBINST}"`
			logfile "${TEXT}"
		fi
	done
}
#include sybase_generic

#
# @(#)informix_generic.shf 1.1 01/01/10
#
# Generic Informix routinues
#

#
# Collect Generic Informix Info
#
informix_collector()
{
	IDUMP="cluster/informix"
	HAINFDIR=/var/opt/informix

	[ ! -d ${IDUMP} ] && return

	#
	# SunCluster 2.x specific -- Sun Cluster 3.x does not support
	# informix yet
	#
	[ ${CL_VERSION} -ne 2 ] && return

	#
	# If the cluster is down hainformix will fail so return
	#
	[ "$cluststate" = "DOWN" ] && return

	#
	# for each informix database, get the config file
	# and the logfile (not from the /var/opt/informix)
	#
	${CLUSTERBIN}/hainformix list | while read HAINFLIST; do
		CONFIG=`/bin/echo ${HAINFLIST} | /bin/cut -d : -f 2`
		DSDUMP=${IDUMP}/${CONFIG}
		[ -z "${CONFIG}" ] && continue
		[ ! -f "${HAINFDIR}/inftab" ] && continue
		# (a stray no-op "2>&1" after this assignment was removed)
		INFDIR=`/bin/grep "${CONFIG}" "${HAINFDIR}/inftab" | /bin/cut -d : -f 2`
		if [ -n "${CONFIG}" -a -f "${INFDIR}/etc/${CONFIG}" ]; then
			MSGPATH=`/bin/grep "^MSGPATH" "${INFDIR}/etc/${CONFIG}" | \
			    /bin/awk '{print $2}'`
			get_file ${MSGPATH} ${DSDUMP}
		fi
		if [ -d "${INFDIR}" ]; then
			#
			# copy the config files to file.${CONFIG}.inf.dir
			# to indicate this file has *NOT* been copied
			# from /var/opt/informix
			#
			get_file "${INFDIR}/etc/sqlhosts" ${DSDUMP} || \
			    touch_file ${DSDUMP}/sqlhosts.NOTFOUND
			get_file "${INFDIR}/etc/${CONFIG}" ${DSDUMP} || \
			    touch_file ${DSDUMP}/${CONFIG}.NOTFOUND
		fi
	done

	#
	# check the links /usr/lib for informix files
	#
	get_cmd "/bin/ls -l /usr/lib | /bin/grep informix" ${IDUMP}/ls-l_usr_lib

	return
}
#include informix_generic

#
# Cluster specific routinues
#

#
# @(#)clust_rg_common.shf 1.1 01/01/10
#
# Common routinues used to parse output from scrgadm
#

#
# Get the resource group triplet:
# resource group name, resource name, and resource type
# (one space-separated triplet per line on stdout)
#
get_rgs()
{
	/usr/cluster/bin/scrgadm -p | nawk '\
	BEGIN { processing=0 }
	#
	# Get the resource group
	#
	/^Res Group name:/ {
		processing=1
		rgname=$4
	}
	#
	# If a resource type is seen at the beginning of a line
	# then reset the processing flag
	#
	/^Res Type name:/ {
		processing=0
	}
	#
	# Get the resource name
	#
	/^ Res name:/ {
		if (processing)
			rsname=$3
	}
	#
	# Get the resource type and print out the triplet
	#
	/^ Res resource type:/ {
		if (processing) {
			rstype=$4
			print rgname" "rsname" "rstype
		}
	}'
}

#
# Run scrgadm and grep out the string supplied
# The opts and strings variabled are set in the case statement below
#
run_cmd()
{
	/usr/cluster/bin/scrgadm -pvv ${opts} | grep "${string}"
}

#
# find the parameter specified by resource type and resource
# property, if supplied
#
# Output: "resource:parameter" lines (SUNW.nfs / SUNW.iws) or the raw
# property value (SUNW.oracle_server); consumed by clust_ds_hanfs and
# clust_ds_nshttp.
#
find_rg_info()
{
	type=$1
	prop=$2

	#
	# read in the resource group triplet
	#
	get_rgs | while read rg rs rt
	do
		if [ "$type" = "$rt" ]; then
			case $type in
			SUNW.nfs)
				opts="-g ${rg}"
				string="(${rg}) Res Group Pathprefix:"
				param=`run_cmd | awk '{print $5}'`
				# FIX: re-enabled -- clust_ds_hanfs parses
				# this output with cut -d:
				echo ${rs}":"${param}
				;;
			SUNW.iws)
				opts="-j ${rs}"
				string="(${rg}:${rs}:Confdir_list) Res property value"
				param=`run_cmd | awk '{print $5}'`
				# FIX: re-enabled -- clust_ds_nshttp parses
				# this output with cut -d:
				echo ${rs}":"${param}
				;;
			SUNW.oracle_server)
				opts="-j ${rs}"
				[ -z "${prop}" ] && return
				string="(${rg}:${rs}:${prop}) Res property value"
				run_cmd | awk '{print $5}'
				;;
			*)
#				echo "Invalid option"
				exit 1
				;;
			esac
		fi
	done
}
#include clust_rg_common

#
# @(#)clust_pkgs.shf 1.1 01/01/10
#
# Cluster package checks
#

#
# Gather pkgchk information on all packages and
# copy simple configuration files into the data directories
#
clust_pkgs()
{
	IDUMP="cluster/packages"
	PKGFILE="${IDUMP}/PKGLIST"

	#
	# Check dataservices
	#
	SCDSPKGS="SUNWscdns SUNWschtt SUNWsclts SUNWscnew \
	    SUNWscnsl SUNWscnsm SUNWsctiv"

	SCDBPKGS="SUNWscor SUNWscorx SUNWscsyb SUNWscinf SUNWscsap SUNWudlm"

	SC2PKGS="SUNWdid SUNWsccf SUNWscid SUNWccd SUNWff SUNWscch \
	    SUNWmdm SUNWscins SUNWmond SUNWsclb SUNWcmm SUNWpnm SUNWsma \
	    SUNWcsnmp SUNWsc SUNWsci SUNWscman SUNWscmgr SUNWscsdb \
	    SUNWccon SUNWccp SUNWscds SUNWscpro"

	SC3PKGS="SUNWscfab SUNWscsal SUNWscshl SUNWscvm SUNWscman SUNWscsam \
	    SUNWscssv SUNWscdev SUNWscr SUNWscscn SUNWscu"

	#
	# collect database pkg info
	#
	for PKG in ${SCDBPKGS}; do
		pkginfo -q ${PKG} >/dev/null 2>&1
		if [ $? -eq 0 ]; then
			echo_file ${PKGFILE} "Processed: "${PKG}
			get_cmd "/usr/sbin/pkgchk -n ${PKG}" ${IDUMP}/pkgchk-n.${PKG}!
			#
			# Only keep files that received errors
			#
			if [ $? -eq 0 ]; then
				rm -f "${EXP_TARGET}/${IDUMP}/pkgchk-n.${PKG}"
			fi
			#
			# Create the dataservice dump directory
			#
			case ${PKG} in
			"SUNWscsyb")	DB="sybase";;
			"SUNWscor")	DB="oracle";;
			"SUNWscinf")	DB="informix";;
			"SUNWscsap")	DB="sap";;
			"SUNWudlm")	DB="oracle";;
			esac
			DSDUMP="cluster/${DB}"
			#
			# OPS does not collect anything else here
			#
			if [ "${PKG}" = "SUNWudlm" ]; then
				mkdir -p ${EXP_TARGET}/${DSDUMP} >/dev/null 2>&1
				continue
			fi
			#
			# XXX -- Sun Cluster 2.x specific
			#
			if [ ${CL_VERSION} -eq 2 -a "$cluststate" = "UP" ]; then
				get_cmd "${CLUSTERBIN}/hareg -q ${DB}" \
				    ${DSDUMP}/hareg-q
			fi
			#
			# Gather the SAP start and stop scripts
			#
			if [ "${PKG}" = "SUNWscsap" ]; then
				HADSDIR=/etc/opt/${PKG}
				DSDUMP="cluster/etc/opt/${PKG}"
				get_file "${HADSDIR}/hadsconf" ${DSDUMP} || \
				    touch_file ${DSDUMP}/hadsconf.NOTFOUND
				SAPDIR="${CLUSTER}/ha/sap"
				DSDUMP="cluster/opt/SUNWcluster/ha/sap"
				START=${SAPDIR}/hasap_start_all_instances
				STOP=${SAPDIR}/hasap_stop_all_instances
				get_file "${STOP}" ${DSDUMP} || \
				    touch_file ${DSDUMP}/hasap_stop_all_instances.NOTFOUND
				get_file "${START}" ${DSDUMP} || \
				    touch_file ${DSDUMP}/hasap_start_all_instances.NOTFOUND
				#
				# Bypass the rest of the loop
				#
				continue
			fi
			#
			# Copy the database fault monitor
			# configuration and log files
			#
			SRCDIR="/var/opt/${PKG}"
			DSDUMP="cluster/var/opt/${PKG}"
			get_dir recursive "${SRCDIR}" ${DSDUMP}
			#
			# Location of files have changed as of
			# Sun Cluster 3.x. The conditional check below
			# will do the right thing
			#
			if [ ${CL_VERSION} -eq 2 ]; then
				SRCDIR="/etc/opt/${PKG}"
				DSDUMP="cluster/etc/opt/${PKG}"
				get_file "${SRCDIR}/ha${DB}_support" ${DSDUMP}
				get_file "${SRCDIR}/ha${DB}_config_V1" ${DSDUMP}
			elif [ ${CL_VERSION} -eq 3 ]; then
				for dir in oracle_server oracle_listener
				do
					SRCDIR="/opt/${PKG}/${dir}/etc"
					DSDUMP="cluster/opt/${PKG}/${dir}/etc"
					get_dir "${SRCDIR}" ${DSDUMP}
				done
			fi
		fi
	done

	#
	# collect other dataservice pkg info
	#
	for PKG in ${SCDSPKGS}; do
		pkginfo -q ${PKG} >/dev/null 2>&1
		if [ $? -eq 0 ]; then
			echo_file ${PKGFILE} "Processed: "${PKG}
			get_cmd "/usr/sbin/pkgchk -n ${PKG}" ${IDUMP}/pkgchk-n.${PKG}!
			# Only keep files that received errors
			if [ $? -eq 0 ]; then
				rm -f "${EXP_TARGET}/${IDUMP}/pkgchk-n.${PKG}" 2>&1
			fi
			DSDUMP="cluster/etc/opt/${PKG}"
			#
			# Copy the configuration files for each package
			# XXX -- Sun Cluster 2.x specific
			#
			if [ ${CL_VERSION} -eq 2 ]; then
				get_file "/etc/opt/${PKG}/hadsconf" ${DSDUMP}
			fi
			#
			# Create the dataservice dump directory
			#
			case ${PKG} in
			"SUNWscdns")	DS="dns";;
			"SUNWsclts")	DS="lotus";;
			"SUNWsctiv")	DS="tivoli";;
			"SUNWscnsl")	DS="nsldap";;
			"SUNWscnew")	DS="nsnews";;
			"SUNWschtt")	DS="nshttp";;
			"SUNWscnsm")	DS="nsmail";;
			*)		continue;;
			esac
			DSDUMP="cluster/${DS}"
			if [ ${CL_VERSION} -eq 2 -a "$cluststate" = "UP" ]; then
				get_cmd "${CLUSTERBIN}/hareg -q ${DS}" ${DSDUMP}/hareg-q
			elif [ ${CL_VERSION} -eq 3 ]; then
				get_dir "/opt/${PKG}/etc" "cluster/opt/${PKG}/etc"
			fi
		fi
	done

	#
	# Check base SC packages
	#
	if [ ${CL_VERSION} -eq 2 ]; then
		for PKG in ${SC2PKGS}; do
			pkginfo -q ${PKG} >/dev/null 2>&1
			if [ $? -eq 0 ]; then
				echo_file ${PKGFILE} "Processed: "${PKG}
				get_cmd "/usr/sbin/pkgchk -n ${PKG}" ${IDUMP}/pkgchk-n.${PKG}!
				# Only keep files that received errors
				if [ $? -eq 0 ]; then
					rm -f "${EXP_TARGET}/${IDUMP}/pkgchk-n.${PKG}" 2>&1
				fi
			fi
		done
	elif [ ${CL_VERSION} -eq 3 ]; then
		for PKG in ${SC3PKGS}; do
			pkginfo -q ${PKG} >/dev/null 2>&1
			if [ $? -eq 0 ]; then
				echo_file ${PKGFILE} "Processed: "${PKG}
				get_cmd "/usr/sbin/pkgchk -n ${PKG}" ${IDUMP}/pkgchk-n.${PKG}!
				# Only keep files that received errors
				if [ $? -eq 0 ]; then
					rm -f "${EXP_TARGET}/${IDUMP}/pkgchk-n.${PKG}" 2>&1
				fi
			fi
		done
	fi
}
#include clust_pkgs

#
# @(#)clust_dtk.shf 1.1 01/01/10
#
# DTK collection script
#
clust_dtk()
{
	#
	# SunCluster 3.x specific
	#
	[ ${CL_VERSION} -ne 3 ] && return

	pkginfo -q SUNWscdtk
	[ $? -ne 0 ] && return

	[ "${cluststate}" = "DOWN" ] && return

	IDUMP="cluster/dtk"

	get_cmd "/usr/cluster/dtk/bin/cmm_ctl -g" ${IDUMP}/cmm_ctl-g
	get_cmd "/usr/cluster/dtk/bin/dcs_config -c info" ${IDUMP}/dcs_config-c_info
	get_cmd "/usr/cluster/dtk/bin/dcs_config -c status" ${IDUMP}/dcs_config-c_status
	#
	# XXX -- there appears to be a bug in adb that causes
	# nodes to hang on Solaris 8. Need to investigate further
	# before adding this functionality. One potential fix is
	# to use mdb.
	#
	#get_cmd "/usr/cluster/dtk/bin/ddb /dev/ksyms /dev/mem" ${IDUMP}/ddb
	get_cmd "/usr/cluster/dtk/bin/orbadmin -P all" ${IDUMP}/orbadmin-P
	get_cmd "/usr/cluster/dtk/bin/orbadmin -R all" ${IDUMP}/orbadmin-R
	get_cmd "/usr/cluster/dtk/bin/print_net_state -s" ${IDUMP}/print_net_state-s
	get_cmd "/usr/cluster/dtk/bin/replctl" ${IDUMP}/replctl

	#
	# XXX -- Add for next release
	# get_cmd "/usr/cluster/lib/sc/reserve -c status" ${IDUMP}/reserve-c
	# get_cmd "/usr/cluster/lib/sc/rgmd_debug printbuf" ${IDUMP}/rgmd_debug
}
#include clust_dtk

#
# @(#)clust_config_data.shf 1.4 01/02/14
#
# Cluster specific data
#

#
# Verify the appropriate patch is installed prior to collecting pmf output
#
pmfadm_check()
{
	scversion=$(egrep PRODVERS /var/sadm/pkg/SUNWsc/pkginfo | awk -F= '{print $2}')
	installed_patch=$(egrep PATCHLIST /var/sadm/pkg/SUNWsc/pkginfo | awk -F= '{print $2}')

	if [ -n "${scversion}" -a -n "${installed_patch}" ]; then
		#
		# The patches below fix bug# 4402834 which can cause rpc.pmfd to exit
		# abnormally and panic the system
		#
		case ${scversion} in
		"2.1")
			patchnum=105458
			patchrev=17
			;;
		"2.2")
			case $(uname -r) in
			"5.6")
				patchnum=109208
				patchrev=08
				;;
			"5.7")
				patchnum=109209
				patchrev=08
				;;
			"5.8")
				patchnum=109210
				patchrev=07
				;;
			esac
			;;
		esac

		#
		# FIX: unknown SC version / OS release leaves patchnum and
		# patchrev unset, which made the test below a runtime error.
		#
		[ -z "${patchnum}" -o -z "${patchrev}" ] && return

		# NOTE(review): assumes PATCHLIST holds a single
		# "number-rev" entry -- confirm for multi-patch lists.
		num=${installed_patch%%-*}
		rev=${installed_patch##*-}

		#
		# Only run the pmfadm command if the appropriate patch is installed
		#
		if [ ${num} -eq ${patchnum} -a ${rev} -ge ${patchrev} ]; then
			get_cmd "${CLUSTERBIN}/pmfadm -l \"\"" cluster/config/pmfadm-l
		else
#			logfile "Patch ${patchnum}-${patchrev} is not installed. Skipping pmfadm -l collection"
			TEXT=`gettext "Patch %s-%s is not installed. Skipping pmfadm -l collection"`
			TEXT=`printf "${TEXT}" "${patchnum}" "${patchrev}"`
			logfile "${TEXT}"
		fi
	fi
}

#
# Sun Cluster 2.x runtime and configuration data; sets the global
# cluststate flag ("UP"/"DOWN") based on whether clustd is running.
#
clust_config_data_2x()
{
	#
	# To save time, only run certain commands when cluster node is running
	#
	if [ "`ps -e | grep clustd | wc -l | cut -c 8`" != "0" ]; then
#		logfile "Cluster is running on this node"
		TEXT=`gettext "Cluster is running on this node"`
		logfile "${TEXT}"
		cluststate="UP"
		get_cmd "${CLUSTERBIN}/get_node_status" cluster/config/
		get_cmd "${CLUSTERBIN}/clustm dumpstate ${CLUSTERNAME}" cluster/config/clustm-dumpstate
		pmfadm_check
		get_cmd "${CLUSTERBIN}/scconf ${CLUSTERNAME} -p" cluster/config/scconf-p
		get_cmd "${CLUSTERBIN}/hastat" cluster/config/
		get_cmd "${CLUSTERBIN}/hareg" cluster/config/
		get_cmd "${PNMBIN}/pnmstat -l" cluster/config/pnmstat-l
	else
		cluststate="DOWN"
#		logfile "Cluster is NOT running on this node"
		TEXT=`gettext "Cluster is NOT running on this node"`
		logfile "${TEXT}"
	fi

	get_file "/etc/pnmconfig" cluster/etc
	get_file "/.rhosts" cluster/config/rhosts!
	get_file "${CLUSTERBIN}/init.snmpd" cluster/config

	#
	# XXX -- We use a loop instead of doing a recursive get_dir call
	#	The reason for this is that we need to execlude certain
	#	directories and we do this in the loop
	#
	# Get all files in /var/opt/SUNWcluster except the ${CLUSTERNAME} dir
	for DIR in `find /var/opt/SUNWcluster -type d | grep -v ${CLUSTERNAME}`
	do
		DSDUMP=`echo ${DIR} | sed -e 's/^\///'`
		# Don't get the devices directory, just list it
		if echo "${DIR}" | egrep -s -e "devices$"
		then
			get_cmd "/bin/ls -l ${DIR}" cluster/${DSDUMP}/
		else
			# Don't collect nfs_probe_mountpoints
			if echo "${DIR}" | egrep -s -e "nfs_probe_mountpoints"
			then
				continue
			fi
			get_dir "${DIR}" cluster/${DSDUMP}
		fi
	done

	# Get all files in /etc/opt/SUNWcluster except nfs_probe_mountpoints
	for DIR in `find /etc/opt/SUNWcluster -type d | \
	    grep -v nfs_probe_mountpoints`
	do
		DSDUMP=`echo ${DIR} | sed -e 's/^\///'`
		get_dir "${DIR}" cluster/${DSDUMP}
	done
}

#
# Data common to SC 2.x and 3.x: SCI private interconnect info
#
clust_config_common()
{
	#
	# Cluster Private Interconnect info
	#
	pkginfo -q SUNWsci
	if [ $? -eq 0 ]; then
		get_file "/etc/sci.ifconf" cluster/etc
		get_file "/etc/sma.config" cluster/etc
		get_file "/etc/sma.ip" cluster/etc
		#
		# Create data dump directory
		#
		DSDUMP="cluster/sci"
		get_cmd "${SCIBIN}/sciadm -ident" ${DSDUMP}/sciadm-ident
		get_cmd "${SCIBIN}/sciinfo -a" ${DSDUMP}/sciinfo-a
		#
		# XXX -- Sun Cluster 2.x specific
		#
		if [ ${CL_VERSION} -eq 2 ]; then
			get_cmd "${SMABIN}/get_ci_status" ${DSDUMP}/get_ci_status
			get_cmd "${SMABIN}/smactl -l" ${DSDUMP}/smactl-l
		fi
		for id in 0 1
		do
			get_cmd "${SCIDBIN}/scidstat -su ${id}" \
			    ${DSDUMP}/scidstat-su_${id}
		done
	fi
}

#
# Sun Cluster 3.x runtime and configuration data; sets the global
# cluststate flag based on scconf's exit status.
#
clust_config_data_3x()
{
	get_cmd "${CLUSTERBIN}/scinstall -pv" cluster/config/scinstall-pv

	#
	# scconf will return 0 if node is in the cluster
	#
	get_cmd "${CLUSTERBIN}/scconf -pv" cluster/config/scconf-pv
	if [ $? -eq 0 ]; then
		cluststate="UP"
		get_cmd "${CLUSTERBIN}/pnmstat -lm" cluster/config/pnmstat-lm
		# Changed from pmfadm -l "" due to bug
		for handle in `${CLUSTERBIN}/pmfadm -L | awk -F: '{print $2}'`
		do
			get_cmd "${CLUSTERBIN}/pmfadm -l ${handle}" cluster/config/pmfadm-l.${handle}
		done
		get_cmd "${CLUSTERBIN}/scstat" cluster/config/scstat
		get_cmd "${CLUSTERBIN}/scstat -pv" cluster/config/scstat-pv
		get_cmd "${CLUSTERBIN}/scstat -pvv" cluster/config/scstat-pvv
		get_cmd "${CLUSTERBIN}/scconf -pvv" cluster/config/scconf-pvv
		get_cmd "${CLUSTERBIN}/scrgadm -pv" cluster/config/scrgadm-pv
		get_cmd "${CLUSTERBIN}/scrgadm -pvv" cluster/config/scrgadm-pvv
		get_cmd "${CLUSTERBIN}/sccheck -vh `uname -n`" cluster/config/sccheck-vh
	else
		cluststate="DOWN"
#		logfile "This node is currently not in the cluster"
		TEXT=`gettext "This node is currently not in the cluster"`
		logfile "${TEXT}"
		rm -f ${EXP_TARGET}/cluster/scconf-pv.*
	fi

	get_file "/etc/inet/ntp.conf" etc/inet
	get_dir recursive "/var/cluster" cluster/var/cluster
	get_dir recursive "/etc/cluster" cluster/etc/cluster

	return
}

#
# Dispatch to the version-specific config collector, then collect
# the version-independent data
#
clust_config_data ()
{
	if [ ${CL_VERSION} -eq 2 ]; then
		clust_config_data_2x
	else
		clust_config_data_3x
	fi
	clust_config_common
}
#include clust_config_data

#
# @(#)clust_ds_haoracle.shf 1.1 01/01/10
#
# HA-ORACLE/OPS data
#
clust_ds_haoracle ()
{
	IDUMP="cluster/oracle"
	HAORADIR="/var/opt/oracle"

	#
	# Continue only if HA-ORACLE has been installed
	#
	[ ! -d "${EXP_TARGET}/${IDUMP}" ] && return

	#
	# collect /var/opt/oracle files
	#
	get_file "${HAORADIR}/oratab" cluster/var/opt/oracle || \
	    touch_file cluster/var/opt/oracle/oratab.NOTFOUND
	get_file "${HAORADIR}/listener.ora" cluster/var/opt/oracle || \
	    touch_file cluster/var/opt/oracle/listener.ora.NOTFOUND
	get_file "${HAORADIR}/tnsnames.ora" cluster/var/opt/oracle || \
	    touch_file cluster/var/opt/oracle/tnsnames.ora.NOTFOUND
	get_file "${HAORADIR}/sqlnet.ora" cluster/var/opt/oracle || \
	    touch_file cluster/var/opt/oracle/sqlnet.ora.NOTFOUND

	#
	# XXX -- Only run if SunCluster version 2.x and cluster is up
	#
	[ ${CL_VERSION} -ne 2 ] && return
	if [ "$cluststate" = "UP" ]; then
		get_cmd "${CLUSTERBIN}/haoracle list" ${IDUMP}/haoracle_list
	fi
}
#include clust_ds_haoracle

#
# @(#)clust_ds_hasybase.shf 1.1 01/01/10
#
# HA-SYBASE
#
clust_ds_hasybase ()
{
	HASYBDIR=/var/opt/sybase
	HASYBPKGNAME=SUNWscsyb
	IDUMP="cluster/sybase"

	#
	# Continue only if HA-SYBASE has been installed
	#
	[ ! -d "${EXP_TARGET}/${IDUMP}" ] && return

	#
	# Pick up files in /var/opt/sybase
	#
	get_file "${HASYBDIR}/sybtab" cluster/var/opt/sybase || \
	    touch_file cluster/var/opt/sybase/sybtab.NOTFOUND
	get_file "${HASYBDIR}/interfaces" cluster/var/opt/sybase || \
	    touch_file cluster/var/opt/sybase/interfaces.NOTFOUND

	#
	# XXX -- Only run on SunCluster 2.x and if cluster is up
	#
	[ ${CL_VERSION} -ne 2 ] && return
	if [ "$cluststate" = "UP" ]; then
		get_cmd "${CLUSTERBIN}/hasybase list" ${IDUMP}/hasybase_list
	fi
}
#include clust_ds_hasybase

#
# @(#)clust_ds_hainformix.shf 1.1 01/01/10
#
# HA-INFORMIX
# - informix files specific for clustering
#
clust_ds_hainformix ()
{
	HAINFDIR="/var/opt/informix"
	HAINFPKGNAME=SUNWscinf
	IDUMP="cluster/informix"

	#
	# Continue only if the HA-INFORMIX has been installed
	# - directory will exist if informix has been installed
	#
	[ ! -d "${EXP_TARGET}/${IDUMP}" ] && return

	#
	# collect /var/opt/informix
	#
	get_file "${HAINFDIR}/inftab" cluster/var/opt/informix || \
	    touch_file cluster/var/opt/informix/inftab.NOTFOUND
	get_file "${HAINFDIR}/etc/sqlhosts" cluster/var/opt/informix/etc || \
	    touch_file cluster/var/opt/informix/etc/sqlhosts.NOTFOUND

	if [ "$cluststate" = "UP" ]; then
		get_cmd "${CLUSTERBIN}/hainformix list" ${IDUMP}/hainformix_list
	fi
}
#include clust_ds_hainformix

#
# @(#)clust_ds_sap.shf 1.1 01/01/10
#
# HA-SAP
#
clust_ds_sap ()
{
	HADSDIR=/etc/opt
	HADSBINDIR=/opt/SUNWcluster/ha/sap
	HADSPKGNAME=SUNWscsap
	IDUMP="cluster/sap"

	#
	# XXX -- Only verified on SunCluster 2.x
	#
	[ ${CL_VERSION} -ne 2 ] && return

	#
	# Continue only if HA-SAP has been installed
	#
	[ ! -d "${EXP_TARGET}/${IDUMP}" ] && return

	#
	# Get more info for each configured HA-SAP instance
	#
	if [ -f "${HADSDIR}/${HADSPKGNAME}/hadsconf" ]; then
		SAPSIDLIST=`/bin/grep YOUR_SAP_SID ${HADSDIR}/${HADSPKGNAME}/hadsconf | \
		    /bin/cut -d' ' -f 5`
	else
		SAPSIDLIST=
		touch_file ${IDUMP}/hadsconf.NOTFOUND
	fi

	for SAPSID in ${SAPSIDLIST}; do
		DSDUMP=${IDUMP}/${SAPSID}
		get_cmd "/bin/ps -ecf | /bin/grep ${SAPSID}" ${DSDUMP}/ps-ecf
		get_cmd "/usr/sap/${SAPSID}/SYS/exe/run/disp+work -V" \
		    ${DSDUMP}/disp+work-V || \
		    touch_file ${DSDUMP}/disp+work-V.NOTFOUND
		get_dir recursive "/usr/sap/${SAPSID}/SYS/profile" \
		    ${DSDUMP}/profile || \
		    touch_file ${DSDUMP}/profile.NOTFOUND
		# there should only be one of these per SAPSID
		WORKDIR=`echo /usr/sap/${SAPSID}/DVEBMGS[0-9][0-9]/work`
		if [ -d "${WORKDIR}" ]; then
			get_file "${WORKDIR}/*.log" ${DSDUMP}/work
			get_file "${WORKDIR}/*.trc" ${DSDUMP}/work
			get_file "${WORKDIR}/dev*" ${DSDUMP}/work
			get_file "${WORKDIR}/std*" ${DSDUMP}/work
			get_file "${WORKDIR}/[A-Z]*" ${DSDUMP}/work
		else
			touch_file ${DSDUMP}/DVEBMGS-work.tar.NOTFOUND
		fi
	done
}
#include clust_ds_sap

#
# @(#)clust_ds_hanfs.shf 1.1 01/01/10
#
# HA NFS collection script
#
clust_ds_hanfs ()
{
	IDUMP="cluster/hanfs"
	HANFSPKGNAME=SUNWscnfs

	#
	# XXX -- Verified on SunCluster 3.x only
	#
	[ ${CL_VERSION} -ne 3 ] && return
	[ "${cluststate}" = "DOWN" ] && return

	#
	# Continue only if HA-NFS has been installed
	#
	pkginfo -q ${HANFSPKGNAME}
	[ $? -ne 0 ] && return

	#
	# For each NFS resource group:
	#	- get the vfstab/dfstab
	#
	for line in `find_rg_info SUNW.nfs`
	do
		RSNAME=`echo $line | cut -d: -f 1`
		PATHPREFIX=`echo $line | cut -d: -f 2`
		FILE=${PATHPREFIX}/SUNW.nfs/dfstab.${RSNAME}
		get_file "${FILE}" ${IDUMP}/dfstab.${RSNAME}!
	done
}
#include clust_ds_hanfs

#
# @(#)clust_ds_nshttp.shf 1.1 01/01/10
#
# Netscape HTTP collection script
#
clust_ds_nshttp ()
{
	IDUMP="cluster/nshttp"

	#
	# XXX -- Verified only on SunCluster 3.x
	# Continue only if HA-NSHTTP has been installed
	#
	[ ${CL_VERSION} -ne 3 ] && return
	[ "${cluststate}" = "DOWN" ] && return
	pkginfo -q SUNWschtt
	[ $? -ne 0 ] && return

	#
	# For each HTTP resource group:
	#	- get the magnus.conf file
	#
	for line in `find_rg_info SUNW.iws`
	do
		RSNAME=`echo $line | cut -d: -f 1`
		CONFDIR=`echo $line | cut -d: -f 2`
		FILE=${CONFDIR}/config/magnus.conf
		get_file "${FILE}" ${IDUMP}/magnus.conf.${RSNAME}!
	done
}
#include clust_ds_nshttp

#
# @(#)clust_sds_data.shf 1.1 01/01/10
#
# SDS cluster specific data
#
clust_sds_data ()
{
	IDUMP="cluster/sds"

	#
	# SunCluster 2.x specific
	#
	if [ ${CL_VERSION} -eq 2 ]; then
		pkginfo -q SUNWdid
		[ $? -ne 0 ] && return
		get_file /etc/did.conf cluster/etc > /dev/null 2>&1
	fi

	get_cmd "${CLUSTERBIN}/scdidadm -l" ${IDUMP}/scdidadm-l
	get_cmd "${CLUSTERBIN}/scdidadm -L" ${IDUMP}/scdidadm-L

	#
	# Get more SDS data
	#
	pkginfo -q SUNWmdm
	if [ $? -eq 0 ]; then
		pkginfo -q SUNWmd
		[ $? -ne 0 ] && return
		#
		# Disksuite changed paths in its version for 5.8
		#
		if [ "`uname -r`" = "5.8" ]; then
			SDSBIN=/usr/sbin
		else
			SDSBIN=/usr/opt/SUNWmd/sbin
		fi
		DS=`${SDSBIN}/metaset|grep 'Set name'|sed 's/Set name = //'|cut -d, -f 1`
		for s in ${DS}
		do
			get_cmd "${SDSBIN}/medstat -s ${s}" ${IDUMP}/medstat-s.${s}
		done
	fi
}
#include clust_sds_data

#
# @(#)savefiles_1x.shf 1.1 01/01/10
#
#
# List of files/directories to save and exclude
#
savefiles_1x()
{
	get_file "/.rhosts" cluster/rhosts!
	get_file "/etc/pnmconfig" cluster/etc
	get_file "/etc/sci.ifconf" cluster/etc
	get_file "/etc/sma.ip" cluster/etc
	get_file "/etc/sma.config" cluster/etc

	get_dir recursive /var/opt/sybase cluster/var/opt/sybase
	get_dir recursive /var/opt/informix cluster/var/opt/informix

	# Exclude /var/opt/SUNWhadf/hadf/nfs_probe_mountpoints
	for DIR in `find /var/opt/SUNWhadf -type d | \
	    grep -v nfs_probe_mountpoints`
	do
		DSDUMP=`echo ${DIR} | sed -e 's/^\///'`
		get_dir ${DIR} cluster/${DSDUMP}
	done

	# Exclude /etc/opt/SUNWhadf/nfs_probe_mountpoints
	for DIR in `find /etc/opt/SUNWhadf -type d | \
	    grep -v nfs_probe_mountpoints`
	do
		DSDUMP=`echo ${DIR} | sed -e 's/^\///'`
		get_dir ${DIR} cluster/${DSDUMP}
	done

	# Exclude /var/opt/oracle/nlsdata
	for DIR in `find /var/opt/oracle -type d | grep -v nlsdata`
	do
		DSDUMP=`echo ${DIR} | sed -e 's/^\///'`
		get_dir ${DIR} cluster/${DSDUMP}
	done
}
#include savefiles_1x

#
# Collect data for each supported database product
#
database_data()
{
	oracle_collector
	informix_collector
	sybase_collector
}

#
# Collect data for Cluster Data Service data
#
clust_dataservice_data ()
{
	clust_ds_haoracle
	clust_ds_hasybase
	clust_ds_hainformix
	clust_ds_sap
	clust_ds_hanfs
	clust_ds_nshttp
}

#
# Collect cluster runtime configuration data
#
collect_cluster_data()
{
	logfile "Starting collect_cluster_data at `date`"
	#
	# Call clust_config_data first since it sets the global
	# cluststate flag which is used by other functions
	#
	clust_config_data
	clust_pkgs
	clust_dtk
	clust_dataservice_data
	clust_sds_data
	database_data
	logfile "Finished collect_cluster_data at `date`"
}

# =========== Start of script processing ======================================

#
# This script will be called from explorer, option will always be 'save'
#
TEXT=`gettext "Cluster software not installed"`
require "test -d /etc/opt/SUNWhadf -o -d /etc/opt/SUNWcluster -o -d /etc/cluster" "${TEXT}"

#
# Set basedir to the top directory. This
# allows for an easy change to the cluster tree
#
BASEDIR=${EXP_TARGET}/cluster

#
# if HA 1.x just collect some config files
#
if [ -d /etc/opt/SUNWhadf ]; then
	savefiles_1x
	exit 0
fi

SCIDBIN="/opt/SUNWscid/bin"
SCIBIN="/opt/SUNWsci/bin"

#
# if SC 2.x
#
pkginfo -q SUNWsc
if [ $? -eq 0 ]; then
#	logfile "Collecting Sun Cluster 2.x Configuration and Runtime info..."
	TEXT=`gettext "Collecting Sun Cluster 2.x Configuration and Runtime info..."`
	logfile "${TEXT}"
	CLUSTERNAME=`cat /etc/opt/SUNWcluster/conf/default_clustername`
	export CLUSTERNAME
	if [ -z "${CLUSTERNAME}" ]; then
#		logfile "Cannot locate default_clustername file, exiting script"
		TEXT=`gettext "Cannot locate default_clustername file, exiting script"`
		logfile "${TEXT}"
		exit 1
	fi
	CLUSTER=/opt/SUNWcluster
	CLUSTERBIN=${CLUSTER}/bin
	SMABIN=/opt/SUNWsma/bin
	PNMBIN="/opt/SUNWpnm/bin"
	CL_VERSION=2
fi

#
# if SC 3.x
#
pkginfo -q SUNWscr
if [ $? -eq 0 ]; then
	# FIX: message previously said "2.x" in this 3.x branch
#	logfile "Collecting Sun Cluster 3.x Configuration and Runtime info..."
	TEXT=`gettext "Collecting Sun Cluster 3.x Configuration and Runtime info..."`
	logfile "${TEXT}"
	CLUSTER=/usr/cluster
	CLUSTERBIN=${CLUSTER}/bin
	CL_VERSION=3
elif [ "${CL_VERSION}" -ne 2 ]; then
#	logfile "Cannot locate SUNWsc package ... cluster collection skipped"
	TEXT=`gettext "Cannot locate SUNWsc package ... cluster collection skipped"`
	logfile "${TEXT}"
fi

collect_cluster_data

exit 0