#!/bin/ksh
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
#  
#  
# Licensed Materials - Property of IBM 
#  
# (C) COPYRIGHT International Business Machines Corp. 1997,2006 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)76 1.83.1.177 src/avs/fs/mmfs/ts/admin/mmcommon.sh, mmfs, avs_rgpfs24, rgpfs24s008a 11/8/06 12:34:03
#############################################################################
#
# Usage:  mmcommon keyword <arguments>
#
#############################################################################

# Include global declarations and service routines.
. /usr/lpp/mmfs/bin/mmglobfuncs
. /usr/lpp/mmfs/bin/mmsdrfsdef
. /usr/lpp/mmfs/bin/mmfsfuncs

# Script-level initialization: identify this source file for tracing,
# honor the debug environment switches, and record entry in the trace.
sourceFile="mmcommon.sh"
[[ -n $DEBUG || -n $DEBUGmmcommon ]] && set -x
$mmTRACE_ENTER "$*"

# Local work files.  Names should be of the form:
#   fn=${tmpDir}fn.${mmcmd}.$$

# List of temporary files to be cleaned up on exit; none are needed yet.
LOCAL_FILES=" "


# Local variables
typeset -l kword_lc    # keyword argument, automatically folded to lower case
typeset -l arg3_lc     # third argument, automatically folded to lower case
integer n              # general-purpose integer work variable
rc=0                   # overall return code for the script

# Local functions

#####################################################################
#
# Function:  Runs the specified command on the first active node
#            (i.e., node on which the GPFS daemon is running)
#            it finds in the list of nodes provided by the caller.
#
# Input:     $1 - name of the node to try first, or 0
#            $2 - file with reliable node names
#            $3 - file to copy on the remote node, or _NO_FILE_COPY_
#            $4 - file system that cannot be mounted, or _NO_MOUNT_CHECK_
#            $5 - scope of mount checking
#            $6 - symbolic link indicator: _LINK_ or _NO_LINK_
#            $7 - command to execute; must be mmremote or tsxxxxxxx
#            $8 - argument list for the remote command
#
# Output:    Depends on the particular remote command
#
# Returns:   0 - command completed successfully
#            return code from the remote command
#
#####################################################################
function runRemoteCommand  # <preferredNode> <nodeFile> <fileToCopy>
                           # <fsToCheck> <scope> <symLink> <command> <args>
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGrunRemoteCommand ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset preferredNode=$1
  typeset nodeFile=$2
  typeset fileToCopy=$3
  typeset fsToCheck=$4
  typeset scope=$5
  typeset symLink=$6
  typeset command=$7
  typeset args=$8

  typeset rc=0
  typeset nodeName rcInfo lsrc

  # If this is a single node environment, assure that
  # the command will be executed locally.
  [[ $MMMODE = single ]] && preferredNode=$ourNodeName

  # If there is no preferred node, see if the local node is in the list.
  [[ $preferredNode = "0" ]] &&  \
    preferredNode=$($grep -w $ourNodeName $nodeFile)

  # Try the nodes one by one until the command is executed somewhere.
  # Always start with the local node first, if it is in the list.
  # If there is still no preferred node, substitute a blank so the
  # "grep -v -w" below does not filter out every line of the node file.
  [[ -z $preferredNode ]] && preferredNode="$BLANKchar"
  for nodeName in $(print -- "$preferredNode" ; $grep -v -w "$preferredNode" $nodeFile)
  do
    # If nodeName refers to the node that we are running on,
    # try to execute the command locally.
    if [[ $nodeName = $ourNodeName ]]
    then
      runLocalCommand $fsToCheck $scope $symLink $command "$args"
      rc=$?
      # If this is a single node environment, there is nothing more to do.
      [[ $MMMODE = single ]] && break

      # If acceptable error (daemon not up or waiting for quorum),
      # try to find some other node to run the command.  Otherwise,
      # the command was executed and either succeeded or failed.
      # Either way, pass the result to the caller.
      if [[ $rc -eq $MM_DaemonDown || $rc -eq $MM_QuorumWait ]]
      then
        continue
      else
        return $rc
      fi
    fi  # end of if [[ $nodeName = $ourNodeName ]]

    # Invoke mmdsh to run the command on the remote node.
    # Any stale return-code marker files from a prior attempt are
    # removed first so they cannot be mistaken for this run's result.
    $rm -f $tmpDir$command$$.*
    $mmdsh -svL $nodeName $mmremote onbehalf $ourNodeName $command$$  \
      $MMMODE $fileToCopy $fsToCheck $scope $symLink $command "$args"
    rc=$?

    #----------------------------------------------------------------------
    #
    # Determine the return code from the remote command.
    #
    # This is not an easy task because rsh and different versions of
    # ssh treat the return codes from the remote command differently.
    # For example, rsh does not propagate back the return code, while
    # at least some of the ssh versions pass back the return code.
    # This also makes it very difficult to distinguish between failures
    # of rsh/ssh itself and failures of the remote command.
    #
    # Our solution is to pass back the return code by creating a special
    # file that has the return code appended at the end of its name.
    # If we do not see this file on our side, we will assume that mmdsh
    # returned the return code from the remote command.  Although this is
    # not necessarily always the case (there could have been a problem
    # with the touch command that creates the special file) it is the best
    # we can do under the circumstances.
    #----------------------------------------------------------------------

    # Look for the return code to be encoded in a special file name.
    rcInfo=$($ls $tmpDir$command$$.* 2> /dev/null)
    $rm -f $tmpDir$command$$.*
    if [[ -n $rcInfo ]]
    then
      # The return code was passed back via the empty file mechanism.
      # Extract the return code from the file name.
      rc=${rcInfo#$tmpDir$command$$\.}

      # If acceptable error (daemon not up or waiting for quorum),
      # try to find some other node to run the command.  Otherwise,
      # the command was executed and either succeeded or failed.
      # Either way, pass the result to the caller.
      if [[ $rc -eq $MM_DaemonDown || $rc -eq $MM_QuorumWait ]]
      then
        continue
      else
        return $rc
      fi
    fi  # end if [[ -n $rcInfo ]]

    # Assume mmdsh returned the return code from the remote command.
    if [[ $rc -eq $MM_DaemonDown     ||
          $rc -eq $MM_QuorumWait     ||
          $rc -eq $MM_HostDown       ||
          $rc -eq $MM_ConnectTimeout ]]
    then
      # If the daemon is not up, if it is waiting for quorum, or if mmdsh
      # could not get to the node, try another node to run the command.
      continue
    fi

    if [[ $rc -ne 0 ]]
    then
      # The command failed.
      printErrorMsg 104 "runRemoteCommand" "$nodeName: $command $args"
    fi

    # Whether the command executed successfully or not,
    # return to the caller with the return code.
    return $rc

  done  # end for nodeName in $(print -- "$preferredNode" ; $grep -v -w "$preferredNode" $nodeFile)

  # Single node environment:  report whatever the local attempt returned,
  # unless it was one of the retriable "daemon unavailable" codes.
  if [[ $MMMODE = single       &&
        $rc -ne $MM_DaemonDown &&
        $rc -ne $MM_QuorumWait ]]
  then
    return $rc
  fi

  # If we ever get here, all nodes were tried, but the command could not be
  # executed.  Either all nodes are down, or the daemon is down on all nodes.
  [[ $command != $MOUNT_CHECK_ONLY ]] &&  \
    printErrorMsg 312 $mmcmd $command
  rc=$MM_DaemonDown

  return $rc

}  #----- end of function runRemoteCommand -----------------------


#############################################################################
#
# Function:  Generate a list of all disk devices known to this node.
#
# Input:     None.
#
# Output:    For each device, a line is generated containing the following:
#              device_name device_type
#
# Returns:   Always zero.  Errors are ignored.
#
#############################################################################
function getDiskDevices  #
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetDiskDevices ]] && set -x
  $mmTRACE_ENTER "$*"

  # Emit one "device_name device_type" line per disk-like device.
  # The source of the device list depends on the operating system.
  case $osName in

    AIX )
      # getlvodm -F lists the physical volumes; classify each one
      # by the pattern of its name.  Most-specific patterns first.
      LC_ALL=C $getlvodm -F 2>/dev/null |  \
        $awk '
          /hdiskpower/  { print $1 " powerdisk" ; next }
          /dlmfdrv/     { print $1 " dlmfdrv"   ; next }
          /vpath/       { print $1 " vpath"     ; next }
          /hdisk/       { print $1 " hdisk"     ; next }
        '
      ;;

    Linux )
      # /proc/partitions lists block devices; skip entries that do not
      # look like whole disks (fewer than 4 fields or too few blocks).
      $awk '
        /emcpower/   { if (NF > 3 && $3 > 1) print $4 " powerdisk" ; next }
        /vpath/      { if (NF > 3 && $3 > 1) print $4 " vpath"     ; next }
        /[sh]d/      { if (NF > 3 && $3 > 1) print $4 " generic"   ; next }
      ' /proc/partitions 2>/dev/null
      ;;

    * )
      print -u2  " Unknown operating system $osName "
      cleanupAndExit
      ;;

  esac  # end of case $osName in

  return 0

}  #----- end of function getDiskDevices ------------------------


#############################################################################
#
# Function:  Generate a file correlating node numbers, IP addresses,
#            and reliable node names for nodes in the cluster.
#            The values are obtained from the latest mmsdrfs file.
#
# Input:     $1 - name of the node data file to create
#            $2 - (optional) nodeset for which to return information.
#                 If missing, or if GLOBAL_ID specified, return all
#                 nodes in the cluster.
#
# Output:    Each record of the output file consists of the following
#            fields (separated with a single blank character):
#
#              node_number ip_address adapter_type reliable_hostname  \
#                 switch_node_number adapter_type hags_node_number
#
# Returns:   0 - file created successfully
#            If an error is encountered, this function invokes the
#            cleanupAndExit routine which terminates execution.
#
#############################################################################
function getNodeDataFromSdrfs  # <fileName> [<nodesetId>]
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetNodeDataFromSdrfs ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset outfile=$1
  typeset nodesetId=$2

  typeset gpfsInitOutput

  # The output file name is a required parameter.
  [[ -z $outfile ]] &&  \
    checkForErrors "getNodeDataFromSdrfs - Missing output file parameter" 1

  # Ensure the GPFS system data is up to date.
  gpfsInitOutput=$(gpfsInit nolock)
  setGlobalVar $? $gpfsInitOutput

  # Nothing to report if the mmsdrfs file does not exist on this node.
  [[ ! -f $mmsdrfsFile ]] && return 0

  # If a nodeset id is not specified, or if the global id is specified,
  # clear the variable so that we can look at all MEMBER_NODE lines.
  # Otherwise, prepend the '^' char to ensure a correct nodesetId match.
  [[ $nodesetId = "." ]] && nodesetId="$nsId"   # "." means the current nodeset
  [[ $nodesetId = 0 ]] && return 0              # nodeset 0:  nothing to return
  [[ $nodesetId = $GLOBAL_ID ]] && nodesetId=""
  [[ -n $nodesetId ]] && nodesetId="^$nodesetId"

  # Generate a list with the requested node information.
  # The awk script selects the MEMBER_NODE lines of the requested
  # nodeset; the all-caps *_Field variables (set by mmsdrfsdef) are
  # spliced into the single-quoted program as literal field numbers.
  $rm -f $outfile
  $awk -F: '                                                 \
    /'$nodesetId:$MEMBER_NODE:'/ {                           \
        # If this is an older cluster, the value for the     \
        # HAGS number may not be in the mmsdrfs file yet.    \
        if ( $'$GS_NODE_NUMBER_Field' == "" ) {              \
          { gsNodeNumber = $'$NODE_NUMBER_Field' }           \
        } else {                                             \
          { gsNodeNumber = $'$GS_NODE_NUMBER_Field' }        \
        }                                                    \
        # Write the information to the specified file.       \
        { print $'$NODE_NUMBER_Field' " "                    \
                $'$IPA_Field' " "                            \
                $'$ADAPTER_TYPE_Field' " "                   \
                $'$REL_HOSTNAME_Field' " "                   \
                $'$SWITCH_NODE_NUMBER_Field' " "             \
                $'$ADAPTER_TYPE_Field' " "                   \
                gsNodeNumber  >> "'$outfile'" }              \
    }                                                        \
  ' $mmsdrfsFile
  checkForErrors awk $?

  return 0

}  #----- end of function getNodeDataFromSdrfs --------------------


#############################################################################
#
# Function:  Generate a node list file showing the node number, IP address,
#            node name, and attributes (quorum/nonquorum, manager/client,
#            new) for all nodes in the cluster.
#            The values are obtained from the latest mmsdrfs file.
#            Since this routine is used by the daemon to obtain node data,
#            the node name returned is the one used by the GPFS daemons,
#            not the one used by the admin commands, although these are
#            usually the same.
#
# Input:     $1 - name of the node data file to create
#            $2 - (optional) assume cached data is current
#
# Output:    Each record of the output file consists of the following
#            fields (separated with a single blank character):
#
#              node_number ip_address nodename attributes...
#
#            where "attributes..." is a string containing a set of
#            comma-separated attribute values.
#
# Returns:   0 - file created successfully
#            If an error is encountered, this function invokes the
#            cleanupAndExit routine which terminates execution.
#
#############################################################################
function getNodeListFromSdrfs  # <fileName> [norefresh]
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetNodeListFromSdrfs ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset outfile=$1
  typeset -l refreshArg=$2   # -l folds the argument to lower case

  typeset gpfsInitOutput

  # The output file name is a required parameter.
  [[ -z $outfile ]] &&  \
    checkForErrors "getNodeListFromSdrfs - Missing output file parameter" 1

  # Ensure the GPFS system data is up to date,
  # unless the caller asked to trust the cached data.
  if [[ $refreshArg != "norefresh" ]]
  then
    gpfsInitOutput=$(gpfsInit nolock)
    setGlobalVar $? $gpfsInitOutput
  fi

  # Unlike getNodeDataFromSdrfs, a missing mmsdrfs file is fatal here.
  [[ ! -f $mmsdrfsFile ]] &&  \
    checkForErrors "getNodeListFromSdrfs - Missing /var/mmfs/gen/mmsdrfs" 1

  # Generate a file with the requested node information.
  # The awk script selects the MEMBER_NODE lines of the home cluster,
  # builds a comma-separated attribute string (designation, quorum
  # role, add-node state), prefers the daemon node name over the
  # admin name when one is recorded, and appends one record per node.
  # The all-caps variables (from mmsdrfsdef) are spliced into the
  # single-quoted program as literal field numbers and values.
  $rm -f $outfile
  $awk -F: '                                                  \
    BEGIN { separator = "," }                                 \
    /'$HOME_CLUSTER:$MEMBER_NODE:'/ {                         \
      # Collect all attributes for this node.                 \
      { attrs = $'$DESIGNATION_Field' }                       \
      if ( $'$CORE_QUORUM_Field' == "'$quorumNode'" ) {       \
        { attrs = attrs separator "'$QUORUM'" }               \
      }                                                       \
      if ( $'$ADDNODE_STATE_Field' != "" ) {                  \
        { attrs = attrs separator $'$ADDNODE_STATE_Field' }   \
      }                                                       \
      if ( $'$DAEMON_NODENAME_Field' == "" ) {                \
        { nodename = $'$REL_HOSTNAME_Field' }                 \
      } else {                                                \
        { nodename = $'$DAEMON_NODENAME_Field' }              \
      }                                                       \
      # Write the information to the specified file.          \
      { print $'$NODE_NUMBER_Field'  " "                      \
              $'$IPA_Field'          " "                      \
              nodename " " attrs >> "'$outfile'" }            \
    }                                                         \
  ' $mmsdrfsFile
  checkForErrors awk $?

  return 0

}  #----- end of function getNodeListFromSdrfs --------------------


#######################################################################
#
# Function:  Generate a file with the global group names and size of
#            all known VSDs, as well as their primary and secondary
#            servers and recovery state.
#
# Input:     $1 - name of file for the VSD information
#
# Output:    Each record of the output file consists of the following
#            colon-separated fields:
#
#   group_name:vsd_name:size:primary_node:secondary_node:recovery:vgname
#
# Returns:   0 - file created successfully
#            If error is encountered, this function invokes the
#            cleanupAndExit routine which terminates execution.
#
#######################################################################
function getVSDdataSDR
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetVSDdataSDR ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset resultFile=$1

  # Pull the VSD table out of the SDR:  global group name,
  # VSD name, and size for every defined VSD.
  $SDRGetObjects -d ':' -x  VSD_Table  \
     global_group_name VSD_name size_in_MB >$vsdNamesFile
  checkForErrors SDRGetObjects $?

  # Order the VSD records on the global group name (field 1)
  # so they can be joined below.
  $sort -t: -k 1,1 $vsdNamesFile -o $vsdNamesFile
  checkForErrors sort $?

  # Pull the global volume group table out of the SDR:  primary and
  # secondary server, recovery state, and local group name per group.
  $SDRGetObjects -d ':' -x  VSD_Global_Volume_Group global_group_name  \
    primary_node secondary_node recovery local_group_name >$volGroupFile
  checkForErrors SDRGetObjects $?

  # Order the volume group records on the global group name as well.
  $sort -t: -k 1,1 $volGroupFile -o $volGroupFile
  checkForErrors sort $?

  # Merge the two sorted files on the global group name field,
  # producing the combined record the caller asked for.
  $join -t: $vsdNamesFile $volGroupFile >$resultFile
  checkForErrors join $?

  # The intermediate files are no longer needed.
  $rm -f $vsdNamesFile $volGroupFile
  return 0

}  #----- end of function getVSDdataSDR --------------------------


##########################################################################
#
# Function:  Generate a file with the global group names and size of
#            all known VSDs, as well as their primary and secondary
#            servers and recovery state.
#
# Input:     $1 - name of file for the VSD information
#
# Output:    Each record of the output file consists of the following
#            colon-separated fields:
#
#   group_name:vsd_name:size:primary_node:secondary_node:recovery:vgname
#
# Returns:   0 - file created successfully
#            If error is encountered, this function invokes the
#            cleanupAndExit routine which terminates execution.
#
##########################################################################
function getVSDdataRPD
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetVSDdataRPD ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset resultFile=$1

  # The lsrsrc-api resource selection strings, kept in locals so the
  # command invocations and their error messages stay in sync.
  typeset vsdQuery="IBM.vsdtable::::::VSD_name::global_group_name::logical_volume_name::size_in_MB"
  typeset gvgQuery="IBM.vsdgvg::::::global_group_name::local_group_name::primary_node::secondary_node::recovery"

  # Retrieve the name, global group, logical volume, and size of all VSDs.
  $lsrsrcapi -o "$vsdQuery" >$vsdNamesFile
  checkForErrors "getVSDdataRPD: lsrsrc-api -o $vsdQuery" $?

  # Order the VSD records on the global group name (field 3) for the join.
  $sort -t: -k 3,3 $vsdNamesFile -o $vsdNamesFile
  checkForErrors sort $?

  # Retrieve the server and recovery data for the global volume groups.
  $lsrsrcapi -o "$gvgQuery" >$volGroupFile
  checkForErrors "getVSDdataRPD: lsrsrc-api -o $gvgQuery" $?

  # Order the volume group records on the global group name (field 1).
  $sort -t: -k 1,1 $volGroupFile -o $volGroupFile
  checkForErrors sort $?

  # Merge the two sorted files on the global group name, selecting the
  # output fields in the order the caller expects:
  #   group:vsd:size:primary:secondary:recovery:vgname
  $join -t: -1 3 -2 1 -o 1.3,1.1,1.7,2.5,2.7,2.9,2.3 $vsdNamesFile $volGroupFile >$resultFile
  checkForErrors join $?

  # The intermediate files are no longer needed.
  $rm -f $vsdNamesFile $volGroupFile
  return 0

}  #----- end of function getVSDdataRPD ---------------------------


#########################################################################
#
# Function:  Generate a file with the node number and node name for
#            all known VSD nodes.
#
# Input:     $1 - name of the file for storing the VSD node information
#
# Output:    Each record of the output file consists of the following
#            colon-separated fields:
#
#              node_number:node_name
#
# Returns:   0 - file created successfully
#            If an error is encountered, this function invokes the
#            cleanupAndExit routine which terminates execution.
#
#########################################################################
function getVsdNodeDataSDR
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetVsdNodeDataSDR ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset nodeFile=$1

  # Ask the SDR for the node number and reliable hostname of every
  # node, one colon-separated record per node.
  $SDRGetObjects -d ':' -x Node node_number reliable_hostname >$nodeFile
  checkForErrors "getVsdNodeDataSDR: SDRGetObjects" $?

  # Order the records on the first field (the node number).
  $sort -t: -k 1,1 $nodeFile -o $nodeFile
  checkForErrors sort $?

  return 0

}  #----- end of function getVsdNodeDataSDR ---------------------


########################################################################
#
# Function:  Generate a file with the node number and node name for
#            all known VSD nodes.
#
# Input:     $1 - name of the file for storing the VSD node information
#
# Output:    Each record of the output file consists of the following
#            colon-separated fields:
#
#              node_number:node_name
#
# Returns:   0 - file created successfully
#            If an error is encountered, this function invokes the
#            cleanupAndExit routine which terminates execution.
#
########################################################################
function getVsdNodeDataRPD
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetVsdNodeDataRPD ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset outputFile=$1

  typeset nodeData

  # Get the number and name of each of the VSD nodes.
  # Capture the command output first so that the exit status checked
  # below is that of lsrsrc-api itself; in the former pipeline
  # (lsrsrcapi | sed | sed) $? reflected only the trailing sed, so
  # an lsrsrc-api failure went undetected.
  nodeData=$($lsrsrcapi -D ":" -o "IBM.vsdnode::::::VSD_nodenum::NodeNameList")
  checkForErrors "getVsdNodeDataRPD: lsrsrc-api -o IBM.vsdnode::::::VSD_nodenum::NodeNameList" $?

  # Strip the RSCT list braces ("{" and "}") from the NodeNameList
  # values; a single sed invocation replaces the former two.
  if [[ -n $nodeData ]]
  then
    print -- "$nodeData" | $sed -e 's/{//g' -e 's/}//g' >$outputFile
  else
    # No data; still create (or truncate) the output file,
    # matching the behavior of the former pipeline redirection.
    > $outputFile
  fi

  # Sort the output on the first field (the VSD node number).
  $sort -t: -k 1,1 $outputFile -o $outputFile
  checkForErrors sort $?

  return 0

}  #----- end of function getVsdNodeDataRPD -----------------------


###############################################################################
#
# Function:  Correlate GPFS node numbers with the corresponding RSCT
#            node numbers (if any).
#
# Input:     None.
#
# Output:    For each node in the cluster that can access VSD disks,
#            the following two fields (colon-separated, one pair per line)
#            are displayed on stdout:
#
#              GPFSnodeNumber:RSCTnodeNumber
#
# Notes:     GPFS nodes that are not part of the RSCT peer domain are omitted.
#
# Returns:   0 - no errors encountered.
#            If an error is encountered, this function invokes the
#            cleanupAndExit routine which terminates execution.
#
###############################################################################
function getVsdNodeNumbers  #
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetVsdNodeNumbers ]] && set -x
  $mmTRACE_ENTER "$*"

  typeset gpfsNumbers=$tmpfile    # GPFS node number : IP address pairs
  typeset vsdNumbers=$tmpfile2    # VSD (PSSP or RSCT) node number : IP pairs
  typeset vsdPath

  # This function makes no sense on Linux.
  [[ $osName = Linux ]] && return 1

  # Processing depends on which type of VSD code (PSSP or RSCT) is in use.
  # The vsdatalst command is typically a symlink; resolve it with ls -l
  # to see which implementation it points at.
  vsdPath=$(LC_ALL=C $ls -l $vsdatalst 2>/dev/null)
  if [[ $vsdPath = *${vsdatalstPSSP}* && -x $vsdatalstPSSP ]]
  then
    # Find the PSSP node numbers and map them to the GPFS nodes.

    # Get the PSSP node numbers and IP addresses.
    $SDRGetObjects -d ':' -x Adapter node_number netaddr > $adfile
    checkForErrors "getPSSPvsdNumbers: SDRGetObjects Adapter" $?

    # Add multi-link adapters information from the Aggregate_IP class.
    # If the class does not exist, ignore the error.
    $SDRGetObjects -d ':' -x Aggregate_IP node_number ip_address >> $adfile 2>/dev/null
    [[ ! -s $adfile ]] &&  \
      checkForErrors "getVsdNodeNumbers: invalid SDR Adapter data; check SDR" 1

    # Sort the output based on the IP address.
    $sort -t: -k 2,2 $adfile -o $vsdNumbers
    checkForErrors "getPSSPvsdNumbers: sort" $?

  elif [[ $vsdPath = *${vsdatalstRSCT}* && -x $vsdatalstRSCT ]]
  then
    # Find the RSCT peer domain node numbers and map them to the GPFS nodes.

    # Create a file correlating RSCT node identifiers with IP addresses.
    $lsrsrcapi -o  \
       "IBM.NetworkInterface::::::NodeIDs::IPAddress::" > $adfile 2> /dev/null
    checkForErrors "getVsdNodeNumbers:  lsrsrc-api -o IBM.NetworkInterface::::::NodeIDs::IPAddress::" $?
    [[ ! -s $adfile ]] &&  \
      checkForErrors "getVsdNodeNumbers: invalid IBM.NetworkInterface data; check RSCT Peer Domain" 1

    # Sort the output based on the NodeID values.
    $sort -b -k 1,1 $adfile -o $adfile
    checkForErrors "getVsdNodeNumbers: sort" $?

    # Create a file correlating RSCT node identifiers with RSCT node numbers.
    $lsrsrcapi -o "IBM.PeerNode::::::NodeIDs::NodeList" > $rnfile 2> /dev/null
    checkForErrors "getVsdNodeNumbers:  lsrsrc-api -o IBM.PeerNode::::::NodeIDs::NodeList " $?
    [[ ! -s $rnfile ]] &&  \
      checkForErrors "getVsdNodeNumbers: invalid IBM.PeerNode output; check RSCT Peer Domain" 1

    # Sort the output based on the NodeID values.
    $sort -b -k 1,1 $rnfile -o $rnfile
    checkForErrors "getVsdNodeNumbers: sort" $?

    # Create one common file correlating IP addresses with RSCT node numbers.
    # (Join on the NodeID; output is "rsct_node_number:ip_address".)
    $join -1 1 -2 1 -t : -o 2.3,1.3 $adfile $rnfile > $vsdNumbers
    checkForErrors "getVsdNodeNumbers: join" $?

    # Sort the output based on the IP address.
    $sort -t: -k 2,2 $vsdNumbers -o $vsdNumbers
    checkForErrors "getVsdNodeNumbers: sort" $?

  else
    # VSD not installed.
    printErrorMsg 320 "getVSDdata"
    cleanupAndExit
  fi  # end of if [[ $vsdPath = *${vsdatalstPSSP}* && -x $vsdatalstPSSP ]]

  # Create a file correlating GPFS node numbers with IP addresses,
  # sorted on the IP address to line up with the file built above.
  $awk -F: '                                               \
    /':$MEMBER_NODE:'/ {                                   \
        { print $'$NODE_NUMBER_Field' ":" $'$IPA_Field' }  \
    }                                                      \
  ' $mmsdrfsFile | $sort -t: -b -k 2,2 -o $gpfsNumbers
  checkForErrors "getVsdNodeNumbers: awk_sort" $?

  # Correlate the GPFS and RSCT node numbers, sort and display them on stdout.
  $join -1 2 -2 2 -t : -o 1.1,2.1 $gpfsNumbers $vsdNumbers | $sort -t: -k 1,1n

  return 0

}  #----- end of function getVsdNodeNumbers -----------------------


#############################################################################
#
# Function:  get NSD data for the caller
#
# Input:     $1 - file system name
#            (if $GLOBAL_ID or not specified, return data for all NSDs)
#
# Output:    For each NSD satisfying the input criteria, a colon-delimited
#            line of output of the following format is printed:
#
#   diskName:pvid:nsdSubtype:nsdSubtypeDiskname:primaryServer:backupServer
#
# Note:  Since this data is for the daemon, the server node names returned
#        are the node adapter names used by the daemon, not the admin ones.
#
# Returns:   0
#
#############################################################################
function getNsdData  # <fsname>
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetNsdData ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset fsname=$1

  typeset deviceName nsdSubtype server backup
  typeset needToDetermineDaemonNames=no

  # If a file system was passed, calculate the device name value by
  # stripping any leading "/dev/" (with possible extra slashes).
  # fsname does not change, so this is done once for both passes.
  [[ -n $fsname && $fsname != $GLOBAL_ID ]] &&  \
    deviceName=${fsname##+(/)dev+(/)}

  # First pass:  loop through the mmsdrfs file and collect the NSD data
  # using the daemon node names already recorded on the SG_DISKS lines.
  $rm -f $tmpfile
  IFS=":"
  exec 3<&-
  exec 3< $mmsdrfsFile
  while read -u3 sdrfsLine
  do
    # Parse the line.  Globbing is disabled (set -f) so pattern
    # characters in the data are not expanded; the leading "-" element
    # makes the mmsdrfs fields 1-based in array v.
    set -f ; set -A v -- - $sdrfsLine ; set +f
    IFS="$IFS_sv"

    case ${v[$LINE_TYPE_Field]} in

      $SG_DISKS )  # This line describes a disk.
        # Is this an NSD with a valid PVID value?
        if [[ ${v[$DISK_TYPE_Field]} = nsd && -n ${v[$PVID_Field]} ]]
        then
          # Is this a disk in which we are interested?
          if [[ -z $fsname || $fsname = $GLOBAL_ID  ||
                $deviceName = ${v[$DEV_NAME_Field]} ]]
          then
            # Determine the nsd subtype value.
            if [[ -z ${v[$NSD_SUBTYPE_Field]} ]]
            then
              nsdSubtype="generic"
            else
              nsdSubtype=${v[$NSD_SUBTYPE_Field]}
            fi

            # Check whether the daemon node name has been recorded for
            # each NSD server.  If not, set a flag and exit the loop;
            # the slower second pass below will be used instead.
            if [[ ( -n ${v[$NSD_PRIMARY_NODE_Field]} &&      \
                    -z ${v[$DAEMON_NSD_PRIMARY_Field]} ) ||  \
                  ( -n ${v[$NSD_BACKUP_NODE_Field]} &&       \
                    -z ${v[$DAEMON_NSD_BACKUP_Field]} ) ]]
            then
              needToDetermineDaemonNames=yes
              break
            fi

            # Output the data for this NSD.
            print -- ${v[$DISK_NAME_Field]}:${v[$PVID_Field]}:$nsdSubtype:${v[$NSD_SUBTYPE_DISKNAME_Field]}:${v[$DAEMON_NSD_PRIMARY_Field]}:${v[$DAEMON_NSD_BACKUP_Field]} >> $tmpfile

          fi  # end of if [[ -z $fsname || $fsname = $GLOBAL_ID  || ... ]]

        fi  # end of if (v[$DISK_TYPE_Field] == "nsd" && -n v[$PVID_Field])
        ;;

      * )  # We are not interested in any other lines.
        ;;

    esac  # end of case ${v[$LINE_TYPE_Field]} in

    IFS=":"  # Change the separator back to ":" for the next iteration.

  done  # end of while read -u3 sdrfsLine do

  IFS="$IFS_sv"  # Restore the default IFS setting.

  # If all of the NSD servers had daemon names, we are done;
  # output the data and return to the caller.
  if [[ $needToDetermineDaemonNames = no ]]
  then
    [[ -s $tmpfile ]] && $cat $tmpfile
    return 0
  fi

  # If we get here, at least one of the NSD servers did not have a
  # daemon node name.  This is probably because the cluster migrated
  # from an earlier level of GPFS in which they did not exist,
  # and no one has issued mmchconfig release=LATEST to update the
  # mmsdrfs file.  We can still get the NSD data, but it will require
  # more time, since we must look at the MEMBER_NODE lines of the
  # mmsdrfs file as well as the SG_DISKS lines.
  # The mmsdrfs file will be updated with the daemon node names for
  # the NSD servers when the user issues mmchconfig release=LATEST;
  # it cannot be done in this code path.

  # Second pass:  go through the current mmsdrfs file to obtain the NSD
  # data, converting each recorded server name to its daemon node name.
  IFS=":"
  exec 3<&-
  exec 3< $mmsdrfsFile
  while read -u3 sdrfsLine
  do
    # Parse the line.
    set -f ; set -A v -- - $sdrfsLine ; set +f
    IFS="$IFS_sv"

    case ${v[$LINE_TYPE_Field]} in

      $SG_DISKS )  # This line describes a disk.
        # Is this an NSD with a valid PVID value?
        if [[ ${v[$DISK_TYPE_Field]} = nsd && -n ${v[$PVID_Field]} ]]
        then
          # Is this a disk in which we are interested?
          # (deviceName was computed once at the top of the function;
          # it is invariant, so it is not recalculated per line.)
          if [[ -z $fsname || $fsname = $GLOBAL_ID  ||
                $deviceName = ${v[$DEV_NAME_Field]} ]]
          then
            # Determine the nsd subtype value.
            if [[ -z ${v[$NSD_SUBTYPE_Field]} ]]
            then
              nsdSubtype="generic"
            else
              nsdSubtype=${v[$NSD_SUBTYPE_Field]}
            fi

            # If a server node was specified, check that it is valid
            # and convert it if necessary to a daemon adapter name.
            server=${v[$NSD_PRIMARY_NODE_Field]}
            if [[ -n $server ]]
            then
              server=$(checkAndConvertNodeValue $server $DAEMON_NODENAME_Field)
            fi
            backup=${v[$NSD_BACKUP_NODE_Field]}
            if [[ -n $backup ]]
            then
              backup=$(checkAndConvertNodeValue $backup $DAEMON_NODENAME_Field)
            fi

            # Output the data for this NSD.  (The subscript now carries
            # the '$', consistent with the first pass above; the bare
            # name previously relied on arithmetic-context resolution.)
            print -- ${v[$DISK_NAME_Field]}:${v[$PVID_Field]}:$nsdSubtype:${v[$NSD_SUBTYPE_DISKNAME_Field]}:$server:$backup

          fi  # end of if [[ -z $fsname || $fsname = $GLOBAL_ID  || ... ]]

        fi  # end of if (v[$DISK_TYPE_Field] == "nsd" && -n v[$PVID_Field])
        ;;

      * )  # We are not interested in any other lines.
        ;;

    esac  # end of case ${v[$LINE_TYPE_Field]} in

    IFS=":"  # Change the separator back to ":" for the next iteration.

  done  # end of while read -u3 sdrfsLine

  IFS="$IFS_sv"  # Restore the default IFS setting.

  return 0

}  #----- end of function getNsdData ----------------------------


###########################################################################
#
# Function:  Determine whether a given cluster is authorized
#            to access the specified file system.
#
# Input:     $1 - cluster name
#            $2 - file system device name
#
# Output:    accessType [rootSquash=uid:gid]
#
# Returns:    0           - access level determined successfully
#             2 (ENOENT)  - cluster not defined (not allowed to connect)
#            13 (EACCES)  - access denied
#            19 (ENODEV)  - device not found
#            22 (EINVAL)  - device name is not valid
#            93 (EREMOTE) - device name is for a remote fs
#            other unexpected errors
#
###########################################################################
function checkAuth  # <clusterName> <device> [norefresh]
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGcheckAuth ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset clusterName=$1
  typeset device=$2
  typeset refreshArg=$3

  typeset deviceName accessType allowRemoteConnections
  typeset accessTypeString rsquashUid rsquashGid
  typeset rsquashOption=""
  typeset rc=0

  # Without the sdrfs file there is nothing to check; treat as fatal.
  [[ ! -f $mmsdrfsFile ]] &&  \
    checkForErrors "checkAuth - Missing /var/mmfs/gen/mmsdrfs" 1

  # Strip away any /dev/ prefix from the device id.
  deviceName=${device##+(/)dev+(/)}

  # Verify the device name.  After stripping the /dev/ prefix the name
  # must not be an absolute path and must not contain a slash.
  if [[ $deviceName = /* ]]
  then
    printErrorMsg 169 $mmcmd "$device"
    rc=$MM_InvalidName
    return $rc
  elif [[ $deviceName = */* ]]
  then
    printErrorMsg 170 $mmcmd "$device"
    rc=$MM_InvalidName
    return $rc
  fi

  # Ensure the GPFS system data is up to date.
  # Skipped when the caller passes "norefresh" as the third argument.
  if [[ $refreshArg != "norefresh" ]]
  then
    gpfsInitOutput=$(gpfsInit nolock)
    setGlobalVar $? $gpfsInitOutput
  fi

  # If the cluster does not utilize secure connections,
  # rw access is allowed to all of its file systems.
  allowRemoteConnections=$(showCfgValue allowRemoteConnections)
  if [[ $allowRemoteConnections = yes ]]
  then
    print -- "rw"
    return 0
  fi

  # Find out the maximum allowed access for the file system.
  # The awk script prints "accessType:uid:gid:" on success and exits
  # with 0, or exits with one of the errno-style codes listed in the
  # function prologue (EACCES, ENOENT, ENODEV, EREMOTE).
  accessTypeString=$($awk -F: '                               \
    BEGIN {                                                   \
      # Assume the file system does not exist.                \
      { fsNotFound = 1 }                                      \
      # Assume "mmauth add" was not issued for the cluster.   \
      { remoteClusterNotFound = 1 }                           \
      # Assume file system access not allowed (EACCES).       \
      { rc = '$MM_AccessDenied' }                             \
    }                                                         \
                                                              \
    $'$LINE_TYPE_Field' == "'$VERSION_LINE'" {                \
      # If the request is for the local cluster,              \
      # access is granted by default to all file systems.     \
      if ( $'$CLUSTER_NAME_Field' == "'$clusterName'" ||      \
           "'$clusterName'" == "." ) {                        \
        { print "rw:::" }                                     \
        { remoteClusterNotFound = 0 }                         \
        { fsNotFound = 0 }                                    \
        { rc = 0 }                                            \
        { exit }                                              \
      }                                                       \
    }                                                         \
                                                              \
    $'$NODESETID_Field' == "'$clusterName'"       &&          \
    $'$LINE_TYPE_Field' == "'$AUTHORIZED_CLUSTER'" {          \
      # This is the overall authorization record for          \
      # the desired cluster (created by "mmauth add").        \
      { remoteClusterNotFound = 0 }                           \
    }                                                         \
                                                              \
    $'$LINE_TYPE_Field' == "'$SG_HEADR'"  &&                  \
    $'$DEV_NAME_Field'  == "'$deviceName'" {                  \
      # This is the header line for the file system.          \
      if ( $'$FS_TYPE_Field' == "'$remotefs'" ) {             \
        # If it is a remote file system, return an error.     \
        { rc = '$MM_Remotefs' }                               \
        { exit }                                              \
      } else {                                                \
        # Otherwise, keep looking for an AUTHORIZED_FS line.  \
        # Note that it is OK for such a record not to exist.  \
        # All that means is that the given cluster cannot     \
        # access the file system.                             \
        { fsNotFound = 0 }                                    \
      }                                                       \
    }                                                         \
                                                              \
    $'$NODESETID_Field' == "'$clusterName'"   &&              \
    $'$LINE_TYPE_Field' == "'$AUTHORIZED_FS'" &&              \
    $'$DEV_NAME_Field'  == "'$deviceName'"     {              \
      # This is the auth record for the desired cluster and   \
      # file system.  Return the max allowed level of access. \
      { print $'$ACCESS_TYPE_Field'    ":"                    \
              $'$ROOTSQUASH_UID_Field' ":"                    \
              $'$ROOTSQUASH_GID_Field' ":"  }                 \
      { rc = 0 }                                              \
      { exit }                                                \
    }                                                         \
                                                              \
    END {                                                     \
      if ( fsNotFound ) {                                     \
        { rc = '$MM_FsNotFound' }                             \
      }                                                       \
      if ( remoteClusterNotFound ) {                          \
        { rc = '$MM_RemoteNotFound' }                         \
      }                                                       \
      { exit rc }                                             \
    }                                                         \
  ' $mmsdrfsFile)
  rc=$?
  # EACCES is not reported here; it is passed back via accessType "none".
  if [[ $rc -ne 0 && $rc -ne $MM_AccessDenied ]]
  then
    if [[ $rc -eq $MM_FsNotFound ]]
    then
      # File system not found.
      printErrorMsg 288 $mmcmd "$device"
    elif [[ $rc -eq $MM_RemoteNotFound ]]
    then
      # The remote cluster is not authorized to access this cluster.
      printErrorMsg 259 $mmcmd $clusterName
    elif [[ $rc -eq $MM_Remotefs ]]
    then
      # Command is not allowed for remote file systems.
      printErrorMsg 238 $mmcmd
    else
      # Unexpected error.
      checkForErrors awk $rc
    fi
    return $rc
  fi

  # Parse the result.  accessTypeString has the form
  # "accessType:rootSquashUid:rootSquashGid:"; split it on ":"
  # with globbing disabled so no field can expand unexpectedly.
  IFS=":"
  set -f ; set -- $accessTypeString ; set +f
  accessType=$1
  rsquashUid=$2
  rsquashGid=$3
  IFS="$IFS_sv"

  # A non-empty root-squash uid means the rootSquash option must be reported.
  [[ -n $rsquashUid ]] && rsquashOption=" rootSquash=${rsquashUid}:${rsquashGid}"
  # No authorization record for this cluster/fs pair means no access.
  [[ -z $accessType ]] && accessType="none"

  # Put out the result and return.
  print -- "${accessType}${rsquashOption}"
  return $rc

}  #----- end of function checkAuth -----------------------------


######################################################################
#
# Function:  Generate a /etc/filesystems type options line.
#
# Input:     $1 - file system device name
#            $2 - skipMountPointCheck or doMountPointCheck
#
# Output:    The first line contains a comma-separated list of
#            mount options and the names of up to 50 disks.
#            The second line contains the mount point.
#
# Returns:   0 - options line generated successfully
#            19 (ENODEV) - options line not found
#            22 (EINVAL) - device name is not valid
#            1 - some other unexpected error
#
######################################################################
function getEFOptions  # <device> <checkMountPointOpt>
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetEFOptions ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset device=$1
  typeset checkMountPointOpt=$2

  typeset rc=0
  typeset fsLine efOptions minorNumber nodesetId mountPoint fsType
  typeset fsType fsHomeCluster
  # NOTE(review): deviceName is not declared local here (it is in the
  # similar getMountPoint function), so it leaks into the caller's
  # scope; presumably harmless — verify before changing.

  # Without the sdrfs file there is nothing to work with; treat as fatal.
  [[ ! -f $mmsdrfsFile ]] &&  \
    checkForErrors "getEFOptions - Missing /var/mmfs/gen/mmsdrfs" 1

  # Strip away any /dev/ prefix from the device id.
  deviceName=${device##+(/)dev+(/)}

  # Verify the device name.  After stripping the /dev/ prefix the name
  # must not be an absolute path and must not contain a slash.
  if [[ $deviceName = /* ]]
  then
    printErrorMsg 169 $mmcmd "$device"
    rc=$MM_InvalidName
    return $rc
  elif [[ $deviceName = */* ]]
  then
    printErrorMsg 170 $mmcmd "$device"
    rc=$MM_InvalidName
    return $rc
  fi

  # Make sure the mmsdrfs file is current.  To avoid undesired side effects,
  # tell gpfsInit to restrict mount point checking to only the file system
  # that is subject to the current getEFOptions call.  This is done only if
  # the checkMountPointOpt parameter is set to "doMountPointCheck" or "yes".
  [[ $checkMountPointOpt = "doMountPointCheck" || $checkMountPointOpt = "yes" ]] && \
    export MOUNT_POINT_CHECK="$deviceName"
  gpfsInit nolock >/dev/null
  checkForErrors gpfsInit $?
  unset MOUNT_POINT_CHECK

  # The script looks for the SG_MOUNT line for the file system.
  # It examines the individual fields and if they have a value,
  # adds the value to the list.  The fields are comma-separated.
  # After the mount options are processed, the script looks for
  # SG_DISKS lines and puts out the names of the first 50 disks.
  # Disks that are excluded from SG descr operations are skipped.
  # At the end, if a SG_MOUNT line was found, the script returns 0.
  # Otherwise, the return code is set to 19 (ENODEV).
  # Successful output is one line of the form:
  #   <nodesetId> <minorNumber|NULL> <localfs|remotefs> <efOptions> <mountPoint>
  fsLine=$($awk -F: '                                               \
    BEGIN {                                                         \
      { n = 0 }                                                     \
      { mountPoint = "" }                                           \
      { fsType     = "" }                                           \
      { exclOption = "" }                                           \
    }                                                               \
                                                                    \
    $'$LINE_TYPE_Field' == "'$SG_HEADR'"  &&                        \
    $'$DEV_NAME_Field'  == "'$deviceName'" {                        \
      { printf $'$NODESETID_Field' " " }                            \
      if ( $'$DEV_MINOR_Field' != "" ) {                            \
        { printf $'$DEV_MINOR_Field' " " }                          \
      }                                                             \
      else {                                                        \
        { printf "NULL " }                                          \
      }                                                             \
      if ( $'$FS_TYPE_Field' == "'$remotefs'" ) {                   \
        { fsType = "remotefs" }                                     \
        { remoteDevName = $'$REMOTE_DEV_NAME_Field' }               \
      }                                                             \
      else {                                                        \
        { fsType = "localfs" }                                      \
      }                                                             \
      { printf fsType " " }                                         \
    }                                                               \
                                                                    \
    $'$LINE_TYPE_Field'   == "'$SG_ETCFS'"        &&                \
    $'$DEV_NAME_Field'    == "'$deviceName'"      &&                \
    $'$LINE_NUMBER_Field' == "'$MOUNT_POINT_Line'" {                \
      { mountPoint = $'$ETCFS_TEXT_Field'":" }                      \
    }                                                               \
                                                                    \
    $'$LINE_TYPE_Field' == "'$SG_MOUNT'"  &&                        \
    $'$DEV_NAME_Field'  == "'$deviceName'" {                        \
      { printf $'$RW_OPT_Field' }                                   \
      if ( $'$MTIME_OPT_Field' != "" ) {                            \
        { printf ","$'$MTIME_OPT_Field' }                           \
      }                                                             \
      if ( $'$ATIME_OPT_Field' != "" ) {                            \
        { printf ","$'$ATIME_OPT_Field' }                           \
      }                                                             \
      if ( $'$OTHER_OPT_Field' != "" ) {                            \
        { printf ","$'$OTHER_OPT_Field' }                           \
      }                                                             \
      if ( $'$QUOTA_OPT_Field' != "" ) {                            \
        { printf ",quota="$'$QUOTA_OPT_Field' }                     \
      }                                                             \
      if ( fsType == "remotefs" ) {                                 \
        { printf ",dev="$'$NODESETID_Field'":"remoteDevName }       \
        { printf ",ldev='$deviceName'" }                            \
      }                                                             \
      { next }                                                      \
    }                                                               \
                                                                    \
    $'$LINE_TYPE_Field' == "'$SG_DISKS'"  &&                        \
    $'$DEV_NAME_Field'  == "'$deviceName'" {                        \
      if ( $'$EXCLUDE_Field' == "'$excludedDisk'") {                \
        { exclOption = ",exclDisks=yes" }                           \
      }                                                             \
      else if ( $'$DISK_STATUS_Field' == "" ||                      \
                $'$DISK_STATUS_Field' == "ready" ) {                \
        { diskType = $'$DISK_TYPE_Field' }                          \
        # Map certain disk types to the default GPFS driver type.   \
        if ( diskType == "lv" || diskType == "vsd" ) {              \
          { diskType = "" }                                         \
        }                                                           \
        if ( n == 0 ) {                                             \
          { printf ",disks="$'$DISK_NAME_Field'":"diskType }        \
          { n = n + 1 }                                             \
          { next }                                                  \
        }                                                           \
        else if ( n < 50 ) {                                        \
          { printf ";"$'$DISK_NAME_Field'":"diskType }              \
          { n = n + 1 }                                             \
          { next }                                                  \
        }                                                           \
        else {                                                      \
          { exit }                                                  \
        }                                                           \
      }                                                             \
    }                                                               \
                                                                    \
    END {                                                           \
      if ( fsType == "" ) {                                         \
        # Filesystem not found.  Return ENODEV.                     \
        { exit '$MM_FsNotFound' }                                   \
      }                                                             \
      else {                                                        \
        if ( exclOption != "" ) {                                   \
          { printf exclOption }                                     \
        }                                                           \
        # Filesystem found.  Add the mount point to the output.     \
        { printf " " mountPoint "\n" }                              \
        { exit  0 }                                                 \
      }                                                             \
    }                                                               \
  ' $mmsdrfsFile)
  rc=$?
  if [[ $rc -ne 0 || -z $fsLine ]]
  then
    # A zero rc with empty output is still a failure.
    [[ $rc -eq  0 ]] && rc=1
    if [[ $rc -eq $MM_FsNotFound ]]
    then
      # File system not found.
      printErrorMsg 288 $mmcmd "$device"
    else
      # Unexpected error.
      checkForErrors awk $rc
    fi
    return $rc
  fi

  # Parse the result.  The fields are space-separated in the order
  # produced by the awk script above.
  set -f ; set -- $fsLine ; set +f
  fsHomeCluster=$1
  minorNumber=$2
  fsType=$3
  efOptions=$4
  mountPoint=$5

  # Exit with a message if invoked for a remote file system.
  if [[ $fsType = remotefs ]]
  then
    # Command is not allowed for remote file systems.
    printErrorMsg 106 $mmcmd "$device" $fsHomeCluster
    return $MM_Remotefs
  fi

  # Make sure the /dev entry looks good.  Do this only if the
  # checkMountPointOpt parameter is set to "doMountPointCheck".
  if [[ $checkMountPointOpt = "doMountPointCheck" || $checkMountPointOpt = "yes" ]]
  then
    # NOTE(review): neededMajorNumber is expected to be provided by the
    # sourced environment (checkVfsNumber appears to set it on Linux) —
    # confirm before relying on it elsewhere.
    [[ $osName = Linux ]] && checkVfsNumber
    confirmMajorMinor mount /dev/$deviceName $neededMajorNumber $minorNumber
    rc=$?
    if [[ $rc -ne 0 ]]
    then
      # /dev entry not created by GPFS.
      printErrorMsg 462 "$mmcmd" "/dev/$deviceName"
      # Pretend everything is OK and let the daemon
      # decide whether it can mount the file system.
      rc=0
    fi
  fi  # end of if [[ $checkMountPointOpt = "doMountPointCheck" ]]

  # Ensure the output does not overrun the daemon input buffers.
  # If necessary, take out one or more disks from the efOptions string.
  # Each iteration drops the last ";"-separated disk entry.
  while [[ ${#efOptions} -gt 1023 ]]
  do
    efOptions="${efOptions%;*}"
  done

  # Put out the result and return.
  print -- "$efOptions"
  print -- "$mountPoint"
  return $rc

}  #----- end of function getEFOptions --------------------------


#####################################################################
#
# Function:  Retrieve the mount point for a file system.
#
# Input:     $1 - file system device name
#
# Output:    The mount point for the file system.
#
# Returns:   0 - mount point generated successfully
#            19 (ENODEV) - file system not found
#            22 (EINVAL) - device name is not valid
#            1 - some other unexpected error
#
#####################################################################
function getMountPoint  # <device>
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgetMountPoint ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset device=$1
  # typeset checkMountPointOpt=$2

  typeset rc=0
  typeset mountPoint deviceName

  # The sdrfs file is required; bail out if it is missing.
  [[ ! -f $mmsdrfsFile ]] &&  \
    checkForErrors "getMountPoint - Missing /var/mmfs/gen/mmsdrfs" 1

  # Normalize the device id by removing any leading /dev/ prefix.
  deviceName=${device##+(/)dev+(/)}

  # Validate the normalized name: it may not be an absolute path
  # and may not contain embedded slashes.
  case $deviceName in
    /* )
      printErrorMsg 169 $mmcmd "$device"
      rc=$MM_InvalidName
      return $rc
      ;;
    */* )
      printErrorMsg 170 $mmcmd "$device"
      rc=$MM_InvalidName
      return $rc
      ;;
  esac

  # Note:  We will assume that at the point this function is invoked,
  #        the /etc/filesystems information in general, and the mount
  #        point in particular, are up-to date. Therefore, we will skip
  #        the call to gpfsInit.  If this assumption turns out to be
  #        inaccurate, the sections of code that deal with gpfsInit
  #        and confirmMajorMinor will have to be activated.

  # Make sure the mmsdrfs file is current.  To avoid undesired side effects,
  # tell gpfsInit to restrict mount point checking to only the file system
  # that is subject to the current getMountPoint call.  This is done only if
  # the checkMountPointOpt parameter is set to "doMountPointCheck" or "yes".
  # [[ $checkMountPointOpt = "doMountPointCheck" || $checkMountPointOpt = "yes" ]] && \
  #   export MOUNT_POINT_CHECK="$deviceName"
  # gpfsInit nolock >/dev/null
  # checkForErrors gpfsInit $?
  # unset MOUNT_POINT_CHECK

  # Scan the sdrfs file for the first SG_ETCFS line belonging to this
  # device and emit the mount point recorded there.  The awk program
  # exits 0 when the line is found, 19 (ENODEV) otherwise.
  mountPoint=$($awk -F: '                             \
    $'$LINE_TYPE_Field'   == "'$SG_ETCFS'"        &&  \
    $'$DEV_NAME_Field'    == "'$deviceName'"      &&  \
    $'$LINE_NUMBER_Field' == "'$MOUNT_POINT_Line'" {  \
      { mountPoint = $'$ETCFS_TEXT_Field'":" }        \
      { exit }                                        \
    }                                                 \
    END {                                             \
      if ( mountPoint == "" ) {                       \
        # Filesystem not found.  Return ENODEV.       \
        { exit '$MM_FsNotFound' }                     \
      }                                               \
      else {                                          \
        # Filesystem found.  Return the mount point.  \
        { print mountPoint }                          \
        { exit  0 }                                   \
      }                                               \
    }                                                 \
  ' $mmsdrfsFile)
  rc=$?
  if [[ $rc -ne 0 || -z $mountPoint ]]
  then
    # Empty output with a zero exit code is still a failure.
    [[ $rc -eq 0 ]] && rc=1
    if [[ $rc -eq $MM_FsNotFound ]]
    then
      # The file system is not known to this cluster.
      printErrorMsg 288 $mmcmd "$device"
    else
      # Anything else is an unexpected awk failure.
      checkForErrors awk $rc
    fi
    return $rc
  fi

  # Make sure the /dev entry looks good.  Do this only if the
  # checkMountPointOpt parameter is set to "doMountPointCheck".
  # if [[ $checkMountPointOpt = "doMountPointCheck" || $checkMountPointOpt = "yes" ]]
  # then
  #   [[ $osName = Linux ]] && checkVfsNumber
  #   confirmMajorMinor mount /dev/$deviceName $neededMajorNumber $minorNumber
  #   rc=$?
  #   if [[ $rc -ne 0 ]]
  #   then
  #     # /dev entry not created by GPFS.
  #     printErrorMsg 462 "$mmcmd" "/dev/$deviceName"
  #     # Pretend everything is OK and let the daemon
  #     # decide whether it can mount the file system.
  #     rc=0
  #   fi
  # fi  # end of if [[ $checkMountPointOpt = "doMountPointCheck" ]]

  # Emit the mount point and pass back the return code.
  print -- "$mountPoint"
  return $rc

}  #----- end of function getMountPoint -------------------------


#####################################################################
#
# Function:  This function is called by the mmfsd daemon during
#            initialization, before it runs recovery for any VFS
#            mount points that may already be mounted.  The only
#            thing that this function presently does is to invoke
#            the /var/mmfs/etc/gpfsready user exit.
#
# Note:      The daemon will wait until this script returns.
#            Do not invoke long running commands synchronously
#            from this function.  Specifically, do not start
#            mount commands here and do not do anything that
#            assumes a mounted GPFS file system.
#
# Input:     None
#
# Output:    None
#
# Returns:   0 - no errors encountered
#            1 - unexpected error
#
#####################################################################
function gpfsready   #
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgpfsready ]] && set -x
  $mmTRACE_ENTER "$*"

  typeset userExit=${mmfscfgDir}gpfsready

  print "$(date): mmcommon gpfsready invoked"

  ########################################################
  # Hand control to the /var/mmfs/etc/gpfsready user exit,
  # provided one is installed and executable.
  ########################################################
  if [[ -x $userExit ]]
  then
    print "$(date): $userExit invoked"
    $userExit
  fi

  return 0

}  #----- end of function gpfsready -------------------------


#####################################################################
#
# Function:  This function is called by the mmfsd daemon when it is
#            up and ready for sessions.  It mounts all file systems
#            that have to be mounted at daemon startup time and then
#            invokes the /var/mmfs/etc/mmfsup user exit.
#
# Input:     $1 - IP address of this node
#            $2 - IP address of the config manager node
#
# Output:    None
#
# Returns:   0 - no errors encountered
#            1 - unexpected error
#
#####################################################################
function mmfsup   # <localIPaddr> <ccMgrIPaddr>
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGmmfsup ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset localIPaddr=$1
  typeset ccMgrIPaddr=$2

  typeset mountedFileSystems device mountOptions

  print "$(date): mmcommon mmfsup invoked"

  ##############################################################
  # Mount all GPFS file systems for which -A yes was specified.
  # If there are such file systems, their device names are put
  # in file /var/mmfs/gen/automount.  The device names in this
  # file are ordered to avoid simultaneous mounting of the same
  # file system from all nodes in the cluster.
  ##############################################################
  if [[ -s $startupMountFile && ! -f $ignoreStartupMount ]]
  then
    # Create a list of the currently mounted file systems on this node.
    if [[ $osName = AIX ]]
    then
      $mount >$tmpfile 2>/dev/null
      mountedFileSystems=$tmpfile
    elif [[ $osName = Linux ]]
    then
      mountedFileSystems=/proc/mounts
    else
      # Should never get here.
      printErrorMsg 171 "$mmcmd" "mmfsup: unsupported OS $osName" 1
      return 1
    fi  # end of if [[ $osName = AIX ]]

    # Process the list of file systems.
    exec 3< $startupMountFile
    while read -u3 device
    do
      # If file ignoreStartupMount exists for this file system,
      # do not attempt to mount it.
      [[ -s ${ignoreStartupMount}.${device#/dev/} ]] &&  \
        continue

      # See if there are local override mount options for this file system.
      # Only the last line of the per-device options file is honored.
      if [[ -s ${localMountOptions}.${device#/dev/} ]]
      then
        mountOptions="-o $($tail -n -1 ${localMountOptions}.${device#/dev/} 2>/dev/null)"
      else
        mountOptions=""
      fi

      # See if this file system is already mounted.  Search the list
      # built above: the captured mount output on AIX, /proc/mounts on
      # Linux.  (Fix: this formerly searched $tmpfile unconditionally,
      # which on Linux never holds the mount list, so every device was
      # re-mounted on each daemon startup.)
      $grep -qw $device $mountedFileSystems > /dev/null 2>&1
      if [[ $? -ne 0 ]]
      then
        # The file system is not mounted.  Do it now.
        print -- "$(date): mounting $device"
        $mount $mountOptions $device 2>&1 | $grep -i -v "already mounted"
        print -- "$(date): finished mounting $device"
      fi
    done
  fi  # end of if [[ -s $startupMountFile ]]

  #####################################################
  # Invoke the user exit, if it is properly installed.
  #####################################################
  if [[ -x ${mmfscfgDir}mmfsup ]]
  then
    print "$(date): ${mmfscfgDir}mmfsup invoked"
    ${mmfscfgDir}mmfsup $localIPaddr $ccMgrIPaddr
  elif [[ -x ${mmfscfgDir}mmfsup.scr ]]
  then
    print "$(date): ${mmfscfgDir}mmfsup.scr invoked"
    ${mmfscfgDir}mmfsup.scr $localIPaddr $ccMgrIPaddr
  fi

  return 0

}  #----- end of function mmfsup -------------------------


#####################################################################
#
# Function:  This function is called by the mmfsd daemon during
#            normal and abnormal shutdown processing. A number of
#            cleanup tasks are performed and then the function
#            invokes the /var/mmfs/etc/mmfsdown user exit.
#
# Input:     None
#
# Output:    None
#
# Returns:   0 - no errors encountered
#            1 - unexpected error
#
#####################################################################
function mmfsdown   #
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGmmfsdown ]] && set -x
  $mmTRACE_ENTER "$*"

  typeset pid status

  ############################################
  # Determine whether GPFS will be restarted.
  # A runmmfs process found in the ps output (the "this process"
  # pattern keeps the awk pipeline from matching itself), together
  # with the rerun flag file, means the daemon is being respawned.
  ############################################
  pid=$($ps -eo "pid args" | $awk '/\/runmmfs/ && !/this process/ {print $1}')
  if [[ -n $pid && -f $rerunmmfsFile ]]
  then
    status=active
  else
    status=down
  fi

  print "$(date): mmcommon mmfsdown invoked.  Subsystem: mmfs  Status: $status"
  ########################
  # Do the cleanup tasks.
  ########################
  if [[ $status = active ]]
  then
    #---------------------------------------------
    # The GPFS daemon died but will be restarted.
    #---------------------------------------------
    if [[ -n "$MMTRACE" ]]
    then
      # If the MMTRACE environment variable is set,
      # stop tracing on this node.
      $mmtrace stop

      if [[ $MMTRACE = global ]]
      then
        # If tracing was started on all nodes, cut trace
        # records on all of the nodes in the cluster.
        getNodeList $REL_HOSTNAME_Field $GLOBAL_ID $mmsdrfsFile > $nodefile
        $mmcommon onall $nodefile $unreachedNodes adminCmd mmtrace
      fi
    fi  # end of if [[ -n "$MMTRACE" ]]

  else
    #-----------------------------------
    # The GPFS daemon is shutting down.
    #-----------------------------------
    if [[ -n "$MMTRACE" ]]
    then
      # If the MMTRACE environment variable is set,
      # stop tracing on this node.
      $mmtrace stop
    fi

    # Force unmount of all gpfs file systems.
    printInfoMsg 426 "$(date)" $mmcmd
    if [[ $osName = AIX ]]
    then
      unmountFileSystems all -f 2>/dev/null
    elif [[ $osName = Linux ]]
    then
      # Note: Do not redirect stderr on Linux umount.
      unmountFileSystems all -f
    else
      # Should never get here.
      printErrorMsg 171 "$mmcmd" "mmfsdown: unsupported OS $osName" 1
      return 1
    fi

    # Remove the respawn file since the daemon will not be restarted.
    $rm -f $respawnlog
    # NOTE(review): on the unsupported-OS path above the function returns
    # before invoking the user exit — presumably intentional; confirm.

  fi  # end of if [[ $status = active ]]

  #####################################################
  # Invoke the user exit, if it is properly installed.
  # The restart status (active/down) is passed along so
  # the exit can distinguish respawn from real shutdown.
  #####################################################
  if [[ -x ${mmfscfgDir}mmfsdown ]]
  then
    print "$(date): ${mmfscfgDir}mmfsdown invoked"
    ${mmfscfgDir}mmfsdown $status
  elif [[ -x ${mmfscfgDir}mmfsdown.scr ]]
  then
    print "$(date): ${mmfscfgDir}mmfsdown.scr invoked"
    ${mmfscfgDir}mmfsdown.scr $status
  fi

  return 0

}  #----- end of function mmfsdown -------------------------


#####################################################################
#
# Function:  This function is called prior to unmounting a file system
#            in response to an explicit mmumount or mmshutdown command.
#            The function is also invoked by the mmfsd daemon during
#            normal and abnormal shutdown processing.
#
# Input:     $1 - file system device name or all
#            $2 - reason: umount or cleanup
#
# Output:    None
#
# Returns:   0 - no errors encountered
#            1 - unexpected error
#
#####################################################################
function preunmount   # <device> <reason>
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGpreunmount ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset device=$1    # file system device name, or "all"
  typeset reason=$2    # why we were called: "umount" or "cleanup"

  typeset timeout=5     # max seconds to wait for the user exit to finish
  typeset waited=0      # seconds waited so far; set to -1 once the exit finishes
  typeset retrydelay=1  # polling interval, in seconds
  typeset pid           # reused: first a runmmfs pid, later the user exit's pid


  ###################################################################
  # If the reason for the call is daemon cleanup, determine if this
  # is a normal or abnormal shutdown.  If normal shutdown, the exit
  # must have already been invoked by mmremote shutdownDaemon.
  ###################################################################
  # Log the invocation only when the user exit is actually installed.
  if [[ -x ${mmfscfgDir}preunmount ]]
  then
    print "$(date): mmcommon preunmount invoked.  File system: $device  Reason: $reason"
  fi
  if [[ $reason = cleanup ]]
  then
    # Determine whether GPFS will be restarted.
    # Scan the process table for a runmmfs process; the "this process"
    # pattern presumably filters out the scanning pipeline's own
    # command line -- TODO confirm against runmmfs.
    pid=$($ps -eo "pid args" | $awk '/\/runmmfs/ && !/this process/ {print $1}')
    if [[ -n $pid && -f $rerunmmfsFile ]]
    then
      # This is abnormal shutdown; the daemon will be restarted.
      :  # keep going with the rest of the code.
    else
      # This is normal shutdown; the daemon will not be restarted.
      # This function must have already been invoked by mmremote shutdownDaemon.
      return 0
    fi
  fi  # end of if [[ $reason = cleanup ]]

  #############################################################
  # Invoke the user exit, if it is properly installed.
  # Allow no more than 5 seconds for the user code to finish.
  # If the user process does not return within this time,
  # the function will return.  This is intended to prevent
  # GPFS recovery from hanging.
  #############################################################
  if [[ -x ${mmfscfgDir}preunmount ]]
  then
    print "$(date): ${mmfscfgDir}preunmount invoked"
    # Run the exit in the background so the timeout can be enforced.
    ${mmfscfgDir}preunmount $device $reason &
    pid=$!

    # Wait until the user exit finishes or the timeout expires.
    while [[ $waited -lt $timeout ]]
    do
      $sleep $retrydelay
      (( waited = waited + retrydelay ))

      # kill -0 only probes for the process's existence; no signal is sent.
      if ! kill -0 $pid 2>/dev/null
      then
        # The user process finished.
        waited=-1
        break
      fi
    done  # while [[ $waited -lt $timeout ]]

    # Inform the user if the exit did not finish in the allotted time.
    # (waited is -1 when the exit finished, so only timeout paths print.)
    if [[ $waited -gt 0 ]]
    then
      print "$(date): ${mmfscfgDir}preunmount not finished after $waited seconds."
      print "    The process is left to run in the background."
    fi
  fi  # end of if [[ -x ${mmfscfgDir}preunmount ]]

  return 0

}  #------------ end of function preunmount -------------------------


#####################################################################
#
# Function:  This function is called by the mmfsd daemon during
#            node failure recovery before any distributed locks
#            held by failed nodes are released.  This script is
#            invoked once for each file system on all nodes on
#            which the file system is mounted or otherwise in use.
#            The only thing that this function presently does is
#            to invoke the /var/mmfs/etc/gpfsrecover user exit.
#
# Note:      The daemon will wait until this script returns.
#            Do not invoke long running commands synchronously
#            from this function.  Specifically, do not start
#            mount commands here and do not attempt to access
#            any GPFS files or directories.
#
# Input:     $1 - file system name
#            $2 - recovery phase (always 0)
#            $3, $4, ... - node numbers of the failed nodes.
#
# Output:    None
#
# Returns:   0 - no errors encountered
#            1 - unexpected error
#
#####################################################################
function gpfsrecover   #
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGgpfsrecover ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset fsDevice=$1       # file system name
  typeset recoveryPhase=$2  # recovery phase (always 0)
  shift 2
  typeset downNodes=$*      # node numbers of the failed nodes

  # Log the invocation.  The daemon waits for this script to return,
  # so nothing long-running may be started synchronously here.
  print "$(date): mmcommon gpfsrecover invoked:"
  print "    fsname=$fsDevice  phase=$recoveryPhase  failed nodes=\"$downNodes\""

  #####################################################
  # Invoke the user exit, if it is properly installed.
  # The unsuffixed name takes precedence over the
  # ".scr" variant.
  #####################################################
  typeset userExit=""
  if [[ -x ${mmfscfgDir}gpfsrecover ]]
  then
    userExit="${mmfscfgDir}gpfsrecover"
  elif [[ -x ${mmfscfgDir}gpfsrecover.scr ]]
  then
    userExit="${mmfscfgDir}gpfsrecover.scr"
  fi

  if [[ -n $userExit ]]
  then
    print "$(date): $userExit invoked"
    $userExit $fsDevice $recoveryPhase $downNodes
  fi

  return 0

}  #----- end of function gpfsrecover -------------------------


#####################################################################
#
# Function:  Reconcile the mmsdrfs file with the GPFS daemon's data
#            for the passed filesystem.
#
# Input:     fsname
#
# Output:    The mmsdrfs file is brought into sync with the
#            GPFS daemon's view of the passed filesystem.
#
# Returns:   0 - no errors encountered
#            1 - unexpected error
#
#####################################################################
function recoverfs   # <fsname>
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGrecoverfs ]] && set -x
  $mmTRACE_ENTER "$*"

  typeset fsname=$1    # file system device name as given by the caller
  typeset nodeCount versionLine rc oddState
  typeset findFSoutput fqDeviceName deviceName fsHomeCluster
  integer newGenNumber  # generation number to commit with the new mmsdrfs

  # Set up trap exception handling and call the gpfsInit function.
  # It will ensure that the local copy of the mmsdrfs and the rest of the
  # GPFS system files are up-to-date and it will also obtain the sdr lock.
  trap pretrap HUP INT QUIT KILL
  gpfsInitOutput=$(gpfsInit $lockId)
  setGlobalVar $? $gpfsInitOutput

  # Locate the file system in the mmsdrfs file; give up if it is not
  # found (findFS presumably issues its own error message -- TODO confirm).
  findFSoutput=$(findFS "$fsname" $mmsdrfsFile)
  [[ -z $findFSoutput ]] && cleanupAndExit

  # Parse the output from the findFS function.
  # (set -f suppresses globbing while the output is word-split.)
  set -f ; set -- $findFSoutput ; set +f
  fqDeviceName=$1
  deviceName=$2
  fsHomeCluster=$3

  # Exit with a message if the command was invoked for a remote file system.
  if [[ $fsHomeCluster != $HOME_CLUSTER ]]
  then
    # Command is not allowed for remote file systems.
    printErrorMsg 106 $mmcmd $fsname $fsHomeCluster
    cleanupAndExit
  fi

  # Create a file with the reliable names of the nodes that belong to
  # the nodeset to which the specified file system belongs.
  $rm -f $nodefile
  nodeCount=$(getNodeFile $REL_HOSTNAME_Field $fsHomeCluster $mmsdrfsFile $nodefile)
  if [[ $nodeCount -eq 0 ]]
  then
    # The nodeset is empty; there is nobody to run the command.
    printErrorMsg 171 $mmcmd "getNodeFile (nodeCount=0)" 1
    cleanupAndExit
  fi

  # Copy the sdrfs file to a temporary file.
  $cp $mmsdrfsFile $tmpsdrfs

  # Reconcile the sdrfs file with the GPFS daemon's view of the filesystem.
  # NOTE(review): oddState captures the function's stdout but is not
  # examined afterwards; only the return code is checked.
  oddState=$(reconcileSdrfsWithDaemon $deviceName $tmpsdrfs)
  rc=$?
  if [[ $rc -ne 0 ]]
  then
    # reconcileSdrfsWithDaemon failed.
    printErrorMsg 171 $mmcmd reconcileSdrfsWithDaemon $rc
    cleanupAndExit
  fi

  # Obtain the generation number from the version line of the new sdrfs file.
  versionLine=$($head -1 $tmpsdrfs)
  IFS=':'
  set -f ; set -- $versionLine ; set +f
  newGenNumber=$6   # sixth ':'-field of the version line (the gen number)
  IFS="$IFS_sv"     # restore the saved default field separators

  # Commit the new mmsdrfs file.
  trap "" HUP INT QUIT KILL    # Disable interrupts until the commit is done.
  gpfsObjectInfo=$(commitChanges  \
    $GLOBAL_ID $fsHomeCluster $gpfsObjectInfo $newGenNumber $tmpsdrfs $primaryServer)
  rc=$?
  if [[ $rc -ne 0 ]]
  then
    # The commit step failed.
    printErrorMsg 381 $mmcmd
    # Tell the user what to do.
    printErrorMsg 190 $mmcmd $fsname $fsname
    cleanupAndExit
  fi

  # Unlock the sdr.
  [[ $sdrLocked = yes ]] &&  \
    freeLockOnServer $primaryServer $ourNodeNumber > /dev/null
  sdrLocked=no
  trap posttrap HUP INT QUIT KILL   # Enable interrupts again.

  # Propagate the changes to all affected nodes.
  # ("async": presumably does not wait for completion -- TODO confirm.)
  propagateSdrfsFile async $nodefile $mmsdrfsFile $newGenNumber

  # If installed, invoke the syncfsconfig user exit.
  if [[ -x $syncfsconfig ]]
  then
     print -- "$mmcmd:  Starting $syncfsconfig ..."
     $syncfsconfig
     print -- "$mmcmd:  $syncfsconfig finished."
  fi

  cleanupAndExit 0

}  #----- end of function recoverfs ------------------------------


#####################################################################
#
# Function:  Reset the starting integer for NSD names generation.
#            This is a service-only command.
#
# Input:     $1 - nsdBaseNumber
#
# Output:    None
#
# Returns:   0 - no errors encountered
#            1 - unexpected error
#
#####################################################################
function resetNsdNumber    #  <nsdBaseNumber>
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGresetNsdNumber ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset nsdBaseNumber=$1   # new starting number for generated NSD names

  typeset gpfsInitOutput sdrfsLine
  typeset rc
  # Declare newGenNumber with the integer attribute so the "gennum+1"
  # assignment below is evaluated arithmetically, as done in recoverfs.
  # With a plain typeset the variable would hold the literal string
  # "<gennum>+1", which would then be written into the version line and
  # passed to commitChanges.
  integer newGenNumber

  #######################################################################
  # Set up trap exception handling and call the gpfsInit function.
  # It will ensure that the local copy of the mmsdrfs and the rest of
  # the GPFS system files are up-to-date and will obtain the sdr lock.
  #######################################################################
  trap pretrap HUP INT QUIT KILL
  gpfsInitOutput=$(gpfsInit $lockId)
  setGlobalVar $? $gpfsInitOutput

  ########################################################################
  # Go through the current mmsdrfs file.  Increment the generation
  # number and build the node name lists that will be needed later.
  # Set HIGHEST_GPFS_DISK_NBR_Field to the specified value.
  ########################################################################
  $rm -f $newsdrfs $nodefile
  IFS=":"            # mmsdrfs fields are colon-separated
  exec 3<&-          # close fd 3 in case it was left open
  exec 3< $mmsdrfsFile
  while read -u3 sdrfsLine
  do
    # Parse the line.  The leading "-" placeholder makes the mmsdrfs
    # fields 1-based within the v[] array.
    set -f ; set -A v -- - $sdrfsLine ; set +f
    IFS="$IFS_sv"    # restore default separators while processing the line

    case ${v[$LINE_TYPE_Field]} in

      $VERSION_LINE )  # this is the global header line
        # Increment the generation number (arithmetic; see declaration above).
        newGenNumber=${v[$SDRFS_GENNUM_Field]}+1
        v[$SDRFS_GENNUM_Field]=$newGenNumber
        v[$HIGHEST_GPFS_DISK_NBR_Field]=$nsdBaseNumber
        ;;

      $MEMBER_NODE )   # this line describes a node
        # Collect the reliable names of all nodes in the cluster.
        print -- "${v[$REL_HOSTNAME_Field]}" >> $nodefile
        checkForErrors "writing to file $nodefile" $?
        ;;

      $SG_DISKS )  # this line describes a disk
        # Renumbering is only allowed while no disks exist yet.
        print -u2 "$mmcmd: There are disks in the cluster."
        print -u2 "$mmcmd: The base number for naming disks cannot be changed."
        cleanupAndExit
        ;;

      * )  # We are not interested in any other lines.
        ;;

    esac  # end Change some of the fields

    # Build and write the line to the new mmsdrfs file.
    # (print_newLine presumably re-joins the v[] fields -- see mmglobfuncs.)
    print_newLine >> $newsdrfs
    checkForErrors "writing to file $newsdrfs" $?

    IFS=":"  # Change the separator back to ":" for the next iteration.

  done  # end while read -u3

  IFS="$IFS_sv"  # Restore the default IFS settings.

  #######################
  # Commit the changes.
  #######################
  trap "" HUP INT QUIT KILL
  gpfsObjectInfo=$(commitChanges  \
     $nsId $nsId $gpfsObjectInfo $newGenNumber $newsdrfs $primaryServer)
  rc=$?
  if [[ $rc -ne 0 ]]
  then
    # The commit step failed
    printErrorMsg 381 $mmcmd
    cleanupAndExit
  fi

  ##################
  # Unlock the sdr.
  ##################
  [[ $sdrLocked = yes ]] &&  \
    freeLockOnServer $primaryServer $ourNodeNumber > /dev/null
  sdrLocked=no
  trap posttrap HUP INT QUIT KILL

  ###############################################
  # Propagate the changes to all affected nodes.
  ###############################################
  propagateSdrfsFile async $nodefile $newsdrfs $newGenNumber

  cleanupAndExit 0

}  #----- end of function resetNsdNumber --------------------------


######################################################################
#
# Function:  Save the specified file on both the primary and backup
#            server nodes.  Used in conjunction with tspreparedisk to
#            create backup copy of the SG descriptor being migrated.
#            The information is stored in /var/mmfs/tmp/mmimportfs
#            The file name has the following format:
#            tspreparedisk.diskDesc.<diskName>.<checksum>.<timestamp>
#
# Input:     $1 - file name with the following format:
#                 /var/mmfs/tmp/tspreparedisk.diskDesc.<diskName>
#
# Output:    None
#
# Returns:   0 - no errors encountered
#            non-zero - error encountered
#
######################################################################
function saveSGDescFile    #  <sgDescFile>
{
  typeset sourceFile="mmcommon.sh"
  [[ -n $DEBUG || -n $DEBUGsaveSGDescFile ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset srcFile=$1

  typeset sumResult srcCksum nowStamp destName srv
  typeset moveOutput replyKword replyStatus replyError

  # This service exists only for the lc and single cluster types.
  if [[ $MMMODE != lc && $MMMODE != single ]]
  then
    # Command not supported.
    printErrorMsg 376 "$mmcmd:saveSGDescFile" $MMMODE
    cleanupAndExit
  fi

  # Return ENOENT if file is not there or if it is empty.
  if [[ ! -s $srcFile ]]
  then
    return 2
  fi

  # Calculate the checksum for the source file.
  sumResult=$($sum $srcFile)
  checkForErrors "sum $srcFile" $?
  set -f ; set -- $sumResult ; set +f
  srcCksum=$1

  # Get a current timestamp.
  nowStamp=$($perl -e 'print time')

  # Generate the name under which the file will be stored
  # on the primary and backup configuration servers.
  destName=${tmpDir}mmimportfs/${srcFile##*/}.${srcCksum}.${nowStamp}

  # Commit the file to the server nodes.
  [[ -z $primaryServer ]] && determineMode
  for srv in $primaryServer $backupServer
  do
    # Move the file locally if we are the server node; go remote otherwise.
    if [[ $srv = $ourNodeName ]]
    then
      moveOutput=$($mmremote mvSGDescFile  \
                     $srcFile $destName $srcCksum $ourNodeName)
      rc=$?
    else
      moveOutput=$(run onNode $srv mvSGDescFile  \
                     $srcFile $destName $srcCksum $ourNodeName)
      rc=$?
    fi

    # Parse the colon-delimited reply:  mvSGDescFile:<status>:<errorCode>
    IFS=":"
    set -f ; set -- $moveOutput ; set +f
    replyKword=$1
    replyStatus=$2
    replyError=$3
    IFS="$IFS_sv"

    if [[ $replyKword != mvSGDescFile ]]
    then
      # Unexpected error from mmremote or mmdsh.
      printErrorMsg 171 $mmcmd "mmremote mvSGDescFile" 1
      return 1
    fi

    if [[ $replyStatus != success ]]
    then
      case $replyError in
        copyfile | checksum )
          # Error retrieving data from client.
          printErrorMsg 379 $mmcmd $ourNodeName $srv
          ;;
        * )
          # Unexpected error from mmremote or mmdsh.
          printErrorMsg 171 $mmcmd "mmremote mvSGDescFile" 1
          ;;
      esac
      return 1
    fi  # end of if [[ $replyStatus != success ]]

  done  # for srv in $primaryServer $backupServer


  # Everything seems to have worked fine.
  # Remove the source file and return.
  $rm -f $srcFile
  return 0

}  #----- end of function saveSGDescFile -------------------------



#######################
# Mainline processing
#######################

kword=$arg1
kword_lc=$arg1
arg3_lc=$arg3

if [[ -z $kword ]]
then
  # Missing keyword
  printErrorMsg 133 mmcommon NULL
  cleanupAndExit
fi

# Set up silent trap exception handling.
trap pretrap3 HUP INT QUIT KILL

# Determine the execution environment and set needed global variables.
if [[ $arg3_lc  = checknewclusternode* ||
      $arg3_lc  = removefromclustercr  ||
      $kword_lc = getdiskdevices       ]]
then
  # If the node is not yet a member of the GPFS cluster,
  # the functions to determine the local data do not work.
  [[ -z $ourNodeName ]] && ourNodeName=$($hostname)
else
  # In all other cases, file mmsdrfs should already exist
  # and we can use it as a starting point.
  [[ -z $MMMODE || -z $environmentType ]] && determineMode
  getLocalNodeData
fi

# Make sure we have the proper credentials.
[[ $getCredCalled = no ]] && getCred

# Reset the remote commands if necessary.
[[ -n $GPFS_rshPath ]] && rsh=$GPFS_rshPath
[[ -n $GPFS_rcpPath ]] && rcp=$GPFS_rcpPath

# Perform the action requested by the keyword.
case $kword_lc in

                   #----------------------------------------
  init)            # mmcommon init <lockid | nolock>
                   #----------------------------------------
    if [[ $argc -ne 2 ]]
    then
      operands="<lockid | nolock>"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Update local system files; lock the sdr if requested.
    gpfsInit $arg2
    rc=$?
    ;;

                     #----------------------------------------
  getlocalnodename)  # mmcommon getLocalNodeName
                     #----------------------------------------
    print -- "$ourNodeName"
    rc=0
    ;;

                     #----------------------------------------
  getclusternodes)   # mmcommon getClusterNodes
                     #----------------------------------------
    # Return the reliable hostnames of all nodes in the cluster.
    getNodeList $REL_HOSTNAME_Field $GLOBAL_ID $mmsdrfsFile
    rc=$?
    ;;

                         #----------------------------------------
  getnodedatafordaemon)  # mmcommon getNodeDataForDaemon <clType>
                         #----------------------------------------
    # Set MMMODE from the passed cluster type parameter.
    [[ -n $arg2 ]] && export MMMODE=$arg2

    # Get the data from the mmsdrfs file.
    getNodeDataFromSdrfs $sdrNodeFile "."
    rc=$?

    # Return the result to the caller via standard output.
    [[ $rc -eq 0 ]] && $cat $sdrNodeFile
    ;;

               #----------------------------------------
  getnodelist) # mmcommon getNodeList [norefresh]
               #----------------------------------------
    # Get the data from the mmsdrfs file.
    getNodeListFromSdrfs $sdrNodeFile $arg2
    rc=$?

    # Return the result to the caller via standard output.
    [[ $rc -eq 0 ]] && $cat $sdrNodeFile
    ;;

                  #------------------------
  getdiskdevices) # mmcommon getDiskDevices
                  #------------------------
    # Generate a list of all disk devices known to this node.
    getDiskDevices
    rc=$?
    ;;

                         #---------------------------------------
  getvsdrpdnodedata)     # mmcommon getVsdRpdNodeData <filename>
                         #---------------------------------------
    if [[ $argc -ne 2 ]]
    then
      operands="<filename>"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Obtain node-related data from the peer domain.
    getVsdRpdNodeData $arg2
    rc=$?
    ;;

                   #----------------------------------------
  getvsddata)      # mmcommon getVSDdata <filename>
                   #----------------------------------------
    if [[ $argc -ne 2 ]]
    then
      operands="<filename>"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Call routine to extract vsd information based on
    # which type of VSD code (PSSP or RSCT) is in use.
    vsdPath=$(LC_ALL=C $ls -l $vsdatalst 2>/dev/null)
    if [[ $vsdPath = *${vsdatalstRSCT}* && -x $vsdatalstRSCT ]]
    then
      # Extract VSD-related data from the RPD.
      getVSDdataRPD $arg2
      rc=$?
    elif [[ $vsdPath = *${vsdatalstPSSP}* && -x $vsdatalstPSSP ]]
    then
      # Extract VSD-related data from the SDR.
      getVSDdataSDR $arg2
      rc=$?
    else
      printErrorMsg 320 "getVSDdata"
      cleanupAndExit
    fi
    ;;

                          #------------------------------------
  getvsdnodedata)         # mmcommon getVsdNodeData <filename>
                          #------------------------------------
    if [[ $argc -ne 2 ]]
    then
      operands="<filename>"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Call routine to extract vsd information based on
    # which type of VSD code (PSSP or RSCT) is in use.
    vsdPath=$(LC_ALL=C $ls -l $vsdatalst 2>/dev/null)
    if [[ $vsdPath = *${vsdatalstRSCT}* && -x $vsdatalstRSCT ]]
    then
      # Extract VSD-related data from the RPD.
      getVsdNodeDataRPD $arg2
      rc=$?
    elif [[ $vsdPath = *${vsdatalstPSSP}* && -x $vsdatalstPSSP ]]
    then
      # Extract VSD-related data from the SDR.
      getVsdNodeDataSDR $arg2
      rc=$?
    else
      printErrorMsg 320 "getVsdNodeData"
      cleanupAndExit
    fi
    ;;

                     #----------------------------------------
  getvsdnodenumbers) # mmcommon getVsdNodeNumbers
                     #----------------------------------------
    # Create mapping of the GPFS to VSD node numbers.
    getVsdNodeNumbers
    rc=$?
    ;;

                   #---------------------------------------------------------
  checkauth)       # mmcommon checkAuth <clusterName> <sgDevice> [norefresh]
                   #---------------------------------------------------------
    if [[ $argc -lt 3 ]]
    then
      operands="<clusterName> <deviceName> [norefresh]"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Check the authorization.
    checkAuth $arg2 "$arg3" $arg4
    rc=$?
    ;;

                   #--------------------
  gpfsready)       # mmcommon gpfsready
                   #--------------------
    # Invoke the gpfsready exit.
    gpfsready
    rc=$?
    ;;

                   #-----------------
  mmfsup)          # mmcommon mmfsup
                   #-----------------
    # Invoke the mmfsup exit.
    mmfsup $arg2 $arg3
    rc=$?
    ;;

                   #-------------------
  mmfsdown)        # mmcommon mmfsdown
                   #-------------------
    # Invoke the mmfsdown exit.
    mmfsdown
    rc=$?
    ;;

                    #---------------------------------------
  preunmount)       # mmcommon preunmount <device> <reason>
                    #---------------------------------------
    if [[ $argc -lt 3 ]]
    then
      operands="<deviceName|all> <reason>"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Invoke the preunmount exit.
    preunmount $arg2 $arg3
    rc=$?
    ;;

                   #----------------------------------------------------
  gpfsrecover)     # mmcommon gpfsrecover <sgDevice> <phase> <nodeList>
                   #----------------------------------------------------
    if [[ $argc -lt 4 ]]
    then
      operands="<deviceName> <pahse> <nodeNumber> [<nodeNumber> ... ]"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Shift past the positional parameters and get the node list.
    shift 3
    nodeList=$*

    # Invoke the gpfsrecover exit.
    gpfsrecover $arg2 $arg3 $nodeList
    rc=$?
    ;;

                   #-------------------------------------------------------
  getefoptions)    # mmcommon getEFOptions <sgDevice> <checkMountPointOpt>
                   #-------------------------------------------------------
    if [[ $argc -ne 3 ]]
    then
      operands="<deviceName> <checkMountPointOpt>"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Generate the options line.
    getEFOptions "$arg2" $arg3
    rc=$?
    ;;

                   #------------------------------------
  getmountpoint)   # mmcommon getMountPoint <sgDevice>
                   #------------------------------------
    if [[ $argc -ne 2 ]]
    then
      operands="<deviceName>"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Generate the options line.
    getMountPoint "$arg2" $arg3
    rc=$?
    ;;

                   #------------------------------------------
  getcontactnodes) # mmcommon getContactNodes <remoteCluster>
                   #------------------------------------------
    if [[ $argc -ne 2 ]]
    then
      operands="<remoteCluster>"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # If a user exit is installed, try it first.  If we get
    # a result, consider this to be the definitive answer.
    contactList=""
    if [[ -x $remoteclusternodes ]]
    then
      contactList=$($remoteclusternodes $arg2)
      rc=$?
      # Tell the daemon where the data comes from.
      [[ -n $contactList ]] &&  \
        contactList="${contactList},refreshMethod=1"
    fi

    # If a user exit is not activated, or if it did not return
    # anything, look for the contact nodes in the mmsdrfs file.
    if [[ -z $contactList ]]
    then
      gpfsInit nolock >/dev/null
      checkForErrors gpfsInit $?
      contactList=$($grep -e "^$arg2:$REM_CLUSTER:" $mmsdrfsFile |  \
                              $GETVALUE $CONTACT_NODES_Field)
#     # Tell the daemon where does the data come from.
#     [[ -n $contactList ]] &&  \
#       contactList="${contactList},refreshMethod=0"
    fi

    # Put out the result.
    if [[ -n $contactList ]]
    then
      print -- "$contactList"
      rc=0
    else
      [[ $rc -eq 0 ]] && rc=19  # ENODEV
    fi
    ;;

                   #----------------------------------------
  getfsnames)      # mmcommon getFSNames
                   #----------------------------------------

    # Return the names of the file systems in the nodeset to which
    # this node belongs.  The environment is assumed to be up to date.
    # All errors are ignored.
    #
    # Because this function is used by the daemon and can be invoked
    # during recovery, there should be no locking and/or initializing
    # of the GPFS environment.  Otherwise, there can be a deadlock.
    #
    $awk -F: '                                         \
      BEGIN { nodesetId = "" }                         \
      $'$LINE_TYPE_Field'   == "'$MEMBER_NODE'"   &&   \
      $'$NODE_NUMBER_Field' == "'$ourNodeNumber'"  {   \
        { nodesetId = $'$NODESETID_Field' }            \
        { next }                                       \
      }                                                \
      $'$LINE_TYPE_Field' == "'$SG_HEADR'" &&          \
      $'$NODESETID_Field' == nodesetId {               \
        { print "/dev/"$'$DEV_NAME_Field' }            \
      }                                                \
    ' $mmsdrfsFile  2>/dev/null
    rc=0
    ;;

                     #------------------------------------------
  getquotaenabledfs) # mmcommon getQuotaEnabledFS [<nodesetId>]
                     #------------------------------------------

    # Make sure the mmsdrfs file is current.
    gpfsInitOutput=$(gpfsInit nolock)
    setGlobalVar $? $gpfsInitOutput

    # If the nodeset is set to the global id, clear the variable
    # so that we can look at all SG_MOUNT records in the sdrfs.
    # Otherwise, prepend the '^' char to ensure correct match.
    [[ -n $arg2 ]] && nsId="$arg2"
    [[ $arg2 = $GLOBAL_ID ]] && nsId=""
    [[ -n $nsId ]] && nsId="^$nsId"

    # Put out the names of all file systems that have non-null quota options.
    $awk -F: '                                \
      /'$nsId:$SG_MOUNT:'/ {                  \
        if ( $'$QUOTA_OPT_Field' != "" ) {    \
          { print $'$DEV_NAME_Field' }        \
        }                                     \
      }                                       \
    ' $mmsdrfsFile
    checkForErrors awk $?
    rc=0
    ;;

                     #----------------------------------------
  getdisknames)      # mmcommon getDiskNames [<SGname>] [<nodesetId>]
                     #----------------------------------------

    # Make sure the mmsdrfs file is current.
    gpfsInitOutput=$(gpfsInit nolock)
    setGlobalVar $? $gpfsInitOutput

    # Make sure that the file system name, if specified,
    # is stripped from any /dev/ prefix.
    deviceName=""
    [[ -n $arg2 ]] && deviceName="${arg2##+(/)dev+(/)}:"

    # If the nodeset is set to the global id, clear the variable
    # so that we can look at all SG_DISKS records in the sdrfs.
    # Otherwise, prepend the '^' char to ensure correct match.
    [[ -n $arg3 ]] && nsId="$arg3"
    [[ $arg3 = $GLOBAL_ID ]] && nsId=""
    [[ -n $nsId ]] && nsId="^$nsId"

    # Put out the names of all file system disks.
    $awk -F: '                                                      \
      /'$nsId:$SG_DISKS:$deviceName'/ { print $'$DISK_NAME_Field' } \
    ' $mmsdrfsFile
    checkForErrors awk $?
    rc=0
    ;;

                     #----------------------------------------
  getsgdevice)       # mmcommon getSGDevice <diskname>
                     #----------------------------------------
    # Make sure the mmsdrfs file is current.
    gpfsInitOutput=$(gpfsInit nolock)
    setGlobalVar $? $gpfsInitOutput

    # Find the file system to which the disk belongs.
    findFSforDisk "$arg2" $mmsdrfsFile
    rc=0
    ;;

                     #----------------------------------------
  recoverfs)         # mmcommon recoverfs <fsname>
                     #----------------------------------------
    if [[ $argc -lt 2 ]]
    then
      operands="<fsname>"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Make sure the mmsdrfs file matches the GPFS daemon's data
    # for the specified filesystem.
    recoverfs "$arg2"
    rc=$?
    ;;

                     #----------------------------------------
  getnsddata)        # mmcommon getNsdData [<fsname>]
                     #----------------------------------------
    # Make sure the mmsdrfs file is current.
    gpfsInitOutput=$(gpfsInit nolock)
    setGlobalVar $? $gpfsInitOutput

    # Retrieve the NSD data.
    getNsdData $arg2
    rc=0
    ;;

                     #---------------------------------------------------
  restoremmfscfg )   # mmcommon restoreMmfscfg <nodesetId>
                     #---------------------------------------------------
    if [[ $argc -lt 2 ]]
    then
      operands=" <nodesetId> "
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Retrieve the mmfs.cfg data.
    restoreMmfscfg $arg2
    rc=$?
    ;;

                     #---------------------------------------------------
  resetnsdnumber )   # mmcommon resetNsdNumber <nn>
                     #---------------------------------------------------
    if [[ $argc -lt 2 ]]
    then
      operands=" <nodesetId> "
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Change the starting number for NSD names.
    resetNsdNumber $arg2
    rc=$?
    ;;

                   #-------------------------------------------------------
  on1 | on1long )  # mmcommon on1 <hostname> <remoteCommand> [<arg ... >]
                   #-------------------------------------------------------
    # Run a single mmremote command on one specified node.  If the target
    # is this node (or this is a single-node cluster), run it locally;
    # otherwise dispatch it via mmdsh and recover the remote return code.
    if [[ $argc -lt 3 ]]
    then
      operands="<hostname> <remoteCommand> [<arg ... >] "
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    target=$arg2

    # Shift past the hostname and get the command to execute and its args.
    shift 2
    remoteCommand=$*
    # First word of the command; also used (with our PID appended) to build
    # the unique temp-file name through which the remote rc is passed back.
    remoteVerb=$1

    if [[ $target = $ourNodeName || $MMMODE = single ]]
    then
      # If we happen to be the target node, run the command locally.
      $mmremote $remoteCommand
      rc=$?

    else
      # Invoke mmdsh to execute command on remote system,
      # suppressing prepending of the hostname to the output lines.
      # First clear any stale rc files left over from a prior use of
      # this PID with the same verb.
      $rm -f $tmpDir$remoteVerb$$.*
      $mmdsh -svL $target $mmremote onbehalf2 $ourNodeName $remoteVerb$$  \
                  $MMMODE $NO_LINK mmremote $remoteCommand
      rc=$?

      # Determine the return code.  The goal is to pass back the return code
      # from the mmremote command.  Because rsh and different versions of ssh
      # handle the return code differently (e.g., rsh does not propagate
      # it back), mmremote onbehalf creates a special file that has the
      # return code appended at the end of its name.  If we do not see this
      # file on our side, we assume that mmdsh returned the return code from
      # the remote command.  Although this is not necessarily always the case
      # (there could have been a problem with the touch command that creates
      # the special file) it is the best we can do under the circumstances.
      rcInfo=$($ls $tmpDir$remoteVerb$$.* 2> /dev/null)
      $rm -f $tmpDir$remoteVerb$$.*
      if [[ -n $rcInfo ]]
      then
        # The return code was passed back via the empty file mechanism.
        # Extract the return code from the file name.
        # (Strip the "<tmpDir><verb><pid>." prefix; what remains is the rc.)
        rc=${rcInfo#$tmpDir$remoteVerb$$\.}
#esjx      else
#esjx        # The special file was not found.
#esjx        # Override the rc from mmdsh if necessary.
#esjx        [[ $rc -eq 0 ]] && rc=1
      fi
    fi
    ;;

              #---------------------------------------------------
  onall)      # mmcommon onall <relNameFile> <unreachedNodesFile>
              #                   <remoteCommand> [<arg ... >]
              #---------------------------------------------------
    # Run a command on all nodes listed in <relNameFile> (synchronously).
    # Nodes that could not be reached are reported via <unreachedNodesFile>.
    if [[ $argc -lt 4 ]]
    then
      operands="<relNameFile> <unreachedNodesFile> <remoteCommand> [<arg ... >]"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Shift past the reliable names file and the report file
    # and get the command to execute and its args.
    shift 3
    remoteCommand=$*

    if [[ $MMMODE = single ]]
    then
      # Execute the command locally.
      $mmremote $remoteCommand
      rc=$?
    else
      # Invoke mmdsh to execute command on remote systems.
      # -F $arg2 supplies the node list; -R $arg3 presumably collects the
      # names of unreached nodes -- confirm against the mmdsh script.
      $mmdsh -vF $arg2 -R $arg3 $mmremote $remoteCommand
      rc=$?
    fi
    ;;

               #----------------------------------------------------------------
  onall_async) # mmcommon onall_async <relNameFile> <remoteCommand> [<arg ... >]
               #----------------------------------------------------------------
    # Like onall, but intended to be run detached from the caller:
    # all output is discarded and the node names file is removed here,
    # because the invoking process no longer exists to do either.
    if [[ $argc -lt 3 ]]
    then
      operands="<relNameFile> <remoteCommand> [<arg ... >] "
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Shift past the file name and get the command to execute and its args.
    shift 2
    remoteCommand=$*

    if [[ $MMMODE = single ]]
    then
      # Execute the command locally.
      $mmremote $remoteCommand
      rc=$?
    else
      # Invoke mmdsh to execute command on remote systems.
#esjdbg  # Show msgs during development and testing only.
#esjdbg  $mmdsh -vF $arg2 $mmremote $remoteCommand
      $mmdsh -vF $arg2 $mmremote $remoteCommand >/dev/null 2>&1
      rc=$?
    fi

    # Remove the node names file.  The caller is gone by now.
    $rm -f $arg2
    ;;

                              #----------------------------------------------
  pushsdr | pushsdr_async )   # mmcommon pushSdr <relNameFile> <fileToCopy>
                              #                  <checksum> [<options>]
                              #----------------------------------------------
    # Propagate the mmsdrfs file ($arg3, verified by checksum $arg4) to all
    # nodes in $arg2 via "mmremote upgradeSystemFiles".  The *_async variant
    # additionally cleans up the work files left behind by the (now gone)
    # main process.
    if [[ $argc -lt 4 ]]
    then
      operands="<relNameFile> <fileToCopy> <checksum> [<options>]"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    if [[ $kword_lc = pushsdr_async ]]
    then
      async_call=yes
#esjdbg  # Show msgs during development and testing only.
#esjdbg  print -u2 "$(date): mmcommon $kword: mmsdrfs propagation started"
    fi

    # Shift past all positional parameters and collect the command arguments.
    shift 4
    opt="$@"

    if [[ $MMMODE = single ]]
    then
      # This function is a no-op for single clusters.
      rc=0
    else
      # -I $arg3 presumably tells mmdsh to ship the file to each node
      # before running the command -- confirm against the mmdsh script.
      # Output is discarded; the rc from mmdsh is all that is reported.
#esjdbg  # Show msgs during development and testing only.
#esjdbg  $mmdsh -vF $arg2 -I $arg3 $mmremote upgradeSystemFiles $arg3 $arg4 "$opt"
      $mmdsh -vF $arg2 -I $arg3  \
        $mmremote upgradeSystemFiles $arg3 $arg4 "$opt" >/dev/null 2>&1
      rc=$?
    fi

    if [[ $async_call = yes ]]
    then
      # Cleanup files that were left behind by the main process.
      $rm -f $arg2 $arg3
#esjdbg  # Show msgs during development and testing only.
#esjdbg  print -u2 "$(date): mmcommon $kword: mmsdrfs propagation completed; mmdsh rc=$rc"
    fi
    ;;

                              #------------------------------------------------
  pushkey | pushkey_async )   # mmcommon pushKey <nodeFile> <sdrfsFile>
                              #                  <sdrfsChecksum>
                              #                  <keyFile> <keyChecksum>
                              #                  [<options>]
                              #------------------------------------------------
    # Like pushsdr, but propagates two files: the mmsdrfs file ($arg3/$arg4)
    # and an authentication key file ($arg5/$arg6), via
    # "mmremote upgradeSystemFiles2".
    if [[ $argc -lt 6 ]]
    then
      operands="<nodeFile> <sdrfsFile> <sdrfsChecksum> <keyFile> <keyChecksum> [<options>]"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    if [[ $kword_lc = pushkey_async ]]
    then
      async_call=yes
#esjdbg  # Show msgs during development and testing only.
#esjdbg  print -u2 "$(date): mmcommon $kword: mmsdrfs propagation started"
    fi

    # Shift past all positional parameters and collect the command arguments.
    shift 6
    opt="$@"

    if [[ $MMMODE = single ]]
    then
      # This function is a no-op for single clusters.
      rc=0
    else
      # Two -I options: ship both the sdrfs file and the key file.
      # Output is discarded; the rc from mmdsh is all that is reported.
#esjdbg  # Show msgs during development and testing only.
#esjdbg  $mmdsh -vF $arg2 -I $arg3 -I $arg5  \
#esjdbg    $mmremote upgradeSystemFiles2 $arg3 $arg4 $arg5 $arg6 "$opt"
      $mmdsh -vF $arg2 -I $arg3 -I $arg5  \
        $mmremote upgradeSystemFiles2 $arg3 $arg4 $arg5 $arg6 "$opt" >/dev/null 2>&1
      rc=$?
    fi

    if [[ $async_call = yes ]]
    then
      # Cleanup files that were left behind by the main process.
      # Keep the key file if it is the permanent copy under $genkeyData.
      $rm -f $arg2 $arg3
      [[ $arg5 != ${genkeyData}* ]] && $rm -f $arg5
#esjdbg  # Show msgs during development and testing only.
#esjdbg  print -u2 "$(date): mmcommon $kword: mmsdrfs propagation completed; mmdsh rc=$rc"
    fi
    ;;

  onactive | onactive_async )
             #------------------------------------------------------------------
             # mmcommon onactive <preferredNode> <relNameFile> <fileToCopy>
             #           <fsToCheck> <scope> <link> <remoteCommand> [<arg ... >]
             #------------------------------------------------------------------
    # Run a command on the first node (preferring $arg2) on which the GPFS
    # daemon is active; see the runRemoteCommand function header.
    if [[ $argc -lt 8 ]]
    then
      operands="<preferredNode> <relNameFile> <fileToCopy>"
      operands="$operands <fsToCheck> <scope> <link> <remoteCommand> [<arg ... >] "
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # If async call, mark the relNameFile for deletion.
    [[ $kword_lc = onactive_async ]] && async_call=yes

    # Shift past all positional parameters and collect the command arguments.
    shift 8
    arguments="$@"

    # Find an active node and execute the command.
    runRemoteCommand $arg2 $arg3 $arg4 $arg5 $arg6 $arg7 $arg8 "$arguments"
    rc=$?

    # Remove the file with node names.
    # (Async caller is gone and cannot clean up after itself.)
    [[ $async_call = yes ]] && $rm -f $arg3
    ;;

                #-------------------------------------------------------
  linkcommand)  # mmcommon linkCommand <preferredNode> <relNameFile>
                #                      <remoteCommand> [<arg ... >]
                #-------------------------------------------------------
    # Convenience wrapper around runRemoteCommand: no file copy, no mount
    # check, and the symbolic-link indicator forced to $LINK.
    if [[ $argc -lt 4 ]]
    then
      operands="<preferredNode> <relNameFile> <remoteCommand> [<arg ... >] "
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Shift past all positional parameters and collect the command arguments.
    shift 4
    arguments="$@"

    # Find an active node and execute the command.
    runRemoteCommand $arg2 $arg3  \
      $NO_FILE_COPY $NO_MOUNT_CHECK NULL $LINK $arg4 "$arguments"
    rc=$?
    ;;

        #-------------------------------------------
  run)  # mmcommon run <function> [arg ... ]
        # Note:  Intended for use by SMIT mostly
        #-------------------------------------------
    # Execute an arbitrary shell function (from the sourced service files)
    # in this process, passing along any remaining arguments.
    if [[ $argc -lt 2 ]]
    then
      operands="<function> [arg ... ]"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    shift 1  # Skip past the keyword "run".
    $@       # Execute the requested function.
    rc=$?
    ;;

              #-------------------------------------------
  freelocks)  # mmcommon freeLocks
              #-------------------------------------------
    # Best-effort release of GPFS configuration locks; all output and
    # errors are deliberately suppressed.
    # The primary config server name comes from the first line of the
    # mmsdrfs file.
    primaryServer=$($head -1 $mmsdrfsFile | $GETVALUE $PRIMARY_SERVER_Field)
    setRunningCommand null $primaryServer > /dev/null 2>&1
    freeLockOnServer $primaryServer > /dev/null 2>&1
    freeEnvLock > /dev/null 2>&1
    ;;

                      #---------------------------
  startautomounter )  # mmcommon startAutomounter
                      #---------------------------
    # Make sure that the local mmfs.cfg file is up-to-date.
    # (checkForErrors presumably terminates the script if gpfsInit failed.)
    gpfsInitOutput=$(gpfsInit nolock)
    checkForErrors gpfsInit $?

    # Find out the value of the automountDir config parameter.
    # Fall back to the default when the parameter is not set.
    automountDir=$(showCfgValue automountDir)
    [[ -z $automountDir ]] && automountDir=$defaultAutomountDir

    # Run the OS automount command.
    startAutomounter $automountDir
    rc=$?
    ;;

                    #--------------------------------------
  savesgdescfile )  # mmcommon saveSGDescFile <sgDescFile>
                    #--------------------------------------
    if [[ $argc -lt 2 ]]
    then
      operands="<sgDescFile>"
      printErrorMsg 260 mmcommon $kword "$operands"
      cleanupAndExit
    fi

    # Store the file on the config server nodes.
    saveSGDescFile $arg2
    rc=$?
    ;;

                         #------------------------------------------------------
  expirationdatacleanup) # mmcommon expirationDataCleanup <expData> <lockAction>
                         #------------------------------------------------------
    # Best-effort cleanup: the function's return code is intentionally
    # ignored and success is always reported.
    expirationDataCleanup $arg2 $arg3
    rc=0
    ;;

                   #-----------------------
  killsdrserv)     # mmcommon killSdrServ
                   #-----------------------
    # Terminate the sdr server process; pass its rc back to the caller.
    killSdrServ
    rc=$?
    ;;

        #----------------------------------------
  * )   # Unknown action requested
        #----------------------------------------
    # Invalid keyword: report it and exit via the common cleanup path.
    printErrorMsg 133 mmcommon $kword
    cleanupAndExit
    ;;

esac  # end case $kword_lc in

# Exit with the rc set by the selected case arm.  "doNotUnlock" presumably
# tells cleanupAndExit to leave any held locks in place -- the function is
# defined in the sourced service files; verify there.
cleanupAndExit $rc doNotUnlock

