#!/bin/ksh
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
#  
#  
# Licensed Materials - Property of IBM 
#  
# (C) COPYRIGHT International Business Machines Corp. 1997,2006 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)15 1.136.1.3 src/avs/fs/mmfs/ts/admin/mmaddnode.sh, mmfs, avs_rgpfs24, rgpfs24s005a 6/14/06 14:39:38
###############################################################################
#
# Usage:
#
#   mmaddnode -N {NodeDesc[,NodeDesc...] | NodeFile}
#
# where:
#
#   -N NodeDesc,NodeDesc,...  specifies a comma-separated list of node
#                             descriptors that detail the node interfaces
#                             to be added to the cluster.  The nodes must not
#                             presently belong to the cluster.
#
#   -N NodeFile    specifies a file of node descriptors that detail
#                  the node interfaces to be added to the cluster.
#                  The nodes must not presently belong to the cluster.
#
# Obsolete but still supported options:
#
#   NodeDesc,NodeDesc,...   specifies a comma-separated list of node
#                           descriptors that detail the node interfaces
#                           to be added to the cluster.  The nodes must not
#                           presently belong to the cluster.
#
#   -n NodeFile    specifies a file of node descriptors that detail
#                  the node interfaces to be added to the cluster.
#                  The nodes must not presently belong to the cluster.
#
# Each node descriptor has the format:
#
#   nodeName:nodeRoles:adminNodeName:
#
# where
#
#   nodeName        is either a short or fully-qualified hostname,
#                   or an IP address for the primary GPFS network
#                   to be used for daemon-to-daemon communications.
#
#   nodeRoles       is a '-' separated list of node roles.  Unless changed
#                   on the mmconfig or mmaddnode commands, these roles will
#                   be associated with the node when it becomes a member of
#                   a nodeset.
#
#   adminNodeName   is either a short or fully-qualified hostname, or an IP
#                   address to be used by the admin scripts to communicate
#                   between nodes.  This is an optional parameter; if it
#                   is not specified, the nodeName value is used.
#
###############################################################################

# Include global declarations and service routines.
# Include global declarations and service routines.
# These define, among other things: tmpDir, mmcmd, $rm, $touch, $grep,
# printErrorMsg, checkForErrors, syntaxError, cleanupAndExit, and the
# mmsdrfs field-offset constants used below.
. /usr/lpp/mmfs/bin/mmglobfuncs
. /usr/lpp/mmfs/bin/mmsdrfsdef
. /usr/lpp/mmfs/bin/mmfsfuncs

sourceFile="mmaddnode.sh"
# Turn on command tracing if either the global or the
# per-command debug environment variable is set.
[[ -n $DEBUG || -n $DEBUGmmaddnode ]] && set -x
$mmTRACE_ENTER "$*"


# Local work files.  Names should be of the form:
#   fn=${tmpDir}fn.${mmcmd}.$$
# ($$ makes the names unique per invocation.)
allnodes=${tmpDir}allnodes.${mmcmd}.$$
allnodenames=${tmpDir}allnodenames.${mmcmd}.$$
goodnodes=${tmpDir}goodnodes.${mmcmd}.$$
processedNodes=${tmpDir}processedNodes.${mmcmd}.$$
inputNodes=${tmpDir}inputNodes.${mmcmd}.$$
oldcfgFile=${tmpDir}oldcfgFile.${mmcmd}.$$
diskLines=${tmpDir}diskLines.${mmcmd}.$$

# LOCAL_FILES is consumed by the common cleanup code
# to remove the work files on exit.
LOCAL_FILES=" $allnodenames $allnodes $goodnodes $processedNodes $inputNodes $oldcfgFile $diskLines "


# Local declarations

usageMsg=355
# The "integer" declarations make ksh treat later assignments of the
# form "var=$var+1" as arithmetic rather than string concatenation.
integer nodeCount=0
integer nodeNumber=0
integer totalNodeCount=0
integer quorumNodeCount=0
integer nodesAddedToCluster=0
integer highestNodeNumber=0
narg=""
lapiWindow=$noWindow      # obsolete
switchNumber=$noSwitch    # obsolete
quorumDefault=$nonQuorumNode
fatalError=""
rc=0
# Automatically fold values assigned to these variables to lower case.
typeset -l role
typeset -l adapter_lc

# Local routines


#####################################################################
#
# Function:  Verifies that a command executed successfully.
#            If the return code from the command is not zero,
#            the function issues a message and sets the global
#            fatalError flag.  It returns with the same return
#            code that was specified on input.
#
# Input:     $1 - name of the command to check
#            $2 - return code from the execution of the command
#
# Output:    The global fatalError is set to "yes" if $2 is non-zero.
#
# Returns:   The value passed in $2.
#
#####################################################################
function checkForErrorsAndReturn
{
  typeset sourceFile="mmaddnode.sh"
  $mmTRACE_ENTER "$*"
  # Quote $2 so that an empty or unset return code cannot produce
  # a test syntax error; anything other than "0" counts as failure.
  if [[ "$2" != "0" ]]
  then
    fatalError=yes
    # Unexpected error
    printErrorMsg 171 "$mmcmd" "$1" $2
  fi

  return $2

}  #------ end of function checkForErrorsAndReturn --------------



#######################
# Mainline processing
#######################


#################################
# Process the command arguments.
#################################
# Honor the standard help flags before any option parsing.
[[ $arg1 = '-?' || $arg1 = '-h' || $arg1 = '--help' || $arg1 = '--' ]] &&  \
  syntaxError "help" $usageMsg

while getopts :n:N: OPT
do
  case $OPT in

    n) # node descriptors file (obsolete but still supported)
       [[ -n $narg ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       narg=$OPTARG
       # argc is maintained by the sourced globals; subtract the
       # option and its value (this is arithmetic — presumably argc
       # is declared integer in mmglobfuncs; verify there).
       argc=argc-2
       ;;

    N) # node descriptors list or file
       [[ -n $Narg ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       Narg=$OPTARG
       argc=argc-2
       ;;

    +[nN]) # Invalid option
       # ksh getopts reports options given with a leading '+' this way.
       syntaxError "invalidOption" $usageMsg $OPT
       ;;

    :) # Missing argument
       syntaxError "missingValue" $usageMsg $OPTARG
       ;;

    *) # Invalid option
       syntaxError "invalidOption" $usageMsg $OPTARG
       ;;

  esac

done  # end of while getopts :n:N: OPT do

# Drop the processed options from the parameter list.
shift OPTIND-1


# Complete the parameter checking.
# -n and -N are mutually exclusive ways of supplying the node list.
[[ -n $narg && -n $Narg ]] &&  \
  syntaxError "invalidCombination" $usageMsg "-n" "-N"

if [[ -n $Narg ]]
then
  # The -N parameter may be either a list or a file.  Which is it?
  if [[ -f $Narg ]]
  then
    # It is a file; verify its existence and create our own copy.
    checkUserFile $Narg $inputNodes
    [[ $? -ne 0 ]] && cleanupAndExit
  else
    # It is not a file, so it must be a list.
    # Convert the input node list into a file, one descriptor per line.
    $rm -f $inputNodes
    IFS=','
    for nodeDesc in $Narg
    do
      print -- "$nodeDesc" >> $inputNodes
      checkForErrors "writing to $inputNodes" $?
    done
    IFS="$IFS_sv"    # Restore the default IFS setting.
  fi  # end of if [[ -f $Narg ]]

elif [[ -z $narg ]]
then
  # If neither the -N nor the -n option is used,
  # a list of node names is required.
  if [[ $argc -eq 1 ]]
  then
    # If there is exactly one string left,
    # it is assumed to be the list of nodes to add.
    arglist=$1
  elif [[ $argc -gt 1 ]]
  then
    # If more than one string is left,
    # we have a syntax error.
    syntaxError "extraArg" $usageMsg "$2"
  else
    # If there are no more parameters,
    # a required parameter is missing.
    syntaxError "missingArgs" $usageMsg
  fi

  # Convert the input node list into a file, one descriptor per line.
  $rm -f $inputNodes
  IFS=','
  for nodeDesc in $arglist
  do
    print -- "$nodeDesc" >> $inputNodes
    checkForErrors "writing to $inputNodes" $?
  done
  IFS="$IFS_sv"    # Restore the default IFS setting.

else
  # If -n is specified, there should be no other parms.
  [[ $argc -gt 0 ]] &&  \
    syntaxError "extraArg" $usageMsg "$1"

  # Check the node names file parameter and create our own copy.
  checkUserFile $narg $inputNodes
  [[ $? -ne 0 ]] && cleanupAndExit
fi   # end of if [[ -z $narg ]]


######################################################################
# Set up trap exception handling and call the gpfsInit function.
# It will ensure that the local copy of the mmsdrfs and the rest of
# the GPFS system files are up-to-date and will obtain the sdr lock.
######################################################################
trap pretrap HUP INT QUIT KILL
gpfsInitOutput=$(gpfsInit $lockId)
setGlobalVar $? $gpfsInitOutput

if [[ $MMMODE != lc ]]
then
  # Command is not valid in the user's environment;
  # mmaddnode applies only to loose-cluster (lc) mode.
  printErrorMsg 376 $mmcmd $MMMODE
  cleanupAndExit
fi

# Determine the lookup order for resolving host names.
# On AIX the system default is used as is.
[[ $osName != AIX ]] && resolveOrder=$(setHostResolveOrder)


####################################################
# Find out if disk-based quorum is in effect.
####################################################
diskQuorumInEffect=$(showCfgValue tiebreakerDisks)
# Normalize "no" to the empty string so later tests can use -n/-z.
[[ $diskQuorumInEffect = no ]] && diskQuorumInEffect=""


############################################################
# Go through the current mmsdrfs file.
# Increment the gen number and collect needed information.
############################################################
# Start with clean work files and a placeholder secondary server name.
$rm -f $newsdrfs $allnodenames $allnodes $diskLines $oldcfgFile
[[ -z $backupServer ]] && backupServer="_NOSECONDARY_"

IFS=":"         # Change the field separator to ':'.
# Close fd 3 in case it was left open, then read the mmsdrfs file on it.
exec 3<&-
exec 3< $mmsdrfsFile
while read -u3 sdrfsLine
do
  # Parse the line.
  # set -f suppresses globbing during the word split; the leading "-"
  # shifts every field right by one so the array subscripts line up
  # with the 1-based *_Field offsets from mmsdrfsdef.
  set -f ; set -A v -- - $sdrfsLine ; set +f
  IFS="$IFS_sv"    # Restore the default IFS settings.
  printLine=true   # Assume the line will be printed.

  # Change some of the fields depending on the type of line.
  case ${v[$LINE_TYPE_Field]} in

    $VERSION_LINE )  # This is the global header line.
      # Increment the generation number.
      # NOTE(review): this relies on newGenNumber being declared as an
      # integer by the sourced global files for "+1" to be arithmetic;
      # confirm in mmglobfuncs/mmsdrfsdef.
      newGenNumber=${v[$SDRFS_GENNUM_Field]}+1
      v[$SDRFS_GENNUM_Field]=$newGenNumber

      # Create the overloaded clType and clusterIdAndSt parameters
      # that will be passed to the checkNewClusterNode routine.
      clType="${MMMODE}/${environmentType}"
      clusterIdAndSt="${v[$CLUSTERID_Field]}:${v[$CLUSTER_SUBTYPE_Field]}"
      ;;

    $NODESET_HDR )    # This is the nodeset header line.
      # Skip over this line.  It will be rebuilt after
      # we have the new value for node count.
      nodesetHdr=$sdrfsLine
      nodeCount=${v[$NODE_COUNT_Field]}
      lowestVersion=${v[$MIN_DAEMON_VERSION_Field]}
      highestVersion=${v[$MAX_DAEMON_VERSION_Field]}
      osEnvironment=${v[$OS_ENVIRONMENT_Field]}
      [[ -z $highestVersion ]] && highestVersion=0
      [[ -z $osEnvironment ]] && osEnvironment=$osName
      printLine=false
      ;;

    $MEMBER_NODE )  # This line describes a node.
      # Keep track of the highest node number assigned to a node.
      [[ $highestNodeNumber -lt ${v[$NODE_NUMBER_Field]} ]] &&  \
        highestNodeNumber=${v[$NODE_NUMBER_Field]}

      # Add the node's admin name to the list of admin names in the cluster.
      print -- "${v[$REL_HOSTNAME_Field]}" >> $allnodes
      checkForErrors "writing to file $allnodes" $?

      # Add the daemon node name and the admin node name to the
      # list of all daemon and admin node names in the cluster.
      print -- "${v[$DAEMON_NODENAME_Field]}:${v[$REL_HOSTNAME_Field]}" >> $allnodenames
      checkForErrors "writing to file $allnodenames" $?

      # Keep track of the overall number of quorum nodes.
      # If disk quorum is in effect, collect information
      # about the quorum nodes in the cluster.
      if [[ ${v[$CORE_QUORUM_Field]} = $quorumNode ]]
      then
        (( quorumNodeCount += 1 ))
        if [[ -n $diskQuorumInEffect ]]
        then
          if [[ -z $quorumNodeNames ]]
          then
            # This is the first node to add to the lists.
            quorumNodeNumbers="${v[$NODE_NUMBER_Field]}"
            quorumNodeNames="${v[$REL_HOSTNAME_Field]}"
          else
            if [[ ${v[$REL_HOSTNAME_Field]} = $ourNodeName ]]
            then
              # This is the local node; add it at the front of the lists
              # so it will be the first quorum node used.
              quorumNodeNumbers="${v[$NODE_NUMBER_Field]},${quorumNodeNumbers}"
              quorumNodeNames="${v[$REL_HOSTNAME_Field]},${quorumNodeNames}"
            else
              # This is not the local node; add it at the end of the lists.
              quorumNodeNumbers="${quorumNodeNumbers},${v[$NODE_NUMBER_Field]}"
              quorumNodeNames="${quorumNodeNames},${v[$REL_HOSTNAME_Field]}"
            fi
          fi  # end of if [[ -z $quorumNodeNames ]]
        fi  # end of if [[ -n $diskQuorumInEffect ]]
      fi  # end of if [[ ${v[$CORE_QUORUM_Field]} = $quorumNode ]]
      ;;

    $SG_DISKS )  # This line describes a disk.
      # Collect the lines that represent the quorum (tiebreaker) disks.
      if [[ -n $diskQuorumInEffect && ${v[$PAXOS_Field]} = $PaxosDisk ]]
      then
        print_newLine >> $diskLines
        checkForErrors "writing to file $diskLines" $?
      fi  # end if [[ -n $diskQuorumInEffect && ...
      ;;

    $MMFSCFG )     # This line contains mmfs.cfg information.
      # Extract the mmfs.cfg information.
      # It is everything after the first 4 fields.
      cfgLine="${v[5]}:${v[6]}:${v[7]}:${v[8]}:${v[9]}:${v[10]}:${v[11]}"
      cfgLine="$cfgLine:${v[12]}:${v[13]}:${v[14]}:${v[15]}:${v[16]}"
      cfgLine="$cfgLine:${v[17]}:${v[18]}:${v[19]}:${v[20]}:${v[21]}:${v[22]}"

      # To preserve tabs, temporarily set IFS to new line only.
      IFS="
"
      # Strip trailing colons and write the line to the file.
      print "${cfgLine%%+(:)}" >> $oldcfgFile
      checkForErrors "writing to file $oldcfgFile" $?
      IFS="$IFS_sv"
      printLine=false

      # Retrieve the value of maxFeatureLevelAllowed.
      set -f ; set -- $cfgLine ; set +f
      attribute=$1
      value=$2
      [[ $attribute = maxFeatureLevelAllowed ]] &&  \
        maxFeatureLevelAllowed=${value%%+(:)}
      ;;

    * )  # No need to look at any of the other lines.
      ;;

  esac  # end Change some of the fields

  # Build and write the line to the new mmsdrfs file.
  if [[ $printLine = true ]]
  then
    print_newLine >> $newsdrfs
    checkForErrors "writing to file $newsdrfs" $?
  fi

  IFS=":"  # Change the separator back to ":" for the next iteration.

done  # end while read -u3 sdrfsLine

IFS="$IFS_sv"  # Restore the default IFS settings.

# Save the highest node number found; new nodes are numbered after it.
nodeNumber=$highestNodeNumber


#######################################################################
# Generate the node information for the mmsdrfs file.
#
# Loop through the nodes to be added to the sdrfs file, checking
# as we go.  When the loop is done we know which nodes can be added
# to the sdrfs file and which ones can't.  A MEMBER_NODE line will be
# generated for each node that can be added to the new GPFS cluster.
#######################################################################
# Loop through the nodes to be added.
$rm -f $tmpsdrfs $goodnodes $processedNodes
$touch $tmpsdrfs $goodnodes $processedNodes
# Read the node descriptor file on file descriptor 3.
exec 3<&-
exec 3< $inputNodes
while read -u3 nodeDesc
do
  # Skip empty and comment lines
  [[ $nodeDesc = *([$BLANKchar$TABchar])   ]] && continue
  [[ $nodeDesc = *([$BLANKchar$TABchar])#* ]] && continue

  # Keep track of the total number of nodes specified by the user.
  totalNodeCount=$totalNodeCount+1

  # Split the descriptor (nodeName:nodeRoles:adminNodeName) on ':'.
  IFS=':'
  set -f ; set -- $nodeDesc ; set +f
  nodeName=$1
  nodeRoles=$2
  nodeName2=$3
  IFS="$IFS_sv"

  # Silently skip "localhost"; it can never be a valid cluster node name.
  [[ $nodeName = "localhost" ]] && continue

  # Process the node roles list.
  # Roles are '-' separated; when conflicting designations are given,
  # the last one seen wins.
  designation=$CLIENT
  quorumField=$quorumDefault
  if [[ -n $nodeRoles ]]
  then
    IFS="-"
    set -f ; set -- $nodeRoles ; set +f
    IFS="$IFS_sv"
    while [[ -n $1 ]]
    do
      role=$1  # Convert the node's role to lower case only.
      case $role in
        $CLIENT )
          designation=$CLIENT
          ;;

        $MANAGER )
          designation=$MANAGER
          ;;

        $QUORUM )
          quorumField=$quorumNode
          ;;

        $NONQUORUM ) quorumField=$nonQuorumNode
          ;;

        * )
          # Invalid node designations specified.
          # "break 2" abandons the outer per-node loop as well.
          printErrorMsg 293 $mmcmd "$nodeDesc"
          fatalError=yes
          break 2
          ;;
      esac

      # Move to the next field.
      shift
    done  # end while [[ -n $1 ]]
  fi  # end if [[ -n $nodeRoles ]]

  # Determine the value of addNodeState.
  if [[ $quorumField = $nonQuorumNode ]]
  then
    addNodeState=$OLD_NODE
  else
    addNodeState=$NEW_NODE
  fi

  # At this point, the daemon node name could be a fully-qualified
  # adapter port name, a short name, or an IP address.  Determine the
  # fully-qualified hostname, the short name, and the IP address for
  # the specified node interface.
  hostResult=$($host $nodeName)
  set -f ; set -- $hostResult ; set +f
  daemonNodeName=$1
  # NOTE(review): the pattern " *|.*" below looks intended to strip
  # everything from the first dot; confirm against ksh pattern semantics.
  shortName=${1%% *|.*}    # Exclude everything after the first dot.
  ipa=${3%%,*}             # Exclude everything after the first comma.
  if [[ -z $ipa ]]
  then
    # Invalid node name specified.
    printErrorMsg 54 $mmcmd $nodeName
    fatalError=yes
    break
  fi

  # At this point, if it was specified, the admin node name could be a
  # fully-qualified adapter port name, a short name, or an IP address.
  # Determine the fully-qualified hostname and the IP address for the
  # specified node interface.
  if [[ -n $nodeName2 ]]
  then
    # Stop here if the admin network support has not been activated yet.
    if [[ $sdrfsFormatLevel -eq 0 ]]
    then
      print -u2 "$mmcmd:  The separate administration network support has not been enabled yet."
      print -u2 "    Run \"mmchconfig release=LATEST\" to activate the new function."
      fatalError=yes
      break
    fi

    hostResult=$($host $nodeName2)
    set -f ; set -- $hostResult ; set +f
    adminNodeName=$1
    adminShortName=${1%% *|.*}  # Exclude everything after the first dot.
    adminIpa=${3%%,*}           # Exclude everything after the first comma.
    if [[ -z $adminIpa ]]
    then
      # An invalid admin node name was specified.
      printErrorMsg 54 $mmcmd $nodeName2
      fatalError=yes
      break
    fi
  else
    # The user did not set a distinct admin node name, so set the
    # admin node names to be the same as the daemon node names.
    adminNodeName=$daemonNodeName
    adminShortName=$shortName
  fi

  # Assign a node number to the node (one past the highest in use).
  nodeNumber=$nodeNumber+1
  gsNodeNumber=$nodeNumber
  adapterType=""

  # Ensure that the node interfaces do not already exist in the cluster.
  $grep -qw $daemonNodeName $allnodenames > /dev/null 2>&1
  if [[ $? -eq 0 ]]
  then
    # The node already belongs to the cluster.
    printErrorMsg 152 $mmcmd $nodeName
    fatalError=yes
    break
  fi
  if [[ $adminNodeName != $daemonNodeName ]]
  then
    $grep -qw $adminNodeName $allnodenames > /dev/null 2>&1
    if [[ $? -eq 0 ]]
    then
      # The node already belongs to the cluster.
      printErrorMsg 152 $mmcmd $nodeName2
      fatalError=yes
      break
    fi
  fi

  # Make sure neither node name (admin or daemon) is specified more than once.
  $grep -qw $daemonNodeName $processedNodes > /dev/null 2>&1
  if [[ $? -eq 0 ]]
  then
    # The node is specified twice.
    printErrorMsg 347 $mmcmd $nodeName
    fatalError=yes
    break
  fi
  if [[ $adminNodeName != $daemonNodeName ]]
  then
    $grep -qw $adminNodeName $processedNodes > /dev/null 2>&1
    if [[ $? -eq 0 ]]
    then
      # The node is specified twice.
      printErrorMsg 347 $mmcmd $nodeName2
      fatalError=yes
      break
    fi
  fi

  # If disk-based quorum is in effect, make sure that adding
  # this node will not exceed the total limit of quorum nodes.
  if [[ -n $diskQuorumInEffect     &&
        $quorumNodeCount -eq 8     &&
        $quorumField = $quorumNode ]]
  then
    # There are more than eight quorum nodes while tiebreaker disks are in use.
    printErrorMsg 131 $mmcmd
    # Adding the node to the cluster will exceed the quorum node limit.
    printErrorMsg 134 $mmcmd $nodeName
    fatalError=yes
    break
  fi

  # Add the daemon and admin node names to the list of processed nodes.
  print -- "${daemonNodeName}:${adminNodeName}" >> $processedNodes
  checkForErrorsAndReturn "writing to file $processedNodes" $?
  [[ $? -ne 0 ]] && break

  # Build a line with the local node data for the node.  The full-blown
  # MEMBER_NODE line will be created further down when we have all of the
  # information that we need.
  sdrfsLine="$nodesetId:$MEMBER_NODE::$nodeCount:$nodeNumber:$shortName:$ipa"
  sdrfsLine="$sdrfsLine:$adminNodeName:$designation:$adapterType:$lapiWindow"
  sdrfsLine="$sdrfsLine:$switchNumber:$addNodeState:$adapterType:$daemonNodeName"
  sdrfsLine="$sdrfsLine:$adminShortName::::$quorumField:$gsNodeNumber:"

  # Invoke the checkNewClusterNode function to ensure that
  # the node is new and that its level of GPFS supports clusters.
  # If it passes these tests, a skeleton sdrfs file is stored on the node.
  printInfoMsg 416 "$(date)" $mmcmd $daemonNodeName
  runOutput=$(run on1 $adminNodeName checkNewClusterNode  \
     $clType $primaryServer $backupServer "$sdrfsLine"    \
     "$rsh" "$rcp" "$clusterIdAndSt" 2> $errMsg)
  rc=$?
  # The remote routine answers with colon-separated fields.
  IFS=':'
  set -f ; set -- $runOutput ; set +f
  IFS="$IFS_sv"
  keyword=$1
  nodeStatus=$2
  adapterType=$3
  installedDaemonVersion=$4
  installedProductVersion=$5
  installedOsName=$6

  if [[ $rc = 0 && $nodeStatus = success && $keyword = checkNewClusterNode ]]
  then
    # The checkNewClusterNode call succeeded.
    # Build the line that will represent this node in the mmsdrfs file.
    nodeCount=$nodeCount+1
    sdrfsLine="$nodesetId:$MEMBER_NODE::$nodeCount:$nodeNumber:$shortName"
    sdrfsLine="$sdrfsLine:$ipa:$adminNodeName:$designation:$adapterType"
    sdrfsLine="$sdrfsLine:$lapiWindow:$switchNumber:$addNodeState:$adapterType"
    sdrfsLine="$sdrfsLine:$daemonNodeName:$adminShortName"
    sdrfsLine="$sdrfsLine:$installedDaemonVersion:$installedProductVersion"
    sdrfsLine="$sdrfsLine:$installedOsName:$quorumField:$gsNodeNumber:"

    # Add the MEMBER_NODE line to the other lines
    # that will go in the mmsdrfs file.
    print -- "$sdrfsLine" >> $tmpsdrfs
    checkForErrorsAndReturn "Writing to file $tmpsdrfs" $?
    [[ $? -ne 0 ]] && break

    # Add the node name to the list of successful nodes.
    print -- "$adminNodeName" >> $goodnodes
    checkForErrorsAndReturn "Writing to file $goodnodes" $?
    [[ $? -ne 0 ]] && break
    nodesAddedToCluster=$nodesAddedToCluster+1

    if [[ $quorumField = $quorumNode ]]
    then
      (( quorumNodeCount += 1 ))
      if [[ -n $diskQuorumInEffect ]]
      then
        newQuorumNodes=yes
        # NOTE(review): the v[...] subscripts below still hold values
        # from the last MEMBER_NODE line of the earlier mmsdrfs scan,
        # not this new node's data; confirm this is intended.
        if [[ -z $quorumNodeNames ]]
        then
          # This is the first node to add to the lists.
          quorumNodeNumbers="${v[$NODE_NUMBER_Field]}"
          quorumNodeNames="${v[$REL_HOSTNAME_Field]}"
        else
          if [[ ${v[$REL_HOSTNAME_Field]} = $ourNodeName ]]
          then
            # This is the local node; add it at the front of the lists
            # so it will be the first quorum node used.
            quorumNodeNumbers="${v[$NODE_NUMBER_Field]},${quorumNodeNumbers}"
            quorumNodeNames="${v[$REL_HOSTNAME_Field]},${quorumNodeNames}"
          else
            # This is not the local node; add it at the end of the lists.
            quorumNodeNumbers="${quorumNodeNumbers},${v[$NODE_NUMBER_Field]}"
            quorumNodeNames="${quorumNodeNames},${v[$REL_HOSTNAME_Field]}"
          fi
        fi  # end of if [[ -z $quorumNodeNames ]]
      fi  # end of if [[ -n $diskQuorumInEffect ]]
    fi  # end of if [[ $quorumField = $quorumNode ]]

    # Ensure that we will not exceed the maximum number of quorum nodes.
    if [[ $quorumNodeCount -gt $maxQuorumNodes && -z $fatalError ]]
    then
      # Error:  The number of quorum nodes exceeds the maximum allowed.
      printErrorMsg 393 $mmcmd $maxQuorumNodes
      fatalError=yes
      break
    fi

  else
    # The checkNewClusterNode call failed.
    # Not all errors are considered terminal.
    # If an individual node fails for a known reason,
    # we will not include it in the cluster but will
    # continue with the rest of the nodes.

    # Tell the world what went wrong.
    if [[ $nodeStatus = not_new ]]
    then
      # Node already belongs to a cluster.
      printErrorMsg 348 $mmcmd $adminNodeName
    elif [[ $nodeStatus = not_supported ]]
    then
      # Wrong GPFS code level.
      printErrorMsg 349 $mmcmd $adminNodeName
    elif [[ $nodeStatus = ipa_alias ]]
    then
      # IP address aliasing is not supported.
      printErrorMsg 476 $mmcmd $nodeName
    elif [[ $nodeStatus = ipa_missing ]]
    then
      # The daemon node adapter was not found on the admin node.
      printErrorMsg 175 $mmcmd $nodeName $nodeName2
    elif [[ $rc = $MM_HostDown || $rc = $MM_ConnectTimeout ]]
    then
      # The node cannot be reached.
      printErrorMsg 340 $mmcmd $adminNodeName
    else
      # Unexpected error.  Display all possible error messages.
      [[ -s $errMsg ]] && $cat $errMsg 1>&2
      [[ $rc -eq 0 ]] && rc=1
      checkForErrorsAndReturn "checkNewClusterNode $adminNodeName" $rc
      # Best-effort asynchronous undo of the partial install on the node.
      run on1 $adminNodeName removeFromCluster > /dev/null 2>&1 &
#     break
    fi

    # Append the node name to the list of failed nodes.
    failedNodes="$failedNodes\n\t\t$adminNodeName"

    # Adjust the node number for the next iteration.
    nodeNumber=$nodeNumber-1

  fi  # end of if [[ $rc = 0 && $nodeStatus = success ]]

  $rm -f $errMsg

done  # end of while read -u3 nodeDesc (Loop through the nodes to be added)


#########################################################
# If we have no nodes to add, issue a message and quit.
#########################################################
[[ -s $goodnodes ]] ||
{
  # Command is quitting due to no valid nodes.
  printErrorMsg 387 $mmcmd $mmcmd
  cleanupAndExit
}


#######################################################################
# At this point, we have successfully processed all of the new nodes.
# Next, build the nodeset header line and add it to the mmsdrfs file.
#######################################################################
# Parse the old nodeset header line (saved during the mmsdrfs scan).
# The leading "-" keeps the fields aligned with the 1-based offsets.
IFS=":"
set -f ; set -A v -- - $nodesetHdr ; set +f
IFS="$IFS_sv"

# Update the header with the new node count and version information.
v[$NODE_COUNT_Field]=$nodeCount
# The value 0 was only a placeholder for "unknown"; write it back empty.
[[ $highestVersion = 0 ]] && highestVersion=""
v[$MAX_DAEMON_VERSION_Field]=$highestVersion
v[$OS_ENVIRONMENT_Field]=$osEnvironment

# Add the nodeset header line to the mmsdrfs file.
print_newLine >> $newsdrfs
checkForErrors "writing to file $newsdrfs" $?


############################################################
# Add the lines for the new nodes to the new mmsdrfs file.
############################################################
$cat $tmpsdrfs >> $newsdrfs
# Capture the status immediately; $? inside the "then" branch
# would reflect the [[ ]] test (always 0), not the failed cat.
rc=$?
if [[ $rc -ne 0 ]]
then
  printErrorMsg 171 "$mmcmd" "copying $tmpsdrfs to $newsdrfs" $rc
  fatalError=yes
fi


#######################################################
# Put the cfg information back into the mmsdrfs file.
#######################################################
# Use the updated cfg file if it was modified during this
# command; otherwise restore the copy extracted earlier.
if [[ $mmfscfgModified = yes ]]
then
  appendCfgFile $nodesetId $tmpCfg $newsdrfs
else
  appendCfgFile $nodesetId $oldcfgFile $newsdrfs
fi
checkForErrors "appendCfgFile" $?
mmfscfgModified=no


#############################################
# Sort the new version of the mmsdrfs file.
#############################################
# LC_ALL=C forces a locale-independent, byte-wise sort order.
LC_ALL=C $SORT_MMSDRFS $newsdrfs -o $newsdrfs
# Capture the status immediately; $? inside the "then" branch
# would reflect the [[ ]] test (always 0), not the failed sort.
rc=$?
if [[ $rc -ne 0 ]]
then
  printErrorMsg 171 "$mmcmd" "sorting $newsdrfs" $rc
  fatalError=yes
fi


#####################################################################
# If disk-based quorum is in effect, and new quorum nodes are being
# added to the cluster, the quorum disks must be reformatted.
#####################################################################
if [[ -n $diskQuorumInEffect && -n $newQuorumNodes && -z $fatalError ]]
then
  formatPaxosDisks $diskLines $quorumNodeNumbers $quorumNodeNames mmaddnode
  if [[ $? -ne 0 ]]
  then
    # GPFS failed to initialize the tiebreaker disks.
    printErrorMsg 132 $mmcmd
    fatalError=yes
  fi
fi


####################################################################
# Issue a warning if we will exceed the maximum recommended number
# of quorum nodes.
####################################################################
if [[ $quorumNodeCount -gt $maxRecQuorumNodes && -z $fatalError ]]
then
  # Warning:  The number of quorum nodes exceeds the recommended maximum.
  # This is advisory only; the command keeps going.
  printErrorMsg 394 $mmcmd $maxRecQuorumNodes
fi


############################################################
# If anything failed, remove the skeleton sdrfs files from
# the new nodes, issue an error message, and die.
############################################################
if [[ -n $fatalError ]]
then
  # Remove the already-installed system files.
  # A hard link is used so the async job keeps a stable copy of the
  # node list even after our own work files are cleaned up.
  printErrorMsg 351 $mmcmd
  $ln $goodnodes ${goodnodes}async
  $mmcommon onall_async ${goodnodes}async removeFromCluster &

  # The command failed.
  printErrorMsg 389 $mmcmd
  cleanupAndExit
fi


##############################################################
# Lock the Gpfs object to prevent the daemon from coming up
# until all changes are successfully committed.
##############################################################
[[ $getCredCalled = no ]] && getCred
setRunningCommand "$mmcmd" $primaryServer
checkForErrors setRunningCommand $?
gpfsLocked=yes


##########################################################################
# Put the new mmsdrfs file into the sdr.  This will make the newly-added
# nodes visible to the rest of the nodes in the cluster.
##########################################################################
# Ignore interrupts from here on so the commit cannot be cut short.
trap "" HUP INT QUIT KILL
gpfsObjectInfo=$(commitChanges  \
   $HOME_CLUSTER $nsId $gpfsObjectInfo $newGenNumber $newsdrfs $primaryServer)
rc=$?
if [[ $rc -ne 0 ]]
then
  # We cannot replace the file in the sdr.
  printErrorMsg 381 $mmcmd

  # Remove the already-installed system files.
  # (Hard link gives the async job a stable copy of the node list.)
  printErrorMsg 351 $mmcmd
  $ln $goodnodes ${goodnodes}async
  $mmcommon onall_async ${goodnodes}async removeFromCluster &
  cleanupAndExit
fi


#####################################################################
# Unlock the sdr.
#####################################################################
[[ $sdrLocked = yes ]] &&  \
  freeLockOnServer $primaryServer $ourNodeNumber > /dev/null
sdrLocked=no
# Restore normal interrupt handling now that the commit is complete.
trap posttrap HUP INT QUIT KILL


#####################################################################
# At this point, skeleton sdrfs files have been put on the nodes
# in the success list.  If there are any nodes in the failure list,
# we issue a message telling the user to use the mmaddnode command
# to add them to the sdrfs file once they become reachable.
#####################################################################
# Report any nodes that could not be added to the user.
[[ -n $failedNodes ]] &&  \
  printErrorMsg 353 $mmcmd "$failedNodes"

# If not all nodes were added to the cluster,
# tell the user how many made it through.
[[ $nodesAddedToCluster -lt $totalNodeCount ]] &&  \
  printErrorMsg 12 $mmcmd $nodesAddedToCluster $totalNodeCount

# Issue "command was successful" message
printErrorMsg 272 $mmcmd


###################################################################
# Asynchronously propagate the changes to all nodes, new and old.
###################################################################
$cat $goodnodes >> $allnodes
checkForErrors "appending $goodnodes to $allnodes" $?
propagateSdrfsFile async $allnodes $newsdrfs $newGenNumber rereadNodeList

cleanupAndExit 0

