#!/bin/ksh
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
#  
#  
# Licensed Materials - Property of IBM 
#  
# (C) COPYRIGHT International Business Machines Corp. 1997,2006 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)24 1.130 src/avs/fs/mmfs/ts/admin/mmdelnode.sh, mmfs, avs_rgpfs24, rgpfs240610b 12/5/05 15:41:10
##############################################################################
#
# Usage:  mmdelnode {-a | -f | -N {Node[,Node...] | NodeFile | NodeClass}}
#
# where:
#
#   -a                 specifies that all of the nodes in the cluster
#                      are to be deleted.
#
#   -N Node,Node,...   Specify the nodes to be deleted from the cluster.
#   -N NodeFile        NodeClass may be one of several possible node classes
#   -N NodeClass       (e.g., quorumnodes, managernodes, nsdnodes, etc.)
#                      If none of -N, -n, or nodelist is specified,
#                      the entire cluster is changed; otherwise, the changes
#                      are made to the specified nodes.
#                      -N cannot be specified with the obsolete -n options.
#
# Obsolete but still supported options:
#
#   -n Node,Node,...   is a comma-separated list of nodes to be deleted
#                      from the cluster.
#
#   -n NodeFile        is a file containing the names of the nodes
#                      to be deleted from the current GPFS cluster.
#
# Undocumented option:
#
#   -f                 remove all GPFS configuration files on the node.
#                      This option applies only to the node on which
#                      the mmdelnode command is issued.
#
##############################################################################

# Include global declarations and service routines.
# These sourced files define the commands ($rm, $grep, ...), message
# routines, temp-file conventions, and mmsdrfs field offsets used below.
. /usr/lpp/mmfs/bin/mmglobfuncs
. /usr/lpp/mmfs/bin/mmsdrfsdef

sourceFile="mmdelnode.sh"
# Enable command tracing when either the global DEBUG variable or the
# command-specific DEBUGmmdelnode variable is set in the environment.
[[ -n $DEBUG || -n $DEBUGmmdelnode ]] && set -x
$mmTRACE_ENTER "$*"


# Local work files.  Names should be of the form:
#   fn=${tmpDir}fn.${mmcmd}.$$
allnodes=${tmpDir}allnodes.${mmcmd}.$$            # list of all cluster nodes
delnodes=${tmpDir}delnodes.${mmcmd}.$$            # list of the nodes to delete
oldcfgFile=${tmpDir}oldcfgFile.${mmcmd}.$$        # original mmfs.cfg file
remnodes=${tmpDir}remnodes.${mmcmd}.$$            # list of the remaining nodes
nodesToDelete=${tmpDir}nodesToDelete.${mmcmd}.$$  # nodes from the command line
serverNodes=${tmpDir}serverNodes.${mmcmd}.$$      # list of NSD servernodes

# NOTE(review): LOCAL_FILES is presumably consumed by the cleanup code in
# the sourced mmglobfuncs (cleanupAndExit) to remove these files -- confirm.
LOCAL_FILES=" $allnodes $nodesToDelete $delnodes $oldcfgFile $remnodes $serverNodes "


# Local declarations

usageMsg=356      # message number of this command's usage text
integer lineCnt   # sequence number for node lines kept in the new mmsdrfs

# Local routines



#######################
# Mainline processing
#######################


##################################
# Process the command arguments.
##################################
# Accept the usual help flags before regular option parsing begins.
# NOTE(review): arg1 and argc are presumably set by the sourced
# mmglobfuncs code from the original command line -- confirm.
[[ $arg1 = '-?' || $arg1 = '-h' || $arg1 = '--help' || $arg1 = '--' ]] &&  \
  syntaxError "help" $usageMsg

while getopts :afn:N: OPT
do

  case $OPT in

    a) # Delete all of the nodes in the cluster.
       [[ -n $aflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       aflag=yes
       # argc tracks the count of not-yet-consumed arguments.
       # NOTE(review): the bare "argc=argc-1" is evaluated arithmetically
       # only if argc is an integer variable (presumably declared in the
       # sourced files) -- confirm.
       argc=argc-1
       ;;

    f) # Remove all config files on this node.
       [[ -n $fflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       fflag=yes
       argc=argc-1
       ;;

    n) # node names file (obsolete; kept for compatibility)
       [[ -n $nflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       nflag=yes
       narg=$OPTARG
       argc=argc-2
       ;;

    N) # node names list, file, or class
       [[ -n $Nflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       Nflag=yes
       Narg=$OPTARG
       argc=argc-2
       ;;

    +[afnN]) # Invalid option (ksh getopts reports "+x" forms separately)
       syntaxError "invalidOption" $usageMsg $OPT
       ;;

    :) # Missing argument
       syntaxError "missingValue" $usageMsg $OPTARG
       ;;

    *) # Invalid option
       syntaxError "invalidOption" $usageMsg $OPTARG
       ;;

  esac

done  # end of while getopts :afn:N: OPT do

# ksh evaluates the arithmetic expression OPTIND-1 in place.
shift OPTIND-1


# The -a, -f, -n, and -N options are pairwise mutually exclusive.
# The first conflicting pair detected ends the command with an
# "invalidCombination" syntax error; the checks run in a fixed order.
if [[ -n $aflag && -n $fflag ]]
then
  syntaxError "invalidCombination" $usageMsg "-a" "-f"
fi

if [[ -n $aflag && -n $nflag ]]
then
  syntaxError "invalidCombination" $usageMsg "-a" "-n"
fi

if [[ -n $aflag && -n $Nflag ]]
then
  syntaxError "invalidCombination" $usageMsg "-a" "-N"
fi

if [[ -n $fflag && -n $nflag ]]
then
  syntaxError "invalidCombination" $usageMsg "-f" "-n"
fi

if [[ -n $fflag && -n $Nflag ]]
then
  syntaxError "invalidCombination" $usageMsg "-f" "-N"
fi

if [[ -n $nflag && -n $Nflag ]]
then
  syntaxError "invalidCombination" $usageMsg "-n" "-N"
fi

# A trailing operand is only valid when it is the node list itself,
# i.e., when none of -a, -f, or -n was given.
if [[ $argc -gt 0 && ( -n $aflag || -n $fflag || -n $nflag ) ]]
then
  syntaxError "extraArg" $usageMsg "$1"
fi


###################################################################
# If -f is specified, wipe out all of the GPFS config information
# on this node and exit.  -f acts only on the local node; it does
# not update the mmsdrfs file or contact any other node.
###################################################################
if [[ -n $fflag ]]
then
  # Verify that the daemon is not running on this node.
  # LC_ALL=C keeps the tsstatus output in English so the greps match.
  tsstatusOutput=$(LC_ALL=C $tsstatus -1 2>&1)
  print -- "$tsstatusOutput" | $grep -e 'file system daemon is running'  \
                                     -e 'Waiting for quorum' >/dev/null
  if [[ $? -eq 0 ]]
  then
    # MMFS is still active on this node.
    printErrorMsg 63 $mmcmd "$($hostname)"
  else
    # Go ahead and wipe out the files.
    removeFromCluster
  fi
  cleanupAndExit
fi


#####################################################################
# Complete the parameter checking and create the nodesToDelete file.
# Unless -a was specified (every node implied), the result is
# $nodesToDelete: a file of verified node IP addresses.
#####################################################################
[[ $osName != AIX ]] && resolveOrder=$(setHostResolveOrder)
$rm -f $nodesToDelete
if [[ -n $aflag ]]
then
  :  # There is nothing we need to do if -a was specified.

elif [[ -n $Nflag ]]
then
  # Convert the passed data into a file containing IP addresses.
  # -N accepts a node list, a node file, or a node class.
  createVerifiedNodefile $Narg $IPA_Field no $nodesToDelete
  [[ $? -ne 0 ]] && cleanupAndExit

else
  if [[ -n $nflag ]]
  then
    # Check whether the node names file parameter exists and is readable.
    if [[ ! -f $narg || ! -r $narg ]]
    then
      # The node names file cannot be read.
      printErrorMsg 43 $mmcmd $narg
      cleanupAndExit
    fi

    # Filter out comment lines and localhost entries.
    $grep -v -e "localhost" -e "^#" "$narg" > $tmpfile

  else
    # If neither the -a nor the -n option was used,
    # a list of node names is required.
    if [[ $argc -eq 1 ]]
    then
      # If there is exactly one string left,
      # it is assumed to be the list of nodes to delete.
      arglist=$1
    elif [[ $argc -gt 1 ]]
    then
      # If more than one string is left,
      # we have a syntax error.
      syntaxError "extraArg" $usageMsg "$2"
    else
      # If there are no more parameters,
      # a required parameter is missing.
      syntaxError "missingArgs" $usageMsg
    fi

    # Convert the input node list into a file containing
    # the nodes to delete.  IFS is set to ',' only around the "for"
    # word-splitting and restored inside the loop body so that the
    # commands invoked there see the normal field separator.
    IFS=','
    for node in $arglist
    do
      IFS="$IFS_sv"    # Restore the default IFS setting.

      # Append the node to the temp file.
      print -- "$node" >> $tmpfile
      checkForErrors "writing to file $tmpfile" $?

      IFS=','          # Set the field separator for the next iteration.
    done
    IFS="$IFS_sv"    # Restore the default IFS setting.

  fi  # end of if [[ -n $nflag ]]

  # Convert any entries in the temp file into IP addresses.
  if [[ -s $tmpfile ]]
  then
    createVerifiedNodefile $tmpfile $IPA_Field no $nodesToDelete
    [[ $? -ne 0 ]] && cleanupAndExit
  else
    # No node names were specified.
    # NOTE(review): $narg is empty when the nodes came from the command
    # line rather than -n, so message 328 is then issued with a blank
    # file-name argument -- confirm that is acceptable.
    printErrorMsg 328 $mmcmd $narg
    cleanupAndExit
  fi

fi   # end of if [[ -n $aflag ]]


#######################################################################
# Set up trap exception handling and call the gpfsInit function.
# It will ensure that the local copy of the mmsdrfs and the rest of
# the GPFS system files are up-to-date and will obtain the sdr lock.
#
# Note:  We are using a variation of gpfsInit - gpfsInitGeneric,
# which allows the command to still run on old GPFS cluster types.
# If the cluster type is lc or single, things work as they always do.
# But if the cluster type is sp, rpd, or hacmp, we are dealing with
# an obsolete GPFS cluster environment.  The daemon will never be
# allowed to start under these circumstances, nor will the bulk of
# the mm commands be allowed to work.  The only exception are commands
# (mmexportfs, mmdelnode) needed by the user to migrate to a supported
# environment.  Under such conditions it is acceptable to assume that
# the daemon is indeed not running anywhere (i.e., there is no need to
# run verifyDaemonInactive) and to ignore the true commit processing
# and the rebuilding of the mmfs environment.  The idea is to allow
# the user to run "mmexportfs all", followed by "mmdelnode -a", and
# then create a new cluster of type lc.
#######################################################################
# Install the pre-commit interrupt handler, then initialize the GPFS
# environment and take the sdr lock (see the block comment above for
# why the generic variant of gpfsInit is used here).
trap pretrap HUP INT QUIT KILL
gpfsInitOutput=$(gpfsInitGeneric $lockId)
setGlobalVar $? $gpfsInitOutput


#######################################################################
# Create a new version of the mmsdrfs file.
#
# The current mmsdrfs file is read line by line on file descriptor 3.
# Each line is split on ':' into the array v and handled according to
# its line type; surviving lines are appended to $newsdrfs.  The scan
# also builds the work files used by the rest of the command:
#   $allnodes    - every node currently in the cluster
#   $delnodes    - the nodes being deleted
#   $remnodes    - the nodes that will remain
#   $serverNodes - NSD primary/backup server nodes
#   $oldcfgFile  - the saved mmfs.cfg content (re-appended later)
#######################################################################
$rm -f $newsdrfs $allnodes $remnodes $tmpfile $delnodes $serverNodes $oldcfgFile
lineCnt=0
coreQuorumDefined=""

IFS=":"
exec 3<&-
exec 3< $mmsdrfsFile
while read -u3 sdrfsLine
do
  # Parse the line.
  # "set -f" suppresses globbing while the colon-separated fields are
  # split into the array v; the leading "-" pads the array so the first
  # real field lands at index 1, matching the *_Field offsets that are
  # defined in the sourced mmsdrfsdef file.
  set -f ; set -A v -- - $sdrfsLine ; set +f
  IFS="$IFS_sv"    # Restore the default IFS settings.
  printLine=true   # Assume the line will be printed.

  # Change some of the fields depending on the type of line.
  case ${v[$LINE_TYPE_Field]} in

    $VERSION_LINE )  # This is the global header line.
      # Increment the generation number
      # NOTE(review): the bare "+1" is evaluated arithmetically only if
      # newGenNumber is an integer variable (presumably declared in the
      # sourced files) -- confirm.
      newGenNumber=${v[$SDRFS_GENNUM_Field]}+1
      v[$SDRFS_GENNUM_Field]=$newGenNumber
      ;;

    $NODESET_HDR )    # This is the nodeset header line.
      # The line will be rebuilt after we have the new value
      # for node count (field 5).  The other fields are saved in four
      # pieces and reassembled near the end of the command.
      nodesetHdr_A="${v[1]}:${v[2]}:${v[3]}:${v[4]}"
      nodesetHdr_B="${v[6]}:${v[7]}:${v[8]}:${v[9]}:${v[10]}:${v[11]}"
      nodesetHdr_C="${v[12]}:${v[13]}:${v[14]}:${v[15]}:${v[16]}:${v[17]}"
      nodesetHdr_D="${v[18]}:${v[19]}:${v[20]}:${v[21]}:${v[22]}"
      printLine=false
      ;;

    $MEMBER_NODE )  # This line describes a node.
      # Add the node to the list of nodes presently in the cluster.
      print -- "${v[$REL_HOSTNAME_Field]}" >> $allnodes
      checkForErrors "writing to file $allnodes" $?

      # Find out if core quorum is currently being used.
      [[ -n ${v[$CORE_QUORUM_Field]} ]] &&  \
        coreQuorumDefined=yes

      # Determine whether this is one of the nodes to be removed.
      # The awk script checks whether the first field in the nodesToDelete
      # file matches the IP address of the current MEMBER_NODE line.
      # If yes, the deleteThisNode flag will be set to 'yes'.
      # All other lines are passed unchanged and written to a tmpfile.

      # Ensure that tmpfile exists at the end, even if empty.
      # If this isn't done, the mv command further down will fail.
      $touch $tmpfile

      if [[ -n $aflag ]]
      then
        deleteThisNode=yes
      else
        deleteThisNode=$($awk '          \
          $1 == "'${v[$IPA_Field]}'" {   \
            { print "yes" }              \
            { exit }                     \
          }                              \
        ' $nodesToDelete)
        checkForErrors "awk" $?
      fi

      if [[ $deleteThisNode = yes ]]
      then
        # This node is being deleted.
        printLine=false
        print -- "${v[$REL_HOSTNAME_Field]}" >> $delnodes
        checkForErrors "writing to file $delnodes" $?

        # See if the node is one of the "interesting" nodes.
        # Deleting the primary/backup config server or the local node
        # is detected here and rejected later in the command.
        [[ ${v[$REL_HOSTNAME_Field]} = $primaryServer ]] &&  \
          deletedPrimaryServer=${v[$REL_HOSTNAME_Field]}
        [[ ${v[$REL_HOSTNAME_Field]} = $backupServer ]] &&  \
          deletedBackupServer=${v[$REL_HOSTNAME_Field]}
        [[ ${v[$REL_HOSTNAME_Field]} = $ourNodeName ]] &&  \
          deletingOurNode=${v[$REL_HOSTNAME_Field]}

        # Add the short name to a list of node names to be used
        # to back out any local changes in the mmfs.cfg file.
        [[ -z $deletedNodeNames ]]  \
           && deletedNodeNames=${v[$NODE_NAME_Field]}  \
           || deletedNodeNames="$deletedNodeNames,${v[$NODE_NAME_Field]}"

      else
        # This node is not being deleted.

        # Adjust the line sequence number.
        lineCnt=$lineCnt+1
        v[$LINE_NUMBER_Field]=$lineCnt

        # Add the node to the remaining nodes list.
        print -- "${v[$REL_HOSTNAME_Field]}" >> $remnodes
        checkForErrors "writing to file $remnodes" $?

        # Keep track of the presence of quorum nodes.
        [[ ${v[$CORE_QUORUM_Field]} != $nonQuorumNode ]] &&  \
          quorumNodesDefined=yes
        [[ ${v[$CORE_QUORUM_Field]} = $quorumNode &&
           ${v[$ADDNODE_STATE_Field]} = $OLD_NODE ]] &&  \
          oldQuorumNodeFound=yes

      fi  # end of if [[ $deleteThisNode = yes ]]
      ;;

    $SG_HEADR )    # This is the header line for some file system.
      [[ ${v[$FS_TYPE_Field]} = $localfs ]] &&  \
        localFileSystemsFound=yes
      [[ ${v[$FS_TYPE_Field]} = $remotefs ]] &&  \
        remoteFileSystemsFound=yes
      ;;

    $SG_DISKS )    # This line describes some disk.

      # Collect the names of all server nodes.
      # NOTE(review): $diskName is never assigned in this script; unless
      # it is set by the sourced files, these records start with an
      # empty disk-name field -- confirm intent.
      if [[ -n ${v[$NSD_PRIMARY_NODE_Field]} ]]
      then
        print -- "$diskName ${v[$NSD_PRIMARY_NODE_Field]}" >> $serverNodes
        rc=$?
        if [[ -n ${v[$NSD_BACKUP_NODE_Field]} ]]
        then
          print -- "$diskName ${v[$NSD_BACKUP_NODE_Field]}" >> $serverNodes
          rc=$?
        fi
        checkForErrors "writing to file $serverNodes" $rc
      fi
      ;;

    $MMFSCFG )     # This line contains mmfs.cfg information.

      # Remove the line from the mmsdrfs file for now.  The mmfs.cfg
      # information will be added back before committing the changes.
      printLine=false

      # Extract the mmfs.cfg information.
      # It is everything past the first 4 fields.
      cfgLine="${v[5]}:${v[6]}:${v[7]}:${v[8]}:${v[9]}:${v[10]}:${v[11]}"
      cfgLine="$cfgLine:${v[12]}:${v[13]}:${v[14]}:${v[15]}:${v[16]}"
      cfgLine="$cfgLine:${v[17]}:${v[18]}:${v[19]}:${v[20]}:${v[21]}:${v[22]}:"

      # To preserve tabs, temporarily set IFS to new line only.
      IFS="
"
      # Strip trailing colons and write the line to the file.
      print -- "${cfgLine%%+(:)}" >> $oldcfgFile
      checkForErrors "writing to file $oldcfgFile" $?
      IFS="$IFS_sv"  # Restore the default IFS settings.
      ;;

    $REM_CLUSTER )    # This line describes a remote cluster.
      remoteClusterFound=yes
      ;;

    * )  # There is no need to look at any of the other lines.
      ;;

  esac  # end Change some of the fields

  # If the line is to be kept, write it to the new mmsdrfs file.
  # NOTE(review): print_newLine presumably reassembles the v[] array
  # into a colon-delimited record (defined in the sourced files) -- confirm.
  if [[ $printLine = true ]]
  then
    print_newLine >> $newsdrfs
    checkForErrors "writing to file $newsdrfs" $?
  fi

  IFS=":"  # Change the separator back to ":" for the next iteration.

done  # end while read -u3 sdrfsLine

IFS="$IFS_sv"  # Restore the default IFS settings.


########################################################
# Issue an error if no nodes to be deleted were found.
# (This should never happen, but check just in case.)
########################################################
if [[ ! -s $delnodes ]]
then
  # This should not happen at this point.
  print -u2 "$mmcmd: No nodes were found for deletion."
  cleanupAndExit
fi


##############################################################
# Issue an error if the entire cluster is being destroyed but
# there is still file system or remote cluster information.
# An empty $remnodes file means every node is being deleted.
##############################################################
if [[ ! -s $remnodes ]]
then
  # Make sure that there are no file systems left behind.
  if [[ -n $localFileSystemsFound ]]
  then
    # This cluster contains filesystems.
    printErrorMsg 310 $mmcmd
    cleanupAndExit
  fi

  # Make sure that there are no remote cluster declarations left behind.
  if [[ -n $remoteFileSystemsFound || -n $remoteClusterFound ]]
  then
    # This cluster contains remote declarations.
    printErrorMsg 268 $mmcmd
    cleanupAndExit
  fi
fi  # end of if [[ ! -s $remnodes ]]


###################################################################
# The daemon cannot be running on any of the nodes that will be
# deleted.  Depending on the circumstances (see inline comments),
# it may be necessary to stop GPFS on the remaining nodes as well.
#
# Note:  The verifyDaemonInactive call also gets the Gpfs object
#        lock which will prevent the daemon from starting until
#        the command completes.
###################################################################
if [[ $MMMODE = lc || $MMMODE = single ]]
then
  if [[ -z $coreQuorumDefined ]]
  then
    # If core quorum is not in effect, the daemon must be down everywhere.
    nodesToCheck=$allnodes
  elif [[ -s $remnodes && -z $oldQuorumNodeFound ]]
  then
    # If core quorum is in effect and none of the remaining quorum nodes
    # has been accepted yet by the daemon (they are all marked new),
    # the daemon must be stopped everywhere.
    nodesToCheck=$allnodes
  else
    # In all other cases, we can delete the nodes dynamically.
    nodesToCheck=$delnodes
  fi

  # Verifying that GPFS is stopped on all affected nodes.
  printInfoMsg 453
  verifyDaemonInactive $nodesToCheck $mmcmd
  [[ $? -ne 0 ]] && cleanupAndExit

  # Record whether the entire cluster was verified down.  This compares
  # the two file-name strings, not the file contents.
  # NOTE(review): daemonInactive is not referenced again in this script;
  # it may be consumed by sourced code or be a leftover -- confirm.
  if [[ $nodesToCheck = $allnodes ]]
  then
    daemonInactive=yes
  else
    daemonInactive=no
  fi
fi  # end of if [[ $MMMODE = lc || $MMMODE = single ]]


#######################################################################
# If the entire cluster is being deleted, start a background process
# that will remove our files from each of the nodes and get out.
# There is nothing to commit because everything is going away anyway.
#######################################################################
if [[ ! -s $remnodes ]]
then
  # Clean up the lock file.
  [[ $sdrLocked = yes ]] &&  \
    freeLockOnServer $primaryServer $ourNodeNumber > /dev/null
  sdrLocked=no

  # Clean up all configuration files.
  # A hard link is created so the async background process keeps a
  # copy of the node list after this command's temp files are removed.
  printErrorMsg 271 $mmcmd
  $ln $delnodes ${delnodes}async
  $mmcommon onall_async ${delnodes}async removeFromCluster &

  # The command completed successfully.
  # We use the doNotUnlock option of cleanupAndExit because the
  # primary server may already be gone due to the removeFromCluster
  # we just executed, and everything is going away in any case.
  printErrorMsg 272 $mmcmd
  cleanupAndExit 0 doNotUnlock
fi


##################################################################
# If we arrive here, at least one node remains in the cluster.
##################################################################

##################################################################
# If this is an obsolete GPFS cluster environment, the user
# must delete all nodes in the cluster; there is no other option.
# (MMMODE values other than lc/single denote sp, rpd, or hacmp.)
##################################################################
if [[ $MMMODE != lc && $MMMODE != single ]]
then
  print -u2 "$mmcmd: You must delete all nodes in the current cluster and"
  print -u2 "    move to a supported GPFS environment (cluster type lc)."
  print -u2 "    See the GPFS Concepts, Planning, and Installation Guide for instructions."
  cleanupAndExit
fi


#############################################################
# The configuration repository server nodes (primary and
# backup) may not be deleted.  If either one was on the
# delete list, report it and terminate the command.
#############################################################
if [[ -n $deletedPrimaryServer ]]
then
  # Repository server nodes cannot be deleted.
  printErrorMsg 384 $mmcmd $deletedPrimaryServer
  cleanupAndExit
elif [[ -n $deletedBackupServer ]]
then
  # Repository server nodes cannot be deleted.
  printErrorMsg 384 $mmcmd $deletedBackupServer
  cleanupAndExit
fi


######################################################################
# The command must be issued from a node that remains in the cluster.
######################################################################
if [[ -n $deletingOurNode ]]
then
  # Issue the command from a node in the cluster.
  printErrorMsg 417 $mmcmd
  cleanupAndExit
fi


################################################################
# Ensure that there is at least one quorum node in the cluster.
# quorumNodesDefined was set while scanning the surviving
# MEMBER_NODE lines of the mmsdrfs file.
################################################################
if [[ -z $quorumNodesDefined ]]
then
  printErrorMsg 53 $mmcmd
  cleanupAndExit
fi


############################################################
# If all quorum nodes are marked "new", the daemon will not
# be able to start unless the addNodeState field is reset.
############################################################
if [[ -z $oldQuorumNodeFound ]]
then
  # Reset the addnode state of all nodes to 'old'.
  # The awk script rewrites the MEMBER_NODE lines of our cluster with
  # the addnode state field forced to OLD_NODE; all other lines are
  # passed through unchanged into $tmpfile.
  # NOTE(review): the END clause prints the never-assigned awk variable
  # "gen", which emits a blank line on stdout; this looks like a
  # leftover from a similar script -- confirm it is harmless.
  $rm -f $tmpfile
  $awk -F:  '                                                                \
     # If this is a node line, clear the addnode state field.                \
     /'^$HOME_CLUSTER:$MEMBER_NODE:'/ {                                      \
       { $'$ADDNODE_STATE_Field' = "'$OLD_NODE'" }                           \
       { print  $1":" $2":" $3":" $4":" $5":" $6":" $7":" $8":" $9":"$10":"  \
               $11":"$12":"$13":"$14":"$15":"$16":"$17":"$18":"$19":"$20":"  \
               $21":"$22":"$23":"$24":"$25":"$26":"$27":" >> "'$tmpfile'" }  \
        { next }                                                             \
     }                                                                       \
     # All other lines are echoed without change.                            \
     { print $0 >> "'$tmpfile'" }                                            \
     END { print gen }                                                       \
  ' $newsdrfs
  checkForErrors awk $?

  # The file was updated successfully.
  $mv $tmpfile $newsdrfs
  checkForErrors "mv $tmpfile $newsdrfs" $?
fi  # end of if [[ -z $oldQuorumNodeFound ]]


###################################################
# Verify that none of the nodes to be deleted are
# still defined as a primary or backup NSD server.
# Every offending node is reported before exiting.
###################################################
if [[ -s $serverNodes ]]
then
  # Sort and remove duplicate "disk server" records.
  $sort -u $serverNodes -o $serverNodes
  exec 3<&-
  exec 3< $delnodes
  while read -u3 relNodeName
  do
    $grep -w $relNodeName $serverNodes > /dev/null 2>&1
    if [[ $? -eq 0 ]]
    then
      # The node is still an NSD server for some disk.
      printErrorMsg 433 $mmcmd $relNodeName
      nsdServersFound=true
    fi
  done   # end of while read -u3 nodeLine
  [[ -n $nsdServersFound ]] &&  \
    cleanupAndExit
fi   # end of if [[ -s $serverNodes ]]


######################################################
# Add the nodeset header line back into the mmsdrfs.
# $lineCnt (the number of MEMBER_NODE lines kept) now
# becomes field 5, the node count, of the header.
######################################################
outline="$nodesetHdr_A:$lineCnt:$nodesetHdr_B:$nodesetHdr_C:$nodesetHdr_D"
print -- "$outline" >> $newsdrfs
checkForErrors "writing to file $newsdrfs" $?


######################################################
# Remove from the mmfs.cfg file any parameter values
# that are specific to the deleted nodes.
######################################################
if [[ -n $deletedNodeNames ]]
then
  $mmfixcfg $deletedNodeNames < $oldcfgFile > $newcfg
  if [[ $? != 0 ]]
  then
    # Warning:  failed to remove node-specific changes to mmfs.cfg
    # This is not fatal; the original mmfs.cfg content is kept instead.
    printErrorMsg 311 $mmcmd
  else
    # mmfixcfg worked.
    replaceMmfscfg=yes
  fi
fi  # if [[ -n $deletedNodeNames ]]


###########################################################
# Put the mmfs.cfg information back into the mmsdrfs file.
# Use the fixed-up version if mmfixcfg succeeded; fall back
# to the original content saved in $oldcfgFile otherwise.
###########################################################
if [[ $replaceMmfscfg = yes ]]
then
  appendCfgFile $nodesetId $newcfg $newsdrfs
  rc=$?
else
  appendCfgFile $nodesetId $oldcfgFile $newsdrfs
  rc=$?
fi
checkForErrors "appendCfgFile" $rc


############################################
# Sort the new version of the mmsdrfs file.
# LC_ALL=C gives a locale-independent order.
############################################
LC_ALL=C $SORT_MMSDRFS $newsdrfs -o $newsdrfs


#########################################
# Put the new mmsdrfs file into the sdr.
#########################################
# Ignore interrupts during the commit so the repository is not
# left half-updated.
trap "" HUP INT QUIT KILL
gpfsObjectInfo=$(commitChanges  \
   $HOME_CLUSTER $nsId $gpfsObjectInfo $newGenNumber $newsdrfs $primaryServer)
rc=$?
if [[ $rc -ne 0 ]]
then
  # We were unable to replace the file in the sdr.
  printErrorMsg 381 $mmcmd
  cleanupAndExit
fi

# Remove GPFS system files from the deleted nodes.  Ignore any errors.
[[ -s $delnodes ]] &&  \
  $mmcommon onall $delnodes $unreachedNodes removeFromCluster > /dev/null  2>&1


##################
# Unlock the sdr.
##################
[[ $sdrLocked = yes ]] &&  \
  freeLockOnServer $primaryServer $ourNodeNumber > /dev/null
sdrLocked=no
trap posttrap HUP INT QUIT KILL

# Indicate command was successful.
printErrorMsg 272 $mmcmd


##########################################################################
# Asynchronously propagate the changes to all remaining nodes.
# The command returns without waiting for the propagation to complete.
##########################################################################
propagateSdrfsFile async $remnodes $newsdrfs $newGenNumber rereadNodeList


cleanupAndExit 0

