source: gpfs_3.1_ker2.6.20/lpp/mmfs/bin/mmdelnode @ 67
#!/bin/ksh
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.



# Licensed Materials - Property of IBM

# (C) COPYRIGHT International Business Machines Corp. 1997,2006
# All Rights Reserved

# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.

# IBM_PROLOG_END_TAG
# @(#)24 1.130 src/avs/fs/mmfs/ts/admin/mmdelnode.sh, mmfs, avs_rgpfs24, rgpfs240610b 12/5/05 15:41:10
##############################################################################
#
# Usage:  mmdelnode {-a | -f | -N {Node[,Node...] | NodeFile | NodeClass}}
#
# where:
#
#   -a                 specifies that all of the nodes in the cluster
#                      are to be deleted.
#
#   -N Node,Node,...   Specifies the nodes to be deleted from the cluster.
#   -N NodeFile        NodeFile is a file containing the names of the
#   -N NodeClass       nodes to be deleted; NodeClass is one of several
#                      possible node classes (e.g., quorumnodes,
#                      managernodes, nsdnodes, etc.)
#                      If none of -N, -n, or a node list is specified,
#                      the entire cluster is changed; otherwise, the changes
#                      are made to the specified nodes.
#                      -N cannot be specified with the obsolete -n option.
#
# Obsolete but still supported options:
#
#   -n Node,Node,...   is a comma-separated list of nodes to be deleted
#                      from the cluster.
#
#   -n NodeFile        is a file containing the names of the nodes
#                      to be deleted from the current GPFS cluster.
#
# Undocumented option:
#
#   -f                 remove all GPFS configuration files on the node.
#                      This option applies only to the node on which
#                      the mmdelnode command is issued.
#
##############################################################################
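
# Examples (illustrative; the node names and file path are made up):
#   mmdelnode -N node3,node4      # delete two nodes by name
#   mmdelnode -N /tmp/nodefile    # delete the nodes listed in a file
#   mmdelnode -a                  # delete all of the nodes in the cluster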

# Include global declarations and service routines.
. /usr/lpp/mmfs/bin/mmglobfuncs
. /usr/lpp/mmfs/bin/mmsdrfsdef

sourceFile="mmdelnode.sh"
[[ -n $DEBUG || -n $DEBUGmmdelnode ]] && set -x
$mmTRACE_ENTER "$*"


# Local work files.  Names should be of the form:
#   fn=${tmpDir}fn.${mmcmd}.$$
allnodes=${tmpDir}allnodes.${mmcmd}.$$            # list of all cluster nodes
delnodes=${tmpDir}delnodes.${mmcmd}.$$            # list of the nodes to delete
oldcfgFile=${tmpDir}oldcfgFile.${mmcmd}.$$        # original mmfs.cfg file
remnodes=${tmpDir}remnodes.${mmcmd}.$$            # list of the remaining nodes
nodesToDelete=${tmpDir}nodesToDelete.${mmcmd}.$$  # nodes from the command line
serverNodes=${tmpDir}serverNodes.${mmcmd}.$$      # list of NSD server nodes

LOCAL_FILES=" $allnodes $nodesToDelete $delnodes $oldcfgFile $remnodes $serverNodes "


# Local declarations

usageMsg=356
integer lineCnt

# Local routines



#######################
# Mainline processing
#######################


##################################
# Process the command arguments.
##################################
[[ $arg1 = '-?' || $arg1 = '-h' || $arg1 = '--help' || $arg1 = '--' ]] &&  \
  syntaxError "help" $usageMsg

while getopts :afn:N: OPT
do

  case $OPT in

    a) # Delete all of the nodes in the cluster.
       [[ -n $aflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       aflag=yes
       argc=argc-1
       ;;

    f) # Remove all config files on this node.
       [[ -n $fflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       fflag=yes
       argc=argc-1
       ;;

    n) # node names file
       [[ -n $nflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       nflag=yes
       narg=$OPTARG
       argc=argc-2
       ;;

    N) # node names list, file, or class
       [[ -n $Nflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       Nflag=yes
       Narg=$OPTARG
       argc=argc-2
       ;;

    +[afnN]) # Invalid option
       syntaxError "invalidOption" $usageMsg $OPT
       ;;

    :) # Missing argument
       syntaxError "missingValue" $usageMsg $OPTARG
       ;;

    *) # Invalid option
       syntaxError "invalidOption" $usageMsg $OPTARG
       ;;

  esac

done  # end of while getopts :afn:N: OPT do

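# Discard the options just parsed; any remaining positional parameters
# are operands (e.g., an explicit node list).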
shift OPTIND-1


[[ -n $aflag && -n $fflag ]] &&  \
  syntaxError "invalidCombination" $usageMsg "-a" "-f"

[[ -n $aflag && -n $nflag ]] &&  \
  syntaxError "invalidCombination" $usageMsg "-a" "-n"

[[ -n $aflag && -n $Nflag ]] &&  \
  syntaxError "invalidCombination" $usageMsg "-a" "-N"

[[ -n $fflag && -n $nflag ]] &&  \
  syntaxError "invalidCombination" $usageMsg "-f" "-n"

[[ -n $fflag && -n $Nflag ]] &&  \
  syntaxError "invalidCombination" $usageMsg "-f" "-N"

[[ -n $nflag && -n $Nflag ]] &&  \
  syntaxError "invalidCombination" $usageMsg "-n" "-N"

[[ $argc -gt 0 && ( -n $aflag || -n $fflag || -n $nflag ) ]] &&  \
  syntaxError "extraArg" $usageMsg "$1"


###################################################################
# If -f is specified, wipe out all of the GPFS config information
# on this node and exit.
###################################################################
if [[ -n $fflag ]]
then
  # Verify that the daemon is not running on this node.
  tsstatusOutput=$(LC_ALL=C $tsstatus -1 2>&1)
  print -- "$tsstatusOutput" | $grep -e 'file system daemon is running'  \
                                     -e 'Waiting for quorum' >/dev/null
  if [[ $? -eq 0 ]]
  then
    # MMFS is still active on this node.
    printErrorMsg 63 $mmcmd "$($hostname)"
  else
    # Go ahead and wipe out the files.
    removeFromCluster
  fi
  cleanupAndExit
fi
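# (Illustrative usage: running "mmdelnode -f" on a node that is no
# longer part of any cluster removes its leftover GPFS configuration
# files; the option affects only the node the command is issued on.)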


#####################################################################
# Complete the parameter checking and create the nodesToDelete file.
#####################################################################
[[ $osName != AIX ]] && resolveOrder=$(setHostResolveOrder)
$rm -f $nodesToDelete
if [[ -n $aflag ]]
then
  :  # There is nothing we need to do if -a was specified.

elif [[ -n $Nflag ]]
then
  # Convert the passed data into a file containing IP addresses.
  createVerifiedNodefile $Narg $IPA_Field no $nodesToDelete
  [[ $? -ne 0 ]] && cleanupAndExit

else
  if [[ -n $nflag ]]
  then
    # Check whether the node names file parameter exists and is readable.
    if [[ ! -f $narg || ! -r $narg ]]
    then
      # The node names file cannot be read.
      printErrorMsg 43 $mmcmd $narg
      cleanupAndExit
    fi

    # Filter out comment lines and localhost entries.
    $grep -v -e "localhost" -e "^#" "$narg" > $tmpfile

  else
    # If neither the -a nor the -n option was used,
    # a list of node names is required.
    if [[ $argc -eq 1 ]]
    then
      # If there is exactly one string left,
      # it is assumed to be the list of nodes to delete.
      arglist=$1
    elif [[ $argc -gt 1 ]]
    then
      # If more than one string is left,
      # we have a syntax error.
      syntaxError "extraArg" $usageMsg "$2"
    else
      # If there are no more parameters,
      # a required parameter is missing.
      syntaxError "missingArgs" $usageMsg
    fi

    # Convert the input node list into a file containing
    # the nodes to delete.
    IFS=','
    for node in $arglist
    do
      IFS="$IFS_sv"    # Restore the default IFS setting.

      # Append the node to the temp file.
      print -- "$node" >> $tmpfile
      checkForErrors "writing to file $tmpfile" $?

      IFS=','          # Set the field separator for the next iteration.
    done
    IFS="$IFS_sv"    # Restore the default IFS setting.
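
    # (For illustration: an input list such as "node1,node2,node3" --
    # hypothetical names -- ends up in $tmpfile as three lines, one
    # node name per line.)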

  fi  # end of if [[ -n $nflag ]]

  # Convert any entries in the temp file into IP addresses.
  if [[ -s $tmpfile ]]
  then
    createVerifiedNodefile $tmpfile $IPA_Field no $nodesToDelete
    [[ $? -ne 0 ]] && cleanupAndExit
  else
    # No node names were specified.
    printErrorMsg 328 $mmcmd $narg
    cleanupAndExit
  fi

fi   # end of if [[ -n $aflag ]]


#######################################################################
# Set up trap exception handling and call the gpfsInit function.
# It will ensure that the local copy of the mmsdrfs and the rest of
# the GPFS system files are up-to-date and will obtain the sdr lock.
#
# Note:  We are using a variation of gpfsInit - gpfsInitGeneric,
# which allows the command to still run on old GPFS cluster types.
# If the cluster type is lc or single, things work as they always do.
# But if the cluster type is sp, rpd, or hacmp, we are dealing with
# an obsolete GPFS cluster environment.  The daemon will never be
# allowed to start under these circumstances, nor will the bulk of
# the mm commands be allowed to work.  The only exceptions are commands
# (mmexportfs, mmdelnode) needed by the user to migrate to a supported
# environment.  Under such conditions it is acceptable to assume that
# the daemon is indeed not running anywhere (i.e., there is no need to
# run verifyDaemonInactive) and to ignore the true commit processing
# and the rebuilding of the mmfs environment.  The idea is to allow
# the user to run "mmexportfs all", followed by "mmdelnode -a", and
# then create a new cluster of type lc.
#######################################################################
trap pretrap HUP INT QUIT KILL
gpfsInitOutput=$(gpfsInitGeneric $lockId)
setGlobalVar $? $gpfsInitOutput


#######################################################################
# Create a new version of the mmsdrfs file.
#######################################################################
$rm -f $newsdrfs $allnodes $remnodes $tmpfile $delnodes $serverNodes $oldcfgFile
lineCnt=0
coreQuorumDefined=""

IFS=":"
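# Close file descriptor 3 in case it is already open, then reopen it
# on the mmsdrfs file so the loop below can read it record by record.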
exec 3<&-
exec 3< $mmsdrfsFile
while read -u3 sdrfsLine
do
  # Parse the line.
  set -f ; set -A v -- - $sdrfsLine ; set +f
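  # ('set -f' suppresses pathname expansion while the unquoted
  # $sdrfsLine is split on the ":" IFS; 'set -A v -- - field1 ...'
  # loads the fields into array v behind a leading "-" placeholder,
  # so that field numbers start at 1.)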
  IFS="$IFS_sv"    # Restore the default IFS settings.
  printLine=true   # Assume the line will be printed.

  # Change some of the fields depending on the type of line.
  case ${v[$LINE_TYPE_Field]} in

    $VERSION_LINE )  # This is the global header line.
      # Increment the generation number
      newGenNumber=${v[$SDRFS_GENNUM_Field]}+1
      v[$SDRFS_GENNUM_Field]=$newGenNumber
      ;;

    $NODESET_HDR )    # This is the nodeset header line.
      # The line will be rebuilt after we have the new value
      # for node count (field 5).
      nodesetHdr_A="${v[1]}:${v[2]}:${v[3]}:${v[4]}"
      nodesetHdr_B="${v[6]}:${v[7]}:${v[8]}:${v[9]}:${v[10]}:${v[11]}"
      nodesetHdr_C="${v[12]}:${v[13]}:${v[14]}:${v[15]}:${v[16]}:${v[17]}"
      nodesetHdr_D="${v[18]}:${v[19]}:${v[20]}:${v[21]}:${v[22]}"
      printLine=false
      ;;

    $MEMBER_NODE )  # This line describes a node.
      # Add the node to the list of nodes presently in the cluster.
      print -- "${v[$REL_HOSTNAME_Field]}" >> $allnodes
      checkForErrors "writing to file $allnodes" $?

      # Find out if core quorum is currently being used.
      [[ -n ${v[$CORE_QUORUM_Field]} ]] &&  \
        coreQuorumDefined=yes

      # Determine whether this is one of the nodes to be removed.
      # The awk script checks whether the first field in the nodesToDelete
      # file matches the IP address of the current MEMBER_NODE line.
      # If yes, the deleteThisNode flag will be set to 'yes';
      # otherwise it is left empty.

      # Ensure that tmpfile exists at the end, even if empty.
      # If this isn't done, the mv command further down will fail.
      $touch $tmpfile

      if [[ -n $aflag ]]
      then
        deleteThisNode=yes
      else
        deleteThisNode=$($awk '          \
          $1 == "'${v[$IPA_Field]}'" {   \
            { print "yes" }              \
            { exit }                     \
          }                              \
        ' $nodesToDelete)
        checkForErrors "awk" $?
      fi
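
      # (For illustration: if this line's IP address field were, say,
      # 9.114.68.71 -- a made-up address -- and that value appeared as
      # the first field of a line in $nodesToDelete, the awk script
      # would print "yes"; otherwise deleteThisNode remains empty.)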

      if [[ $deleteThisNode = yes ]]
      then
        # This node is being deleted.
        printLine=false
        print -- "${v[$REL_HOSTNAME_Field]}" >> $delnodes
        checkForErrors "writing to file $delnodes" $?

        # See if the node is one of the "interesting" nodes.
        [[ ${v[$REL_HOSTNAME_Field]} = $primaryServer ]] &&  \
          deletedPrimaryServer=${v[$REL_HOSTNAME_Field]}
        [[ ${v[$REL_HOSTNAME_Field]} = $backupServer ]] &&  \
          deletedBackupServer=${v[$REL_HOSTNAME_Field]}
        [[ ${v[$REL_HOSTNAME_Field]} = $ourNodeName ]] &&  \
          deletingOurNode=${v[$REL_HOSTNAME_Field]}

        # Add the short name to a list of node names to be used
        # to back out any local changes in the mmfs.cfg file.
        [[ -z $deletedNodeNames ]]  \
           && deletedNodeNames=${v[$NODE_NAME_Field]}  \
           || deletedNodeNames="$deletedNodeNames,${v[$NODE_NAME_Field]}"

      else
        # This node is not being deleted.

        # Adjust the line sequence number.
        lineCnt=$lineCnt+1
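        # (This is arithmetic, not string concatenation: lineCnt was
        # declared with 'integer' in the local declarations above.)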
        v[$LINE_NUMBER_Field]=$lineCnt

        # Add the node to the remaining nodes list.
        print -- "${v[$REL_HOSTNAME_Field]}" >> $remnodes
        checkForErrors "writing to file $remnodes" $?

        # Keep track of the presence of quorum nodes.
        [[ ${v[$CORE_QUORUM_Field]} != $nonQuorumNode ]] &&  \
          quorumNodesDefined=yes
        [[ ${v[$CORE_QUORUM_Field]} = $quorumNode &&
           ${v[$ADDNODE_STATE_Field]} = $OLD_NODE ]] &&  \
          oldQuorumNodeFound=yes

      fi  # end of if [[ $deleteThisNode = yes ]]
      ;;

    $SG_HEADR )    # This is the header line for some file system.
      [[ ${v[$FS_TYPE_Field]} = $localfs ]] &&  \
        localFileSystemsFound=yes
      [[ ${v[$FS_TYPE_Field]} = $remotefs ]] &&  \
        remoteFileSystemsFound=yes
      ;;

    $SG_DISKS )    # This line describes some disk.

      # Collect the names of all server nodes.
      if [[ -n ${v[$NSD_PRIMARY_NODE_Field]} ]]
      then
        print -- "$diskName ${v[$NSD_PRIMARY_NODE_Field]}" >> $serverNodes
        rc=$?
        if [[ -n ${v[$NSD_BACKUP_NODE_Field]} ]]
        then
          print -- "$diskName ${v[$NSD_BACKUP_NODE_Field]}" >> $serverNodes
          rc=$?
        fi
        checkForErrors "writing to file $serverNodes" $rc
      fi
      ;;

    $MMFSCFG )     # This line contains mmfs.cfg information.

      # Remove the line from the mmsdrfs file for now.  The mmfs.cfg
      # information will be added back before committing the changes.
      printLine=false

      # Extract the mmfs.cfg information.
      # It is everything past the first 4 fields.
      cfgLine="${v[5]}:${v[6]}:${v[7]}:${v[8]}:${v[9]}:${v[10]}:${v[11]}"
      cfgLine="$cfgLine:${v[12]}:${v[13]}:${v[14]}:${v[15]}:${v[16]}"
      cfgLine="$cfgLine:${v[17]}:${v[18]}:${v[19]}:${v[20]}:${v[21]}:${v[22]}:"

      # To preserve tabs, temporarily set IFS to new line only.
      IFS="
"
      # Strip trailing colons and write the line to the file.
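      # (The ksh pattern '%%+(:)' below removes the longest trailing
      # run of one or more colons.)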
      print -- "${cfgLine%%+(:)}" >> $oldcfgFile
      checkForErrors "writing to file $oldcfgFile" $?
      IFS="$IFS_sv"  # Restore the default IFS settings.
      ;;

    $REM_CLUSTER )    # This line describes a remote cluster.
      remoteClusterFound=yes
      ;;

    * )  # There is no need to look at any of the other lines.
      ;;

  esac  # end Change some of the fields

  # If the line is to be kept, write it to the new mmsdrfs file.
  if [[ $printLine = true ]]
  then
    print_newLine >> $newsdrfs
    checkForErrors "writing to file $newsdrfs" $?
  fi

  IFS=":"  # Change the separator back to ":" for the next iteration.

done  # end while read -u3 sdrfsLine

IFS="$IFS_sv"  # Restore the default IFS settings.


########################################################
# Issue an error if no nodes to be deleted were found.
# (This should never happen, but check just in case.)
########################################################
if [[ ! -s $delnodes ]]
then
  # This should not happen at this point.
  print -u2 "$mmcmd: No nodes were found for deletion."
  cleanupAndExit
fi


##############################################################
# Issue an error if the entire cluster is being destroyed but
# there is still file system or remote cluster information.
##############################################################
if [[ ! -s $remnodes ]]
then
  # Make sure that there are no file systems left behind.
  if [[ -n $localFileSystemsFound ]]
  then
    # This cluster contains filesystems.
    printErrorMsg 310 $mmcmd
    cleanupAndExit
  fi

  # Make sure that there are no remote cluster declarations left behind.
  if [[ -n $remoteFileSystemsFound || -n $remoteClusterFound ]]
  then
    # This cluster contains remote declarations.
    printErrorMsg 268 $mmcmd
    cleanupAndExit
  fi
fi  # end of if [[ ! -s $remnodes ]]


###################################################################
# The daemon cannot be running on any of the nodes that will be
# deleted.  Depending on the circumstances (see inline comments),
# it may be necessary to stop GPFS on the remaining nodes as well.
#
# Note:  The verifyDaemonInactive call also gets the Gpfs object
#        lock which will prevent the daemon from starting until
#        the command completes.
###################################################################
if [[ $MMMODE = lc || $MMMODE = single ]]
then
  if [[ -z $coreQuorumDefined ]]
  then
    # If core quorum is not in effect, the daemon must be down everywhere.
    nodesToCheck=$allnodes
  elif [[ -s $remnodes && -z $oldQuorumNodeFound ]]
  then
    # If core quorum is in effect and none of the remaining quorum nodes
    # has been accepted yet by the daemon (they are all marked new),
    # the daemon must be stopped everywhere.
    nodesToCheck=$allnodes
  else
    # In all other cases, we can delete the nodes dynamically.
    nodesToCheck=$delnodes
  fi

  # Verifying that GPFS is stopped on all affected nodes.
  printInfoMsg 453
  verifyDaemonInactive $nodesToCheck $mmcmd
  [[ $? -ne 0 ]] && cleanupAndExit

  if [[ $nodesToCheck = $allnodes ]]
  then
    daemonInactive=yes
  else
    daemonInactive=no
  fi
fi  # end of if [[ $MMMODE = lc || $MMMODE = single ]]


#######################################################################
# If the entire cluster is being deleted, start a background process
# that will remove our files from each of the nodes and get out.
# There is nothing to commit because everything is going away anyway.
#######################################################################
if [[ ! -s $remnodes ]]
then
  # Clean up the lock file.
  [[ $sdrLocked = yes ]] &&  \
    freeLockOnServer $primaryServer $ourNodeNumber > /dev/null
  sdrLocked=no

  # Clean up all configuration files.
  printErrorMsg 271 $mmcmd
  $ln $delnodes ${delnodes}async
  $mmcommon onall_async ${delnodes}async removeFromCluster &

  # The command completed successfully.
  # We use the doNotUnlock option of cleanupAndExit because the
  # primary server may already be gone due to the removeFromCluster
  # we just executed, and everything is going away in any case.
  printErrorMsg 272 $mmcmd
  cleanupAndExit 0 doNotUnlock
fi


##################################################################
# If we arrive here, at least one node remains in the cluster.
##################################################################

##################################################################
# If this is an obsolete GPFS cluster environment, the user
# must delete all nodes in the cluster; there is no other option.
##################################################################
if [[ $MMMODE != lc && $MMMODE != single ]]
then
  print -u2 "$mmcmd: You must delete all nodes in the current cluster and"
  print -u2 "    move to a supported GPFS environment (cluster type lc)."
  print -u2 "    See the GPFS Concepts, Planning, and Installation Guide for instructions."
  cleanupAndExit
fi


#############################################################
# Issue an error message and exit if an attempt was made
# to delete the primary server or the backup server.
#############################################################
if [[ -n $deletedPrimaryServer || -n $deletedBackupServer ]]
then
  if [[ -n $deletedPrimaryServer ]]
  then
    server=$deletedPrimaryServer
  else
    server=$deletedBackupServer
  fi
  # Repository server nodes cannot be deleted.
  printErrorMsg 384 $mmcmd $server
  cleanupAndExit
fi


######################################################################
# The command must be issued from a node that remains in the cluster.
######################################################################
if [[ -n $deletingOurNode ]]
then
  # Issue the command from a node in the cluster.
  printErrorMsg 417 $mmcmd
  cleanupAndExit
fi


################################################################
# Ensure that there is at least one quorum node in the cluster.
################################################################
if [[ -z $quorumNodesDefined ]]
then
  printErrorMsg 53 $mmcmd
  cleanupAndExit
fi


############################################################
# If all quorum nodes are marked "new", the daemon will not
# be able to start unless the addNodeState field is reset.
############################################################
if [[ -z $oldQuorumNodeFound ]]
then
  # Reset the addnode state of all nodes to 'old'.
  $rm -f $tmpfile
  $awk -F:  '                                                                \
     # If this is a node line, clear the addnode state field.                \
     /'^$HOME_CLUSTER:$MEMBER_NODE:'/ {                                      \
       { $'$ADDNODE_STATE_Field' = "'$OLD_NODE'" }                           \
       { print  $1":" $2":" $3":" $4":" $5":" $6":" $7":" $8":" $9":"$10":"  \
               $11":"$12":"$13":"$14":"$15":"$16":"$17":"$18":"$19":"$20":"  \
               $21":"$22":"$23":"$24":"$25":"$26":"$27":" >> "'$tmpfile'" }  \
        { next }                                                             \
     }                                                                       \
     # All other lines are echoed without change.                            \
     { print $0 >> "'$tmpfile'" }                                            \
     END { print gen }                                                       \
  ' $newsdrfs
  checkForErrors awk $?

  # The file was updated successfully.
  $mv $tmpfile $newsdrfs
  checkForErrors "mv $tmpfile $newsdrfs" $?
fi  # end of if [[ -z $oldQuorumNodeFound ]]


###################################################
# Verify that none of the nodes to be deleted are
# still defined as a primary or backup NSD server.
###################################################
if [[ -s $serverNodes ]]
then
  $sort -u $serverNodes -o $serverNodes
  exec 3<&-
  exec 3< $delnodes
  while read -u3 relNodeName
  do
    $grep -w $relNodeName $serverNodes > /dev/null 2>&1
    if [[ $? -eq 0 ]]
    then
      # The node is still an NSD server for some disk.
      printErrorMsg 433 $mmcmd $relNodeName
      nsdServersFound=true
    fi
  done   # end of while read -u3 relNodeName
  [[ -n $nsdServersFound ]] &&  \
    cleanupAndExit
fi   # end of if [[ -s $serverNodes ]]


######################################################
# Add the nodeset header line back into the mmsdrfs.
######################################################
outline="$nodesetHdr_A:$lineCnt:$nodesetHdr_B:$nodesetHdr_C:$nodesetHdr_D"
print -- "$outline" >> $newsdrfs
checkForErrors "writing to file $newsdrfs" $?


######################################################
# Remove from the mmfs.cfg file any parameter values
# that are specific to the deleted nodes.
######################################################
if [[ -n $deletedNodeNames ]]
then
  $mmfixcfg $deletedNodeNames < $oldcfgFile > $newcfg
  if [[ $? != 0 ]]
  then
    # Warning:  failed to remove node-specific changes to mmfs.cfg
    printErrorMsg 311 $mmcmd
  else
    # mmfixcfg worked.
    replaceMmfscfg=yes
  fi
fi  # if [[ -n $deletedNodeNames ]]


###########################################################
# Put the mmfs.cfg information back into the mmsdrfs file.
###########################################################
if [[ $replaceMmfscfg = yes ]]
then
  appendCfgFile $nodesetId $newcfg $newsdrfs
  rc=$?
else
  appendCfgFile $nodesetId $oldcfgFile $newsdrfs
  rc=$?
fi
checkForErrors "appendCfgFile" $rc


############################################
# Sort the new version of the mmsdrfs file.
############################################
LC_ALL=C $SORT_MMSDRFS $newsdrfs -o $newsdrfs


#########################################
# Put the new mmsdrfs file into the sdr.
#########################################
trap "" HUP INT QUIT KILL
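# (Signals are ignored from this point so that an interrupt cannot
# leave the commit half-finished; normal trap handling is restored
# once the sdr has been unlocked below.)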
gpfsObjectInfo=$(commitChanges  \
   $HOME_CLUSTER $nsId $gpfsObjectInfo $newGenNumber $newsdrfs $primaryServer)
rc=$?
if [[ $rc -ne 0 ]]
then
  # We were unable to replace the file in the sdr.
  printErrorMsg 381 $mmcmd
  cleanupAndExit
fi

# Remove GPFS system files from the deleted nodes.  Ignore any errors.
[[ -s $delnodes ]] &&  \
  $mmcommon onall $delnodes $unreachedNodes removeFromCluster > /dev/null  2>&1


##################
# Unlock the sdr.
##################
[[ $sdrLocked = yes ]] &&  \
  freeLockOnServer $primaryServer $ourNodeNumber > /dev/null
sdrLocked=no
trap posttrap HUP INT QUIT KILL

# Indicate command was successful.
printErrorMsg 272 $mmcmd


##########################################################################
# Asynchronously propagate the changes to all remaining nodes.
##########################################################################
propagateSdrfsFile async $remnodes $newsdrfs $newGenNumber rereadNodeList


cleanupAndExit 0
