source: gpfs_3.1_ker2.6.20/lpp/mmfs/bin/mmexportfs @ 16

Last change on this file since 16 was 16, checked in by rock, 16 years ago
  • Property svn:executable set to *
File size: 21.5 KB
#!/bin/ksh
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.




# Licensed Materials - Property of IBM

# (C) COPYRIGHT International Business Machines Corp. 2003,2007
# All Rights Reserved

# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.

# IBM_PROLOG_END_TAG
# @(#)36 1.7.5.2 src/avs/fs/mmfs/ts/admin/mmexportfs.sh, mmfs, avs_rgpfs24, rgpfs24s010a 2/22/07 03:17:55
##############################################################################
#
# Usage:
#
#   mmexportfs {Device | all} -o ExportfsFile [-C nodesetId] [-P]
#
# where
#
#   Device        is the file system to be exported.
#                 If "all" is specified, then all file systems in
#                 the cluster are exported.  Free disks, if any,
#                 are exported as well.
#
#   -o ExportfsFile   is the name of a file where the file system
#                     export information is to be placed.
#
#
#  Obsolete option (for use when migrating from previous releases):
#
#   -C NodesetId  is the nodeset from which file systems are to be
#                 exported.  This parameter is used to limit the
#                 scope of the "all" keyword to all file systems
#                 within a given nodeset rather than all file
#                 systems in the cluster.
#
#  Undocumented option:
#
#   -P            Do not remove the file system information from
#                 the current version of the mmsdrfs.
#
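#  Example (the file system and output file names below are illustrative
#  only, not part of the documented interface):
#
#   mmexportfs fs1 -o /tmp/fs1.exportData
#   mmexportfs all -o /tmp/allfs.exportData
#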
##############################################################################

# Include global declarations and service routines.
. /usr/lpp/mmfs/bin/mmglobfuncs
. /usr/lpp/mmfs/bin/mmsdrfsdef
. /usr/lpp/mmfs/bin/mmfsfuncs

sourceFile="mmexportfs.sh"
[[ -n $DEBUG || -n $DEBUGmmexportfs ]] && set -x
$mmTRACE_ENTER "$*"


# Local work files.  Names should be of the form:
#   fn=${tmpDir}fn.${mmcmd}.$$

# Note that the following nodesetMember declaration is the prefix for
# the file names that will contain the member nodes of each nodeset.
# The actual file names will be of the form $nodesetMembers.<nodesetId>
nodesetMembers=${tmpDir}nodesetMembers.${mmcmd}.$$

LOCAL_FILES=" $nodesetMembers.* "

# Local variable declarations
usageMsg=570
oflag=""
Pflag=""
fsBeingExportedList=""

newNodesetIdFound=no
oldNodesetId=""
fsMountPoint=""
existingMountPoints=""
existingMinorNumbers=""


# Local functions


####################################################################
#
# Function:  Based on the cluster type, guess the likely disk type
#            for a given disk.
#
# Input:     None.
#
# Output:    The type of the disk:  vsd, lv, nsd, etc.
#
# Returns:   Always zero.
#
####################################################################
function assignDiskType  #
{
  [[ -n $DEBUG || -n $DEBUGassignDiskType ]] && set -x
  $mmTRACE_ENTER "$*"
  typeset diskType wait4RVSD

  # Determine the value of the wait4RVSD parameter.
  [[ $MMMODE = rpd ]] && wait4RVSD=$(showCfgValue wait4RVSD)

  case $MMMODE in
    sp)     diskType=vsd ;;
    hacmp)  diskType=lv  ;;
    rpd)    [[ $wait4RVSD = yes ]] && diskType=vsd || diskType=lv ;;
    lc)     diskType=nsd ;;
    single) diskType=disk ;;
    *)      diskType=unknown ;;
  esac

  print -- $diskType
  return 0

}  #--------- end of function assignDiskType --------------------
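# The disk type is written to stdout, so callers capture it with command
# substitution, e.g. v[$DISK_TYPE_Field]=$(assignDiskType), as done below.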



######################
# Mainline processing
######################


#################################
# Process the command arguments.
#################################
[[ $arg1 = '-?' || $arg1 = '-h' || $arg1 = '--help' || $arg1 = '--' ]] &&  \
  syntaxError "help" $usageMsg

[[ $argc -lt 3 ]] &&  \
  syntaxError "missingArgs" $usageMsg

device=$arg1   # Save stripe group device (always the first parameter).
shift 1        # Drop the device name from the parameter list.

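# Note:  The leading ':' in the getopts option string selects silent error
# reporting:  a missing option argument sets OPT to ':' and an unknown
# option sets OPT to '?'; both cases are handled explicitly below.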
while getopts :C:o:P OPT
do
  case $OPT in
    C) # Export the file systems from the specified nodeset.
       [[ -n $Cflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       sourceNodesetId=$OPTARG
       Cflag=yes
       ;;

    o) # Name of the output file.
       [[ -n $oflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       fsExportData=$OPTARG
       oflag=yes
       ;;

    P) # Leave the fs information in place.
       [[ -n $Pflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       Pflag=yes
       ;;

    :) syntaxError "missingValue" $usageMsg $OPTARG
       ;;

    +[CoP]) syntaxError "invalidOption" $usageMsg $OPT
       ;;

    *) # Invalid option specified.
       syntaxError "invalidOption" $usageMsg $OPTARG
       ;;
  esac
done

shift OPTIND-1
[[ $# != 0 ]] && syntaxError "extraArg" $usageMsg $1

# Verify an output file is specified.
[[ -z $oflag ]] && syntaxError "missingArgs" $usageMsg

# Be careful not to inadvertently destroy some other file.
if [[ -e $fsExportData ]]
then
  # The file already exists.
  printErrorMsg 42 $mmcmd $fsExportData
  cleanupAndExit
fi

# Temporarily add the user-specified output file to the list of files
# to be removed automatically upon exit.  We do this to ensure that
# there will be no partial data in case of a failure.  Once the data is
# complete, we will restore the original LOCAL_FILES value.
LOCAL_FILES_SV="$LOCAL_FILES"
LOCAL_FILES="$LOCAL_FILES $fsExportData"


#######################################################################
# Set up trap exception handling and call the gpfsInit function.
# It will ensure that the local copy of the mmsdrfs and the rest of
# the GPFS system files are up-to-date and will obtain the sdr lock.
#
# Note:   We are using a variation of gpfsInit - gpfsInitGeneric,
# which allows the command to still run on old GPFS cluster types.
# If the cluster type is lc or single, things work as they always do.
# But if the cluster type is sp, rpd, or hacmp, we are dealing with
# an obsolete GPFS cluster environment.  The daemon will never
# be allowed to start under these circumstances, nor will the bulk
# of the mm commands be allowed to work.  The only exceptions are the
# commands (mmexportfs, mmdelnode) needed by the user to migrate to a
# supported environment.  Under such conditions it is acceptable to assume
# that the daemon is indeed not running anywhere (i.e., there is no need to
# run verifyDaemonInactive) and to ignore the true commit processing
# and the rebuilding of the mmfs environment.  The idea is to allow
# the user to run "mmexportfs all", followed by "mmdelnode -a", and
# then create a new cluster of type lc.
#
# Note:   If the -P option is specified, we are interested only
# in retrieving the file system data without changing the current
# mmsdrfs file.  There is no need to lock the mmsdrfs file because
# it will not be changed.
#######################################################################
trap pretrap HUP INT QUIT KILL
[[ -n $Pflag ]] && lockId=nolock
gpfsInitOutput=$(gpfsInitGeneric $lockId)
setGlobalVar $? $gpfsInitOutput

# If the current nodeset is requested,
# use the nodeset id returned from gpfsInit.
[[ $sourceNodesetId = "." ]] && sourceNodesetId=$nsId

if [[ $sourceNodesetId = 0 ]]
then
  print -u2 "$mmcmd: The nodeset cannot be determined.  Use the -C <nodesetId> option,"
  print -u2 "or issue the command from a node that belongs to the desired nodeset."
  cleanupAndExit
fi

# Determine the lookup order for resolving host names.
[[ $osName != AIX ]] && resolveOrder=$(setHostResolveOrder)


###################################################
# Make sure that the specified file system exists.
###################################################
if [[ $device = all ]]
then
  # The scope of the keyword all depends on whether
  # -C was specified or not.
  if [[ -z $sourceNodesetId ]]
  then
    nodesetId=$GLOBAL_ID
  else
    nodesetId=$sourceNodesetId
  fi
else
  # Obtain the information about the filesystem from the mmsdrfs file.
  findFSoutput=$(findFS "$device" $mmsdrfsFile)
  [[ -z $findFSoutput ]] && cleanupAndExit

  # Parse the output from the findFS function.
  set -f ; set -- $findFSoutput ; set +f
  fqDeviceName=$1
  deviceName=$2
  nodesetId=$3

  # Exit with a message if the command was invoked for a remote file system.
  if [[ $nodesetId != $HOME_CLUSTER ]]
  then
    # Command is not allowed for remote file systems.
    printErrorMsg 106 $mmcmd $device $nodesetId
    cleanupAndExit
  fi

  if [[ -n $sourceNodesetId && $nodesetId != $sourceNodesetId ]]
  then
    print -u2 "$mmcmd: File system $device does not exist in nodeset $sourceNodesetId."
    cleanupAndExit
  fi
fi  # end if [[ $device = all ]]


######################################################################
# Create the new version of the mmsdrfs file and collect the relevant
# information for the file systems being exported.
######################################################################
$rm -f $newsdrfs $fsExportData $allClusterNodes $nodesetMembers.*
IFS=":"
exec 3<&-
exec 3< $mmsdrfsFile
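# Read the mmsdrfs file one record at a time on file descriptor 3.
# IFS is set to ":" so that each record splits into its colon-separated
# fields when it is expanded into the array v below.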
while read -u3 sdrfsLine
do
  # Parse the line.
  set -f ; set -A v -- - $sdrfsLine ; set +f
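  # (The leading "-" keeps the field indices in array v 1-based, and the
  # set -f/+f pair suppresses file name expansion during the word split.)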

  IFS="$IFS_sv"    # Restore the default IFS settings.
  printLine=true   # Assume the line will be printed.

  # Change some of the fields depending on the type of line.
  case ${v[$LINE_TYPE_Field]} in

    $VERSION_LINE )  # this is the global header line
       # If this is an sp cluster and it does not already
       # have a cluster Id, assign it one now.
       if [[ -z ${v[$CLUSTERID_Field]} ]]
       then
         timeStamp=$($perl -e 'print time')
         v[$CLUSTERID_Field]="gpfs$timeStamp"
       fi

       # Put the line into the fs export file.
       print_newLine >> $fsExportData
       checkForErrors "writing to file $fsExportData" $?

       # Increment the generation number
       newGenNumber=${v[$SDRFS_GENNUM_Field]}+1
       v[$SDRFS_GENNUM_Field]=$newGenNumber
       ;;

    $NODESET_HDR )  # this is the header line for a nodeset
       # If this is the nodeset from which we are exporting,
       # put the line into the fs export file.
       if [[ ${v[$NODESETID_Field]} = $nodesetId ||
             $nodesetId = $GLOBAL_ID ]]
       then
         print_newLine >> $fsExportData
         checkForErrors "writing to file $fsExportData" $?
       fi
       ;;

    $MMFSCFG )     # this line contains mmfs.cfg information for some nodeset
       # If this is the nodeset from which we are exporting,
       # put the line into the fs export file.
       if [[ ${v[$NODESETID_Field]} = $nodesetId ||
             $nodesetId = $GLOBAL_ID ]]
       then
         print_newLine >> $fsExportData
         checkForErrors "writing to file $fsExportData" $?
       fi
       ;;

    $MEMBER_NODE )  # this line describes a node that belongs to some nodeset
       # Create a file with the names of all nodes in the cluster.
       # Create separate files with the names of the nodes in each nodeset.
       if [[ ${v[$DESIGNATION_Field]} != $DELETED_NODE ]]
       then
         print -- "${v[$REL_HOSTNAME_Field]}" >> $allClusterNodes
         checkForErrors "writing to file $allClusterNodes" $?

         print -- "${v[$REL_HOSTNAME_Field]}" >> $nodesetMembers.${v[$NODESETID_Field]}
         checkForErrors "writing to file $nodesetMembers.${v[$NODESETID_Field]}" $?
       fi
       ;;

    $SG_HEADR )  # this is the header line for some file system
       # Starting the processing of a new file system.  If the file
       # system should be exported, put the line into the fs export
       # file and remove the line from the mmsdrfs file.
       #
       # Note:  Unless explicitly requested by their local name,
       #        remote file systems are not exported.
       if [[ ${v[$DEV_NAME_Field]} = $deviceName || $device = all &&
           ( $nodesetId = $GLOBAL_ID ||
             $nodesetId = ${v[$NODESETID_Field]} ) &&
             ${v[$FS_TYPE_Field]} = $localfs ]]
       then
         # This file system should be exported.
         exportThisFileSystem=true

         # Put an informational message:  processing file system ...
         print -- ""  # put a blank separator line
         printInfoMsg 250 $mmcmd ${v[$DEV_NAME_Field]}

         # Check whether the "odd state" flag is set.
         if [[ -n ${v[$ODD_STATE_Field]} && ${v[$ODD_STATE_Field]} != no ]]
         then
           # Tell the user to bring the filesystem data up to date.
           printErrorMsg 190 $mmcmd ${v[$DEV_NAME_Field]} ${v[$DEV_NAME_Field]}
           cleanupAndExit
         fi

         # Save the line in the fs export file.
         print_newLine >> $fsExportData
         checkForErrors "writing to file $fsExportData" $?
         printLine=false
       else
         # This file system should not be exported.
         exportThisFileSystem=""
       fi  # end if [[ ${v[$DEV_NAME_Field]} = $deviceName || $device = all &&
       ;;

    $SG_ETCFS )  # this line is a stanza line for one of the filesystems
       # If this is the first line in the stanza for a file system
       # that is being exported, save the device name, mount point
       # and nodeset to which the file system belongs.  All stanza
       # lines for file systems that are being exported are put into
       # the fs export file and removed from the mmsdrfs file.
       if [[ -n $exportThisFileSystem ]]
       then
         if [[ ${v[$LINE_NUMBER_Field]} = $MOUNT_POINT_Line ]]
         then
           fsInfo="${v[$DEV_NAME_Field]}:${v[$ETCFS_TEXT_Field]}:${v[$NODESETID_Field]}"
           fsBeingExportedList="$fsInfo $fsBeingExportedList"
         fi

         # Save the line in the fs export file.
         print_newLine >> $fsExportData
         checkForErrors "writing to file $fsExportData" $?
         printLine=false
       fi  # end if [[ -n $exportThisFileSystem ]]
       ;;

    $SG_MOUNT )
       # If the line belongs to a file system that is being exported, put the
       # line into the fs export file and remove the line from the mmsdrfs file.
       if [[ -n $exportThisFileSystem ]]
       then
         # Save the line in the fs export file.
         print_newLine >> $fsExportData
         checkForErrors "writing to file $fsExportData" $?
         printLine=false
       fi
       ;;

    $SG_DISKS )
       # Decide if this disk is to be exported.  If the disk belongs to some
       # file system, the exportThisFileSystem flag already has the correct
       # value.  If this is a free disk, it will be exported only if all file
       # systems are being exported.  Note that the exportThisFileSystem flag
       # is used to control the exporting of disks that are part of some file
       # system as well as the exporting of free disks.
       if [[ ${v[$NODESETID_Field]} = $FREE_DISK ]]
       then
         if [[ $device != all ]]
         then
           exportThisFileSystem=""
         else
           exportThisFileSystem=true
           # If not done already, put an informational message.
           if [[ -z $freeDiskStartMsg && -n $fsBeingExportedList ]]
           then
             # Put an informational message:  processing free disks ...
             print -- ""  # put a blank separator line
             printInfoMsg 98 $mmcmd
             freeDiskStartMsg=issued
           fi
         fi  # end if [[ $device != all ]]
       fi  # end if [[ ${v[$NODESETID_Field]} = $FREE_DISK ]]

       # If exporting this disk, put the SG_DISKS line into the fs export
       # file and remove it from the mmsdrfs file.
       if [[ -n $exportThisFileSystem ]]
       then
         # Fail the command if the disk is excluded.
         # The user needs to repair the file system first.
         # The only exception is if the -P option is specified.
         if [[ ${v[$EXCLUDE_Field]} = $excludedDisk && -z $Pflag ]]
         then
           print -u2 "$mmcmd: File system ${v[$DEV_NAME_Field]} contains excluded disks."
           cleanupAndExit
         fi

         # Make sure the disk type field is set.
         [[ -z ${v[$DISK_TYPE_Field]} || ${v[$DISK_TYPE_Field]} = unknown ]] &&  \
            v[$DISK_TYPE_Field]=$(assignDiskType)

         # Save the line in the fs export file.
         print_newLine >> $fsExportData
         checkForErrors "writing to file $fsExportData" $?
         printLine=false
       fi  # end if [[ -n $exportThisFileSystem ]]
       ;;

    * )  # Pass all other lines without a change.
       ;;

  esac  # end Change some of the fields

  # Build and write the line to the new mmsdrfs file.
  if [[ $printLine = true || -n $Pflag ]]
  then
    print_newLine >> $newsdrfs
    checkForErrors "writing to file $newsdrfs" $?
  fi

  IFS=":"  # Change the separator back to ":" for the next iteration.

done  # end while read -u3 sdrfsLine

IFS="$IFS_sv"  # Restore the default IFS settings.


# Make sure that everything went OK.

if [[ $device = all && -z $fsBeingExportedList ]]
then
  # No file systems were found.
  printErrorMsg 200 $mmcmd
  cleanupAndExit
fi


############################################
# Sort the new version of the mmsdrfs file.
############################################
LC_ALL=C $SORT_MMSDRFS $newsdrfs -o $newsdrfs
checkForErrors "sorting $newsdrfs" $?


##############################################
# If the -P option is specified, we are done.
##############################################
if [[ -n $Pflag ]]
then
  # Remove the output file from the list of files to be deleted upon exit.
  LOCAL_FILES="$LOCAL_FILES_SV"
  cleanupAndExit 0
fi


################################################################
# If this is an old cluster type, we managed to extract the
# file system data.  Perform a fake commit and take an early
# exit.  The rest of the mm infrastructure does not know how to
# handle a real commit, plus the user is expected to tear down
# the obsolete environment right away and create a new cluster.
################################################################
if [[ $MMMODE != lc && $MMMODE != single ]]
then
  # Perform a pseudo commit of the changes on this node only.
  $cp $newsdrfs $mmsdrfsFile
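  # Record the new mmsdrfs generation number in the ${mmfsEnvLevel} marker file.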
  $rm -f ${mmfsEnvLevel}+([0-9])
  $touch ${mmfsEnvLevel}$newGenNumber

  # Remove the output file from the list of files to be deleted upon exit.
  LOCAL_FILES="$LOCAL_FILES_SV"

  cleanupAndExit 0

fi  # end if [[ $MMMODE != lc && $MMMODE != single ]]


############################################
# Lock the gpfs object to prevent daemons
# from starting during the commit phase.
############################################
[[ $getCredCalled = no ]] && getCred
setRunningCommand "$mmcmd" $primaryServer
checkForErrors setRunningCommand $?
gpfsLocked=yes


################################################################
# Make sure the file systems being exported are not mounted
# and that the currently-appointed fs managers have resigned.
################################################################
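# Each entry in fsBeingExportedList has the form
# deviceName:mountPoint:nodesetId, as recorded when the SG_ETCFS
# stanza lines were processed above.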
for expFs in $(print -- "$fsBeingExportedList")
do
  IFS=':'
  set -f ; set -- $expFs ; set +f
  IFS="$IFS_sv"
  deviceName=$1
  fqDeviceName="/dev/$deviceName"
  fsMountPoint=$2
  nodesetId=$3

  if [[ -z $daemonDownVerified ]]
  then
    preferredNode=0
    $mmcommon onactive $preferredNode $nodesetMembers.$nodesetId        \
                       $NO_FILE_COPY $fqDeviceName $CHECK_ALL $NO_LINK  \
                       $MOUNT_CHECK_ONLY $RESIGN_FSMGR 2>$errMsg
    rc=$?
    if [[ $rc -eq $MM_FsMounted ]]
    then
      # The file system is still mounted; messages were issued by mmcommon.
      cleanupAndExit
    elif [[ $rc -eq $MM_DaemonDown ]]
    then
      # GPFS is down on all nodes that we care about; that's just fine.
      rc=0
      daemonDownVerified=yes
    elif [[ $rc -eq $MM_ConnectionReset ]]
    then
      # An internode connection was reset.
      printErrorMsg 257 $mmcmd
      cleanupAndExit
    elif [[ $rc -ne 0 ]]
    then
      # An unexpected error occurred during the mount check.
      if [[ -s $errMsg ]]
      then
        # Show the error messages from the daemon.
        $cat $errMsg 1>&2
      else
        # The mount check failed and there were no messages from the daemon.
        printErrorMsg 171 $mmcmd "mount check for $fqDeviceName" $rc
      fi
      # The command was unable to determine whether the file system is mounted.
      printErrorMsg 564 $mmcmd $fqDeviceName
      cleanupAndExit
    fi
    $rm -f $errMsg
  fi  # end if [[ -z $daemonDownVerified ]]

  # If exporting a single file system, remove all traces
  # of that file system from the nodes.  Ignore errors.
  # If exporting all file systems, we will bypass this
  # time-consuming process; the cluster will most likely
  # be destroyed anyway.  Plus, we will propagate the
  # changes at the end of the command like we always do.
  [[ $device != all ]] &&  \
    $mmcommon onall $nodesetMembers.$nodesetId $unreachedNodes  \
                      rmfs $fqDeviceName $fsMountPoint >/dev/null 2>&1

done  # end for expFs in $(print -- "$fsBeingExportedList")


############################################################
# Replace the mmsdrfs file in the sdr with the new version.
############################################################
trap "" HUP INT QUIT KILL
gpfsObjectInfo=$(commitChanges  \
   $nsId $nsId $gpfsObjectInfo $newGenNumber $newsdrfs $primaryServer)
rc=$?
if [[ $rc -ne 0 ]]
then
  # Cannot replace file in the sdr.
  printErrorMsg 381 $mmcmd
  cleanupAndExit
fi


###################################################################
# Unlock the sdr.
###################################################################
freeLockOnServer $primaryServer $ourNodeNumber >/dev/null
sdrLocked=no
trap posttrap HUP INT QUIT KILL

# Remove the output file from the list of files to be deleted upon exit.
LOCAL_FILES="$LOCAL_FILES_SV"


#################################################################
# Propagate the new mmsdrfs file.  This process is asynchronous.
#################################################################
propagateSdrfsFile async $allClusterNodes $newsdrfs $newGenNumber

cleanupAndExit 0
