#!/bin/ksh
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
#
#
# Licensed Materials - Property of IBM
#
# (C) COPYRIGHT International Business Machines Corp. 1999,2006
# All Rights Reserved
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
#
# IBM_PROLOG_END_TAG
# @(#)86 1.42.1.1 src/avs/fs/mmfs/ts/admin/mmlsmgr.sh, mmfs, avs_rgpfs24, rgpfs24s002a 4/27/06 19:20:03
#######################################################################
#
# Usage:  mmlsmgr [Device[ Device...]] | [-C ClusterName]
#
#######################################################################
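#
# Examples (device and cluster names are placeholders):
#   mmlsmgr                  manager information for all file systems
#                            in the local cluster
#   mmlsmgr fs1 fs2          manager information for the listed file systems
#   mmlsmgr -C cluster2.kgn  manager information for all file systems
#                            owned by cluster2.kgn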

# Include global declarations and service routines.
. /usr/lpp/mmfs/bin/mmglobfuncs
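# mmsdrfsdef and mmfsfuncs are needed only when running as root;
# non-root users are not allowed to invoke commands on other nodes
# (see the gpfsInit call below), so the extra includes are skipped.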
if [[ $ourUid -eq 0 ]]
then
  . /usr/lpp/mmfs/bin/mmsdrfsdef
  . /usr/lpp/mmfs/bin/mmfsfuncs
fi

sourceFile="mmlsmgr.sh"
[[ -n $DEBUG || -n $DEBUGmmlsmgr ]] && set -x
$mmTRACE_ENTER "$*"

# Local variables
usageMsg=303
integer rc=0
integer nodeCount=0
deviceList=""


##################################
# Process the command arguments.
##################################
[[ $arg1 = '-?' || $arg1 = '-h' || $arg1 = '--help' || $arg1 = '--' ]] &&  \
  syntaxError "help" $usageMsg

# We were handed either a blank-separated list of devices,
# or a cluster name specified via the -C flag.  If -C is
# specified, there may or may not be white space separating
# the -C from the cluster name.
if [[ $arg1 = "-C"* ]]
then
  COpt="-C"
  clusterName=${arg1#-C}
  if [[ -z $clusterName ]]
  then
    clusterName=$arg2
    [[ -n $arg3 ]] && syntaxError "extraArg" $usageMsg $arg3
  else
    [[ -n $arg2 ]] && syntaxError "extraArg" $usageMsg $arg2
  fi
  [[ -z $clusterName ]] && syntaxError "missingValue" $usageMsg "-C"
else
  deviceList=$@
fi


###################################
# Set up trap exception handling.
###################################
trap pretrap2 HUP INT QUIT KILL


####################################################################
# If invoked by a root user, call the gpfsInit function to ensure
# that the local copy of the mmsdrfs file and the rest of the GPFS
# system files are up-to-date.  There is no need to lock the sdr.
# Non-root users are not allowed to invoke commands on other nodes.
####################################################################
if [[ $ourUid -eq 0 ]]
then
  gpfsInitOutput=$(gpfsInit nolock)
  setGlobalVar $? $gpfsInitOutput
fi


################################################################
# The user can invoke the mmlsmgr command specifying either
# a list of file systems, or a cluster name, or nothing at all.
# If a list of file system names was given, the file systems
# do not necessarily belong to the same cluster.
# If a cluster name was given, information for all of the file
# systems in this cluster is displayed.  If no input parameter
# is specified, then information is displayed for all file
# systems that belong to the same cluster as the node on which
# the mmlsmgr command was issued.
################################################################
deviceList2=""
if [[ -n $deviceList ]]
then
  # The user provided a list of file system names.
  # Process the file systems one at a time.
  # Loop through the device list and replace any remote file systems
  # with the string "remoteClusterName:remoteFsName".
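  # Turn off pathname expansion (set -f) while walking the list so that
  # shell metacharacters in a device name are not globbed against the
  # local directory; expansion is restored with set +f afterwards.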
  set -f
  for device in $deviceList
  do
    set +f

    # If the invocation is not for an explicitly-remote device, obtain
    # the needed information about the file system from the mmsdrfs file.
    if [[ $device != *:* ]]
    then
      findFSoutput=$(findFS "$device" $mmsdrfsFile)
      [[ -z $findFSoutput ]] && continue

      # Parse the output from the findFS function.
      set -f ; set -- $findFSoutput ; set +f
      fqDeviceName=$1
      deviceName=$2
      fsHomeCluster=$3
      remoteDevice=$4

      # If this is a remote file system, set fqDeviceName appropriately.
      if [[ $fsHomeCluster != $HOME_CLUSTER ]]
      then
        fqDeviceName="$fsHomeCluster:/dev/$remoteDevice"
        remoteFsSpecified=yes
        [[ -n $remoteCluster && $fsHomeCluster != $remoteCluster ]] &&  \
          multipleClustersInvolved=yes
        remoteCluster=$fsHomeCluster
      fi
    else
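      # The device was given explicitly as "remoteClusterName:remoteFsName";
      # split it with parameter expansion instead of consulting mmsdrfs.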
      fqDeviceName=$device
      deviceName=${fqDeviceName##*:}
      fsHomeCluster=${fqDeviceName%%:*}
      remoteDevice=$deviceName
      remoteFsSpecified=yes
      [[ -n $remoteCluster && $fsHomeCluster != $remoteCluster ]] &&  \
        multipleClustersInvolved=yes
      remoteCluster=$fsHomeCluster
    fi

    # If more than one file system was requested, suppress error
    # messages that deal with individual file systems.
    # Such errors will go into the "unexpected" category.
    [[ -n $deviceList2 ]] && multipleDevicesInvolved=yes

    # Append the fully-qualified name to the list of fs names.
    deviceList2="$deviceList2 $fqDeviceName"
  done
  set +f

elif [[ -n $COpt ]]
then
  # The user provided a cluster name.
  deviceList2=$clusterName":"
  fsHomeCluster=$clusterName
  remoteFsSpecified=yes

else
  # The user did not specify any parameters.  Since deviceList2
  # is already set to the null string, tslsmgr will return
  # information for the file systems in the local cluster.
  :  # Do nothing; everything is ready to call tslsmgr.
fi  # end of if [[ -n $deviceList ]]


########################################################################
# Invoke the command on the local node if devices were found.
# Display any error messages and exit if any of the following are true:
#   - the command completed successfully
#   - there is an unacceptable error
#       (anything other than daemon down or quorum wait)
#   - one of the file systems is remote
#   - we are not running as UID 0
#   - this is a single node cluster
########################################################################
[[ -n $deviceList && -z $deviceList2 ]] && cleanupAndExit
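# Any error text from the daemon is collected in $errMsg so the checks
# below can decide whether to show or suppress it.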
${mmcmdDir}/${links}/mmlsmgr $deviceList2 2>$errMsg
rc=$(remapRC $?)
if [[ ($rc -ne $MM_DaemonDown && $rc -ne $MM_QuorumWait) ||
      $remoteFsSpecified = yes                           ||
      $ourUid -ne 0                                      ||
      $MMMODE = single ]]
then
  if [[ $rc -eq $MM_FsNotFound && -z $multipleDevicesInvolved ]]
  then
    if [[ $fsHomeCluster != $HOME_CLUSTER ]]
    then
      # The remote cluster does not know anything about this file system.
      printErrorMsg 108 $mmcmd $remoteDevice $fsHomeCluster
    else
      # Unexpected error.
      printErrorMsg 171 $mmcmd "file system $deviceName not found" $rc
    fi
  elif [[ $rc -eq $MM_Remotefs            &&
          $fsHomeCluster != $HOME_CLUSTER &&
          -z $multipleDevicesInvolved     ]]
  then
    # The file system is not owned by the remote cluster.
    [[ $device != *:* ]] &&  \
      printErrorMsg 111 $mmcmd $device $remoteDevice $fsHomeCluster
    printErrorMsg 112 $mmcmd $remoteDevice $fsHomeCluster
  elif [[ ($rc -eq $MM_HostDown            ||
           $rc -eq $MM_TimedOut            ||
           $rc -eq $MM_SecurityCfg         ||
           $rc -eq $MM_AuthorizationFailed ||
           $rc -eq $MM_UnknownCluster)        &&
          $fsHomeCluster != $HOME_CLUSTER     &&
          -z $multipleClustersInvolved        ]]
  then
    # Failed to connect to the remote cluster.
    [[ $rc -eq $MM_SecurityCfg ]] &&  \
      printErrorMsg 150 $mmcmd
    [[ $rc -eq $MM_AuthorizationFailed ]] &&  \
      printErrorMsg 151 $mmcmd
    printErrorMsg 105 $mmcmd $fsHomeCluster
  elif [[ $rc -eq $MM_DaemonDown ]]
  then
    # GPFS is down on this node.
    printErrorMsg 109 $mmcmd
  elif [[ $rc -eq $MM_QuorumWait ]]
  then
    # GPFS is not ready for commands.
    printErrorMsg 110 $mmcmd
  elif [[ $rc -eq $MM_ConnectionReset ]]
  then
    # An internode connection was reset.
    printErrorMsg 257 $mmcmd
  else
    # Either the command worked, or it is an unexpected error.
    if [[ -s $errMsg ]]
    then
      # Show the error messages from the daemon.
      $cat $errMsg 1>&2
    elif [[ $rc -ne 0 ]]
    then
      # tslsmgr failed.
      printErrorMsg 104 "$mmcmd" "tslsmgr $deviceList2"
    else
      :  # The command must have worked.
    fi
  fi  # end of if [[ $rc -eq $MM_FsNotFound && -z $multipleDevicesInvolved ]]
  cleanupAndExit $rc
fi  # end of if [[ ($rc -ne $MM_DaemonDown && ... ]]
$rm -f $errMsg


#########################################################################
# We come here if the command was invoked for a local file system but
# the local daemon is not available; send the command to an active node.
#########################################################################

# Create a file with the reliable names of the nodes in the cluster.
nodeCount=$(getNodeFile $REL_HOSTNAME_Field $GLOBAL_ID $mmsdrfsFile $nodefile)
if [[ $nodeCount -eq 0 ]]
then
  # The cluster is empty; there is nobody to run the command.
  printErrorMsg 171 $mmcmd "getNodeFile (nodeCount=0)" 1
  cleanupAndExit
fi

# Try the nodes one by one until you find a node that can execute the command.
preferredNode=0     # We have no idea where to go first; let mmcommon decide.
$mmcommon linkCommand $preferredNode $nodefile mmlsmgr $deviceList2
rc=$?

cleanupAndExit $rc