#!/bin/ksh
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
#  
#  
# Licensed Materials - Property of IBM 
#  
# (C) COPYRIGHT International Business Machines Corp. 1999,2006 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)86 1.42.1.1 src/avs/fs/mmfs/ts/admin/mmlsmgr.sh, mmfs, avs_rgpfs24, rgpfs24s002a 4/27/06 19:20:03
#######################################################################
#
# Usage:  mmlsmgr [Device[ Device...]] | [-C ClusterName]
#
#######################################################################

# Include global declarations and service routines.
# mmglobfuncs is expected to define the common variables and helpers
# used throughout this script (e.g., ourUid, arg1..argN, mmcmd, errMsg,
# cleanupAndExit) — TODO confirm against the sourced file.
. /usr/lpp/mmfs/bin/mmglobfuncs
# Root users additionally get the mmsdrfs definitions and the full set
# of service functions; non-root invocations are restricted to purely
# local queries (see the gpfsInit section below).
if [[ $ourUid -eq 0 ]]
then
  . /usr/lpp/mmfs/bin/mmsdrfsdef
  . /usr/lpp/mmfs/bin/mmfsfuncs
fi

sourceFile="mmlsmgr.sh"
# Turn on command tracing when debugging is requested either globally
# (DEBUG) or for this script specifically (DEBUGmmlsmgr).
[[ -n $DEBUG || -n $DEBUGmmlsmgr ]] && set -x
$mmTRACE_ENTER "$*"   # trace-entry hook; presumably expands to a no-op unless tracing is configured

# Working variables local to this script.
deviceList=""         # blank-separated file system names from the command line
usageMsg=303          # message number used when reporting syntax errors
integer nodeCount=0   # number of cluster nodes available for remote execution
integer rc=0          # overall return code of the command


##################################
# Process the command arguments.
##################################
# arg1..argN are presumably set from the positional parameters by
# mmglobfuncs — TODO confirm.  syntaxError prints the usage text
# (message $usageMsg) and does not return.
[[ $arg1 = '-?' || $arg1 = '-h' || $arg1 = '--help' || $arg1 = '--' ]] &&  \
  syntaxError "help" $usageMsg

# We were handed either a blank-separated list of devices,
# or a cluster name specified via the -C flag.  If -C is
# specified, there may or may not be white space separating
# the -C from the cluster name.
if [[ $arg1 = "-C"* ]]
then
  COpt="-C"
  # Handle the attached form "-CclusterName" by stripping the flag.
  clusterName=${arg1#-C}
  if [[ -z $clusterName ]]
  then
    # Detached form "-C clusterName": the name is in the next argument.
    clusterName=$arg2
    [[ -n $arg3 ]] && syntaxError "extraArg" $usageMsg $arg3
  else
    [[ -n $arg2 ]] && syntaxError "extraArg" $usageMsg $arg2
  fi
  # -C was given without a cluster name in either form.
  [[ -z $clusterName ]] && syntaxError "missingValue" $usageMsg "-C"
else
  # No -C flag: treat all arguments as a list of device names.
  deviceList=$@
fi


###################################
# Set up trap exception handling.
###################################
# Run the pretrap2 cleanup routine (defined in the sourced service
# files) if the command is interrupted.  SIGKILL is deliberately not
# listed: it can never be caught, so trapping it is a no-op.
trap pretrap2 HUP INT QUIT


####################################################################
# If invoked by a root user, call the gpfsInit function to ensure
# that the local copy of the mmsdrfs file and the rest of the GPFS
# system files are up-to-date.  There is no need to lock the sdr.
# Non-root users are not allowed to invoke commands on other nodes.
####################################################################
if [[ $ourUid -eq 0 ]]
then
  # "nolock" runs the initialization without taking the sdr lock;
  # this command only reads configuration data.
  gpfsInitOutput=$(gpfsInit nolock)
  # setGlobalVar presumably records the exit status and parses the
  # gpfsInit output into global variables — TODO confirm.
  setGlobalVar $? $gpfsInitOutput
fi


################################################################
# The user can invoke the mmlsmgr command specifying either
# a list of file systems, or a cluster name, or nothing at all.
# If a list of file system names was given, the file systems
# do not necessarily belong to the same cluster.
# If a cluster name was given, information for all of the file
# systems in this cluster is displayed.  If no input parameter
# is specified, then information is displayed for all file
# systems that belong to the same cluster as the node on which
# the mmlsmgr command was issued.
################################################################
deviceList2=""
if [[ -n $deviceList ]]
then
  # The user provided a list of file system names.
  # Process the file systems one at a time.
  # Loop through the device list and replace any remote file systems
  # with the string "remoteClusterName:remoteFsName".
  # set -f suppresses pathname expansion while the unquoted
  # $deviceList is word-split into individual device names.
  set -f
  for device in $deviceList
  do
    set +f

    # If the invocation is not for an explicitly-remote device, obtain
    # the needed information about the filesystem from the mmsdrfs file.
    if [[ $device != *:* ]]
    then
      # findFS looks the device up in the mmsdrfs file; empty output
      # means the device is unknown and is skipped here (findFS
      # presumably issues its own error message — TODO confirm).
      findFSoutput=$(findFS "$device" $mmsdrfsFile)
      [[ -z $findFSoutput ]] && continue

      # Parse the output from the findFS function.
      # (Globbing is suppressed while the output is word-split into
      # the positional parameters.)
      set -f ; set -- $findFSoutput ; set +f
      fqDeviceName=$1
      deviceName=$2
      fsHomeCluster=$3
      remoteDevice=$4

      # If this is a remote file system, set fqDeviceName appropriately.
      if [[ $fsHomeCluster != $HOME_CLUSTER ]]
      then
        fqDeviceName="$fsHomeCluster:/dev/$remoteDevice"
        remoteFsSpecified=yes
        # Track whether more than one distinct remote cluster is
        # referenced; this controls the error messages shown later.
        [[ -n $remoteCluster && $fsHomeCluster != $remoteCluster ]] &&  \
          multipleClustersInvolved=yes
        remoteCluster=$fsHomeCluster
      fi
    else
      # Explicitly-remote device given as "clusterName:deviceName".
      fqDeviceName=$device
      deviceName=${fqDeviceName##*:}     # text after the last ':'
      fsHomeCluster=${fqDeviceName%%:*}  # text before the first ':'
      remoteDevice=$deviceName
      remoteFsSpecified=yes
      [[ -n $remoteCluster && $fsHomeCluster != $remoteCluster ]] &&  \
        multipleClustersInvolved=yes
      remoteCluster=$fsHomeCluster
    fi

    # If more than one file system was requested, suppress error
    # messages that deal with individual file systems.
    # Such errors will go into the "unexpected" category.
    [[ -n $deviceList2 ]] && multipleDevicesInvolved=yes

    # Append the fully-qualified name to the list of fs names.
    deviceList2="$deviceList2 $fqDeviceName"
  done
  set +f

elif [[ -n $COpt ]]
then
  # The user provided a cluster name.
  # "clusterName:" (no device part) asks tslsmgr for all file
  # systems owned by that cluster.
  deviceList2=$clusterName":"
  fsHomeCluster=$clusterName
  remoteFsSpecified=yes

else
  # The user did not specify any parameters.  Since deviceList2
  # is already set to the null string, tslsmgr will return
  # information for the filesystems in the local cluster.
  :  # Do nothing; everything is ready to call tslsmgr.
fi  # end of if [[ -n $deviceList ]]


########################################################################
# Invoke the command on the local node if devices were found.
# Display any error messages and exit if any of the following are true:
#   - the command completed successfully
#   - there is an unacceptable error
#       (anything other than daemon down or quorum wait)
#   - one of the file systems is remote
#   - we are not running as UID 0
#   - this is a single node cluster
########################################################################
# Devices were requested but none survived validation above;
# there is nothing to display.
[[ -n $deviceList && -z $deviceList2 ]] && cleanupAndExit
# Run the daemon interface command locally, capturing its stderr in
# the errMsg temp file for possible display below.
${mmcmdDir}/${links}/mmlsmgr $deviceList2 2>$errMsg
# remapRC presumably normalizes the raw exit code to the MM_* values
# tested below — TODO confirm.
rc=$(remapRC $?)
if [[ ($rc -ne $MM_DaemonDown && $rc -ne $MM_QuorumWait) ||
      $remoteFsSpecified = yes                           ||
      $ourUid -ne 0                                      ||
      $MMMODE = single ]]
then
  if [[ $rc -eq $MM_FsNotFound && -z $multipleDevicesInvolved ]]
  then
    if [[ $fsHomeCluster != $HOME_CLUSTER ]]
    then
      # The remote cluster does not know anything about this file system.
      printErrorMsg 108 $mmcmd $remoteDevice $fsHomeCluster
    else
      # Unexpected error.
      printErrorMsg 171 $mmcmd "file system $deviceName not found" $rc
    fi
  elif [[ $rc -eq $MM_Remotefs            &&
          $fsHomeCluster != $HOME_CLUSTER &&
          -z $multipleDevicesInvolved     ]]
  then
    # The file system is not owned by the remote cluster.
    # Message 111 applies only when the user gave a plain device name
    # that findFS resolved to a remote cluster (not "cluster:device").
    [[ $device != *:* ]] &&  \
      printErrorMsg 111 $mmcmd $device $remoteDevice $fsHomeCluster
    printErrorMsg 112 $mmcmd $remoteDevice $fsHomeCluster
  elif [[ ($rc -eq $MM_HostDown    ||
           $rc -eq $MM_TimedOut    ||
           $rc -eq $MM_SecurityCfg ||
           $rc -eq $MM_AuthorizationFailed ||
           $rc -eq $MM_UnknownCluster)    &&
          $fsHomeCluster != $HOME_CLUSTER &&
          -z $multipleClustersInvolved    ]]
  then
    # Failed to connect to the remote cluster.
    # Show the specific reason (if any) before the generic message.
    [[ $rc -eq $MM_SecurityCfg ]] &&  \
      printErrorMsg 150 $mmcmd
    [[ $rc -eq $MM_AuthorizationFailed ]] &&  \
      printErrorMsg 151 $mmcmd
    printErrorMsg 105 $mmcmd $fsHomeCluster
  elif [[ $rc -eq $MM_DaemonDown ]]
  then
    # GPFS is down on this node.
    printErrorMsg 109 $mmcmd
  elif [[ $rc -eq $MM_QuorumWait ]]
  then
    # GPFS is not ready for commands.
    printErrorMsg 110 $mmcmd
  elif [[ $rc -eq $MM_ConnectionReset ]]
  then
    # An internode connection was reset.
    printErrorMsg 257 $mmcmd
  else
    # Either the command worked, or it is an unexpected error.
    if [[ -s $errMsg ]]
    then
      # Show the error messages from the daemon.
      $cat $errMsg 1>&2
    elif [[ $rc -ne 0 ]]
    then
      # tslsmgr failed.
      printErrorMsg 104 "$mmcmd" "tslsmgr $deviceList2"
    else
      :  # The command must have worked.
    fi
  fi  # end of if [[ $rc -eq $MM_FsNotFound && -z $multipleDevicesInvolved ]]
  cleanupAndExit $rc
fi  # end of if [[ ($rc -ne $MM_DaemonDown && ... ]]
# The local daemon is down but the request is local and we are root:
# discard the captured stderr and fall through to remote execution.
$rm -f $errMsg


#########################################################################
# We come here if the command was invoked for a local file system but
# the local daemon is not available; send the command to an active node.
#########################################################################

# Build a file with the reliable hostnames of every node in the
# cluster; getNodeFile reports how many names it wrote.
nodeCount=$(getNodeFile $REL_HOSTNAME_Field $GLOBAL_ID $mmsdrfsFile $nodefile)
if (( nodeCount == 0 ))
then
  # An empty cluster means there is no node that could run the command.
  printErrorMsg 171 $mmcmd "getNodeFile (nodeCount=0)" 1
  cleanupAndExit
fi

# Ask mmcommon to locate a node with an active daemon and run the
# command there; a preferred node of 0 lets mmcommon choose freely.
preferredNode=0
$mmcommon linkCommand $preferredNode $nodefile mmlsmgr $deviceList2
rc=$?

cleanupAndExit $rc

