/***************************************************************************
 *
 * Copyright (C) 2001 International Business Machines
 * All rights reserved.
 *
 * This file is part of the GPFS mmfslinux kernel module.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ***************************************************************************/
/* @(#)95 1.95.1.8 src/avs/fs/mmfs/ts/kernext/gpl-linux/file.c, mmfs, avs_rgpfs24, rgpfs24s012a 4/30/07 20:13:26 */

/*
 * File operations
 *
 * Contents:
 *   gpfs_f_llseek
 *   gpfs_f_readdir
 *   gpfs_f_poll
 *   gpfs_f_ioctl
 *   gpfs_filemap_open
 *   gpfs_filemap_close
 *   gpfs_filemap_nopage (in mmap.c)
 *   gpfs_filemap_nopagedone (in mmap.c)
 *   gpfs_f_mmap
 *   gpfs_f_open
 *   gpfs_f_release
 *   gpfs_f_fsync
 *   gpfs_f_fasync
 *   fsyncInternal
 *   gpfs_f_lock
 *   gpfs_f_flock
 *   rdwrInternal
 *   gpfs_f_read
 *   gpfs_f_dir_read
 *   gpfs_f_write
 *   gpfs_f_readv
 *   gpfs_f_writev
 *   gpfs_f_cleanup
 */

#include
#include
#include
#include
#include
#ifdef MODULE
#include
#endif
#include
#include
#include
#include
#ifdef NFS4_CLUSTER
#include
#endif /* NFS4_CLUSTER */
#if LINUX_KERNEL_VERSION >= 2050000
#include
#include
#endif
#include
#ifdef P_NFS4
#include
#include
#include
#include
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* prototypes */
static int fsyncInternal(struct file *fP);

/* file_operations */
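/* gpfs_f_llseek supports the three standard origins: 0 (SEEK_SET) takes the
 * offset as given, 1 (SEEK_CUR) is relative to f_pos, and 2 (SEEK_END) is
 * relative to the current file size.  For SEEK_END the inode attributes are
 * refreshed first via gpfs_i_getattr_internal(), since the cached i_size may
 * be stale (e.g., the file was extended from another node).
 */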
loff_t gpfs_f_llseek(struct file *fP, loff_t offset, int origin)
{
  struct inode *iP = fP->f_dentry->d_inode;
  loff_t rc = -EINVAL;

  ENTER(0);
  TRACE3(TRACE_VNODE, 1, TRCID_LINUXOPS_LLSEEK_ENTER,
         "gpfs_f_llseek enter: fP 0x%lX offset 0x%llX origin %d\n",
         fP, offset, origin);
  /* BKL is held at entry */

  switch (origin)
  {
    case 2:
      gpfs_i_getattr_internal(iP);
      offset += iP->i_size;
      break;
    case 1:
      offset += fP->f_pos;
  }

  if (offset >= 0)
  {
    rc = offset;
    if (offset != fP->f_pos)
    {
      fP->f_pos = offset;
#if LINUX_KERNEL_VERSION < 2050000
      fP->f_reada = 0;
#endif
    }
  }
  else
    cxiErrorNFS(rc);

  TRACE4(TRACE_VNODE, 1, TRCID_LINUXOPS_LLSEEK_EXIT,
         "gpfs_f_llseek exit: fP 0x%lX offset 0x%llX origin %d rc 0x%llX\n",
         fP, offset, origin, rc);
  EXIT(0);
  return rc;
}

/* Save everything we need to make the OS-specific filldir call. */
typedef struct
{
  offset_t offset;
  ino_t ino;
  int namelen;
  char name[1];
} fillDirEntry;

/* gpfs_f_readdir provides a buffer for NFS_fillDir to place entries,
 * and this structure to keep track of its use over successive calls.
 */
typedef struct
{
  fillDirEntry *firstP;   /* first entry */
  fillDirEntry *endP;     /* buffer end */
  fillDirEntry *bufferP;  /* current location */
} NFS_fillDirArgs;

/* Return the location of our next filldir entry.  Allow for the size
 * of the fillDirEntry struct plus the namelen.  Round to dblword.
 */
#define NEXT_FILLDIR_ENTRY(eP, len) \
  (fillDirEntry *)((caddr_t)(eP)+(((sizeof(fillDirEntry)+(len)+3)>>2)<<2))

/* Size of our NFS_fillDir buffer. */
#define FILLDIR_BUFSIZE 700
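/* A worked example of the NEXT_FILLDIR_ENTRY arithmetic (the exact value of
 * sizeof(fillDirEntry) is architecture dependent; 24 bytes is assumed here
 * purely for illustration): for a 6-byte name, 24 + 6 + 3 = 33, and
 * (33 >> 2) << 2 = 32, i.e. the sum is rounded up to the next multiple of 4.
 * Each saved entry therefore starts on a 4-byte boundary, and the name[1]
 * member already accounts for the trailing NUL that NFS_fillDir copies with
 * namelen+1.
 */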
/* For NFS readdir, we provide our own filldir callback so that we can save
 * the records until after we release our locks.  We can then make the real
 * filldir calls without fear they will deadlock when they loop back to the
 * filesystem for permission checks, etc.
 */
int NFS_fillDir(void *myArgP, char *nameP, int namelen,
                offset_t offset, const ino_t ino)
{
  NFS_fillDirArgs *argsP = (NFS_fillDirArgs *)myArgP;
  fillDirEntry *entryP = argsP->bufferP;
  fillDirEntry *nextP = NEXT_FILLDIR_ENTRY(entryP, namelen);

  /* If this entry will not fit, report the full condition. */
  if (nextP > argsP->endP)
    return -EINVAL;

  /* Save filldir information to make the real call later */
  entryP->offset = offset;
  entryP->ino = ino;
  entryP->namelen = namelen;
  cxiMemcpy(entryP->name, nameP, namelen+1);

  /* Bump the entry location in arg structure for the next call */
  argsP->bufferP = nextP;
  return 0;
}
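/* gpfs_f_readdir: for local callers the kernel's filldir is invoked (through
 * cxiFillDir) while gpfsReaddir runs.  For NFS threads the entries are first
 * captured in a pinned buffer by NFS_fillDir and replayed only after
 * gpfsReaddir has returned and all locks have been dropped; if a replayed
 * entry does not fit, f_pos is rewound to that entry's offset so the next
 * nfsd_readdir call resumes exactly there.
 */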
int gpfs_f_readdir(struct file *fP, void *direntP, filldir_t filldir)
{
  int rc;
  Boolean klock = false;
  struct gpfsVfsData_t *privVfsP;
  cxiNode_t *cnP;
  struct inode *iP;
  cxiFillDirArg_t fillDirArg;
  fillDirEntry *fillDirBuf = NULL;
  NFS_fillDirArgs filldirArgs;

  VFS_STAT_START(readdirCall);
  ENTER(0);
  DBGASSERT(fP != NULL);

  TRACE4(TRACE_VNODE, 1, TRCID_LINUXOPS_READDIR_ENTER,
         "gpfs_f_readdir enter: fP 0x%lX direntP 0x%lX "
         "filldir 0x%lX pos %lld\n",
         fP, direntP, filldir, fP->f_pos);
  /* BKL is held at entry */

  /* Quick check for EOF */
  if (fP->f_pos == GPFS_DIR_EOF)
  {
    rc = 0;  // end-of-directory
  }
  else
  {
    iP = fP->f_dentry->d_inode;
    DBGASSERT(iP != NULL);
    cnP = VP_TO_CNP(iP);
    privVfsP = VP_TO_PVP(iP);
    DBGASSERT(privVfsP != NULL);

    /* When called by NFS, wait to make the filldir calls until after
     * we return from gpfsReaddir.  The NFS filldir implementation
     * includes callbacks (e.g., permission checks) into the filesystem
     * and these calls may result in getting locks out-of-order and
     * are therefore subject to deadlock.
     */
    if (cxiIsNFSThread())
    {
      /* Specify a special filldir function where we will save entry
       * information.  Upon our return from gpfsReaddir, we no longer
       * hold any locks so we will then go through these saved entries
       * and make the real filldir calls.
       */
      fillDirArg.fnP = (void *)NFS_fillDir;
      fillDirArg.argP = &filldirArgs;
      fillDirBuf = (fillDirEntry *)cxiMallocPinned(FILLDIR_BUFSIZE);
      filldirArgs.firstP = fillDirBuf;
      filldirArgs.endP = (fillDirEntry *)((caddr_t)fillDirBuf + FILLDIR_BUFSIZE);
      filldirArgs.bufferP = filldirArgs.firstP;
    }
    else
    {
      /* Unfortunately we can't use the OS version of the filldir
       * routine directly.  It has different signatures in varying
       * kernel levels, so we use cxiFillDir() in the portability layer
       * to handle the different signatures.
       */
      fillDirArg.fnP = (void *)filldir;
      fillDirArg.argP = direntP;
    }

    if (kernel_locked())
    {
      unlock_kernel();
      klock = true;
    }

    rc = gpfs_ops.gpfsReaddir(privVfsP, cnP, &fillDirArg, cxiFillDir,
                              &fP->f_pos, vnOp, NULL);

    /* Even if gpfsReaddir reports an error, we want to look
     * to see if there were any entries previously returned.
     */
    if (cxiIsNFSThread() && (filldirArgs.bufferP > filldirArgs.firstP))
    {
      int fillrc;
      fillDirEntry *nextP = filldirArgs.firstP;

      /* Set the real filldir fcn/arg pointers */
      fillDirArg.fnP = (void *)filldir;
      fillDirArg.argP = direntP;

      while (nextP < filldirArgs.bufferP)
      {
        /* Do not overlay any gpfsReaddir rc. */
        fillrc = CALL_FILLDIR(cxiFillDir, &fillDirArg, (nextP->name),
                              (nextP->namelen), (nextP->offset), (nextP->ino));
        if (fillrc < 0)
        {
          rc = 0;  /* entry doesn't fit is ok (will resume at offset) */

          /* Reset f_pos based on what we've been able to pass back
           * to NFS.  This is where they will start on the next call.
           */
          fP->f_pos = nextP->offset;  /* next offset for nfsd_readdir */
          break;
        }
        nextP = NEXT_FILLDIR_ENTRY(nextP, nextP->namelen);
      }
    }

    if (klock)
      lock_kernel();
  }

  TRACE3(TRACE_VNODE, 1, TRCID_LINUXOPS_READDIR_EXIT,
         "gpfs_f_readdir exit: fP 0x%lX pos %lld code 0 rc %d\n",
         fP, fP->f_pos, rc);

  if (fillDirBuf)
    cxiFreePinned(fillDirBuf);
  if (rc)
    cxiErrorNFS(rc);

  VFS_STAT_STOP;
  EXIT(0);
  return (-rc);
}

uint gpfs_f_poll(struct file *fP, struct poll_table_struct *wait)
{
  TRACE1(TRACE_VNODE, 1, TRCID_LINUXOPS_POLL,
         "gpfs_f_poll: rc bits POLLERR: fP 0x%lX\n", fP);
  return (uint)0;  // ?? which POLL* bits
}

int gpfs_f_ioctl(struct inode *iP, struct file *fP, uint cmd, unsigned long arg)
{
  TRACE3(TRACE_VNODE, 1, TRCID_LINUXOPS_IOCTL,
         "gpfs_f_ioctl: rc -ENOTTY: iP 0x%lX fP 0x%lX cmd %d\n", iP, fP, cmd);
  return -ENOTTY;  // no one can really explain why this errno, but it is common
}
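/* Both gpfs_filemap_open and gpfs_f_mmap describe the mapped region to
 * gpfsMmap as a byte range: offset is the file offset of the first mapped
 * page (vm_pgoff shifted up by PAGE_SHIFT) and length is the extent of the
 * mapping (vm_end - vm_start).  Write access is requested only when the
 * mapping is both shared and writable (VM_SHARED and VM_MAYWRITE set).
 */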
/* called for every child process forked after mmap */
void gpfs_filemap_open(struct vm_area_struct * vma)
{
  int rc = 0;
  Boolean writeAccess = false;
  cxiNode_t *cnP;
  ext_cred_t eCred;
  struct file *file = vma->vm_file;
  struct inode *inode = file->f_dentry->d_inode;
  struct gpfsVfsData_t *privVfsP;
  struct mm_struct *mm = vma->vm_mm;
  long long offset;
  long long length;

  ENTER(0);
  TRACE4(TRACE_VNODE, 2, TRCID_FM_OPEN,
         "gpfs_filemap_open enter: vma 0x%lX inode %d icount %d name %s\n",
         vma, inode->i_ino, atomic_read((atomic_t *)&inode->i_count),
         file->f_dentry?
         file->f_dentry->d_name.name: (const unsigned char*)"");
  TRACE2(TRACE_VNODE, 2, TRCID_FM_OPEN_1,
         "gpfs_filemap_open : mm 0x%lX mm_users %d\n",
         mm, atomic_read(&mm->mm_users));

  cnP = VP_TO_CNP(inode);
  privVfsP = VP_TO_PVP(inode);
  DBGASSERT(privVfsP != NULL);

  if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
    MMAP_WRITE_ACCESS(writeAccess);

  setCred(&eCred);
  offset = vma->vm_pgoff << PAGE_SHIFT;
  length = vma->vm_end - vma->vm_start;
  rc = gpfs_ops.gpfsMmap(privVfsP, cnP, (void *)inode, &eCred, NULL,
                         writeAccess, false, offset, length);

  TRACE2(TRACE_VNODE, 2, TRCID_FM_OPEN_EXIT,
         "gpfs_filemap_open exit: vma 0x%lX icount %d\n",
         vma, atomic_read((atomic_t *)&inode->i_count));
  if (rc)
    cxiErrorNFS(rc);
  EXIT(0);
}

void gpfs_filemap_close(struct vm_area_struct * vma)
{
  struct file *fP = vma->vm_file;
  struct inode *inode = fP->f_dentry->d_inode;
  int flags, rc;
  struct gpfsVfsData_t *privVfsP;
  cxiNode_t *cnP = VP_TO_CNP(inode);
  struct mm_struct *mm = vma->vm_mm;

  VFS_STAT_START(unmapCall);
  ENTER(0);

  if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
    flags = 0;
  else
    flags = CXI_SHM_RDONLY;

  privVfsP = VP_TO_PVP(inode);

  TRACE3(TRACE_VNODE, 2, TRCID_FM_CLOSE_ENTER,
         "gpfs_filemap_close: vma 0x%lX inode 0x%lX i_count %d\n",
         vma, inode, (Int32)atomic_read((atomic_t *)&inode->i_count));
  TRACE3(TRACE_VNODE, 2, TRCID_FM_CLOSE_ENTER1,
         "gpfs_filemap_close: inode %d, name %s, nrpages %d\n",
         inode->i_ino,
         fP->f_dentry? fP->f_dentry->d_name.name: (const unsigned char*)"",
         inode->i_data.nrpages);
  TRACE2(TRACE_VNODE, 2, TRCID_FM_CLOSE_ENR,
         "gpfs_filemap_close: mm 0x%lX mm_users %d\n",
         mm, atomic_read(&mm->mm_users));

  rc = gpfs_ops.gpfsUnmap(privVfsP, cnP, flags);
  cxiPutOSNode((void *)inode);

  TRACE3(TRACE_VNODE, 2, TRCID_FM_CLOSE,
         "gpfs_filemap_close: vma 0x%lX inode 0x%lX i_count %d\n",
         vma, inode, (Int32)atomic_read((atomic_t *)&inode->i_count));
  if (rc)
    cxiErrorNFS(rc);
  VFS_STAT_STOP;
  EXIT(0);
}
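/* gpfs_f_mmap vets the mapping before accepting it: shared writable mappings
 * require an a_ops->writepage method, every mapping requires a_ops->readpage
 * and a regular file, and the Linux inode is revalidated through gpfsGetattr
 * so the VFS-level attribute cache is current before gpfs_vmop is installed.
 */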
int gpfs_f_mmap(struct file *fP, struct vm_area_struct *vma)
{
  int rc;
  int code = 0;
  Boolean heldVnode = false;
  Boolean writeAccess = false;
  cxiNode_t *cnP;
  struct gpfsVfsData_t *privVfsP;
  struct inode *iP = fP->f_dentry->d_inode;
  ext_cred_t eCred;
  cxiVattr_t vattr;
  struct mm_struct *mm = vma->vm_mm;
  long long offset;
  long long length;

  VFS_STAT_START(map_lloffCall);
  ENTER(0);
  TRACE3(TRACE_VNODE, 1, TRCID_LINUXOPS_MMAP_ENTER,
         "gpfs_f_mmap enter: fP 0x%lX inum %d vma 0x%lX\n",
         fP, iP->i_ino, vma);
  TRACE4(TRACE_VNODE, 1, TRCID_LINUXOPS_MMAP_ENTER_A,
         "gpfs_f_mmap: vm_start 0x%lX vm_end 0x%lX, vmpgoff 0x%lX, "
         "vmflags 0x%lX\n",
         vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_flags);
  TRACE4(TRACE_VNODE, 2, TRCID_LINUXOPS_MMAP_ENTER_A1,
         "gpfs_f_mmap: inode %d icount %d name %s nrpages %d\n",
         iP->i_ino, atomic_read((atomic_t *)&iP->i_count),
         fP->f_dentry ?
         fP->f_dentry->d_name.name : (const unsigned char*)"",
         iP->i_data.nrpages);
  TRACE2(TRACE_VNODE, 1, TRCID_LINUXOPS_MMAP_ENTER_AB,
         "gpfs_f_mmap: mm 0x%lX mm_users %d\n",
         mm, atomic_read(&mm->mm_users));

  cnP = VP_TO_CNP(iP);
  privVfsP = VP_TO_PVP(iP);
  DBGASSERT(privVfsP != NULL);

  if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
  {
    MMAP_WRITE_ACCESS(writeAccess);
    if (!writeAccess)
    {
      /* Patch must be applied at this kernel level for mmap write */
      code = 1;
      rc = -EINVAL;
      goto xerror;
    }
  }

  setCred(&eCred);
  offset = vma->vm_pgoff << PAGE_SHIFT;
  length = vma->vm_end - vma->vm_start;
  rc = gpfs_ops.gpfsMmap(privVfsP, cnP, (void *)iP, &eCred, NULL,
                         writeAccess, true, offset, length);
  if (rc != 0)
  {
    code = 2;
    rc = -rc;
    goto xerror;
  }

  heldVnode = true;

  if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE) &&
      !iP->i_mapping->a_ops->writepage)
  {
    code = 3;
    rc = -EINVAL;
    goto xerror;
  }

  if (!iP->i_sb || !S_ISREG(iP->i_mode))
  {
    code = 4;
    rc = -EACCES;
    goto xerror;
  }

  if (!iP->i_mapping->a_ops->readpage)
  {
    code = 5;
    rc = -ENOEXEC;
    goto xerror;
  }

  /* revalidate linux inode */
  /* This has the effect of calling us back under a lock and
   * setting the inode attributes at the OS level (since this
   * operating system caches this info in the vfs layer)
   */
  rc = gpfs_ops.gpfsGetattr(privVfsP, cnP, &vattr, false);
  if (rc != 0)
  {
    code = 6;
    rc = -rc;
    goto xerror;
  }

#ifdef UPDATE_ATIME
  UPDATE_ATIME(iP);
#else
#if LINUX_KERNEL_VERSION >= 2061600
  touch_atime(NULL, fP->f_dentry);
#else
  update_atime(iP);
#endif
#endif
  vma->vm_ops = &gpfs_vmop;

xerror:
  if (rc != 0 && heldVnode)
    cxiPutOSNode((void *)iP);  // corresponding hold in gpfsMmap

  TRACE2(TRACE_VNODE, 1, TRCID_LINUXOPS_MMAP_EXIT,
         "gpfs_f_mmap exit: rc %d code %d\n", rc, code);
  if (rc)
    cxiErrorNFS(rc);
  VFS_STAT_STOP;
  EXIT(0);
  return rc;
}

int gpfs_f_open(struct inode *iP, struct file *fP)
{
  int rc = 0;
  int code = 0;
  Boolean gotBKL = false;
  int flags = cxiOpenFlagsXlate(fP->f_flags);
  int iflags = cxiIsSambaThread()? GPFS_OPEN_NO_SMBLOCK: 0;
  cxiNode_t *cnP;
  struct gpfsVfsData_t *privVfsP;
  ext_cred_t eCred;

  VFS_STAT_START(openCall);
  ENTER(0);
  TRACE7(TRACE_VNODE, 1, TRCID_LINUXOPS_OPEN_ENTER,
         "gpfs_f_open enter: iP 0x%lX fP 0x%lX f_flags 0x%X dP 0x%lX '%s' "
         "flags 0x%X isNFS %d\n",
         iP, fP, fP->f_flags, fP->f_dentry,
         fP->f_dentry? fP->f_dentry->d_name.name: (const unsigned char*)"",
         flags, cxiIsNFSThread());

  /* BKL is not held at entry, except for NFS calls */
  TraceBKL();
  if (current->lock_depth >= 0)  /* kernel lock is held by me */
  {
    gotBKL = true;
    unlock_kernel();
  }

  cnP = VP_TO_CNP(iP);
  privVfsP = VP_TO_PVP(iP);
  DBGASSERT(privVfsP != NULL);

  /* see comment in gpfs_i_create() on the reason for this code */
  if (cnP->createRaceLoserThreadId &&
      cnP->createRaceLoserThreadId == cxiGetThreadId())
  {
    int fflags = cxiOpenFlagsXlate(fP->f_flags);
    int amode;

    cnP->createRaceLoserThreadId = 0;
    code = EEXIST;

    amode = ((flags & FWRITE ? W_ACC : 0) |
             (flags & FREAD ? R_ACC : 0) |
             (flags & FTRUNC ? W_ACC : 0));

    TRACE4(TRACE_VNODE, 1, TRCID_LINUXOPS_OPEN_01,
           "gpfs_f_open fileExist iP 0x%lX cnP 0x%lX fflags 0x%X amode 0x%X\n",
           iP, cnP, fflags, amode);

    /* Check if FEXCL and FCREAT are on and the file exists; return EEXIST.
     * Could not do it at create time because the open flags are not
     * available on the create call.
     */
    if ((flags & FEXCL) && (flags & FCREAT))
    {
      rc = EEXIST;
      goto xerror;
    }

    setCred(&eCred);
    rc = gpfs_ops.gpfsAccess(privVfsP, cnP, amode, ACC_SELF, &eCred);
    if (rc)
      goto xerror;
  }

  if (cxiIsNFSThread() && GNP_IS_FILE(cnP))
  {
    int NFSflags;
    int code;

    BEGIN_FAR_CODE;
    /* Linux NFS will not do vget so the clone vnode cannot be created
       then.  Need to GetNFS here so the NFS structures will be
       available. */
#ifdef NFS_CLUSTER_LOCKS
    //??? temp fix for NFSv4
    fP->f_mode |= FMODE_READ;
#endif
    NFSflags = FWRITE|FREAD;
    rc = gpfs_ops.gpfsGetNFS((void *)iP,
                             (struct MMFSVInfo **)&fP->private_data,
                             &NFSflags);
    if (rc != 0)
    {
      code = ENOSYS; //??EGET_NFS;
      goto xerror;
    }
    DBGASSERT((struct MMFSVInfo *)fP->private_data != NULL);
    END_FAR_CODE;
    goto xerror;
  }

  setCred(&eCred);  // rebuild since gpfsAccess may have remapped the ids
  rc = gpfs_ops.gpfsOpen(privVfsP, cnP, flags, iflags, 0,
                         (struct MMFSVInfo **)&fP->private_data, &eCred);

xerror:
  TRACE4(TRACE_VNODE, 1, TRCID_LINUXOPS_OPEN_EXIT,
         "gpfs_f_open exit: iP 0x%lX vinfoP 0x%lX code %d rc %d\n",
         iP, (struct MMFSVInfo *)fP->private_data, code, rc);

  VFS_STAT_STOP;

  if (gotBKL)  /* If held kernel lock on entry then reacquire it */
    lock_kernel();

  if (rc)
    cxiErrorNFS(rc);
  EXIT(0);
  return (-rc);
}
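/* gpfs_f_release undoes gpfs_f_open.  For NFS threads the close is deferred
 * (gpfsReleaseNFS arranges a delayed close) because nfsd opens and closes
 * files around individual requests; for all other callers gpfsClose runs
 * immediately and the MMFSVInfo hung off fP->private_data is freed.
 */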
int gpfs_f_release(struct inode *iP, struct file *fP)
{
  int rc = 0;
  int code = 0;
  int flags = cxiOpenFlagsXlate(fP->f_flags);
  struct MMFSVInfo *vinfoP = (struct MMFSVInfo *)fP->private_data;
  cxiNode_t *cnP;
  struct gpfsVfsData_t *privVfsP;

  VFS_STAT_START(closeCall);
  ENTER(0);
  cnP = VP_TO_CNP(iP);
  privVfsP = VP_TO_PVP(iP);
  DBGASSERT(privVfsP != NULL);

  TRACE4(TRACE_VNODE, 1, TRCID_LINUXOPS_CLOSE_ENTER,
         "gpfs_f_release enter: iP 0x%lX f_flags 0x%X flags 0x%X vinfoP 0x%lX\n",
         iP, fP->f_flags, flags, vinfoP);
  /* BKL is held if the file was open R/W, otherwise not held */

  /* If nfsd is closing one of its files, schedule it for a delayed close. */
  if (cnP && VP_TO_NFSP(iP) && cxiIsNFSThread())
  {
    DBGASSERT(GNP_IS_FILE(cnP));

    /* On the last NFS release, a watchdog will be set to close the
       file after a delay. */
    rc = gpfs_ops.gpfsReleaseNFS(iP);
    goto xerror;
  }

  rc = gpfs_ops.gpfsClose(privVfsP, cnP, flags, vinfoP, true);
  fP->private_data = NULL;  // MMFSVInfo was freed

xerror:
  TRACE2(TRACE_VNODE, 1, TRCID_CLOSE_EXIT,
         "gpfs_f_release exit: code %d rc %d\n", code, rc);
  if (rc)
    cxiErrorNFS(rc);
  VFS_STAT_STOP;
  EXIT(0);
  return (-rc);
}

int gpfs_f_fsync(struct file *fP, struct dentry *direntP, int datasync)
{
  int rc;

  ENTER(0);
  TRACE3(TRACE_VNODE, 1, TRCID_LINUXOPS_FSYNC_ENTER,
         "gpfs_f_fsync enter: fP 0x%lX dirent 0x%lX datasync %d\n",
         fP, direntP, datasync);
  /* Linux doc says BKL is held, but it does not seem to be */

  rc = fsyncInternal(fP);

  TRACE2(TRACE_VNODE, 1, TRCID_LINUXOPS_FSYNC_EXIT,
         "gpfs_f_fsync exit: file 0x%lX rc %d\n", fP, rc);
  EXIT(0);
  return (-rc);
}

int gpfs_f_fasync(int fd, struct file *fP, int on)
{
  int rc;

  ENTER(0);
  TRACE3(TRACE_VNODE, 1, TRCID_LINUXOPS_FASYNC_ENTER,
         "gpfs_f_fasync enter: fd %d fP 0x%lX on %d\n", fd, fP, on);
  /* Linux doc says BKL is held, but it does not seem to be */

  rc = fsyncInternal(fP);

  TRACE2(TRACE_VNODE, 1, TRCID_LINUXOPS_FASYNC_EXIT,
         "gpfs_f_fasync exit: fP 0x%lX rc %d\n", fP, rc);
  EXIT(0);
  return (-rc);
}

static int fsyncInternal(struct file *fP)
{
  int rc = 0;
  cxiNode_t *cnP;
  struct inode *iP;
  struct gpfsVfsData_t *privVfsP;
  ext_cred_t eCred;
  struct MMFSVInfo *vinfoP;

  VFS_STAT_START(fsyncCall);
  ENTER(0);
  VFS_INC(fsyncCall);

  /* Creating files via nfs can get us here with a null fP. */
  if (!fP)
    goto xerror;

  vinfoP = (struct MMFSVInfo *)fP->private_data;
  iP = fP->f_dentry->d_inode;
  DBGASSERT(iP != NULL);
  cnP = VP_TO_CNP(iP);
  privVfsP = VP_TO_PVP(iP);
  DBGASSERT(privVfsP != NULL);

  setCred(&eCred);
  rc = gpfs_ops.gpfsFsync(privVfsP, vinfoP, cnP, FFILESYNC, &eCred);
  if (rc)
    cxiErrorNFS(rc);

xerror:
  VFS_STAT_STOP;
  EXIT(0);
  return rc;
}
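/* Lease (delegation) ordering in gpfs_f_set_lease below: the matching GPFS
 * delegation is reserved first (gpfsReserveDelegation), then the kernel is
 * asked to record the lease (setlease).  If the kernel call fails the
 * delegation is released again; if it succeeds but the oplock state no
 * longer matches what was requested, the lease is broken via __break_lease.
 */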
#ifdef NFS_CLUSTER_LOCKS
void gpfs_grace(int on_off)
{
  gpfs_ops.gpfsGrace(on_off);
}

/*
 * arg is lock type F_RDLCK:0 F_WRLCK:1 F_UNLCK:2
 */
int gpfs_f_set_lease(struct file *fP, long arg, struct file_lock **flPP)
{
  int rc = EAGAIN;
  int NFSflags;
  int mode, oplockWant, oplockGot, flags;
  void *cb_token, *cookie;
  struct file_lock *flP;
  ext_cred_t eCred;
  cxiNode_t *cnP;
  struct gpfsVfsData_t *privVfsP;
  struct MMFSVInfo *vinfoP = (struct MMFSVInfo *)fP->private_data;
  struct inode *iP = fP->f_dentry->d_inode;

  //VFS_STAT_START(lockctlCall);
  ENTER(0);
  TRACE4(TRACE_VNODE, 1, TRCID_LINUXOPS_LEASE_ENTER,
         "gpfs_f_set_lease: fP 0x%lX iP 0x%lX type %s by %s\n",
         fP, iP,
         (arg == F_RDLCK) ? "RD" : (arg == F_WRLCK) ? "WR" : "UNLCK",
         (cxiIsNFSThread()) ? "NFS" : "SAMBA");

  privVfsP = VP_TO_PVP(fP->f_dentry->d_inode);
  DBGASSERT(privVfsP != NULL);

  flP = *flPP;
  cookie = NULL;
  NFSflags = FREAD;
  mode = FMODE_WRITE;
  flags = RESERVE_NONE;
  oplockGot = smbOplockNone;
  cb_token = iP;

  if (arg == F_UNLCK)
  {
    cb_token = NULL;
    oplockWant = smbOplockNone;
    flags = RESERVE_DOWNGRADE;
  }
  else if (arg == F_RDLCK)
    oplockWant = smbOplockShared;
  else if (arg == F_WRLCK)
  {
    oplockWant = smbOplockExclusive;
    NFSflags |= FWRITE;
    mode = FMODE_READ;
  }
  else
    goto xerror;

  if (cxiIsNFSThread())
  {
    rc = gpfs_ops.gpfsGetNFS((void *)iP,
                             (struct MMFSVInfo **)&vinfoP,
                             &NFSflags);
    if (rc)
      goto xerror;

    cnP = VP_TO_CNP(iP);
    setCred(&eCred);
    rc = gpfs_ops.gpfsOpenNFS(privVfsP, cnP, NFSflags, vinfoP, &eCred);
    if (rc)
      goto xerror2;
  }

  rc = gpfs_ops.gpfsReserveDelegation(fP, vinfoP, privVfsP, oplockWant,
                                      flags, cb_token, cookie);
  if (rc)
    goto xerror2;

  lock_kernel();
  rc = setlease(fP, arg, flPP);
  unlock_kernel();

  if (rc)
  { // if error release the delegation
    gpfs_ops.gpfsReserveDelegation(fP, vinfoP, privVfsP, smbOplockNone,
                                   RESERVE_DOWNGRADE, NULL, NULL);
    if (rc < 0)
      rc = -rc;  /* make it positive */
  }
  else
  { // rc=0
    oplockGot = gpfs_ops.SMBGetOplockStateV(vinfoP);
    if (oplockGot == oplockWant)
      goto xerror2;
    else
    { // already lost the delegation
      __break_lease(iP, FMODE_WRITE);
    }
  }

xerror2:
  if (cxiIsNFSThread())
    gpfs_ops.gpfsReleaseNFS(iP);

xerror:
  TRACE6(TRACE_VNODE, 1, TRCID_LINUXOPS_LEASE_EXIT,
         "gpfs_f_set_lease: fP 0x%lX flP 0x%lX rc %d oplockWant %d "
         "oplockGot %d %s\n",
         fP, flP, rc, oplockWant, oplockGot,
         (oplockGot == smbOplockShared) ? "RD" :
         (oplockGot == smbOplockExclusive) ? "WR" : "NONE");
  //VFS_STAT_STOP;
  EXIT(0);
  return (-rc);
}
#endif

int gpfs_f_lock(struct file *fP, int cmd, struct file_lock *flP)
{
  int rc = 0;
  int code = 0;
  cxiNode_t *cnP;
  ext_cred_t eCred;
  struct gpfsVfsData_t *privVfsP;
  eflock_t lckdat;
  unsigned long localRetryId = 0;
  int (*vfs_callback)(void *, void *, int) = NULL;

  VFS_STAT_START(lockctlCall);
  ENTER(0);

  /* Linux converts flock64 to flock before calling the GPFS lock routine,
   * but leaves "cmd" as is.  Allow these to go through.
   */
#if !defined(__64BIT__)
  if (cmd == F_GETLK64) cmd = F_GETLK;
  if (cmd == F_SETLK64) cmd = F_SETLK;
  if (cmd == F_SETLKW64) cmd = F_SETLKW;
#endif

  if ((cmd != F_GETLK) && (cmd != F_SETLK) && (cmd != F_SETLKW))
  {
    code = 2;
    rc = ENOSYS;
    goto xerror;
  }

  setCred(&eCred);
  TRACE6(TRACE_VNODE, 1, TRCID_LINUXOPS_LOCKCTL_ENTER,
         "gpfs_f_lock enter: pid %d fp 0x%lX range 0x%lX:%lX cmd %s type %s\n",
         flP->fl_pid, fP, flP->fl_start, flP->fl_end,
         (cmd == F_GETLK) ? "GETLK" : (cmd == F_SETLK) ? "SETLK" : "SETLKW",
         (flP->fl_type == F_RDLCK) ? "RDLCK" :
         (flP->fl_type == F_WRLCK) ? "WRLCK" : "UNLCK");
  TRACE5(TRACE_VNODE, 3, TRCID_LINUXOPS_LOCKCTL_ENTER2,
         "gpfs_f_lock : pos 0x%lX iP 0x%lX fl_flags 0x%X uid %d gid %d\n",
         fP->f_pos, fP->f_dentry->d_inode, flP->fl_flags,
         eCred.principal, eCred.group);
  TraceBKL();

  cnP = VP_TO_CNP(fP->f_dentry->d_inode);
  privVfsP = VP_TO_PVP(fP->f_dentry->d_inode);
  DBGASSERT(privVfsP != NULL);

  /* convert file_lock to eflock */
  cxiVFSToFlock((void *)flP, &lckdat);

#ifdef NFS_CLUSTER_LOCKS
#if 0
  if (flP->fl_state == FL_CANCELED && flP->fl_type != F_UNLCK)
  { //??? just unblock queued lock
    // use kxCleanupAcquires() which is used only for AIX now
    rc = 0;
    goto xerror;
  }
#endif
#if 0
  if (flP->fl_lmops)  // && (flP->fl_flags & (FL_LOCKD|FL_NFSv4)))
  {
    if ((flP->fl_flags & FL_SLEEP) &&
        flP->fl_lmops->fl_vfs_callback &&
        flP->fl_type != F_UNLCK)
    {
      vfs_callback = flP->fl_lmops->fl_vfs_callback;
      cmd = F_SETLKW;
    }
  }
#endif
#endif

  lckdat.l_whence = SEEK_SET;

  rc = gpfs_ops.gpfsFcntl(NULL,     // KernelOperation initialized in gpfsFcntl
                          privVfsP,
                          NULL,     // struct vnode *vP or NULL
                          // advObjP (advisory lock object) is inode
                          fP->f_dentry->d_inode,
                          flP,      // struct file_lock
                          cnP,
                          0,        // offset
                          &lckdat,  // struct cxiFlock_t
                          cmd,
                          vfs_callback,  // lockd callback
                          &localRetryId,
                          &eCred);

xerror:
  TRACE2(TRACE_VNODE, 11, TRCID_LINUXOPS_LOCKCTL_DIAG2,
         "gpfs_f_lock: fP 0x%lX, f_dentry 0x%lX", fP, fP->f_dentry);

  VFS_STAT_STOP;

  TRACE1(TRACE_VNODE, 1, TRCID_LINUXOPS_LOCKCTL_EXIT,
         "gpfs_f_lock exit: rc %d", rc);
  EXIT(0);
  return (-rc);
}
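/* flock() support maps BSD-style whole-file locks onto GPFS share
 * reservations: the lock type is translated to share/deny bits (coRead,
 * coWriteM/coWriteA, coDenyR, coDenyWM/coDenyWA) and registered through
 * gpfsReserveShare; LOCK_UN becomes a RESERVE_DOWNGRADE of the existing
 * reservation.
 */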
#ifdef NFS4_CLUSTER
/*
 * cmd: F_SETLKW or F_SETLK
 * fl->fl_flags = FL_FLOCK; if sys_flock(), can use other flags for NFSv4
 * fl->fl_start = 0, fl->fl_end = OFFSET_MAX;
 * fl->fl_type =
 *      LOCK_MAND              allow other processes read
 *   or LOCK_MAND&LOCK_RW      allow other processes read and write
 *   or F_RDLCK  LOCK_SH   --  a shared lock.
 *   or F_WRLCK  LOCK_EX   --  an exclusive lock.
 *   or F_UNLCK  LOCK_UN   --  remove an existing lock.
 *
 * LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows
 * Share Modes.
 */
int gpfs_f_flock(struct file *fP, int cmd, struct file_lock *flP)
{
  int rc = 0;
  struct gpfsVfsData_t *privVfsP;
  int shareWant = 0;
  int flags = 0;
  struct MMFSVInfo *vinfoP = (struct MMFSVInfo *)fP->private_data;

  privVfsP = VP_TO_PVP(fP->f_dentry->d_inode);
  DBGASSERT(privVfsP != NULL);

  VFS_STAT_START(flockCall);
  ENTER(0);
  TRACE5(TRACE_VNODE, 1, TRCID_LINUX_FLOCK_ENTER,
         "gpfs_f_flock: enter fP 0x%lX flP 0x%lX cmd %d flags 0x%X type 0x%X\n",
         fP, flP, cmd, flP->fl_flags, flP->fl_type);

  if ((cmd != F_SETLK) && (cmd != F_SETLKW))
  {
    rc = ENOSYS;
    goto xerror;
  }

  shareWant |= ALLOW_SHARE_DELETE;

  if (flP->fl_flags & FL_FLOCK)
  {
    /* Translate (and validate) the type arguments to our shareWant */
    if (flP->fl_type & LOCK_MAND)
    {
      if (flP->fl_type & LOCK_RW)
        shareWant |= coRead|coWriteM|coWriteA;
      else
        shareWant |= coRead|coWriteM|coWriteA|coDenyR;
    }
    else
    {
      switch (flP->fl_type)
      {
        case F_RDLCK:  /* LOCK_SH */
          shareWant |= coRead|coDenyWM|coDenyWA;
          break;
        case F_WRLCK:  /* LOCK_EX */
          shareWant |= coRead|coWriteM|coWriteA|coDenyWM|coDenyWA|coDenyR;
          break;
        case F_UNLCK:  /* LOCK_UN */
          flags |= RESERVE_DOWNGRADE;
          shareWant |= 0;
          break;
        default:
          rc = EINVAL;
          goto xerror;
      }
    }
  }
  else
  { //??? add code for NFSv4 shares and delegations
    shareWant = coNFS4Share;
    if (!(flP->fl_flags & FL_SLEEP))
      flags |= RESERVE_NOWAIT;
  }

  /* Call to make the reservation */
  rc = gpfs_ops.gpfsReserveShare(fP, vinfoP, privVfsP, flags, shareWant,
                                 NULL, NULL);

xerror:
  TRACE1(TRACE_VNODE, 1, TRCID_LINUX_FLOCK_EXIT,
         "gpfs_f_flock: exit rc %d\n", rc);
  if (rc)
    cxiErrorNFS(rc);
  VFS_STAT_STOP;
  EXIT(0);
  return (-rc);
}
#endif /* NFS4_CLUSTER */
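/* rdwrInternal builds a cxiUio_t over the caller's iovec array and hands it
 * to gpfsRead or gpfsWrite.  On success it returns the number of bytes
 * actually transferred, (total_len - uio_resid), and advances *offsetP to
 * the resulting uio_offset; on failure it returns the negated GPFS errno.
 */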
static inline ssize_t rdwrInternal(struct file *fP, cxiRdWr_t op,
                                   const struct cxiIovec_t *iovecP,
                                   unsigned long count, loff_t *offsetP)
{
  int i, rc;
  Boolean gotBKL = false;
  ssize_t total_len = 0;
  struct cxiUio_t tmp_uio;
  int flags = cxiOpenFlagsXlate(fP->f_flags);
  struct gpfsVfsData_t *privVfsP;
  cxiNode_t *cnP;
  struct MMFSVInfo *vinfoP = (struct MMFSVInfo *)fP->private_data;
  struct inode *iP;
  ext_cred_t eCred;
  ssize_t tmp_len;

  VFS_STAT_START((op == CXI_READ)? readCall: writeCall);
  ENTER(0);
  DBGASSERT(fP != NULL);
  iP = fP->f_dentry->d_inode;
  DBGASSERT(iP != NULL);

  TRACE11(TRACE_VNODE, 1, TRCID_LINUXOPS_RDWRINT_ENTER,
          "gpfs_f_rdwr enter: fP 0x%lX f_flags 0x%X flags 0x%X op %d "
          "iovec 0x%lX count %d offset 0x%llX "
          "dentry 0x%lX private 0x%lX iP 0x%lX name '%s'\n",
          fP, fP->f_flags, flags, op, iovecP, count, *offsetP,
          fP->f_dentry, fP->private_data, fP->f_dentry->d_inode,
          fP->f_dentry->d_name.name);

  /* BKL is not held at entry, except for NFS calls */
  TraceBKL();
  if (current->lock_depth >= 0)  /* kernel lock is held by me */
  {
    gotBKL = true;
    unlock_kernel();
  }

  privVfsP = VP_TO_PVP(iP);
  DBGASSERT(privVfsP != NULL);
  cnP = VP_TO_CNP(iP);

  tmp_uio.uio_iov = (struct cxiIovec_t *)iovecP; /* ptr to iovec struct array */
  tmp_uio.uio_iovcnt = count;         /* #iovec elements left to be processed */
  tmp_uio.uio_iovdcnt = 0;            /* #iovec elements already processed */
  tmp_uio.uio_offset = *offsetP;      /* byte offset in file/dev to read/write */
  tmp_uio.uio_segflg = UIO_USERSPACE; /* copy to user space */
  tmp_uio.uio_fmode = 0;              /* file modes from open file struct */

  for (i = 0; i < count; i++)
    total_len += iovecP[i].iov_len;
  tmp_uio.uio_resid = total_len;      /* #bytes left in data area */

  /* Return -EINVAL if the total length is negative.  Be careful here
   * because uio_resid is an unsigned long, not an ssize_t.
   */
  tmp_len = (ssize_t)tmp_uio.uio_resid;
  if (tmp_len < 0)
  {
    EXIT(0);
    return (-EINVAL);
  }

  DBGASSERT(vinfoP != NULL);

  setCred(&eCred);
  if (op == CXI_READ)
    rc = gpfs_ops.gpfsRead(privVfsP, NULL, cnP, flags, &tmp_uio, vinfoP,
                           NULL, NULL, &eCred, false, true);
  else
  {
    rc = gpfs_ops.gpfsWrite(privVfsP, NULL, cnP, flags, &tmp_uio, vinfoP,
                            NULL, NULL, &eCred, false, true);
    iP->i_sb->s_dirt = 1;
  }

  TRACE5(TRACE_VNODE, 1, TRCID_LINUXOPS_RDWRINT_EXIT,
         "gpfs_f_rdwr exit: fP 0x%lX total_len %d uio_resid %ld "
         "offset 0x%llX rc %d\n",
         fP, total_len, tmp_uio.uio_resid, tmp_uio.uio_offset, rc);

  VFS_STAT_STOP;

  if (gotBKL)  /* If held kernel lock on entry then reacquire it */
    lock_kernel();

  if (rc)
  {
    cxiErrorNFS(rc);
    EXIT(0);
    return (-rc);
  }

  *offsetP = tmp_uio.uio_offset;
  EXIT(0);
  return (total_len - tmp_uio.uio_resid);
}

ssize_t gpfs_f_read(struct file *fP, char *bufP, size_t count,
                    loff_t *offsetP)
{
  ssize_t rc;
  cxiIovec_t tmp_iovec;

  ENTER(0);
  tmp_iovec.iov_base = bufP;   /* base memory address */
  tmp_iovec.iov_len = count;   /* length of transfer for this area */

  rc = rdwrInternal(fP, CXI_READ, &tmp_iovec, 1, offsetP);
  EXIT(0);
  return rc;
}

ssize_t gpfs_f_dir_read(struct file *fP, char *bufP, size_t count,
                        loff_t *offsetP)
{
  TRACE1(TRACE_VNODE, 1, TRCID_LINUXOPS_READ_DIR,
         "gpfs_f_dir_read: fP 0x%lX EISDIR\n", fP);
  return -EISDIR;
}

ssize_t gpfs_f_write(struct file *fP, const char *bufP, size_t count,
                     loff_t *offsetP)
{
  ssize_t rc;
  cxiIovec_t tmp_iovec;

  ENTER(0);
  tmp_iovec.iov_base = (char *)bufP;  /* base memory address */
  tmp_iovec.iov_len = count;          /* length of transfer for this area */

  rc = rdwrInternal(fP, CXI_WRITE, &tmp_iovec, 1, offsetP);
  EXIT(0);
  return rc;
}

ssize_t gpfs_f_readv(struct kiocb *iocb, const struct iovec *iovecP,
                     unsigned long count, loff_t offsetP)
{
  ssize_t rc;

  ENTER(0);
  rc = rdwrInternal(iocb->ki_filp, CXI_READ,
                    (const struct cxiIovec_t *)iovecP, count, &offsetP);
  EXIT(0);
  return rc;
}

ssize_t gpfs_f_writev(struct kiocb *iocb, const struct iovec *iovecP,
                      unsigned long count, loff_t offsetP)
{
  ssize_t rc;

  ENTER(0);
  rc = rdwrInternal(iocb->ki_filp, CXI_WRITE,
                    (const struct cxiIovec_t *)iovecP, count, &offsetP);
  EXIT(0);
  return rc;
}
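/* gpfs_f_share implements NFSv4 share reservations.  The file reference is
 * pinned with get_file()/fput() for the duration of the call, the NFSv4
 * share_access/share_deny bits are translated to GPFS shareWant bits by the
 * XLATE_NFS4_* macros, and the reservation is made with gpfsReserveShare.
 */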
#ifdef NFS4_CLUSTER
int gpfs_f_share(struct file *fP, unsigned int share_access,
                 unsigned int share_deny)
{
  int err;
  struct inode *iP;
  struct dentry *dentryP;
  int shareHave, shareWant;
  struct gpfsVfsData_t *privVfsP;
  ext_cred_t eCred;
  cxiNode_t *cnP;
  int flags = RESERVE_NONE;

  ENTER(0);
  err = 0;
  dentryP = fP? fP->f_dentry: NULL;
  iP = dentryP? dentryP->d_inode: NULL;

  TRACE5(TRACE_VNODE, 1, TRCID_LINUXOPS_SHARE_ENTER,
         "gpfs_f_share enter: fP 0x%lX ino %d (%s) access 0x%X deny 0x%X\n",
         fP, iP? iP->i_ino: -1,
         dentryP? dentryP->d_name.name: (const unsigned char*)"",
         share_access, share_deny);

  if (fP)
    get_file(fP);

  /* Validate the file and obtain privVfsP */
  if (!iP || !(privVfsP = VP_TO_PVP(iP)))
  {
    err = EBADF;
    goto xerror;
  }

  if ((share_access == 0) && (share_deny == 0))
  {
    /* This type of request can happen after the server recalls a delegation.
     * We reject the request which we recognize since no access/deny flags
     * are given.  This then causes the client to open the file at the server
     * (no delegation) and continue.
     */
    TRACE3(TRACE_VNODE, 3, TRCID_LINUXOPS_SHARE_RESET,
           "gpfs_f_share: RESET (fP 0x%lX iP 0x%lX privVfsP 0x%lX)\n",
           fP, iP, iP? VP_TO_PVP(fP->f_dentry->d_inode): NULL);
    goto xerror;
  }

  /* Translate (and validate) the NFS4 share/deny arguments to our shareWant */
  /* setup for the XLATE_NFS4 calls */
  err = EINVAL;
  shareWant = coNFS4Share|ALLOW_SHARE_DELETE;
  XLATE_NFS4_ACCESS(share_access, shareWant);
  XLATE_NFS4_DENY(share_deny, shareWant);

  setCred(&eCred);
  cnP = VP_TO_CNP(iP);

  /* Call to make the reservation */
  err = gpfs_ops.gpfsReserveShare(fP, fP->private_data, privVfsP, flags,
                                  shareWant, cnP, &eCred);

xerror:
  if (fP)
    fput(fP);

  TRACE1(TRACE_VNODE, 1, TRCID_LINUXOPS_SHARE_EXIT,
         "gpfs_f_share exit: rc %d\n", err);
  EXIT(0);
  return err;
}
#endif /* NFS4_CLUSTER */

extern int cleanupFD;

/* gpfs_f_cleanup is a routine that runs when the last mmfsd process
   terminates.  It allows us to do some basic cleanup so that the daemon
   can be restarted nicely. */
int gpfs_f_cleanup(struct inode *iP, struct file *fP)
{
  int rc = 0;

  ENTER(0);
  if (cleanupFD)
  {
    rc = gpfs_ops.gpfsCleanup();
    cleanupFD = 0;
  }
  EXIT(0);
  return rc;
}

#if LINUX_KERNEL_VERSION >= 2060000 || defined(SUSE_LINUX)
/* gpfs_f_direct_IO() is never called.  Open currently requires a "value" in
 * gpfs_aops->direct_IO to be successful when O_DIRECT is supplied on the
 * open call.  The linux "generic" file routines eventually call this op.
 * We do not use the generic file routines, so gpfs_f_direct_IO is never
 * called.
 */
#if LINUX_KERNEL_VERSION >= 2060000
ssize_t gpfs_f_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iovecP,
                         loff_t in_offset, unsigned long count)
#elif defined(SUSE_LINUX)
int gpfs_f_direct_IO(int rw, struct file *file, struct kiobuf *kiobuf,
                     unsigned long in_offset, int count)
#endif
{
  LOGASSERT(!"gpfs_f_direct_IO not supported");
}
#endif
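/* pNFS (P_NFS4) server callbacks.  Each routine below recovers the GPFS
 * private VFS data from the superblock or inode and forwards to the
 * corresponding gpfs_ops entry point: device list and device info for the
 * data servers, layout get/return, open state, and the write verifier.
 */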
#ifdef P_NFS4

static void printfh2(char *s, int *fh)
{
#ifdef GPFS_PRINTK
  printk("%s: %d: %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
         s, fh[0], fh[1], fh[2], fh[3], fh[4],
         fh[5], fh[6], fh[7], fh[8], fh[9]);
#endif
}

int gpfs_get_devicelist(struct super_block *sbP, void *p)
{
  int rc = 0;
  struct gpfsVfsData_t *privVfsP = (struct gpfsVfsData_t *)SBLOCK_PRIVATE(sbP);

  // VFS_STAT_START(gpfs_get_devicelist);
  ENTER(0);
  DBGASSERT(privVfsP != NULL);

  TRACE2(TRACE_VNODE, 1, TRCID_LINUXOPS_GET_DEVICELIST_ENTER,
         "gpfs_get_devicelist: sbP 0x%lX p 0x%lX\n", sbP, p);

  rc = gpfs_ops.gpfsGetDeviceList(privVfsP, p);
#ifdef GPFS_PRINTK
  printk("gpfs_get_devicelist: rc %d\n", rc);
#endif

xerror:
  TRACE1(TRACE_VNODE, 1, TRCID_GET_DEVICELIST_EXIT,
         "gpfs_get_devicelist exit: rc %d\n", rc);
  // VFS_STAT_STOP;
  EXIT(0);
  return (-rc);
}

int gpfs_layout_get(struct inode *iP, void *p)
{
  int rc = 0;
  int code = 0;
  cxiNode_t *cnP;
  ext_cred_t eCred;
  struct gpfsVfsData_t *privVfsP;
  struct nfsd4_pnfs_layoutget *lgp = (struct nfsd4_pnfs_layoutget *)p;

  // VFS_STAT_START(gpfs_layout_get);
  ENTER(0);
  cnP = VP_TO_CNP(iP);
  privVfsP = VP_TO_PVP(iP);
  DBGASSERT(privVfsP != NULL);

  TRACE2(TRACE_VNODE, 1, TRCID_LINUXOPS_GET_LAYOUT_ENTER,
         "gpfs_layout_get: iP 0x%lX p 0x%lX\n", iP, p);

  setCred(&eCred);
  rc = gpfs_ops.gpfsGetLayout(privVfsP, cnP, p, &eCred);

xerror:
  TRACE2(TRACE_VNODE, 1, TRCID_GET_LAYOUT_EXIT,
         "gpfs_layout_get exit: code %d rc %d\n", code, rc);
  // VFS_STAT_STOP;
  EXIT(0);
  return (-rc);
}

/* pNFS: return layout type */
#define LAYOUT_NFSV4_FILES 1
int gpfs_layout_type()
{
#ifdef GPFS_PRINTK
  printk("xxx gpfs_layout_type LAYOUT_NFSV4_FILES=%d\n", LAYOUT_NFSV4_FILES);
#endif
  return LAYOUT_NFSV4_FILES;
}
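/* gpfs_get_state returns pNFS open state to the client.  The id of the node
 * holding the state is recovered from the trailing word of the NFS
 * filehandle (fh_base.fh_pad[(fh_size >> 2) - 1]); the local device id comes
 * from gpfsGetMyDevID and the remaining state from gpfsGetOpenState.
 */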
int gpfs_get_state(struct inode *iP, void *fh, void *p)
{
  int rc = 0;
  int nodeId, len, code = 0;
  cxiNode_t *cnP;
  ext_cred_t eCred;
  struct gpfsVfsData_t *privVfsP;
  struct pnfs_get_state *osP = (struct pnfs_get_state *)p;
  struct knfsd_fh *fhP = (struct knfsd_fh *)fh;

  // VFS_STAT_START(gpfs_get_state);
  ENTER(0);
  cnP = VP_TO_CNP(iP);
  privVfsP = VP_TO_PVP(iP);
  DBGASSERT(privVfsP != NULL);

#ifdef GPFS_PRINTK
  printk("gpfs_get_state iP %p fh type %d fh size %d\n",
         iP, fhP->fh_fsid_type, fhP->fh_size);
  printfh2("gpfs_get_state:", (int *)fhP);
#endif

  len = sizeof(struct pnfs_get_state);
  if (fhP->fh_fsid_type >= max_fsid_type && fhP->fh_size > 8)
  {
    nodeId = fhP->fh_base.fh_pad[(fhP->fh_size >> 2) - 1];
  }
  else
  {
    rc = ENOENT;
    goto xerror;
  }

  setCred(&eCred);
  osP->devid = gpfs_ops.gpfsGetMyDevID(privVfsP);
  rc = gpfs_ops.gpfsGetOpenState(privVfsP, cnP, nodeId, p, len, &eCred);

  TRACE7(TRACE_VNODE, 2, TRCID_LINUXOPS_SET_STATEID_ENTER,
         "gpfs_get_state: iP 0x%lX mds %x ds %x p 0x%lX len %d verf 0x%lX:0x%lX\n",
         iP, nodeId, osP->devid, p, len, osP->verifier[0], osP->verifier[1]);
#ifdef GPFS_PRINTK
  printk("gpfs_get_state mds-id %x my-id %x verifier %x:%x\n",
         nodeId, osP->devid, osP->verifier[0], osP->verifier[1]);
#endif

xerror:
  TRACE2(TRACE_VNODE, 1, TRCID_SET_STETEID_EXIT,
         "gpfs_get_state exit: code %d rc %d\n", code, rc);
  // VFS_STAT_STOP;
  EXIT(0);
  return (-rc);
}

int gpfs_layout_return(struct inode *iP, void *p)
{
  int rc = 0;
  cxiNode_t *cnP;
  struct gpfsVfsData_t *privVfsP;

  // VFS_STAT_START(gpfs_layout_return);
  ENTER(0);
  cnP = VP_TO_CNP(iP);
  privVfsP = VP_TO_PVP(iP);
  DBGASSERT(privVfsP != NULL);

#ifdef GPFS_PRINTK
  printk("gpfs_layout_return iP %p\n", iP);
#endif
  TRACE1(TRACE_VNODE, 1, TRCID_LINUXOPS_LAYOUT_RET_ENTER,
         "gpfs_layout_return: iP 0x%lX\n", iP);

  rc = gpfs_ops.gpfsLayoutRetrun(privVfsP, cnP, p,
                                 sizeof(struct layout_return));

xerror:
  TRACE2(TRACE_VNODE, 1, TRCID_LAYOUT_RET_EXIT,
         "gpfs_layout_return exit: code %d iP 0x%lX\n", rc, iP);
  // VFS_STAT_STOP;
  EXIT(0);
  return (-rc);
}

int gpfs_get_deviceinfo(struct super_block *sbP, void *p)
{
  int rc;
  struct gpfsVfsData_t *privVfsP = (struct gpfsVfsData_t *)SBLOCK_PRIVATE(sbP);

  // VFS_STAT_START(gpfs_get_deviceinfo);
  ENTER(0);
  DBGASSERT(privVfsP != NULL);

  TRACE2(TRACE_VNODE, 1, TRCID_LINUXOPS_GET_DEVICEINFO_ENTER,
         "gpfs_get_deviceinfo: sbP 0x%lX p 0x%lX\n", sbP, p);
#ifdef GPFS_PRINTK
  printk("gpfs_get_deviceinfo: sbP 0x%lX p 0x%lX\n", sbP, p);
#endif

  rc = gpfs_ops.gpfsGetDeviceInfo(privVfsP, p);
#ifdef GPFS_PRINTK
  printk("gpfs_get_deviceinfo: rc %d\n", rc);
#endif

xerror:
  TRACE1(TRACE_VNODE, 1, TRCID_GET_DEVICEINFO_EXIT,
         "gpfs_get_deviceinfo exit: rc %d\n", rc);
  // VFS_STAT_STOP;
  EXIT(0);
  return (-rc);
}

void gpfs_get_verifier(struct super_block *sbP, u32 *p)
{
  int rc;
  struct gpfsVfsData_t *privVfsP = (struct gpfsVfsData_t *)SBLOCK_PRIVATE(sbP);

  // VFS_STAT_START(gpfs_get_verifier);
  ENTER(0);
  DBGASSERT(privVfsP != NULL);

#ifdef GPFS_PRINTK
  printk("gpfs_get_verifier: sbP 0x%lX p 0x%lX\n", sbP, p);
#endif

  gpfs_ops.gpfsGetVerifier(privVfsP, p);
#ifdef GPFS_PRINTK
  printk("gpfs_get_verifier: sbP 0x%lX v1 0x%lX v2 0x%lX\n",
         sbP, *p, *(p+1));
#endif

  // VFS_STAT_STOP;
  EXIT(0);
  return;
}
#endif