--- sys/miscfs/nullfs/null.h
+++ sys/miscfs/nullfs/null.h
@@ -57,6 +57,8 @@
 	LIST_ENTRY(null_node)	null_hash;	/* Hash list */
 	struct vnode		*null_lowervp;	/* VREFed once */
 	struct vnode		*null_vnode;	/* Back pointer */
+	int			null_pending_locks;
+	int			null_drain_wakeup;
 };

 #define	MOUNTTONULLMOUNT(mp) ((struct null_mount *)((mp)->mnt_data))
@@ -65,7 +67,8 @@

 int nullfs_init(struct vfsconf *vfsp);
 int nullfs_uninit(struct vfsconf *vfsp);
-int null_node_create(struct mount *mp, struct vnode *target, struct vnode **vpp);
+int null_nodeget(struct mount *mp, struct vnode *target, struct vnode **vpp);
+void null_hashrem(struct null_node *xp);
 int null_bypass(struct vop_generic_args *ap);

 #ifdef DIAGNOSTIC
@@ -76,7 +79,6 @@
 #endif

 extern vop_t **null_vnodeop_p;
-extern struct lock null_hashlock;

 #ifdef MALLOC_DECLARE
 MALLOC_DECLARE(M_NULLFSNODE);
--- sys/miscfs/nullfs/null_subr.c
+++ sys/miscfs/nullfs/null_subr.c
@@ -41,13 +41,15 @@
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
 #include 
 #include 
-#include 
-#include 
+
 #include 

-#define LOG2_SIZEVNODE 7	/* log2(sizeof struct vnode) */
+#define LOG2_SIZEVNODE 8	/* log2(sizeof struct vnode) */
 #define	NNULLNODECACHE 16

 /*
@@ -63,15 +65,13 @@
 static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
 static u_long null_node_hash;
-struct lock null_hashlock;
+struct simplelock null_hashmtx;

 static MALLOC_DEFINE(M_NULLFSHASH, "NULLFS hash", "NULLFS hash table");
 MALLOC_DEFINE(M_NULLFSNODE, "NULLFS node", "NULLFS vnode private part");

-static int	null_node_alloc(struct mount *mp, struct vnode *lowervp,
-		    struct vnode **vpp);
-static struct vnode *
-	null_node_find(struct mount *mp, struct vnode *lowervp);
+static struct vnode * null_hashget(struct mount *, struct vnode *);
+static struct vnode * null_hashins(struct mount *, struct null_node *);

 /*
  * Initialise cache headers
@@ -83,7 +83,7 @@
 	NULLFSDEBUG("nullfs_init\n");		/* printed during system boot */
 	null_node_hashtbl = hashinit(NNULLNODECACHE, M_NULLFSHASH, &null_node_hash);
-	lockinit(&null_hashlock, PVFS, "nullhs", 0, 0);
+	simple_lock_init(&null_hashmtx);
 	return (0);
 }

@@ -92,9 +92,7 @@
 	struct vfsconf *vfsp;
 {

-	if (null_node_hashtbl) {
-		free(null_node_hashtbl, M_NULLFSHASH);
-	}
+	free(null_node_hashtbl, M_NULLFSHASH);
 	return (0);
 }
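A note on the lookup discipline in the hunks that follow: null_hashget (the renamed null_node_find) runs while the caller already holds the lower vnode's lock, so it must never sleep on a nullfs vnode that is being recycled; the recycling thread may itself be waiting on that lower lock. The sketch below is a minimal userland analogy of this skip-and-retry rule, using pthreads in place of the hash simplelock and the vnode interlock; cache_lookup, entry, and the dying flag are all invented names, not part of the patch.

#include <pthread.h>
#include <stddef.h>

struct entry {
	struct entry	*next;
	int		 key;
	int		 dying;		/* analogue of VXLOCK */
	int		 refs;
	pthread_mutex_t	 lock;		/* analogue of the vnode interlock */
};

static struct entry *cache_head;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Look up an entry while the caller already holds some outer lock.
 * Never sleep on a dying entry: the thread tearing it down may be
 * blocked on that same outer lock, so waiting here could deadlock.
 */
struct entry *
cache_lookup(int key)
{
	struct entry *e;

	pthread_mutex_lock(&cache_lock);
	for (e = cache_head; e != NULL; e = e->next) {
		if (e->key != key)
			continue;
		pthread_mutex_lock(&e->lock);
		if (e->dying) {			/* skip, don't block */
			pthread_mutex_unlock(&e->lock);
			continue;
		}
		e->refs++;			/* analogue of vget() */
		pthread_mutex_unlock(&e->lock);
		pthread_mutex_unlock(&cache_lock);
		return (e);
	}
	pthread_mutex_unlock(&cache_lock);
	return (NULL);
}

In the kernel code, vget() can still fail after the hash lock has been dropped, which is why the real routine retries from its loop label rather than returning a stale vnode.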
@@ -103,7 +101,7 @@
  * Lower vnode should be locked on entry and will be left locked on exit.
  */
 static struct vnode *
-null_node_find(mp, lowervp)
+null_hashget(mp, lowervp)
 	struct mount *mp;
 	struct vnode *lowervp;
 {
@@ -120,47 +118,117 @@
 	 */
 	hd = NULL_NHASH(lowervp);
loop:
-	lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, p);
+	simple_lock(&null_hashmtx);
 	LIST_FOREACH(a, hd, null_hash) {
 		if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
 			vp = NULLTOV(a);
-			lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
+			VI_LOCK(vp);
+			/*
+			 * Don't block if nullfs vnode is being recycled.
+			 * We already hold a lock on the lower vnode, thus
+			 * waiting might deadlock against the thread
+			 * recycling the nullfs vnode or another thread
+			 * in vrele() waiting for the vnode lock.
+			 */
+			if ((vp->v_flag & VXLOCK) != 0) {
+				VI_UNLOCK(vp);
+				continue;
+			}
+			simple_unlock(&null_hashmtx);
			/*
			 * We need vget for the VXLOCK
			 * stuff, but we don't want to lock
			 * the lower node.
			 */
-			if (vget(vp, LK_EXCLUSIVE | LK_CANRECURSE, p)) {
-				printf ("null_node_find: vget failed.\n");
+			if (vget(vp, LK_EXCLUSIVE | LK_THISLAYER | LK_INTERLOCK, p))
 				goto loop;
-			}
-			VOP_UNLOCK(lowervp, 0, p);
+
 			return (vp);
 		}
 	}
-	lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
-
-	return NULLVP;
+	simple_unlock(&null_hashmtx);
+	return (NULLVP);
 }
+
+/*
+ * Act like null_hashget, but add the passed null_node to the hash if no
+ * existing node is found.
+ */
+static struct vnode *
+null_hashins(mp, xp)
+	struct mount *mp;
+	struct null_node *xp;
+{
+	struct proc *p = curproc;	/* XXX */
+	struct null_node_hashhead *hd;
+	struct null_node *oxp;
+	struct vnode *ovp;
+
+	hd = NULL_NHASH(xp->null_lowervp);
+loop:
+	simple_lock(&null_hashmtx);
+	LIST_FOREACH(oxp, hd, null_hash) {
+		if (oxp->null_lowervp == xp->null_lowervp &&
+		    NULLTOV(oxp)->v_mount == mp) {
+			ovp = NULLTOV(oxp);
+			VI_LOCK(ovp);
+			/*
+			 * Don't block if nullfs vnode is being recycled.
+			 * We already hold a lock on the lower vnode, thus
+			 * waiting might deadlock against the thread
+			 * recycling the nullfs vnode or another thread
+			 * in vrele() waiting for the vnode lock.
+			 */
+			if ((ovp->v_flag & VXLOCK) != 0) {
+				VI_UNLOCK(ovp);
+				continue;
+			}
+			simple_unlock(&null_hashmtx);
+			if (vget(ovp, LK_EXCLUSIVE | LK_THISLAYER | LK_INTERLOCK, p))
+				goto loop;
+
+			return (ovp);
+		}
+	}
+	LIST_INSERT_HEAD(hd, xp, null_hash);
+	simple_unlock(&null_hashmtx);
+	return (NULLVP);
+}

 /*
- * Make a new null_node node.
- * Vp is the alias vnode, lofsvp is the lower vnode.
- * Maintain a reference to (lowervp).
+ * Make a new nullfs node, or get an existing one.
+ * Vp is the alias vnode, lowervp is the lower vnode.
+ *
+ * The lowervp is assumed to be locked and to carry a "spare" reference.
+ * This routine vrele()s lowervp if the nullfs node was taken from the hash;
+ * otherwise it transfers the caller's "spare" reference to the new vnode.
  */
-static int
-null_node_alloc(mp, lowervp, vpp)
+int
+null_nodeget(mp, lowervp, vpp)
 	struct mount *mp;
 	struct vnode *lowervp;
 	struct vnode **vpp;
 {
 	struct proc *p = curproc;	/* XXX */
-	struct null_node_hashhead *hd;
 	struct null_node *xp;
-	struct vnode *othervp, *vp;
+	struct vnode *vp;
 	int error;

+	/* Look up the hash first. */
+	*vpp = null_hashget(mp, lowervp);
+	if (*vpp != NULL) {
+		vrele(lowervp);
+		return (0);
+	}
+
+	/*
+	 * We do not serialize vnode creation; instead we will check for
+	 * duplicates later, when adding the new vnode to the hash.
+	 *
+	 * Note that a duplicate can only appear in the hash if the lowervp
+	 * is locked LK_SHARED.
+	 */
+
 	/*
 	 * Do the MALLOC before the getnewvnode since doing so afterward
 	 * might cause a bogus v_data pointer to get dereferenced
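null_nodeget above allocates first and defers duplicate detection to null_hashins. The same create-or-get shape, as a hedged sketch in plain C with pthreads (node_get, table_lock, and find_locked are invented names):

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node	*next;
	int		 key;
};

static struct node *table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold table_lock. */
static struct node *
find_locked(int key)
{
	struct node *n;

	for (n = table; n != NULL; n = n->next)
		if (n->key == key)
			return (n);
	return (NULL);
}

/*
 * Create-or-get: build the node first, take the lock only to insert,
 * and defer to whoever won the race if a duplicate already exists.
 */
struct node *
node_get(int key)
{
	struct node *n, *on;

	n = malloc(sizeof(*n));		/* may sleep; done before locking */
	if (n == NULL)
		return (NULL);
	n->key = key;

	pthread_mutex_lock(&table_lock);
	on = find_locked(key);
	if (on != NULL) {		/* lost the race: discard ours */
		pthread_mutex_unlock(&table_lock);
		free(n);
		return (on);
	}
	n->next = table;		/* won the race: publish */
	table = n;
	pthread_mutex_unlock(&table_lock);
	return (n);
}

Losing the race is cheap (free the spare node, return the winner's), which is why vnode creation no longer needs to be serialized.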
@@ -169,32 +237,21 @@
 	MALLOC(xp, struct null_node *, sizeof(struct null_node),
 	    M_NULLFSNODE, M_WAITOK);

-	error = getnewvnode(VT_NULL, mp, null_vnodeop_p, vpp);
+	error = getnewvnode(VT_NULL, mp, null_vnodeop_p, &vp);
 	if (error) {
 		FREE(xp, M_NULLFSNODE);
 		return (error);
 	}
-	vp = *vpp;

-	vp->v_type = lowervp->v_type;
-	lockinit(&xp->null_lock, PINOD, "nullnode", 0, LK_CANRECURSE);
 	xp->null_vnode = vp;
-	vp->v_data = xp;
 	xp->null_lowervp = lowervp;
-	/*
-	 * Before we insert our new node onto the hash chains,
-	 * check to see if someone else has beaten us to it.
-	 * (We could have slept in MALLOC.)
-	 */
-	othervp = null_node_find(mp, lowervp);
-	if (othervp) {
-		vp->v_data = NULL;
-		FREE(xp, M_NULLFSNODE);
-		vp->v_type = VBAD;	/* node is discarded */
-		vrele(vp);
-		*vpp = othervp;
-		return 0;
-	}
+	xp->null_pending_locks = 0;
+	xp->null_drain_wakeup = 0;
+
+	vp->v_type = lowervp->v_type;
+	vp->v_data = xp;
+
+	lockinit(&xp->null_lock, PINOD, "nullnode", 0, LK_CANRECURSE);

 	/*
 	 * From NetBSD:
@@ -205,79 +262,49 @@
 	 * NULL, then we copy that up and manually lock the new vnode.
 	 */

-	lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, p);
 	vp->v_vnlock = lowervp->v_vnlock;
 	error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_THISLAYER, p);
 	if (error)
-		panic("null_node_alloc: can't lock new vnode\n");
+		panic("null_nodeget: can't lock new vnode\n");
+
+	/*
+	 * Atomically insert our new node into the hash, or vget the
+	 * existing one if someone else has beaten us to it.
+	 */
+	*vpp = null_hashins(mp, xp);
+	if (*vpp != NULL) {
+		vrele(lowervp);
+		VOP_UNLOCK(vp, LK_THISLAYER, p);
+		vp->v_vnlock = NULL;
+		xp->null_lowervp = NULL;
+		vrele(vp);
+		return (0);
+	}
+
+	/*
+	 * XXX We take an extra vref just to work around UFS's XXX:
+	 * UFS can vrele() the vnode in VOP_CLOSE() in some cases.  Luckily,
+	 * this can only happen if v_usecount == 1.  To work around it, we
+	 * just don't let v_usecount be 1; it will be 2 or more.
+	 */
 	VREF(lowervp);
-	hd = NULL_NHASH(lowervp);
-	LIST_INSERT_HEAD(hd, xp, null_hash);
-	lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
-	return 0;
-}
+	*vpp = vp;
+
+	return (0);
+}

 /*
- * Try to find an existing null_node vnode refering to the given underlying
- * vnode (which should be locked). If no vnode found, create a new null_node
- * vnode which contains a reference to the lower vnode.
+ * Remove node from hash.
  */
-int
-null_node_create(mp, lowervp, newvpp)
-	struct mount *mp;
-	struct vnode *lowervp;
-	struct vnode **newvpp;
+void
+null_hashrem(xp)
+	struct null_node *xp;
 {
-	struct vnode *aliasvp;

-	aliasvp = null_node_find(mp, lowervp);
-	if (aliasvp) {
-		/*
-		 * null_node_find has taken another reference
-		 * to the alias vnode.
-		 */
-		vrele(lowervp);
-#ifdef NULLFS_DEBUG
-		vprint("null_node_create: exists", aliasvp);
-#endif
-	} else {
-		int error;
-
-		/*
-		 * Get new vnode.
-		 */
-		NULLFSDEBUG("null_node_create: create new alias vnode\n");
-
-		/*
-		 * Make new vnode reference the null_node.
-		 */
-		error = null_node_alloc(mp, lowervp, &aliasvp);
-		if (error)
-			return error;
-
-		/*
-		 * aliasvp is already VREF'd by getnewvnode()
-		 */
-	}
-
-#ifdef DIAGNOSTIC
-	if (lowervp->v_usecount < 1) {
-		/* Should never happen... */
-		vprint ("null_node_create: alias ", aliasvp);
-		vprint ("null_node_create: lower ", lowervp);
-		panic ("null_node_create: lower has 0 usecount.");
-	};
-#endif
-
-#ifdef NULLFS_DEBUG
-	vprint("null_node_create: alias", aliasvp);
-	vprint("null_node_create: lower", lowervp);
-#endif
-
-	*newvpp = aliasvp;
-	return (0);
+	simple_lock(&null_hashmtx);
+	LIST_REMOVE(xp, null_hash);
+	simple_unlock(&null_hashmtx);
 }

 #ifdef DIAGNOSTIC
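The heart of the new null_nodeget contract is reference ownership: the caller passes in one counted reference to the lower vnode, and the routine either transfers it to the new upper node or releases it because an existing node already owns one. A runnable userland sketch of that transfer-or-release contract, with entirely invented types and names (upper, lower, upper_get):

#include <stdlib.h>

struct lower {
	int	refs;
};

struct upper {
	struct lower	*lo;	/* owns one reference to lo */
};

static void
lower_rele(struct lower *lo)
{
	if (--lo->refs == 0)
		free(lo);
}

/*
 * Consume the caller's reference to lo on success: either transfer it
 * to a new upper node, or release it because an existing node already
 * holds its own reference.
 */
struct upper *
upper_get(struct upper *existing, struct lower *lo)
{
	struct upper *up;

	if (existing != NULL) {
		lower_rele(lo);		/* caller's ref is released */
		return (existing);
	}
	up = malloc(sizeof(*up));
	if (up == NULL)
		return (NULL);		/* caller keeps, and must release, its ref */
	up->lo = lo;			/* caller's ref now owned by up */
	return (up);
}

The error path mirrors the patch: on failure the caller's reference is left intact, exactly as null_nodeget leaves lowervp referenced when getnewvnode fails.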
--- sys/miscfs/nullfs/null_vfsops.c
+++ sys/miscfs/nullfs/null_vfsops.c
@@ -47,11 +47,13 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
-#include 
 #include 
 #include 
+#include 
+#include 
+
 #include 

 static MALLOC_DEFINE(M_NULLFSMNT, "NULLFS mount", "NULLFS mount structure");
@@ -166,15 +168,12 @@
 	 * Save reference.  Each mount also holds
 	 * a reference on the root vnode.
 	 */
-	error = null_node_create(mp, lowerrootvp, &vp);
-	/*
-	 * Unlock the node (either the lower or the alias)
-	 */
-	VOP_UNLOCK(vp, 0, p);
+	error = null_nodeget(mp, lowerrootvp, &vp);
 	/*
 	 * Make sure the node alias worked
 	 */
 	if (error) {
+		VOP_UNLOCK(vp, 0, p);
 		vrele(lowerrootvp);
 		free(xmp, M_NULLFSMNT);	/* XXX */
 		return (error);
@@ -187,6 +186,12 @@
 	nullm_rootvp = vp;
 	nullm_rootvp->v_flag |= VROOT;
 	xmp->nullm_rootvp = nullm_rootvp;
+
+	/*
+	 * Unlock the node (either the lower or the alias)
+	 */
+	VOP_UNLOCK(vp, 0, p);
+
 	if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL)
 		mp->mnt_flag |= MNT_LOCAL;
 	mp->mnt_data = (qaddr_t) xmp;
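The vfsops change above is an ordering fix: the root vnode now stays locked until the mount structure is fully wired up, so nothing can observe a half-initialized mount through it. The same publish-last shape in portable C, as a hedged sketch with invented names (g_root, publish_root), assuming pthreads:

#include <pthread.h>

struct root {
	pthread_mutex_t	 lock;
	int		 flags;
	void		*data;
};

static struct root *g_root;	/* globally visible once published */
static pthread_mutex_t g_root_mtx = PTHREAD_MUTEX_INITIALIZER;

void
publish_root(struct root *r, void *data)
{
	pthread_mutex_lock(&r->lock);
	r->flags |= 1;			/* analogue of setting VROOT */
	r->data = data;

	pthread_mutex_lock(&g_root_mtx);
	g_root = r;			/* publish only when complete */
	pthread_mutex_unlock(&g_root_mtx);

	/* Unlock last, mirroring the relocated VOP_UNLOCK above. */
	pthread_mutex_unlock(&r->lock);
}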
--- sys/miscfs/nullfs/null_vnops.c
+++ sys/miscfs/nullfs/null_vnops.c
@@ -49,19 +48,19 @@
  *
  * (See mount_null(8) for more information.)
  *
- * The null layer duplicates a portion of the file system
+ * The null layer duplicates a portion of the filesystem
  * name space under a new name.  In this respect, it is
- * similar to the loopback file system.  It differs from
+ * similar to the loopback filesystem.  It differs from
  * the loopback fs in two respects:  it is implemented using
  * a stackable layers techniques, and its "null-node"s stack above
  * all lower-layer vnodes, not just over directory vnodes.
  *
  * The null layer has two purposes.  First, it serves as a demonstration
  * of layering by proving a layer which does nothing.  (It actually
- * does everything the loopback file system does, which is slightly
+ * does everything the loopback filesystem does, which is slightly
  * more than nothing.)  Second, the null layer can serve as a prototype
  * layer.  Since it provides all necessary layer framework,
- * new file system layers can be created very easily be starting
+ * new filesystem layers can be created very easily by starting
  * with a null layer.
  *
  * The remainder of this man page examines the null layer as a basis
@@ -80,7 +79,7 @@
  *
  * OPERATION OF A NULL LAYER
  *
- * The null layer is the minimum file system layer,
+ * The null layer is the minimum filesystem layer,
  * simply bypassing all possible operations to the lower layer
  * for processing there.  The majority of its activity centers
  * on the bypass routine, through which nearly all vnode operations
@@ -142,7 +141,7 @@
  *
  * CREATING OTHER FILE SYSTEM LAYERS
  *
- * One of the easiest ways to construct new file system layers is to make
+ * One of the easiest ways to construct new filesystem layers is to make
  * a copy of the null layer, rename all files and variables, and
  * then begin modifing the copy.  Sed can be used to easily rename
  * all variables.
@@ -158,7 +157,7 @@
  * is appropriate in different situations.  In both cases,
  * it is the responsibility of the aliasing layer to make
  * the operation arguments "correct" for the lower layer
- * by mapping an vnode arguments to the lower layer.
+ * by mapping the vnode arguments to the lower layer.
  *
  * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
@@ -176,15 +175,23 @@

 #include 
 #include 
+#include 
 #include 
-#include 
-#include 
+#include 
+#include 
 #include 
 #include 
-#include 
-#include 
+#include 
+#include 
+#include 
+
 #include 
+#include 
+#include 
+#include 
+#include 
+
 static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
 SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
 	&null_bug_bypass, 0, "");
@@ -339,7 +346,7 @@
 		vppp = VOPARG_OFFSETTO(struct vnode***,
 				 descp->vdesc_vpp_offset,ap);
 		if (*vppp)
-			error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp);
+			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
 	}

 out:
@@ -385,7 +392,7 @@
 	 * Rely only on the PDIRUNLOCK flag which should be carefully
 	 * tracked by underlying filesystem.
 	 */
-	if (cnp->cn_flags & PDIRUNLOCK)
+	if ((cnp->cn_flags & PDIRUNLOCK) && dvp->v_vnlock != ldvp->v_vnlock)
 		VOP_UNLOCK(dvp, LK_THISLAYER, p);
 	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
 		if (ldvp == lvp) {
@@ -393,9 +400,12 @@
 			VREF(dvp);
 			vrele(lvp);
 		} else {
-			error = null_node_create(dvp->v_mount, lvp, &vp);
-			if (error == 0)
-				*ap->a_vpp = vp;
+			error = null_nodeget(dvp->v_mount, lvp, &vp);
+			if (error) {
+				/* XXX Cleanup needed... */
+				panic("null_nodeget failed");
+			}
+			*ap->a_vpp = vp;
 		}
 	}
 	return (error);
@@ -404,7 +414,7 @@
 /*
  * Setattr call. Disallow write attempts if the layer is mounted read-only.
  */
-int
+static int
 null_setattr(ap)
 	struct vop_setattr_args /* {
 		struct vnodeop_desc *a_desc;
@@ -487,7 +497,7 @@
 	/*
 	 * Disallow write attempts on read-only layers;
 	 * unless the file is a socket, fifo, or a block or
-	 * character device resident on the file system.
+	 * character device resident on the filesystem.
 	 */
 	if (mode & VWRITE) {
 		switch (vp->v_type) {
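The null_lock hunks that follow replace a direct LK_DRAIN on the shared lower lock with a hand-rolled drain: each normal locker bumps null_pending_locks around its lockmgr call, and the draining thread sleeps until that counter reaches zero. The same idea in portable C, as a hedged sketch that substitutes a pthread condition variable for the kernel's tsleep/wakeup channel (drain_lock, locker_enter, drain_wait are invented names):

#include <pthread.h>

struct drain_lock {
	pthread_mutex_t	 interlock;	/* analogue of the vnode interlock */
	pthread_cond_t	 drained;	/* analogue of the tsleep channel */
	int		 pending;	/* analogue of null_pending_locks */
	int		 drain_wanted;	/* analogue of null_drain_wakeup */
};

static struct drain_lock dl = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0
};

/* A normal lock attempt: counted so a drainer can wait us out. */
void
locker_enter(struct drain_lock *d)
{
	pthread_mutex_lock(&d->interlock);
	d->pending++;
	pthread_mutex_unlock(&d->interlock);

	/* ... take and use the real (shared lower) lock here ... */

	pthread_mutex_lock(&d->interlock);
	if (--d->pending == 0 && d->drain_wanted) {
		d->drain_wanted = 0;
		pthread_cond_broadcast(&d->drained);
	}
	pthread_mutex_unlock(&d->interlock);
}

/* Drain: wait until no counted lock attempts are in flight. */
void
drain_wait(struct drain_lock *d)
{
	pthread_mutex_lock(&d->interlock);
	while (d->pending > 0) {
		d->drain_wanted = 1;
		pthread_cond_wait(&d->drained, &d->interlock);
	}
	pthread_mutex_unlock(&d->interlock);
}

Note that pthread_cond_wait releases and reacquires the interlock atomically; the kernel version has to drop the interlock before tsleep, which is exactly the window the patch's "XXX race" comments point at.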
@@ -583,12 +593,13 @@
 	struct null_node *np = VTONULL(vp);
 	struct vnode *lvp;
 	int error;
+	struct null_node *nn;

 	if (flags & LK_THISLAYER) {
 		if (vp->v_vnlock != NULL) {
 			/* lock is shared across layers */
 			if (flags & LK_INTERLOCK)
-				simple_unlock(&vp->v_interlock);
+				VI_UNLOCK(vp);
 			return 0;
 		}
 		error = lockmgr(&np->null_lock, flags & ~LK_THISLAYER,
@@ -605,40 +616,98 @@
 	 * going away doesn't mean the struct lock below us is.
 	 * LK_EXCLUSIVE is fine.
 	 */
+	if ((flags & LK_INTERLOCK) == 0) {
+		VI_LOCK(vp);
+		flags |= LK_INTERLOCK;
+	}
+	nn = VTONULL(vp);
 	if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
 		NULLFSDEBUG("null_lock: avoiding LK_DRAIN\n");
-		return(lockmgr(vp->v_vnlock,
-			(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
-			&vp->v_interlock, p));
+		/*
+		 * Emulate lock draining by waiting for all other
+		 * pending locks to complete.  Afterwards the
+		 * lockmgr call might block, but no other threads
+		 * will attempt to use this nullfs vnode due to the
+		 * VXLOCK flag.
+		 */
+		while (nn->null_pending_locks > 0) {
+			nn->null_drain_wakeup = 1;
+			VI_UNLOCK(vp);
+			tsleep(&nn->null_pending_locks,
+			       PVFS,
+			       "nuldr", 0);
+			/* XXX race */
+			VI_LOCK(vp);
+		}
+		error = lockmgr(vp->v_vnlock,
+				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
+				&vp->v_interlock, p);
+		return error;
+	}
+	nn->null_pending_locks++;
+	error = lockmgr(vp->v_vnlock, flags, &vp->v_interlock, p);
+	VI_LOCK(vp);
+	/*
+	 * If we're called from vrele then v_usecount may have been 0
+	 * and another process might have initiated a recycle
+	 * operation.  When that happens, just back out.
+	 */
+	if (error == 0 && (vp->v_flag & VXLOCK) != 0 &&
+	    p != vp->v_vxproc) {
+		lockmgr(vp->v_vnlock,
+			(flags & ~LK_TYPE_MASK) | LK_RELEASE,
+			&vp->v_interlock, p);
+		VI_LOCK(vp);
+		error = ENOENT;
+	}
+	nn->null_pending_locks--;
+	/*
+	 * Wake up the process draining the vnode after all
+	 * pending lock attempts have failed.
+	 */
+	if (nn->null_pending_locks == 0 &&
+	    nn->null_drain_wakeup != 0) {
+		nn->null_drain_wakeup = 0;
+		wakeup(&nn->null_pending_locks);
+	}
+	if (error == ENOENT && (vp->v_flag & VXLOCK) != 0 &&
+	    vp->v_vxproc != curproc) {
+		vp->v_flag |= VXWANT;
+		VI_UNLOCK(vp);
+		tsleep(vp, PINOD, "nulbo", 0);
+		/* XXX race */
+		VI_LOCK(vp);
 	}
-	return(lockmgr(vp->v_vnlock, flags, &vp->v_interlock, p));
-	}
-	/*
-	 * To prevent race conditions involving doing a lookup
-	 * on "..", we have to lock the lower node, then lock our
-	 * node. Most of the time it won't matter that we lock our
-	 * node (as any locking would need the lower one locked
-	 * first). But we can LK_DRAIN the upper lock as a step
-	 * towards decomissioning it.
-	 */
-	lvp = NULLVPTOLOWERVP(vp);
-	if (lvp == NULL)
-		return (lockmgr(&np->null_lock, flags, &vp->v_interlock, p));
-	if (flags & LK_INTERLOCK) {
 		VI_UNLOCK(vp);
-		flags &= ~LK_INTERLOCK;
-	}
-	if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
-		error = VOP_LOCK(lvp,
-			(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE, p);
-	} else
-		error = VOP_LOCK(lvp, flags, p);
-	if (error)
+		return error;
+	} else {
+		/*
+		 * To prevent race conditions involving doing a lookup
+		 * on "..", we have to lock the lower node, then lock our
+		 * node. Most of the time it won't matter that we lock our
+		 * node (as any locking would need the lower one locked
+		 * first). But we can LK_DRAIN the upper lock as a step
+		 * towards decommissioning it.
+		 */
+		lvp = NULLVPTOLOWERVP(vp);
+		if (lvp == NULL)
+			return (lockmgr(&np->null_lock, flags, &vp->v_interlock, p));
+		if (flags & LK_INTERLOCK) {
+			VI_UNLOCK(vp);
+			flags &= ~LK_INTERLOCK;
+		}
+		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
+			error = VOP_LOCK(lvp,
+				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE, p);
+		} else
			error = VOP_LOCK(lvp, flags, p);
+		if (error)
+			return (error);
+		error = lockmgr(&np->null_lock, flags, &vp->v_interlock, p);
+		if (error)
+			VOP_UNLOCK(lvp, 0, p);
 		return (error);
-	error = lockmgr(&np->null_lock, flags, &vp->v_interlock, p);
-	if (error)
-		VOP_UNLOCK(lvp, 0, p);
-	return (error);
+	}
 }

/*
@@ -675,10 +744,9 @@
 			VI_UNLOCK(vp);
 			flags &= ~LK_INTERLOCK;
 		}
-		VOP_UNLOCK(lvp, flags, p);
+		VOP_UNLOCK(lvp, flags & ~LK_INTERLOCK, p);
 	} else
 		flags &= ~LK_THISLAYER;
-	ap->a_flags = flags;
 	return (lockmgr(&np->null_lock, flags | LK_RELEASE,
 		&vp->v_interlock, p));
 }
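null_lock's non-drain path above follows a lock-then-revalidate shape: take the shared lower lock, then re-check under the interlock whether recycling started while we slept, and if so undo the lock and fail rather than touch a dying vnode. A hedged userland rendering of that pattern, with errno-style returns and invented names (obj, obj_lock):

#include <errno.h>
#include <pthread.h>

struct obj {
	pthread_mutex_t	 lock;		/* the "real" (shared lower) lock */
	pthread_mutex_t	 interlock;	/* guards the dying flag */
	int		 dying;	/* analogue of VXLOCK */
};

/*
 * Lock-then-revalidate: the object may have started dying while we
 * slept on obj->lock, so re-check before claiming success.
 */
int
obj_lock(struct obj *o)
{
	pthread_mutex_lock(&o->lock);

	pthread_mutex_lock(&o->interlock);
	if (o->dying) {
		pthread_mutex_unlock(&o->interlock);
		pthread_mutex_unlock(&o->lock);		/* back out */
		return (ENOENT);
	}
	pthread_mutex_unlock(&o->interlock);
	return (0);
}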
@@ -697,11 +765,15 @@
 	return (lockstatus(&VTONULL(vp)->null_lock, p));
 }

-
 /*
  * There is no way to tell that someone issued remove/rmdir operation
  * on the underlying filesystem. For now we just have to release lowevrp
  * as soon as possible.
+ *
+ * Note: we can't release any resources nor remove the vnode from the hash
+ * before the appropriate VXLOCK handling is done, because another process
+ * can find this vnode in the hash during inactivation and may be sitting in
+ * vget() waiting for null_inactive to unlock it. Thus we do all this in VOP_RECLAIM.
  */
 static int
 null_inactive(ap)
	struct vop_inactive_args /* {
@@ -712,32 +784,20 @@
 {
 	struct vnode *vp = ap->a_vp;
 	struct proc *p = ap->a_p;
-	struct null_node *xp = VTONULL(vp);
-	struct vnode *lowervp = xp->null_lowervp;

-	lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, p);
-	LIST_REMOVE(xp, null_hash);
-	lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
-
-	xp->null_lowervp = NULLVP;
-	if (vp->v_vnlock != NULL) {
-		vp->v_vnlock = &xp->null_lock;	/* we no longer share the lock */
-	} else
-		VOP_UNLOCK(vp, LK_THISLAYER, p);
+	VOP_UNLOCK(vp, 0, p);

-	vput(lowervp);
 	/*
-	 * Now it is safe to drop references to the lower vnode.
-	 * VOP_INACTIVE() will be called by vrele() if necessary.
+	 * If this is the last reference, then free up the vnode
+	 * so as not to tie up the lower vnodes.
 	 */
-	vrele (lowervp);
+	vrecycle(vp, NULL, p);

 	return (0);
 }

 /*
- * We can free memory in null_inactive, but we do this
- * here. (Possible to guard vp->v_data to point somewhere)
+ * Now, the VXLOCK is in force and we're free to destroy the null vnode.
  */
 static int
 null_reclaim(ap)
@@ -747,10 +807,18 @@
 	} */ *ap;
 {
 	struct vnode *vp = ap->a_vp;
-	void *vdata = vp->v_data;
+	struct null_node *xp = VTONULL(vp);
+	struct vnode *lowervp = xp->null_lowervp;
+
+	if (lowervp) {
+		null_hashrem(xp);
+
+		vrele(lowervp);
+		vrele(lowervp);
+	}

 	vp->v_data = NULL;
-	FREE(vdata, M_NULLFSNODE);
+	FREE(xp, M_NULLFSNODE);

 	return (0);
 }
@@ -834,7 +902,9 @@
 vop_t **null_vnodeop_p;
 static struct vnodeopv_entry_desc null_vnodeop_entries[] = {
 	{ &vop_default_desc,		(vop_t *) null_bypass },
+	{ &vop_access_desc,		(vop_t *) null_access },
+	{ &vop_bmap_desc,		(vop_t *) vop_eopnotsupp },
 	{ &vop_createvobject_desc,	(vop_t *) null_createvobject },
 	{ &vop_destroyvobject_desc,	(vop_t *) null_destroyvobject },
 	{ &vop_getattr_desc,		(vop_t *) null_getattr },
@@ -848,6 +918,7 @@
 	{ &vop_reclaim_desc,		(vop_t *) null_reclaim },
 	{ &vop_rename_desc,		(vop_t *) null_rename },
 	{ &vop_setattr_desc,		(vop_t *) null_setattr },
+	{ &vop_strategy_desc,		(vop_t *) vop_eopnotsupp },
 	{ &vop_unlock_desc,		(vop_t *) null_unlock },
 	{ NULL, NULL }
 };
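The inactive/reclaim split above is a classic two-phase teardown: VOP_INACTIVE only offers the vnode for recycling, while removing it from the hash and dropping the two lower references wait until VOP_RECLAIM runs with VXLOCK in force, when no new lookup can succeed. As a hedged userland analogy (pthreads; cached, cached_inactive, cached_reclaim are invented names), phase one only marks the object dying and phase two frees it after the table entry is gone:

#include <pthread.h>
#include <stdlib.h>

struct cached {
	struct cached	**slot;		/* back pointer into the table */
	pthread_mutex_t	  interlock;
	int		  dying;	/* analogue of VXLOCK */
	void		 *lower;	/* analogue of the lower references */
};

/* Phase one ("inactive"): only mark; lookups can still find us and skip. */
void
cached_inactive(struct cached *c)
{
	pthread_mutex_lock(&c->interlock);
	c->dying = 1;
	pthread_mutex_unlock(&c->interlock);
}

/* Phase two ("reclaim"): unhook from the table, then release resources. */
void
cached_reclaim(struct cached *c, pthread_mutex_t *table_lock)
{
	pthread_mutex_lock(table_lock);
	*c->slot = NULL;		/* now no lookup can return us */
	pthread_mutex_unlock(table_lock);

	free(c->lower);			/* analogue of the paired vrele()s */
	free(c);
}

Freeing anything in phase one would leave a window where a concurrent lookup, already sleeping in vget() on this vnode, wakes up holding a pointer into freed memory; that is exactly the hazard the relocated hash removal avoids.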