From 102a0743326a03cd1a1202ceda21e175b7d3575c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:20:52 +0000
Subject: [PATCH] nfsd: update nfs4state.c state handling

---
 kernel/fs/nfsd/nfs4state.c | 1882 +++++++++++++++++++++++++++++++--------------------------
 1 file changed, 1023 insertions(+), 859 deletions(-)

diff --git a/kernel/fs/nfsd/nfs4state.c b/kernel/fs/nfsd/nfs4state.c
index ed5429d..b045be7 100644
--- a/kernel/fs/nfsd/nfs4state.c
+++ b/kernel/fs/nfsd/nfs4state.c
@@ -42,6 +42,7 @@
 #include <linux/sunrpc/svcauth_gss.h>
 #include <linux/sunrpc/addr.h>
 #include <linux/jhash.h>
+#include <linux/string_helpers.h>
 #include "xdr4.h"
 #include "xdr4cb.h"
 #include "vfs.h"
@@ -49,6 +50,8 @@
 
 #include "netns.h"
 #include "pnfs.h"
+#include "filecache.h"
+#include "trace.h"
 
 #define NFSDDBG_FACILITY NFSDDBG_PROC
 
@@ -77,6 +80,8 @@
 /* forward declarations */
 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
+void nfsd4_end_grace(struct nfsd_net *nn);
+static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
 
 /* Locking: */
 
@@ -97,6 +102,13 @@
  * the refcount on the open stateid to drop.
  */
 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
+
+/*
+ * A waitqueue where a writer to clients/#/ctl destroying a client can
+ * wait for cl_rpc_users to drop to 0 and then for the client to be
+ * unhashed.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
 
 static struct kmem_cache *client_slab;
 static struct kmem_cache *openowner_slab;
@@ -137,7 +149,7 @@
 
 	if (is_client_expired(clp))
 		return nfserr_expired;
-	atomic_inc(&clp->cl_refcount);
+	atomic_inc(&clp->cl_rpc_users);
 	return nfs_ok;
 }
 
@@ -156,11 +168,8 @@
 		return;
 	}
 
-	dprintk("renewing client (clientid %08x/%08x)\n",
-			clp->cl_clientid.cl_boot,
-			clp->cl_clientid.cl_id);
 	list_move_tail(&clp->cl_lru, &nn->client_lru);
-	clp->cl_time = get_seconds();
+	clp->cl_time = ktime_get_boottime_seconds();
 }
 
 static void put_client_renew_locked(struct nfs4_client *clp)
@@ -169,20 +178,24 @@
 
 	lockdep_assert_held(&nn->client_lock);
 
-	if (!atomic_dec_and_test(&clp->cl_refcount))
+	if (!atomic_dec_and_test(&clp->cl_rpc_users))
 		return;
 	if (!is_client_expired(clp))
 		renew_client_locked(clp);
+	else
+		wake_up_all(&expiry_wq);
 }
 
 static void put_client_renew(struct nfs4_client *clp)
 {
 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 
-	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
+	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
 		return;
 	if (!is_client_expired(clp))
 		renew_client_locked(clp);
+	else
+		wake_up_all(&expiry_wq);
 	spin_unlock(&nn->client_lock);
 }
 
@@ -238,7 +251,7 @@
 	}
 	spin_unlock(&nn->blocked_locks_lock);
 	if (found)
-		posix_unblock_lock(&found->nbl_lock);
+		locks_delete_block(&found->nbl_lock);
 	return found;
 }
 
@@ -267,6 +280,7 @@
 static void
 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
 {
+	locks_delete_block(&nbl->nbl_lock);
 	locks_release_private(&nbl->nbl_lock);
 	kfree(nbl);
 }
@@ -295,9 +309,16 @@
 		nbl = list_first_entry(&reaplist,
 					struct nfsd4_blocked_lock, nbl_lru);
 		list_del_init(&nbl->nbl_lru);
-		posix_unblock_lock(&nbl->nbl_lock);
 		free_blocked_lock(nbl);
 	}
+}
+
+static void
+nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
+{
+	struct nfsd4_blocked_lock *nbl = container_of(cb,
+				struct nfsd4_blocked_lock, nbl_cb);
+	locks_delete_block(&nbl->nbl_lock);
 }
 
 static int
@@ -327,6 +348,7 @@
 }
 
 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
+	.prepare	= nfsd4_cb_notify_lock_prepare,
.done = nfsd4_cb_notify_lock_done, .release = nfsd4_cb_notify_lock_release, }; @@ -409,18 +431,18 @@ } } -static struct file * +static struct nfsd_file * __nfs4_get_fd(struct nfs4_file *f, int oflag) { if (f->fi_fds[oflag]) - return get_file(f->fi_fds[oflag]); + return nfsd_file_get(f->fi_fds[oflag]); return NULL; } -static struct file * +static struct nfsd_file * find_writeable_file_locked(struct nfs4_file *f) { - struct file *ret; + struct nfsd_file *ret; lockdep_assert_held(&f->fi_lock); @@ -430,10 +452,10 @@ return ret; } -static struct file * +static struct nfsd_file * find_writeable_file(struct nfs4_file *f) { - struct file *ret; + struct nfsd_file *ret; spin_lock(&f->fi_lock); ret = find_writeable_file_locked(f); @@ -442,9 +464,10 @@ return ret; } -static struct file *find_readable_file_locked(struct nfs4_file *f) +static struct nfsd_file * +find_readable_file_locked(struct nfs4_file *f) { - struct file *ret; + struct nfsd_file *ret; lockdep_assert_held(&f->fi_lock); @@ -454,10 +477,10 @@ return ret; } -static struct file * +static struct nfsd_file * find_readable_file(struct nfs4_file *f) { - struct file *ret; + struct nfsd_file *ret; spin_lock(&f->fi_lock); ret = find_readable_file_locked(f); @@ -466,10 +489,10 @@ return ret; } -struct file * +struct nfsd_file * find_any_file(struct nfs4_file *f) { - struct file *ret; + struct nfsd_file *ret; if (!f) return NULL; @@ -482,6 +505,28 @@ } spin_unlock(&f->fi_lock); return ret; +} + +static struct nfsd_file *find_any_file_locked(struct nfs4_file *f) +{ + lockdep_assert_held(&f->fi_lock); + + if (f->fi_fds[O_RDWR]) + return f->fi_fds[O_RDWR]; + if (f->fi_fds[O_WRONLY]) + return f->fi_fds[O_WRONLY]; + if (f->fi_fds[O_RDONLY]) + return f->fi_fds[O_RDONLY]; + return NULL; +} + +static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f) +{ + lockdep_assert_held(&f->fi_lock); + + if (f->fi_deleg_file) + return f->fi_deleg_file; + return NULL; } static atomic_long_t num_delegations; @@ -572,17 +617,17 @@ might_lock(&fp->fi_lock); if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { - struct file *f1 = NULL; - struct file *f2 = NULL; + struct nfsd_file *f1 = NULL; + struct nfsd_file *f2 = NULL; swap(f1, fp->fi_fds[oflag]); if (atomic_read(&fp->fi_access[1 - oflag]) == 0) swap(f2, fp->fi_fds[O_RDWR]); spin_unlock(&fp->fi_lock); if (f1) - fput(f1); + nfsd_file_put(f1); if (f2) - fput(f2); + nfsd_file_put(f2); } } @@ -688,7 +733,8 @@ idr_preload(GFP_KERNEL); spin_lock(&cl->cl_lock); - new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT); + /* Reserving 0 for start of file in nfsdfs "states" file: */ + new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT); spin_unlock(&cl->cl_lock); idr_preload_end(); if (new_id < 0) @@ -701,6 +747,7 @@ /* Will be incremented before return to client: */ refcount_set(&stid->sc_count, 1); spin_lock_init(&stid->sc_lock); + INIT_LIST_HEAD(&stid->sc_cp_list); /* * It shouldn't be a problem to reuse an opaque stateid value. @@ -717,6 +764,83 @@ return NULL; } +/* + * Create a unique stateid_t to represent each COPY. 
+ */ +static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid, + unsigned char sc_type) +{ + int new_id; + + stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time; + stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id; + stid->sc_type = sc_type; + + idr_preload(GFP_KERNEL); + spin_lock(&nn->s2s_cp_lock); + new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT); + stid->stid.si_opaque.so_id = new_id; + stid->stid.si_generation = 1; + spin_unlock(&nn->s2s_cp_lock); + idr_preload_end(); + if (new_id < 0) + return 0; + return 1; +} + +int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy) +{ + return nfs4_init_cp_state(nn, ©->cp_stateid, NFS4_COPY_STID); +} + +struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn, + struct nfs4_stid *p_stid) +{ + struct nfs4_cpntf_state *cps; + + cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL); + if (!cps) + return NULL; + cps->cpntf_time = ktime_get_boottime_seconds(); + refcount_set(&cps->cp_stateid.sc_count, 1); + if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID)) + goto out_free; + spin_lock(&nn->s2s_cp_lock); + list_add(&cps->cp_list, &p_stid->sc_cp_list); + spin_unlock(&nn->s2s_cp_lock); + return cps; +out_free: + kfree(cps); + return NULL; +} + +void nfs4_free_copy_state(struct nfsd4_copy *copy) +{ + struct nfsd_net *nn; + + WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID); + nn = net_generic(copy->cp_clp->net, nfsd_net_id); + spin_lock(&nn->s2s_cp_lock); + idr_remove(&nn->s2s_cp_stateids, + copy->cp_stateid.stid.si_opaque.so_id); + spin_unlock(&nn->s2s_cp_lock); +} + +static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid) +{ + struct nfs4_cpntf_state *cps; + struct nfsd_net *nn; + + nn = net_generic(net, nfsd_net_id); + spin_lock(&nn->s2s_cp_lock); + while (!list_empty(&stid->sc_cp_list)) { + cps = list_first_entry(&stid->sc_cp_list, + struct nfs4_cpntf_state, cp_list); + _free_cpntf_state_locked(nn, cps); + } + spin_unlock(&nn->s2s_cp_lock); +} + static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) { struct nfs4_stid *stid; @@ -730,6 +854,7 @@ static void nfs4_free_deleg(struct nfs4_stid *stid) { + WARN_ON(!list_empty(&stid->sc_cp_list)); kmem_cache_free(deleg_slab, stid); atomic_long_dec(&num_delegations); } @@ -755,7 +880,7 @@ static DEFINE_SPINLOCK(blocked_delegations_lock); static struct bloom_pair { int entries, old_entries; - time_t swap_time; + time64_t swap_time; int new; /* index into 'set' */ DECLARE_BITMAP(set[2], 256); } blocked_delegations; @@ -767,15 +892,15 @@ if (bd->entries == 0) return 0; - if (seconds_since_boot() - bd->swap_time > 30) { + if (ktime_get_seconds() - bd->swap_time > 30) { spin_lock(&blocked_delegations_lock); - if (seconds_since_boot() - bd->swap_time > 30) { + if (ktime_get_seconds() - bd->swap_time > 30) { bd->entries -= bd->old_entries; bd->old_entries = bd->entries; memset(bd->set[bd->new], 0, sizeof(bd->set[0])); bd->new = 1-bd->new; - bd->swap_time = seconds_since_boot(); + bd->swap_time = ktime_get_seconds(); } spin_unlock(&blocked_delegations_lock); } @@ -805,7 +930,7 @@ __set_bit((hash>>8)&255, bd->set[bd->new]); __set_bit((hash>>16)&255, bd->set[bd->new]); if (bd->entries == 0) - bd->swap_time = seconds_since_boot(); + bd->swap_time = ktime_get_seconds(); bd->entries += 1; spin_unlock(&blocked_delegations_lock); } @@ -864,6 +989,7 @@ return; } idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); + nfs4_free_cpntf_statelist(clp->net, s); 
spin_unlock(&clp->cl_lock); s->sc_free(s); if (fp) @@ -884,25 +1010,25 @@ static void put_deleg_file(struct nfs4_file *fp) { - struct file *filp = NULL; + struct nfsd_file *nf = NULL; spin_lock(&fp->fi_lock); if (--fp->fi_delegees == 0) - swap(filp, fp->fi_deleg_file); + swap(nf, fp->fi_deleg_file); spin_unlock(&fp->fi_lock); - if (filp) - fput(filp); + if (nf) + nfsd_file_put(nf); } static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp) { struct nfs4_file *fp = dp->dl_stid.sc_file; - struct file *filp = fp->fi_deleg_file; + struct nfsd_file *nf = fp->fi_deleg_file; WARN_ON_ONCE(!fp->fi_delegees); - vfs_setlease(filp, F_UNLCK, NULL, (void **)&dp); + vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp); put_deleg_file(fp); } @@ -1019,9 +1145,9 @@ WARN_ON(!list_empty(&dp->dl_recall_lru)); if (clp->cl_minorversion) { + spin_lock(&clp->cl_lock); dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; refcount_inc(&dp->dl_stid.sc_count); - spin_lock(&clp->cl_lock); list_add(&dp->dl_recall_lru, &clp->cl_revoked); spin_unlock(&clp->cl_lock); } @@ -1037,9 +1163,9 @@ return id & CLIENT_HASH_MASK; } -static unsigned int clientstr_hashval(const char *name) +static unsigned int clientstr_hashval(struct xdr_netobj name) { - return opaque_hashval(name, 8) & CLIENT_HASH_MASK; + return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK; } /* @@ -1244,6 +1370,7 @@ release_all_access(stp); if (stp->st_stateowner) nfs4_put_stateowner(stp->st_stateowner); + WARN_ON(!list_empty(&stid->sc_cp_list)); kmem_cache_free(stateid_slab, stid); } @@ -1251,11 +1378,14 @@ { struct nfs4_ol_stateid *stp = openlockstateid(stid); struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); - struct file *file; + struct nfsd_file *nf; - file = find_any_file(stp->st_stid.sc_file); - if (file) - filp_close(file, (fl_owner_t)lo); + nf = find_any_file(stp->st_stid.sc_file); + if (nf) { + get_file(nf->nf_file); + filp_close(nf->nf_file, (fl_owner_t)lo); + nfsd_file_put(nf); + } nfs4_free_ol_stateid(stid); } @@ -1526,21 +1656,39 @@ * re-negotiate active sessions and reduce their slot usage to make * room for new connections. For now we just fail the create session. */ -static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca) +static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) { u32 slotsize = slot_bytes(ca); u32 num = ca->maxreqs; unsigned long avail, total_avail; + unsigned int scale_factor; spin_lock(&nfsd_drc_lock); - total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used; + if (nfsd_drc_max_mem > nfsd_drc_mem_used) + total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used; + else + /* We have handed out more space than we chose in + * set_max_drc() to allow. That isn't really a + * problem as long as that doesn't make us think we + * have lots more due to integer overflow. + */ + total_avail = 0; avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail); /* - * Never use more than a third of the remaining memory, - * unless it's the only way to give this client a slot: + * Never use more than a fraction of the remaining memory, + * unless it's the only way to give this client a slot. + * The chosen fraction is either 1/8 or 1/number of threads, + * whichever is smaller. This ensures there are adequate + * slots to support multiple clients per thread. + * Give the client one slot even if that would require + * over-allocation--it is better than failure. 
*/ - avail = clamp_t(unsigned long, avail, slotsize, total_avail/3); + scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads); + + avail = clamp_t(unsigned long, avail, slotsize, + total_avail/scale_factor); num = min_t(int, num, avail / slotsize); + num = max_t(int, num, 1); nfsd_drc_mem_used += num * slotsize; spin_unlock(&nfsd_drc_lock); @@ -1802,8 +1950,7 @@ */ if (clid->cl_boot == (u32)nn->boot_time) return 0; - dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n", - clid->cl_boot, clid->cl_id, nn->boot_time); + trace_nfsd_clid_stale(clid); return 1; } @@ -1820,7 +1967,7 @@ clp = kmem_cache_zalloc(client_slab, GFP_KERNEL); if (clp == NULL) return NULL; - clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL); + xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL); if (clp->cl_name.data == NULL) goto err_no_name; clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE, @@ -1830,10 +1977,9 @@ goto err_no_hashtbl; for (i = 0; i < OWNER_HASH_SIZE; i++) INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); - clp->cl_name.len = name.len; INIT_LIST_HEAD(&clp->cl_sessions); idr_init(&clp->cl_stateids); - atomic_set(&clp->cl_refcount, 0); + atomic_set(&clp->cl_rpc_users, 0); clp->cl_cb_state = NFSD4_CB_UNKNOWN; INIT_LIST_HEAD(&clp->cl_idhash); INIT_LIST_HEAD(&clp->cl_openowners); @@ -1843,6 +1989,8 @@ #ifdef CONFIG_NFSD_PNFS INIT_LIST_HEAD(&clp->cl_lo_states); #endif + INIT_LIST_HEAD(&clp->async_copies); + spin_lock_init(&clp->async_lock); spin_lock_init(&clp->cl_lock); rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); return clp; @@ -1851,6 +1999,25 @@ err_no_name: kmem_cache_free(client_slab, clp); return NULL; +} + +static void __free_client(struct kref *k) +{ + struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref); + struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs); + + free_svc_cred(&clp->cl_cred); + kfree(clp->cl_ownerstr_hashtbl); + kfree(clp->cl_name.data); + kfree(clp->cl_nii_domain.data); + kfree(clp->cl_nii_name.data); + idr_destroy(&clp->cl_stateids); + kmem_cache_free(client_slab, clp); +} + +static void drop_client(struct nfs4_client *clp) +{ + kref_put(&clp->cl_nfsdfs.cl_ref, __free_client); } static void @@ -1865,11 +2032,12 @@ free_session(ses); } rpc_destroy_wait_queue(&clp->cl_cb_waitq); - free_svc_cred(&clp->cl_cred); - kfree(clp->cl_ownerstr_hashtbl); - kfree(clp->cl_name.data); - idr_destroy(&clp->cl_stateids); - kmem_cache_free(client_slab, clp); + if (clp->cl_nfsd_dentry) { + nfsd_client_rmdir(clp->cl_nfsd_dentry); + clp->cl_nfsd_dentry = NULL; + wake_up_all(&expiry_wq); + } + drop_client(clp); } /* must be called under the client_lock */ @@ -1910,7 +2078,7 @@ static __be32 mark_client_expired_locked(struct nfs4_client *clp) { - if (atomic_read(&clp->cl_refcount)) + if (atomic_read(&clp->cl_rpc_users)) return nfserr_jukebox; unhash_client_locked(clp); return nfs_ok; @@ -1958,10 +2126,12 @@ } } nfsd4_return_all_client_layouts(clp); + nfsd4_shutdown_copy(clp); nfsd4_shutdown_callback(clp); if (clp->cl_cb_conn.cb_xprt) svc_xprt_put(clp->cl_cb_conn.cb_xprt); free_client(clp); + wake_up_all(&expiry_wq); } static void @@ -1969,6 +2139,22 @@ { unhash_client(clp); __destroy_client(clp); +} + +static void inc_reclaim_complete(struct nfs4_client *clp) +{ + struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); + + if (!nn->track_reclaim_completes) + return; + if (!nfsd4_find_reclaim_client(clp->cl_name, nn)) + return; + if (atomic_inc_return(&nn->nr_reclaim_complete) == + nn->reclaim_str_hashtbl_size) { + 
printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n", + clp->net->ns.inum); + nfsd4_end_grace(nn); + } } static void expire_client(struct nfs4_client *clp) @@ -2020,11 +2206,6 @@ if (o1->len > o2->len) return 1; return memcmp(o1->data, o2->data, o1->len); -} - -static int same_name(const char *n1, const char *n2) -{ - return 0 == memcmp(n1, n2, HEXDIR_LEN); } static int @@ -2121,14 +2302,14 @@ * This is opaque to client, so no need to byte-swap. Use * __force to keep sparse happy */ - verf[0] = (__force __be32)get_seconds(); + verf[0] = (__force __be32)(u32)ktime_get_real_seconds(); verf[1] = (__force __be32)nn->clverifier_counter++; memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); } static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) { - clp->cl_clientid.cl_boot = nn->boot_time; + clp->cl_clientid.cl_boot = (u32)nn->boot_time; clp->cl_clientid.cl_id = nn->clientid_counter++; gen_confirm(clp, nn); } @@ -2161,6 +2342,380 @@ return s; } +static struct nfs4_client *get_nfsdfs_clp(struct inode *inode) +{ + struct nfsdfs_client *nc; + nc = get_nfsdfs_client(inode); + if (!nc) + return NULL; + return container_of(nc, struct nfs4_client, cl_nfsdfs); +} + +static void seq_quote_mem(struct seq_file *m, char *data, int len) +{ + seq_printf(m, "\""); + seq_escape_mem_ascii(m, data, len); + seq_printf(m, "\""); +} + +static int client_info_show(struct seq_file *m, void *v) +{ + struct inode *inode = m->private; + struct nfs4_client *clp; + u64 clid; + + clp = get_nfsdfs_clp(inode); + if (!clp) + return -ENXIO; + memcpy(&clid, &clp->cl_clientid, sizeof(clid)); + seq_printf(m, "clientid: 0x%llx\n", clid); + seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr); + seq_printf(m, "name: "); + seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len); + seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion); + if (clp->cl_nii_domain.data) { + seq_printf(m, "Implementation domain: "); + seq_quote_mem(m, clp->cl_nii_domain.data, + clp->cl_nii_domain.len); + seq_printf(m, "\nImplementation name: "); + seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); + seq_printf(m, "\nImplementation time: [%lld, %ld]\n", + clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); + } + drop_client(clp); + + return 0; +} + +static int client_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, client_info_show, inode); +} + +static const struct file_operations client_info_fops = { + .open = client_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void *states_start(struct seq_file *s, loff_t *pos) + __acquires(&clp->cl_lock) +{ + struct nfs4_client *clp = s->private; + unsigned long id = *pos; + void *ret; + + spin_lock(&clp->cl_lock); + ret = idr_get_next_ul(&clp->cl_stateids, &id); + *pos = id; + return ret; +} + +static void *states_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct nfs4_client *clp = s->private; + unsigned long id = *pos; + void *ret; + + id = *pos; + id++; + ret = idr_get_next_ul(&clp->cl_stateids, &id); + *pos = id; + return ret; +} + +static void states_stop(struct seq_file *s, void *v) + __releases(&clp->cl_lock) +{ + struct nfs4_client *clp = s->private; + + spin_unlock(&clp->cl_lock); +} + +static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f) +{ + seq_printf(s, "filename: \"%pD2\"", f->nf_file); +} + +static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f) +{ + struct inode *inode = 
f->nf_inode; + + seq_printf(s, "superblock: \"%02x:%02x:%ld\"", + MAJOR(inode->i_sb->s_dev), + MINOR(inode->i_sb->s_dev), + inode->i_ino); +} + +static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo) +{ + seq_printf(s, "owner: "); + seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); +} + +static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid) +{ + seq_printf(s, "0x%.8x", stid->si_generation); + seq_printf(s, "%12phN", &stid->si_opaque); +} + +static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) +{ + struct nfs4_ol_stateid *ols; + struct nfs4_file *nf; + struct nfsd_file *file; + struct nfs4_stateowner *oo; + unsigned int access, deny; + + if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID) + return 0; /* XXX: or SEQ_SKIP? */ + ols = openlockstateid(st); + oo = ols->st_stateowner; + nf = st->sc_file; + + spin_lock(&nf->fi_lock); + file = find_any_file_locked(nf); + if (!file) + goto out; + + seq_printf(s, "- "); + nfs4_show_stateid(s, &st->sc_stateid); + seq_printf(s, ": { type: open, "); + + access = bmap_to_share_mode(ols->st_access_bmap); + deny = bmap_to_share_mode(ols->st_deny_bmap); + + seq_printf(s, "access: %s%s, ", + access & NFS4_SHARE_ACCESS_READ ? "r" : "-", + access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); + seq_printf(s, "deny: %s%s, ", + deny & NFS4_SHARE_ACCESS_READ ? "r" : "-", + deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); + + nfs4_show_superblock(s, file); + seq_printf(s, ", "); + nfs4_show_fname(s, file); + seq_printf(s, ", "); + nfs4_show_owner(s, oo); + seq_printf(s, " }\n"); +out: + spin_unlock(&nf->fi_lock); + return 0; +} + +static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) +{ + struct nfs4_ol_stateid *ols; + struct nfs4_file *nf; + struct nfsd_file *file; + struct nfs4_stateowner *oo; + + ols = openlockstateid(st); + oo = ols->st_stateowner; + nf = st->sc_file; + spin_lock(&nf->fi_lock); + file = find_any_file_locked(nf); + if (!file) + goto out; + + seq_printf(s, "- "); + nfs4_show_stateid(s, &st->sc_stateid); + seq_printf(s, ": { type: lock, "); + + /* + * Note: a lock stateid isn't really the same thing as a lock, + * it's the locking state held by one owner on a file, and there + * may be multiple (or no) lock ranges associated with it. + * (Same for the matter is true of open stateids.) + */ + + nfs4_show_superblock(s, file); + /* XXX: open stateid? */ + seq_printf(s, ", "); + nfs4_show_fname(s, file); + seq_printf(s, ", "); + nfs4_show_owner(s, oo); + seq_printf(s, " }\n"); +out: + spin_unlock(&nf->fi_lock); + return 0; +} + +static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) +{ + struct nfs4_delegation *ds; + struct nfs4_file *nf; + struct nfsd_file *file; + + ds = delegstateid(st); + nf = st->sc_file; + spin_lock(&nf->fi_lock); + file = find_deleg_file_locked(nf); + if (!file) + goto out; + + seq_printf(s, "- "); + nfs4_show_stateid(s, &st->sc_stateid); + seq_printf(s, ": { type: deleg, "); + + /* Kinda dead code as long as we only support read delegs: */ + seq_printf(s, "access: %s, ", + ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w"); + + /* XXX: lease time, whether it's being recalled. 
*/ + + nfs4_show_superblock(s, file); + seq_printf(s, ", "); + nfs4_show_fname(s, file); + seq_printf(s, " }\n"); +out: + spin_unlock(&nf->fi_lock); + return 0; +} + +static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st) +{ + struct nfs4_layout_stateid *ls; + struct nfsd_file *file; + + ls = container_of(st, struct nfs4_layout_stateid, ls_stid); + file = ls->ls_file; + + seq_printf(s, "- "); + nfs4_show_stateid(s, &st->sc_stateid); + seq_printf(s, ": { type: layout, "); + + /* XXX: What else would be useful? */ + + nfs4_show_superblock(s, file); + seq_printf(s, ", "); + nfs4_show_fname(s, file); + seq_printf(s, " }\n"); + + return 0; +} + +static int states_show(struct seq_file *s, void *v) +{ + struct nfs4_stid *st = v; + + switch (st->sc_type) { + case NFS4_OPEN_STID: + return nfs4_show_open(s, st); + case NFS4_LOCK_STID: + return nfs4_show_lock(s, st); + case NFS4_DELEG_STID: + return nfs4_show_deleg(s, st); + case NFS4_LAYOUT_STID: + return nfs4_show_layout(s, st); + default: + return 0; /* XXX: or SEQ_SKIP? */ + } + /* XXX: copy stateids? */ +} + +static struct seq_operations states_seq_ops = { + .start = states_start, + .next = states_next, + .stop = states_stop, + .show = states_show +}; + +static int client_states_open(struct inode *inode, struct file *file) +{ + struct seq_file *s; + struct nfs4_client *clp; + int ret; + + clp = get_nfsdfs_clp(inode); + if (!clp) + return -ENXIO; + + ret = seq_open(file, &states_seq_ops); + if (ret) + return ret; + s = file->private_data; + s->private = clp; + return 0; +} + +static int client_opens_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = file->private_data; + struct nfs4_client *clp = m->private; + + /* XXX: alternatively, we could get/drop in seq start/stop */ + drop_client(clp); + return 0; +} + +static const struct file_operations client_states_fops = { + .open = client_states_open, + .read = seq_read, + .llseek = seq_lseek, + .release = client_opens_release, +}; + +/* + * Normally we refuse to destroy clients that are in use, but here the + * administrator is telling us to just do it. 
We also want to wait + * so the caller has a guarantee that the client's locks are gone by + * the time the write returns: + */ +static void force_expire_client(struct nfs4_client *clp) +{ + struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); + bool already_expired; + + spin_lock(&nn->client_lock); + clp->cl_time = 0; + spin_unlock(&nn->client_lock); + + wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0); + spin_lock(&nn->client_lock); + already_expired = list_empty(&clp->cl_lru); + if (!already_expired) + unhash_client_locked(clp); + spin_unlock(&nn->client_lock); + + if (!already_expired) + expire_client(clp); + else + wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL); +} + +static ssize_t client_ctl_write(struct file *file, const char __user *buf, + size_t size, loff_t *pos) +{ + char *data; + struct nfs4_client *clp; + + data = simple_transaction_get(file, buf, size); + if (IS_ERR(data)) + return PTR_ERR(data); + if (size != 7 || 0 != memcmp(data, "expire\n", 7)) + return -EINVAL; + clp = get_nfsdfs_clp(file_inode(file)); + if (!clp) + return -ENXIO; + force_expire_client(clp); + drop_client(clp); + return 7; +} + +static const struct file_operations client_ctl_fops = { + .write = client_ctl_write, + .release = simple_transaction_release, +}; + +static const struct tree_descr client_files[] = { + [0] = {"info", &client_info_fops, S_IRUSR}, + [1] = {"states", &client_states_fops, S_IRUSR}, + [2] = {"ctl", &client_ctl_fops, S_IWUSR}, + [3] = {""}, +}; + static struct nfs4_client *create_client(struct xdr_netobj name, struct svc_rqst *rqstp, nfs4_verifier *verf) { @@ -2168,6 +2723,7 @@ struct sockaddr *sa = svc_addr(rqstp); int ret; struct net *net = SVC_NET(rqstp); + struct nfsd_net *nn = net_generic(net, nfsd_net_id); clp = alloc_client(name); if (clp == NULL) @@ -2178,13 +2734,22 @@ free_client(clp); return NULL; } + gen_clid(clp, nn); + kref_init(&clp->cl_nfsdfs.cl_ref); nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); - clp->cl_time = get_seconds(); + clp->cl_time = ktime_get_boottime_seconds(); clear_bit(0, &clp->cl_cb_slot_busy); copy_verf(clp, verf); - rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); + memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); clp->cl_cb_session = NULL; clp->net = net; + clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs, + clp->cl_clientid.cl_id - nn->clientid_base, + client_files); + if (!clp->cl_nfsd_dentry) { + free_client(clp); + return NULL; + } return clp; } @@ -2345,14 +2910,12 @@ conn->cb_prog = se->se_callback_prog; conn->cb_ident = se->se_callback_ident; memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); + trace_nfsd_cb_args(clp, conn); return; out_err: conn->cb_addr.ss_family = AF_UNSPEC; conn->cb_addrlen = 0; - dprintk("NFSD: this client (clientid %08x/%08x) " - "will not receive delegations\n", - clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); - + trace_nfsd_cb_nodelegs(clp); return; } @@ -2491,7 +3054,23 @@ || !list_empty(&clp->cl_lo_states) #endif || !list_empty(&clp->cl_delegations) - || !list_empty(&clp->cl_sessions); + || !list_empty(&clp->cl_sessions) + || !list_empty(&clp->async_copies); +} + +static __be32 copy_impl_id(struct nfs4_client *clp, + struct nfsd4_exchange_id *exid) +{ + if (!exid->nii_domain.data) + return 0; + xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL); + if (!clp->cl_nii_domain.data) + return nfserr_jukebox; + xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); + if (!clp->cl_nii_name.data) + return 
nfserr_jukebox; + clp->cl_nii_time = exid->nii_time; + return 0; } __be32 @@ -2520,6 +3099,9 @@ new = create_client(exid->clname, rqstp, &verf); if (new == NULL) return nfserr_jukebox; + status = copy_impl_id(new, exid); + if (status) + goto out_nolock; switch (exid->spa_how) { case SP4_MACH_CRED: @@ -2558,6 +3140,7 @@ break; default: /* checked by xdr code */ WARN_ON_ONCE(1); + fallthrough; case SP4_SSV: status = nfserr_encr_alg_unsupp; goto out_nolock; @@ -2627,7 +3210,6 @@ new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; - gen_clid(new, nn); add_to_unconfirmed(new); swap(new, conf); out_copy: @@ -2732,10 +3314,10 @@ * performance. When short on memory we therefore prefer to * decrease number of slots instead of their size. Clients that * request larger slots than they need will get poor results: + * Note that we always allow at least one slot, because our + * accounting is soft and provides no guarantees either way. */ - ca->maxreqs = nfsd4_get_drc_mem(ca); - if (!ca->maxreqs) - return nfserr_jukebox; + ca->maxreqs = nfsd4_get_drc_mem(ca, nn); return nfs_ok; } @@ -2913,7 +3495,7 @@ case NFS4_CDFC4_BACK_OR_BOTH: *dir = NFS4_CDFC4_BOTH; return nfs_ok; - }; + } return nfserr_inval; } @@ -2939,6 +3521,47 @@ return nfs_ok; } +static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) +{ + struct nfsd4_conn *c; + + list_for_each_entry(c, &s->se_conns, cn_persession) { + if (c->cn_xprt == xpt) { + return c; + } + } + return NULL; +} + +static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst, + struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn) +{ + struct nfs4_client *clp = session->se_client; + struct svc_xprt *xpt = rqst->rq_xprt; + struct nfsd4_conn *c; + __be32 status; + + /* Following the last paragraph of RFC 5661 Section 18.34.3: */ + spin_lock(&clp->cl_lock); + c = __nfsd4_find_conn(xpt, session); + if (!c) + status = nfserr_noent; + else if (req == c->cn_flags) + status = nfs_ok; + else if (req == NFS4_CDFC4_FORE_OR_BOTH && + c->cn_flags != NFS4_CDFC4_BACK) + status = nfs_ok; + else if (req == NFS4_CDFC4_BACK_OR_BOTH && + c->cn_flags != NFS4_CDFC4_FORE) + status = nfs_ok; + else + status = nfserr_inval; + spin_unlock(&clp->cl_lock); + if (status == nfs_ok && conn) + *conn = c; + return status; +} + __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) @@ -2959,6 +3582,17 @@ goto out_no_session; status = nfserr_wrong_cred; if (!nfsd4_mach_creds_match(session->se_client, rqstp)) + goto out; + status = nfsd4_match_existing_connection(rqstp, session, + bcts->dir, &conn); + if (status == nfs_ok) { + if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH || + bcts->dir == NFS4_CDFC4_BACK) + conn->cn_flags |= NFS4_CDFC4_BACK; + nfsd4_probe_callback(session->se_client); + goto out; + } + if (status == nfserr_inval) goto out; status = nfsd4_map_bcts_dir(&bcts->dir); if (status) @@ -3023,18 +3657,6 @@ spin_unlock(&nn->client_lock); out: return status; -} - -static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) -{ - struct nfsd4_conn *c; - - list_for_each_entry(c, &s->se_conns, cn_persession) { - if (c->cn_xprt == xpt) { - return c; - } - } - return NULL; } static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) @@ -3331,6 +3953,7 @@ status = nfs_ok; nfsd4_client_record_create(cstate->session->se_client); + 
inc_reclaim_complete(cstate->session->se_client); out: return status; } @@ -3359,23 +3982,18 @@ if (clp_used_exchangeid(conf)) goto out; if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { - char addr_str[INET6_ADDRSTRLEN]; - rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, - sizeof(addr_str)); - dprintk("NFSD: setclientid: string in use by client " - "at %s\n", addr_str); + trace_nfsd_clid_inuse_err(conf); goto out; } } unconf = find_unconfirmed_client_by_name(&clname, nn); if (unconf) unhash_client_locked(unconf); + /* We need to handle only case 1: probable callback update */ if (conf && same_verf(&conf->cl_verifier, &clverifier)) { - /* case 1: probable callback update */ copy_clid(new, conf); gen_confirm(new, nn); - } else /* case 4 (new client) or cases 2, 3 (client reboot): */ - gen_clid(new, nn); + } new->cl_minorversion = 0; gen_callback(new, setclid, rqstp); add_to_unconfirmed(new); @@ -3558,7 +4176,6 @@ out_free_client_slab: kmem_cache_destroy(client_slab); out: - dprintk("nfsd4: out of memory while initializing nfsv4\n"); return -ENOMEM; } @@ -3598,12 +4215,11 @@ if (!sop) return NULL; - sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL); + xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL); if (!sop->so_owner.data) { kmem_cache_free(slab, sop); return NULL; } - sop->so_owner.len = owner->len; INIT_LIST_HEAD(&sop->so_stateids); sop->so_client = clp; @@ -3825,7 +4441,7 @@ last = oo->oo_last_closed_stid; oo->oo_last_closed_stid = s; list_move_tail(&oo->oo_close_lru, &nn->close_lru); - oo->oo_time = get_seconds(); + oo->oo_time = ktime_get_boottime_seconds(); spin_unlock(&nn->client_lock); if (last) nfs4_put_stid(&last->st_stid); @@ -3837,7 +4453,8 @@ { struct nfs4_file *fp; - hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) { + hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash, + lockdep_is_held(&state_lock)) { if (fh_match(&fp->fi_fhandle, fh)) { if (refcount_inc_not_zero(&fp->fi_ref)) return fp; @@ -3920,7 +4537,7 @@ */ spin_lock(&state_lock); if (delegation_hashed(dp) && dp->dl_time == 0) { - dp->dl_time = get_seconds(); + dp->dl_time = ktime_get_boottime_seconds(); list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); } spin_unlock(&state_lock); @@ -3931,12 +4548,16 @@ { struct nfs4_delegation *dp = cb_to_delegation(cb); - if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID) + if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID || + dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) return 1; switch (task->tk_status) { case 0: return 1; + case -NFS4ERR_DELAY: + rpc_delay(task, 2 * HZ); + return 0; case -EBADHANDLE: case -NFS4ERR_BAD_STATEID: /* @@ -3947,9 +4568,9 @@ rpc_delay(task, 2 * HZ); return 0; } - /*FALLTHRU*/ + fallthrough; default: - return -1; + return 1; } } @@ -3987,6 +4608,8 @@ struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; struct nfs4_file *fp = dp->dl_stid.sc_file; + trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid); + /* * We don't want the locks code to timeout the lease for us; * we'll remove it ourself if a delegation isn't returned @@ -4001,6 +4624,30 @@ return ret; } +/** + * nfsd_breaker_owns_lease - Check if lease conflict was resolved + * @fl: Lock state to check + * + * Return values: + * %true: Lease conflict was resolved + * %false: Lease conflict was not resolved. 
+ */ +static bool nfsd_breaker_owns_lease(struct file_lock *fl) +{ + struct nfs4_delegation *dl = fl->fl_owner; + struct svc_rqst *rqst; + struct nfs4_client *clp; + + if (!i_am_nfsd()) + return false; + rqst = kthread_data(current); + /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */ + if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4) + return false; + clp = *(rqst->rq_lease_breaker); + return dl->dl_stid.sc_client == clp; +} + static int nfsd_change_deleg_cb(struct file_lock *onlist, int arg, struct list_head *dispose) @@ -4012,6 +4659,7 @@ } static const struct lock_manager_operations nfsd_lease_mng_ops = { + .lm_breaker_owns_lease = nfsd_breaker_owns_lease, .lm_break = nfsd_break_deleg_cb, .lm_change = nfsd_change_deleg_cb, }; @@ -4029,7 +4677,8 @@ static __be32 lookup_clientid(clientid_t *clid, struct nfsd4_compound_state *cstate, - struct nfsd_net *nn) + struct nfsd_net *nn, + bool sessions) { struct nfs4_client *found; @@ -4050,12 +4699,12 @@ */ WARN_ON_ONCE(cstate->session); spin_lock(&nn->client_lock); - found = find_confirmed_client(clid, false, nn); + found = find_confirmed_client(clid, sessions, nn); if (!found) { spin_unlock(&nn->client_lock); return nfserr_expired; } - atomic_inc(&found->cl_refcount); + atomic_inc(&found->cl_rpc_users); spin_unlock(&nn->client_lock); /* Cache the nfs4_client in cstate! */ @@ -4083,7 +4732,7 @@ if (open->op_file == NULL) return nfserr_jukebox; - status = lookup_clientid(clientid, cstate, nn); + status = lookup_clientid(clientid, cstate, nn, false); if (status) return status; clp = cstate->clp; @@ -4211,14 +4860,14 @@ return 0; if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) return nfserr_inval; - return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); + return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0); } static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) { - struct file *filp = NULL; + struct nfsd_file *nf = NULL; __be32 status; int oflag = nfs4_access_to_omode(open->op_share_access); int access = nfs4_access_to_access(open->op_share_access); @@ -4254,18 +4903,23 @@ if (!fp->fi_fds[oflag]) { spin_unlock(&fp->fi_lock); - status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp); + status = nfsd_file_acquire(rqstp, cur_fh, access, &nf); if (status) goto out_put_access; spin_lock(&fp->fi_lock); if (!fp->fi_fds[oflag]) { - fp->fi_fds[oflag] = filp; - filp = NULL; + fp->fi_fds[oflag] = nf; + nf = NULL; } } spin_unlock(&fp->fi_lock); - if (filp) - fput(filp); + if (nf) + nfsd_file_put(nf); + + status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode, + access)); + if (status) + goto out_put_access; status = nfsd4_truncate(rqstp, cur_fh, open); if (status) @@ -4334,7 +4988,7 @@ fl->fl_end = OFFSET_MAX; fl->fl_owner = (fl_owner_t)dp; fl->fl_pid = current->tgid; - fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file; + fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; return fl; } @@ -4344,7 +4998,7 @@ { int status = 0; struct nfs4_delegation *dp; - struct file *filp; + struct nfsd_file *nf; struct file_lock *fl; /* @@ -4355,8 +5009,8 @@ if (fp->fi_had_conflict) return ERR_PTR(-EAGAIN); - filp = find_readable_file(fp); - if (!filp) { + nf = find_readable_file(fp); + if (!nf) { /* We should always have a readable file here */ WARN_ON_ONCE(1); return ERR_PTR(-EBADF); @@ -4366,17 +5020,17 @@ if (nfs4_delegation_exists(clp, fp)) status = -EAGAIN; else if (!fp->fi_deleg_file) { - fp->fi_deleg_file = filp; + 
fp->fi_deleg_file = nf; /* increment early to prevent fi_deleg_file from being * cleared */ fp->fi_delegees = 1; - filp = NULL; + nf = NULL; } else fp->fi_delegees++; spin_unlock(&fp->fi_lock); spin_unlock(&state_lock); - if (filp) - fput(filp); + if (nf) + nfsd_file_put(nf); if (status) return ERR_PTR(status); @@ -4389,7 +5043,7 @@ if (!fl) goto out_clnt_odstate; - status = vfs_setlease(fp->fi_deleg_file, fl->fl_type, &fl, NULL); + status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL); if (fl) locks_free_lock(fl); if (status) @@ -4409,7 +5063,7 @@ return dp; out_unlock: - vfs_setlease(fp->fi_deleg_file, F_UNLCK, NULL, (void **)&dp); + vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); out_clnt_odstate: put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_stid(&dp->dl_stid); @@ -4496,8 +5150,7 @@ memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); - dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", - STATEID_VAL(&dp->dl_stid.sc_stateid)); + trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; nfs4_put_stid(&dp->dl_stid); return; @@ -4614,9 +5267,7 @@ nfs4_open_delegation(current_fh, open, stp); nodeleg: status = nfs_ok; - - dprintk("%s: stateid=" STATEID_FMT "\n", __func__, - STATEID_VAL(&stp->st_stid.sc_stateid)); + trace_nfsd_open(&stp->st_stid.sc_stateid); out: /* 4.1 client trying to upgrade/downgrade delegation? */ if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && @@ -4670,9 +5321,8 @@ __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); - dprintk("process_renew(%08x/%08x): starting\n", - clid->cl_boot, clid->cl_id); - status = lookup_clientid(clid, cstate, nn); + trace_nfsd_clid_renew(clid); + status = lookup_clientid(clid, cstate, nn, false); if (status) goto out; clp = cstate->clp; @@ -4692,7 +5342,7 @@ if (nn->grace_ended) return; - dprintk("NFSD: end of grace period\n"); + trace_nfsd_grace_complete(nn); nn->grace_ended = true; /* * If the server goes down again right now, an NFSv4 @@ -4724,10 +5374,13 @@ */ static bool clients_still_reclaiming(struct nfsd_net *nn) { - unsigned long now = get_seconds(); - unsigned long double_grace_period_end = nn->boot_time + - 2 * nn->nfsd4_lease; + time64_t double_grace_period_end = nn->boot_time + + 2 * nn->nfsd4_lease; + if (nn->track_reclaim_completes && + atomic_read(&nn->nr_reclaim_complete) == + nn->reclaim_str_hashtbl_size) + return false; if (!nn->somebody_reclaimed) return false; nn->somebody_reclaimed = false; @@ -4735,12 +5388,12 @@ * If we've given them *two* lease times to reclaim, and they're * still not done, give up: */ - if (time_after(now, double_grace_period_end)) + if (ktime_get_boottime_seconds() > double_grace_period_end) return false; return true; } -static time_t +static time64_t nfs4_laundromat(struct nfsd_net *nn) { struct nfs4_client *clp; @@ -4749,10 +5402,11 @@ struct nfs4_ol_stateid *stp; struct nfsd4_blocked_lock *nbl; struct list_head *pos, *next, reaplist; - time_t cutoff = get_seconds() - nn->nfsd4_lease; - time_t t, new_timeo = nn->nfsd4_lease; - - dprintk("NFSD: laundromat service - starting\n"); + time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease; + time64_t t, new_timeo = nn->nfsd4_lease; + struct nfs4_cpntf_state *cps; + copy_stateid_t *cps_t; + int i; if (clients_still_reclaiming(nn)) { new_timeo = 0; @@ -4760,17 +5414,26 @@ } nfsd4_end_grace(nn); INIT_LIST_HEAD(&reaplist); + + spin_lock(&nn->s2s_cp_lock); + 
idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { + cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid); + if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID && + cps->cpntf_time < cutoff) + _free_cpntf_state_locked(nn, cps); + } + spin_unlock(&nn->s2s_cp_lock); + spin_lock(&nn->client_lock); list_for_each_safe(pos, next, &nn->client_lru) { clp = list_entry(pos, struct nfs4_client, cl_lru); - if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { + if (clp->cl_time > cutoff) { t = clp->cl_time - cutoff; new_timeo = min(new_timeo, t); break; } if (mark_client_expired_locked(clp)) { - dprintk("NFSD: client in use (clientid %08x)\n", - clp->cl_clientid.cl_id); + trace_nfsd_clid_expired(&clp->cl_clientid); continue; } list_add(&clp->cl_lru, &reaplist); @@ -4778,15 +5441,14 @@ spin_unlock(&nn->client_lock); list_for_each_safe(pos, next, &reaplist) { clp = list_entry(pos, struct nfs4_client, cl_lru); - dprintk("NFSD: purging unused client (clientid %08x)\n", - clp->cl_clientid.cl_id); + trace_nfsd_clid_purged(&clp->cl_clientid); list_del_init(&clp->cl_lru); expire_client(clp); } spin_lock(&state_lock); list_for_each_safe(pos, next, &nn->del_recall_lru) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); - if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) { + if (dp->dl_time > cutoff) { t = dp->dl_time - cutoff; new_timeo = min(new_timeo, t); break; @@ -4806,8 +5468,7 @@ while (!list_empty(&nn->close_lru)) { oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, oo_close_lru); - if (time_after((unsigned long)oo->oo_time, - (unsigned long)cutoff)) { + if (oo->oo_time > cutoff) { t = oo->oo_time - cutoff; new_timeo = min(new_timeo, t); break; @@ -4837,8 +5498,7 @@ while (!list_empty(&nn->blocked_locks_lru)) { nbl = list_first_entry(&nn->blocked_locks_lru, struct nfsd4_blocked_lock, nbl_lru); - if (time_after((unsigned long)nbl->nbl_time, - (unsigned long)cutoff)) { + if (nbl->nbl_time > cutoff) { t = nbl->nbl_time - cutoff; new_timeo = min(new_timeo, t); break; @@ -4852,11 +5512,10 @@ nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, nbl_lru); list_del_init(&nbl->nbl_lru); - posix_unblock_lock(&nbl->nbl_lock); free_blocked_lock(nbl); } out: - new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); + new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); return new_timeo; } @@ -4866,13 +5525,12 @@ static void laundromat_main(struct work_struct *laundry) { - time_t t; + time64_t t; struct delayed_work *dwork = to_delayed_work(laundry); struct nfsd_net *nn = container_of(dwork, struct nfsd_net, laundromat_work); t = nfs4_laundromat(nn); - dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t); queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); } @@ -4998,15 +5656,6 @@ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || CLOSE_STATEID(stateid)) return status; - /* Client debugging aid. 
*/ - if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { - char addr_str[INET6_ADDRSTRLEN]; - rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str, - sizeof(addr_str)); - pr_warn_ratelimited("NFSD: client %s testing state ID " - "with incorrect client ID\n", addr_str); - return status; - } spin_lock(&cl->cl_lock); s = find_stateid_locked(cl, stateid); if (!s) @@ -5027,7 +5676,7 @@ break; default: printk("unknown stateid type %x\n", s->sc_type); - /* Fallthrough */ + fallthrough; case NFS4_CLOSED_STID: case NFS4_CLOSED_DELEG_STID: status = nfserr_bad_stateid; @@ -5057,7 +5706,8 @@ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || CLOSE_STATEID(stateid)) return nfserr_bad_stateid; - status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); + status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn, + false); if (status == nfserr_stale_clientid) { if (cstate->session) return nfserr_bad_stateid; @@ -5077,7 +5727,7 @@ return nfs_ok; } -static struct file * +static struct nfsd_file * nfs4_find_file(struct nfs4_stid *s, int flags) { if (!s) @@ -5087,21 +5737,20 @@ case NFS4_DELEG_STID: if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file)) return NULL; - return get_file(s->sc_file->fi_deleg_file); + return nfsd_file_get(s->sc_file->fi_deleg_file); case NFS4_OPEN_STID: case NFS4_LOCK_STID: if (flags & RD_STATE) return find_readable_file(s->sc_file); else return find_writeable_file(s->sc_file); - break; } return NULL; } static __be32 -nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags) +nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags) { __be32 status; @@ -5113,32 +5762,107 @@ static __be32 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s, - struct file **filpp, bool *tmp_file, int flags) + struct nfsd_file **nfp, int flags) { int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE; - struct file *file; + struct nfsd_file *nf; __be32 status; - file = nfs4_find_file(s, flags); - if (file) { + nf = nfs4_find_file(s, flags); + if (nf) { status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, acc | NFSD_MAY_OWNER_OVERRIDE); if (status) { - fput(file); - return status; + nfsd_file_put(nf); + goto out; } - - *filpp = file; } else { - status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp); + status = nfsd_file_acquire(rqstp, fhp, acc, &nf); if (status) return status; - - if (tmp_file) - *tmp_file = true; } + *nfp = nf; +out: + return status; +} +static void +_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) +{ + WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID); + if (!refcount_dec_and_test(&cps->cp_stateid.sc_count)) + return; + list_del(&cps->cp_list); + idr_remove(&nn->s2s_cp_stateids, + cps->cp_stateid.stid.si_opaque.so_id); + kfree(cps); +} +/* + * A READ from an inter server to server COPY will have a + * copy stateid. Look up the copy notify stateid from the + * idr structure and take a reference on it. 
+ */ +__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st, + struct nfs4_client *clp, + struct nfs4_cpntf_state **cps) +{ + copy_stateid_t *cps_t; + struct nfs4_cpntf_state *state = NULL; + if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) + return nfserr_bad_stateid; + spin_lock(&nn->s2s_cp_lock); + cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); + if (cps_t) { + state = container_of(cps_t, struct nfs4_cpntf_state, + cp_stateid); + if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) { + state = NULL; + goto unlock; + } + if (!clp) + refcount_inc(&state->cp_stateid.sc_count); + else + _free_cpntf_state_locked(nn, state); + } +unlock: + spin_unlock(&nn->s2s_cp_lock); + if (!state) + return nfserr_bad_stateid; + if (!clp && state) + *cps = state; return 0; +} + +static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st, + struct nfs4_stid **stid) +{ + __be32 status; + struct nfs4_cpntf_state *cps = NULL; + struct nfsd4_compound_state cstate; + + status = manage_cpntf_state(nn, st, NULL, &cps); + if (status) + return status; + + cps->cpntf_time = ktime_get_boottime_seconds(); + memset(&cstate, 0, sizeof(cstate)); + status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true); + if (status) + goto out; + status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid, + NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, + stid, nn); + put_client_renew(cstate.clp); +out: + nfs4_put_cpntf_state(nn, cps); + return status; +} + +void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) +{ + spin_lock(&nn->s2s_cp_lock); + _free_cpntf_state_locked(nn, cps); + spin_unlock(&nn->s2s_cp_lock); } /* @@ -5147,7 +5871,8 @@ __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct svc_fh *fhp, - stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file) + stateid_t *stateid, int flags, struct nfsd_file **nfp, + struct nfs4_stid **cstid) { struct inode *ino = d_inode(fhp->fh_dentry); struct net *net = SVC_NET(rqstp); @@ -5155,10 +5880,8 @@ struct nfs4_stid *s = NULL; __be32 status; - if (filpp) - *filpp = NULL; - if (tmp_file) - *tmp_file = false; + if (nfp) + *nfp = NULL; if (grace_disallows_io(net, ino)) return nfserr_grace; @@ -5171,6 +5894,8 @@ status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s, nn); + if (status == nfserr_bad_stateid) + status = find_cpntf_state(nn, stateid, &s); if (status) return status; status = nfsd4_stid_check_stateid_generation(stateid, s, @@ -5184,7 +5909,7 @@ break; case NFS4_OPEN_STID: case NFS4_LOCK_STID: - status = nfs4_check_olstateid(fhp, openlockstateid(s), flags); + status = nfs4_check_olstateid(openlockstateid(s), flags); break; default: status = nfserr_bad_stateid; @@ -5195,11 +5920,15 @@ status = nfs4_check_fh(fhp, s); done: - if (!status && filpp) - status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags); + if (status == nfs_ok && nfp) + status = nfs4_check_file(rqstp, fhp, s, nfp, flags); out: - if (s) - nfs4_put_stid(s); + if (s) { + if (!status && cstid) + *cstid = s; + else + nfs4_put_stid(s); + } return status; } @@ -5339,8 +6068,7 @@ struct nfs4_stid *s; struct nfs4_ol_stateid *stp = NULL; - dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, - seqid, STATEID_VAL(stateid)); + trace_nfsd_preprocess(seqid, stateid); *stpp = NULL; status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn); @@ -5409,9 +6137,7 @@ oo->oo_flags |= NFS4_OO_CONFIRMED; nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, 
&stp->st_stid); mutex_unlock(&stp->st_mutex); - dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", - __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); - + trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); nfsd4_client_record_create(oo->oo_owner.so_client); status = nfs_ok; put_stateid: @@ -5496,6 +6222,7 @@ struct nfs4_client *clp = s->st_stid.sc_client; bool unhashed; LIST_HEAD(reaplist); + struct nfs4_ol_stateid *stp; spin_lock(&clp->cl_lock); unhashed = unhash_open_stateid(s, &reaplist); @@ -5504,6 +6231,8 @@ if (unhashed) put_ol_stateid_locked(s, &reaplist); spin_unlock(&clp->cl_lock); + list_for_each_entry(stp, &reaplist, st_locks) + nfs4_free_cpntf_statelist(clp->net, &stp->st_stid); free_ol_stateid_reaplist(&reaplist); } else { spin_unlock(&clp->cl_lock); @@ -5685,12 +6414,11 @@ if (fl->fl_lmops == &nfsd_posix_mng_ops) { lo = (struct nfs4_lockowner *) fl->fl_owner; - deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data, - lo->lo_owner.so_owner.len, GFP_KERNEL); + xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner, + GFP_KERNEL); if (!deny->ld_owner.data) /* We just don't care that much */ goto nevermind; - deny->ld_owner.len = lo->lo_owner.so_owner.len; deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; } else { nevermind: @@ -5961,7 +6689,7 @@ struct nfs4_ol_stateid *lock_stp = NULL; struct nfs4_ol_stateid *open_stp = NULL; struct nfs4_file *fp; - struct file *filp = NULL; + struct nfsd_file *nf = NULL; struct nfsd4_blocked_lock *nbl = NULL; struct file_lock *file_lock = NULL; struct file_lock *conflock = NULL; @@ -6040,11 +6768,11 @@ case NFS4_READW_LT: if (nfsd4_has_session(cstate)) fl_flags |= FL_SLEEP; - /* Fallthrough */ + fallthrough; case NFS4_READ_LT: spin_lock(&fp->fi_lock); - filp = find_readable_file_locked(fp); - if (filp) + nf = find_readable_file_locked(fp); + if (nf) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); spin_unlock(&fp->fi_lock); fl_type = F_RDLCK; @@ -6052,11 +6780,11 @@ case NFS4_WRITEW_LT: if (nfsd4_has_session(cstate)) fl_flags |= FL_SLEEP; - /* Fallthrough */ + fallthrough; case NFS4_WRITE_LT: spin_lock(&fp->fi_lock); - filp = find_writeable_file_locked(fp); - if (filp) + nf = find_writeable_file_locked(fp); + if (nf) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); spin_unlock(&fp->fi_lock); fl_type = F_WRLCK; @@ -6066,7 +6794,7 @@ goto out; } - if (!filp) { + if (!nf) { status = nfserr_openmode; goto out; } @@ -6082,7 +6810,7 @@ file_lock->fl_type = fl_type; file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); file_lock->fl_pid = current->tgid; - file_lock->fl_file = filp; + file_lock->fl_file = nf->nf_file; file_lock->fl_flags = fl_flags; file_lock->fl_lmops = &nfsd_posix_mng_ops; file_lock->fl_start = lock->lk_offset; @@ -6097,14 +6825,14 @@ } if (fl_flags & FL_SLEEP) { - nbl->nbl_time = get_seconds(); + nbl->nbl_time = ktime_get_boottime_seconds(); spin_lock(&nn->blocked_locks_lock); list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); spin_unlock(&nn->blocked_locks_lock); } - err = vfs_lock_file(filp, F_SETLK, file_lock, conflock); + err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock); switch (err) { case 0: /* success! 
*/ nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); @@ -6114,7 +6842,7 @@ break; case FILE_LOCK_DEFERRED: nbl = NULL; - /* Fallthrough */ + fallthrough; case -EAGAIN: /* conflock holds conflicting lock */ status = nfserr_denied; dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); @@ -6139,8 +6867,8 @@ } free_blocked_lock(nbl); } - if (filp) - fput(filp); + if (nf) + nfsd_file_put(nf); if (lock_stp) { /* Bump seqid manually if the 4.0 replay owner is openowner */ if (cstate->replay_owner && @@ -6170,17 +6898,27 @@ /* * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, * so we do a temporary open here just to get an open file to pass to - * vfs_test_lock. (Arguably perhaps test_lock should be done with an - * inode operation.) + * vfs_test_lock. */ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) { - struct file *file; - __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file); - if (!err) { - err = nfserrno(vfs_test_lock(file, lock)); - fput(file); - } + struct nfsd_file *nf; + __be32 err; + + err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf); + if (err) + return err; + fh_lock(fhp); /* to block new leases till after test_lock: */ + err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode, + NFSD_MAY_READ)); + if (err) + goto out; + lock->fl_file = nf->nf_file; + err = nfserrno(vfs_test_lock(nf->nf_file, lock)); + lock->fl_file = NULL; +out: + fh_unlock(fhp); + nfsd_file_put(nf); return err; } @@ -6204,7 +6942,8 @@ return nfserr_inval; if (!nfsd4_has_session(cstate)) { - status = lookup_clientid(&lockt->lt_clientid, cstate, nn); + status = lookup_clientid(&lockt->lt_clientid, cstate, nn, + false); if (status) goto out; } @@ -6223,15 +6962,15 @@ case NFS4_READ_LT: case NFS4_READW_LT: file_lock->fl_type = F_RDLCK; - break; + break; case NFS4_WRITE_LT: case NFS4_WRITEW_LT: file_lock->fl_type = F_WRLCK; - break; + break; default: dprintk("NFSD: nfs4_lockt: bad lock type!\n"); status = nfserr_inval; - goto out; + goto out; } lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); @@ -6267,7 +7006,7 @@ { struct nfsd4_locku *locku = &u->locku; struct nfs4_ol_stateid *stp; - struct file *filp = NULL; + struct nfsd_file *nf = NULL; struct file_lock *file_lock = NULL; __be32 status; int err; @@ -6285,8 +7024,8 @@ &stp, nn); if (status) goto out; - filp = find_any_file(stp->st_stid.sc_file); - if (!filp) { + nf = find_any_file(stp->st_stid.sc_file); + if (!nf) { status = nfserr_lock_range; goto put_stateid; } @@ -6294,13 +7033,13 @@ if (!file_lock) { dprintk("NFSD: %s: unable to allocate lock!\n", __func__); status = nfserr_jukebox; - goto fput; + goto put_file; } file_lock->fl_type = F_UNLCK; file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); file_lock->fl_pid = current->tgid; - file_lock->fl_file = filp; + file_lock->fl_file = nf->nf_file; file_lock->fl_flags = FL_POSIX; file_lock->fl_lmops = &nfsd_posix_mng_ops; file_lock->fl_start = locku->lu_offset; @@ -6309,14 +7048,14 @@ locku->lu_length); nfs4_transform_lock_offset(file_lock); - err = vfs_lock_file(filp, F_SETLK, file_lock, NULL); + err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL); if (err) { dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); goto out_nfserr; } nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); -fput: - fput(filp); +put_file: + nfsd_file_put(nf); put_stateid: mutex_unlock(&stp->st_mutex); nfs4_put_stid(&stp->st_stid); @@ -6328,7 +7067,7 @@ out_nfserr: status 
= nfserrno(err); - goto fput; + goto put_file; } /* @@ -6341,17 +7080,17 @@ { struct file_lock *fl; int status = false; - struct file *filp = find_any_file(fp); + struct nfsd_file *nf = find_any_file(fp); struct inode *inode; struct file_lock_context *flctx; - if (!filp) { + if (!nf) { /* Any valid lock stateid should have some sort of access */ WARN_ON_ONCE(1); return status; } - inode = locks_inode(filp); + inode = locks_inode(nf->nf_file); flctx = inode->i_flctx; if (flctx && !list_empty_careful(&flctx->flc_posix)) { @@ -6364,7 +7103,7 @@ } spin_unlock(&flctx->flc_lock); } - fput(filp); + nfsd_file_put(nf); return status; } @@ -6388,7 +7127,7 @@ dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", clid->cl_boot, clid->cl_id); - status = lookup_clientid(clid, cstate, nn); + status = lookup_clientid(clid, cstate, nn, false); if (status) return status; @@ -6401,16 +7140,12 @@ if (sop->so_is_open_owner || !same_owner_str(sop, owner)) continue; - /* see if there are still any locks associated with it */ - lo = lockowner(sop); - list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) { - if (check_for_locks(stp->st_stid.sc_file, lo)) { - status = nfserr_locks_held; - spin_unlock(&clp->cl_lock); - return status; - } + if (atomic_read(&sop->so_count) != 1) { + spin_unlock(&clp->cl_lock); + return nfserr_locks_held; } + lo = lockowner(sop); nfs4_get_stateowner(sop); break; } @@ -6442,7 +7177,7 @@ } bool -nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn) +nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn) { struct nfs4_client_reclaim *crp; @@ -6452,20 +7187,26 @@ /* * failure => all reset bets are off, nfserr_no_grace... + * + * The caller is responsible for freeing name.data if NULL is returned (it + * will be freed in nfs4_remove_reclaim_record in the normal case). */ struct nfs4_client_reclaim * -nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn) +nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash, + struct nfsd_net *nn) { unsigned int strhashval; struct nfs4_client_reclaim *crp; - dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name); crp = alloc_reclaim(); if (crp) { strhashval = clientstr_hashval(name); INIT_LIST_HEAD(&crp->cr_strhash); list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); - memcpy(crp->cr_recdir, name, HEXDIR_LEN); + crp->cr_name.data = name.data; + crp->cr_name.len = name.len; + crp->cr_princhash.data = princhash.data; + crp->cr_princhash.len = princhash.len; crp->cr_clp = NULL; nn->reclaim_str_hashtbl_size++; } @@ -6476,6 +7217,8 @@ nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) { list_del(&crp->cr_strhash); + kfree(crp->cr_name.data); + kfree(crp->cr_princhash.data); kfree(crp); nn->reclaim_str_hashtbl_size--; } @@ -6499,16 +7242,14 @@ /* * called from OPEN, CLAIM_PREVIOUS with a new clientid. 
*/ struct nfs4_client_reclaim * -nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn) +nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn) { unsigned int strhashval; struct nfs4_client_reclaim *crp = NULL; - dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir); - - strhashval = clientstr_hashval(recdir); + strhashval = clientstr_hashval(name); list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { - if (same_name(crp->cr_recdir, recdir)) { + if (compare_blob(&crp->cr_name, &name) == 0) { return crp; } } @@ -6526,7 +7267,7 @@ __be32 status; /* find clientid in conf_id_hashtbl */ - status = lookup_clientid(clid, cstate, nn); + status = lookup_clientid(clid, cstate, nn, false); if (status) return nfserr_reclaim_bad; @@ -6538,596 +7279,6 @@ return nfs_ok; } - -#ifdef CONFIG_NFSD_FAULT_INJECTION -static inline void -put_client(struct nfs4_client *clp) -{ - atomic_dec(&clp->cl_refcount); -} - -static struct nfs4_client * -nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size) -{ - struct nfs4_client *clp; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - - if (!nfsd_netns_ready(nn)) - return NULL; - - list_for_each_entry(clp, &nn->client_lru, cl_lru) { - if (memcmp(&clp->cl_addr, addr, addr_size) == 0) - return clp; - } - return NULL; -} - -u64 -nfsd_inject_print_clients(void) -{ - struct nfs4_client *clp; - u64 count = 0; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - char buf[INET6_ADDRSTRLEN]; - - if (!nfsd_netns_ready(nn)) - return 0; - - spin_lock(&nn->client_lock); - list_for_each_entry(clp, &nn->client_lru, cl_lru) { - rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); - pr_info("NFS Client: %s\n", buf); - ++count; - } - spin_unlock(&nn->client_lock); - - return count; -} - -u64 -nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size) -{ - u64 count = 0; - struct nfs4_client *clp; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - - if (!nfsd_netns_ready(nn)) - return count; - - spin_lock(&nn->client_lock); - clp = nfsd_find_client(addr, addr_size); - if (clp) { - if (mark_client_expired_locked(clp) == nfs_ok) - ++count; - else - clp = NULL; - } - spin_unlock(&nn->client_lock); - - if (clp) - expire_client(clp); - - return count; -} - -u64 -nfsd_inject_forget_clients(u64 max) -{ - u64 count = 0; - struct nfs4_client *clp, *next; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - LIST_HEAD(reaplist); - - if (!nfsd_netns_ready(nn)) - return count; - - spin_lock(&nn->client_lock); - list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { - if (mark_client_expired_locked(clp) == nfs_ok) { - list_add(&clp->cl_lru, &reaplist); - if (max != 0 && ++count >= max) - break; - } - } - spin_unlock(&nn->client_lock); - - list_for_each_entry_safe(clp, next, &reaplist, cl_lru) - expire_client(clp); - - return count; -} - -static void nfsd_print_count(struct nfs4_client *clp, unsigned int count, - const char *type) -{ - char buf[INET6_ADDRSTRLEN]; - rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); - printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type); -} - -static void -nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst, - struct list_head *collect) -{ - struct nfs4_client *clp = lst->st_stid.sc_client; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - - if (!collect) - return; - - 
lockdep_assert_held(&nn->client_lock); - atomic_inc(&clp->cl_refcount); - list_add(&lst->st_locks, collect); -} - -static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, - struct list_head *collect, - bool (*func)(struct nfs4_ol_stateid *)) -{ - struct nfs4_openowner *oop; - struct nfs4_ol_stateid *stp, *st_next; - struct nfs4_ol_stateid *lst, *lst_next; - u64 count = 0; - - spin_lock(&clp->cl_lock); - list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) { - list_for_each_entry_safe(stp, st_next, - &oop->oo_owner.so_stateids, st_perstateowner) { - list_for_each_entry_safe(lst, lst_next, - &stp->st_locks, st_locks) { - if (func) { - if (func(lst)) - nfsd_inject_add_lock_to_list(lst, - collect); - } - ++count; - /* - * Despite the fact that these functions deal - * with 64-bit integers for "count", we must - * ensure that it doesn't blow up the - * clp->cl_refcount. Throw a warning if we - * start to approach INT_MAX here. - */ - WARN_ON_ONCE(count == (INT_MAX / 2)); - if (count == max) - goto out; - } - } - } -out: - spin_unlock(&clp->cl_lock); - - return count; -} - -static u64 -nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect, - u64 max) -{ - return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid); -} - -static u64 -nfsd_print_client_locks(struct nfs4_client *clp) -{ - u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL); - nfsd_print_count(clp, count, "locked files"); - return count; -} - -u64 -nfsd_inject_print_locks(void) -{ - struct nfs4_client *clp; - u64 count = 0; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - - if (!nfsd_netns_ready(nn)) - return 0; - - spin_lock(&nn->client_lock); - list_for_each_entry(clp, &nn->client_lru, cl_lru) - count += nfsd_print_client_locks(clp); - spin_unlock(&nn->client_lock); - - return count; -} - -static void -nfsd_reap_locks(struct list_head *reaplist) -{ - struct nfs4_client *clp; - struct nfs4_ol_stateid *stp, *next; - - list_for_each_entry_safe(stp, next, reaplist, st_locks) { - list_del_init(&stp->st_locks); - clp = stp->st_stid.sc_client; - nfs4_put_stid(&stp->st_stid); - put_client(clp); - } -} - -u64 -nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size) -{ - unsigned int count = 0; - struct nfs4_client *clp; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - LIST_HEAD(reaplist); - - if (!nfsd_netns_ready(nn)) - return count; - - spin_lock(&nn->client_lock); - clp = nfsd_find_client(addr, addr_size); - if (clp) - count = nfsd_collect_client_locks(clp, &reaplist, 0); - spin_unlock(&nn->client_lock); - nfsd_reap_locks(&reaplist); - return count; -} - -u64 -nfsd_inject_forget_locks(u64 max) -{ - u64 count = 0; - struct nfs4_client *clp; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - LIST_HEAD(reaplist); - - if (!nfsd_netns_ready(nn)) - return count; - - spin_lock(&nn->client_lock); - list_for_each_entry(clp, &nn->client_lru, cl_lru) { - count += nfsd_collect_client_locks(clp, &reaplist, max - count); - if (max != 0 && count >= max) - break; - } - spin_unlock(&nn->client_lock); - nfsd_reap_locks(&reaplist); - return count; -} - -static u64 -nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max, - struct list_head *collect, - void (*func)(struct nfs4_openowner *)) -{ - struct nfs4_openowner *oop, *next; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - u64 count = 0; - - lockdep_assert_held(&nn->client_lock); - - 
spin_lock(&clp->cl_lock); - list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) { - if (func) { - func(oop); - if (collect) { - atomic_inc(&clp->cl_refcount); - list_add(&oop->oo_perclient, collect); - } - } - ++count; - /* - * Despite the fact that these functions deal with - * 64-bit integers for "count", we must ensure that - * it doesn't blow up the clp->cl_refcount. Throw a - * warning if we start to approach INT_MAX here. - */ - WARN_ON_ONCE(count == (INT_MAX / 2)); - if (count == max) - break; - } - spin_unlock(&clp->cl_lock); - - return count; -} - -static u64 -nfsd_print_client_openowners(struct nfs4_client *clp) -{ - u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL); - - nfsd_print_count(clp, count, "openowners"); - return count; -} - -static u64 -nfsd_collect_client_openowners(struct nfs4_client *clp, - struct list_head *collect, u64 max) -{ - return nfsd_foreach_client_openowner(clp, max, collect, - unhash_openowner_locked); -} - -u64 -nfsd_inject_print_openowners(void) -{ - struct nfs4_client *clp; - u64 count = 0; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - - if (!nfsd_netns_ready(nn)) - return 0; - - spin_lock(&nn->client_lock); - list_for_each_entry(clp, &nn->client_lru, cl_lru) - count += nfsd_print_client_openowners(clp); - spin_unlock(&nn->client_lock); - - return count; -} - -static void -nfsd_reap_openowners(struct list_head *reaplist) -{ - struct nfs4_client *clp; - struct nfs4_openowner *oop, *next; - - list_for_each_entry_safe(oop, next, reaplist, oo_perclient) { - list_del_init(&oop->oo_perclient); - clp = oop->oo_owner.so_client; - release_openowner(oop); - put_client(clp); - } -} - -u64 -nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr, - size_t addr_size) -{ - unsigned int count = 0; - struct nfs4_client *clp; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - LIST_HEAD(reaplist); - - if (!nfsd_netns_ready(nn)) - return count; - - spin_lock(&nn->client_lock); - clp = nfsd_find_client(addr, addr_size); - if (clp) - count = nfsd_collect_client_openowners(clp, &reaplist, 0); - spin_unlock(&nn->client_lock); - nfsd_reap_openowners(&reaplist); - return count; -} - -u64 -nfsd_inject_forget_openowners(u64 max) -{ - u64 count = 0; - struct nfs4_client *clp; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - LIST_HEAD(reaplist); - - if (!nfsd_netns_ready(nn)) - return count; - - spin_lock(&nn->client_lock); - list_for_each_entry(clp, &nn->client_lru, cl_lru) { - count += nfsd_collect_client_openowners(clp, &reaplist, - max - count); - if (max != 0 && count >= max) - break; - } - spin_unlock(&nn->client_lock); - nfsd_reap_openowners(&reaplist); - return count; -} - -static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, - struct list_head *victims) -{ - struct nfs4_delegation *dp, *next; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - u64 count = 0; - - lockdep_assert_held(&nn->client_lock); - - spin_lock(&state_lock); - list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) { - if (victims) { - /* - * It's not safe to mess with delegations that have a - * non-zero dl_time. They might have already been broken - * and could be processed by the laundromat outside of - * the state_lock. Just leave them be. 
- */ - if (dp->dl_time != 0) - continue; - - atomic_inc(&clp->cl_refcount); - WARN_ON(!unhash_delegation_locked(dp)); - list_add(&dp->dl_recall_lru, victims); - } - ++count; - /* - * Despite the fact that these functions deal with - * 64-bit integers for "count", we must ensure that - * it doesn't blow up the clp->cl_refcount. Throw a - * warning if we start to approach INT_MAX here. - */ - WARN_ON_ONCE(count == (INT_MAX / 2)); - if (count == max) - break; - } - spin_unlock(&state_lock); - return count; -} - -static u64 -nfsd_print_client_delegations(struct nfs4_client *clp) -{ - u64 count = nfsd_find_all_delegations(clp, 0, NULL); - - nfsd_print_count(clp, count, "delegations"); - return count; -} - -u64 -nfsd_inject_print_delegations(void) -{ - struct nfs4_client *clp; - u64 count = 0; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - - if (!nfsd_netns_ready(nn)) - return 0; - - spin_lock(&nn->client_lock); - list_for_each_entry(clp, &nn->client_lru, cl_lru) - count += nfsd_print_client_delegations(clp); - spin_unlock(&nn->client_lock); - - return count; -} - -static void -nfsd_forget_delegations(struct list_head *reaplist) -{ - struct nfs4_client *clp; - struct nfs4_delegation *dp, *next; - - list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { - list_del_init(&dp->dl_recall_lru); - clp = dp->dl_stid.sc_client; - revoke_delegation(dp); - put_client(clp); - } -} - -u64 -nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr, - size_t addr_size) -{ - u64 count = 0; - struct nfs4_client *clp; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - LIST_HEAD(reaplist); - - if (!nfsd_netns_ready(nn)) - return count; - - spin_lock(&nn->client_lock); - clp = nfsd_find_client(addr, addr_size); - if (clp) - count = nfsd_find_all_delegations(clp, 0, &reaplist); - spin_unlock(&nn->client_lock); - - nfsd_forget_delegations(&reaplist); - return count; -} - -u64 -nfsd_inject_forget_delegations(u64 max) -{ - u64 count = 0; - struct nfs4_client *clp; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - LIST_HEAD(reaplist); - - if (!nfsd_netns_ready(nn)) - return count; - - spin_lock(&nn->client_lock); - list_for_each_entry(clp, &nn->client_lru, cl_lru) { - count += nfsd_find_all_delegations(clp, max - count, &reaplist); - if (max != 0 && count >= max) - break; - } - spin_unlock(&nn->client_lock); - nfsd_forget_delegations(&reaplist); - return count; -} - -static void -nfsd_recall_delegations(struct list_head *reaplist) -{ - struct nfs4_client *clp; - struct nfs4_delegation *dp, *next; - - list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { - list_del_init(&dp->dl_recall_lru); - clp = dp->dl_stid.sc_client; - /* - * We skipped all entries that had a zero dl_time before, - * so we can now reset the dl_time back to 0. If a delegation - * break comes in now, then it won't make any difference since - * we're recalling it either way. 
- */ - spin_lock(&state_lock); - dp->dl_time = 0; - spin_unlock(&state_lock); - nfsd_break_one_deleg(dp); - put_client(clp); - } -} - -u64 -nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr, - size_t addr_size) -{ - u64 count = 0; - struct nfs4_client *clp; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - LIST_HEAD(reaplist); - - if (!nfsd_netns_ready(nn)) - return count; - - spin_lock(&nn->client_lock); - clp = nfsd_find_client(addr, addr_size); - if (clp) - count = nfsd_find_all_delegations(clp, 0, &reaplist); - spin_unlock(&nn->client_lock); - - nfsd_recall_delegations(&reaplist); - return count; -} - -u64 -nfsd_inject_recall_delegations(u64 max) -{ - u64 count = 0; - struct nfs4_client *clp, *next; - struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, - nfsd_net_id); - LIST_HEAD(reaplist); - - if (!nfsd_netns_ready(nn)) - return count; - - spin_lock(&nn->client_lock); - list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { - count += nfsd_find_all_delegations(clp, max - count, &reaplist); - if (max != 0 && ++count >= max) - break; - } - spin_unlock(&nn->client_lock); - nfsd_recall_delegations(&reaplist); - return count; -} -#endif /* CONFIG_NFSD_FAULT_INJECTION */ /* * Since the lifetime of a delegation isn't limited to that of an open, a @@ -7179,7 +7330,7 @@ INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); nn->conf_name_tree = RB_ROOT; nn->unconf_name_tree = RB_ROOT; - nn->boot_time = get_seconds(); + nn->boot_time = ktime_get_real_seconds(); nn->grace_ended = false; nn->nfsd4_manager.block_opens = true; INIT_LIST_HEAD(&nn->nfsd4_manager.list); @@ -7187,6 +7338,8 @@ INIT_LIST_HEAD(&nn->close_lru); INIT_LIST_HEAD(&nn->del_recall_lru); spin_lock_init(&nn->client_lock); + spin_lock_init(&nn->s2s_cp_lock); + idr_init(&nn->s2s_cp_stateids); spin_lock_init(&nn->blocked_locks_lock); INIT_LIST_HEAD(&nn->blocked_locks_lru); @@ -7244,9 +7397,19 @@ return ret; locks_start_grace(net, &nn->nfsd4_manager); nfsd4_client_tracking_init(net); - printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n", + if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) + goto skip_grace; + printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", nn->nfsd4_grace, net->ns.inum); + trace_nfsd_grace_start(nn); queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); + return 0; + +skip_grace: + printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n", + net->ns.inum); + queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ); + nfsd4_end_grace(nn); return 0; } @@ -7313,7 +7476,8 @@ static void get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) { - if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid)) + if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) && + CURRENT_STATEID(stateid)) memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); } @@ -7322,14 +7486,14 @@ { if (cstate->minorversion) { memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); - SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); + SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); } } void clear_current_stateid(struct nfsd4_compound_state *cstate) { - CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); + CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); } /* -- Gitblit v1.6.2
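
A note on the reclaim-table rework above: nfs4_client_to_reclaim() now takes the client name (and principal hash) as struct xdr_netobj and only takes ownership of the buffers when it returns a record; on a NULL return the caller still owns them, as the added comment in that hunk states. The sketch below is an illustrative caller, not code from this patch: the function name, the "id"/"idlen" parameters, and the empty principal hash are made up, and it assumes the usual nfsd-internal headers (state.h, netns.h) plus <linux/slab.h> and <linux/sunrpc/xdr.h>.

/*
 * Illustrative caller of the reworked nfs4_client_to_reclaim().  On
 * success the reclaim record owns name.data and frees it later in
 * nfs4_remove_reclaim_record(); on failure the caller must free it.
 * An empty princhash is assumed to be acceptable here (its data
 * pointer stays NULL and kfree(NULL) is a no-op on removal).
 */
static int add_one_reclaim_entry(struct nfsd_net *nn, const char *id, u32 idlen)
{
	struct xdr_netobj name = { .len = idlen };
	struct xdr_netobj princhash = { .len = 0, .data = NULL };
	struct nfs4_client_reclaim *crp;

	name.data = kmemdup(id, idlen, GFP_KERNEL);
	if (!name.data)
		return -ENOMEM;

	crp = nfs4_client_to_reclaim(name, princhash, nn);
	if (!crp) {
		kfree(name.data);	/* ownership was never transferred */
		return -ENOMEM;
	}
	return 0;
}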
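
Related to the same rework, nfsd4_find_reclaim_client() stops matching fixed-length recdir strings (same_name over HEXDIR_LEN bytes) and instead compares opaque blobs with compare_blob(). compare_blob() itself is defined earlier in nfs4state.c and is not part of the hunks shown here; the helper below only sketches the comparison it is expected to perform (length first, then byte-wise memcmp), so treat the body as an assumption rather than the patch's actual implementation.

/*
 * Illustrative only: orders two opaque client identifiers the way the
 * reclaim hash lookup needs -- a shorter blob sorts first, equal
 * lengths fall back to memcmp().  The real compare_blob() lives
 * elsewhere in nfs4state.c and may differ in detail.
 */
static int example_compare_blob(const struct xdr_netobj *o1,
				const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}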
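
Finally, the HAS_STATE_ID/SET_STATE_ID/CLEAR_STATE_ID to HAS_CSTATE_FLAG/SET_CSTATE_FLAG/CLEAR_CSTATE_FLAG switch in the last hunks is a rename of helpers that live in the nfsd XDR header, not in this file, so their definitions do not appear in the diff. A plausible shape is shown below purely to make the rename easier to follow; it assumes they remain simple bit operations on the compound state's sid_flags field.

/* Assumed definitions -- the real macros are in the nfsd headers and
 * are not part of this patch; shown only for orientation. */
#define HAS_CSTATE_FLAG(c, f)	((c)->sid_flags & (f))
#define SET_CSTATE_FLAG(c, f)	((c)->sid_flags |= (f))
#define CLEAR_CSTATE_FLAG(c, f)	((c)->sid_flags &= ~(f))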