From 9999e48639b3cecb08ffb37358bcba3b48161b29 Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Fri, 10 May 2024 08:50:17 +0000 Subject: [PATCH] add ax88772_rst --- kernel/fs/afs/cell.c | 571 ++++++++++++++++++++++++++++++++++++-------------------- 1 files changed, 366 insertions(+), 205 deletions(-) diff --git a/kernel/fs/afs/cell.c b/kernel/fs/afs/cell.c index cce0e23..887b673 100644 --- a/kernel/fs/afs/cell.c +++ b/kernel/fs/afs/cell.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* AFS cell and server record management * * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. */ #include <linux/slab.h> @@ -20,8 +16,12 @@ #include "internal.h" static unsigned __read_mostly afs_cell_gc_delay = 10; +static unsigned __read_mostly afs_cell_min_ttl = 10 * 60; +static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60; +static atomic_t cell_debug_id; -static void afs_manage_cell(struct work_struct *); +static void afs_queue_cell_manager(struct afs_net *); +static void afs_manage_cell_work(struct work_struct *); static void afs_dec_cells_outstanding(struct afs_net *net) { @@ -39,19 +39,22 @@ atomic_inc(&net->cells_outstanding); if (timer_reduce(&net->cells_timer, jiffies + delay * HZ)) afs_dec_cells_outstanding(net); + } else { + afs_queue_cell_manager(net); } } /* - * Look up and get an activation reference on a cell record under RCU - * conditions. The caller must hold the RCU read lock. + * Look up and get an activation reference on a cell record. The caller must + * hold net->cells_lock at least read-locked. */ -struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net, - const char *name, unsigned int namesz) +static struct afs_cell *afs_find_cell_locked(struct afs_net *net, + const char *name, unsigned int namesz, + enum afs_cell_trace reason) { struct afs_cell *cell = NULL; struct rb_node *p; - int n, seq = 0, ret = 0; + int n; _enter("%*.*s", namesz, namesz, name); @@ -60,61 +63,48 @@ if (namesz > AFS_MAXCELLNAME) return ERR_PTR(-ENAMETOOLONG); - do { - /* Unfortunately, rbtree walking doesn't give reliable results - * under just the RCU read lock, so we have to check for - * changes. 
- */ - if (cell) - afs_put_cell(net, cell); - cell = NULL; - ret = -ENOENT; + if (!name) { + cell = net->ws_cell; + if (!cell) + return ERR_PTR(-EDESTADDRREQ); + goto found; + } - read_seqbegin_or_lock(&net->cells_lock, &seq); + p = net->cells.rb_node; + while (p) { + cell = rb_entry(p, struct afs_cell, net_node); - if (!name) { - cell = rcu_dereference_raw(net->ws_cell); - if (cell) { - afs_get_cell(cell); - ret = 0; - break; - } - ret = -EDESTADDRREQ; - continue; - } + n = strncasecmp(cell->name, name, + min_t(size_t, cell->name_len, namesz)); + if (n == 0) + n = cell->name_len - namesz; + if (n < 0) + p = p->rb_left; + else if (n > 0) + p = p->rb_right; + else + goto found; + } - p = rcu_dereference_raw(net->cells.rb_node); - while (p) { - cell = rb_entry(p, struct afs_cell, net_node); + return ERR_PTR(-ENOENT); - n = strncasecmp(cell->name, name, - min_t(size_t, cell->name_len, namesz)); - if (n == 0) - n = cell->name_len - namesz; - if (n < 0) { - p = rcu_dereference_raw(p->rb_left); - } else if (n > 0) { - p = rcu_dereference_raw(p->rb_right); - } else { - if (atomic_inc_not_zero(&cell->usage)) { - ret = 0; - break; - } - /* We want to repeat the search, this time with - * the lock properly locked. - */ - } - cell = NULL; - } +found: + return afs_use_cell(cell, reason); +} - } while (need_seqretry(&net->cells_lock, seq)); +/* + * Look up and get an activation reference on a cell record. + */ +struct afs_cell *afs_find_cell(struct afs_net *net, + const char *name, unsigned int namesz, + enum afs_cell_trace reason) +{ + struct afs_cell *cell; - done_seqretry(&net->cells_lock, seq); - - if (ret != 0 && cell) - afs_put_cell(net, cell); - - return ret == 0 ? cell : ERR_PTR(ret); + down_read(&net->cells_lock); + cell = afs_find_cell_locked(net, name, namesz, reason); + up_read(&net->cells_lock); + return cell; } /* @@ -123,8 +113,9 @@ */ static struct afs_cell *afs_alloc_cell(struct afs_net *net, const char *name, unsigned int namelen, - const char *vllist) + const char *addresses) { + struct afs_vlserver_list *vllist; struct afs_cell *cell; int i, ret; @@ -147,7 +138,7 @@ return ERR_PTR(-EINVAL); } - _enter("%*.*s,%s", namelen, namelen, name, vllist); + _enter("%*.*s,%s", namelen, namelen, name, addresses); cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL); if (!cell) { @@ -155,35 +146,62 @@ return ERR_PTR(-ENOMEM); } + cell->name = kmalloc(namelen + 1, GFP_KERNEL); + if (!cell->name) { + kfree(cell); + return ERR_PTR(-ENOMEM); + } + cell->net = net; cell->name_len = namelen; for (i = 0; i < namelen; i++) cell->name[i] = tolower(name[i]); + cell->name[i] = 0; - atomic_set(&cell->usage, 2); - INIT_WORK(&cell->manager, afs_manage_cell); - cell->flags = ((1 << AFS_CELL_FL_NOT_READY) | - (1 << AFS_CELL_FL_NO_LOOKUP_YET)); - INIT_LIST_HEAD(&cell->proc_volumes); - rwlock_init(&cell->proc_lock); - rwlock_init(&cell->vl_addrs_lock); + atomic_set(&cell->ref, 1); + atomic_set(&cell->active, 0); + INIT_WORK(&cell->manager, afs_manage_cell_work); + cell->volumes = RB_ROOT; + INIT_HLIST_HEAD(&cell->proc_volumes); + seqlock_init(&cell->volume_lock); + cell->fs_servers = RB_ROOT; + seqlock_init(&cell->fs_lock); + rwlock_init(&cell->vl_servers_lock); + cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS); - /* Fill in the VL server list if we were given a list of addresses to - * use. + /* Provide a VL server list, filling it in if we were given a list of + * addresses to use. 
*/ - if (vllist) { - struct afs_addr_list *alist; - - alist = afs_parse_text_addrs(vllist, strlen(vllist), ':', - VL_SERVICE, AFS_VL_PORT); - if (IS_ERR(alist)) { - ret = PTR_ERR(alist); + if (addresses) { + vllist = afs_parse_text_addrs(net, + addresses, strlen(addresses), ':', + VL_SERVICE, AFS_VL_PORT); + if (IS_ERR(vllist)) { + ret = PTR_ERR(vllist); goto parse_failed; } - rcu_assign_pointer(cell->vl_addrs, alist); + vllist->source = DNS_RECORD_FROM_CONFIG; + vllist->status = DNS_LOOKUP_NOT_DONE; cell->dns_expiry = TIME64_MAX; + } else { + ret = -ENOMEM; + vllist = afs_alloc_vlserver_list(0); + if (!vllist) + goto error; + vllist->source = DNS_RECORD_UNAVAILABLE; + vllist->status = DNS_LOOKUP_NOT_DONE; + cell->dns_expiry = ktime_get_real_seconds(); } + + rcu_assign_pointer(cell->vl_servers, vllist); + + cell->dns_source = vllist->source; + cell->dns_status = vllist->status; + smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */ + atomic_inc(&net->cells_outstanding); + cell->debug_id = atomic_inc_return(&cell_debug_id); + trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc); _leave(" = %p", cell); return cell; @@ -191,6 +209,8 @@ parse_failed: if (ret == -EINVAL) printk(KERN_ERR "kAFS: bad VL server IP address\n"); +error: + kfree(cell->name); kfree(cell); _leave(" = %d", ret); return ERR_PTR(ret); @@ -215,14 +235,13 @@ { struct afs_cell *cell, *candidate, *cursor; struct rb_node *parent, **pp; + enum afs_cell_state state; int ret, n; _enter("%s,%s", name, vllist); if (!excl) { - rcu_read_lock(); - cell = afs_lookup_cell_rcu(net, name, namesz); - rcu_read_unlock(); + cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup); if (!IS_ERR(cell)) goto wait_for_cell; } @@ -243,7 +262,7 @@ /* Find the insertion point and check to see if someone else added a * cell whilst we were allocating. */ - write_seqlock(&net->cells_lock); + down_write(&net->cells_lock); pp = &net->cells.rb_node; parent = NULL; @@ -265,27 +284,28 @@ cell = candidate; candidate = NULL; + atomic_set(&cell->active, 2); + trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 2, afs_cell_trace_insert); rb_link_node_rcu(&cell->net_node, parent, pp); rb_insert_color(&cell->net_node, &net->cells); - atomic_inc(&net->cells_outstanding); - write_sequnlock(&net->cells_lock); + up_write(&net->cells_lock); - queue_work(afs_wq, &cell->manager); + afs_queue_cell(cell, afs_cell_trace_get_queue_new); wait_for_cell: + trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), atomic_read(&cell->active), + afs_cell_trace_wait); _debug("wait_for_cell"); - ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NOT_READY, TASK_INTERRUPTIBLE); - smp_rmb(); + wait_var_event(&cell->state, + ({ + state = smp_load_acquire(&cell->state); /* vs error */ + state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED; + })); - switch (READ_ONCE(cell->state)) { - case AFS_CELL_FAILED: + /* Check the state obtained from the wait check. 
*/ + if (state == AFS_CELL_REMOVED) { ret = cell->error; goto error; - default: - _debug("weird %u %d", cell->state, cell->error); - goto error; - case AFS_CELL_ACTIVE: - break; } _leave(" = %p [cell]", cell); @@ -297,16 +317,17 @@ if (excl) { ret = -EEXIST; } else { - afs_get_cell(cursor); + afs_use_cell(cursor, afs_cell_trace_use_lookup); ret = 0; } - write_sequnlock(&net->cells_lock); - kfree(candidate); + up_write(&net->cells_lock); + if (candidate) + afs_put_cell(candidate, afs_cell_trace_put_candidate); if (ret == 0) goto wait_for_cell; goto error_noput; error: - afs_put_cell(net, cell); + afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup); error_noput: _leave(" = %d [error]", ret); return ERR_PTR(ret); @@ -351,15 +372,16 @@ } if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags)) - afs_get_cell(new_root); + afs_use_cell(new_root, afs_cell_trace_use_pin); /* install the new cell */ - write_seqlock(&net->cells_lock); - old_root = rcu_access_pointer(net->ws_cell); - rcu_assign_pointer(net->ws_cell, new_root); - write_sequnlock(&net->cells_lock); + down_write(&net->cells_lock); + afs_see_cell(new_root, afs_cell_trace_see_ws); + old_root = net->ws_cell; + net->ws_cell = new_root; + up_write(&net->cells_lock); - afs_put_cell(net, old_root); + afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws); _leave(" = 0"); return 0; } @@ -367,53 +389,96 @@ /* * Update a cell's VL server address list from the DNS. */ -static void afs_update_cell(struct afs_cell *cell) +static int afs_update_cell(struct afs_cell *cell) { - struct afs_addr_list *alist, *old; - time64_t now, expiry; + struct afs_vlserver_list *vllist, *old = NULL, *p; + unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl); + unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl); + time64_t now, expiry = 0; + int ret = 0; _enter("%s", cell->name); - alist = afs_dns_query(cell, &expiry); - if (IS_ERR(alist)) { - switch (PTR_ERR(alist)) { - case -ENODATA: - /* The DNS said that the cell does not exist */ - set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags); - clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags); - cell->dns_expiry = ktime_get_real_seconds() + 61; - break; + vllist = afs_dns_query(cell, &expiry); + if (IS_ERR(vllist)) { + ret = PTR_ERR(vllist); + _debug("%s: fail %d", cell->name, ret); + if (ret == -ENOMEM) + goto out_wake; + + ret = -ENOMEM; + vllist = afs_alloc_vlserver_list(0); + if (!vllist) + goto out_wake; + + switch (ret) { + case -ENODATA: + case -EDESTADDRREQ: + vllist->status = DNS_LOOKUP_GOT_NOT_FOUND; + break; case -EAGAIN: case -ECONNREFUSED: + vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE; + break; default: - set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags); - cell->dns_expiry = ktime_get_real_seconds() + 10; + vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE; break; } - - cell->error = -EDESTADDRREQ; - } else { - clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags); - clear_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags); - - /* Exclusion on changing vl_addrs is achieved by a - * non-reentrant work item. 
- */ - old = rcu_dereference_protected(cell->vl_addrs, true); - rcu_assign_pointer(cell->vl_addrs, alist); - cell->dns_expiry = expiry; - - if (old) - afs_put_addrlist(old); } - if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags)) - wake_up_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET); + _debug("%s: got list %d %d", cell->name, vllist->source, vllist->status); + cell->dns_status = vllist->status; now = ktime_get_real_seconds(); - afs_set_cell_timer(cell->net, cell->dns_expiry - now); - _leave(""); + if (min_ttl > max_ttl) + max_ttl = min_ttl; + if (expiry < now + min_ttl) + expiry = now + min_ttl; + else if (expiry > now + max_ttl) + expiry = now + max_ttl; + + _debug("%s: status %d", cell->name, vllist->status); + if (vllist->source == DNS_RECORD_UNAVAILABLE) { + switch (vllist->status) { + case DNS_LOOKUP_GOT_NOT_FOUND: + /* The DNS said that the cell does not exist or there + * weren't any addresses to be had. + */ + cell->dns_expiry = expiry; + break; + + case DNS_LOOKUP_BAD: + case DNS_LOOKUP_GOT_LOCAL_FAILURE: + case DNS_LOOKUP_GOT_TEMP_FAILURE: + case DNS_LOOKUP_GOT_NS_FAILURE: + default: + cell->dns_expiry = now + 10; + break; + } + } else { + cell->dns_expiry = expiry; + } + + /* Replace the VL server list if the new record has servers or the old + * record doesn't. + */ + write_lock(&cell->vl_servers_lock); + p = rcu_dereference_protected(cell->vl_servers, true); + if (vllist->nr_servers > 0 || p->nr_servers == 0) { + rcu_assign_pointer(cell->vl_servers, vllist); + cell->dns_source = vllist->source; + old = p; + } + write_unlock(&cell->vl_servers_lock); + afs_put_vlserverlist(cell->net, old); + +out_wake: + smp_store_release(&cell->dns_lookup_count, + cell->dns_lookup_count + 1); /* vs source/status */ + wake_up_var(&cell->dns_lookup_count); + _leave(" = %d", ret); + return ret; } /* @@ -422,15 +487,22 @@ static void afs_cell_destroy(struct rcu_head *rcu) { struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu); + struct afs_net *net = cell->net; + int u; _enter("%p{%s}", cell, cell->name); - ASSERTCMP(atomic_read(&cell->usage), ==, 0); + u = atomic_read(&cell->ref); + ASSERTCMP(u, ==, 0); + trace_afs_cell(cell->debug_id, u, atomic_read(&cell->active), afs_cell_trace_free); - afs_put_addrlist(rcu_access_pointer(cell->vl_addrs)); + afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers)); + afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias); key_put(cell->anonymous_key); + kfree(cell->name); kfree(cell); + afs_dec_cells_outstanding(net); _leave(" [destroyed]"); } @@ -463,18 +535,63 @@ /* * Get a reference on a cell record. */ -struct afs_cell *afs_get_cell(struct afs_cell *cell) +struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason) { - atomic_inc(&cell->usage); + int u; + + if (atomic_read(&cell->ref) <= 0) + BUG(); + + u = atomic_inc_return(&cell->ref); + trace_afs_cell(cell->debug_id, u, atomic_read(&cell->active), reason); return cell; } /* * Drop a reference on a cell record. */ -void afs_put_cell(struct afs_net *net, struct afs_cell *cell) +void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason) { + if (cell) { + unsigned int debug_id = cell->debug_id; + unsigned int u, a; + + a = atomic_read(&cell->active); + u = atomic_dec_return(&cell->ref); + trace_afs_cell(debug_id, u, a, reason); + if (u == 0) { + a = atomic_read(&cell->active); + WARN(a != 0, "Cell active count %u > 0\n", a); + call_rcu(&cell->rcu, afs_cell_destroy); + } + } +} + +/* + * Note a cell becoming more active. 
+ */ +struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason) +{ + int u, a; + + if (atomic_read(&cell->ref) <= 0) + BUG(); + + u = atomic_read(&cell->ref); + a = atomic_inc_return(&cell->active); + trace_afs_cell(cell->debug_id, u, a, reason); + return cell; +} + +/* + * Record a cell becoming less active. When the active counter reaches 1, it + * is scheduled for destruction, but may get reactivated. + */ +void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason) +{ + unsigned int debug_id; time64_t now, expire_delay; + int u, a; if (!cell) return; @@ -484,15 +601,39 @@ now = ktime_get_real_seconds(); cell->last_inactive = now; expire_delay = 0; - if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) && - !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags)) + if (cell->vl_servers->nr_servers) expire_delay = afs_cell_gc_delay; - if (atomic_dec_return(&cell->usage) > 1) - return; + debug_id = cell->debug_id; + u = atomic_read(&cell->ref); + a = atomic_dec_return(&cell->active); + trace_afs_cell(debug_id, u, a, reason); + WARN_ON(a == 0); + if (a == 1) + /* 'cell' may now be garbage collected. */ + afs_set_cell_timer(net, expire_delay); +} - /* 'cell' may now be garbage collected. */ - afs_set_cell_timer(net, expire_delay); +/* + * Note that a cell has been seen. + */ +void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason) +{ + int u, a; + + u = atomic_read(&cell->ref); + a = atomic_read(&cell->active); + trace_afs_cell(cell->debug_id, u, a, reason); +} + +/* + * Queue a cell for management, giving the workqueue a ref to hold. + */ +void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason) +{ + afs_get_cell(cell, reason); + if (!queue_work(afs_wq, &cell->manager)) + afs_put_cell(cell, afs_cell_trace_put_queue_fail); } /* @@ -592,12 +733,10 @@ * Manage a cell record, initialising and destroying it, maintaining its DNS * records. 
*/ -static void afs_manage_cell(struct work_struct *work) +static void afs_manage_cell(struct afs_cell *cell) { - struct afs_cell *cell = container_of(work, struct afs_cell, manager); struct afs_net *net = cell->net; - bool deleted; - int ret, usage; + int ret, active; _enter("%s", cell->name); @@ -606,21 +745,28 @@ switch (cell->state) { case AFS_CELL_INACTIVE: case AFS_CELL_FAILED: - write_seqlock(&net->cells_lock); - usage = 1; - deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0); - if (deleted) + down_write(&net->cells_lock); + active = 1; + if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) { rb_erase(&cell->net_node, &net->cells); - write_sequnlock(&net->cells_lock); - if (deleted) + trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 0, + afs_cell_trace_unuse_delete); + smp_store_release(&cell->state, AFS_CELL_REMOVED); + } + up_write(&net->cells_lock); + if (cell->state == AFS_CELL_REMOVED) { + wake_up_var(&cell->state); goto final_destruction; + } if (cell->state == AFS_CELL_FAILED) goto done; - cell->state = AFS_CELL_UNSET; + smp_store_release(&cell->state, AFS_CELL_UNSET); + wake_up_var(&cell->state); goto again; case AFS_CELL_UNSET: - cell->state = AFS_CELL_ACTIVATING; + smp_store_release(&cell->state, AFS_CELL_ACTIVATING); + wake_up_var(&cell->state); goto again; case AFS_CELL_ACTIVATING: @@ -628,29 +774,33 @@ if (ret < 0) goto activation_failed; - cell->state = AFS_CELL_ACTIVE; - smp_wmb(); - clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags); - wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY); + smp_store_release(&cell->state, AFS_CELL_ACTIVE); + wake_up_var(&cell->state); goto again; case AFS_CELL_ACTIVE: - if (atomic_read(&cell->usage) > 1) { - time64_t now = ktime_get_real_seconds(); - if (cell->dns_expiry <= now && net->live) - afs_update_cell(cell); + if (atomic_read(&cell->active) > 1) { + if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) { + ret = afs_update_cell(cell); + if (ret < 0) + cell->error = ret; + } goto done; } - cell->state = AFS_CELL_DEACTIVATING; + smp_store_release(&cell->state, AFS_CELL_DEACTIVATING); + wake_up_var(&cell->state); goto again; case AFS_CELL_DEACTIVATING: - set_bit(AFS_CELL_FL_NOT_READY, &cell->flags); - if (atomic_read(&cell->usage) > 1) + if (atomic_read(&cell->active) > 1) goto reverse_deactivation; afs_deactivate_cell(net, cell); - cell->state = AFS_CELL_INACTIVE; + smp_store_release(&cell->state, AFS_CELL_INACTIVE); + wake_up_var(&cell->state); goto again; + + case AFS_CELL_REMOVED: + goto done; default: break; @@ -662,17 +812,13 @@ cell->error = ret; afs_deactivate_cell(net, cell); - cell->state = AFS_CELL_FAILED; - smp_wmb(); - if (test_and_clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags)) - wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY); + smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */ + wake_up_var(&cell->state); goto again; reverse_deactivation: - cell->state = AFS_CELL_ACTIVE; - smp_wmb(); - clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags); - wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY); + smp_store_release(&cell->state, AFS_CELL_ACTIVE); + wake_up_var(&cell->state); _leave(" [deact->act]"); return; @@ -681,9 +827,18 @@ return; final_destruction: - call_rcu(&cell->rcu, afs_cell_destroy); - afs_dec_cells_outstanding(net); - _leave(" [destruct %d]", atomic_read(&net->cells_outstanding)); + /* The root volume is pinning the cell */ + afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root); + cell->root_volume = NULL; + afs_put_cell(cell, 
afs_cell_trace_put_destroy); +} + +static void afs_manage_cell_work(struct work_struct *work) +{ + struct afs_cell *cell = container_of(work, struct afs_cell, manager); + + afs_manage_cell(cell); + afs_put_cell(cell, afs_cell_trace_put_queue_work); } /* @@ -712,31 +867,39 @@ * lack of use and cells whose DNS results have expired and dispatch * their managers. */ - read_seqlock_excl(&net->cells_lock); + down_read(&net->cells_lock); for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) { struct afs_cell *cell = rb_entry(cursor, struct afs_cell, net_node); - unsigned usage; + unsigned active; bool sched_cell = false; - usage = atomic_read(&cell->usage); - _debug("manage %s %u", cell->name, usage); + active = atomic_read(&cell->active); + trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), + active, afs_cell_trace_manage); - ASSERTCMP(usage, >=, 1); + ASSERTCMP(active, >=, 1); if (purging) { - if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) - usage = atomic_dec_return(&cell->usage); - ASSERTCMP(usage, ==, 1); + if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) { + active = atomic_dec_return(&cell->active); + trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), + active, afs_cell_trace_unuse_pin); + } } - if (usage == 1) { + if (active == 1) { + struct afs_vlserver_list *vllist; time64_t expire_at = cell->last_inactive; - if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) && - !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags)) + read_lock(&cell->vl_servers_lock); + vllist = rcu_dereference_protected( + cell->vl_servers, + lockdep_is_held(&cell->vl_servers_lock)); + if (vllist->nr_servers > 0) expire_at += afs_cell_gc_delay; + read_unlock(&cell->vl_servers_lock); if (purging || expire_at <= now) sched_cell = true; else if (expire_at < next_manage) @@ -744,17 +907,15 @@ } if (!purging) { - if (cell->dns_expiry <= now) + if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) sched_cell = true; - else if (cell->dns_expiry <= next_manage) - next_manage = cell->dns_expiry; } if (sched_cell) - queue_work(afs_wq, &cell->manager); + afs_queue_cell(cell, afs_cell_trace_get_queue_manage); } - read_sequnlock_excl(&net->cells_lock); + up_read(&net->cells_lock); /* Update the timer on the way out. We have to pass an increment on * cells_outstanding in the namespace that we are in to the timer or @@ -784,11 +945,11 @@ _enter(""); - write_seqlock(&net->cells_lock); - ws = rcu_access_pointer(net->ws_cell); - RCU_INIT_POINTER(net->ws_cell, NULL); - write_sequnlock(&net->cells_lock); - afs_put_cell(net, ws); + down_write(&net->cells_lock); + ws = net->ws_cell; + net->ws_cell = NULL; + up_write(&net->cells_lock); + afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws); _debug("del timer"); if (del_timer_sync(&net->cells_timer)) -- Gitblit v1.6.2
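
Editor's note (illustration only, not part of the patch): the diff's central change is splitting the old single cell->usage counter into two counters — cell->ref, which pins the object's memory and is managed by afs_get_cell()/afs_put_cell(), and cell->active, which counts users and is managed by afs_use_cell()/afs_unuse_cell(), with afs_queue_cell() taking a ref on behalf of the manager work item. The stand-alone C11 sketch below models that pattern outside the kernel; the function names merely mirror the patch, the initial counter values are simplified, and the printf bookkeeping is invented for illustration.

/*
 * Stand-alone C11 sketch (editor's illustration, not kernel code) of the
 * two-counter scheme the patch introduces: "ref" pins the object's memory
 * and "active" counts users.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cell {
	atomic_int ref;		/* memory lifetime: freed when it hits 0 */
	atomic_int active;	/* usage: GC candidate when it falls to 1 */
	char name[64];
};

static struct cell *cell_alloc(const char *name)
{
	struct cell *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	atomic_init(&c->ref, 1);	/* caller's memory reference */
	atomic_init(&c->active, 1);	/* the cell tree's activation (simplified) */
	snprintf(c->name, sizeof(c->name), "%s", name);
	return c;
}

static struct cell *cell_get(struct cell *c)	/* cf. afs_get_cell() */
{
	atomic_fetch_add(&c->ref, 1);
	return c;
}

static void cell_put(struct cell *c)		/* cf. afs_put_cell() */
{
	if (atomic_fetch_sub(&c->ref, 1) == 1) {
		printf("%s: last ref dropped, freeing\n", c->name);
		free(c);	/* the kernel defers this via call_rcu() */
	}
}

static struct cell *cell_use(struct cell *c)	/* cf. afs_use_cell() */
{
	atomic_fetch_add(&c->active, 1);
	return c;
}

static void cell_unuse(struct cell *c)		/* cf. afs_unuse_cell() */
{
	if (atomic_fetch_sub(&c->active, 1) == 2)
		printf("%s: active fell to 1, GC timer would be armed\n",
		       c->name);
}

int main(void)
{
	struct cell *c = cell_alloc("example.org");

	if (!c)
		return 1;
	cell_use(c);		/* e.g. a mount starts using the cell */
	cell_unuse(c);		/* ...and stops: the cell may now be reaped */

	cell_put(cell_get(c));	/* a queued work item pins and releases memory */

	cell_put(c);		/* drop the allocation reference; frees the cell */
	return 0;
}

Splitting the counters lets the manager garbage-collect a cell whose last user has gone (active has fallen to 1, only the tree's activation remains) while work items and tracepoints still keep the memory alive through ref until call_rcu() finally destroys it.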
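
A second, smaller behavioural change sits in the afs_update_cell() hunk: the DNS-supplied record expiry is now bounded to a configurable window (afs_cell_min_ttl defaults to 10 minutes and afs_cell_max_ttl to 24 hours in the patch). The helper below restates that clamp on its own; the function name and the free-standing form are invented for illustration, but the logic follows the hunk.

#include <time.h>

/* Hypothetical restatement of the expiry clamp added to afs_update_cell(). */
time_t clamp_dns_expiry(time_t expiry, time_t now,
			unsigned long min_ttl, unsigned long max_ttl)
{
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;		/* never let min exceed max */
	if (expiry < now + (time_t)min_ttl)
		return now + (time_t)min_ttl;	/* don't re-query too soon */
	if (expiry > now + (time_t)max_ttl)
		return now + (time_t)max_ttl;	/* don't trust old data forever */
	return expiry;
}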