| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
|---|
| 1 | 2 | /* Client connection-specific management code. |
|---|
| 2 | 3 | * |
|---|
| 3 | | - * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. |
|---|
| 4 | + * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved. |
|---|
| 4 | 5 | * Written by David Howells (dhowells@redhat.com) |
|---|
| 5 | | - * |
|---|
| 6 | | - * This program is free software; you can redistribute it and/or |
|---|
| 7 | | - * modify it under the terms of the GNU General Public Licence |
|---|
| 8 | | - * as published by the Free Software Foundation; either version |
|---|
| 9 | | - * 2 of the Licence, or (at your option) any later version. |
|---|
| 10 | | - * |
|---|
| 11 | 6 | * |
|---|
| 12 | 7 | * Client connections need to be cached for a little while after they've made a |
|---|
| 13 | 8 | * call so as to handle retransmitted DATA packets in case the server didn't |
|---|
| 14 | 9 | * receive the final ACK or terminating ABORT we sent it. |
|---|
| 15 | 10 | * |
|---|
| 16 | | - * Client connections can be in one of a number of cache states: |
|---|
| 17 | | - * |
|---|
| 18 | | - * (1) INACTIVE - The connection is not held in any list and may not have been |
|---|
| 19 | | - * exposed to the world. If it has been previously exposed, it was |
|---|
| 20 | | - * discarded from the idle list after expiring. |
|---|
| 21 | | - * |
|---|
| 22 | | - * (2) WAITING - The connection is waiting for the number of client conns to |
|---|
| 23 | | - * drop below the maximum capacity. Calls may be in progress upon it from |
|---|
| 24 | | - * when it was active and got culled. |
|---|
| 25 | | - * |
|---|
| 26 | | - * The connection is on the rxrpc_waiting_client_conns list which is kept |
|---|
| 27 | | - * in to-be-granted order. Culled conns with waiters go to the back of |
|---|
| 28 | | - * the queue just like new conns. |
|---|
| 29 | | - * |
|---|
| 30 | | - * (3) ACTIVE - The connection has at least one call in progress upon it, it |
|---|
| 31 | | - * may freely grant available channels to new calls and calls may be |
|---|
| 32 | | - * waiting on it for channels to become available. |
|---|
| 33 | | - * |
|---|
| 34 | | - * The connection is on the rxnet->active_client_conns list which is kept |
|---|
| 35 | | - * in activation order for culling purposes. |
|---|
| 36 | | - * |
|---|
| 37 | | - * rxrpc_nr_active_client_conns is held incremented also. |
|---|
| 38 | | - * |
|---|
| 39 | | - * (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is |
|---|
| 40 | | - * being used to probe for service upgrade. |
|---|
| 41 | | - * |
|---|
| 42 | | - * (5) CULLED - The connection got summarily culled to try and free up |
|---|
| 43 | | - * capacity. Calls currently in progress on the connection are allowed to |
|---|
| 44 | | - * continue, but new calls will have to wait. There can be no waiters in |
|---|
| 45 | | - * this state - the conn would have to go to the WAITING state instead. |
|---|
| 46 | | - * |
|---|
| 47 | | - * (6) IDLE - The connection has no calls in progress upon it and must have |
|---|
| 48 | | - * been exposed to the world (ie. the EXPOSED flag must be set). When it |
|---|
| 49 | | - * expires, the EXPOSED flag is cleared and the connection transitions to |
|---|
| 50 | | - * the INACTIVE state. |
|---|
| 51 | | - * |
|---|
| 52 | | - * The connection is on the rxnet->idle_client_conns list which is kept in |
|---|
| 53 | | - * order of how soon they'll expire. |
|---|
| 54 | | - * |
|---|
| 55 | 11 | * There are flags of relevance to the cache: |
|---|
| 56 | | - * |
|---|
| 57 | | - * (1) EXPOSED - The connection ID got exposed to the world. If this flag is |
|---|
| 58 | | - * set, an extra ref is added to the connection preventing it from being |
|---|
| 59 | | - * reaped when it has no calls outstanding. This flag is cleared and the |
|---|
| 60 | | - * ref dropped when a conn is discarded from the idle list. |
|---|
| 61 | | - * |
|---|
| 62 | | - * This allows us to move terminal call state retransmission to the |
|---|
| 63 | | - * connection and to discard the call immediately we think it is done |
|---|
| 64 | | - * with. It also give us a chance to reuse the connection. |
|---|
| 65 | 12 | * |
|---|
| 66 | 13 | * (2) DONT_REUSE - The connection should be discarded as soon as possible and |
|---|
| 67 | 14 | * should not be reused. This is set when an exclusive connection is used |
|---|
| .. | .. |
|---|
| 83 | 30 | |
|---|
| 84 | 31 | #include "ar-internal.h" |
|---|
| 85 | 32 | |
|---|
| 86 | | -__read_mostly unsigned int rxrpc_max_client_connections = 1000; |
|---|
| 87 | 33 | __read_mostly unsigned int rxrpc_reap_client_connections = 900; |
|---|
| 88 | 34 | __read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; |
|---|
| 89 | 35 | __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; |
|---|
| .. | .. |
|---|
| 94 | 40 | DEFINE_IDR(rxrpc_client_conn_ids); |
|---|
| 95 | 41 | static DEFINE_SPINLOCK(rxrpc_conn_id_lock); |
|---|
| 96 | 42 | |
|---|
| 97 | | -static void rxrpc_cull_active_client_conns(struct rxrpc_net *); |
|---|
| 43 | +static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle); |
|---|
| 98 | 44 | |
|---|
| 99 | 45 | /* |
|---|
| 100 | 46 | * Get a connection ID and epoch for a client connection from the global pool. |
|---|
| .. | .. |
|---|
| 158 | 104 | if (!idr_is_empty(&rxrpc_client_conn_ids)) { |
|---|
| 159 | 105 | idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) { |
|---|
| 160 | 106 | pr_err("AF_RXRPC: Leaked client conn %p {%d}\n", |
|---|
| 161 | | - conn, atomic_read(&conn->usage)); |
|---|
| 107 | + conn, refcount_read(&conn->ref)); |
|---|
| 162 | 108 | } |
|---|
| 163 | 109 | BUG(); |
|---|
| 164 | 110 | } |
|---|
| .. | .. |
|---|
| 167 | 113 | } |
|---|
| 168 | 114 | |
|---|
| 169 | 115 | /* |
|---|
| 116 | + * Allocate a connection bundle. |
|---|
| 117 | + */ |
|---|
| 118 | +static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp, |
|---|
| 119 | + gfp_t gfp) |
|---|
| 120 | +{ |
|---|
| 121 | + struct rxrpc_bundle *bundle; |
|---|
| 122 | + |
|---|
| 123 | + bundle = kzalloc(sizeof(*bundle), gfp); |
|---|
| 124 | + if (bundle) { |
|---|
| 125 | + bundle->params = *cp; |
|---|
| 126 | + rxrpc_get_peer(bundle->params.peer); |
|---|
| 127 | + refcount_set(&bundle->ref, 1); |
|---|
| 128 | + atomic_set(&bundle->active, 1); |
|---|
| 129 | + spin_lock_init(&bundle->channel_lock); |
|---|
| 130 | + INIT_LIST_HEAD(&bundle->waiting_calls); |
|---|
| 131 | + } |
|---|
| 132 | + return bundle; |
|---|
| 133 | +} |
|---|
| 134 | + |
|---|
| 135 | +struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle) |
|---|
| 136 | +{ |
|---|
| 137 | + refcount_inc(&bundle->ref); |
|---|
| 138 | + return bundle; |
|---|
| 139 | +} |
|---|
| 140 | + |
|---|
| 141 | +static void rxrpc_free_bundle(struct rxrpc_bundle *bundle) |
|---|
| 142 | +{ |
|---|
| 143 | + rxrpc_put_peer(bundle->params.peer); |
|---|
| 144 | + kfree(bundle); |
|---|
| 145 | +} |
|---|
| 146 | + |
|---|
| 147 | +void rxrpc_put_bundle(struct rxrpc_bundle *bundle) |
|---|
| 148 | +{ |
|---|
| 149 | + unsigned int d = bundle->debug_id; |
|---|
| 150 | + bool dead; |
|---|
| 151 | + int r; |
|---|
| 152 | + |
|---|
| 153 | + dead = __refcount_dec_and_test(&bundle->ref, &r); |
|---|
| 154 | + |
|---|
| 155 | + _debug("PUT B=%x %d", d, r - 1); |
|---|
| 156 | + if (dead) |
|---|
| 157 | + rxrpc_free_bundle(bundle); |
|---|
| 158 | +} |
|---|
| 159 | + |
|---|
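Reviewer note: the bundle carries two counters that are easy to conflate. `ref` (a refcount_t) keeps the structure in memory, while `active` counts users that may still want to add or use connections; both start at 1 for the caller in rxrpc_alloc_bundle(). Below is a minimal userspace sketch of that split, with hypothetical names (`bundle_model`, `bundle_alloc`, and so on); it models only the counting, not the kernel's locking or the peer reference the allocator also takes.

```c
/* Minimal model of the bundle's two counters: "ref" keeps the object
 * alive, "active" tracks users that may still add connections.
 * Illustrative sketch only, not the kernel implementation.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct bundle_model {
	atomic_int ref;		/* existence: free the object when this hits zero */
	atomic_int active;	/* usability: unpublish the bundle when this hits zero */
};

static struct bundle_model *bundle_alloc(void)
{
	struct bundle_model *b = calloc(1, sizeof(*b));

	if (b) {
		atomic_init(&b->ref, 1);	/* caller's reference */
		atomic_init(&b->active, 1);	/* caller counts as an active user */
	}
	return b;
}

static struct bundle_model *bundle_get(struct bundle_model *b)
{
	atomic_fetch_add(&b->ref, 1);
	return b;
}

static void bundle_put(struct bundle_model *b)
{
	if (atomic_fetch_sub(&b->ref, 1) == 1)
		free(b);			/* last reference dropped */
}

int main(void)
{
	struct bundle_model *b = bundle_alloc();
	struct bundle_model *again;

	if (!b)
		return 1;
	again = bundle_get(b);
	printf("ref=%d active=%d\n", atomic_load(&b->ref), atomic_load(&b->active));
	bundle_put(again);
	bundle_put(b);
	return 0;
}
```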
| 160 | +/* |
|---|
| 170 | 161 | * Allocate a client connection. |
|---|
| 171 | 162 | */ |
|---|
| 172 | 163 | static struct rxrpc_connection * |
|---|
| 173 | | -rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp) |
|---|
| 164 | +rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp) |
|---|
| 174 | 165 | { |
|---|
| 175 | 166 | struct rxrpc_connection *conn; |
|---|
| 176 | | - struct rxrpc_net *rxnet = cp->local->rxnet; |
|---|
| 167 | + struct rxrpc_net *rxnet = bundle->params.local->rxnet; |
|---|
| 177 | 168 | int ret; |
|---|
| 178 | 169 | |
|---|
| 179 | 170 | _enter(""); |
|---|
| .. | .. |
|---|
| 184 | 175 | return ERR_PTR(-ENOMEM); |
|---|
| 185 | 176 | } |
|---|
| 186 | 177 | |
|---|
| 187 | | - atomic_set(&conn->usage, 1); |
|---|
| 188 | | - if (cp->exclusive) |
|---|
| 189 | | - __set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); |
|---|
| 190 | | - if (cp->upgrade) |
|---|
| 191 | | - __set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags); |
|---|
| 192 | | - |
|---|
| 193 | | - conn->params = *cp; |
|---|
| 178 | + refcount_set(&conn->ref, 1); |
|---|
| 179 | + conn->bundle = bundle; |
|---|
| 180 | + conn->params = bundle->params; |
|---|
| 194 | 181 | conn->out_clientflag = RXRPC_CLIENT_INITIATED; |
|---|
| 195 | 182 | conn->state = RXRPC_CONN_CLIENT; |
|---|
| 196 | | - conn->service_id = cp->service_id; |
|---|
| 183 | + conn->service_id = conn->params.service_id; |
|---|
| 197 | 184 | |
|---|
| 198 | 185 | ret = rxrpc_get_client_connection_id(conn, gfp); |
|---|
| 199 | 186 | if (ret < 0) |
|---|
| .. | .. |
|---|
| 212 | 199 | list_add_tail(&conn->proc_link, &rxnet->conn_proc_list); |
|---|
| 213 | 200 | write_unlock(&rxnet->conn_lock); |
|---|
| 214 | 201 | |
|---|
| 215 | | - /* We steal the caller's peer ref. */ |
|---|
| 216 | | - cp->peer = NULL; |
|---|
| 202 | + rxrpc_get_bundle(bundle); |
|---|
| 203 | + rxrpc_get_peer(conn->params.peer); |
|---|
| 217 | 204 | rxrpc_get_local(conn->params.local); |
|---|
| 218 | 205 | key_get(conn->params.key); |
|---|
| 219 | 206 | |
|---|
| 220 | 207 | trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client, |
|---|
| 221 | | - atomic_read(&conn->usage), |
|---|
| 208 | + refcount_read(&conn->ref), |
|---|
| 222 | 209 | __builtin_return_address(0)); |
|---|
| 210 | + |
|---|
| 211 | + atomic_inc(&rxnet->nr_client_conns); |
|---|
| 223 | 212 | trace_rxrpc_client(conn, -1, rxrpc_client_alloc); |
|---|
| 224 | 213 | _leave(" = %p", conn); |
|---|
| 225 | 214 | return conn; |
|---|
| .. | .. |
|---|
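The allocation hunk above also changes how references are passed: the old code stole the caller's peer reference (`cp->peer = NULL`), while the new code takes its own references on the bundle, peer, local endpoint and key, and bumps rxnet->nr_client_conns. The sketch below contrasts the two styles with a toy refcounted object; `peer_model`, `conn_store_peer` and `conn_steal_peer` are illustrative names, not kernel APIs.

```c
/* Contrast of the two reference-passing styles touched by this hunk.
 * Illustrative userspace sketch with a toy refcounted object.
 */
#include <stdatomic.h>
#include <stdio.h>

struct peer_model {
	atomic_int ref;
};

static struct peer_model *peer_get(struct peer_model *p)
{
	atomic_fetch_add(&p->ref, 1);
	return p;
}

/* New style: the connection takes its own reference; the caller keeps
 * (and later puts) the reference it already held.
 */
static struct peer_model *conn_store_peer(struct peer_model *callers_peer)
{
	return peer_get(callers_peer);
}

/* Old style: the connection "steals" the caller's reference and clears
 * the caller's pointer so the caller must not put it.
 */
static struct peer_model *conn_steal_peer(struct peer_model **callers_peer)
{
	struct peer_model *p = *callers_peer;

	*callers_peer = NULL;
	return p;
}

int main(void)
{
	struct peer_model peer = { .ref = 1 };
	struct peer_model *cp_peer = &peer;

	conn_store_peer(cp_peer);		/* refcount becomes 2, caller still owns cp_peer */
	printf("after store: ref=%d cp_peer=%p\n",
	       atomic_load(&peer.ref), (void *)cp_peer);

	conn_steal_peer(&cp_peer);		/* refcount unchanged, caller's pointer cleared */
	printf("after steal: ref=%d cp_peer=%p\n",
	       atomic_load(&peer.ref), (void *)cp_peer);
	return 0;
}
```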
| 239 | 228 | */ |
|---|
| 240 | 229 | static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn) |
|---|
| 241 | 230 | { |
|---|
| 242 | | - struct rxrpc_net *rxnet = conn->params.local->rxnet; |
|---|
| 231 | + struct rxrpc_net *rxnet; |
|---|
| 243 | 232 | int id_cursor, id, distance, limit; |
|---|
| 244 | 233 | |
|---|
| 234 | + if (!conn) |
|---|
| 235 | + goto dont_reuse; |
|---|
| 236 | + |
|---|
| 237 | + rxnet = conn->params.local->rxnet; |
|---|
| 245 | 238 | if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags)) |
|---|
| 246 | 239 | goto dont_reuse; |
|---|
| 247 | 240 | |
|---|
| 248 | | - if (conn->proto.epoch != rxnet->epoch) |
|---|
| 241 | + if (conn->state != RXRPC_CONN_CLIENT || |
|---|
| 242 | + conn->proto.epoch != rxnet->epoch) |
|---|
| 249 | 243 | goto mark_dont_reuse; |
|---|
| 250 | 244 | |
|---|
| 251 | 245 | /* The IDR tree gets very expensive on memory if the connection IDs are |
|---|
| .. | .. |
|---|
| 259 | 253 | distance = id - id_cursor; |
|---|
| 260 | 254 | if (distance < 0) |
|---|
| 261 | 255 | distance = -distance; |
|---|
| 262 | | - limit = max(rxrpc_max_client_connections * 4, 1024U); |
|---|
| 256 | + limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024); |
|---|
| 263 | 257 | if (distance > limit) |
|---|
| 264 | 258 | goto mark_dont_reuse; |
|---|
| 265 | 259 | |
|---|
| .. | .. |
|---|
| 272 | 266 | } |
|---|
| 273 | 267 | |
|---|
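rxrpc_may_reuse_conn() keeps the connection-ID IDR compact: if a connection's ID has drifted too far behind the allocation cursor it is marked DONT_REUSE so the ID can be retired. A rough standalone model of the distance test follows; `id_too_old` and its parameters are stand-ins for the kernel's IDR cursor and rxnet->nr_conns.

```c
/* Rough model of the "is this connection ID too far from the cursor?"
 * check.  Illustrative only: conn_id, cursor and nr_conns stand in for
 * the kernel's IDR cursor and rxnet->nr_conns.
 */
#include <stdbool.h>
#include <stdio.h>

static bool id_too_old(unsigned int conn_id, unsigned int cursor,
		       unsigned int nr_conns)
{
	long distance = (long)conn_id - (long)cursor;
	long limit;

	if (distance < 0)
		distance = -distance;

	/* Allow roughly four IDs per live connection, but never less than
	 * 1024 so a small ID space is not churned needlessly.
	 */
	limit = (long)nr_conns * 4;
	if (limit < 1024)
		limit = 1024;

	return distance > limit;
}

int main(void)
{
	/* 200 live conns => limit is max(800, 1024) = 1024. */
	printf("%d\n", id_too_old(5000, 3000, 200));	/* distance 2000 > 1024 -> 1 */
	printf("%d\n", id_too_old(3500, 3000, 200));	/* distance  500 <= 1024 -> 0 */
	return 0;
}
```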
| 274 | 268 | /* |
|---|
| 275 | | - * Create or find a client connection to use for a call. |
|---|
| 269 | + * Look up the conn bundle that matches the connection parameters, adding it if |
|---|
| 270 | + * it doesn't yet exist. |
|---|
| 271 | + */ |
|---|
| 272 | +static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp, |
|---|
| 273 | + gfp_t gfp) |
|---|
| 274 | +{ |
|---|
| 275 | + static atomic_t rxrpc_bundle_id; |
|---|
| 276 | + struct rxrpc_bundle *bundle, *candidate; |
|---|
| 277 | + struct rxrpc_local *local = cp->local; |
|---|
| 278 | + struct rb_node *p, **pp, *parent; |
|---|
| 279 | + long diff; |
|---|
| 280 | + |
|---|
| 281 | + _enter("{%px,%x,%u,%u}", |
|---|
| 282 | + cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade); |
|---|
| 283 | + |
|---|
| 284 | + if (cp->exclusive) |
|---|
| 285 | + return rxrpc_alloc_bundle(cp, gfp); |
|---|
| 286 | + |
|---|
| 287 | + /* First, see if the bundle is already there. */ |
|---|
| 288 | + _debug("search 1"); |
|---|
| 289 | + spin_lock(&local->client_bundles_lock); |
|---|
| 290 | + p = local->client_bundles.rb_node; |
|---|
| 291 | + while (p) { |
|---|
| 292 | + bundle = rb_entry(p, struct rxrpc_bundle, local_node); |
|---|
| 293 | + |
|---|
| 294 | +#define cmp(X) ((long)bundle->params.X - (long)cp->X) |
|---|
| 295 | + diff = (cmp(peer) ?: |
|---|
| 296 | + cmp(key) ?: |
|---|
| 297 | + cmp(security_level) ?: |
|---|
| 298 | + cmp(upgrade)); |
|---|
| 299 | +#undef cmp |
|---|
| 300 | + if (diff < 0) |
|---|
| 301 | + p = p->rb_left; |
|---|
| 302 | + else if (diff > 0) |
|---|
| 303 | + p = p->rb_right; |
|---|
| 304 | + else |
|---|
| 305 | + goto found_bundle; |
|---|
| 306 | + } |
|---|
| 307 | + spin_unlock(&local->client_bundles_lock); |
|---|
| 308 | + _debug("not found"); |
|---|
| 309 | + |
|---|
| 310 | + /* It wasn't. We need to add one. */ |
|---|
| 311 | + candidate = rxrpc_alloc_bundle(cp, gfp); |
|---|
| 312 | + if (!candidate) |
|---|
| 313 | + return NULL; |
|---|
| 314 | + |
|---|
| 315 | + _debug("search 2"); |
|---|
| 316 | + spin_lock(&local->client_bundles_lock); |
|---|
| 317 | + pp = &local->client_bundles.rb_node; |
|---|
| 318 | + parent = NULL; |
|---|
| 319 | + while (*pp) { |
|---|
| 320 | + parent = *pp; |
|---|
| 321 | + bundle = rb_entry(parent, struct rxrpc_bundle, local_node); |
|---|
| 322 | + |
|---|
| 323 | +#define cmp(X) ((long)bundle->params.X - (long)cp->X) |
|---|
| 324 | + diff = (cmp(peer) ?: |
|---|
| 325 | + cmp(key) ?: |
|---|
| 326 | + cmp(security_level) ?: |
|---|
| 327 | + cmp(upgrade)); |
|---|
| 328 | +#undef cmp |
|---|
| 329 | + if (diff < 0) |
|---|
| 330 | + pp = &(*pp)->rb_left; |
|---|
| 331 | + else if (diff > 0) |
|---|
| 332 | + pp = &(*pp)->rb_right; |
|---|
| 333 | + else |
|---|
| 334 | + goto found_bundle_free; |
|---|
| 335 | + } |
|---|
| 336 | + |
|---|
| 337 | + _debug("new bundle"); |
|---|
| 338 | + candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id); |
|---|
| 339 | + rb_link_node(&candidate->local_node, parent, pp); |
|---|
| 340 | + rb_insert_color(&candidate->local_node, &local->client_bundles); |
|---|
| 341 | + rxrpc_get_bundle(candidate); |
|---|
| 342 | + spin_unlock(&local->client_bundles_lock); |
|---|
| 343 | + _leave(" = %u [new]", candidate->debug_id); |
|---|
| 344 | + return candidate; |
|---|
| 345 | + |
|---|
| 346 | +found_bundle_free: |
|---|
| 347 | + rxrpc_free_bundle(candidate); |
|---|
| 348 | +found_bundle: |
|---|
| 349 | + rxrpc_get_bundle(bundle); |
|---|
| 350 | + atomic_inc(&bundle->active); |
|---|
| 351 | + spin_unlock(&local->client_bundles_lock); |
|---|
| 352 | + _leave(" = %u [found]", bundle->debug_id); |
|---|
| 353 | + return bundle; |
|---|
| 354 | +} |
|---|
| 355 | + |
|---|
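Both passes over local->client_bundles key the rb-tree on the tuple {peer, key, security_level, upgrade}; the GNU `?:` chain means the first field that differs decides which way to walk. A plain-C equivalent of that comparator is sketched below; `params_model` and `params_cmp` are illustrative stand-ins, not the kernel's rxrpc_conn_parameters.

```c
/* Stand-in for the bundle search key; the real tree is keyed on
 * rxrpc_conn_parameters.  Illustrative sketch only.
 */
#include <stdio.h>

struct params_model {
	long peer;			/* pointer value in the kernel */
	long key;			/* pointer value in the kernel */
	unsigned int security_level;
	unsigned int upgrade;
};

/* The first field that differs decides the walk: <0 means go left,
 * >0 means go right, 0 means the bundle matches and can be shared.
 */
static long params_cmp(const struct params_model *node,
		       const struct params_model *want)
{
	long diff;

	if ((diff = node->peer - want->peer))
		return diff;
	if ((diff = node->key - want->key))
		return diff;
	if ((diff = (long)node->security_level - (long)want->security_level))
		return diff;
	return (long)node->upgrade - (long)want->upgrade;
}

int main(void)
{
	struct params_model node = { .peer = 10, .key = 3, .security_level = 1 };
	struct params_model want = { .peer = 10, .key = 3, .security_level = 2 };

	printf("%ld\n", params_cmp(&node, &want));	/* -1: search continues left */
	return 0;
}
```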
| 356 | +/* |
|---|
| 357 | + * Create or find a client bundle to use for a call. |
|---|
| 276 | 358 | * |
|---|
| 277 | 359 | * If we return with a connection, the call will be on its waiting list. It's |
|---|
| 278 | 360 | * left to the caller to assign a channel and wake up the call. |
|---|
| 279 | 361 | */ |
|---|
| 280 | | -static int rxrpc_get_client_conn(struct rxrpc_sock *rx, |
|---|
| 281 | | - struct rxrpc_call *call, |
|---|
| 282 | | - struct rxrpc_conn_parameters *cp, |
|---|
| 283 | | - struct sockaddr_rxrpc *srx, |
|---|
| 284 | | - gfp_t gfp) |
|---|
| 362 | +static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx, |
|---|
| 363 | + struct rxrpc_call *call, |
|---|
| 364 | + struct rxrpc_conn_parameters *cp, |
|---|
| 365 | + struct sockaddr_rxrpc *srx, |
|---|
| 366 | + gfp_t gfp) |
|---|
| 285 | 367 | { |
|---|
| 286 | | - struct rxrpc_connection *conn, *candidate = NULL; |
|---|
| 287 | | - struct rxrpc_local *local = cp->local; |
|---|
| 288 | | - struct rb_node *p, **pp, *parent; |
|---|
| 289 | | - long diff; |
|---|
| 290 | | - int ret = -ENOMEM; |
|---|
| 368 | + struct rxrpc_bundle *bundle; |
|---|
| 291 | 369 | |
|---|
| 292 | 370 | _enter("{%d,%lx},", call->debug_id, call->user_call_ID); |
|---|
| 293 | 371 | |
|---|
| .. | .. |
|---|
| 300 | 378 | call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE; |
|---|
| 301 | 379 | else |
|---|
| 302 | 380 | call->cong_mode = RXRPC_CALL_SLOW_START; |
|---|
| 381 | + if (cp->upgrade) |
|---|
| 382 | + __set_bit(RXRPC_CALL_UPGRADE, &call->flags); |
|---|
| 303 | 383 | |
|---|
| 304 | | - /* If the connection is not meant to be exclusive, search the available |
|---|
| 305 | | - * connections to see if the connection we want to use already exists. |
|---|
| 384 | + /* Find the client connection bundle. */ |
|---|
| 385 | + bundle = rxrpc_look_up_bundle(cp, gfp); |
|---|
| 386 | + if (!bundle) |
|---|
| 387 | + goto error; |
|---|
| 388 | + |
|---|
| 389 | + /* Get this call queued. Someone else may activate it whilst we're |
|---|
| 390 | + * lining up a new connection, but that's fine. |
|---|
| 306 | 391 | */ |
|---|
| 307 | | - if (!cp->exclusive) { |
|---|
| 308 | | - _debug("search 1"); |
|---|
| 309 | | - spin_lock(&local->client_conns_lock); |
|---|
| 310 | | - p = local->client_conns.rb_node; |
|---|
| 311 | | - while (p) { |
|---|
| 312 | | - conn = rb_entry(p, struct rxrpc_connection, client_node); |
|---|
| 392 | + spin_lock(&bundle->channel_lock); |
|---|
| 393 | + list_add_tail(&call->chan_wait_link, &bundle->waiting_calls); |
|---|
| 394 | + spin_unlock(&bundle->channel_lock); |
|---|
| 313 | 395 | |
|---|
| 314 | | -#define cmp(X) ((long)conn->params.X - (long)cp->X) |
|---|
| 315 | | - diff = (cmp(peer) ?: |
|---|
| 316 | | - cmp(key) ?: |
|---|
| 317 | | - cmp(security_level) ?: |
|---|
| 318 | | - cmp(upgrade)); |
|---|
| 319 | | -#undef cmp |
|---|
| 320 | | - if (diff < 0) { |
|---|
| 321 | | - p = p->rb_left; |
|---|
| 322 | | - } else if (diff > 0) { |
|---|
| 323 | | - p = p->rb_right; |
|---|
| 324 | | - } else { |
|---|
| 325 | | - if (rxrpc_may_reuse_conn(conn) && |
|---|
| 326 | | - rxrpc_get_connection_maybe(conn)) |
|---|
| 327 | | - goto found_extant_conn; |
|---|
| 328 | | - /* The connection needs replacing. It's better |
|---|
| 329 | | - * to effect that when we have something to |
|---|
| 330 | | - * replace it with so that we don't have to |
|---|
| 331 | | - * rebalance the tree twice. |
|---|
| 332 | | - */ |
|---|
| 333 | | - break; |
|---|
| 334 | | - } |
|---|
| 335 | | - } |
|---|
| 336 | | - spin_unlock(&local->client_conns_lock); |
|---|
| 396 | + _leave(" = [B=%x]", bundle->debug_id); |
|---|
| 397 | + return bundle; |
|---|
| 398 | + |
|---|
| 399 | +error: |
|---|
| 400 | + _leave(" = -ENOMEM"); |
|---|
| 401 | + return ERR_PTR(-ENOMEM); |
|---|
| 402 | +} |
|---|
| 403 | + |
|---|
| 404 | +/* |
|---|
| 405 | + * Allocate a new connection and add it into a bundle. |
|---|
| 406 | + */ |
|---|
| 407 | +static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp) |
|---|
| 408 | + __releases(bundle->channel_lock) |
|---|
| 409 | +{ |
|---|
| 410 | + struct rxrpc_connection *candidate = NULL, *old = NULL; |
|---|
| 411 | + bool conflict; |
|---|
| 412 | + int i; |
|---|
| 413 | + |
|---|
| 414 | + _enter(""); |
|---|
| 415 | + |
|---|
| 416 | + conflict = bundle->alloc_conn; |
|---|
| 417 | + if (!conflict) |
|---|
| 418 | + bundle->alloc_conn = true; |
|---|
| 419 | + spin_unlock(&bundle->channel_lock); |
|---|
| 420 | + if (conflict) { |
|---|
| 421 | + _leave(" [conf]"); |
|---|
| 422 | + return; |
|---|
| 337 | 423 | } |
|---|
| 338 | 424 | |
|---|
| 339 | | - /* There wasn't a connection yet or we need an exclusive connection. |
|---|
| 340 | | - * We need to create a candidate and then potentially redo the search |
|---|
| 341 | | - * in case we're racing with another thread also trying to connect on a |
|---|
| 342 | | - * shareable connection. |
|---|
| 343 | | - */ |
|---|
| 344 | | - _debug("new conn"); |
|---|
| 345 | | - candidate = rxrpc_alloc_client_connection(cp, gfp); |
|---|
| 425 | + candidate = rxrpc_alloc_client_connection(bundle, gfp); |
|---|
| 426 | + |
|---|
| 427 | + spin_lock(&bundle->channel_lock); |
|---|
| 428 | + bundle->alloc_conn = false; |
|---|
| 429 | + |
|---|
| 346 | 430 | if (IS_ERR(candidate)) { |
|---|
| 347 | | - ret = PTR_ERR(candidate); |
|---|
| 348 | | - goto error_peer; |
|---|
| 431 | + bundle->alloc_error = PTR_ERR(candidate); |
|---|
| 432 | + spin_unlock(&bundle->channel_lock); |
|---|
| 433 | + _leave(" [err %ld]", PTR_ERR(candidate)); |
|---|
| 434 | + return; |
|---|
| 349 | 435 | } |
|---|
| 350 | 436 | |
|---|
| 351 | | - /* Add the call to the new connection's waiting list in case we're |
|---|
| 352 | | - * going to have to wait for the connection to come live. It's our |
|---|
| 353 | | - * connection, so we want first dibs on the channel slots. We would |
|---|
| 354 | | - * normally have to take channel_lock but we do this before anyone else |
|---|
| 355 | | - * can see the connection. |
|---|
| 356 | | - */ |
|---|
| 357 | | - list_add(&call->chan_wait_link, &candidate->waiting_calls); |
|---|
| 437 | + bundle->alloc_error = 0; |
|---|
| 358 | 438 | |
|---|
| 359 | | - if (cp->exclusive) { |
|---|
| 360 | | - call->conn = candidate; |
|---|
| 361 | | - call->security_ix = candidate->security_ix; |
|---|
| 362 | | - call->service_id = candidate->service_id; |
|---|
| 363 | | - _leave(" = 0 [exclusive %d]", candidate->debug_id); |
|---|
| 364 | | - return 0; |
|---|
| 365 | | - } |
|---|
| 439 | + for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) { |
|---|
| 440 | + unsigned int shift = i * RXRPC_MAXCALLS; |
|---|
| 441 | + int j; |
|---|
| 366 | 442 | |
|---|
| 367 | | - /* Publish the new connection for userspace to find. We need to redo |
|---|
| 368 | | - * the search before doing this lest we race with someone else adding a |
|---|
| 369 | | - * conflicting instance. |
|---|
| 370 | | - */ |
|---|
| 371 | | - _debug("search 2"); |
|---|
| 372 | | - spin_lock(&local->client_conns_lock); |
|---|
| 373 | | - |
|---|
| 374 | | - pp = &local->client_conns.rb_node; |
|---|
| 375 | | - parent = NULL; |
|---|
| 376 | | - while (*pp) { |
|---|
| 377 | | - parent = *pp; |
|---|
| 378 | | - conn = rb_entry(parent, struct rxrpc_connection, client_node); |
|---|
| 379 | | - |
|---|
| 380 | | -#define cmp(X) ((long)conn->params.X - (long)candidate->params.X) |
|---|
| 381 | | - diff = (cmp(peer) ?: |
|---|
| 382 | | - cmp(key) ?: |
|---|
| 383 | | - cmp(security_level) ?: |
|---|
| 384 | | - cmp(upgrade)); |
|---|
| 385 | | -#undef cmp |
|---|
| 386 | | - if (diff < 0) { |
|---|
| 387 | | - pp = &(*pp)->rb_left; |
|---|
| 388 | | - } else if (diff > 0) { |
|---|
| 389 | | - pp = &(*pp)->rb_right; |
|---|
| 390 | | - } else { |
|---|
| 391 | | - if (rxrpc_may_reuse_conn(conn) && |
|---|
| 392 | | - rxrpc_get_connection_maybe(conn)) |
|---|
| 393 | | - goto found_extant_conn; |
|---|
| 394 | | - /* The old connection is from an outdated epoch. */ |
|---|
| 395 | | - _debug("replace conn"); |
|---|
| 396 | | - clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags); |
|---|
| 397 | | - rb_replace_node(&conn->client_node, |
|---|
| 398 | | - &candidate->client_node, |
|---|
| 399 | | - &local->client_conns); |
|---|
| 400 | | - trace_rxrpc_client(conn, -1, rxrpc_client_replace); |
|---|
| 401 | | - goto candidate_published; |
|---|
| 443 | + old = bundle->conns[i]; |
|---|
| 444 | + if (!rxrpc_may_reuse_conn(old)) { |
|---|
| 445 | + if (old) |
|---|
| 446 | + trace_rxrpc_client(old, -1, rxrpc_client_replace); |
|---|
| 447 | + candidate->bundle_shift = shift; |
|---|
| 448 | + atomic_inc(&bundle->active); |
|---|
| 449 | + bundle->conns[i] = candidate; |
|---|
| 450 | + for (j = 0; j < RXRPC_MAXCALLS; j++) |
|---|
| 451 | + set_bit(shift + j, &bundle->avail_chans); |
|---|
| 452 | + candidate = NULL; |
|---|
| 453 | + break; |
|---|
| 402 | 454 | } |
|---|
| 455 | + |
|---|
| 456 | + old = NULL; |
|---|
| 403 | 457 | } |
|---|
| 404 | 458 | |
|---|
| 405 | | - _debug("new conn"); |
|---|
| 406 | | - rb_link_node(&candidate->client_node, parent, pp); |
|---|
| 407 | | - rb_insert_color(&candidate->client_node, &local->client_conns); |
|---|
| 408 | | - |
|---|
| 409 | | -candidate_published: |
|---|
| 410 | | - set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags); |
|---|
| 411 | | - call->conn = candidate; |
|---|
| 412 | | - call->security_ix = candidate->security_ix; |
|---|
| 413 | | - call->service_id = candidate->service_id; |
|---|
| 414 | | - spin_unlock(&local->client_conns_lock); |
|---|
| 415 | | - _leave(" = 0 [new %d]", candidate->debug_id); |
|---|
| 416 | | - return 0; |
|---|
| 417 | | - |
|---|
| 418 | | - /* We come here if we found a suitable connection already in existence. |
|---|
| 419 | | - * Discard any candidate we may have allocated, and try to get a |
|---|
| 420 | | - * channel on this one. |
|---|
| 421 | | - */ |
|---|
| 422 | | -found_extant_conn: |
|---|
| 423 | | - _debug("found conn"); |
|---|
| 424 | | - spin_unlock(&local->client_conns_lock); |
|---|
| 459 | + spin_unlock(&bundle->channel_lock); |
|---|
| 425 | 460 | |
|---|
| 426 | 461 | if (candidate) { |
|---|
| 462 | + _debug("discard C=%x", candidate->debug_id); |
|---|
| 427 | 463 | trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate); |
|---|
| 428 | 464 | rxrpc_put_connection(candidate); |
|---|
| 429 | | - candidate = NULL; |
|---|
| 430 | 465 | } |
|---|
| 431 | 466 | |
|---|
| 432 | | - spin_lock(&conn->channel_lock); |
|---|
| 433 | | - call->conn = conn; |
|---|
| 434 | | - call->security_ix = conn->security_ix; |
|---|
| 435 | | - call->service_id = conn->service_id; |
|---|
| 436 | | - list_add_tail(&call->chan_wait_link, &conn->waiting_calls); |
|---|
| 437 | | - spin_unlock(&conn->channel_lock); |
|---|
| 438 | | - _leave(" = 0 [extant %d]", conn->debug_id); |
|---|
| 439 | | - return 0; |
|---|
| 440 | | - |
|---|
| 441 | | -error_peer: |
|---|
| 442 | | - rxrpc_put_peer(cp->peer); |
|---|
| 443 | | - cp->peer = NULL; |
|---|
| 444 | | -error: |
|---|
| 445 | | - _leave(" = %d", ret); |
|---|
| 446 | | - return ret; |
|---|
| 467 | + rxrpc_put_connection(old); |
|---|
| 468 | + _leave(""); |
|---|
| 447 | 469 | } |
|---|
| 448 | 470 | |
|---|
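Each slot i of the bundle owns a contiguous group of RXRPC_MAXCALLS bits in avail_chans, so publishing a new connection in slot i sets bits i*RXRPC_MAXCALLS through i*RXRPC_MAXCALLS + RXRPC_MAXCALLS - 1, exactly as the loop over `shift + j` does above. A tiny model of that layout follows, assuming four calls per connection and four connections per bundle as in this patch; the names are illustrative.

```c
/* Model of the bundle's avail_chans layout: slot i owns bits
 * [i*MAXCALLS, i*MAXCALLS + MAXCALLS) of one unsigned long.
 * MAXCALLS and MAX_CONNS are assumptions taken from the patch
 * (four calls per connection, four connections per bundle).
 */
#include <stdio.h>

#define MAXCALLS  4
#define MAX_CONNS 4

static unsigned long publish_slot(unsigned long avail_chans, unsigned int slot)
{
	unsigned int shift = slot * MAXCALLS;
	unsigned int j;

	for (j = 0; j < MAXCALLS; j++)
		avail_chans |= 1UL << (shift + j);
	return avail_chans;
}

int main(void)
{
	unsigned long avail = 0;

	avail = publish_slot(avail, 0);		/* bits 0-3  */
	avail = publish_slot(avail, 2);		/* bits 8-11 */
	printf("avail_chans = %#lx\n", avail);	/* prints 0xf0f */
	return 0;
}
```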
| 449 | 471 | /* |
|---|
| 450 | | - * Activate a connection. |
|---|
| 472 | + * Add a connection to a bundle if there are no usable connections or we have |
|---|
| 473 | + * connections waiting for extra capacity. |
|---|
| 451 | 474 | */ |
|---|
| 452 | | -static void rxrpc_activate_conn(struct rxrpc_net *rxnet, |
|---|
| 453 | | - struct rxrpc_connection *conn) |
|---|
| 475 | +static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp) |
|---|
| 454 | 476 | { |
|---|
| 455 | | - if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) { |
|---|
| 456 | | - trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade); |
|---|
| 457 | | - conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE; |
|---|
| 458 | | - } else { |
|---|
| 459 | | - trace_rxrpc_client(conn, -1, rxrpc_client_to_active); |
|---|
| 460 | | - conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE; |
|---|
| 461 | | - } |
|---|
| 462 | | - rxnet->nr_active_client_conns++; |
|---|
| 463 | | - list_move_tail(&conn->cache_link, &rxnet->active_client_conns); |
|---|
| 464 | | -} |
|---|
| 477 | + struct rxrpc_call *call; |
|---|
| 478 | + int i, usable; |
|---|
| 465 | 479 | |
|---|
| 466 | | -/* |
|---|
| 467 | | - * Attempt to animate a connection for a new call. |
|---|
| 468 | | - * |
|---|
| 469 | | - * If it's not exclusive, the connection is in the endpoint tree, and we're in |
|---|
| 470 | | - * the conn's list of those waiting to grab a channel. There is, however, a |
|---|
| 471 | | - * limit on the number of live connections allowed at any one time, so we may |
|---|
| 472 | | - * have to wait for capacity to become available. |
|---|
| 473 | | - * |
|---|
| 474 | | - * Note that a connection on the waiting queue might *also* have active |
|---|
| 475 | | - * channels if it has been culled to make space and then re-requested by a new |
|---|
| 476 | | - * call. |
|---|
| 477 | | - */ |
|---|
| 478 | | -static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet, |
|---|
| 479 | | - struct rxrpc_connection *conn) |
|---|
| 480 | | -{ |
|---|
| 481 | | - unsigned int nr_conns; |
|---|
| 480 | + _enter(""); |
|---|
| 482 | 481 | |
|---|
| 483 | | - _enter("%d,%d", conn->debug_id, conn->cache_state); |
|---|
| 482 | + spin_lock(&bundle->channel_lock); |
|---|
| 484 | 483 | |
|---|
| 485 | | - if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE || |
|---|
| 486 | | - conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE) |
|---|
| 487 | | - goto out; |
|---|
| 484 | + /* See if there are any usable connections. */ |
|---|
| 485 | + usable = 0; |
|---|
| 486 | + for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) |
|---|
| 487 | + if (rxrpc_may_reuse_conn(bundle->conns[i])) |
|---|
| 488 | + usable++; |
|---|
| 488 | 489 | |
|---|
| 489 | | - spin_lock(&rxnet->client_conn_cache_lock); |
|---|
| 490 | | - |
|---|
| 491 | | - nr_conns = rxnet->nr_client_conns; |
|---|
| 492 | | - if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) { |
|---|
| 493 | | - trace_rxrpc_client(conn, -1, rxrpc_client_count); |
|---|
| 494 | | - rxnet->nr_client_conns = nr_conns + 1; |
|---|
| 490 | + if (!usable && !list_empty(&bundle->waiting_calls)) { |
|---|
| 491 | + call = list_first_entry(&bundle->waiting_calls, |
|---|
| 492 | + struct rxrpc_call, chan_wait_link); |
|---|
| 493 | + if (test_bit(RXRPC_CALL_UPGRADE, &call->flags)) |
|---|
| 494 | + bundle->try_upgrade = true; |
|---|
| 495 | 495 | } |
|---|
| 496 | 496 | |
|---|
| 497 | | - switch (conn->cache_state) { |
|---|
| 498 | | - case RXRPC_CONN_CLIENT_ACTIVE: |
|---|
| 499 | | - case RXRPC_CONN_CLIENT_UPGRADE: |
|---|
| 500 | | - case RXRPC_CONN_CLIENT_WAITING: |
|---|
| 501 | | - break; |
|---|
| 497 | + if (!usable) |
|---|
| 498 | + goto alloc_conn; |
|---|
| 502 | 499 | |
|---|
| 503 | | - case RXRPC_CONN_CLIENT_INACTIVE: |
|---|
| 504 | | - case RXRPC_CONN_CLIENT_CULLED: |
|---|
| 505 | | - case RXRPC_CONN_CLIENT_IDLE: |
|---|
| 506 | | - if (nr_conns >= rxrpc_max_client_connections) |
|---|
| 507 | | - goto wait_for_capacity; |
|---|
| 508 | | - goto activate_conn; |
|---|
| 500 | + if (!bundle->avail_chans && |
|---|
| 501 | + !bundle->try_upgrade && |
|---|
| 502 | + !list_empty(&bundle->waiting_calls) && |
|---|
| 503 | + usable < ARRAY_SIZE(bundle->conns)) |
|---|
| 504 | + goto alloc_conn; |
|---|
| 509 | 505 | |
|---|
| 510 | | - default: |
|---|
| 511 | | - BUG(); |
|---|
| 512 | | - } |
|---|
| 513 | | - |
|---|
| 514 | | -out_unlock: |
|---|
| 515 | | - spin_unlock(&rxnet->client_conn_cache_lock); |
|---|
| 516 | | -out: |
|---|
| 517 | | - _leave(" [%d]", conn->cache_state); |
|---|
| 506 | + spin_unlock(&bundle->channel_lock); |
|---|
| 507 | + _leave(""); |
|---|
| 518 | 508 | return; |
|---|
| 519 | 509 | |
|---|
| 520 | | -activate_conn: |
|---|
| 521 | | - _debug("activate"); |
|---|
| 522 | | - rxrpc_activate_conn(rxnet, conn); |
|---|
| 523 | | - goto out_unlock; |
|---|
| 524 | | - |
|---|
| 525 | | -wait_for_capacity: |
|---|
| 526 | | - _debug("wait"); |
|---|
| 527 | | - trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting); |
|---|
| 528 | | - conn->cache_state = RXRPC_CONN_CLIENT_WAITING; |
|---|
| 529 | | - list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns); |
|---|
| 530 | | - goto out_unlock; |
|---|
| 531 | | -} |
|---|
| 532 | | - |
|---|
| 533 | | -/* |
|---|
| 534 | | - * Deactivate a channel. |
|---|
| 535 | | - */ |
|---|
| 536 | | -static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn, |
|---|
| 537 | | - unsigned int channel) |
|---|
| 538 | | -{ |
|---|
| 539 | | - struct rxrpc_channel *chan = &conn->channels[channel]; |
|---|
| 540 | | - |
|---|
| 541 | | - rcu_assign_pointer(chan->call, NULL); |
|---|
| 542 | | - conn->active_chans &= ~(1 << channel); |
|---|
| 510 | +alloc_conn: |
|---|
| 511 | + return rxrpc_add_conn_to_bundle(bundle, gfp); |
|---|
| 543 | 512 | } |
|---|
| 544 | 513 | |
|---|
| 545 | 514 | /* |
|---|
| .. | .. |
|---|
| 551 | 520 | unsigned int channel) |
|---|
| 552 | 521 | { |
|---|
| 553 | 522 | struct rxrpc_channel *chan = &conn->channels[channel]; |
|---|
| 554 | | - struct rxrpc_call *call = list_entry(conn->waiting_calls.next, |
|---|
| 523 | + struct rxrpc_bundle *bundle = conn->bundle; |
|---|
| 524 | + struct rxrpc_call *call = list_entry(bundle->waiting_calls.next, |
|---|
| 555 | 525 | struct rxrpc_call, chan_wait_link); |
|---|
| 556 | 526 | u32 call_id = chan->call_counter + 1; |
|---|
| 527 | + |
|---|
| 528 | + _enter("C=%x,%u", conn->debug_id, channel); |
|---|
| 557 | 529 | |
|---|
| 558 | 530 | trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate); |
|---|
| 559 | 531 | |
|---|
| .. | .. |
|---|
| 561 | 533 | * as the DATA packet will implicitly ACK it. |
|---|
| 562 | 534 | */ |
|---|
| 563 | 535 | clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); |
|---|
| 564 | | - |
|---|
| 565 | | - write_lock_bh(&call->state_lock); |
|---|
| 566 | | - if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags)) |
|---|
| 567 | | - call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; |
|---|
| 568 | | - else |
|---|
| 569 | | - call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; |
|---|
| 570 | | - write_unlock_bh(&call->state_lock); |
|---|
| 536 | + clear_bit(conn->bundle_shift + channel, &bundle->avail_chans); |
|---|
| 571 | 537 | |
|---|
| 572 | 538 | rxrpc_see_call(call); |
|---|
| 573 | 539 | list_del_init(&call->chan_wait_link); |
|---|
| 574 | | - conn->active_chans |= 1 << channel; |
|---|
| 575 | 540 | call->peer = rxrpc_get_peer(conn->params.peer); |
|---|
| 541 | + call->conn = rxrpc_get_connection(conn); |
|---|
| 576 | 542 | call->cid = conn->proto.cid | channel; |
|---|
| 577 | 543 | call->call_id = call_id; |
|---|
| 544 | + call->security = conn->security; |
|---|
| 545 | + call->security_ix = conn->security_ix; |
|---|
| 546 | + call->service_id = conn->service_id; |
|---|
| 578 | 547 | |
|---|
| 579 | 548 | trace_rxrpc_connect_call(call); |
|---|
| 580 | 549 | _net("CONNECT call %08x:%08x as call %d on conn %d", |
|---|
| 581 | 550 | call->cid, call->call_id, call->debug_id, conn->debug_id); |
|---|
| 582 | 551 | |
|---|
| 583 | | - /* Paired with the read barrier in rxrpc_wait_for_channel(). This |
|---|
| 584 | | - * orders cid and epoch in the connection wrt to call_id without the |
|---|
| 585 | | - * need to take the channel_lock. |
|---|
| 552 | + write_lock_bh(&call->state_lock); |
|---|
| 553 | + call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; |
|---|
| 554 | + write_unlock_bh(&call->state_lock); |
|---|
| 555 | + |
|---|
| 556 | + /* Paired with the read barrier in rxrpc_connect_call(). This orders |
|---|
| 557 | + * cid and epoch in the connection wrt to call_id without the need to |
|---|
| 558 | + * take the channel_lock. |
|---|
| 586 | 559 | * |
|---|
| 587 | 560 | * We provisionally assign a callNumber at this point, but we don't |
|---|
| 588 | 561 | * confirm it until the call is about to be exposed. |
|---|
| .. | .. |
|---|
| 591 | 564 | * at the call ID through a connection channel. |
|---|
| 592 | 565 | */ |
|---|
| 593 | 566 | smp_wmb(); |
|---|
| 594 | | - chan->call_id = call_id; |
|---|
| 595 | | - chan->call_debug_id = call->debug_id; |
|---|
| 567 | + |
|---|
| 568 | + chan->call_id = call_id; |
|---|
| 569 | + chan->call_debug_id = call->debug_id; |
|---|
| 596 | 570 | rcu_assign_pointer(chan->call, call); |
|---|
| 597 | 571 | wake_up(&call->waitq); |
|---|
| 572 | +} |
|---|
| 573 | + |
|---|
| 574 | +/* |
|---|
| 575 | + * Remove a connection from the idle list if it's on it. |
|---|
| 576 | + */ |
|---|
| 577 | +static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn) |
|---|
| 578 | +{ |
|---|
| 579 | + struct rxrpc_net *rxnet = bundle->params.local->rxnet; |
|---|
| 580 | + bool drop_ref; |
|---|
| 581 | + |
|---|
| 582 | + if (!list_empty(&conn->cache_link)) { |
|---|
| 583 | + drop_ref = false; |
|---|
| 584 | + spin_lock(&rxnet->client_conn_cache_lock); |
|---|
| 585 | + if (!list_empty(&conn->cache_link)) { |
|---|
| 586 | + list_del_init(&conn->cache_link); |
|---|
| 587 | + drop_ref = true; |
|---|
| 588 | + } |
|---|
| 589 | + spin_unlock(&rxnet->client_conn_cache_lock); |
|---|
| 590 | + if (drop_ref) |
|---|
| 591 | + rxrpc_put_connection(conn); |
|---|
| 592 | + } |
|---|
| 598 | 593 | } |
|---|
| 599 | 594 | |
|---|
| 600 | 595 | /* |
|---|
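rxrpc_unidle_conn() is a small double-checked pattern: an unlocked list_empty() fast path, a recheck under client_conn_cache_lock, and the connection reference dropped only after the lock is released. A hedged userspace sketch of the same shape follows; the mutex, flag and counter are stand-ins for the spinlock, list membership and connection refcount.

```c
/* Sketch of the unidle pattern: cheap unlocked check, recheck under the
 * lock, and defer the reference drop until after unlocking.  A pthread
 * mutex stands in for the kernel spinlock; illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static bool on_idle_list = true;	/* stands in for !list_empty(&conn->cache_link) */
static atomic_int conn_ref = 2;		/* one reference held by the idle list */

static void unidle_conn(void)
{
	bool drop_ref = false;

	if (on_idle_list) {			/* unlocked fast-path check */
		pthread_mutex_lock(&cache_lock);
		if (on_idle_list) {		/* recheck under the lock */
			on_idle_list = false;
			drop_ref = true;
		}
		pthread_mutex_unlock(&cache_lock);
		if (drop_ref)			/* put outside the lock */
			atomic_fetch_sub(&conn_ref, 1);
	}
}

int main(void)
{
	unidle_conn();
	printf("ref=%d idle=%d\n", atomic_load(&conn_ref), on_idle_list);
	return 0;
}
```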
| 601 | 596 | * Assign channels and callNumbers to waiting calls with channel_lock |
|---|
| 602 | 597 | * held by caller. |
|---|
| 603 | 598 | */ |
|---|
| 604 | | -static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn) |
|---|
| 599 | +static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle) |
|---|
| 605 | 600 | { |
|---|
| 606 | | - u8 avail, mask; |
|---|
| 601 | + struct rxrpc_connection *conn; |
|---|
| 602 | + unsigned long avail, mask; |
|---|
| 603 | + unsigned int channel, slot; |
|---|
| 607 | 604 | |
|---|
| 608 | | - switch (conn->cache_state) { |
|---|
| 609 | | - case RXRPC_CONN_CLIENT_ACTIVE: |
|---|
| 610 | | - mask = RXRPC_ACTIVE_CHANS_MASK; |
|---|
| 611 | | - break; |
|---|
| 612 | | - case RXRPC_CONN_CLIENT_UPGRADE: |
|---|
| 613 | | - mask = 0x01; |
|---|
| 614 | | - break; |
|---|
| 615 | | - default: |
|---|
| 616 | | - return; |
|---|
| 605 | + if (bundle->try_upgrade) |
|---|
| 606 | + mask = 1; |
|---|
| 607 | + else |
|---|
| 608 | + mask = ULONG_MAX; |
|---|
| 609 | + |
|---|
| 610 | + while (!list_empty(&bundle->waiting_calls)) { |
|---|
| 611 | + avail = bundle->avail_chans & mask; |
|---|
| 612 | + if (!avail) |
|---|
| 613 | + break; |
|---|
| 614 | + channel = __ffs(avail); |
|---|
| 615 | + clear_bit(channel, &bundle->avail_chans); |
|---|
| 616 | + |
|---|
| 617 | + slot = channel / RXRPC_MAXCALLS; |
|---|
| 618 | + conn = bundle->conns[slot]; |
|---|
| 619 | + if (!conn) |
|---|
| 620 | + break; |
|---|
| 621 | + |
|---|
| 622 | + if (bundle->try_upgrade) |
|---|
| 623 | + set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags); |
|---|
| 624 | + rxrpc_unidle_conn(bundle, conn); |
|---|
| 625 | + |
|---|
| 626 | + channel &= (RXRPC_MAXCALLS - 1); |
|---|
| 627 | + conn->act_chans |= 1 << channel; |
|---|
| 628 | + rxrpc_activate_one_channel(conn, channel); |
|---|
| 617 | 629 | } |
|---|
| 618 | | - |
|---|
| 619 | | - while (!list_empty(&conn->waiting_calls) && |
|---|
| 620 | | - (avail = ~conn->active_chans, |
|---|
| 621 | | - avail &= mask, |
|---|
| 622 | | - avail != 0)) |
|---|
| 623 | | - rxrpc_activate_one_channel(conn, __ffs(avail)); |
|---|
| 624 | 630 | } |
|---|
| 625 | 631 | |
|---|
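The activation loop maps a bundle-wide channel bit back onto a (connection slot, per-connection channel) pair: __ffs() picks the lowest available bit, division by RXRPC_MAXCALLS selects the connection, and masking with RXRPC_MAXCALLS - 1 gives the channel on it. A worked example of that arithmetic, assuming RXRPC_MAXCALLS is 4 as for rxrpc's four channels per connection:

```c
/* Worked example of mapping a bundle-wide channel bit back to a
 * (connection slot, per-connection channel) pair, as the activation
 * loop does.  Illustrative only; MAXCALLS = 4 is assumed.
 */
#include <stdio.h>

#define MAXCALLS 4

int main(void)
{
	unsigned long avail_chans = 0x200;	/* only bit 9 is free */
	unsigned int bit, slot, channel;

	bit = __builtin_ctzl(avail_chans);	/* lowest set bit, like __ffs() */
	slot = bit / MAXCALLS;			/* which connection in the bundle */
	channel = bit & (MAXCALLS - 1);		/* which channel on that connection */

	/* bit 9 -> connection slot 2, channel 1 */
	printf("bit=%u slot=%u channel=%u\n", bit, slot, channel);
	return 0;
}
```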
| 626 | 632 | /* |
|---|
| 627 | 633 | * Assign channels and callNumbers to waiting calls. |
|---|
| 628 | 634 | */ |
|---|
| 629 | | -static void rxrpc_activate_channels(struct rxrpc_connection *conn) |
|---|
| 635 | +static void rxrpc_activate_channels(struct rxrpc_bundle *bundle) |
|---|
| 630 | 636 | { |
|---|
| 631 | | - _enter("%d", conn->debug_id); |
|---|
| 637 | + _enter("B=%x", bundle->debug_id); |
|---|
| 632 | 638 | |
|---|
| 633 | | - trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans); |
|---|
| 639 | + trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans); |
|---|
| 634 | 640 | |
|---|
| 635 | | - if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK) |
|---|
| 641 | + if (!bundle->avail_chans) |
|---|
| 636 | 642 | return; |
|---|
| 637 | 643 | |
|---|
| 638 | | - spin_lock(&conn->channel_lock); |
|---|
| 639 | | - rxrpc_activate_channels_locked(conn); |
|---|
| 640 | | - spin_unlock(&conn->channel_lock); |
|---|
| 644 | + spin_lock(&bundle->channel_lock); |
|---|
| 645 | + rxrpc_activate_channels_locked(bundle); |
|---|
| 646 | + spin_unlock(&bundle->channel_lock); |
|---|
| 641 | 647 | _leave(""); |
|---|
| 642 | 648 | } |
|---|
| 643 | 649 | |
|---|
| 644 | 650 | /* |
|---|
| 645 | 651 | * Wait for a callNumber and a channel to be granted to a call. |
|---|
| 646 | 652 | */ |
|---|
| 647 | | -static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp) |
|---|
| 653 | +static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle, |
|---|
| 654 | + struct rxrpc_call *call, gfp_t gfp) |
|---|
| 648 | 655 | { |
|---|
| 656 | + DECLARE_WAITQUEUE(myself, current); |
|---|
| 649 | 657 | int ret = 0; |
|---|
| 650 | 658 | |
|---|
| 651 | 659 | _enter("%d", call->debug_id); |
|---|
| 652 | 660 | |
|---|
| 653 | | - if (!call->call_id) { |
|---|
| 654 | | - DECLARE_WAITQUEUE(myself, current); |
|---|
| 655 | | - |
|---|
| 656 | | - if (!gfpflags_allow_blocking(gfp)) { |
|---|
| 657 | | - ret = -EAGAIN; |
|---|
| 658 | | - goto out; |
|---|
| 659 | | - } |
|---|
| 660 | | - |
|---|
| 661 | | - add_wait_queue_exclusive(&call->waitq, &myself); |
|---|
| 662 | | - for (;;) { |
|---|
| 663 | | - set_current_state(TASK_INTERRUPTIBLE); |
|---|
| 664 | | - if (call->call_id) |
|---|
| 665 | | - break; |
|---|
| 666 | | - if (signal_pending(current)) { |
|---|
| 667 | | - ret = -ERESTARTSYS; |
|---|
| 668 | | - break; |
|---|
| 669 | | - } |
|---|
| 670 | | - schedule(); |
|---|
| 671 | | - } |
|---|
| 672 | | - remove_wait_queue(&call->waitq, &myself); |
|---|
| 673 | | - __set_current_state(TASK_RUNNING); |
|---|
| 661 | + if (!gfpflags_allow_blocking(gfp)) { |
|---|
| 662 | + rxrpc_maybe_add_conn(bundle, gfp); |
|---|
| 663 | + rxrpc_activate_channels(bundle); |
|---|
| 664 | + ret = bundle->alloc_error ?: -EAGAIN; |
|---|
| 665 | + goto out; |
|---|
| 674 | 666 | } |
|---|
| 675 | 667 | |
|---|
| 676 | | - /* Paired with the write barrier in rxrpc_activate_one_channel(). */ |
|---|
| 677 | | - smp_rmb(); |
|---|
| 668 | + add_wait_queue_exclusive(&call->waitq, &myself); |
|---|
| 669 | + for (;;) { |
|---|
| 670 | + rxrpc_maybe_add_conn(bundle, gfp); |
|---|
| 671 | + rxrpc_activate_channels(bundle); |
|---|
| 672 | + ret = bundle->alloc_error; |
|---|
| 673 | + if (ret < 0) |
|---|
| 674 | + break; |
|---|
| 675 | + |
|---|
| 676 | + switch (call->interruptibility) { |
|---|
| 677 | + case RXRPC_INTERRUPTIBLE: |
|---|
| 678 | + case RXRPC_PREINTERRUPTIBLE: |
|---|
| 679 | + set_current_state(TASK_INTERRUPTIBLE); |
|---|
| 680 | + break; |
|---|
| 681 | + case RXRPC_UNINTERRUPTIBLE: |
|---|
| 682 | + default: |
|---|
| 683 | + set_current_state(TASK_UNINTERRUPTIBLE); |
|---|
| 684 | + break; |
|---|
| 685 | + } |
|---|
| 686 | + if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN) |
|---|
| 687 | + break; |
|---|
| 688 | + if ((call->interruptibility == RXRPC_INTERRUPTIBLE || |
|---|
| 689 | + call->interruptibility == RXRPC_PREINTERRUPTIBLE) && |
|---|
| 690 | + signal_pending(current)) { |
|---|
| 691 | + ret = -ERESTARTSYS; |
|---|
| 692 | + break; |
|---|
| 693 | + } |
|---|
| 694 | + schedule(); |
|---|
| 695 | + } |
|---|
| 696 | + remove_wait_queue(&call->waitq, &myself); |
|---|
| 697 | + __set_current_state(TASK_RUNNING); |
|---|
| 678 | 698 | |
|---|
| 679 | 699 | out: |
|---|
| 680 | 700 | _leave(" = %d", ret); |
|---|
| .. | .. |
|---|
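rxrpc_wait_for_channel() now chooses its sleep state from call->interruptibility: interruptible and pre-interruptible calls sleep TASK_INTERRUPTIBLE and a pending signal ends the wait with -ERESTARTSYS, anything else sleeps uninterruptibly. The decision is summarised in the sketch below; the enum values follow the patch, the helper name is illustrative.

```c
/* Sketch of the interruptibility decision in the channel wait loop.
 * The enum names follow the patch; signal_breaks_wait() is an
 * illustrative helper, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

enum rxrpc_interruptibility {
	RXRPC_INTERRUPTIBLE,	/* a signal aborts the wait */
	RXRPC_PREINTERRUPTIBLE,	/* a signal aborts the wait */
	RXRPC_UNINTERRUPTIBLE,	/* wait regardless of signals */
};

static bool signal_breaks_wait(enum rxrpc_interruptibility mode)
{
	switch (mode) {
	case RXRPC_INTERRUPTIBLE:
	case RXRPC_PREINTERRUPTIBLE:
		return true;		/* sleeps TASK_INTERRUPTIBLE */
	case RXRPC_UNINTERRUPTIBLE:
	default:
		return false;		/* sleeps TASK_UNINTERRUPTIBLE */
	}
}

int main(void)
{
	printf("%d %d\n",
	       signal_breaks_wait(RXRPC_INTERRUPTIBLE),
	       signal_breaks_wait(RXRPC_UNINTERRUPTIBLE));
	return 0;
}
```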
| 691 | 711 | struct sockaddr_rxrpc *srx, |
|---|
| 692 | 712 | gfp_t gfp) |
|---|
| 693 | 713 | { |
|---|
| 714 | + struct rxrpc_bundle *bundle; |
|---|
| 694 | 715 | struct rxrpc_net *rxnet = cp->local->rxnet; |
|---|
| 695 | | - int ret; |
|---|
| 716 | + int ret = 0; |
|---|
| 696 | 717 | |
|---|
| 697 | 718 | _enter("{%d,%lx},", call->debug_id, call->user_call_ID); |
|---|
| 698 | 719 | |
|---|
| 699 | 720 | rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); |
|---|
| 700 | | - rxrpc_cull_active_client_conns(rxnet); |
|---|
| 701 | 721 | |
|---|
| 702 | | - ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp); |
|---|
| 703 | | - if (ret < 0) |
|---|
| 704 | | - goto out; |
|---|
| 705 | | - |
|---|
| 706 | | - rxrpc_animate_client_conn(rxnet, call->conn); |
|---|
| 707 | | - rxrpc_activate_channels(call->conn); |
|---|
| 708 | | - |
|---|
| 709 | | - ret = rxrpc_wait_for_channel(call, gfp); |
|---|
| 710 | | - if (ret < 0) { |
|---|
| 711 | | - trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed); |
|---|
| 712 | | - rxrpc_disconnect_client_call(call); |
|---|
| 722 | + bundle = rxrpc_prep_call(rx, call, cp, srx, gfp); |
|---|
| 723 | + if (IS_ERR(bundle)) { |
|---|
| 724 | + ret = PTR_ERR(bundle); |
|---|
| 713 | 725 | goto out; |
|---|
| 714 | 726 | } |
|---|
| 715 | 727 | |
|---|
| 716 | | - spin_lock_bh(&call->conn->params.peer->lock); |
|---|
| 717 | | - hlist_add_head_rcu(&call->error_link, |
|---|
| 718 | | - &call->conn->params.peer->error_targets); |
|---|
| 719 | | - spin_unlock_bh(&call->conn->params.peer->lock); |
|---|
| 728 | + if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) { |
|---|
| 729 | + ret = rxrpc_wait_for_channel(bundle, call, gfp); |
|---|
| 730 | + if (ret < 0) |
|---|
| 731 | + goto wait_failed; |
|---|
| 732 | + } |
|---|
| 720 | 733 | |
|---|
| 734 | +granted_channel: |
|---|
| 735 | + /* Paired with the write barrier in rxrpc_activate_one_channel(). */ |
|---|
| 736 | + smp_rmb(); |
|---|
| 737 | + |
|---|
| 738 | +out_put_bundle: |
|---|
| 739 | + rxrpc_deactivate_bundle(bundle); |
|---|
| 740 | + rxrpc_put_bundle(bundle); |
|---|
| 721 | 741 | out: |
|---|
| 722 | 742 | _leave(" = %d", ret); |
|---|
| 723 | 743 | return ret; |
|---|
| 724 | | -} |
|---|
| 725 | 744 | |
|---|
| 726 | | -/* |
|---|
| 727 | | - * Note that a connection is about to be exposed to the world. Once it is |
|---|
| 728 | | - * exposed, we maintain an extra ref on it that stops it from being summarily |
|---|
| 729 | | - * discarded before it's (a) had a chance to deal with retransmission and (b) |
|---|
| 730 | | - * had a chance at re-use (the per-connection security negotiation is |
|---|
| 731 | | - * expensive). |
|---|
| 732 | | - */ |
|---|
| 733 | | -static void rxrpc_expose_client_conn(struct rxrpc_connection *conn, |
|---|
| 734 | | - unsigned int channel) |
|---|
| 735 | | -{ |
|---|
| 736 | | - if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) { |
|---|
| 737 | | - trace_rxrpc_client(conn, channel, rxrpc_client_exposed); |
|---|
| 738 | | - rxrpc_get_connection(conn); |
|---|
| 745 | +wait_failed: |
|---|
| 746 | + spin_lock(&bundle->channel_lock); |
|---|
| 747 | + list_del_init(&call->chan_wait_link); |
|---|
| 748 | + spin_unlock(&bundle->channel_lock); |
|---|
| 749 | + |
|---|
| 750 | + if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) { |
|---|
| 751 | + ret = 0; |
|---|
| 752 | + goto granted_channel; |
|---|
| 739 | 753 | } |
|---|
| 754 | + |
|---|
| 755 | + trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed); |
|---|
| 756 | + rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret); |
|---|
| 757 | + rxrpc_disconnect_client_call(bundle, call); |
|---|
| 758 | + goto out_put_bundle; |
|---|
| 740 | 759 | } |
|---|
| 741 | 760 | |
|---|
| 742 | 761 | /* |
|---|
| .. | .. |
|---|
| 758 | 777 | chan->call_counter++; |
|---|
| 759 | 778 | if (chan->call_counter >= INT_MAX) |
|---|
| 760 | 779 | set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); |
|---|
| 761 | | - rxrpc_expose_client_conn(conn, channel); |
|---|
| 780 | + trace_rxrpc_client(conn, channel, rxrpc_client_exposed); |
|---|
| 762 | 781 | } |
|---|
| 763 | 782 | } |
|---|
| 764 | 783 | |
|---|
| .. | .. |
|---|
| 767 | 786 | */ |
|---|
| 768 | 787 | static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet) |
|---|
| 769 | 788 | { |
|---|
| 770 | | - unsigned long now = jiffies; |
|---|
| 771 | | - unsigned long reap_at = now + rxrpc_conn_idle_client_expiry; |
|---|
| 789 | + if (!rxnet->kill_all_client_conns) { |
|---|
| 790 | + unsigned long now = jiffies; |
|---|
| 791 | + unsigned long reap_at = now + rxrpc_conn_idle_client_expiry; |
|---|
| 772 | 792 | |
|---|
| 773 | | - if (rxnet->live) |
|---|
| 774 | | - timer_reduce(&rxnet->client_conn_reap_timer, reap_at); |
|---|
| 793 | + if (rxnet->live) |
|---|
| 794 | + timer_reduce(&rxnet->client_conn_reap_timer, reap_at); |
|---|
| 795 | + } |
|---|
| 775 | 796 | } |
|---|
| 776 | 797 | |
|---|
| 777 | 798 | /* |
|---|
| 778 | 799 | * Disconnect a client call. |
|---|
| 779 | 800 | */ |
|---|
| 780 | | -void rxrpc_disconnect_client_call(struct rxrpc_call *call) |
|---|
| 801 | +void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call) |
|---|
| 781 | 802 | { |
|---|
| 782 | | - struct rxrpc_connection *conn = call->conn; |
|---|
| 803 | + struct rxrpc_connection *conn; |
|---|
| 783 | 804 | struct rxrpc_channel *chan = NULL; |
|---|
| 784 | | - struct rxrpc_net *rxnet = conn->params.local->rxnet; |
|---|
| 785 | | - unsigned int channel = -1; |
|---|
| 805 | + struct rxrpc_net *rxnet = bundle->params.local->rxnet; |
|---|
| 806 | + unsigned int channel; |
|---|
| 807 | + bool may_reuse; |
|---|
| 786 | 808 | u32 cid; |
|---|
| 787 | 809 | |
|---|
| 788 | | - spin_lock(&conn->channel_lock); |
|---|
| 810 | + _enter("c=%x", call->debug_id); |
|---|
| 811 | + |
|---|
| 812 | + spin_lock(&bundle->channel_lock); |
|---|
| 789 | 813 | set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); |
|---|
| 790 | 814 | |
|---|
| 791 | | - cid = call->cid; |
|---|
| 792 | | - if (cid) { |
|---|
| 793 | | - channel = cid & RXRPC_CHANNELMASK; |
|---|
| 794 | | - chan = &conn->channels[channel]; |
|---|
| 795 | | - } |
|---|
| 796 | | - trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect); |
|---|
| 797 | | - |
|---|
| 798 | 815 | /* Calls that have never actually been assigned a channel can simply be |
|---|
| 799 | | - * discarded. If the conn didn't get used either, it will follow |
|---|
| 800 | | - * immediately unless someone else grabs it in the meantime. |
|---|
| 816 | + * discarded. |
|---|
| 801 | 817 | */ |
|---|
| 802 | | - if (!list_empty(&call->chan_wait_link)) { |
|---|
| 818 | + conn = call->conn; |
|---|
| 819 | + if (!conn) { |
|---|
| 803 | 820 | _debug("call is waiting"); |
|---|
| 804 | 821 | ASSERTCMP(call->call_id, ==, 0); |
|---|
| 805 | 822 | ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags)); |
|---|
| 806 | 823 | list_del_init(&call->chan_wait_link); |
|---|
| 807 | | - |
|---|
| 808 | | - trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted); |
|---|
| 809 | | - |
|---|
| 810 | | - /* We must deactivate or idle the connection if it's now |
|---|
| 811 | | - * waiting for nothing. |
|---|
| 812 | | - */ |
|---|
| 813 | | - spin_lock(&rxnet->client_conn_cache_lock); |
|---|
| 814 | | - if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING && |
|---|
| 815 | | - list_empty(&conn->waiting_calls) && |
|---|
| 816 | | - !conn->active_chans) |
|---|
| 817 | | - goto idle_connection; |
|---|
| 818 | 824 | goto out; |
|---|
| 819 | 825 | } |
|---|
| 820 | 826 | |
|---|
| 827 | + cid = call->cid; |
|---|
| 828 | + channel = cid & RXRPC_CHANNELMASK; |
|---|
| 829 | + chan = &conn->channels[channel]; |
|---|
| 830 | + trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect); |
|---|
| 831 | + |
|---|
| 821 | 832 | if (rcu_access_pointer(chan->call) != call) { |
|---|
| 822 | | - spin_unlock(&conn->channel_lock); |
|---|
| 833 | + spin_unlock(&bundle->channel_lock); |
|---|
| 823 | 834 | BUG(); |
|---|
| 824 | 835 | } |
|---|
| 836 | + |
|---|
| 837 | + may_reuse = rxrpc_may_reuse_conn(conn); |
|---|
| 825 | 838 | |
|---|
| 826 | 839 | /* If a client call was exposed to the world, we save the result for |
|---|
| 827 | 840 | * retransmission. |
|---|
| .. | .. |
|---|
| 835 | 848 | if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { |
|---|
| 836 | 849 | _debug("exposed %u,%u", call->call_id, call->abort_code); |
|---|
| 837 | 850 | __rxrpc_disconnect_call(conn, call); |
|---|
| 851 | + |
|---|
| 852 | + if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) { |
|---|
| 853 | + trace_rxrpc_client(conn, channel, rxrpc_client_to_active); |
|---|
| 854 | + bundle->try_upgrade = false; |
|---|
| 855 | + if (may_reuse) |
|---|
| 856 | + rxrpc_activate_channels_locked(bundle); |
|---|
| 857 | + } |
|---|
| 858 | + |
|---|
| 838 | 859 | } |
|---|
| 839 | 860 | |
|---|
| 840 | 861 | /* See if we can pass the channel directly to another call. */ |
|---|
| 841 | | - if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE && |
|---|
| 842 | | - !list_empty(&conn->waiting_calls)) { |
|---|
| 862 | + if (may_reuse && !list_empty(&bundle->waiting_calls)) { |
|---|
| 843 | 863 | trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass); |
|---|
| 844 | 864 | rxrpc_activate_one_channel(conn, channel); |
|---|
| 845 | | - goto out_2; |
|---|
| 865 | + goto out; |
|---|
| 846 | 866 | } |
|---|
| 847 | 867 | |
|---|
| 848 | 868 | /* Schedule the final ACK to be transmitted in a short while so that it |
|---|
| .. | .. |
|---|
| 859 | 879 | rxrpc_reduce_conn_timer(conn, final_ack_at); |
|---|
| 860 | 880 | } |
|---|
| 861 | 881 | |
|---|
| 862 | | - /* Things are more complex and we need the cache lock. We might be |
|---|
| 863 | | - * able to simply idle the conn or it might now be lurking on the wait |
|---|
| 864 | | - * list. It might even get moved back to the active list whilst we're |
|---|
| 865 | | - * waiting for the lock. |
|---|
| 882 | + /* Deactivate the channel. */ |
|---|
| 883 | + rcu_assign_pointer(chan->call, NULL); |
|---|
| 884 | + set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans); |
|---|
| 885 | + conn->act_chans &= ~(1 << channel); |
|---|
| 886 | + |
|---|
| 887 | + /* If no channels remain active, then put the connection on the idle |
|---|
| 888 | + * list for a short while. Give it a ref to stop it going away if it |
|---|
| 889 | + * becomes unbundled. |
|---|
| 866 | 890 | */ |
|---|
| 867 | | - spin_lock(&rxnet->client_conn_cache_lock); |
|---|
| 891 | + if (!conn->act_chans) { |
|---|
| 892 | + trace_rxrpc_client(conn, channel, rxrpc_client_to_idle); |
|---|
| 893 | + conn->idle_timestamp = jiffies; |
|---|
| 868 | 894 | |
|---|
| 869 | | - switch (conn->cache_state) { |
|---|
| 870 | | - case RXRPC_CONN_CLIENT_UPGRADE: |
|---|
| 871 | | - /* Deal with termination of a service upgrade probe. */ |
|---|
| 872 | | - if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) { |
|---|
| 873 | | - clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags); |
|---|
| 874 | | - trace_rxrpc_client(conn, channel, rxrpc_client_to_active); |
|---|
| 875 | | - conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE; |
|---|
| 876 | | - rxrpc_activate_channels_locked(conn); |
|---|
| 877 | | - } |
|---|
| 878 | | - /* fall through */ |
|---|
| 879 | | - case RXRPC_CONN_CLIENT_ACTIVE: |
|---|
| 880 | | - if (list_empty(&conn->waiting_calls)) { |
|---|
| 881 | | - rxrpc_deactivate_one_channel(conn, channel); |
|---|
| 882 | | - if (!conn->active_chans) { |
|---|
| 883 | | - rxnet->nr_active_client_conns--; |
|---|
| 884 | | - goto idle_connection; |
|---|
| 885 | | - } |
|---|
| 886 | | - goto out; |
|---|
| 887 | | - } |
|---|
| 895 | + rxrpc_get_connection(conn); |
|---|
| 896 | + spin_lock(&rxnet->client_conn_cache_lock); |
|---|
| 897 | + list_move_tail(&conn->cache_link, &rxnet->idle_client_conns); |
|---|
| 898 | + spin_unlock(&rxnet->client_conn_cache_lock); |
|---|
| 888 | 899 | |
|---|
| 889 | | - trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass); |
|---|
| 890 | | - rxrpc_activate_one_channel(conn, channel); |
|---|
| 891 | | - goto out; |
|---|
| 892 | | - |
|---|
| 893 | | - case RXRPC_CONN_CLIENT_CULLED: |
|---|
| 894 | | - rxrpc_deactivate_one_channel(conn, channel); |
|---|
| 895 | | - ASSERT(list_empty(&conn->waiting_calls)); |
|---|
| 896 | | - if (!conn->active_chans) |
|---|
| 897 | | - goto idle_connection; |
|---|
| 898 | | - goto out; |
|---|
| 899 | | - |
|---|
| 900 | | - case RXRPC_CONN_CLIENT_WAITING: |
|---|
| 901 | | - rxrpc_deactivate_one_channel(conn, channel); |
|---|
| 902 | | - goto out; |
|---|
| 903 | | - |
|---|
| 904 | | - default: |
|---|
| 905 | | - BUG(); |
|---|
| 900 | + rxrpc_set_client_reap_timer(rxnet); |
|---|
| 906 | 901 | } |
|---|
| 907 | 902 | |
|---|
| 908 | 903 | out: |
|---|
| 909 | | - spin_unlock(&rxnet->client_conn_cache_lock); |
|---|
| 910 | | -out_2: |
|---|
| 911 | | - spin_unlock(&conn->channel_lock); |
|---|
| 904 | + spin_unlock(&bundle->channel_lock); |
|---|
| 912 | 905 | _leave(""); |
|---|
| 913 | 906 | return; |
|---|
| 907 | +} |
|---|
| 914 | 908 | |
|---|
| 915 | | -idle_connection: |
|---|
| 916 | | - /* As no channels remain active, the connection gets deactivated |
|---|
| 917 | | - * immediately or moved to the idle list for a short while. |
|---|
| 918 | | - */ |
|---|
| 919 | | - if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) { |
|---|
| 920 | | - trace_rxrpc_client(conn, channel, rxrpc_client_to_idle); |
|---|
| 921 | | - conn->idle_timestamp = jiffies; |
|---|
| 922 | | - conn->cache_state = RXRPC_CONN_CLIENT_IDLE; |
|---|
| 923 | | - list_move_tail(&conn->cache_link, &rxnet->idle_client_conns); |
|---|
| 924 | | - if (rxnet->idle_client_conns.next == &conn->cache_link && |
|---|
| 925 | | - !rxnet->kill_all_client_conns) |
|---|
| 926 | | - rxrpc_set_client_reap_timer(rxnet); |
|---|
| 927 | | - } else { |
|---|
| 928 | | - trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive); |
|---|
| 929 | | - conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; |
|---|
| 930 | | - list_del_init(&conn->cache_link); |
|---|
| 909 | +/* |
|---|
| 910 | + * Remove a connection from a bundle. |
|---|
| 911 | + */ |
|---|
| 912 | +static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) |
|---|
| 913 | +{ |
|---|
| 914 | + struct rxrpc_bundle *bundle = conn->bundle; |
|---|
| 915 | + unsigned int bindex; |
|---|
| 916 | + bool need_drop = false; |
|---|
| 917 | + int i; |
|---|
| 918 | + |
|---|
| 919 | + _enter("C=%x", conn->debug_id); |
|---|
| 920 | + |
|---|
| 921 | + if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) |
|---|
| 922 | + rxrpc_process_delayed_final_acks(conn, true); |
|---|
| 923 | + |
|---|
| 924 | + spin_lock(&bundle->channel_lock); |
|---|
| 925 | + bindex = conn->bundle_shift / RXRPC_MAXCALLS; |
|---|
| 926 | + if (bundle->conns[bindex] == conn) { |
|---|
| 927 | + _debug("clear slot %u", bindex); |
|---|
| 928 | + bundle->conns[bindex] = NULL; |
|---|
| 929 | + for (i = 0; i < RXRPC_MAXCALLS; i++) |
|---|
| 930 | + clear_bit(conn->bundle_shift + i, &bundle->avail_chans); |
|---|
| 931 | + need_drop = true; |
|---|
| 931 | 932 | } |
|---|
| 932 | | - goto out; |
|---|
| 933 | + spin_unlock(&bundle->channel_lock); |
|---|
| 934 | + |
|---|
| 935 | + if (need_drop) { |
|---|
| 936 | + rxrpc_deactivate_bundle(bundle); |
|---|
| 937 | + rxrpc_put_connection(conn); |
|---|
| 938 | + } |
|---|
| 939 | +} |
|---|
| 940 | + |
|---|
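The unbundle path above follows a common shape: detach the connection from the shared bundle slot while holding the lock, but defer the reference drops until after the lock is released, since putting the last ref may free the object or take further locks. A hedged sketch of that shape is below; the types, names and the plain-integer refcount are all illustrative, not taken from the patch.

```c
/* Sketch of the "detach under lock, put refs after unlock" pattern used by
 * rxrpc_unbundle_conn() above. Illustrative userspace code only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	int refcount;			/* toy refcount, not the kernel's refcount_t */
	struct obj **slot;		/* where the shared structure points at us */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void obj_put(struct obj *obj)
{
	/* Dropping the final ref may free the object; never do this while
	 * table_lock is held.
	 */
	if (--obj->refcount == 0)
		free(obj);
}

static void detach_obj(struct obj *obj)
{
	bool need_drop = false;

	pthread_mutex_lock(&table_lock);
	if (*obj->slot == obj) {	/* still installed in the shared slot? */
		*obj->slot = NULL;	/* clear the slot under the lock... */
		need_drop = true;	/* ...but defer the ref drop */
	}
	pthread_mutex_unlock(&table_lock);

	if (need_drop)
		obj_put(obj);		/* safe: lock no longer held */
}

int main(void)
{
	struct obj *slot = NULL;
	struct obj *obj = malloc(sizeof(*obj));

	obj->refcount = 1;		/* the slot's reference */
	obj->slot = &slot;
	slot = obj;

	detach_obj(obj);		/* clears the slot, then drops the last ref */
	return 0;
}
```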
| 941 | +/* |
|---|
| 942 | + * Drop the active count on a bundle. |
|---|
| 943 | + */ |
|---|
| 944 | +static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle) |
|---|
| 945 | +{ |
|---|
| 946 | + struct rxrpc_local *local = bundle->params.local; |
|---|
| 947 | + bool need_put = false; |
|---|
| 948 | + |
|---|
| 949 | + if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) { |
|---|
| 950 | + if (!bundle->params.exclusive) { |
|---|
| 951 | + _debug("erase bundle"); |
|---|
| 952 | + rb_erase(&bundle->local_node, &local->client_bundles); |
|---|
| 953 | + need_put = true; |
|---|
| 954 | + } |
|---|
| 955 | + |
|---|
| 956 | + spin_unlock(&local->client_bundles_lock); |
|---|
| 957 | + if (need_put) |
|---|
| 958 | + rxrpc_put_bundle(bundle); |
|---|
| 959 | + } |
|---|
| 933 | 960 | } |
|---|
| 934 | 961 | |
|---|
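rxrpc_deactivate_bundle() above leans on atomic_dec_and_lock(): the lock guarding the bundle tree is only taken when the active count actually reaches zero, so the common path stays lock-free. The following is a rough userspace model of that primitive, under the assumption that a CAS fast path plus a locked final decrement captures the behaviour; it is not the kernel implementation.

```c
/* Toy equivalent of the atomic_dec_and_lock() idiom used by
 * rxrpc_deactivate_bundle(): only take the lock (and only tear the object
 * out of the shared tree) when the active count drops to zero.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns true with *lock held iff *active dropped to zero. */
static bool dec_and_lock(atomic_int *active, pthread_mutex_t *lock)
{
	int old = atomic_load(active);

	/* Fast path: if we're not the last user, avoid the lock entirely. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(active, &old, old - 1))
			return false;
	}

	/* Slow path: take the lock, then do the final decrement under it so
	 * that lookups holding the lock see a consistent count.
	 */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(active, 1) == 1)
		return true;		/* last user; lock stays held for the caller */
	pthread_mutex_unlock(lock);
	return false;
}

int main(void)
{
	atomic_int active = 2;

	printf("last user? %d\n", dec_and_lock(&active, &tree_lock));	/* 0 */
	if (dec_and_lock(&active, &tree_lock)) {			/* 1 */
		printf("last user - erase from tree here\n");
		pthread_mutex_unlock(&tree_lock);
	}
	return 0;
}
```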
| 935 | 962 | /* |
|---|
| 936 | 963 | * Clean up a dead client connection. |
|---|
| 937 | 964 | */ |
|---|
| 938 | | -static struct rxrpc_connection * |
|---|
| 939 | | -rxrpc_put_one_client_conn(struct rxrpc_connection *conn) |
|---|
| 965 | +static void rxrpc_kill_client_conn(struct rxrpc_connection *conn) |
|---|
| 940 | 966 | { |
|---|
| 941 | | - struct rxrpc_connection *next = NULL; |
|---|
| 942 | 967 | struct rxrpc_local *local = conn->params.local; |
|---|
| 943 | 968 | struct rxrpc_net *rxnet = local->rxnet; |
|---|
| 944 | | - unsigned int nr_conns; |
|---|
| 969 | + |
|---|
| 970 | + _enter("C=%x", conn->debug_id); |
|---|
| 945 | 971 | |
|---|
| 946 | 972 | trace_rxrpc_client(conn, -1, rxrpc_client_cleanup); |
|---|
| 947 | | - |
|---|
| 948 | | - if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) { |
|---|
| 949 | | - spin_lock(&local->client_conns_lock); |
|---|
| 950 | | - if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, |
|---|
| 951 | | - &conn->flags)) |
|---|
| 952 | | - rb_erase(&conn->client_node, &local->client_conns); |
|---|
| 953 | | - spin_unlock(&local->client_conns_lock); |
|---|
| 954 | | - } |
|---|
| 973 | + atomic_dec(&rxnet->nr_client_conns); |
|---|
| 955 | 974 | |
|---|
| 956 | 975 | rxrpc_put_client_connection_id(conn); |
|---|
| 957 | | - |
|---|
| 958 | | - ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE); |
|---|
| 959 | | - |
|---|
| 960 | | - if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) { |
|---|
| 961 | | - trace_rxrpc_client(conn, -1, rxrpc_client_uncount); |
|---|
| 962 | | - spin_lock(&rxnet->client_conn_cache_lock); |
|---|
| 963 | | - nr_conns = --rxnet->nr_client_conns; |
|---|
| 964 | | - |
|---|
| 965 | | - if (nr_conns < rxrpc_max_client_connections && |
|---|
| 966 | | - !list_empty(&rxnet->waiting_client_conns)) { |
|---|
| 967 | | - next = list_entry(rxnet->waiting_client_conns.next, |
|---|
| 968 | | - struct rxrpc_connection, cache_link); |
|---|
| 969 | | - rxrpc_get_connection(next); |
|---|
| 970 | | - rxrpc_activate_conn(rxnet, next); |
|---|
| 971 | | - } |
|---|
| 972 | | - |
|---|
| 973 | | - spin_unlock(&rxnet->client_conn_cache_lock); |
|---|
| 974 | | - } |
|---|
| 975 | | - |
|---|
| 976 | 976 | rxrpc_kill_connection(conn); |
|---|
| 977 | | - if (next) |
|---|
| 978 | | - rxrpc_activate_channels(next); |
|---|
| 979 | | - |
|---|
| 980 | | - /* We need to get rid of the temporary ref we took upon next, but we |
|---|
| 981 | | - * can't call rxrpc_put_connection() recursively. |
|---|
| 982 | | - */ |
|---|
| 983 | | - return next; |
|---|
| 984 | 977 | } |
|---|
| 985 | 978 | |
|---|
| 986 | 979 | /* |
|---|
| .. | .. |
|---|
| 990 | 983 | { |
|---|
| 991 | 984 | const void *here = __builtin_return_address(0); |
|---|
| 992 | 985 | unsigned int debug_id = conn->debug_id; |
|---|
| 993 | | - int n; |
|---|
| 986 | + bool dead; |
|---|
| 987 | + int r; |
|---|
| 994 | 988 | |
|---|
| 995 | | - do { |
|---|
| 996 | | - n = atomic_dec_return(&conn->usage); |
|---|
| 997 | | - trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here); |
|---|
| 998 | | - if (n > 0) |
|---|
| 999 | | - return; |
|---|
| 1000 | | - ASSERTCMP(n, >=, 0); |
|---|
| 1001 | | - |
|---|
| 1002 | | - conn = rxrpc_put_one_client_conn(conn); |
|---|
| 1003 | | - } while (conn); |
|---|
| 1004 | | -} |
|---|
| 1005 | | - |
|---|
| 1006 | | -/* |
|---|
| 1007 | | - * Kill the longest-active client connections to make room for new ones. |
|---|
| 1008 | | - */ |
|---|
| 1009 | | -static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet) |
|---|
| 1010 | | -{ |
|---|
| 1011 | | - struct rxrpc_connection *conn; |
|---|
| 1012 | | - unsigned int nr_conns = rxnet->nr_client_conns; |
|---|
| 1013 | | - unsigned int nr_active, limit; |
|---|
| 1014 | | - |
|---|
| 1015 | | - _enter(""); |
|---|
| 1016 | | - |
|---|
| 1017 | | - ASSERTCMP(nr_conns, >=, 0); |
|---|
| 1018 | | - if (nr_conns < rxrpc_max_client_connections) { |
|---|
| 1019 | | - _leave(" [ok]"); |
|---|
| 1020 | | - return; |
|---|
| 1021 | | - } |
|---|
| 1022 | | - limit = rxrpc_reap_client_connections; |
|---|
| 1023 | | - |
|---|
| 1024 | | - spin_lock(&rxnet->client_conn_cache_lock); |
|---|
| 1025 | | - nr_active = rxnet->nr_active_client_conns; |
|---|
| 1026 | | - |
|---|
| 1027 | | - while (nr_active > limit) { |
|---|
| 1028 | | - ASSERT(!list_empty(&rxnet->active_client_conns)); |
|---|
| 1029 | | - conn = list_entry(rxnet->active_client_conns.next, |
|---|
| 1030 | | - struct rxrpc_connection, cache_link); |
|---|
| 1031 | | - ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE, |
|---|
| 1032 | | - conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE); |
|---|
| 1033 | | - |
|---|
| 1034 | | - if (list_empty(&conn->waiting_calls)) { |
|---|
| 1035 | | - trace_rxrpc_client(conn, -1, rxrpc_client_to_culled); |
|---|
| 1036 | | - conn->cache_state = RXRPC_CONN_CLIENT_CULLED; |
|---|
| 1037 | | - list_del_init(&conn->cache_link); |
|---|
| 1038 | | - } else { |
|---|
| 1039 | | - trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting); |
|---|
| 1040 | | - conn->cache_state = RXRPC_CONN_CLIENT_WAITING; |
|---|
| 1041 | | - list_move_tail(&conn->cache_link, |
|---|
| 1042 | | - &rxnet->waiting_client_conns); |
|---|
| 1043 | | - } |
|---|
| 1044 | | - |
|---|
| 1045 | | - nr_active--; |
|---|
| 1046 | | - } |
|---|
| 1047 | | - |
|---|
| 1048 | | - rxnet->nr_active_client_conns = nr_active; |
|---|
| 1049 | | - spin_unlock(&rxnet->client_conn_cache_lock); |
|---|
| 1050 | | - ASSERTCMP(nr_active, >=, 0); |
|---|
| 1051 | | - _leave(" [culled]"); |
|---|
| 989 | + dead = __refcount_dec_and_test(&conn->ref, &r); |
|---|
| 990 | + trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, r - 1, here); |
|---|
| 991 | + if (dead) |
|---|
| 992 | + rxrpc_kill_client_conn(conn); |
|---|
| 1052 | 993 | } |
|---|
| 1053 | 994 | |
|---|
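The rewritten put path above drops the old "hand back the next conn to avoid recursion" dance: with bundles there is no waiting-connection queue to kick, so a plain dec-and-test followed by rxrpc_kill_client_conn() suffices. The sketch below models the dec-and-test-with-old-value shape that the trace call suggests (logging the post-put count as `r - 1`); it is a toy with C11 atomics, not the refcount_t implementation, which additionally handles saturation.

```c
/* Toy model of the dec-and-test-with-old-value shape used by the new
 * rxrpc_put_client_conn() path. Illustrative only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool toy_refcount_dec_and_test(atomic_int *ref, int *oldp)
{
	int old = atomic_fetch_sub(ref, 1);	/* returns the pre-decrement value */

	*oldp = old;
	return old == 1;			/* we dropped the final reference */
}

int main(void)
{
	atomic_int ref = 2;
	int r;

	bool dead = toy_refcount_dec_and_test(&ref, &r);
	printf("trace: count now %d, dead=%d\n", r - 1, dead);	/* 1, 0 */

	dead = toy_refcount_dec_and_test(&ref, &r);
	printf("trace: count now %d, dead=%d\n", r - 1, dead);	/* 0, 1 */
	return 0;
}
```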
| 1054 | 995 | /* |
|---|
| .. | .. |
|---|
| 1082 | 1023 | /* We keep an estimate of what the number of conns ought to be after |
|---|
| 1083 | 1024 | * we've discarded some so that we don't overdo the discarding. |
|---|
| 1084 | 1025 | */ |
|---|
| 1085 | | - nr_conns = rxnet->nr_client_conns; |
|---|
| 1026 | + nr_conns = atomic_read(&rxnet->nr_client_conns); |
|---|
| 1086 | 1027 | |
|---|
| 1087 | 1028 | next: |
|---|
| 1088 | 1029 | spin_lock(&rxnet->client_conn_cache_lock); |
|---|
| .. | .. |
|---|
| 1092 | 1033 | |
|---|
| 1093 | 1034 | conn = list_entry(rxnet->idle_client_conns.next, |
|---|
| 1094 | 1035 | struct rxrpc_connection, cache_link); |
|---|
| 1095 | | - ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags)); |
|---|
| 1096 | 1036 | |
|---|
| 1097 | 1037 | if (!rxnet->kill_all_client_conns) { |
|---|
| 1098 | 1038 | /* If the number of connections is over the reap limit, we |
|---|
| .. | .. |
|---|
| 1114 | 1054 | } |
|---|
| 1115 | 1055 | |
|---|
| 1116 | 1056 | trace_rxrpc_client(conn, -1, rxrpc_client_discard); |
|---|
| 1117 | | - if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags)) |
|---|
| 1118 | | - BUG(); |
|---|
| 1119 | | - conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; |
|---|
| 1120 | 1057 | list_del_init(&conn->cache_link); |
|---|
| 1121 | 1058 | |
|---|
| 1122 | 1059 | spin_unlock(&rxnet->client_conn_cache_lock); |
|---|
| 1123 | 1060 | |
|---|
| 1124 | | - /* When we cleared the EXPOSED flag, we took on responsibility for the |
|---|
| 1125 | | - * reference that that had on the usage count. We deal with that here. |
|---|
| 1126 | | - * If someone re-sets the flag and re-gets the ref, that's fine. |
|---|
| 1127 | | - */ |
|---|
| 1128 | | - rxrpc_put_connection(conn); |
|---|
| 1061 | + rxrpc_unbundle_conn(conn); |
|---|
| 1062 | + rxrpc_put_connection(conn); /* Drop the ->cache_link ref */ |
|---|
| 1063 | + |
|---|
| 1129 | 1064 | nr_conns--; |
|---|
| 1130 | 1065 | goto next; |
|---|
| 1131 | 1066 | |
|---|
| .. | .. |
|---|
| 1139 | 1074 | */ |
|---|
| 1140 | 1075 | _debug("not yet"); |
|---|
| 1141 | 1076 | if (!rxnet->kill_all_client_conns) |
|---|
| 1142 | | - timer_reduce(&rxnet->client_conn_reap_timer, |
|---|
| 1143 | | - conn_expires_at); |
|---|
| 1077 | + timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at); |
|---|
| 1144 | 1078 | |
|---|
| 1145 | 1079 | out: |
|---|
| 1146 | 1080 | spin_unlock(&rxnet->client_conn_cache_lock); |
|---|
| .. | .. |
|---|
| 1175 | 1109 | { |
|---|
| 1176 | 1110 | struct rxrpc_connection *conn, *tmp; |
|---|
| 1177 | 1111 | struct rxrpc_net *rxnet = local->rxnet; |
|---|
| 1178 | | - unsigned int nr_active; |
|---|
| 1179 | 1112 | LIST_HEAD(graveyard); |
|---|
| 1180 | 1113 | |
|---|
| 1181 | 1114 | _enter(""); |
|---|
| 1182 | 1115 | |
|---|
| 1183 | 1116 | spin_lock(&rxnet->client_conn_cache_lock); |
|---|
| 1184 | | - nr_active = rxnet->nr_active_client_conns; |
|---|
| 1185 | 1117 | |
|---|
| 1186 | 1118 | list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns, |
|---|
| 1187 | 1119 | cache_link) { |
|---|
| 1188 | 1120 | if (conn->params.local == local) { |
|---|
| 1189 | | - ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE); |
|---|
| 1190 | | - |
|---|
| 1191 | 1121 | trace_rxrpc_client(conn, -1, rxrpc_client_discard); |
|---|
| 1192 | | - if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags)) |
|---|
| 1193 | | - BUG(); |
|---|
| 1194 | | - conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; |
|---|
| 1195 | 1122 | list_move(&conn->cache_link, &graveyard); |
|---|
| 1196 | | - nr_active--; |
|---|
| 1197 | 1123 | } |
|---|
| 1198 | 1124 | } |
|---|
| 1199 | 1125 | |
|---|
| 1200 | | - rxnet->nr_active_client_conns = nr_active; |
|---|
| 1201 | 1126 | spin_unlock(&rxnet->client_conn_cache_lock); |
|---|
| 1202 | | - ASSERTCMP(nr_active, >=, 0); |
|---|
| 1203 | 1127 | |
|---|
| 1204 | 1128 | while (!list_empty(&graveyard)) { |
|---|
| 1205 | 1129 | conn = list_entry(graveyard.next, |
|---|
| 1206 | 1130 | struct rxrpc_connection, cache_link); |
|---|
| 1207 | 1131 | list_del_init(&conn->cache_link); |
|---|
| 1208 | | - |
|---|
| 1132 | + rxrpc_unbundle_conn(conn); |
|---|
| 1209 | 1133 | rxrpc_put_connection(conn); |
|---|
| 1210 | 1134 | } |
|---|
| 1211 | 1135 | |
|---|