// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * There are flags of relevance to the cache:
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

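/*
 * Reaper tunables: the number of cached client connections above which the
 * fast idle expiry period is applied, and the normal and fast idle expiry
 * periods themselves (in jiffies).
 */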
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
{
	atomic_inc(&bundle->active);
}

/*
 * Release a connection ID for a client connection.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
					   struct rxrpc_connection *conn)
{
	idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
}

/*
 * Destroy the client connection ID tree.
 */
static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&local->conn_ids)) {
		idr_for_each_entry(&local->conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, refcount_read(&conn->ref));
		}
		BUG();
	}

	idr_destroy(&local->conn_ids);
}

/*
 * Allocate a connection bundle.
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
					       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->local = call->local;
		bundle->peer = rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
		bundle->key = key_get(call->key);
		bundle->security = call->security;
		bundle->exclusive = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
		bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
		bundle->service_id = call->dest_srx.srx_service;
		bundle->security_level = call->security_level;
		refcount_set(&bundle->ref, 1);
		atomic_set(&bundle->active, 1);
		INIT_LIST_HEAD(&bundle->waiting_calls);
		trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);
	}
	return bundle;
}

struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
				      enum rxrpc_bundle_trace why)
{
	int r;

	__refcount_inc(&bundle->ref, &r);
	trace_rxrpc_bundle(bundle->debug_id, r + 1, why);
	return bundle;
}

static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
	trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_free);
	rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
	key_put(bundle->key);
	kfree(bundle);
}

void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
{
	unsigned int id;
	bool dead;
	int r;

	if (bundle) {
		id = bundle->debug_id;
		dead = __refcount_dec_and_test(&bundle->ref, &r);
		trace_rxrpc_bundle(id, r - 1, why);
		if (dead)
			rxrpc_free_bundle(bundle);
	}
}

/*
 * Get rid of outstanding client connection preallocations when a local
 * endpoint is destroyed.
 */
void rxrpc_purge_client_connections(struct rxrpc_local *local)
{
	rxrpc_destroy_client_conn_ids(local);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	struct rxrpc_local *local = bundle->local;
	struct rxrpc_net *rxnet = local->rxnet;
	int id;

	_enter("");

	conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
	if (!conn)
		return ERR_PTR(-ENOMEM);

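	/* Cyclically allocate a client connection ID.  The ID forms the upper
	 * bits of the cid; the low RXRPC_CIDSHIFT bits select the channel
	 * within the connection.
	 */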
	id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
			      GFP_ATOMIC | __GFP_NOWARN);
	if (id < 0) {
		kfree(conn);
		return ERR_PTR(id);
	}

	refcount_set(&conn->ref, 1);
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	conn->proto.epoch = local->rxnet->epoch;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
	conn->local = rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
	conn->peer = rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
	conn->key = key_get(bundle->key);
	conn->security = bundle->security;
	conn->exclusive = bundle->exclusive;
	conn->upgrade = bundle->upgrade;
	conn->orig_service_id = bundle->service_id;
	conn->security_level = bundle->security_level;
	conn->state = RXRPC_CONN_CLIENT_UNSECURED;
	conn->service_id = conn->orig_service_id;

	if (conn->security == &rxrpc_no_security)
		conn->state = RXRPC_CONN_CLIENT;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	rxrpc_see_connection(conn, rxrpc_conn_new_client);

	atomic_inc(&rxnet->nr_client_conns);
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	return conn;
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		goto dont_reuse;

	rxnet = conn->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
	     conn->state != RXRPC_CONN_CLIENT) ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we want to kill off
	 * connections that, say, have an ID more than about four times the
	 * maximum number of client conns away from the current allocation
	 * point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&conn->local->conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}

/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 */
int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = call->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);

	_enter("{%px,%x,%u,%u}",
	       call->peer, key_serial(call->key), call->security_level,
	       upgrade);

	if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
		call->bundle = rxrpc_alloc_bundle(call, gfp);
		return call->bundle ? 0 : -ENOMEM;
	}

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

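		/* The tree is ordered by { peer, key, security level,
		 * upgrade } so that calls with the same parameters share a
		 * bundle.
		 */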
#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't. We need to add one. */
	candidate = rxrpc_alloc_bundle(call, gfp);
	if (!candidate)
		return -ENOMEM;

	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [new]", call->bundle->debug_id);
	return 0;

found_bundle_free:
	rxrpc_free_bundle(candidate);
found_bundle:
	call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
	rxrpc_activate_bundle(bundle);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [found]", call->bundle->debug_id);
	return 0;
}

/*
 * Allocate a new connection and add it into a bundle.
 */
static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
				     unsigned int slot)
{
	struct rxrpc_connection *conn, *old;
	unsigned int shift = slot * RXRPC_MAXCALLS;
	unsigned int i;

	old = bundle->conns[slot];
	if (old) {
		bundle->conns[slot] = NULL;
		trace_rxrpc_client(old, -1, rxrpc_client_replace);
		rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
	}

	conn = rxrpc_alloc_client_connection(bundle);
	if (IS_ERR(conn)) {
		bundle->alloc_error = PTR_ERR(conn);
		return false;
	}

	rxrpc_activate_bundle(bundle);
	conn->bundle_shift = shift;
	bundle->conns[slot] = conn;
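	/* Make all of the connection's channels available for allocation to
	 * waiting calls.
	 */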
	for (i = 0; i < RXRPC_MAXCALLS; i++)
		set_bit(shift + i, &bundle->avail_chans);
	return true;
}

/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
{
	int slot = -1, i, usable;

	_enter("");

	bundle->alloc_error = 0;

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;
		else if (slot == -1)
			slot = i;
	}

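	/* If no connection is currently usable and a service upgrade was
	 * requested, probe for the upgrade on the connection we're about to
	 * allocate.
	 */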
	if (!usable && bundle->upgrade)
		bundle->try_upgrade = true;

	if (!usable)
		goto alloc_conn;

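	/* All the channels on the usable connections are busy: add another
	 * connection if we're not probing for an upgrade and there's a free
	 * slot in the bundle.
	 */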
	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	_leave("");
	return usable;

alloc_conn:
	return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, wait_link);
	u32 call_id = chan->call_counter + 1;

	_enter("C=%x,%u", conn->debug_id, channel);

	list_del_init(&call->wait_link);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call, rxrpc_call_see_activate_client);
	call->conn = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
	call->cid = conn->proto.cid | channel;
	call->call_id = call_id;
	call->dest_srx.srx_service = conn->service_id;
	call->cong_ssthresh = call->peer->cong_ssthresh;
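	/* Begin in slow-start unless the call's congestion window already
	 * exceeds the slow-start threshold.
	 */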
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	chan->call_id = call_id;
	chan->call_debug_id = call->debug_id;
	chan->call = call;

	rxrpc_see_call(call, rxrpc_call_see_connected);
	trace_rxrpc_connect_call(call);
	call->tx_last_sent = ktime_get_real();
	rxrpc_start_call_timer(call);
	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
	wake_up(&call->waitq);
}

/*
 * Remove a connection from the idle list if it's on it.
 */
static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
{
	if (!list_empty(&conn->cache_link)) {
		list_del_init(&conn->cache_link);
		rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
	}
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

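	/* Whilst probing for a service upgrade, only the first channel may be
	 * activated so that the upgrade outcome is known before further calls
	 * are committed to the connection.
	 */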
	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);

		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans |= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}

/*
 * Connect waiting channels (called from the I/O thread).
 */
void rxrpc_connect_client_calls(struct rxrpc_local *local)
{
	struct rxrpc_call *call;

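	/* Move each new call onto its bundle's queue of waiting calls and then
	 * try to assign channels to the queue.
	 */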
	while ((call = list_first_entry_or_null(&local->new_client_calls,
						struct rxrpc_call, wait_link))
	       ) {
		struct rxrpc_bundle *bundle = call->bundle;

		spin_lock(&local->client_call_lock);
		list_move_tail(&call->wait_link, &bundle->waiting_calls);
		spin_unlock(&local->client_call_lock);

		if (rxrpc_bundle_has_space(bundle))
			rxrpc_activate_channels(bundle);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);

		spin_lock(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock(&call->peer->lock);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
{
	if (!local->kill_all_client_conns) {
		unsigned long now = jiffies;
		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

		if (local->rxnet->live)
			timer_reduce(&local->client_conn_reap_timer, reap_at);
	}
}

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_local *local = bundle->local;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->wait_link);
		return;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	if (WARN_ON(chan->call != call))
		return;

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		return;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow-on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	chan->call = NULL;
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans &= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while.  Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn, rxrpc_conn_get_idle);
		list_move_tail(&conn->cache_link, &local->idle_client_conns);

		rxrpc_set_client_reap_timer(local);
	}
}

/*
 * Remove a connection from a bundle.
 */
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	unsigned int bindex;
	int i;

	_enter("C=%x", conn->debug_id);

	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

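	/* Work out which slot in the bundle this connection occupies and clear
	 * it if it's still ours.
	 */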
	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		rxrpc_put_client_connection_id(bundle->local, conn);
		rxrpc_deactivate_bundle(bundle);
		rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
	}
}

/*
 * Drop the active count on a bundle.
 */
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
{
	struct rxrpc_local *local;
	bool need_put = false;

	if (!bundle)
		return;

	local = bundle->local;
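	/* If this drops the active count to zero, remove the bundle from the
	 * local endpoint's tree under the lock.  Exclusive bundles were never
	 * inserted into the tree, so they are simply left to be freed by the
	 * final ref put.
	 */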
	if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
		if (!bundle->exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		if (need_put)
			rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard);
	}
}

/*
 * Clean up a dead client connection.
 */
void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	rxrpc_put_client_connection_id(local, conn);
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&local->rxnet->nr_client_conns);

next:
	conn = list_first_entry_or_null(&local->idle_client_conns,
					struct rxrpc_connection, cache_link);
	if (!conn)
		return;

	if (!local->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

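	/* The connection at the head of the idle list has expired (or we're
	 * killing everything): drop its active count, take it off the list and
	 * unbundle it.
	 */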
	atomic_dec(&conn->active);
	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	rxrpc_unbundle_conn(conn);
	/* Drop the ->cache_link ref */
	rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!local->kill_all_client_conns)
		timer_reduce(&local->client_conn_reap_timer, conn_expires_at);

	_leave("");
}

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;

	_enter("");

	local->kill_all_client_conns = true;

	del_timer_sync(&local->client_conn_reap_timer);

	while ((conn = list_first_entry_or_null(&local->idle_client_conns,
						struct rxrpc_connection, cache_link))) {
		list_del_init(&conn->cache_link);
		atomic_dec(&conn->active);
		trace_rxrpc_client(conn, -1, rxrpc_client_discard);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
	}

	_leave(" [culled]");
}