// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* 1st Level Abstractions. */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(
					struct sctp_association *asoc,
					const struct sctp_endpoint *ep,
					const struct sock *sk,
					enum sctp_scope scope, gfp_t gfp)
{
	struct sctp_sock *sp;
	struct sctp_paramhdr *p;
	int i;

	/* Retrieve the SCTP per socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here. */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;
	asoc->base.net = sock_net(sk);

	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure. */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields. */
	refcount_set(&asoc->base.refcnt, 1);

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = sp->pf_retrans;
	asoc->ps_retrans = sp->ps_retrans;
	asoc->pf_expose = sp->pf_expose;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	asoc->flowlabel = sp->flowlabel;
	asoc->dscp = sp->dscp;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	asoc->subscribe = sp->subscribe;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 2960, Section 6.1 - An SCTP receiver MUST be able to receive
	 * a minimum of 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;

	/* Use my own max window until I learn something better. */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;
	asoc->strreset_outseq = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses. */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk. Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_generation = 1;

	/* Create an input queue. */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue. */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
			     0, gfp))
		goto fail_init;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;
	sctp_assoc_update_frag_point(asoc);

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
		goto stream_free;

	asoc->active_key_id = ep->active_key_id;
	asoc->strreset_enable = ep->strreset_enable;

	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
		       ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
		       ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (struct sctp_paramhdr *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

stream_free:
	sctp_stream_free(&asoc->stream);
fail_init:
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}

/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					      const struct sock *sk,
					      enum sctp_scope scope, gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = kzalloc(sizeof(*asoc), gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(assoc);

	pr_debug("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}

/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!list_empty(&asoc->asocs)) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk_acceptq_removed(sk);
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = true;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free stream information. */
	sctp_stream_free(&asoc->stream);

	if (asoc->strreset_chunk)
		sctp_chunk_free(asoc->strreset_chunk);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them?  To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
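	/* Each timer that is still pending holds a reference on the
	 * association, so drop that reference for every timer we cancel.
	 */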
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_unhash_transport(transport);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}

/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	if (unlikely(!asoc->base.dead)) {
		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
		return;
	}

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	kfree_rcu(asoc, rcu);
	SCTP_DBG_OBJCNT_DEC(assoc);
}

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* it's a changeover only if we already have a primary path
	 * that we are changing
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;
	sctp_ulpevent_notify_peer_addr_change(transport,
					      SCTP_ADDR_MADE_PRIM, 0);

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association. */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct sctp_transport *transport;
	struct list_head *pos;
	struct sctp_chunk *ch;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);
	/* Remove this peer from the transport hashtable */
	sctp_unhash_transport(peer);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	if (asoc->strreset_chunk &&
	    asoc->strreset_chunk->transport == peer) {
		asoc->strreset_chunk->transport = transport;
		sctp_transport_reset_reconf_timer(transport);
	}

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport an SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
				    transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
				      &active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
				       jiffies + active->rto))
				sctp_transport_hold(active);
	}

	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
		if (ch->transport == peer)
			ch->transport = NULL;

	asoc->peer.transport_count--;

	sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_REMOVED, 0);
	sctp_transport_free(peer);
}

/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* Set the port if it has not been set yet. */
	if (!asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call.  Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN)
			peer->state = SCTP_ACTIVE;
		return peer;
	}

	peer = sctp_transport_new(asoc->base.net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans. */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;
	/* And the primary path switchover retrans threshold */
	peer->ps_retrans = asoc->ps_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

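	/* For an IPv6 peer, a flowinfo supplied with the address overrides
	 * the association's default flow label.
	 */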
	if (addr->sa.sa_family == AF_INET6) {
		__be32 info = addr->v6.sin6_flowinfo;

		if (info) {
			peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
			peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
		} else {
			peer->flowlabel = asoc->flowlabel;
		}
	}
	peer->dscp = asoc->dscp;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	/* Initialize the pmtu of the transport. */
	sctp_transport_route(peer, NULL, sp);

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
				  min_t(int, peer->pathmtu, asoc->pathmtu) :
				  peer->pathmtu);

	peer->pmtu_pending = 0;

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *      min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Add this peer into the transport hashtable */
	if (sctp_hash_transport(peer)) {
		sctp_transport_free(peer);
		return NULL;
	}

	/* Attach the remote transport to our asoc. */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_ADDED, 0);

	/* If we do not yet have a primary path, set one. */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED)
		asoc->peer.retran_path = peer;

	return peer;
}

/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head *pos;
	struct list_head *temp;
	struct sctp_transport *transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do book keeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}

/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport *temp;
	struct sctp_transport *t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* if the current transport is not the primary one, delete it */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}

/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  enum sctp_transport_cmd command,
				  sctp_sn_error_t error)
{
	int spc_state = SCTP_ADDR_AVAILABLE;
	bool ulp_notify = true;

	/* Record the transition on the transport. */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (transport->state == SCTP_PF &&
		    asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
			ulp_notify = false;
		else if (transport->state == SCTP_UNCONFIRMED &&
			 error == SCTP_HEARTBEAT_SUCCESS)
			spc_state = SCTP_ADDR_CONFIRMED;

		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state.  Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED) {
			transport->state = SCTP_INACTIVE;
			spc_state = SCTP_ADDR_UNREACHABLE;
		} else {
			sctp_transport_dst_release(transport);
			ulp_notify = false;
		}
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		if (asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
			ulp_notify = false;
		else
			spc_state = SCTP_ADDR_POTENTIALLY_FAILED;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
	 * to the user.
	 */
	if (ulp_notify)
		sctp_ulpevent_notify_peer_addr_change(transport,
						      spc_state, error);

	/* Select new active and retran paths. */
	sctp_select_active_and_retran_path(asoc);
}

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	refcount_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (refcount_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;

	asoc->next_tsn++;
	asoc->unack_data++;

	return retval;
}

/* Compare two addresses to see if they match.  Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}

/* Return an ecne chunk to get prepended to a packet.
 * Note: We are sly and return a shared, prealloced chunk.  FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	if (!asoc->need_ecne)
		return NULL;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}

/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list.  Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 */

	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			    transmitted_list) {
		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {
		if (transport == active)
			continue;
		list_for_each_entry(chunk, &transport->transmitted,
				    transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}

/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = asoc->base.net;
	union sctp_subtype subtype;
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int first_time = 1;	/* is this the first time through the loop */
	int error = 0;
	int state;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while ((chunk = sctp_inq_pop(inqueue)) != NULL) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* If the first chunk in the packet is AUTH, do special
		 * processing specified in Section 6.3 of SCTP-AUTH spec
		 */
		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
			struct sctp_chunkhdr *next_hdr;

			next_hdr = sctp_inq_peek(inqueue);
			if (!next_hdr)
				goto normal;

			/* If the next chunk is COOKIE-ECHO, skip the AUTH
			 * chunk while saving a pointer to it so we can do
			 * Authentication later (during cookie-echo
			 * processing).
			 */
			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
				chunk->auth_chunk = skb_clone(chunk->skb,
							      GFP_ATOMIC);
				chunk->auth = 1;
				continue;
			}
		}

normal:
		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk.  This list has
		 *    been sent to the peer during the association setup.  It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk)) {
			asoc->peer.last_data_from = chunk->transport;
		} else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;

		if (first_time)
			first_time = 0;
	}
	sctp_association_put(asoc);
}

/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		sk_acceptq_removed(oldsk);

	/* Release references to the old endpoint and the sock. */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint. */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock. */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
int sctp_assoc_update(struct sctp_association *asoc,
		      struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.auth_capable = new->peer.auth_capable;
	asoc->peer.i = new->peer.i;

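	/* Reinitialize the TSN map so tracking starts from the peer's
	 * (possibly new) initial TSN.
	 */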
	if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			      asoc->peer.i.initial_tsn, GFP_ATOMIC))
		return -ENOMEM;

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_stream_clear(&asoc->stream);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;
	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				    transports)
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
			    !sctp_assoc_add_peer(asoc, &trans->ipaddr,
						 GFP_ATOMIC, trans->state))
				return -ENOMEM;

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;

		if (sctp_state(asoc, COOKIE_WAIT))
			sctp_stream_update(&asoc->stream, &new->stream);

		/* get a new assoc id if we don't have one yet. */
		if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
			return -ENOMEM;
	}

	/* SCTP-AUTH: Save the peer parameters from the new associations
	 * and also move the association shared keys over
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
 *
 *   When there is outbound data to send and the primary path
 *   becomes inactive (e.g., due to failures), or where the
 *   SCTP user explicitly requests to send data to an
 *   inactive destination transport address, before reporting
 *   an error to its ULP, the SCTP endpoint should try to send
 *   the data to an alternate active destination transport
 *   address if one exists.
 *
 *   When retransmitting data that timed out, if the endpoint
 *   is multihomed, it should consider each source-destination
 *   address pair in its retransmission selection policy.
 *   When retransmitting timed-out data, the endpoint should
 *   attempt to pick the most divergent source-destination
 *   pair from the original source-destination pair to which
 *   the packet was transmitted.
 *
 *   Note: Rules for picking the most divergent source-destination
 *   pair are an implementation decision and are not specified
 *   within this document.
 *
 * Our basic strategy is to round-robin transports in priorities
 * according to sctp_trans_score() e.g., if no such
 * transport with state SCTP_ACTIVE exists, round-robin through
 * SCTP_UNKNOWN, etc. You get the picture.
 */
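/* Rank a transport by state for path selection; a higher score marks a
 * more preferable transport.
 */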
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
	switch (trans->state) {
	case SCTP_ACTIVE:
		return 3;	/* best case */
	case SCTP_UNKNOWN:
		return 2;
	case SCTP_PF:
		return 1;
	default: /* case SCTP_INACTIVE */
		return 0;	/* worst case */
	}
}

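/* Break a tie between two transports of equal score: prefer the one with
 * the lower error count and, failing that, the one heard from most
 * recently.
 */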
static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
						   struct sctp_transport *trans2)
{
	if (trans1->error_count > trans2->error_count) {
		return trans2;
	} else if (trans1->error_count == trans2->error_count &&
		   ktime_after(trans2->last_time_heard,
			       trans1->last_time_heard)) {
		return trans2;
	} else {
		return trans1;
	}
}

static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
						    struct sctp_transport *best)
{
	u8 score_curr, score_best;

	if (best == NULL || curr == best)
		return curr;

	score_curr = sctp_trans_score(curr);
	score_best = sctp_trans_score(best);

	/* First, try a score-based selection if both transport states
	 * differ. If we're in a tie, let's try to make a more clever
	 * decision here based on error counts and last time heard.
	 */
	if (score_curr > score_best)
		return curr;
	else if (score_curr == score_best)
		return sctp_trans_elect_tie(best, curr);
	else
		return best;
}

void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans = asoc->peer.retran_path;
	struct sctp_transport *trans_next = NULL;

	/* We're done as we only have the one and only path. */
	if (asoc->peer.transport_count == 1)
		return;
	/* If active_path and retran_path are the same and active,
	 * then this is the only active path. Use it.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    asoc->peer.active_path->state == SCTP_ACTIVE)
		return;

	/* Iterate from retran_path's successor back to retran_path. */
	for (trans = list_next_entry(trans, transports); 1;
	     trans = list_next_entry(trans, transports)) {
		/* Manually skip the head element. */
		if (&trans->transports == &asoc->peer.transport_addr_list)
			continue;
		if (trans->state == SCTP_UNCONFIRMED)
			continue;
		trans_next = sctp_trans_elect_best(trans, trans_next);
		/* Active is good enough for immediate return. */
		if (trans_next->state == SCTP_ACTIVE)
			break;
		/* We've reached the end, time to update path. */
		if (trans == asoc->peer.retran_path)
			break;
	}

	asoc->peer.retran_path = trans_next;

	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}

static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
	struct sctp_transport *trans_pf = NULL;

	/* Look for the two most recently used active transports. */
	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
			    transports) {
		/* Skip uninteresting transports. */
		if (trans->state == SCTP_INACTIVE ||
		    trans->state == SCTP_UNCONFIRMED)
			continue;
		/* Keep track of the best PF transport from our
		 * list in case we don't find an active one.
		 */
		if (trans->state == SCTP_PF) {
			trans_pf = sctp_trans_elect_best(trans, trans_pf);
			continue;
		}
		/* For active transports, pick the most recent ones. */
		if (trans_pri == NULL ||
		    ktime_after(trans->last_time_heard,
				trans_pri->last_time_heard)) {
			trans_sec = trans_pri;
			trans_pri = trans;
		} else if (trans_sec == NULL ||
			   ktime_after(trans->last_time_heard,
				       trans_sec->last_time_heard)) {
			trans_sec = trans;
		}
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the primary
	 * path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source transport
	 * address) to use. [If the primary is active but not most recent,
	 * bump the most recently used transport.]
	 */
	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
	    asoc->peer.primary_path != trans_pri) {
		trans_sec = trans_pri;
		trans_pri = asoc->peer.primary_path;
	}

	/* We did not find anything useful for a possible retransmission
	 * path; either the primary path that we found is the same as
	 * the current one, or we didn't generally find an active one.
	 */
	if (trans_sec == NULL)
		trans_sec = trans_pri;

	/* If we failed to find a usable transport, just camp on the
	 * active or pick a PF iff it's the better choice.
	 */
	if (trans_pri == NULL) {
		trans_pri = sctp_trans_elect_best(asoc->peer.active_path,
						  trans_pf);
		trans_sec = trans_pri;
	}

	/* Set the active and retran transports. */
	asoc->peer.active_path = trans_pri;
	asoc->peer.retran_path = trans_sec;
}

struct sctp_transport *
sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
				  struct sctp_transport *last_sent_to)
{
	/* If this is the first time packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (last_sent_to == NULL) {
		return asoc->peer.active_path;
	} else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);

		return asoc->peer.retran_path;
	}
}

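/* Recompute the association's fragmentation point: the largest DATA
 * payload that fits the current PMTU, further clamped by any
 * user-requested fragment size and the maximum chunk length.
 */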
void sctp_assoc_update_frag_point(struct sctp_association *asoc)
{
	int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
				    sctp_datachk_len(&asoc->stream));

	if (asoc->user_frag)
		frag = min_t(int, frag, asoc->user_frag);

	frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
				sctp_datachk_len(&asoc->stream));

	asoc->frag_point = SCTP_TRUNC4(frag);
}

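/* Set the association PMTU, refreshing the fragmentation point if the
 * value actually changed.
 */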
void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
{
	if (asoc->pathmtu != pmtu) {
		asoc->pathmtu = pmtu;
		sctp_assoc_update_frag_point(asoc);
	}

	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
		 asoc->pathmtu, asoc->frag_point);
}

/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;

	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(t,
						   atomic_read(&t->mtu_info));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	sctp_assoc_set_pmtu(asoc, pmtu);
}

/* Should we send a SACK to update our peer? */
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = asoc->base.net;

	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
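		/* Update only if the window has grown by at least a full
		 * PMTU and the configured fraction of the receive buffer
		 * (sk_rcvbuf >> rwnd_upd_shift).
		 */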
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return true;
		break;
	default:
		break;
	}
	return false;
}

/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

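	/* Pay back any overflow debt (rwnd_over) accumulated while the peer
	 * sent beyond our advertised window; only the remainder reopens the
	 * window itself.
	 */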
	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold.  The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);

		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least the
	 * minimum of the association's PMTU and half of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;

		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
			 asoc->a_rwnd);

		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

		/* Stop the SACK timer. */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
		pr_debug("%s: association:%p has asoc->rwnd:%u, "
			 "asoc->rwnd_over:%u!\n", __func__, asoc,
			 asoc->rwnd, asoc->rwnd_over);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive.  Store the
	 * potential pressure overflow so that the window can be restored
	 * back to its original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over += len - asoc->rwnd;
		asoc->rwnd = 0;
	}

	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->rwnd_press);
}

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     enum sctp_scope scope, gfp_t gfp)
{
	struct sock *sk = asoc->base.sk;
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (sk->sk_family == PF_INET6) ? SCTP_ADDR6_ALLOWED : 0;
	if (!inet_v6_ipv6only(sk))
		flags |= SCTP_ADDR4_ALLOWED;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(asoc->base.net,
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
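	/* The packed peer address list is stored in the cookie directly
	 * after the embedded peer INIT chunk.
	 */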
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}

/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = gfpflags_allow_blocking(gfp);
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

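	/* The id is allocated under a spinlock, so preload the IDR when the
	 * caller's gfp allows blocking and do the allocation itself with
	 * GFP_NOWAIT.
	 */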
	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0, 1, 2 are used as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and
	 * SCTP_ALL_ASSOC, so an available id must be > SCTP_ALL_ASSOC.
	 */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
			       GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}

/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
		    htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (sctp_chunk_pending(ack))
			continue;
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}