/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING. If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* 1st Level Abstractions. */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(
					struct sctp_association *asoc,
					const struct sctp_endpoint *ep,
					const struct sock *sk,
					enum sctp_scope scope, gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_paramhdr *p;
	int i;

	/* Retrieve the SCTP per socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here. */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;

	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure. */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields. */
	refcount_set(&asoc->base.refcnt, 1);

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	asoc->flowlabel = sp->flowlabel;
	asoc->dscp = sp->dscp;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;

	/* Use my own max window until I learn something better. */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;
	asoc->strreset_outseq = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses. */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk. Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_generation = 1;

	/* Assume that the peer will tell us if he recognizes ASCONF
	 * as part of INIT exchange.
	 * The sctp_addip_noauth option is there for backward compatibility
	 * and will revert old behavior.
	 */
	if (net->sctp.addip_noauth)
		asoc->peer.asconf_capable = 1;

	/* Create an input queue. */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue. */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
			     0, gfp))
		goto fail_init;

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
		goto stream_free;

	asoc->active_key_id = ep->active_key_id;
	asoc->prsctp_enable = ep->prsctp_enable;
	asoc->reconf_enable = ep->reconf_enable;
	asoc->strreset_enable = ep->strreset_enable;

	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
		       ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
		       ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (struct sctp_paramhdr *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

stream_free:
	sctp_stream_free(&asoc->stream);
fail_init:
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}

/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					      const struct sock *sk,
					      enum sctp_scope scope, gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = kzalloc(sizeof(*asoc), gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(assoc);

	pr_debug("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}

/* Free this association if possible. There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!list_empty(&asoc->asocs)) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = true;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free stream information. */
	sctp_stream_free(&asoc->stream);

	if (asoc->strreset_chunk)
		sctp_chunk_free(asoc->strreset_chunk);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them? To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
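		/* del_timer() returns nonzero only when the timer was still
		 * pending; a pending timer holds a reference on the
		 * association, so drop that reference here.
		 */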
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_unhash_transport(transport);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}

/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	if (unlikely(!asoc->base.dead)) {
		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
		return;
	}

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	kfree(asoc);
	SCTP_DBG_OBJCNT_DEC(assoc);
}

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* it's a changeover only if we already have a primary path
	 * that we are changing
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association. */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head *pos;
	struct sctp_transport *transport;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);
	/* Remove this peer from the transport hashtable */
	sctp_unhash_transport(peer);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	if (asoc->strreset_chunk &&
	    asoc->strreset_chunk->transport == peer) {
		asoc->strreset_chunk->transport = transport;
		sctp_transport_reset_reconf_timer(transport);
	}

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport an SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off. The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;
		struct sctp_chunk *ch;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
				    transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
				      &active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
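		/* mod_timer() returns 0 when the timer was not already
		 * pending; in that case take a reference on the transport
		 * on behalf of the newly armed timer.
		 */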
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
				       jiffies + active->rto))
				sctp_transport_hold(active);
	}

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}

/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* Set the port if it has not been set yet. */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call. Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans. */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

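	/* Prefer a flow label carried in the peer address itself; fall
	 * back to the association default when none was supplied.
	 */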
	if (addr->sa.sa_family == AF_INET6) {
		__be32 info = addr->v6.sin6_flowinfo;

		if (info) {
			peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
			peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
		} else {
			peer->flowlabel = asoc->flowlabel;
		}
	}
	peer->dscp = asoc->dscp;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	/* Initialize the pmtu of the transport. */
	sctp_transport_route(peer, NULL, sp);

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
				  min_t(int, peer->pathmtu, asoc->pathmtu) :
				  peer->pathmtu);

	peer->pmtu_pending = 0;

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *	min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Add this peer into the transport hashtable */
	if (sctp_hash_transport(peer)) {
		sctp_transport_free(peer);
		return NULL;
	}

	/* Attach the remote transport to our asoc. */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* If we do not yet have a primary path, set one. */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}

/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head *pos;
	struct list_head *temp;
	struct sctp_transport *transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do book keeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}

/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport *temp;
	struct sctp_transport *t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* if the current transport is not the primary one, delete it */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}

/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  enum sctp_transport_cmd command,
				  sctp_sn_error_t error)
{
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition on the transport. */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
		 * active state and set cwnd to 1 MTU, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = asoc->pathmtu;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state. Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			sctp_transport_dst_release(transport);
			ulp_notify = false;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
	 * to the user.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);

		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths. */
	sctp_select_active_and_retran_path(asoc);
}

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	refcount_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (refcount_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1. That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;

	asoc->next_tsn++;
	asoc->unack_data++;

	return retval;
}

/* Compare two addresses to see if they match. Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}

/* Return an ecne chunk to get prepended to a packet.
 * Note: We are sly and return a shared, prealloced chunk. FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	if (!asoc->need_ecne)
		return NULL;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}

/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list. Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 *
	 */

	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			    transmitted_list) {

		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {

		if (transport == active)
			continue;
		list_for_each_entry(chunk, &transport->transmitted,
				    transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}

/* Do delayed input processing. This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	union sctp_subtype subtype;
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int first_time = 1;	/* is this the first time through the loop */
	int error = 0;
	int state;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
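	/* Take a reference across the receive loop so the association
	 * cannot be destroyed while we are still dereferencing it;
	 * processing a chunk below may drop other references.
	 */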
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* If the first chunk in the packet is AUTH, do special
		 * processing specified in Section 6.3 of SCTP-AUTH spec
		 */
		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
			struct sctp_chunkhdr *next_hdr;

			next_hdr = sctp_inq_peek(inqueue);
			if (!next_hdr)
				goto normal;

			/* If the next chunk is COOKIE-ECHO, skip the AUTH
			 * chunk while saving a pointer to it so we can do
			 * Authentication later (during cookie-echo
			 * processing).
			 */
			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
				chunk->auth_chunk = skb_clone(chunk->skb,
							      GFP_ATOMIC);
				chunk->auth = 1;
				continue;
			}
		}

normal:
		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk. This list has
		 *    been sent to the peer during the association setup. It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk. If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;

		if (first_time)
			first_time = 0;
	}
	sctp_association_put(asoc);
}

/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock. */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint. */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock. */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
int sctp_assoc_update(struct sctp_association *asoc,
		      struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.auth_capable = new->peer.auth_capable;
	asoc->peer.i = new->peer.i;

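	/* Re-initialize the TSN map to track the peer's (possibly new)
	 * initial TSN taken from the peer parameters copied in above.
	 */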
	if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			      asoc->peer.i.initial_tsn, GFP_ATOMIC))
		return -ENOMEM;

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_stream_clear(&asoc->stream);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				    transports)
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
			    !sctp_assoc_add_peer(asoc, &trans->ipaddr,
						 GFP_ATOMIC, trans->state))
				return -ENOMEM;

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;

		if (sctp_state(asoc, COOKIE_WAIT))
			sctp_stream_update(&asoc->stream, &new->stream);

		/* get a new assoc id if we don't have one yet. */
		if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
			return -ENOMEM;
	}

	/* SCTP-AUTH: Save the peer parameters from the new associations
	 * and also move the association shared keys over
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
 *
 *   When there is outbound data to send and the primary path
 *   becomes inactive (e.g., due to failures), or where the
 *   SCTP user explicitly requests to send data to an
 *   inactive destination transport address, before reporting
 *   an error to its ULP, the SCTP endpoint should try to send
 *   the data to an alternate active destination transport
 *   address if one exists.
 *
 *   When retransmitting data that timed out, if the endpoint
 *   is multihomed, it should consider each source-destination
 *   address pair in its retransmission selection policy.
 *   When retransmitting timed-out data, the endpoint should
 *   attempt to pick the most divergent source-destination
 *   pair from the original source-destination pair to which
 *   the packet was transmitted.
 *
 *   Note: Rules for picking the most divergent source-destination
 *   pair are an implementation decision and are not specified
 *   within this document.
 *
 * Our basic strategy is to round-robin transports in priorities
 * according to sctp_trans_score() e.g., if no such
 * transport with state SCTP_ACTIVE exists, round-robin through
 * SCTP_UNKNOWN, etc. You get the picture.
 */
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
	switch (trans->state) {
	case SCTP_ACTIVE:
		return 3;	/* best case */
	case SCTP_UNKNOWN:
		return 2;
	case SCTP_PF:
		return 1;
	default: /* case SCTP_INACTIVE */
		return 0;	/* worst case */
	}
}

static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
						   struct sctp_transport *trans2)
{
	if (trans1->error_count > trans2->error_count) {
		return trans2;
	} else if (trans1->error_count == trans2->error_count &&
		   ktime_after(trans2->last_time_heard,
			       trans1->last_time_heard)) {
		return trans2;
	} else {
		return trans1;
	}
}

static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
						    struct sctp_transport *best)
{
	u8 score_curr, score_best;

	if (best == NULL || curr == best)
		return curr;

	score_curr = sctp_trans_score(curr);
	score_best = sctp_trans_score(best);

	/* First, try a score-based selection if both transport states
	 * differ. If we're in a tie, lets try to make a more clever
	 * decision here based on error counts and last time heard.
	 */
	if (score_curr > score_best)
		return curr;
	else if (score_curr == score_best)
		return sctp_trans_elect_tie(best, curr);
	else
		return best;
}

void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans = asoc->peer.retran_path;
	struct sctp_transport *trans_next = NULL;

	/* We're done as we only have the one and only path. */
	if (asoc->peer.transport_count == 1)
		return;
	/* If active_path and retran_path are the same and active,
	 * then this is the only active path. Use it.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    asoc->peer.active_path->state == SCTP_ACTIVE)
		return;

	/* Iterate from retran_path's successor back to retran_path. */
	for (trans = list_next_entry(trans, transports); 1;
	     trans = list_next_entry(trans, transports)) {
		/* Manually skip the head element. */
		if (&trans->transports == &asoc->peer.transport_addr_list)
			continue;
		if (trans->state == SCTP_UNCONFIRMED)
			continue;
		trans_next = sctp_trans_elect_best(trans, trans_next);
		/* Active is good enough for immediate return. */
		if (trans_next->state == SCTP_ACTIVE)
			break;
		/* We've reached the end, time to update path. */
		if (trans == asoc->peer.retran_path)
			break;
	}

	asoc->peer.retran_path = trans_next;

	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}

static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
	struct sctp_transport *trans_pf = NULL;

	/* Look for the two most recently used active transports. */
	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
			    transports) {
		/* Skip uninteresting transports. */
		if (trans->state == SCTP_INACTIVE ||
		    trans->state == SCTP_UNCONFIRMED)
			continue;
		/* Keep track of the best PF transport from our
		 * list in case we don't find an active one.
		 */
		if (trans->state == SCTP_PF) {
			trans_pf = sctp_trans_elect_best(trans, trans_pf);
			continue;
		}
		/* For active transports, pick the most recent ones. */
		if (trans_pri == NULL ||
		    ktime_after(trans->last_time_heard,
				trans_pri->last_time_heard)) {
			trans_sec = trans_pri;
			trans_pri = trans;
		} else if (trans_sec == NULL ||
			   ktime_after(trans->last_time_heard,
				       trans_sec->last_time_heard)) {
			trans_sec = trans;
		}
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the primary
	 * path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source transport
	 * address) to use. [If the primary is active but not most recent,
	 * bump the most recently used transport.]
	 */
	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
	    asoc->peer.primary_path != trans_pri) {
		trans_sec = trans_pri;
		trans_pri = asoc->peer.primary_path;
	}

	/* We did not find anything useful for a possible retransmission
	 * path; either the primary path that we found is the same as
	 * the current one, or we didn't generally find an active one.
	 */
	if (trans_sec == NULL)
		trans_sec = trans_pri;

	/* If we failed to find a usable transport, just camp on the
	 * active or pick a PF iff it's the better choice.
	 */
	if (trans_pri == NULL) {
		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
		trans_sec = trans_pri;
	}

	/* Set the active and retran transports. */
	asoc->peer.active_path = trans_pri;
	asoc->peer.retran_path = trans_sec;
}

struct sctp_transport *
sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
				  struct sctp_transport *last_sent_to)
{
	/* If this is the first time packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (last_sent_to == NULL) {
		return asoc->peer.active_path;
	} else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);

		return asoc->peer.retran_path;
	}
}

void sctp_assoc_update_frag_point(struct sctp_association *asoc)
{
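	/* Largest user data payload per DATA chunk: start from what fits
	 * in the path MTU, honour any user-requested limit, cap at the
	 * maximum chunk length, and round down to a 4-byte boundary.
	 */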
	int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
				    sctp_datachk_len(&asoc->stream));

	if (asoc->user_frag)
		frag = min_t(int, frag, asoc->user_frag);

	frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
				sctp_datachk_len(&asoc->stream));

	asoc->frag_point = SCTP_TRUNC4(frag);
}

void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
{
	if (asoc->pathmtu != pmtu) {
		asoc->pathmtu = pmtu;
		sctp_assoc_update_frag_point(asoc);
	}

	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
		 asoc->pathmtu, asoc->frag_point);
}

/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;

	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(t,
						   atomic_read(&t->mtu_info));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	sctp_assoc_set_pmtu(asoc, pmtu);
}

/* Should we send a SACK to update our peer? */
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);

	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return true;
		break;
	default:
		break;
	}
	return false;
}

/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold. The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);

		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least
	 * the larger of the association's PMTU and a configurable fraction
	 * of the receive buffer (sk_rcvbuf >> rwnd_upd_shift).
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;

		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
			 asoc->a_rwnd);

		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

		/* Stop the SACK timer. */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
		pr_debug("%s: association:%p has asoc->rwnd:%u, "
			 "asoc->rwnd_over:%u!\n", __func__, asoc,
			 asoc->rwnd, asoc->rwnd_over);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive. Store the
	 * potential pressure overflow so that the window can be restored
	 * back to original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over += len - asoc->rwnd;
		asoc->rwnd = 0;
	}

	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->rwnd_press);
}

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     enum sctp_scope scope, gfp_t gfp)
{
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}

/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = gfpflags_allow_blocking(gfp);
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

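	/* idr_preload() fills a per-CPU cache so that the GFP_NOWAIT
	 * allocation done under the spinlock below is unlikely to fail;
	 * only do it when the caller's gfp mask allows blocking.
	 */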
	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0 is not a valid assoc_id, must be >= 1 */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}

/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
		    htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (sctp_chunk_pending(ack))
			continue;
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}