1 /*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/rhashtable.h>
38 #include <linux/sched/signal.h>
39
40 #include "core.h"
41 #include "name_table.h"
42 #include "node.h"
43 #include "link.h"
44 #include "name_distr.h"
45 #include "socket.h"
46 #include "bcast.h"
47 #include "netlink.h"
48 #include "group.h"
49
50 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
51 #define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
52 #define TIPC_FWD_MSG 1
53 #define TIPC_MAX_PORT 0xffffffff
54 #define TIPC_MIN_PORT 1
55 #define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
56
57 enum {
58 TIPC_LISTEN = TCP_LISTEN,
59 TIPC_ESTABLISHED = TCP_ESTABLISHED,
60 TIPC_OPEN = TCP_CLOSE,
61 TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
62 TIPC_CONNECTING = TCP_SYN_SENT,
63 };
64
65 struct sockaddr_pair {
66 struct sockaddr_tipc sock;
67 struct sockaddr_tipc member;
68 };
69
70 /**
71 * struct tipc_sock - TIPC socket structure
72 * @sk: socket - interacts with 'port' and with user via the socket API
73 * @conn_type: TIPC type used when connection was established
74 * @conn_instance: TIPC instance used when connection was established
75 * @published: non-zero if port has one or more associated names
76 * @max_pkt: maximum packet size "hint" used when building messages sent by port
77 * @portid: unique port identity in TIPC socket hash table
78 * @phdr: preformatted message header used when sending messages
79 * @cong_links: list of congested links
80 * @publications: list of publications for port
81 * @blocking_link: address of the congested link we are currently sleeping on
82 * @pub_count: total # of publications port has made during its lifetime
83 * @probe_unacked: last connection probe has not yet been acknowledged by peer
84 * @conn_timeout: the time we can wait for an unresponded setup request
85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
86 * @cong_link_cnt: number of congested links
87 * @snt_unacked: # messages sent by socket, and not yet acked by peer
88 * @rcv_unacked: # messages read by user, but not yet acked back to peer
89 * @peer: 'connected' peer for dgram/rdm
90 * @node: hash table node
91 * @mc_method: cookie for use between socket and broadcast layer
92 * @rcu: rcu struct for tipc_sock
93 */
94 struct tipc_sock {
95 struct sock sk;
96 u32 conn_type;
97 u32 conn_instance;
98 int published;
99 u32 max_pkt;
100 u32 portid;
101 struct tipc_msg phdr;
102 struct list_head cong_links;
103 struct list_head publications;
104 u32 pub_count;
105 uint conn_timeout;
106 atomic_t dupl_rcvcnt;
107 bool probe_unacked;
108 u16 cong_link_cnt;
109 u16 snt_unacked;
110 u16 snd_win;
111 u16 peer_caps;
112 u16 rcv_unacked;
113 u16 rcv_win;
114 struct sockaddr_tipc peer;
115 struct rhash_head node;
116 struct tipc_mc_method mc_method;
117 struct rcu_head rcu;
118 struct tipc_group *group;
119 bool group_is_open;
120 };
121
122 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
123 static void tipc_data_ready(struct sock *sk);
124 static void tipc_write_space(struct sock *sk);
125 static void tipc_sock_destruct(struct sock *sk);
126 static int tipc_release(struct socket *sock);
127 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
128 bool kern);
129 static void tipc_sk_timeout(struct timer_list *t);
130 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
131 struct tipc_name_seq const *seq);
132 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
133 struct tipc_name_seq const *seq);
134 static int tipc_sk_leave(struct tipc_sock *tsk);
135 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
136 static int tipc_sk_insert(struct tipc_sock *tsk);
137 static void tipc_sk_remove(struct tipc_sock *tsk);
138 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
139 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
140
141 static const struct proto_ops packet_ops;
142 static const struct proto_ops stream_ops;
143 static const struct proto_ops msg_ops;
144 static struct proto tipc_proto;
145 static const struct rhashtable_params tsk_rht_params;
146
147 static u32 tsk_own_node(struct tipc_sock *tsk)
148 {
149 return msg_prevnode(&tsk->phdr);
150 }
151
152 static u32 tsk_peer_node(struct tipc_sock *tsk)
153 {
154 return msg_destnode(&tsk->phdr);
155 }
156
157 static u32 tsk_peer_port(struct tipc_sock *tsk)
158 {
159 return msg_destport(&tsk->phdr);
160 }
161
162 static bool tsk_unreliable(struct tipc_sock *tsk)
163 {
164 return msg_src_droppable(&tsk->phdr) != 0;
165 }
166
167 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
168 {
169 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
170 }
171
172 static bool tsk_unreturnable(struct tipc_sock *tsk)
173 {
174 return msg_dest_droppable(&tsk->phdr) != 0;
175 }
176
177 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
178 {
179 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
180 }
181
182 static int tsk_importance(struct tipc_sock *tsk)
183 {
184 return msg_importance(&tsk->phdr);
185 }
186
187 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
188 {
189 if (imp > TIPC_CRITICAL_IMPORTANCE)
190 return -EINVAL;
191 msg_set_importance(&tsk->phdr, (u32)imp);
192 return 0;
193 }
194
195 static struct tipc_sock *tipc_sk(const struct sock *sk)
196 {
197 return container_of(sk, struct tipc_sock, sk);
198 }
199
200 static bool tsk_conn_cong(struct tipc_sock *tsk)
201 {
202 return tsk->snt_unacked > tsk->snd_win;
203 }
204
205 static u16 tsk_blocks(int len)
206 {
207 return ((len / FLOWCTL_BLK_SZ) + 1);
208 }
209
210 /* tsk_adv_blocks(): translate a buffer size in bytes to number of
211 * advertisable blocks, taking into account the ratio truesize(len)/len
212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
213 */
214 static u16 tsk_adv_blocks(int len)
215 {
216 return len / FLOWCTL_BLK_SZ / 4;
217 }
218
219 /* tsk_inc(): increment counter for sent or received data
220 * - If block based flow control is not supported by peer we
221 * fall back to message based flow control, incrementing the counter by one per message
222 */
223 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
224 {
225 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
226 return ((msglen / FLOWCTL_BLK_SZ) + 1);
227 return 1;
228 }
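/* Worked example of the block based flow control arithmetic above, assuming
 * FLOWCTL_BLK_SZ is 1024 bytes as defined in socket.h:
 *
 *	tsk_adv_blocks(65536) = 65536 / 1024 / 4 = 16	(blocks advertised for
 *							 a 64 KB receive buffer)
 *	tsk_inc(tsk, 5000)    = 5000 / 1024 + 1   = 5	(blocks consumed by a
 *							 5000 byte message)
 *
 * The divide-by-4 in tsk_adv_blocks() keeps the advertised window safe even
 * when truesize(len)/len approaches its worst-case ratio.
 */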
229
230 /**
231 * tsk_advance_rx_queue - discard first buffer in socket receive queue
232 *
233 * Caller must hold socket lock
234 */
235 static void tsk_advance_rx_queue(struct sock *sk)
236 {
237 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
238 }
239
240 /* tipc_sk_respond() : send response message back to sender
241 */
242 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
243 {
244 u32 selector;
245 u32 dnode;
246 u32 onode = tipc_own_addr(sock_net(sk));
247
248 if (!tipc_msg_reverse(onode, &skb, err))
249 return;
250
251 dnode = msg_destnode(buf_msg(skb));
252 selector = msg_origport(buf_msg(skb));
253 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
254 }
255
256 /**
257 * tsk_rej_rx_queue - reject all buffers in socket receive queue
258 *
259 * Caller must hold socket lock
260 */
261 static void tsk_rej_rx_queue(struct sock *sk)
262 {
263 struct sk_buff *skb;
264
265 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
266 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
267 }
268
269 static bool tipc_sk_connected(struct sock *sk)
270 {
271 return sk->sk_state == TIPC_ESTABLISHED;
272 }
273
274 /* tipc_sk_type_connectionless - check if the socket is a datagram socket
275 * @sk: socket
276 *
277 * Returns true if connectionless, false otherwise
278 */
279 static bool tipc_sk_type_connectionless(struct sock *sk)
280 {
281 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
282 }
283
284 /* tsk_peer_msg - verify if message was sent by connected port's peer
285 *
286 * Handles cases where the node's network address has changed from
287 * the default of <0.0.0> to its configured setting.
288 */
289 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
290 {
291 struct sock *sk = &tsk->sk;
292 u32 self = tipc_own_addr(sock_net(sk));
293 u32 peer_port = tsk_peer_port(tsk);
294 u32 orig_node, peer_node;
295
296 if (unlikely(!tipc_sk_connected(sk)))
297 return false;
298
299 if (unlikely(msg_origport(msg) != peer_port))
300 return false;
301
302 orig_node = msg_orignode(msg);
303 peer_node = tsk_peer_node(tsk);
304
305 if (likely(orig_node == peer_node))
306 return true;
307
308 if (!orig_node && peer_node == self)
309 return true;
310
311 if (!peer_node && orig_node == self)
312 return true;
313
314 return false;
315 }
316
317 /* tipc_set_sk_state - set the sk_state of the socket
318 * @sk: socket
319 *
320 * Caller must hold socket lock
321 *
322 * Returns 0 on success, errno otherwise
323 */
324 static int tipc_set_sk_state(struct sock *sk, int state)
325 {
326 int oldsk_state = sk->sk_state;
327 int res = -EINVAL;
328
329 switch (state) {
330 case TIPC_OPEN:
331 res = 0;
332 break;
333 case TIPC_LISTEN:
334 case TIPC_CONNECTING:
335 if (oldsk_state == TIPC_OPEN)
336 res = 0;
337 break;
338 case TIPC_ESTABLISHED:
339 if (oldsk_state == TIPC_CONNECTING ||
340 oldsk_state == TIPC_OPEN)
341 res = 0;
342 break;
343 case TIPC_DISCONNECTING:
344 if (oldsk_state == TIPC_CONNECTING ||
345 oldsk_state == TIPC_ESTABLISHED)
346 res = 0;
347 break;
348 }
349
350 if (!res)
351 sk->sk_state = state;
352
353 return res;
354 }
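/* Summary of the transitions accepted above:
 *
 *	any state        -> TIPC_OPEN
 *	TIPC_OPEN        -> TIPC_LISTEN | TIPC_CONNECTING | TIPC_ESTABLISHED
 *	TIPC_CONNECTING  -> TIPC_ESTABLISHED | TIPC_DISCONNECTING
 *	TIPC_ESTABLISHED -> TIPC_DISCONNECTING
 *
 * Any other request leaves sk_state unchanged and returns -EINVAL.
 */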
355
356 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
357 {
358 struct sock *sk = sock->sk;
359 int err = sock_error(sk);
360 int typ = sock->type;
361
362 if (err)
363 return err;
364 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
365 if (sk->sk_state == TIPC_DISCONNECTING)
366 return -EPIPE;
367 else if (!tipc_sk_connected(sk))
368 return -ENOTCONN;
369 }
370 if (!*timeout)
371 return -EAGAIN;
372 if (signal_pending(current))
373 return sock_intr_errno(*timeout);
374
375 return 0;
376 }
377
378 #define tipc_wait_for_cond(sock_, timeo_, condition_) \
379 ({ \
380 struct sock *sk_; \
381 int rc_; \
382 \
383 while ((rc_ = !(condition_))) { \
384 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
385 sk_ = (sock_)->sk; \
386 rc_ = tipc_sk_sock_err((sock_), timeo_); \
387 if (rc_) \
388 break; \
389 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
390 release_sock(sk_); \
391 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
392 sched_annotate_sleep(); \
393 lock_sock(sk_); \
394 remove_wait_queue(sk_sleep(sk_), &wait_); \
395 } \
396 rc_; \
397 })
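/* Typical use of the macro by the send paths below; the socket lock is
 * released across the wait and re-taken before the condition is re-evaluated,
 * so the condition must only depend on state that is re-read under the lock:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (rc)
 *		return rc;
 */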
398
399 /**
400 * tipc_sk_create - create a TIPC socket
401 * @net: network namespace (must be default network)
402 * @sock: pre-allocated socket structure
403 * @protocol: protocol indicator (must be 0)
404 * @kern: caused by kernel or by userspace?
405 *
406 * This routine creates additional data structures used by the TIPC socket,
407 * initializes them, and links them together.
408 *
409 * Returns 0 on success, errno otherwise
410 */
411 static int tipc_sk_create(struct net *net, struct socket *sock,
412 int protocol, int kern)
413 {
414 const struct proto_ops *ops;
415 struct sock *sk;
416 struct tipc_sock *tsk;
417 struct tipc_msg *msg;
418
419 /* Validate arguments */
420 if (unlikely(protocol != 0))
421 return -EPROTONOSUPPORT;
422
423 switch (sock->type) {
424 case SOCK_STREAM:
425 ops = &stream_ops;
426 break;
427 case SOCK_SEQPACKET:
428 ops = &packet_ops;
429 break;
430 case SOCK_DGRAM:
431 case SOCK_RDM:
432 ops = &msg_ops;
433 break;
434 default:
435 return -EPROTOTYPE;
436 }
437
438 /* Allocate socket's protocol area */
439 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
440 if (sk == NULL)
441 return -ENOMEM;
442
443 tsk = tipc_sk(sk);
444 tsk->max_pkt = MAX_PKT_DEFAULT;
445 INIT_LIST_HEAD(&tsk->publications);
446 INIT_LIST_HEAD(&tsk->cong_links);
447 msg = &tsk->phdr;
448
449 /* Finish initializing socket data structures */
450 sock->ops = ops;
451 sock_init_data(sock, sk);
452 tipc_set_sk_state(sk, TIPC_OPEN);
453 if (tipc_sk_insert(tsk)) {
454 pr_warn("Socket create failed; port number exhausted\n");
455 return -EINVAL;
456 }
457
458 /* Ensure tsk is visible before we read own_addr. */
459 smp_mb();
460
461 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
462 TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
463
464 msg_set_origport(msg, tsk->portid);
465 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
466 sk->sk_shutdown = 0;
467 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
468 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
469 sk->sk_data_ready = tipc_data_ready;
470 sk->sk_write_space = tipc_write_space;
471 sk->sk_destruct = tipc_sock_destruct;
472 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
473 tsk->group_is_open = true;
474 atomic_set(&tsk->dupl_rcvcnt, 0);
475
476 /* Start out with safe limits until we receive an advertised window */
477 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
478 tsk->rcv_win = tsk->snd_win;
479
480 if (tipc_sk_type_connectionless(sk)) {
481 tsk_set_unreturnable(tsk, true);
482 if (sock->type == SOCK_DGRAM)
483 tsk_set_unreliable(tsk, true);
484 }
485
486 return 0;
487 }
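/* Not part of the kernel build: a minimal user-space sketch, assuming
 * <sys/socket.h> and <linux/tipc.h> on a node with TIPC loaded, showing the
 * socket types routed to the three proto_ops tables by tipc_sk_create():
 *
 *	int rdm = socket(AF_TIPC, SOCK_RDM, 0);		(msg_ops)
 *	int str = socket(AF_TIPC, SOCK_STREAM, 0);	(stream_ops)
 *	int pkt = socket(AF_TIPC, SOCK_SEQPACKET, 0);	(packet_ops)
 *
 * Any protocol value other than 0 is rejected with -EPROTONOSUPPORT.
 */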
488
489 static void tipc_sk_callback(struct rcu_head *head)
490 {
491 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
492
493 sock_put(&tsk->sk);
494 }
495
496 /* Caller should hold socket lock for the socket. */
497 static void __tipc_shutdown(struct socket *sock, int error)
498 {
499 struct sock *sk = sock->sk;
500 struct tipc_sock *tsk = tipc_sk(sk);
501 struct net *net = sock_net(sk);
502 long timeout = CONN_TIMEOUT_DEFAULT;
503 u32 dnode = tsk_peer_node(tsk);
504 struct sk_buff *skb;
505
506 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
507 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
508 !tsk_conn_cong(tsk)));
509
510 /* Reject all unreceived messages, except on an active connection
511 * (which disconnects locally & sends a 'FIN+' to peer).
512 */
513 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
514 if (TIPC_SKB_CB(skb)->bytes_read) {
515 kfree_skb(skb);
516 continue;
517 }
518 if (!tipc_sk_type_connectionless(sk) &&
519 sk->sk_state != TIPC_DISCONNECTING) {
520 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
521 tipc_node_remove_conn(net, dnode, tsk->portid);
522 }
523 tipc_sk_respond(sk, skb, error);
524 }
525
526 if (tipc_sk_type_connectionless(sk))
527 return;
528
529 if (sk->sk_state != TIPC_DISCONNECTING) {
530 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
531 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
532 tsk_own_node(tsk), tsk_peer_port(tsk),
533 tsk->portid, error);
534 if (skb)
535 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
536 tipc_node_remove_conn(net, dnode, tsk->portid);
537 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
538 }
539 }
540
541 /**
542 * tipc_release - destroy a TIPC socket
543 * @sock: socket to destroy
544 *
545 * This routine cleans up any messages that are still queued on the socket.
546 * For DGRAM and RDM socket types, all queued messages are rejected.
547 * For SEQPACKET and STREAM socket types, the first message is rejected
548 * and any others are discarded. (If the first message on a STREAM socket
549 * is partially-read, it is discarded and the next one is rejected instead.)
550 *
551 * NOTE: Rejected messages are not necessarily returned to the sender! They
552 * are returned or discarded according to the "destination droppable" setting
553 * specified for the message by the sender.
554 *
555 * Returns 0 on success, errno otherwise
556 */
557 static int tipc_release(struct socket *sock)
558 {
559 struct sock *sk = sock->sk;
560 struct tipc_sock *tsk;
561
562 /*
563 * Exit if socket isn't fully initialized (occurs when a failed accept()
564 * releases a pre-allocated child socket that was never used)
565 */
566 if (sk == NULL)
567 return 0;
568
569 tsk = tipc_sk(sk);
570 lock_sock(sk);
571
572 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
573 sk->sk_shutdown = SHUTDOWN_MASK;
574 tipc_sk_leave(tsk);
575 tipc_sk_withdraw(tsk, 0, NULL);
576 sk_stop_timer(sk, &sk->sk_timer);
577 tipc_sk_remove(tsk);
578
579 sock_orphan(sk);
580 /* Reject any messages that accumulated in backlog queue */
581 release_sock(sk);
582 tipc_dest_list_purge(&tsk->cong_links);
583 tsk->cong_link_cnt = 0;
584 call_rcu(&tsk->rcu, tipc_sk_callback);
585 sock->sk = NULL;
586
587 return 0;
588 }
589
590 /**
591 * tipc_bind - associate or disassociate TIPC name(s) with a socket
592 * @sock: socket structure
593 * @uaddr: socket address describing name(s) and desired operation
594 * @uaddr_len: size of socket address data structure
595 *
596 * Name and name sequence binding is indicated using a positive scope value;
597 * a negative scope value unbinds the specified name. Specifying no name
598 * (i.e. a socket address length of 0) unbinds all names from the socket.
599 *
600 * Returns 0 on success, errno otherwise
601 *
602 * NOTE: This routine takes the socket lock, since publishing and withdrawing
603 * names updates the socket's publication list.
604 */
605 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
606 int uaddr_len)
607 {
608 struct sock *sk = sock->sk;
609 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
610 struct tipc_sock *tsk = tipc_sk(sk);
611 int res = -EINVAL;
612
613 lock_sock(sk);
614 if (unlikely(!uaddr_len)) {
615 res = tipc_sk_withdraw(tsk, 0, NULL);
616 goto exit;
617 }
618 if (tsk->group) {
619 res = -EACCES;
620 goto exit;
621 }
622 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
623 res = -EINVAL;
624 goto exit;
625 }
626 if (addr->family != AF_TIPC) {
627 res = -EAFNOSUPPORT;
628 goto exit;
629 }
630
631 if (addr->addrtype == TIPC_ADDR_NAME)
632 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
633 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
634 res = -EAFNOSUPPORT;
635 goto exit;
636 }
637
638 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
639 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
640 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
641 res = -EACCES;
642 goto exit;
643 }
644
645 res = (addr->scope >= 0) ?
646 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
647 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
648 exit:
649 release_sock(sk);
650 return res;
651 }
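/* Not part of the kernel build: a hedged user-space sketch of the
 * TIPC_ADDR_NAMESEQ path above, binding instances 100-199 of service type
 * 1000 (types below TIPC_RESERVED_TYPES are rejected with -EACCES):
 *
 *	struct sockaddr_tipc sa = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 100, .upper = 199 },
 *	};
 *
 *	if (bind(sd, (struct sockaddr *)&sa, sizeof(sa)))
 *		perror("bind");
 *
 * Calling bind() with an address length of 0 withdraws all names bound to
 * the socket, as described in the comment above.
 */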
652
653 /**
654 * tipc_getname - get port ID of socket or peer socket
655 * @sock: socket structure
656 * @uaddr: area for returned socket address
657 * @uaddr_len: area for returned length of socket address
658 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
659 *
660 * Returns 0 on success, errno otherwise
661 *
662 * NOTE: This routine doesn't need to take the socket lock since it only
663 * accesses socket information that is unchanging (or which changes in
664 * a completely predictable manner).
665 */
666 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
667 int peer)
668 {
669 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
670 struct sock *sk = sock->sk;
671 struct tipc_sock *tsk = tipc_sk(sk);
672
673 memset(addr, 0, sizeof(*addr));
674 if (peer) {
675 if ((!tipc_sk_connected(sk)) &&
676 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
677 return -ENOTCONN;
678 addr->addr.id.ref = tsk_peer_port(tsk);
679 addr->addr.id.node = tsk_peer_node(tsk);
680 } else {
681 addr->addr.id.ref = tsk->portid;
682 addr->addr.id.node = tipc_own_addr(sock_net(sk));
683 }
684
685 addr->addrtype = TIPC_ADDR_ID;
686 addr->family = AF_TIPC;
687 addr->scope = 0;
688 addr->addr.name.domain = 0;
689
690 return sizeof(*addr);
691 }
692
693 /**
694 * tipc_poll - read and possibly block on pollmask
695 * @file: file structure associated with the socket
696 * @sock: socket for which to calculate the poll bits
697 * @wait: poll table passed in by the poll/select core
698 *
699 * Returns pollmask value
700 *
701 * COMMENTARY:
702 * It appears that the usual socket locking mechanisms are not useful here
703 * since the pollmask info is potentially out-of-date the moment this routine
704 * exits. TCP and other protocols seem to rely on higher level poll routines
705 * to handle any preventable race conditions, so TIPC will do the same ...
706 *
707 * IMPORTANT: The fact that a read or write operation is indicated does NOT
708 * imply that the operation will succeed, merely that it should be performed
709 * and will not block.
710 */
711 static __poll_t tipc_poll(struct file *file, struct socket *sock,
712 poll_table *wait)
713 {
714 struct sock *sk = sock->sk;
715 struct tipc_sock *tsk = tipc_sk(sk);
716 __poll_t revents = 0;
717
718 sock_poll_wait(file, wait);
719
720 if (sk->sk_shutdown & RCV_SHUTDOWN)
721 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
722 if (sk->sk_shutdown == SHUTDOWN_MASK)
723 revents |= EPOLLHUP;
724
725 switch (sk->sk_state) {
726 case TIPC_ESTABLISHED:
727 case TIPC_CONNECTING:
728 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
729 revents |= EPOLLOUT;
730 /* fall thru' */
731 case TIPC_LISTEN:
732 if (!skb_queue_empty(&sk->sk_receive_queue))
733 revents |= EPOLLIN | EPOLLRDNORM;
734 break;
735 case TIPC_OPEN:
736 if (tsk->group_is_open && !tsk->cong_link_cnt)
737 revents |= EPOLLOUT;
738 if (!tipc_sk_type_connectionless(sk))
739 break;
740 if (skb_queue_empty(&sk->sk_receive_queue))
741 break;
742 revents |= EPOLLIN | EPOLLRDNORM;
743 break;
744 case TIPC_DISCONNECTING:
745 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
746 break;
747 }
748 return revents;
749 }
750
751 /**
752 * tipc_sendmcast - send multicast message
753 * @sock: socket structure
754 * @seq: destination address
755 * @msg: message to send
756 * @dlen: length of data to send
757 * @timeout: timeout to wait for wakeup
758 *
759 * Called from function tipc_sendmsg(), which has done all sanity checks
760 * Returns the number of bytes sent on success, or errno
761 */
762 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
763 struct msghdr *msg, size_t dlen, long timeout)
764 {
765 struct sock *sk = sock->sk;
766 struct tipc_sock *tsk = tipc_sk(sk);
767 struct tipc_msg *hdr = &tsk->phdr;
768 struct net *net = sock_net(sk);
769 int mtu = tipc_bcast_get_mtu(net);
770 struct tipc_mc_method *method = &tsk->mc_method;
771 struct sk_buff_head pkts;
772 struct tipc_nlist dsts;
773 int rc;
774
775 if (tsk->group)
776 return -EACCES;
777
778 /* Block or return if any destination link is congested */
779 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
780 if (unlikely(rc))
781 return rc;
782
783 /* Lookup destination nodes */
784 tipc_nlist_init(&dsts, tipc_own_addr(net));
785 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
786 seq->upper, &dsts);
787 if (!dsts.local && !dsts.remote)
788 return -EHOSTUNREACH;
789
790 /* Build message header */
791 msg_set_type(hdr, TIPC_MCAST_MSG);
792 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
793 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
794 msg_set_destport(hdr, 0);
795 msg_set_destnode(hdr, 0);
796 msg_set_nametype(hdr, seq->type);
797 msg_set_namelower(hdr, seq->lower);
798 msg_set_nameupper(hdr, seq->upper);
799
800 /* Build message as chain of buffers */
801 skb_queue_head_init(&pkts);
802 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
803
804 /* Send message if build was successful */
805 if (unlikely(rc == dlen))
806 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
807 &tsk->cong_link_cnt);
808
809 tipc_nlist_purge(&dsts);
810
811 return rc ? rc : dlen;
812 }
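/* Not part of the kernel build: a hedged user-space sketch of the multicast
 * path handled by tipc_sendmcast(), sending one datagram to every socket
 * bound to an instance in [100,199] of service type 1000:
 *
 *	struct sockaddr_tipc dst = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 1000, .lower = 100, .upper = 199 },
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */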
813
814 /**
815 * tipc_send_group_msg - send a message to a member in the group
816 * @net: network namespace
817 * @m: message to send
818 * @mb: group member
819 * @dnode: destination node
820 * @dport: destination port
821 * @dlen: total length of message data
822 */
823 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
824 struct msghdr *m, struct tipc_member *mb,
825 u32 dnode, u32 dport, int dlen)
826 {
827 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
828 struct tipc_mc_method *method = &tsk->mc_method;
829 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
830 struct tipc_msg *hdr = &tsk->phdr;
831 struct sk_buff_head pkts;
832 int mtu, rc;
833
834 /* Complete message header */
835 msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
836 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
837 msg_set_destport(hdr, dport);
838 msg_set_destnode(hdr, dnode);
839 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
840
841 /* Build message as chain of buffers */
842 skb_queue_head_init(&pkts);
843 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
844 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
845 if (unlikely(rc != dlen))
846 return rc;
847
848 /* Send message */
849 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
850 if (unlikely(rc == -ELINKCONG)) {
851 tipc_dest_push(&tsk->cong_links, dnode, 0);
852 tsk->cong_link_cnt++;
853 }
854
855 /* Update send window */
856 tipc_group_update_member(mb, blks);
857
858 /* A broadcast sent within next EXPIRE period must follow same path */
859 method->rcast = true;
860 method->mandatory = true;
861 return dlen;
862 }
863
864 /**
865 * tipc_send_group_unicast - send message to a member in the group
866 * @sock: socket structure
867 * @m: message to send
868 * @dlen: total length of message data
869 * @timeout: timeout to wait for wakeup
870 *
871 * Called from function tipc_sendmsg(), which has done all sanity checks
872 * Returns the number of bytes sent on success, or errno
873 */
874 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
875 int dlen, long timeout)
876 {
877 struct sock *sk = sock->sk;
878 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
879 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
880 struct tipc_sock *tsk = tipc_sk(sk);
881 struct tipc_group *grp = tsk->group;
882 struct net *net = sock_net(sk);
883 struct tipc_member *mb = NULL;
884 u32 node, port;
885 int rc;
886
887 node = dest->addr.id.node;
888 port = dest->addr.id.ref;
889 if (!port && !node)
890 return -EHOSTUNREACH;
891
892 /* Block or return if destination link or member is congested */
893 rc = tipc_wait_for_cond(sock, &timeout,
894 !tipc_dest_find(&tsk->cong_links, node, 0) &&
895 !tipc_group_cong(grp, node, port, blks, &mb));
896 if (unlikely(rc))
897 return rc;
898
899 if (unlikely(!mb))
900 return -EHOSTUNREACH;
901
902 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
903
904 return rc ? rc : dlen;
905 }
906
907 /**
908 * tipc_send_group_anycast - send message to any member with given identity
909 * @sock: socket structure
910 * @m: message to send
911 * @dlen: total length of message data
912 * @timeout: timeout to wait for wakeup
913 *
914 * Called from function tipc_sendmsg(), which has done all sanity checks
915 * Returns the number of bytes sent on success, or errno
916 */
917 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
918 int dlen, long timeout)
919 {
920 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
921 struct sock *sk = sock->sk;
922 struct tipc_sock *tsk = tipc_sk(sk);
923 struct list_head *cong_links = &tsk->cong_links;
924 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
925 struct tipc_group *grp = tsk->group;
926 struct tipc_msg *hdr = &tsk->phdr;
927 struct tipc_member *first = NULL;
928 struct tipc_member *mbr = NULL;
929 struct net *net = sock_net(sk);
930 u32 node, port, exclude;
931 struct list_head dsts;
932 u32 type, inst, scope;
933 int lookups = 0;
934 int dstcnt, rc;
935 bool cong;
936
937 INIT_LIST_HEAD(&dsts);
938
939 type = msg_nametype(hdr);
940 inst = dest->addr.name.name.instance;
941 scope = msg_lookup_scope(hdr);
942 exclude = tipc_group_exclude(grp);
943
944 while (++lookups < 4) {
945 first = NULL;
946
947 /* Look for a non-congested destination member, if any */
948 while (1) {
949 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
950 &dstcnt, exclude, false))
951 return -EHOSTUNREACH;
952 tipc_dest_pop(&dsts, &node, &port);
953 cong = tipc_group_cong(grp, node, port, blks, &mbr);
954 if (!cong)
955 break;
956 if (mbr == first)
957 break;
958 if (!first)
959 first = mbr;
960 }
961
962 /* Start over if destination was not in member list */
963 if (unlikely(!mbr))
964 continue;
965
966 if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
967 break;
968
969 /* Block or return if destination link or member is congested */
970 rc = tipc_wait_for_cond(sock, &timeout,
971 !tipc_dest_find(cong_links, node, 0) &&
972 !tipc_group_cong(grp, node, port,
973 blks, &mbr));
974 if (unlikely(rc))
975 return rc;
976
977 /* Send, unless destination disappeared while waiting */
978 if (likely(mbr))
979 break;
980 }
981
982 if (unlikely(lookups >= 4))
983 return -EHOSTUNREACH;
984
985 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
986
987 return rc ? rc : dlen;
988 }
989
990 /**
991 * tipc_send_group_bcast - send message to all members in communication group
992 * @sk: socket structure
993 * @m: message to send
994 * @dlen: total length of message data
995 * @timeout: timeout to wait for wakeup
996 *
997 * Called from function tipc_sendmsg(), which has done all sanity checks
998 * Returns the number of bytes sent on success, or errno
999 */
1000 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1001 int dlen, long timeout)
1002 {
1003 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1004 struct sock *sk = sock->sk;
1005 struct net *net = sock_net(sk);
1006 struct tipc_sock *tsk = tipc_sk(sk);
1007 struct tipc_group *grp = tsk->group;
1008 struct tipc_nlist *dsts = tipc_group_dests(grp);
1009 struct tipc_mc_method *method = &tsk->mc_method;
1010 bool ack = method->mandatory && method->rcast;
1011 int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1012 struct tipc_msg *hdr = &tsk->phdr;
1013 int mtu = tipc_bcast_get_mtu(net);
1014 struct sk_buff_head pkts;
1015 int rc = -EHOSTUNREACH;
1016
1017 if (!dsts->local && !dsts->remote)
1018 return -EHOSTUNREACH;
1019
1020 /* Block or return if any destination link or member is congested */
1021 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
1022 !tipc_group_bc_cong(grp, blks));
1023 if (unlikely(rc))
1024 return rc;
1025
1026 /* Complete message header */
1027 if (dest) {
1028 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1029 msg_set_nameinst(hdr, dest->addr.name.name.instance);
1030 } else {
1031 msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1032 msg_set_nameinst(hdr, 0);
1033 }
1034 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1035 msg_set_destport(hdr, 0);
1036 msg_set_destnode(hdr, 0);
1037 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
1038
1039 /* Avoid getting stuck with repeated forced replicasts */
1040 msg_set_grp_bc_ack_req(hdr, ack);
1041
1042 /* Build message as chain of buffers */
1043 skb_queue_head_init(&pkts);
1044 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1045 if (unlikely(rc != dlen))
1046 return rc;
1047
1048 /* Send message */
1049 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1050 if (unlikely(rc))
1051 return rc;
1052
1053 /* Update broadcast sequence number and send windows */
1054 tipc_group_update_bc_members(tsk->group, blks, ack);
1055
1056 /* Broadcast link is now free to choose method for next broadcast */
1057 method->mandatory = false;
1058 method->expires = jiffies;
1059
1060 return dlen;
1061 }
1062
1063 /**
1064 * tipc_send_group_mcast - send message to all members with given identity
1065 * @sock: socket structure
1066 * @m: message to send
1067 * @dlen: total length of message data
1068 * @timeout: timeout to wait for wakeup
1069 *
1070 * Called from function tipc_sendmsg(), which has done all sanity checks
1071 * Returns the number of bytes sent on success, or errno
1072 */
1073 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1074 int dlen, long timeout)
1075 {
1076 struct sock *sk = sock->sk;
1077 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1078 struct tipc_sock *tsk = tipc_sk(sk);
1079 struct tipc_group *grp = tsk->group;
1080 struct tipc_msg *hdr = &tsk->phdr;
1081 struct net *net = sock_net(sk);
1082 u32 type, inst, scope, exclude;
1083 struct list_head dsts;
1084 u32 dstcnt;
1085
1086 INIT_LIST_HEAD(&dsts);
1087
1088 type = msg_nametype(hdr);
1089 inst = dest->addr.name.name.instance;
1090 scope = msg_lookup_scope(hdr);
1091 exclude = tipc_group_exclude(grp);
1092
1093 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
1094 &dstcnt, exclude, true))
1095 return -EHOSTUNREACH;
1096
1097 if (dstcnt == 1) {
1098 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1099 return tipc_send_group_unicast(sock, m, dlen, timeout);
1100 }
1101
1102 tipc_dest_list_purge(&dsts);
1103 return tipc_send_group_bcast(sock, m, dlen, timeout);
1104 }
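/* Not part of the kernel build: a hedged user-space sketch of joining a
 * communication group, which is what sets up the tsk->group used by the
 * tipc_send_group_*() functions above; struct tipc_group_req and
 * TIPC_GROUP_JOIN are assumed to come from <linux/tipc.h>:
 *
 *	struct tipc_group_req req = {
 *		.type     = 4711,
 *		.instance = 17,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	if (setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req)))
 *		perror("TIPC_GROUP_JOIN");
 */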
1105
1106 /**
1107 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1108 * @arrvq: queue with arriving messages, to be cloned after destination lookup
1109 * @inputq: queue with cloned messages, delivered to socket after dest lookup
1110 *
1111 * Multi-threaded: parallel calls with reference to same queues may occur
1112 */
1113 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1114 struct sk_buff_head *inputq)
1115 {
1116 u32 self = tipc_own_addr(net);
1117 u32 type, lower, upper, scope;
1118 struct sk_buff *skb, *_skb;
1119 u32 portid, onode;
1120 struct sk_buff_head tmpq;
1121 struct list_head dports;
1122 struct tipc_msg *hdr;
1123 int user, mtyp, hlen;
1124 bool exact;
1125
1126 __skb_queue_head_init(&tmpq);
1127 INIT_LIST_HEAD(&dports);
1128
1129 skb = tipc_skb_peek(arrvq, &inputq->lock);
1130 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1131 hdr = buf_msg(skb);
1132 user = msg_user(hdr);
1133 mtyp = msg_type(hdr);
1134 hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1135 onode = msg_orignode(hdr);
1136 type = msg_nametype(hdr);
1137
1138 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1139 spin_lock_bh(&inputq->lock);
1140 if (skb_peek(arrvq) == skb) {
1141 __skb_dequeue(arrvq);
1142 __skb_queue_tail(inputq, skb);
1143 }
1144 kfree_skb(skb);
1145 spin_unlock_bh(&inputq->lock);
1146 continue;
1147 }
1148
1149 /* Group messages require exact scope match */
1150 if (msg_in_group(hdr)) {
1151 lower = 0;
1152 upper = ~0;
1153 scope = msg_lookup_scope(hdr);
1154 exact = true;
1155 } else {
1156 /* TIPC_NODE_SCOPE means "any scope" in this context */
1157 if (onode == self)
1158 scope = TIPC_NODE_SCOPE;
1159 else
1160 scope = TIPC_CLUSTER_SCOPE;
1161 exact = false;
1162 lower = msg_namelower(hdr);
1163 upper = msg_nameupper(hdr);
1164 }
1165
1166 /* Create destination port list: */
1167 tipc_nametbl_mc_lookup(net, type, lower, upper,
1168 scope, exact, &dports);
1169
1170 /* Clone message per destination */
1171 while (tipc_dest_pop(&dports, NULL, &portid)) {
1172 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1173 if (_skb) {
1174 msg_set_destport(buf_msg(_skb), portid);
1175 __skb_queue_tail(&tmpq, _skb);
1176 continue;
1177 }
1178 pr_warn("Failed to clone mcast rcv buffer\n");
1179 }
1180 /* Append to inputq if not already done by other thread */
1181 spin_lock_bh(&inputq->lock);
1182 if (skb_peek(arrvq) == skb) {
1183 skb_queue_splice_tail_init(&tmpq, inputq);
1184 kfree_skb(__skb_dequeue(arrvq));
1185 }
1186 spin_unlock_bh(&inputq->lock);
1187 __skb_queue_purge(&tmpq);
1188 kfree_skb(skb);
1189 }
1190 tipc_sk_rcv(net, inputq);
1191 }
1192
1193 /**
1194 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
1195 * @tsk: receiving socket
1196 * @skb: pointer to message buffer.
1197 */
1198 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1199 struct sk_buff_head *inputq,
1200 struct sk_buff_head *xmitq)
1201 {
1202 struct tipc_msg *hdr = buf_msg(skb);
1203 u32 onode = tsk_own_node(tsk);
1204 struct sock *sk = &tsk->sk;
1205 int mtyp = msg_type(hdr);
1206 bool conn_cong;
1207
1208 /* Ignore if connection cannot be validated: */
1209 if (!tsk_peer_msg(tsk, hdr))
1210 goto exit;
1211
1212 if (unlikely(msg_errcode(hdr))) {
1213 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1214 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1215 tsk_peer_port(tsk));
1216 sk->sk_state_change(sk);
1217
1218 /* State change is ignored if socket already awake,
1219 * - convert msg to abort msg and add to inqueue
1220 */
1221 msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1222 msg_set_type(hdr, TIPC_CONN_MSG);
1223 msg_set_size(hdr, BASIC_H_SIZE);
1224 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1225 __skb_queue_tail(inputq, skb);
1226 return;
1227 }
1228
1229 tsk->probe_unacked = false;
1230
1231 if (mtyp == CONN_PROBE) {
1232 msg_set_type(hdr, CONN_PROBE_REPLY);
1233 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1234 __skb_queue_tail(xmitq, skb);
1235 return;
1236 } else if (mtyp == CONN_ACK) {
1237 conn_cong = tsk_conn_cong(tsk);
1238 tsk->snt_unacked -= msg_conn_ack(hdr);
1239 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1240 tsk->snd_win = msg_adv_win(hdr);
1241 if (conn_cong)
1242 sk->sk_write_space(sk);
1243 } else if (mtyp != CONN_PROBE_REPLY) {
1244 pr_warn("Received unknown CONN_PROTO msg\n");
1245 }
1246 exit:
1247 kfree_skb(skb);
1248 }
1249
1250 /**
1251 * tipc_sendmsg - send message in connectionless manner
1252 * @sock: socket structure
1253 * @m: message to send
1254 * @dsz: amount of user data to be sent
1255 *
1256 * Message must have a destination specified explicitly.
1257 * Used for SOCK_RDM and SOCK_DGRAM messages,
1258 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1259 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1260 *
1261 * Returns the number of bytes sent on success, or errno otherwise
1262 */
1263 static int tipc_sendmsg(struct socket *sock,
1264 struct msghdr *m, size_t dsz)
1265 {
1266 struct sock *sk = sock->sk;
1267 int ret;
1268
1269 lock_sock(sk);
1270 ret = __tipc_sendmsg(sock, m, dsz);
1271 release_sock(sk);
1272
1273 return ret;
1274 }
1275
1276 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1277 {
1278 struct sock *sk = sock->sk;
1279 struct net *net = sock_net(sk);
1280 struct tipc_sock *tsk = tipc_sk(sk);
1281 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1282 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1283 struct list_head *clinks = &tsk->cong_links;
1284 bool syn = !tipc_sk_type_connectionless(sk);
1285 struct tipc_group *grp = tsk->group;
1286 struct tipc_msg *hdr = &tsk->phdr;
1287 struct tipc_name_seq *seq;
1288 struct sk_buff_head pkts;
1289 u32 dport, dnode = 0;
1290 u32 type, inst;
1291 int mtu, rc;
1292
1293 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1294 return -EMSGSIZE;
1295
1296 if (likely(dest)) {
1297 if (unlikely(m->msg_namelen < sizeof(*dest)))
1298 return -EINVAL;
1299 if (unlikely(dest->family != AF_TIPC))
1300 return -EINVAL;
1301 }
1302
1303 if (grp) {
1304 if (!dest)
1305 return tipc_send_group_bcast(sock, m, dlen, timeout);
1306 if (dest->addrtype == TIPC_ADDR_NAME)
1307 return tipc_send_group_anycast(sock, m, dlen, timeout);
1308 if (dest->addrtype == TIPC_ADDR_ID)
1309 return tipc_send_group_unicast(sock, m, dlen, timeout);
1310 if (dest->addrtype == TIPC_ADDR_MCAST)
1311 return tipc_send_group_mcast(sock, m, dlen, timeout);
1312 return -EINVAL;
1313 }
1314
1315 if (unlikely(!dest)) {
1316 dest = &tsk->peer;
1317 if (!syn || dest->family != AF_TIPC)
1318 return -EDESTADDRREQ;
1319 }
1320
1321 if (unlikely(syn)) {
1322 if (sk->sk_state == TIPC_LISTEN)
1323 return -EPIPE;
1324 if (sk->sk_state != TIPC_OPEN)
1325 return -EISCONN;
1326 if (tsk->published)
1327 return -EOPNOTSUPP;
1328 if (dest->addrtype == TIPC_ADDR_NAME) {
1329 tsk->conn_type = dest->addr.name.name.type;
1330 tsk->conn_instance = dest->addr.name.name.instance;
1331 }
1332 }
1333
1334 seq = &dest->addr.nameseq;
1335 if (dest->addrtype == TIPC_ADDR_MCAST)
1336 return tipc_sendmcast(sock, seq, m, dlen, timeout);
1337
1338 if (dest->addrtype == TIPC_ADDR_NAME) {
1339 type = dest->addr.name.name.type;
1340 inst = dest->addr.name.name.instance;
1341 dnode = dest->addr.name.domain;
1342 msg_set_type(hdr, TIPC_NAMED_MSG);
1343 msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1344 msg_set_nametype(hdr, type);
1345 msg_set_nameinst(hdr, inst);
1346 msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
1347 dport = tipc_nametbl_translate(net, type, inst, &dnode);
1348 msg_set_destnode(hdr, dnode);
1349 msg_set_destport(hdr, dport);
1350 if (unlikely(!dport && !dnode))
1351 return -EHOSTUNREACH;
1352 } else if (dest->addrtype == TIPC_ADDR_ID) {
1353 dnode = dest->addr.id.node;
1354 msg_set_type(hdr, TIPC_DIRECT_MSG);
1355 msg_set_lookup_scope(hdr, 0);
1356 msg_set_destnode(hdr, dnode);
1357 msg_set_destport(hdr, dest->addr.id.ref);
1358 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1359 } else {
1360 return -EINVAL;
1361 }
1362
1363 /* Block or return if destination link is congested */
1364 rc = tipc_wait_for_cond(sock, &timeout,
1365 !tipc_dest_find(clinks, dnode, 0));
1366 if (unlikely(rc))
1367 return rc;
1368
1369 skb_queue_head_init(&pkts);
1370 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1371 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1372 if (unlikely(rc != dlen))
1373 return rc;
1374
1375 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1376 if (unlikely(rc == -ELINKCONG)) {
1377 tipc_dest_push(clinks, dnode, 0);
1378 tsk->cong_link_cnt++;
1379 rc = 0;
1380 }
1381
1382 if (unlikely(syn && !rc))
1383 tipc_set_sk_state(sk, TIPC_CONNECTING);
1384
1385 return rc ? rc : dlen;
1386 }
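/* Not part of the kernel build: a hedged user-space sketch of the
 * TIPC_ADDR_NAME branch above, i.e. an anycast datagram to whichever socket
 * has published instance 42 of service type 1000:
 *
 *	struct sockaddr_tipc dst = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name   = { .type = 1000, .instance = 42 },
 *		.addr.name.domain = 0,
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * A failed lookup in tipc_nametbl_translate() surfaces to the caller as
 * -EHOSTUNREACH.
 */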
1387
1388 /**
1389 * tipc_sendstream - send stream-oriented data
1390 * @sock: socket structure
1391 * @m: data to send
1392 * @dsz: total length of data to be transmitted
1393 *
1394 * Used for SOCK_STREAM data.
1395 *
1396 * Returns the number of bytes sent on success (or partial success),
1397 * or errno if no data sent
1398 */
1399 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1400 {
1401 struct sock *sk = sock->sk;
1402 int ret;
1403
1404 lock_sock(sk);
1405 ret = __tipc_sendstream(sock, m, dsz);
1406 release_sock(sk);
1407
1408 return ret;
1409 }
1410
1411 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1412 {
1413 struct sock *sk = sock->sk;
1414 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1415 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1416 struct tipc_sock *tsk = tipc_sk(sk);
1417 struct tipc_msg *hdr = &tsk->phdr;
1418 struct net *net = sock_net(sk);
1419 struct sk_buff_head pkts;
1420 u32 dnode = tsk_peer_node(tsk);
1421 int send, sent = 0;
1422 int rc = 0;
1423
1424 skb_queue_head_init(&pkts);
1425
1426 if (unlikely(dlen > INT_MAX))
1427 return -EMSGSIZE;
1428
1429 /* Handle implicit connection setup */
1430 if (unlikely(dest)) {
1431 rc = __tipc_sendmsg(sock, m, dlen);
1432 if (dlen && dlen == rc) {
1433 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1434 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1435 }
1436 return rc;
1437 }
1438
1439 do {
1440 rc = tipc_wait_for_cond(sock, &timeout,
1441 (!tsk->cong_link_cnt &&
1442 !tsk_conn_cong(tsk) &&
1443 tipc_sk_connected(sk)));
1444 if (unlikely(rc))
1445 break;
1446
1447 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1448 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1449 if (unlikely(rc != send))
1450 break;
1451
1452 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1453 if (unlikely(rc == -ELINKCONG)) {
1454 tsk->cong_link_cnt = 1;
1455 rc = 0;
1456 }
1457 if (likely(!rc)) {
1458 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1459 sent += send;
1460 }
1461 } while (sent < dlen && !rc);
1462
1463 return sent ? sent : rc;
1464 }
1465
1466 /**
1467 * tipc_send_packet - send a connection-oriented message
1468 * @sock: socket structure
1469 * @m: message to send
1470 * @dsz: length of data to be transmitted
1471 *
1472 * Used for SOCK_SEQPACKET messages.
1473 *
1474 * Returns the number of bytes sent on success, or errno otherwise
1475 */
1476 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1477 {
1478 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1479 return -EMSGSIZE;
1480
1481 return tipc_sendstream(sock, m, dsz);
1482 }
1483
1484 /* tipc_sk_finish_conn - complete the setup of a connection
1485 */
1486 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1487 u32 peer_node)
1488 {
1489 struct sock *sk = &tsk->sk;
1490 struct net *net = sock_net(sk);
1491 struct tipc_msg *msg = &tsk->phdr;
1492
1493 msg_set_destnode(msg, peer_node);
1494 msg_set_destport(msg, peer_port);
1495 msg_set_type(msg, TIPC_CONN_MSG);
1496 msg_set_lookup_scope(msg, 0);
1497 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1498
1499 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1500 tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1501 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1502 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1503 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1504 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1505 return;
1506
1507 /* Fall back to message based flow control */
1508 tsk->rcv_win = FLOWCTL_MSG_WIN;
1509 tsk->snd_win = FLOWCTL_MSG_WIN;
1510 }
1511
1512 /**
1513 * tipc_sk_set_orig_addr - capture sender's address for received message
1514 * @m: descriptor for message info
1515 * @hdr: received message header
1516 *
1517 * Note: Address is not captured if not requested by receiver.
1518 */
1519 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1520 {
1521 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1522 struct tipc_msg *hdr = buf_msg(skb);
1523
1524 if (!srcaddr)
1525 return;
1526
1527 srcaddr->sock.family = AF_TIPC;
1528 srcaddr->sock.addrtype = TIPC_ADDR_ID;
1529 srcaddr->sock.scope = 0;
1530 srcaddr->sock.addr.id.ref = msg_origport(hdr);
1531 srcaddr->sock.addr.id.node = msg_orignode(hdr);
1532 srcaddr->sock.addr.name.domain = 0;
1533 m->msg_namelen = sizeof(struct sockaddr_tipc);
1534
1535 if (!msg_in_group(hdr))
1536 return;
1537
1538 /* Group message users may also want to know sending member's id */
1539 srcaddr->member.family = AF_TIPC;
1540 srcaddr->member.addrtype = TIPC_ADDR_NAME;
1541 srcaddr->member.scope = 0;
1542 srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1543 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1544 srcaddr->member.addr.name.domain = 0;
1545 m->msg_namelen = sizeof(*srcaddr);
1546 }
1547
1548 /**
1549 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1550 * @m: descriptor for message info
1551 * @msg: received message header
1552 * @tsk: TIPC port associated with message
1553 *
1554 * Note: Ancillary data is not captured if not requested by receiver.
1555 *
1556 * Returns 0 if successful, otherwise errno
1557 */
1558 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1559 struct tipc_sock *tsk)
1560 {
1561 u32 anc_data[3];
1562 u32 err;
1563 u32 dest_type;
1564 int has_name;
1565 int res;
1566
1567 if (likely(m->msg_controllen == 0))
1568 return 0;
1569
1570 /* Optionally capture errored message object(s) */
1571 err = msg ? msg_errcode(msg) : 0;
1572 if (unlikely(err)) {
1573 anc_data[0] = err;
1574 anc_data[1] = msg_data_sz(msg);
1575 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1576 if (res)
1577 return res;
1578 if (anc_data[1]) {
1579 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1580 msg_data(msg));
1581 if (res)
1582 return res;
1583 }
1584 }
1585
1586 /* Optionally capture message destination object */
1587 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1588 switch (dest_type) {
1589 case TIPC_NAMED_MSG:
1590 has_name = 1;
1591 anc_data[0] = msg_nametype(msg);
1592 anc_data[1] = msg_namelower(msg);
1593 anc_data[2] = msg_namelower(msg);
1594 break;
1595 case TIPC_MCAST_MSG:
1596 has_name = 1;
1597 anc_data[0] = msg_nametype(msg);
1598 anc_data[1] = msg_namelower(msg);
1599 anc_data[2] = msg_nameupper(msg);
1600 break;
1601 case TIPC_CONN_MSG:
1602 has_name = (tsk->conn_type != 0);
1603 anc_data[0] = tsk->conn_type;
1604 anc_data[1] = tsk->conn_instance;
1605 anc_data[2] = tsk->conn_instance;
1606 break;
1607 default:
1608 has_name = 0;
1609 }
1610 if (has_name) {
1611 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1612 if (res)
1613 return res;
1614 }
1615
1616 return 0;
1617 }
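/* Not part of the kernel build: a hedged user-space sketch of reading the
 * ancillary data produced by tipc_sk_anc_data_recv(); TIPC_ERRINFO,
 * TIPC_RETDATA and TIPC_DESTNAME are assumed to come from <linux/tipc.h>:
 *
 *	char data[1024], cbuf[CMSG_SPACE(1024)];
 *	struct iovec iov = { data, sizeof(data) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *	__u32 errinfo[2];
 *
 *	if (recvmsg(sd, &msg, 0) < 0)
 *		perror("recvmsg");
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level == SOL_TIPC && cm->cmsg_type == TIPC_ERRINFO)
 *			memcpy(errinfo, CMSG_DATA(cm), sizeof(errinfo));
 *	}
 *
 * errinfo[0] then holds the TIPC error code and errinfo[1] the length of any
 * returned data carried in a following TIPC_RETDATA cmsg.
 */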
1618
1619 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1620 {
1621 struct sock *sk = &tsk->sk;
1622 struct net *net = sock_net(sk);
1623 struct sk_buff *skb = NULL;
1624 struct tipc_msg *msg;
1625 u32 peer_port = tsk_peer_port(tsk);
1626 u32 dnode = tsk_peer_node(tsk);
1627
1628 if (!tipc_sk_connected(sk))
1629 return;
1630 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1631 dnode, tsk_own_node(tsk), peer_port,
1632 tsk->portid, TIPC_OK);
1633 if (!skb)
1634 return;
1635 msg = buf_msg(skb);
1636 msg_set_conn_ack(msg, tsk->rcv_unacked);
1637 tsk->rcv_unacked = 0;
1638
1639 /* Adjust to and advertise the correct window limit */
1640 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1641 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1642 msg_set_adv_win(msg, tsk->rcv_win);
1643 }
1644 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1645 }
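/* With TIPC_ACK_RATE of 4, the receive path calls this once the reader has
 * consumed at least rcv_win / TIPC_ACK_RATE blocks, i.e. a quarter of the
 * advertised receive window (see the rcv_unacked test in tipc_recvmsg()).
 */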
1646
1647 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1648 {
1649 struct sock *sk = sock->sk;
1650 DEFINE_WAIT(wait);
1651 long timeo = *timeop;
1652 int err = sock_error(sk);
1653
1654 if (err)
1655 return err;
1656
1657 for (;;) {
1658 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1659 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1660 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1661 err = -ENOTCONN;
1662 break;
1663 }
1664 release_sock(sk);
1665 timeo = schedule_timeout(timeo);
1666 lock_sock(sk);
1667 }
1668 err = 0;
1669 if (!skb_queue_empty(&sk->sk_receive_queue))
1670 break;
1671 err = -EAGAIN;
1672 if (!timeo)
1673 break;
1674 err = sock_intr_errno(timeo);
1675 if (signal_pending(current))
1676 break;
1677
1678 err = sock_error(sk);
1679 if (err)
1680 break;
1681 }
1682 finish_wait(sk_sleep(sk), &wait);
1683 *timeop = timeo;
1684 return err;
1685 }
1686
1687 /**
1688 * tipc_recvmsg - receive packet-oriented message
1689 * @m: descriptor for message info
1690 * @buflen: length of user buffer area
1691 * @flags: receive flags
1692 *
1693 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1694 * If the complete message doesn't fit in user area, truncate it.
1695 *
1696 * Returns size of returned message data, errno otherwise
1697 */
1698 static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1699 size_t buflen, int flags)
1700 {
1701 struct sock *sk = sock->sk;
1702 bool connected = !tipc_sk_type_connectionless(sk);
1703 struct tipc_sock *tsk = tipc_sk(sk);
1704 int rc, err, hlen, dlen, copy;
1705 struct sk_buff_head xmitq;
1706 struct tipc_msg *hdr;
1707 struct sk_buff *skb;
1708 bool grp_evt;
1709 long timeout;
1710
1711 /* Catch invalid receive requests */
1712 if (unlikely(!buflen))
1713 return -EINVAL;
1714
1715 lock_sock(sk);
1716 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1717 rc = -ENOTCONN;
1718 goto exit;
1719 }
1720 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1721
1722 /* Step rcv queue to first msg with data or error; wait if necessary */
1723 do {
1724 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1725 if (unlikely(rc))
1726 goto exit;
1727 skb = skb_peek(&sk->sk_receive_queue);
1728 hdr = buf_msg(skb);
1729 dlen = msg_data_sz(hdr);
1730 hlen = msg_hdr_sz(hdr);
1731 err = msg_errcode(hdr);
1732 grp_evt = msg_is_grp_evt(hdr);
1733 if (likely(dlen || err))
1734 break;
1735 tsk_advance_rx_queue(sk);
1736 } while (1);
1737
1738 /* Collect msg meta data, including error code and rejected data */
1739 tipc_sk_set_orig_addr(m, skb);
1740 rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1741 if (unlikely(rc))
1742 goto exit;
1743
1744 /* Capture data if non-error msg, otherwise just set return value */
1745 if (likely(!err)) {
1746 copy = min_t(int, dlen, buflen);
1747 if (unlikely(copy != dlen))
1748 m->msg_flags |= MSG_TRUNC;
1749 rc = skb_copy_datagram_msg(skb, hlen, m, copy);
1750 } else {
1751 copy = 0;
1752 rc = 0;
1753 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
1754 rc = -ECONNRESET;
1755 }
1756 if (unlikely(rc))
1757 goto exit;
1758
1759 /* Mark message as group event if applicable */
1760 if (unlikely(grp_evt)) {
1761 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1762 m->msg_flags |= MSG_EOR;
1763 m->msg_flags |= MSG_OOB;
1764 copy = 0;
1765 }
1766
1767 	/* Capture of data or error code/rejected data was successful */
1768 if (unlikely(flags & MSG_PEEK))
1769 goto exit;
1770
1771 /* Send group flow control advertisement when applicable */
1772 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1773 skb_queue_head_init(&xmitq);
1774 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1775 msg_orignode(hdr), msg_origport(hdr),
1776 &xmitq);
1777 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1778 }
1779
1780 tsk_advance_rx_queue(sk);
1781
1782 if (likely(!connected))
1783 goto exit;
1784
1785 /* Send connection flow control advertisement when applicable */
1786 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1787 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1788 tipc_sk_send_ack(tsk);
1789 exit:
1790 release_sock(sk);
1791 return rc ? rc : copy;
1792 }
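/* Illustrative user-space sketch (not part of the kernel build): receiving one
 * datagram on an already-created AF_TIPC SOCK_RDM socket 'sd' and detecting
 * truncation via MSG_TRUNC, mirroring the copy/truncate handling above. The
 * buffer size is an arbitrary example.
 *
 *	#include <linux/tipc.h>
 *	#include <sys/socket.h>
 *	#include <stdio.h>
 *
 *	char buf[1024];
 *	struct sockaddr_tipc src;
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = {
 *		.msg_name = &src, .msg_namelen = sizeof(src),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *	};
 *
 *	ssize_t n = recvmsg(sd, &m, 0);
 *	if (n >= 0 && (m.msg_flags & MSG_TRUNC))
 *		fprintf(stderr, "message truncated to %zd bytes\n", n);
 */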
1793
1794 /**
1795 * tipc_recvstream - receive stream-oriented data
1796 * @m: descriptor for message info
1797 * @buflen: total size of user buffer area
1798 * @flags: receive flags
1799 *
1800  * Used for SOCK_STREAM messages only. If not enough data is available,
1801  * this will optionally wait for more; it never truncates data.
1802 *
1803 * Returns size of returned message data, errno otherwise
1804 */
1805 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1806 size_t buflen, int flags)
1807 {
1808 struct sock *sk = sock->sk;
1809 struct tipc_sock *tsk = tipc_sk(sk);
1810 struct sk_buff *skb;
1811 struct tipc_msg *hdr;
1812 struct tipc_skb_cb *skb_cb;
1813 bool peek = flags & MSG_PEEK;
1814 int offset, required, copy, copied = 0;
1815 int hlen, dlen, err, rc;
1816 long timeout;
1817
1818 /* Catch invalid receive attempts */
1819 if (unlikely(!buflen))
1820 return -EINVAL;
1821
1822 lock_sock(sk);
1823
1824 if (unlikely(sk->sk_state == TIPC_OPEN)) {
1825 rc = -ENOTCONN;
1826 goto exit;
1827 }
1828 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1829 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1830
1831 do {
1832 /* Look at first msg in receive queue; wait if necessary */
1833 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1834 if (unlikely(rc))
1835 break;
1836 skb = skb_peek(&sk->sk_receive_queue);
1837 skb_cb = TIPC_SKB_CB(skb);
1838 hdr = buf_msg(skb);
1839 dlen = msg_data_sz(hdr);
1840 hlen = msg_hdr_sz(hdr);
1841 err = msg_errcode(hdr);
1842
1843 /* Discard any empty non-errored (SYN-) message */
1844 if (unlikely(!dlen && !err)) {
1845 tsk_advance_rx_queue(sk);
1846 continue;
1847 }
1848
1849 /* Collect msg meta data, incl. error code and rejected data */
1850 if (!copied) {
1851 tipc_sk_set_orig_addr(m, skb);
1852 rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1853 if (rc)
1854 break;
1855 }
1856
1857 /* Copy data if msg ok, otherwise return error/partial data */
1858 if (likely(!err)) {
1859 offset = skb_cb->bytes_read;
1860 copy = min_t(int, dlen - offset, buflen - copied);
1861 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1862 if (unlikely(rc))
1863 break;
1864 copied += copy;
1865 offset += copy;
1866 if (unlikely(offset < dlen)) {
1867 if (!peek)
1868 skb_cb->bytes_read = offset;
1869 break;
1870 }
1871 } else {
1872 rc = 0;
1873 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1874 rc = -ECONNRESET;
1875 if (copied || rc)
1876 break;
1877 }
1878
1879 if (unlikely(peek))
1880 break;
1881
1882 tsk_advance_rx_queue(sk);
1883
1884 /* Send connection flow control advertisement when applicable */
1885 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1886 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1887 tipc_sk_send_ack(tsk);
1888
1889 /* Exit if all requested data or FIN/error received */
1890 if (copied == buflen || err)
1891 break;
1892
1893 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1894 exit:
1895 release_sock(sk);
1896 return copied ? copied : rc;
1897 }
1898
1899 /**
1900 * tipc_write_space - wake up thread if port congestion is released
1901 * @sk: socket
1902 */
1903 static void tipc_write_space(struct sock *sk)
1904 {
1905 struct socket_wq *wq;
1906
1907 rcu_read_lock();
1908 wq = rcu_dereference(sk->sk_wq);
1909 if (skwq_has_sleeper(wq))
1910 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1911 EPOLLWRNORM | EPOLLWRBAND);
1912 rcu_read_unlock();
1913 }
1914
1915 /**
1916 * tipc_data_ready - wake up threads to indicate messages have been received
1917 * @sk: socket
1919 */
1920 static void tipc_data_ready(struct sock *sk)
1921 {
1922 struct socket_wq *wq;
1923
1924 rcu_read_lock();
1925 wq = rcu_dereference(sk->sk_wq);
1926 if (skwq_has_sleeper(wq))
1927 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
1928 EPOLLRDNORM | EPOLLRDBAND);
1929 rcu_read_unlock();
1930 }
1931
1932 static void tipc_sock_destruct(struct sock *sk)
1933 {
1934 __skb_queue_purge(&sk->sk_receive_queue);
1935 }
1936
1937 static void tipc_sk_proto_rcv(struct sock *sk,
1938 struct sk_buff_head *inputq,
1939 struct sk_buff_head *xmitq)
1940 {
1941 struct sk_buff *skb = __skb_dequeue(inputq);
1942 struct tipc_sock *tsk = tipc_sk(sk);
1943 struct tipc_msg *hdr = buf_msg(skb);
1944 struct tipc_group *grp = tsk->group;
1945 bool wakeup = false;
1946
1947 switch (msg_user(hdr)) {
1948 case CONN_MANAGER:
1949 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
1950 return;
1951 case SOCK_WAKEUP:
1952 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1953 tsk->cong_link_cnt--;
1954 wakeup = true;
1955 break;
1956 case GROUP_PROTOCOL:
1957 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1958 break;
1959 case TOP_SRV:
1960 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
1961 hdr, inputq, xmitq);
1962 break;
1963 default:
1964 break;
1965 }
1966
1967 if (wakeup)
1968 sk->sk_write_space(sk);
1969
1970 kfree_skb(skb);
1971 }
1972
1973 /**
1974  * tipc_sk_filter_connect - Handle incoming message for a connection-based socket
1975 * @tsk: TIPC socket
1976 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
1977 *
1978 * Returns true if everything ok, false otherwise
1979 */
1980 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1981 {
1982 struct sock *sk = &tsk->sk;
1983 struct net *net = sock_net(sk);
1984 struct tipc_msg *hdr = buf_msg(skb);
1985 u32 pport = msg_origport(hdr);
1986 u32 pnode = msg_orignode(hdr);
1987
1988 if (unlikely(msg_mcast(hdr)))
1989 return false;
1990
1991 switch (sk->sk_state) {
1992 case TIPC_CONNECTING:
1993 /* Accept only ACK or NACK message */
1994 if (unlikely(!msg_connected(hdr))) {
1995 if (pport != tsk_peer_port(tsk) ||
1996 pnode != tsk_peer_node(tsk))
1997 return false;
1998
1999 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2000 sk->sk_err = ECONNREFUSED;
2001 sk->sk_state_change(sk);
2002 return true;
2003 }
2004
2005 if (unlikely(msg_errcode(hdr))) {
2006 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2007 sk->sk_err = ECONNREFUSED;
2008 sk->sk_state_change(sk);
2009 return true;
2010 }
2011
2012 if (unlikely(!msg_isdata(hdr))) {
2013 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2014 sk->sk_err = EINVAL;
2015 sk->sk_state_change(sk);
2016 return true;
2017 }
2018
2019 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
2020 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2021
2022 /* If 'ACK+' message, add to socket receive queue */
2023 if (msg_data_sz(hdr))
2024 return true;
2025
2026 /* If empty 'ACK-' message, wake up sleeping connect() */
2027 sk->sk_data_ready(sk);
2028
2029 /* 'ACK-' message is neither accepted nor rejected: */
2030 msg_set_dest_droppable(hdr, 1);
2031 return false;
2032
2033 case TIPC_OPEN:
2034 case TIPC_DISCONNECTING:
2035 break;
2036 case TIPC_LISTEN:
2037 /* Accept only SYN message */
2038 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
2039 return true;
2040 break;
2041 case TIPC_ESTABLISHED:
2042 /* Accept only connection-based messages sent by peer */
2043 if (unlikely(!tsk_peer_msg(tsk, hdr)))
2044 return false;
2045
2046 if (unlikely(msg_errcode(hdr))) {
2047 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2048 			/* Let timer expire on its own */
2049 tipc_node_remove_conn(net, tsk_peer_node(tsk),
2050 tsk->portid);
2051 sk->sk_state_change(sk);
2052 }
2053 return true;
2054 default:
2055 pr_err("Unknown sk_state %u\n", sk->sk_state);
2056 }
2057
2058 return false;
2059 }
2060
2061 /**
2062 * rcvbuf_limit - get proper overload limit of socket receive queue
2063 * @sk: socket
2064 * @skb: message
2065 *
2066 * For connection oriented messages, irrespective of importance,
2067 * default queue limit is 2 MB.
2068 *
2069 * For connectionless messages, queue limits are based on message
2070 * importance as follows:
2071 *
2072 * TIPC_LOW_IMPORTANCE (2 MB)
2073 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2074 * TIPC_HIGH_IMPORTANCE (8 MB)
2075 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2076 *
2077 * Returns overload limit according to corresponding message importance
2078 */
2079 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2080 {
2081 struct tipc_sock *tsk = tipc_sk(sk);
2082 struct tipc_msg *hdr = buf_msg(skb);
2083
2084 if (unlikely(msg_in_group(hdr)))
2085 return sk->sk_rcvbuf;
2086
2087 if (unlikely(!msg_connected(hdr)))
2088 return sk->sk_rcvbuf << msg_importance(hdr);
2089
2090 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2091 return sk->sk_rcvbuf;
2092
2093 return FLOWCTL_MSG_LIM;
2094 }
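/* Worked example (assuming sk_rcvbuf sits at the ~2 MB connection default):
 * for connectionless messages the limit above is sk_rcvbuf shifted left by
 * the importance level, i.e. TIPC_LOW_IMPORTANCE (0) => 2 MB,
 * TIPC_MEDIUM_IMPORTANCE (1) => 4 MB, TIPC_HIGH_IMPORTANCE (2) => 8 MB and
 * TIPC_CRITICAL_IMPORTANCE (3) => 16 MB, matching the table in the
 * kernel-doc above.
 */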
2095
2096 /**
2097 * tipc_sk_filter_rcv - validate incoming message
2098 * @sk: socket
2099 * @skb: pointer to message.
2100 *
2101 * Enqueues message on receive queue if acceptable; optionally handles
2102 * disconnect indication for a connected socket.
2103 *
2104 * Called with socket lock already taken
2105 *
2106 */
2107 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2108 struct sk_buff_head *xmitq)
2109 {
2110 bool sk_conn = !tipc_sk_type_connectionless(sk);
2111 struct tipc_sock *tsk = tipc_sk(sk);
2112 struct tipc_group *grp = tsk->group;
2113 struct tipc_msg *hdr = buf_msg(skb);
2114 struct net *net = sock_net(sk);
2115 struct sk_buff_head inputq;
2116 int limit, err = TIPC_OK;
2117
2118 TIPC_SKB_CB(skb)->bytes_read = 0;
2119 __skb_queue_head_init(&inputq);
2120 __skb_queue_tail(&inputq, skb);
2121
2122 if (unlikely(!msg_isdata(hdr)))
2123 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2124
2125 if (unlikely(grp))
2126 tipc_group_filter_msg(grp, &inputq, xmitq);
2127
2128 /* Validate and add to receive buffer if there is space */
2129 while ((skb = __skb_dequeue(&inputq))) {
2130 hdr = buf_msg(skb);
2131 limit = rcvbuf_limit(sk, skb);
2132 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2133 (!sk_conn && msg_connected(hdr)) ||
2134 (!grp && msg_in_group(hdr)))
2135 err = TIPC_ERR_NO_PORT;
2136 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2137 atomic_inc(&sk->sk_drops);
2138 err = TIPC_ERR_OVERLOAD;
2139 }
2140
2141 if (unlikely(err)) {
2142 tipc_skb_reject(net, err, skb, xmitq);
2143 err = TIPC_OK;
2144 continue;
2145 }
2146 __skb_queue_tail(&sk->sk_receive_queue, skb);
2147 skb_set_owner_r(skb, sk);
2148 sk->sk_data_ready(sk);
2149 }
2150 }
2151
2152 /**
2153 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2154 * @sk: socket
2155 * @skb: message
2156 *
2157 * Caller must hold socket lock
2158 */
2159 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2160 {
2161 unsigned int before = sk_rmem_alloc_get(sk);
2162 struct sk_buff_head xmitq;
2163 unsigned int added;
2164
2165 __skb_queue_head_init(&xmitq);
2166
2167 tipc_sk_filter_rcv(sk, skb, &xmitq);
2168 added = sk_rmem_alloc_get(sk) - before;
2169 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2170
2171 /* Send pending response/rejected messages, if any */
2172 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2173 return 0;
2174 }
2175
2176 /**
2177 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2178 * inputq and try adding them to socket or backlog queue
2179 * @inputq: list of incoming buffers with potentially different destinations
2180 * @sk: socket where the buffers should be enqueued
2181 * @dport: port number for the socket
2182 *
2183 * Caller must hold socket lock
2184 */
2185 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2186 u32 dport, struct sk_buff_head *xmitq)
2187 {
2188 unsigned long time_limit = jiffies + 2;
2189 struct sk_buff *skb;
2190 unsigned int lim;
2191 atomic_t *dcnt;
2192 u32 onode;
2193
2194 while (skb_queue_len(inputq)) {
2195 if (unlikely(time_after_eq(jiffies, time_limit)))
2196 return;
2197
2198 skb = tipc_skb_dequeue(inputq, dport);
2199 if (unlikely(!skb))
2200 return;
2201
2202 /* Add message directly to receive queue if possible */
2203 if (!sock_owned_by_user(sk)) {
2204 tipc_sk_filter_rcv(sk, skb, xmitq);
2205 continue;
2206 }
2207
2208 /* Try backlog, compensating for double-counted bytes */
2209 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2210 if (!sk->sk_backlog.len)
2211 atomic_set(dcnt, 0);
2212 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2213 if (likely(!sk_add_backlog(sk, skb, lim)))
2214 continue;
2215
2216 /* Overload => reject message back to sender */
2217 onode = tipc_own_addr(sock_net(sk));
2218 atomic_inc(&sk->sk_drops);
2219 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
2220 __skb_queue_tail(xmitq, skb);
2221 break;
2222 }
2223 }
2224
2225 /**
2226 * tipc_sk_rcv - handle a chain of incoming buffers
2227 * @inputq: buffer list containing the buffers
2228 * Consumes all buffers in list until inputq is empty
2229 * Note: may be called in multiple threads referring to the same queue
2230 */
2231 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2232 {
2233 struct sk_buff_head xmitq;
2234 u32 dnode, dport = 0;
2235 int err;
2236 struct tipc_sock *tsk;
2237 struct sock *sk;
2238 struct sk_buff *skb;
2239
2240 __skb_queue_head_init(&xmitq);
2241 while (skb_queue_len(inputq)) {
2242 dport = tipc_skb_peek_port(inputq, dport);
2243 tsk = tipc_sk_lookup(net, dport);
2244
2245 if (likely(tsk)) {
2246 sk = &tsk->sk;
2247 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2248 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2249 spin_unlock_bh(&sk->sk_lock.slock);
2250 }
2251 /* Send pending response/rejected messages, if any */
2252 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2253 sock_put(sk);
2254 continue;
2255 }
2256 /* No destination socket => dequeue skb if still there */
2257 skb = tipc_skb_dequeue(inputq, dport);
2258 if (!skb)
2259 return;
2260
2261 /* Try secondary lookup if unresolved named message */
2262 err = TIPC_ERR_NO_PORT;
2263 if (tipc_msg_lookup_dest(net, skb, &err))
2264 goto xmit;
2265
2266 /* Prepare for message rejection */
2267 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2268 continue;
2269 xmit:
2270 dnode = msg_destnode(buf_msg(skb));
2271 tipc_node_xmit_skb(net, skb, dnode, dport);
2272 }
2273 }
2274
2275 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2276 {
2277 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2278 struct sock *sk = sock->sk;
2279 int done;
2280
2281 do {
2282 int err = sock_error(sk);
2283 if (err)
2284 return err;
2285 if (!*timeo_p)
2286 return -ETIMEDOUT;
2287 if (signal_pending(current))
2288 return sock_intr_errno(*timeo_p);
2289
2290 add_wait_queue(sk_sleep(sk), &wait);
2291 done = sk_wait_event(sk, timeo_p,
2292 sk->sk_state != TIPC_CONNECTING, &wait);
2293 remove_wait_queue(sk_sleep(sk), &wait);
2294 } while (!done);
2295 return 0;
2296 }
2297
2298 /**
2299 * tipc_connect - establish a connection to another TIPC port
2300 * @sock: socket structure
2301 * @dest: socket address for destination port
2302 * @destlen: size of socket address data structure
2303 * @flags: file-related flags associated with socket
2304 *
2305 * Returns 0 on success, errno otherwise
2306 */
2307 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2308 int destlen, int flags)
2309 {
2310 struct sock *sk = sock->sk;
2311 struct tipc_sock *tsk = tipc_sk(sk);
2312 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2313 struct msghdr m = {NULL,};
2314 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2315 int previous;
2316 int res = 0;
2317
2318 if (destlen != sizeof(struct sockaddr_tipc))
2319 return -EINVAL;
2320
2321 lock_sock(sk);
2322
2323 if (tsk->group) {
2324 res = -EINVAL;
2325 goto exit;
2326 }
2327
2328 if (dst->family == AF_UNSPEC) {
2329 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2330 if (!tipc_sk_type_connectionless(sk))
2331 res = -EINVAL;
2332 goto exit;
2333 } else if (dst->family != AF_TIPC) {
2334 res = -EINVAL;
2335 }
2336 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
2337 res = -EINVAL;
2338 if (res)
2339 goto exit;
2340
2341 /* DGRAM/RDM connect(), just save the destaddr */
2342 if (tipc_sk_type_connectionless(sk)) {
2343 memcpy(&tsk->peer, dest, destlen);
2344 goto exit;
2345 }
2346
2347 previous = sk->sk_state;
2348
2349 switch (sk->sk_state) {
2350 case TIPC_OPEN:
2351 /* Send a 'SYN-' to destination */
2352 m.msg_name = dest;
2353 m.msg_namelen = destlen;
2354
2355 		/* For a non-blocking connect(), set MSG_DONTWAIT so that
2356 		 * __tipc_sendmsg() never blocks.
2357 */
2358 if (!timeout)
2359 m.msg_flags = MSG_DONTWAIT;
2360
2361 res = __tipc_sendmsg(sock, &m, 0);
2362 if ((res < 0) && (res != -EWOULDBLOCK))
2363 goto exit;
2364
2365 /* Just entered TIPC_CONNECTING state; the only
2366 * difference is that return value in non-blocking
2367 * case is EINPROGRESS, rather than EALREADY.
2368 */
2369 res = -EINPROGRESS;
2370 /* fall thru' */
2371 case TIPC_CONNECTING:
2372 if (!timeout) {
2373 if (previous == TIPC_CONNECTING)
2374 res = -EALREADY;
2375 goto exit;
2376 }
2377 timeout = msecs_to_jiffies(timeout);
2378 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2379 res = tipc_wait_for_connect(sock, &timeout);
2380 break;
2381 case TIPC_ESTABLISHED:
2382 res = -EISCONN;
2383 break;
2384 default:
2385 res = -EINVAL;
2386 }
2387
2388 exit:
2389 release_sock(sk);
2390 return res;
2391 }
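/* Illustrative user-space sketch (not part of the kernel build) of the
 * connect path handled above. The service type/instance values are arbitrary
 * examples.
 *
 *	#include <linux/tipc.h>
 *	#include <sys/socket.h>
 *	#include <stdio.h>
 *
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *	struct sockaddr_tipc srv = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 18888, .instance = 17 },
 *		.addr.name.domain = 0,	// look up in the whole cluster
 *	};
 *
 *	if (connect(sd, (struct sockaddr *)&srv, sizeof(srv)) < 0)
 *		perror("connect");
 */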
2392
2393 /**
2394 * tipc_listen - allow socket to listen for incoming connections
2395 * @sock: socket structure
2396 * @len: (unused)
2397 *
2398 * Returns 0 on success, errno otherwise
2399 */
2400 static int tipc_listen(struct socket *sock, int len)
2401 {
2402 struct sock *sk = sock->sk;
2403 int res;
2404
2405 lock_sock(sk);
2406 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2407 release_sock(sk);
2408
2409 return res;
2410 }
2411
2412 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2413 {
2414 struct sock *sk = sock->sk;
2415 DEFINE_WAIT(wait);
2416 int err;
2417
2418 /* True wake-one mechanism for incoming connections: only
2419 * one process gets woken up, not the 'whole herd'.
2420 * Since we do not 'race & poll' for established sockets
2421 * anymore, the common case will execute the loop only once.
2422 */
2423 for (;;) {
2424 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2425 TASK_INTERRUPTIBLE);
2426 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2427 release_sock(sk);
2428 timeo = schedule_timeout(timeo);
2429 lock_sock(sk);
2430 }
2431 err = 0;
2432 if (!skb_queue_empty(&sk->sk_receive_queue))
2433 break;
2434 err = -EAGAIN;
2435 if (!timeo)
2436 break;
2437 err = sock_intr_errno(timeo);
2438 if (signal_pending(current))
2439 break;
2440 }
2441 finish_wait(sk_sleep(sk), &wait);
2442 return err;
2443 }
2444
2445 /**
2446 * tipc_accept - wait for connection request
2447 * @sock: listening socket
2448  * @new_sock: new socket that is to be connected
2449 * @flags: file-related flags associated with socket
2450 *
2451 * Returns 0 on success, errno otherwise
2452 */
2453 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2454 bool kern)
2455 {
2456 struct sock *new_sk, *sk = sock->sk;
2457 struct sk_buff *buf;
2458 struct tipc_sock *new_tsock;
2459 struct tipc_msg *msg;
2460 long timeo;
2461 int res;
2462
2463 lock_sock(sk);
2464
2465 if (sk->sk_state != TIPC_LISTEN) {
2466 res = -EINVAL;
2467 goto exit;
2468 }
2469 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2470 res = tipc_wait_for_accept(sock, timeo);
2471 if (res)
2472 goto exit;
2473
2474 buf = skb_peek(&sk->sk_receive_queue);
2475
2476 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2477 if (res)
2478 goto exit;
2479 security_sk_clone(sock->sk, new_sock->sk);
2480
2481 new_sk = new_sock->sk;
2482 new_tsock = tipc_sk(new_sk);
2483 msg = buf_msg(buf);
2484
2485 /* we lock on new_sk; but lockdep sees the lock on sk */
2486 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2487
2488 /*
2489 * Reject any stray messages received by new socket
2490 * before the socket lock was taken (very, very unlikely)
2491 */
2492 tsk_rej_rx_queue(new_sk);
2493
2494 	/* Connect new socket to its peer */
2495 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2496
2497 tsk_set_importance(new_tsock, msg_importance(msg));
2498 if (msg_named(msg)) {
2499 new_tsock->conn_type = msg_nametype(msg);
2500 new_tsock->conn_instance = msg_nameinst(msg);
2501 }
2502
2503 /*
2504 	 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2505 * Respond to 'SYN+' by queuing it on new socket.
2506 */
2507 if (!msg_data_sz(msg)) {
2508 struct msghdr m = {NULL,};
2509
2510 tsk_advance_rx_queue(sk);
2511 __tipc_sendstream(new_sock, &m, 0);
2512 } else {
2513 __skb_dequeue(&sk->sk_receive_queue);
2514 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2515 skb_set_owner_r(buf, new_sk);
2516 }
2517 release_sock(new_sk);
2518 exit:
2519 release_sock(sk);
2520 return res;
2521 }
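/* Illustrative user-space sketch (not part of the kernel build) of the
 * listen/accept path above; the bound service range is an arbitrary example.
 *
 *	int lsd = socket(AF_TIPC, SOCK_STREAM, 0);
 *	struct sockaddr_tipc addr = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 17, .upper = 17 },
 *	};
 *
 *	bind(lsd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(lsd, 5);
 *	int sd = accept(lsd, NULL, NULL);
 */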
2522
2523 /**
2524 * tipc_shutdown - shutdown socket connection
2525 * @sock: socket structure
2526 * @how: direction to close (must be SHUT_RDWR)
2527 *
2528 * Terminates connection (if necessary), then purges socket's receive queue.
2529 *
2530 * Returns 0 on success, errno otherwise
2531 */
2532 static int tipc_shutdown(struct socket *sock, int how)
2533 {
2534 struct sock *sk = sock->sk;
2535 int res;
2536
2537 if (how != SHUT_RDWR)
2538 return -EINVAL;
2539
2540 lock_sock(sk);
2541
2542 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2543 sk->sk_shutdown = SEND_SHUTDOWN;
2544
2545 if (sk->sk_state == TIPC_DISCONNECTING) {
2546 /* Discard any unreceived messages */
2547 __skb_queue_purge(&sk->sk_receive_queue);
2548
2549 /* Wake up anyone sleeping in poll */
2550 sk->sk_state_change(sk);
2551 res = 0;
2552 } else {
2553 res = -ENOTCONN;
2554 }
2555
2556 release_sock(sk);
2557 return res;
2558 }
2559
2560 static void tipc_sk_timeout(struct timer_list *t)
2561 {
2562 struct sock *sk = from_timer(sk, t, sk_timer);
2563 struct tipc_sock *tsk = tipc_sk(sk);
2564 u32 peer_port = tsk_peer_port(tsk);
2565 u32 peer_node = tsk_peer_node(tsk);
2566 u32 own_node = tsk_own_node(tsk);
2567 u32 own_port = tsk->portid;
2568 struct net *net = sock_net(sk);
2569 struct sk_buff *skb = NULL;
2570
2571 bh_lock_sock(sk);
2572 if (!tipc_sk_connected(sk))
2573 goto exit;
2574
2575 /* Try again later if socket is busy */
2576 if (sock_owned_by_user(sk)) {
2577 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2578 goto exit;
2579 }
2580
2581 if (tsk->probe_unacked) {
2582 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2583 tipc_node_remove_conn(net, peer_node, peer_port);
2584 sk->sk_state_change(sk);
2585 goto exit;
2586 }
2587 /* Send new probe */
2588 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2589 peer_node, own_node, peer_port, own_port,
2590 TIPC_OK);
2591 tsk->probe_unacked = true;
2592 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2593 exit:
2594 bh_unlock_sock(sk);
2595 if (skb)
2596 tipc_node_xmit_skb(net, skb, peer_node, own_port);
2597 sock_put(sk);
2598 }
2599
2600 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2601 struct tipc_name_seq const *seq)
2602 {
2603 struct sock *sk = &tsk->sk;
2604 struct net *net = sock_net(sk);
2605 struct publication *publ;
2606 u32 key;
2607
2608 if (scope != TIPC_NODE_SCOPE)
2609 scope = TIPC_CLUSTER_SCOPE;
2610
2611 if (tipc_sk_connected(sk))
2612 return -EINVAL;
2613 key = tsk->portid + tsk->pub_count + 1;
2614 if (key == tsk->portid)
2615 return -EADDRINUSE;
2616
2617 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2618 scope, tsk->portid, key);
2619 if (unlikely(!publ))
2620 return -EINVAL;
2621
2622 list_add(&publ->binding_sock, &tsk->publications);
2623 tsk->pub_count++;
2624 tsk->published = 1;
2625 return 0;
2626 }
2627
2628 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2629 struct tipc_name_seq const *seq)
2630 {
2631 struct net *net = sock_net(&tsk->sk);
2632 struct publication *publ;
2633 struct publication *safe;
2634 int rc = -EINVAL;
2635
2636 if (scope != TIPC_NODE_SCOPE)
2637 scope = TIPC_CLUSTER_SCOPE;
2638
2639 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2640 if (seq) {
2641 if (publ->scope != scope)
2642 continue;
2643 if (publ->type != seq->type)
2644 continue;
2645 if (publ->lower != seq->lower)
2646 continue;
2647 if (publ->upper != seq->upper)
2648 break;
2649 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2650 publ->upper, publ->key);
2651 rc = 0;
2652 break;
2653 }
2654 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2655 publ->upper, publ->key);
2656 rc = 0;
2657 }
2658 if (list_empty(&tsk->publications))
2659 tsk->published = 0;
2660 return rc;
2661 }
2662
2663 /* tipc_sk_reinit: set non-zero address in all existing sockets
2664 * when we go from standalone to network mode.
2665 */
2666 void tipc_sk_reinit(struct net *net)
2667 {
2668 struct tipc_net *tn = net_generic(net, tipc_net_id);
2669 struct rhashtable_iter iter;
2670 struct tipc_sock *tsk;
2671 struct tipc_msg *msg;
2672
2673 rhashtable_walk_enter(&tn->sk_rht, &iter);
2674
2675 do {
2676 rhashtable_walk_start(&iter);
2677
2678 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2679 spin_lock_bh(&tsk->sk.sk_lock.slock);
2680 msg = &tsk->phdr;
2681 msg_set_prevnode(msg, tipc_own_addr(net));
2682 msg_set_orignode(msg, tipc_own_addr(net));
2683 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2684 }
2685
2686 rhashtable_walk_stop(&iter);
2687 } while (tsk == ERR_PTR(-EAGAIN));
2688
2689 rhashtable_walk_exit(&iter);
2690 }
2691
2692 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2693 {
2694 struct tipc_net *tn = net_generic(net, tipc_net_id);
2695 struct tipc_sock *tsk;
2696
2697 rcu_read_lock();
2698 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2699 if (tsk)
2700 sock_hold(&tsk->sk);
2701 rcu_read_unlock();
2702
2703 return tsk;
2704 }
2705
2706 static int tipc_sk_insert(struct tipc_sock *tsk)
2707 {
2708 struct sock *sk = &tsk->sk;
2709 struct net *net = sock_net(sk);
2710 struct tipc_net *tn = net_generic(net, tipc_net_id);
2711 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2712 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2713
2714 while (remaining--) {
2715 portid++;
2716 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2717 portid = TIPC_MIN_PORT;
2718 tsk->portid = portid;
2719 sock_hold(&tsk->sk);
2720 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2721 tsk_rht_params))
2722 return 0;
2723 sock_put(&tsk->sk);
2724 }
2725
2726 return -1;
2727 }
2728
2729 static void tipc_sk_remove(struct tipc_sock *tsk)
2730 {
2731 struct sock *sk = &tsk->sk;
2732 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2733
2734 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2735 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2736 __sock_put(sk);
2737 }
2738 }
2739
2740 static const struct rhashtable_params tsk_rht_params = {
2741 .nelem_hint = 192,
2742 .head_offset = offsetof(struct tipc_sock, node),
2743 .key_offset = offsetof(struct tipc_sock, portid),
2744 .key_len = sizeof(u32), /* portid */
2745 .max_size = 1048576,
2746 .min_size = 256,
2747 .automatic_shrinking = true,
2748 };
2749
2750 int tipc_sk_rht_init(struct net *net)
2751 {
2752 struct tipc_net *tn = net_generic(net, tipc_net_id);
2753
2754 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2755 }
2756
2757 void tipc_sk_rht_destroy(struct net *net)
2758 {
2759 struct tipc_net *tn = net_generic(net, tipc_net_id);
2760
2761 /* Wait for socket readers to complete */
2762 synchronize_net();
2763
2764 rhashtable_destroy(&tn->sk_rht);
2765 }
2766
2767 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2768 {
2769 struct net *net = sock_net(&tsk->sk);
2770 struct tipc_group *grp = tsk->group;
2771 struct tipc_msg *hdr = &tsk->phdr;
2772 struct tipc_name_seq seq;
2773 int rc;
2774
2775 if (mreq->type < TIPC_RESERVED_TYPES)
2776 return -EACCES;
2777 if (mreq->scope > TIPC_NODE_SCOPE)
2778 return -EINVAL;
2779 if (grp)
2780 return -EACCES;
2781 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2782 if (!grp)
2783 return -ENOMEM;
2784 tsk->group = grp;
2785 msg_set_lookup_scope(hdr, mreq->scope);
2786 msg_set_nametype(hdr, mreq->type);
2787 msg_set_dest_droppable(hdr, true);
2788 seq.type = mreq->type;
2789 seq.lower = mreq->instance;
2790 seq.upper = seq.lower;
2791 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2792 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2793 if (rc) {
2794 tipc_group_delete(net, grp);
2795 tsk->group = NULL;
2796 return rc;
2797 }
2798 /* Eliminate any risk that a broadcast overtakes sent JOINs */
2799 tsk->mc_method.rcast = true;
2800 tsk->mc_method.mandatory = true;
2801 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
2802 return rc;
2803 }
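/* Illustrative user-space sketch (not part of the kernel build): joining a
 * communication group via setsockopt(TIPC_GROUP_JOIN), which ends up in
 * tipc_sk_join() above. The type/instance values are arbitrary examples and
 * the member-event flag is optional.
 *
 *	struct tipc_group_req req = {
 *		.type     = 4711,
 *		.instance = 1,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *		.flags    = TIPC_GROUP_MEMBER_EVTS,
 *	};
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 */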
2804
2805 static int tipc_sk_leave(struct tipc_sock *tsk)
2806 {
2807 struct net *net = sock_net(&tsk->sk);
2808 struct tipc_group *grp = tsk->group;
2809 struct tipc_name_seq seq;
2810 int scope;
2811
2812 if (!grp)
2813 return -EINVAL;
2814 tipc_group_self(grp, &seq, &scope);
2815 tipc_group_delete(net, grp);
2816 tsk->group = NULL;
2817 tipc_sk_withdraw(tsk, scope, &seq);
2818 return 0;
2819 }
2820
2821 /**
2822 * tipc_setsockopt - set socket option
2823 * @sock: socket structure
2824 * @lvl: option level
2825 * @opt: option identifier
2826 * @ov: pointer to new option value
2827 * @ol: length of option value
2828 *
2829 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2830 * (to ease compatibility).
2831 *
2832 * Returns 0 on success, errno otherwise
2833 */
2834 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2835 char __user *ov, unsigned int ol)
2836 {
2837 struct sock *sk = sock->sk;
2838 struct tipc_sock *tsk = tipc_sk(sk);
2839 struct tipc_group_req mreq;
2840 u32 value = 0;
2841 int res = 0;
2842
2843 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2844 return 0;
2845 if (lvl != SOL_TIPC)
2846 return -ENOPROTOOPT;
2847
2848 switch (opt) {
2849 case TIPC_IMPORTANCE:
2850 case TIPC_SRC_DROPPABLE:
2851 case TIPC_DEST_DROPPABLE:
2852 case TIPC_CONN_TIMEOUT:
2853 if (ol < sizeof(value))
2854 return -EINVAL;
2855 if (get_user(value, (u32 __user *)ov))
2856 return -EFAULT;
2857 break;
2858 case TIPC_GROUP_JOIN:
2859 if (ol < sizeof(mreq))
2860 return -EINVAL;
2861 if (copy_from_user(&mreq, ov, sizeof(mreq)))
2862 return -EFAULT;
2863 break;
2864 default:
2865 if (ov || ol)
2866 return -EINVAL;
2867 }
2868
2869 lock_sock(sk);
2870
2871 switch (opt) {
2872 case TIPC_IMPORTANCE:
2873 res = tsk_set_importance(tsk, value);
2874 break;
2875 case TIPC_SRC_DROPPABLE:
2876 if (sock->type != SOCK_STREAM)
2877 tsk_set_unreliable(tsk, value);
2878 else
2879 res = -ENOPROTOOPT;
2880 break;
2881 case TIPC_DEST_DROPPABLE:
2882 tsk_set_unreturnable(tsk, value);
2883 break;
2884 case TIPC_CONN_TIMEOUT:
2885 tipc_sk(sk)->conn_timeout = value;
2886 break;
2887 case TIPC_MCAST_BROADCAST:
2888 tsk->mc_method.rcast = false;
2889 tsk->mc_method.mandatory = true;
2890 break;
2891 case TIPC_MCAST_REPLICAST:
2892 tsk->mc_method.rcast = true;
2893 tsk->mc_method.mandatory = true;
2894 break;
2895 case TIPC_GROUP_JOIN:
2896 res = tipc_sk_join(tsk, &mreq);
2897 break;
2898 case TIPC_GROUP_LEAVE:
2899 res = tipc_sk_leave(tsk);
2900 break;
2901 default:
2902 res = -EINVAL;
2903 }
2904
2905 release_sock(sk);
2906
2907 return res;
2908 }
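/* Illustrative user-space sketch (not part of the kernel build): the simple
 * u32 options handled above are set with a plain setsockopt() at SOL_TIPC
 * level, e.g. a 10 s connect timeout (the value is in milliseconds):
 *
 *	__u32 tmo = 10000;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */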
2909
2910 /**
2911 * tipc_getsockopt - get socket option
2912 * @sock: socket structure
2913 * @lvl: option level
2914 * @opt: option identifier
2915 * @ov: receptacle for option value
2916 * @ol: receptacle for length of option value
2917 *
2918 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2919 * (to ease compatibility).
2920 *
2921 * Returns 0 on success, errno otherwise
2922 */
2923 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2924 char __user *ov, int __user *ol)
2925 {
2926 struct sock *sk = sock->sk;
2927 struct tipc_sock *tsk = tipc_sk(sk);
2928 struct tipc_name_seq seq;
2929 int len, scope;
2930 u32 value;
2931 int res;
2932
2933 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2934 return put_user(0, ol);
2935 if (lvl != SOL_TIPC)
2936 return -ENOPROTOOPT;
2937 res = get_user(len, ol);
2938 if (res)
2939 return res;
2940
2941 lock_sock(sk);
2942
2943 switch (opt) {
2944 case TIPC_IMPORTANCE:
2945 value = tsk_importance(tsk);
2946 break;
2947 case TIPC_SRC_DROPPABLE:
2948 value = tsk_unreliable(tsk);
2949 break;
2950 case TIPC_DEST_DROPPABLE:
2951 value = tsk_unreturnable(tsk);
2952 break;
2953 case TIPC_CONN_TIMEOUT:
2954 value = tsk->conn_timeout;
2955 /* no need to set "res", since already 0 at this point */
2956 break;
2957 case TIPC_NODE_RECVQ_DEPTH:
2958 value = 0; /* was tipc_queue_size, now obsolete */
2959 break;
2960 case TIPC_SOCK_RECVQ_DEPTH:
2961 value = skb_queue_len(&sk->sk_receive_queue);
2962 break;
2963 case TIPC_GROUP_JOIN:
2964 seq.type = 0;
2965 if (tsk->group)
2966 tipc_group_self(tsk->group, &seq, &scope);
2967 value = seq.type;
2968 break;
2969 default:
2970 res = -EINVAL;
2971 }
2972
2973 release_sock(sk);
2974
2975 if (res)
2976 return res; /* "get" failed */
2977
2978 if (len < sizeof(value))
2979 return -EINVAL;
2980
2981 if (copy_to_user(ov, &value, sizeof(value)))
2982 return -EFAULT;
2983
2984 return put_user(sizeof(value), ol);
2985 }
2986
2987 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2988 {
2989 struct net *net = sock_net(sock->sk);
2990 struct tipc_sioc_nodeid_req nr = {0};
2991 struct tipc_sioc_ln_req lnr;
2992 void __user *argp = (void __user *)arg;
2993
2994 switch (cmd) {
2995 case SIOCGETLINKNAME:
2996 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2997 return -EFAULT;
2998 if (!tipc_node_get_linkname(net,
2999 lnr.bearer_id & 0xffff, lnr.peer,
3000 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3001 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3002 return -EFAULT;
3003 return 0;
3004 }
3005 return -EADDRNOTAVAIL;
3006 case SIOCGETNODEID:
3007 if (copy_from_user(&nr, argp, sizeof(nr)))
3008 return -EFAULT;
3009 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3010 return -EADDRNOTAVAIL;
3011 if (copy_to_user(argp, &nr, sizeof(nr)))
3012 return -EFAULT;
3013 return 0;
3014 default:
3015 return -ENOIOCTLCMD;
3016 }
3017 }
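/* Illustrative user-space sketch (not part of the kernel build): resolving a
 * link name with the SIOCGETLINKNAME ioctl handled above. 'peer_node_addr'
 * and the bearer id are arbitrary examples.
 *
 *	struct tipc_sioc_ln_req lnr = {
 *		.peer      = peer_node_addr,
 *		.bearer_id = 0,
 *	};
 *
 *	if (!ioctl(sd, SIOCGETLINKNAME, &lnr))
 *		printf("link: %s\n", lnr.linkname);
 */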
3018
3019 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3020 {
3021 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3022 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3023 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3024
3025 tsk1->peer.family = AF_TIPC;
3026 tsk1->peer.addrtype = TIPC_ADDR_ID;
3027 tsk1->peer.scope = TIPC_NODE_SCOPE;
3028 tsk1->peer.addr.id.ref = tsk2->portid;
3029 tsk1->peer.addr.id.node = onode;
3030 tsk2->peer.family = AF_TIPC;
3031 tsk2->peer.addrtype = TIPC_ADDR_ID;
3032 tsk2->peer.scope = TIPC_NODE_SCOPE;
3033 tsk2->peer.addr.id.ref = tsk1->portid;
3034 tsk2->peer.addr.id.node = onode;
3035
3036 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3037 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3038 return 0;
3039 }
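/* Illustrative user-space sketch (not part of the kernel build): the
 * socketpair() support above yields two already-connected TIPC sockets on
 * the local node:
 *
 *	int sv[2];
 *
 *	socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv);
 */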
3040
3041 /* Protocol switches for the various types of TIPC sockets */
3042
3043 static const struct proto_ops msg_ops = {
3044 .owner = THIS_MODULE,
3045 .family = AF_TIPC,
3046 .release = tipc_release,
3047 .bind = tipc_bind,
3048 .connect = tipc_connect,
3049 .socketpair = tipc_socketpair,
3050 .accept = sock_no_accept,
3051 .getname = tipc_getname,
3052 .poll = tipc_poll,
3053 .ioctl = tipc_ioctl,
3054 .listen = sock_no_listen,
3055 .shutdown = tipc_shutdown,
3056 .setsockopt = tipc_setsockopt,
3057 .getsockopt = tipc_getsockopt,
3058 .sendmsg = tipc_sendmsg,
3059 .recvmsg = tipc_recvmsg,
3060 .mmap = sock_no_mmap,
3061 .sendpage = sock_no_sendpage
3062 };
3063
3064 static const struct proto_ops packet_ops = {
3065 .owner = THIS_MODULE,
3066 .family = AF_TIPC,
3067 .release = tipc_release,
3068 .bind = tipc_bind,
3069 .connect = tipc_connect,
3070 .socketpair = tipc_socketpair,
3071 .accept = tipc_accept,
3072 .getname = tipc_getname,
3073 .poll = tipc_poll,
3074 .ioctl = tipc_ioctl,
3075 .listen = tipc_listen,
3076 .shutdown = tipc_shutdown,
3077 .setsockopt = tipc_setsockopt,
3078 .getsockopt = tipc_getsockopt,
3079 .sendmsg = tipc_send_packet,
3080 .recvmsg = tipc_recvmsg,
3081 .mmap = sock_no_mmap,
3082 .sendpage = sock_no_sendpage
3083 };
3084
3085 static const struct proto_ops stream_ops = {
3086 .owner = THIS_MODULE,
3087 .family = AF_TIPC,
3088 .release = tipc_release,
3089 .bind = tipc_bind,
3090 .connect = tipc_connect,
3091 .socketpair = tipc_socketpair,
3092 .accept = tipc_accept,
3093 .getname = tipc_getname,
3094 .poll = tipc_poll,
3095 .ioctl = tipc_ioctl,
3096 .listen = tipc_listen,
3097 .shutdown = tipc_shutdown,
3098 .setsockopt = tipc_setsockopt,
3099 .getsockopt = tipc_getsockopt,
3100 .sendmsg = tipc_sendstream,
3101 .recvmsg = tipc_recvstream,
3102 .mmap = sock_no_mmap,
3103 .sendpage = sock_no_sendpage
3104 };
3105
3106 static const struct net_proto_family tipc_family_ops = {
3107 .owner = THIS_MODULE,
3108 .family = AF_TIPC,
3109 .create = tipc_sk_create
3110 };
3111
3112 static struct proto tipc_proto = {
3113 .name = "TIPC",
3114 .owner = THIS_MODULE,
3115 .obj_size = sizeof(struct tipc_sock),
3116 .sysctl_rmem = sysctl_tipc_rmem
3117 };
3118
3119 /**
3120 * tipc_socket_init - initialize TIPC socket interface
3121 *
3122 * Returns 0 on success, errno otherwise
3123 */
3124 int tipc_socket_init(void)
3125 {
3126 int res;
3127
3128 res = proto_register(&tipc_proto, 1);
3129 if (res) {
3130 pr_err("Failed to register TIPC protocol type\n");
3131 goto out;
3132 }
3133
3134 res = sock_register(&tipc_family_ops);
3135 if (res) {
3136 pr_err("Failed to register TIPC socket type\n");
3137 proto_unregister(&tipc_proto);
3138 goto out;
3139 }
3140 out:
3141 return res;
3142 }
3143
3144 /**
3145 * tipc_socket_stop - stop TIPC socket interface
3146 */
3147 void tipc_socket_stop(void)
3148 {
3149 sock_unregister(tipc_family_ops.family);
3150 proto_unregister(&tipc_proto);
3151 }
3152
3153 /* Caller should hold socket lock for the passed tipc socket. */
3154 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3155 {
3156 u32 peer_node;
3157 u32 peer_port;
3158 struct nlattr *nest;
3159
3160 peer_node = tsk_peer_node(tsk);
3161 peer_port = tsk_peer_port(tsk);
3162
3163 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
3164
3165 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3166 goto msg_full;
3167 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3168 goto msg_full;
3169
3170 if (tsk->conn_type != 0) {
3171 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3172 goto msg_full;
3173 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3174 goto msg_full;
3175 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3176 goto msg_full;
3177 }
3178 nla_nest_end(skb, nest);
3179
3180 return 0;
3181
3182 msg_full:
3183 nla_nest_cancel(skb, nest);
3184
3185 return -EMSGSIZE;
3186 }
3187
3188 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3189 *tsk)
3190 {
3191 struct net *net = sock_net(skb->sk);
3192 struct sock *sk = &tsk->sk;
3193
3194 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3195 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3196 return -EMSGSIZE;
3197
3198 if (tipc_sk_connected(sk)) {
3199 if (__tipc_nl_add_sk_con(skb, tsk))
3200 return -EMSGSIZE;
3201 } else if (!list_empty(&tsk->publications)) {
3202 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3203 return -EMSGSIZE;
3204 }
3205 return 0;
3206 }
3207
3208 /* Caller should hold socket lock for the passed tipc socket. */
3209 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3210 struct tipc_sock *tsk)
3211 {
3212 struct nlattr *attrs;
3213 void *hdr;
3214
3215 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3216 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3217 if (!hdr)
3218 goto msg_cancel;
3219
3220 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3221 if (!attrs)
3222 goto genlmsg_cancel;
3223
3224 if (__tipc_nl_add_sk_info(skb, tsk))
3225 goto attr_msg_cancel;
3226
3227 nla_nest_end(skb, attrs);
3228 genlmsg_end(skb, hdr);
3229
3230 return 0;
3231
3232 attr_msg_cancel:
3233 nla_nest_cancel(skb, attrs);
3234 genlmsg_cancel:
3235 genlmsg_cancel(skb, hdr);
3236 msg_cancel:
3237 return -EMSGSIZE;
3238 }
3239
3240 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3241 int (*skb_handler)(struct sk_buff *skb,
3242 struct netlink_callback *cb,
3243 struct tipc_sock *tsk))
3244 {
3245 struct rhashtable_iter *iter = (void *)cb->args[4];
3246 struct tipc_sock *tsk;
3247 int err;
3248
3249 rhashtable_walk_start(iter);
3250 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3251 if (IS_ERR(tsk)) {
3252 err = PTR_ERR(tsk);
3253 if (err == -EAGAIN) {
3254 err = 0;
3255 continue;
3256 }
3257 break;
3258 }
3259
3260 sock_hold(&tsk->sk);
3261 rhashtable_walk_stop(iter);
3262 lock_sock(&tsk->sk);
3263 err = skb_handler(skb, cb, tsk);
3264 if (err) {
3265 release_sock(&tsk->sk);
3266 sock_put(&tsk->sk);
3267 goto out;
3268 }
3269 release_sock(&tsk->sk);
3270 rhashtable_walk_start(iter);
3271 sock_put(&tsk->sk);
3272 }
3273 rhashtable_walk_stop(iter);
3274 out:
3275 return skb->len;
3276 }
3277 EXPORT_SYMBOL(tipc_nl_sk_walk);
3278
3279 int tipc_dump_start(struct netlink_callback *cb)
3280 {
3281 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3282 }
3283 EXPORT_SYMBOL(tipc_dump_start);
3284
3285 int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3286 {
3287 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3288 struct rhashtable_iter *iter = (void *)cb->args[4];
3289 struct tipc_net *tn = tipc_net(net);
3290
3291 if (!iter) {
3292 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3293 if (!iter)
3294 return -ENOMEM;
3295
3296 cb->args[4] = (long)iter;
3297 }
3298
3299 rhashtable_walk_enter(&tn->sk_rht, iter);
3300 return 0;
3301 }
3302
3303 int tipc_dump_done(struct netlink_callback *cb)
3304 {
3305 struct rhashtable_iter *hti = (void *)cb->args[4];
3306
3307 rhashtable_walk_exit(hti);
3308 kfree(hti);
3309 return 0;
3310 }
3311 EXPORT_SYMBOL(tipc_dump_done);
3312
3313 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3314 struct tipc_sock *tsk, u32 sk_filter_state,
3315 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3316 {
3317 struct sock *sk = &tsk->sk;
3318 struct nlattr *attrs;
3319 struct nlattr *stat;
3320
3321 	/* Filter response w.r.t. sk_state */
3322 if (!(sk_filter_state & (1 << sk->sk_state)))
3323 return 0;
3324
3325 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3326 if (!attrs)
3327 goto msg_cancel;
3328
3329 if (__tipc_nl_add_sk_info(skb, tsk))
3330 goto attr_msg_cancel;
3331
3332 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3333 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3334 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3335 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3336 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3337 sock_i_uid(sk))) ||
3338 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3339 tipc_diag_gen_cookie(sk),
3340 TIPC_NLA_SOCK_PAD))
3341 goto attr_msg_cancel;
3342
3343 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
3344 if (!stat)
3345 goto attr_msg_cancel;
3346
3347 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3348 skb_queue_len(&sk->sk_receive_queue)) ||
3349 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3350 skb_queue_len(&sk->sk_write_queue)) ||
3351 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3352 atomic_read(&sk->sk_drops)))
3353 goto stat_msg_cancel;
3354
3355 if (tsk->cong_link_cnt &&
3356 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3357 goto stat_msg_cancel;
3358
3359 if (tsk_conn_cong(tsk) &&
3360 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3361 goto stat_msg_cancel;
3362
3363 nla_nest_end(skb, stat);
3364
3365 if (tsk->group)
3366 if (tipc_group_fill_sock_diag(tsk->group, skb))
3367 goto stat_msg_cancel;
3368
3369 nla_nest_end(skb, attrs);
3370
3371 return 0;
3372
3373 stat_msg_cancel:
3374 nla_nest_cancel(skb, stat);
3375 attr_msg_cancel:
3376 nla_nest_cancel(skb, attrs);
3377 msg_cancel:
3378 return -EMSGSIZE;
3379 }
3380 EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3381
3382 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3383 {
3384 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3385 }
3386
3387 /* Caller should hold socket lock for the passed tipc socket. */
3388 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3389 struct netlink_callback *cb,
3390 struct publication *publ)
3391 {
3392 void *hdr;
3393 struct nlattr *attrs;
3394
3395 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3396 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3397 if (!hdr)
3398 goto msg_cancel;
3399
3400 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
3401 if (!attrs)
3402 goto genlmsg_cancel;
3403
3404 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3405 goto attr_msg_cancel;
3406 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3407 goto attr_msg_cancel;
3408 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3409 goto attr_msg_cancel;
3410 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3411 goto attr_msg_cancel;
3412
3413 nla_nest_end(skb, attrs);
3414 genlmsg_end(skb, hdr);
3415
3416 return 0;
3417
3418 attr_msg_cancel:
3419 nla_nest_cancel(skb, attrs);
3420 genlmsg_cancel:
3421 genlmsg_cancel(skb, hdr);
3422 msg_cancel:
3423 return -EMSGSIZE;
3424 }
3425
3426 /* Caller should hold socket lock for the passed tipc socket. */
3427 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3428 struct netlink_callback *cb,
3429 struct tipc_sock *tsk, u32 *last_publ)
3430 {
3431 int err;
3432 struct publication *p;
3433
3434 if (*last_publ) {
3435 list_for_each_entry(p, &tsk->publications, binding_sock) {
3436 if (p->key == *last_publ)
3437 break;
3438 }
3439 if (p->key != *last_publ) {
3440 /* We never set seq or call nl_dump_check_consistent()
3441 * this means that setting prev_seq here will cause the
3442 			 * consistency check to fail in the netlink callback
3443 * handler. Resulting in the last NLMSG_DONE message
3444 * having the NLM_F_DUMP_INTR flag set.
3445 */
3446 cb->prev_seq = 1;
3447 *last_publ = 0;
3448 return -EPIPE;
3449 }
3450 } else {
3451 p = list_first_entry(&tsk->publications, struct publication,
3452 binding_sock);
3453 }
3454
3455 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3456 err = __tipc_nl_add_sk_publ(skb, cb, p);
3457 if (err) {
3458 *last_publ = p->key;
3459 return err;
3460 }
3461 }
3462 *last_publ = 0;
3463
3464 return 0;
3465 }
3466
3467 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3468 {
3469 int err;
3470 u32 tsk_portid = cb->args[0];
3471 u32 last_publ = cb->args[1];
3472 u32 done = cb->args[2];
3473 struct net *net = sock_net(skb->sk);
3474 struct tipc_sock *tsk;
3475
3476 if (!tsk_portid) {
3477 struct nlattr **attrs;
3478 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3479
3480 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3481 if (err)
3482 return err;
3483
3484 if (!attrs[TIPC_NLA_SOCK])
3485 return -EINVAL;
3486
3487 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
3488 attrs[TIPC_NLA_SOCK],
3489 tipc_nl_sock_policy, NULL);
3490 if (err)
3491 return err;
3492
3493 if (!sock[TIPC_NLA_SOCK_REF])
3494 return -EINVAL;
3495
3496 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3497 }
3498
3499 if (done)
3500 return 0;
3501
3502 tsk = tipc_sk_lookup(net, tsk_portid);
3503 if (!tsk)
3504 return -EINVAL;
3505
3506 lock_sock(&tsk->sk);
3507 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3508 if (!err)
3509 done = 1;
3510 release_sock(&tsk->sk);
3511 sock_put(&tsk->sk);
3512
3513 cb->args[0] = tsk_portid;
3514 cb->args[1] = last_publ;
3515 cb->args[2] = done;
3516
3517 return skb->len;
3518 }
3519