/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth address family and sockets. */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/stringify.h>
#include <linux/sched/signal.h>

#include <asm/ioctls.h>

#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>

#include "leds.h"
#include "selftest.h"

/* Bluetooth sockets */
#define BT_MAX_PROTO	8
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);

static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

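/* Reassign sk's lock and slock to the per-protocol lockdep classes above,
 * so lockdep can tell nested locking of different Bluetooth protocols apart.
 */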
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	BUG_ON(!sock_allow_reclassification(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
				bt_key_strings[proto], &bt_lock_key[proto]);
}
EXPORT_SYMBOL(bt_sock_reclassify_lock);

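/* Register the socket create handler for a Bluetooth protocol number.
 * Returns -EINVAL for an out-of-range proto and -EEXIST if the slot is
 * already taken.
 */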
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
	int err = 0;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (bt_proto[proto])
		err = -EEXIST;
	else
		bt_proto[proto] = ops;

	write_unlock(&bt_proto_lock);

	return err;
}
EXPORT_SYMBOL(bt_sock_register);

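/* Remove a previously registered protocol handler. */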
void bt_sock_unregister(int proto)
{
	if (proto < 0 || proto >= BT_MAX_PROTO)
		return;

	write_lock(&bt_proto_lock);
	bt_proto[proto] = NULL;
	write_unlock(&bt_proto_lock);
}
EXPORT_SYMBOL(bt_sock_unregister);

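/* socket(2) entry point for PF_BLUETOOTH: only the initial network
 * namespace is supported.  If the requested protocol is not registered
 * yet, try to load it via the "bt-proto-%d" module alias, then hand the
 * socket off to the protocol's own create callback.
 */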
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}

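/* Add a socket to a protocol's global socket list. */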
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);

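/* Remove a socket from a protocol's global socket list. */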
void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_del_node_init(sk);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);

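/* Queue a child socket on the parent's accept queue and take a reference
 * that is held until bt_accept_unlink().  When @bh is true the caller runs
 * in bottom-half context, so the socket is protected with
 * bh_lock_sock_nested() instead of lock_sock().
 */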
void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);

	if (bh)
		bh_lock_sock_nested(sk);
	else
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;

	if (bh)
		bh_unlock_sock(sk);
	else
		release_sock(sk);

	parent->sk_ack_backlog++;
}
EXPORT_SYMBOL(bt_accept_enqueue);

/* Calling function must hold the sk lock.
 * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list.
 */
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	bt_sk(sk)->parent->sk_ack_backlog--;
	bt_sk(sk)->parent = NULL;
	sock_put(sk);
}
EXPORT_SYMBOL(bt_accept_unlink);

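/* Scan the parent's accept queue for a child that can be handed to
 * accept(2): one that is BT_CONNECTED, or any child when @newsock is NULL
 * or the parent has BT_SK_DEFER_SETUP set.  The child is unlinked, grafted
 * onto @newsock (if given) and returned; children that reached BT_CLOSED
 * in the meantime are dropped on the way.
 */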
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

restart:
	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;

		/* Prevent early freeing of sk due to unlink and sock_kill */
		sock_hold(sk);
		lock_sock(sk);

		/* Check sk has not already been unlinked via
		 * bt_accept_unlink() due to serialisation caused by sk locking
		 */
		if (!bt_sk(sk)->parent) {
			BT_DBG("sk %p, already unlinked", sk);
			release_sock(sk);
			sock_put(sk);

			/* Restart the loop as sk is no longer in the list
			 * and also avoid a potential infinite loop because
			 * list_for_each_entry_safe() is not thread safe.
			 */
			goto restart;
		}

		/* sk is safely in the parent list so reduce reference count */
		sock_put(sk);

		/* FIXME: Is this check still needed */
		if (sk->sk_state == BT_CLOSED) {
			bt_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);

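/* Generic datagram recvmsg: dequeue one skb, copy at most @len bytes into
 * @msg (flagging MSG_TRUNC if the skb was larger) and, if the protocol
 * provides an skb_msg_name callback, fill in the source address.
 */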
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	size_t skblen;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		return err;
	}

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		if (msg->msg_name && bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
EXPORT_SYMBOL(bt_sock_recvmsg);

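/* Sleep until data shows up on the receive queue, an error or shutdown is
 * signalled, a signal is pending or the timeout expires.  The socket lock
 * is dropped while sleeping; the remaining timeout is returned.
 */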
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}

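/* Stream-style recvmsg: keep copying from queued skbs (consuming them
 * unless MSG_PEEK is set) until @size bytes have been copied, the
 * SO_RCVLOWAT target is met, or an error, shutdown or timeout occurs.
 */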
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			   size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size   -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);

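/* Report readability on a listening socket when at least one child on the
 * accept queue can be handed to accept(2).
 */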
static inline __poll_t bt_accept_poll(struct sock *parent)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;
		if (sk->sk_state == BT_CONNECTED ||
		    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
		     sk->sk_state == BT_CONNECT2))
			return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}

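/* Generic poll: listening sockets defer to bt_accept_poll(); otherwise
 * build the usual error/hangup/readable mask and report writability only
 * once the socket is no longer connecting and is not suspended.
 */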
__poll_t bt_sock_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= EPOLLHUP;

	if (sk->sk_state == BT_CONNECT ||
			sk->sk_state == BT_CONNECT2 ||
			sk->sk_state == BT_CONFIG)
		return mask;

	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(bt_sock_poll);

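/* Generic socket ioctls: TIOCOUTQ reports the free space left in the send
 * buffer and TIOCINQ the length of the next queued skb; anything else is
 * left to the protocol (-ENOIOCTLCMD).
 */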
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		release_sock(sk);
		err = put_user(amount, (int __user *) arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
EXPORT_SYMBOL(bt_sock_ioctl);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL(bt_sock_wait_ready);

#ifdef CONFIG_PROC_FS
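/* Each protocol can export its socket list as a seq_file under /proc/net
 * via bt_procfs_init(); bt_seq_show() prints one line per socket plus an
 * optional protocol-specific tail (custom_seq_show).
 */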
static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	read_lock(&l->lock);
	return seq_hlist_start_head(&l->head, *pos);
}

static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	return seq_hlist_next(v, &l->head, pos);
}

static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	read_unlock(&l->lock);
}

static int bt_seq_show(struct seq_file *seq, void *v)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Parent");

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	} else {
		struct sock *sk = sk_entry(v);
		struct bt_sock *bt = bt_sk(sk);

		seq_printf(seq,
			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
			   sk,
			   refcount_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   bt->parent ? sock_i_ino(bt->parent) : 0LU);

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};

int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	sk_list->custom_seq_show = seq_show;

	if (!proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops, sk_list))
		return -ENOMEM;
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
#else
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
}
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);

static const struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};

struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);

#define VERSION __stringify(BT_SUBSYS_VERSION) "." \
		__stringify(BT_SUBSYS_REVISION)

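/* Subsystem init: run the optional self tests, create the debugfs root,
 * set up LED triggers and sysfs, register the PF_BLUETOOTH socket family
 * and bring up the HCI socket, L2CAP, SCO and management interfaces.
 */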
static int __init bt_init(void)
{
	int err;

	sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

	BT_INFO("Core ver %s", VERSION);

	err = bt_selftest();
	if (err < 0)
		return err;

	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	bt_leds_init();

	err = bt_sysfs_init();
	if (err < 0)
		return err;

	err = sock_register(&bt_sock_family_ops);
	if (err)
		goto cleanup_sysfs;

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err)
		goto unregister_socket;

	err = l2cap_init();
	if (err)
		goto cleanup_socket;

	err = sco_init();
	if (err)
		goto cleanup_cap;

	err = mgmt_init();
	if (err)
		goto cleanup_sco;

	return 0;

cleanup_sco:
	sco_exit();
cleanup_cap:
	l2cap_exit();
cleanup_socket:
	hci_sock_cleanup();
unregister_socket:
	sock_unregister(PF_BLUETOOTH);
cleanup_sysfs:
	bt_sysfs_cleanup();
	return err;
}

static void __exit bt_exit(void)
{
	mgmt_exit();

	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();

	bt_leds_cleanup();

	debugfs_remove_recursive(bt_debugfs);
}

subsys_initcall(bt_init);
module_exit(bt_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);