1 /* l2cap_br.c - L2CAP BREDR oriented handling */
2
3 /*
4 * Copyright (c) 2016 Intel Corporation
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 #include <zephyr/kernel.h>
10 #include <string.h>
11 #include <errno.h>
12 #include <zephyr/sys/atomic.h>
13 #include <zephyr/sys/byteorder.h>
14 #include <zephyr/sys/iterable_sections.h>
15 #include <zephyr/sys/util.h>
16
17 #include <zephyr/bluetooth/hci.h>
18 #include <zephyr/bluetooth/bluetooth.h>
19 #include <zephyr/bluetooth/conn.h>
20
21 #include "host/buf_view.h"
22 #include "host/hci_core.h"
23 #include "host/conn_internal.h"
24 #include "l2cap_br_internal.h"
25 #include "avdtp_internal.h"
26 #include "a2dp_internal.h"
27 #include "avctp_internal.h"
28 #include "avrcp_internal.h"
29 #include "rfcomm_internal.h"
30 #include "sdp_internal.h"
31
32 #include <zephyr/logging/log.h>
33 LOG_MODULE_REGISTER(bt_l2cap_br, CONFIG_BT_L2CAP_LOG_LEVEL);
34
/* Recover the owning bt_l2cap_br_chan from its rtx_work delayable item */
#define BR_CHAN_RTX(_w) CONTAINER_OF(k_work_delayable_from_work(_w), \
				     struct bt_l2cap_br_chan, rtx_work)

/* Valid PSM range for BR/EDR connection-oriented channels */
#define L2CAP_BR_PSM_START 0x0001
#define L2CAP_BR_PSM_END 0xffff

/* Dynamically allocated CID range for BR/EDR channels */
#define L2CAP_BR_CID_DYN_START 0x0040
#define L2CAP_BR_CID_DYN_END 0xffff
#define L2CAP_BR_CID_IS_DYN(_cid) \
	(_cid >= L2CAP_BR_CID_DYN_START && _cid <= L2CAP_BR_CID_DYN_END)

/* Minimum and default BR/EDR L2CAP MTU values used by this module */
#define L2CAP_BR_MIN_MTU 48
#define L2CAP_BR_DEFAULT_MTU 672

/* PSM reserved for the Service Discovery Protocol */
#define L2CAP_BR_PSM_SDP 0x0001

/* RTX timeouts armed for the corresponding signalling procedures */
#define L2CAP_BR_INFO_TIMEOUT K_SECONDS(4)
#define L2CAP_BR_CFG_TIMEOUT K_SECONDS(4)
#define L2CAP_BR_DISCONN_TIMEOUT K_SECONDS(1)
#define L2CAP_BR_CONN_TIMEOUT K_SECONDS(40)

/*
 * L2CAP extended feature mask:
 * BR/EDR fixed channel support enabled
 */
#define L2CAP_FEAT_FIXED_CHAN_MASK 0x00000080
61
/* Bit positions used within bt_l2cap_br_chan.flags */
enum {
	/* Connection oriented channels flags */
	L2CAP_FLAG_CONN_LCONF_DONE,	/* local config accepted by remote */
	L2CAP_FLAG_CONN_RCONF_DONE,	/* remote config accepted by local */
	L2CAP_FLAG_CONN_ACCEPTOR,	/* getting incoming connection req */
	L2CAP_FLAG_CONN_PENDING,	/* remote sent pending result in rsp */

	/* Signaling channel flags */
	L2CAP_FLAG_SIG_INFO_PENDING,	/* retrieving remote l2cap info */
	L2CAP_FLAG_SIG_INFO_DONE,	/* remote l2cap info is done */

	/* fixed channels flags */
	L2CAP_FLAG_FIXED_CONNECTED,		/* fixed connected */
};
76
/* Registered BR/EDR L2CAP servers, matched by PSM
 * (see l2cap_br_server_lookup_psm()).
 */
static sys_slist_t br_servers;


/* Pool for outgoing BR/EDR signaling packets, min MTU is 48 */
NET_BUF_POOL_FIXED_DEFINE(br_sig_pool, CONFIG_BT_MAX_CONN,
			  BT_L2CAP_BUF_SIZE(L2CAP_BR_MIN_MTU), 8, NULL);

/* BR/EDR L2CAP signalling channel specific context */
struct bt_l2cap_br {
	/* The channel this context is associated with */
	struct bt_l2cap_br_chan chan;
	/* Ident of the outstanding Information Request (0 when none) */
	uint8_t info_ident;
	/*
	 * 2.1 CHANNEL IDENTIFIERS in
	 * BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 3, Part A.
	 * The range of fixed L2CAP CID is 0x0001 ~ 0x0007 both for LE and BR.
	 * So use one octet buffer to keep the `Fixed channels supported`
	 * of peer device.
	 */
	uint8_t info_fixed_chan;
	/* Extended feature mask reported by the peer */
	uint32_t info_feat_mask;
};

/* One signalling channel context per possible ACL connection */
static struct bt_l2cap_br bt_l2cap_br_pool[CONFIG_BT_MAX_CONN];
101
bt_l2cap_br_lookup_rx_cid(struct bt_conn * conn,uint16_t cid)102 struct bt_l2cap_chan *bt_l2cap_br_lookup_rx_cid(struct bt_conn *conn,
103 uint16_t cid)
104 {
105 struct bt_l2cap_chan *chan;
106
107 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
108 if (BR_CHAN(chan)->rx.cid == cid) {
109 return chan;
110 }
111 }
112
113 return NULL;
114 }
115
bt_l2cap_br_lookup_tx_cid(struct bt_conn * conn,uint16_t cid)116 struct bt_l2cap_chan *bt_l2cap_br_lookup_tx_cid(struct bt_conn *conn,
117 uint16_t cid)
118 {
119 struct bt_l2cap_chan *chan;
120
121 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
122 if (BR_CHAN(chan)->tx.cid == cid) {
123 return chan;
124 }
125 }
126
127 return NULL;
128 }
129
bt_l2cap_br_get_remote_fixed_chan(struct bt_conn * conn)130 uint8_t bt_l2cap_br_get_remote_fixed_chan(struct bt_conn *conn)
131 {
132 struct bt_l2cap_chan *chan_sig;
133 struct bt_l2cap_br *br_chan_sig;
134
135 chan_sig = bt_l2cap_br_lookup_rx_cid(conn, BT_L2CAP_CID_BR_SIG);
136 if (!chan_sig) {
137 return (uint8_t)0U;
138 }
139
140 br_chan_sig = CONTAINER_OF(chan_sig, struct bt_l2cap_br, chan.chan);
141
142 return br_chan_sig->info_fixed_chan;
143 }
144
145 static struct bt_l2cap_br_chan*
l2cap_br_chan_alloc_cid(struct bt_conn * conn,struct bt_l2cap_chan * chan)146 l2cap_br_chan_alloc_cid(struct bt_conn *conn, struct bt_l2cap_chan *chan)
147 {
148 struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);
149 uint16_t cid;
150
151 /*
152 * No action needed if there's already a CID allocated, e.g. in
153 * the case of a fixed channel.
154 */
155 if (br_chan->rx.cid > 0) {
156 return br_chan;
157 }
158
159 /*
160 * L2CAP_BR_CID_DYN_END is 0xffff so we don't check against it since
161 * cid is uint16_t, just check against uint16_t overflow
162 */
163 for (cid = L2CAP_BR_CID_DYN_START; cid; cid++) {
164 if (!bt_l2cap_br_lookup_rx_cid(conn, cid)) {
165 br_chan->rx.cid = cid;
166 return br_chan;
167 }
168 }
169
170 return NULL;
171 }
172
l2cap_br_chan_cleanup(struct bt_l2cap_chan * chan)173 static void l2cap_br_chan_cleanup(struct bt_l2cap_chan *chan)
174 {
175 bt_l2cap_chan_remove(chan->conn, chan);
176 bt_l2cap_br_chan_del(chan);
177 }
178
l2cap_br_chan_destroy(struct bt_l2cap_chan * chan)179 static void l2cap_br_chan_destroy(struct bt_l2cap_chan *chan)
180 {
181 struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);
182
183 LOG_DBG("chan %p cid 0x%04x", br_chan, br_chan->rx.cid);
184
185 /* Cancel ongoing work. Since the channel can be re-used after this
186 * we need to sync to make sure that the kernel does not have it
187 * in its queue anymore.
188 *
189 * In the case where we are in the context of executing the rtx_work
190 * item, we don't sync as it will deadlock the workqueue.
191 */
192 struct k_work_q *rtx_work_queue = br_chan->rtx_work.queue;
193
194 if (rtx_work_queue == NULL || k_current_get() != &rtx_work_queue->thread) {
195 k_work_cancel_delayable_sync(&br_chan->rtx_work, &br_chan->rtx_sync);
196 } else {
197 k_work_cancel_delayable(&br_chan->rtx_work);
198 }
199
200 atomic_clear(BR_CHAN(chan)->flags);
201 }
202
l2cap_br_rtx_timeout(struct k_work * work)203 static void l2cap_br_rtx_timeout(struct k_work *work)
204 {
205 struct bt_l2cap_br_chan *chan = BR_CHAN_RTX(work);
206
207 LOG_WRN("chan %p timeout", chan);
208
209 if (chan->rx.cid == BT_L2CAP_CID_BR_SIG) {
210 LOG_DBG("Skip BR/EDR signalling channel ");
211 atomic_clear_bit(chan->flags, L2CAP_FLAG_SIG_INFO_PENDING);
212 return;
213 }
214
215 LOG_DBG("chan %p %s scid 0x%04x", chan, bt_l2cap_chan_state_str(chan->state), chan->rx.cid);
216
217 switch (chan->state) {
218 case BT_L2CAP_CONFIG:
219 bt_l2cap_br_chan_disconnect(&chan->chan);
220 break;
221 case BT_L2CAP_DISCONNECTING:
222 case BT_L2CAP_CONNECTING:
223 l2cap_br_chan_cleanup(&chan->chan);
224 break;
225 default:
226 break;
227 }
228 }
229
/* Attach @p chan to @p conn: allocate a CID, init its TX queue and RTX
 * work, and register it on the connection. Returns false when no dynamic
 * CID is available.
 */
static bool l2cap_br_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			      bt_l2cap_chan_destroy_t destroy)
{
	struct bt_l2cap_br_chan *br_chan = l2cap_br_chan_alloc_cid(conn, chan);

	if (br_chan == NULL) {
		LOG_DBG("Unable to allocate L2CAP CID");
		return false;
	}

	k_fifo_init(&br_chan->_pdu_tx_queue);

	/* All dynamic channels have the destroy handler which makes sure that
	 * the RTX work structure is properly released with a cancel sync.
	 * The fixed signal channel is only removed when disconnected and the
	 * disconnected handler is always called from the workqueue itself so
	 * canceling from there should always succeed.
	 */
	k_work_init_delayable(&br_chan->rtx_work, l2cap_br_rtx_timeout);
	bt_l2cap_chan_add(conn, chan, destroy);

	return true;
}
253
/* Return the next signalling identifier. Idents are 1..255; the value 0
 * is invalid per the signalling protocol and skipped on wrap-around.
 */
static uint8_t l2cap_br_get_ident(void)
{
	static uint8_t ident;

	ident = (uint8_t)(ident + 1U);
	if (ident == 0U) {
		/* Wrapped: 0 is not a valid ident, move on to 1 */
		ident = 1U;
	}

	return ident;
}
266
raise_data_ready(struct bt_l2cap_br_chan * br_chan)267 static void raise_data_ready(struct bt_l2cap_br_chan *br_chan)
268 {
269 if (!atomic_set(&br_chan->_pdu_ready_lock, 1)) {
270 sys_slist_append(&br_chan->chan.conn->l2cap_data_ready,
271 &br_chan->_pdu_ready);
272 LOG_DBG("data ready raised");
273 } else {
274 LOG_DBG("data ready already");
275 }
276
277 bt_conn_data_ready(br_chan->chan.conn);
278 }
279
lower_data_ready(struct bt_l2cap_br_chan * br_chan)280 static void lower_data_ready(struct bt_l2cap_br_chan *br_chan)
281 {
282 struct bt_conn *conn = br_chan->chan.conn;
283 __maybe_unused sys_snode_t *s = sys_slist_get(&conn->l2cap_data_ready);
284
285 __ASSERT_NO_MSG(s == &br_chan->_pdu_ready);
286
287 __maybe_unused atomic_t old = atomic_set(&br_chan->_pdu_ready_lock, 0);
288
289 __ASSERT_NO_MSG(old);
290 }
291
cancel_data_ready(struct bt_l2cap_br_chan * br_chan)292 static void cancel_data_ready(struct bt_l2cap_br_chan *br_chan)
293 {
294 struct bt_conn *conn = br_chan->chan.conn;
295
296 sys_slist_find_and_remove(&conn->l2cap_data_ready,
297 &br_chan->_pdu_ready);
298
299 atomic_set(&br_chan->_pdu_ready_lock, 0);
300 }
301
/* Queue @p buf for transmission on the channel whose TX CID is @p cid.
 *
 * Prepends the basic L2CAP header, stores the (cb, user_data) closure in
 * the buffer's user data and flags the channel as ready to send.
 * Returns 0 on success or a negative error; on error the caller keeps
 * ownership of @p buf (see l2cap_send()).
 */
int bt_l2cap_br_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
			bt_conn_tx_cb_t cb, void *user_data)
{
	struct bt_l2cap_hdr *hdr;
	struct bt_l2cap_chan *ch = bt_l2cap_br_lookup_tx_cid(conn, cid);
	struct bt_l2cap_br_chan *br_chan;

	/* Bug fix: the lookup result was used unconditionally. For an
	 * unknown/stale CID, CONTAINER_OF(NULL, ...) yields a wild pointer
	 * that is dereferenced below (k_fifo_put). Fail early instead.
	 */
	if (ch == NULL) {
		LOG_WRN("No channel for cid 0x%04x", cid);
		return -ESHUTDOWN;
	}

	br_chan = CONTAINER_OF(ch, struct bt_l2cap_br_chan, chan);

	LOG_DBG("chan %p buf %p len %zu", br_chan, buf, buf->len);

	/* Basic L2CAP header: payload length + destination CID */
	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
	hdr->cid = sys_cpu_to_le16(cid);

	if (buf->user_data_size < sizeof(struct closure)) {
		LOG_WRN("not enough room in user_data %d < %d pool %u",
			buf->user_data_size,
			CONFIG_BT_CONN_TX_USER_DATA_SIZE,
			buf->pool_id);
		return -EINVAL;
	}

	LOG_DBG("push PDU: cb %p userdata %p", cb, user_data);

	/* Hand the PDU to the channel's TX queue and signal readiness */
	make_closure(buf->user_data, cb, user_data);
	k_fifo_put(&br_chan->_pdu_tx_queue, buf);
	raise_data_ready(br_chan);

	return 0;
}
331
332 /* Send the buffer and release it in case of failure.
333 * Any other cleanup in failure to send should be handled by the disconnected
334 * handler.
335 */
l2cap_send(struct bt_conn * conn,uint16_t cid,struct net_buf * buf)336 static inline void l2cap_send(struct bt_conn *conn, uint16_t cid,
337 struct net_buf *buf)
338 {
339 if (bt_l2cap_br_send_cb(conn, cid, buf, NULL, NULL)) {
340 net_buf_unref(buf);
341 }
342 }
343
/* Send a signalling request on @p chan and arm its RTX timer.
 * The buffer is released here if queueing fails.
 */
static void l2cap_br_chan_send_req(struct bt_l2cap_br_chan *chan,
				   struct net_buf *buf, k_timeout_t timeout)
{
	int err = bt_l2cap_br_send_cb(chan->chan.conn, BT_L2CAP_CID_BR_SIG,
				      buf, NULL, NULL);

	if (err) {
		net_buf_unref(buf);
		return;
	}

	/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part A] page 126:
	 *
	 * The value of this timer is implementation-dependent but the minimum
	 * initial value is 1 second and the maximum initial value is 60
	 * seconds. One RTX timer shall exist for each outstanding signaling
	 * request, including each Echo Request. The timer disappears on the
	 * final expiration, when the response is received, or the physical
	 * link is lost.
	 */
	k_work_reschedule(&chan->rtx_work, timeout);
}
365
366 /* L2CAP channel wants to send a PDU */
chan_has_data(struct bt_l2cap_br_chan * br_chan)367 static bool chan_has_data(struct bt_l2cap_br_chan *br_chan)
368 {
369 return !k_fifo_is_empty(&br_chan->_pdu_tx_queue);
370 }
371
l2cap_br_data_pull(struct bt_conn * conn,size_t amount,size_t * length)372 struct net_buf *l2cap_br_data_pull(struct bt_conn *conn,
373 size_t amount,
374 size_t *length)
375 {
376 const sys_snode_t *pdu_ready = sys_slist_peek_head(&conn->l2cap_data_ready);
377
378 if (!pdu_ready) {
379 LOG_DBG("nothing to send on this conn");
380 return NULL;
381 }
382
383 struct bt_l2cap_br_chan *br_chan = CONTAINER_OF(pdu_ready,
384 struct bt_l2cap_br_chan,
385 _pdu_ready);
386
387 /* Leave the PDU buffer in the queue until we have sent all its
388 * fragments.
389 */
390 struct net_buf *pdu = k_fifo_peek_head(&br_chan->_pdu_tx_queue);
391
392 __ASSERT(pdu, "signaled ready but no PDUs in the TX queue");
393
394 if (bt_buf_has_view(pdu)) {
395 LOG_ERR("already have view on %p", pdu);
396 return NULL;
397 }
398
399 /* We can't interleave ACL fragments from different channels for the
400 * same ACL conn -> we have to wait until a full L2 PDU is transferred
401 * before switching channels.
402 */
403 bool last_frag = amount >= pdu->len;
404
405 if (last_frag) {
406 LOG_DBG("last frag, removing %p", pdu);
407 __maybe_unused struct net_buf *b = k_fifo_get(&br_chan->_pdu_tx_queue, K_NO_WAIT);
408
409 __ASSERT_NO_MSG(b == pdu);
410
411 LOG_DBG("chan %p done", br_chan);
412 lower_data_ready(br_chan);
413
414 /* Append channel to list if it still has data */
415 if (chan_has_data(br_chan)) {
416 LOG_DBG("chan %p ready", br_chan);
417 raise_data_ready(br_chan);
418 }
419 }
420
421 *length = pdu->len;
422
423 return pdu;
424 }
425
l2cap_br_get_info(struct bt_l2cap_br * l2cap,uint16_t info_type)426 static void l2cap_br_get_info(struct bt_l2cap_br *l2cap, uint16_t info_type)
427 {
428 struct bt_l2cap_info_req *info;
429 struct net_buf *buf;
430 struct bt_l2cap_sig_hdr *hdr;
431
432 LOG_DBG("info type %u", info_type);
433
434 if (atomic_test_bit(l2cap->chan.flags, L2CAP_FLAG_SIG_INFO_PENDING)) {
435 return;
436 }
437
438 switch (info_type) {
439 case BT_L2CAP_INFO_FEAT_MASK:
440 case BT_L2CAP_INFO_FIXED_CHAN:
441 break;
442 default:
443 LOG_WRN("Unsupported info type %u", info_type);
444 return;
445 }
446
447 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
448
449 atomic_set_bit(l2cap->chan.flags, L2CAP_FLAG_SIG_INFO_PENDING);
450 l2cap->info_ident = l2cap_br_get_ident();
451
452 hdr = net_buf_add(buf, sizeof(*hdr));
453 hdr->code = BT_L2CAP_INFO_REQ;
454 hdr->ident = l2cap->info_ident;
455 hdr->len = sys_cpu_to_le16(sizeof(*info));
456
457 info = net_buf_add(buf, sizeof(*info));
458 info->type = sys_cpu_to_le16(info_type);
459
460 l2cap_br_chan_send_req(&l2cap->chan, buf, L2CAP_BR_INFO_TIMEOUT);
461 }
462
connect_fixed_channel(struct bt_l2cap_br_chan * chan)463 static void connect_fixed_channel(struct bt_l2cap_br_chan *chan)
464 {
465 if (atomic_test_and_set_bit(chan->flags, L2CAP_FLAG_FIXED_CONNECTED)) {
466 return;
467 }
468
469 if (chan->chan.ops && chan->chan.ops->connected) {
470 chan->chan.ops->connected(&chan->chan);
471 }
472 }
473
connect_optional_fixed_channels(struct bt_l2cap_br * l2cap)474 static void connect_optional_fixed_channels(struct bt_l2cap_br *l2cap)
475 {
476 /* can be change to loop if more BR/EDR fixed channels are added */
477 if (l2cap->info_fixed_chan & BIT(BT_L2CAP_CID_BR_SMP)) {
478 struct bt_l2cap_chan *chan;
479
480 chan = bt_l2cap_br_lookup_rx_cid(l2cap->chan.chan.conn,
481 BT_L2CAP_CID_BR_SMP);
482 if (chan) {
483 connect_fixed_channel(BR_CHAN(chan));
484 }
485 }
486 }
487
/* Handle an Information Response on the signalling channel.
 *
 * Validates the ident against the outstanding request, cancels the RTX
 * timer and caches the peer's feature mask / fixed-channel map. Returns 0
 * on success or -EINVAL for malformed or unexpected responses.
 */
static int l2cap_br_info_rsp(struct bt_l2cap_br *l2cap, uint8_t ident,
			     struct net_buf *buf)
{
	struct bt_l2cap_info_rsp *rsp;
	uint16_t type, result;
	int err = 0;

	/* Info exchange already finished: ignore late/duplicate responses */
	if (atomic_test_bit(l2cap->chan.flags, L2CAP_FLAG_SIG_INFO_DONE)) {
		return 0;
	}

	if (atomic_test_and_clear_bit(l2cap->chan.flags,
				      L2CAP_FLAG_SIG_INFO_PENDING)) {
		/*
		 * Release RTX timer since got the response & there's pending
		 * command request.
		 */
		k_work_cancel_delayable(&l2cap->chan.rtx_work);
	}

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small info rsp packet size");
		err = -EINVAL;
		goto done;
	}

	if (ident != l2cap->info_ident) {
		LOG_WRN("Idents mismatch");
		err = -EINVAL;
		goto done;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	result = sys_le16_to_cpu(rsp->result);
	if (result != BT_L2CAP_INFO_SUCCESS) {
		LOG_WRN("Result unsuccessful");
		err = -EINVAL;
		goto done;
	}

	type = sys_le16_to_cpu(rsp->type);

	switch (type) {
	case BT_L2CAP_INFO_FEAT_MASK:
		if (buf->len < sizeof(uint32_t)) {
			LOG_ERR("Invalid remote info feat mask");
			err = -EINVAL;
			break;
		}
		l2cap->info_feat_mask = net_buf_pull_le32(buf);
		LOG_DBG("remote info mask 0x%08x", l2cap->info_feat_mask);

		if (!(l2cap->info_feat_mask & L2CAP_FEAT_FIXED_CHAN_MASK)) {
			break;
		}

		/* Peer supports fixed channels: query which ones. The early
		 * return keeps SIG_INFO_DONE unset until that second
		 * response arrives.
		 */
		l2cap_br_get_info(l2cap, BT_L2CAP_INFO_FIXED_CHAN);
		return 0;
	case BT_L2CAP_INFO_FIXED_CHAN:
		if (buf->len < sizeof(uint8_t)) {
			LOG_ERR("Invalid remote info fixed chan");
			err = -EINVAL;
			break;
		}
		/*
		 * 2.1 CHANNEL IDENTIFIERS in
		 * BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 3, Part A.
		 * The info length of `Fixed channels supported` is 8 octets.
		 * Then the range of fixed L2CAP CID is 0x0001 ~ 0x0007 both for LE and BR.
		 * So use one octet buffer to keep the `Fixed channels supported`
		 * of peer device.
		 */
		l2cap->info_fixed_chan = net_buf_pull_u8(buf);
		LOG_DBG("remote fixed channel mask 0x%02x", l2cap->info_fixed_chan);

		connect_optional_fixed_channels(l2cap);

		break;
	default:
		LOG_WRN("type 0x%04x unsupported", type);
		err = -EINVAL;
		break;
	}
done:
	/* Exchange complete (successfully or not); invalidate the ident */
	atomic_set_bit(l2cap->chan.flags, L2CAP_FLAG_SIG_INFO_DONE);
	l2cap->info_ident = 0U;
	return err;
}
576
get_fixed_channels_mask(void)577 static uint8_t get_fixed_channels_mask(void)
578 {
579 uint8_t mask = 0U;
580
581 /* this needs to be enhanced if AMP Test Manager support is added */
582 STRUCT_SECTION_FOREACH(bt_l2cap_br_fixed_chan, fchan) {
583 mask |= BIT(fchan->cid);
584 }
585
586 return mask;
587 }
588
l2cap_br_info_req(struct bt_l2cap_br * l2cap,uint8_t ident,struct net_buf * buf)589 static int l2cap_br_info_req(struct bt_l2cap_br *l2cap, uint8_t ident,
590 struct net_buf *buf)
591 {
592 struct bt_conn *conn = l2cap->chan.chan.conn;
593 struct bt_l2cap_info_req *req = (void *)buf->data;
594 struct bt_l2cap_info_rsp *rsp;
595 struct net_buf *rsp_buf;
596 struct bt_l2cap_sig_hdr *hdr_info;
597 uint16_t type;
598
599 if (buf->len < sizeof(*req)) {
600 LOG_ERR("Too small info req packet size");
601 return -EINVAL;
602 }
603
604 rsp_buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
605
606 type = sys_le16_to_cpu(req->type);
607 LOG_DBG("type 0x%04x", type);
608
609 hdr_info = net_buf_add(rsp_buf, sizeof(*hdr_info));
610 hdr_info->code = BT_L2CAP_INFO_RSP;
611 hdr_info->ident = ident;
612
613 rsp = net_buf_add(rsp_buf, sizeof(*rsp));
614
615 switch (type) {
616 case BT_L2CAP_INFO_FEAT_MASK:
617 rsp->type = sys_cpu_to_le16(BT_L2CAP_INFO_FEAT_MASK);
618 rsp->result = sys_cpu_to_le16(BT_L2CAP_INFO_SUCCESS);
619 net_buf_add_le32(rsp_buf, L2CAP_FEAT_FIXED_CHAN_MASK);
620 hdr_info->len = sys_cpu_to_le16(sizeof(*rsp) + sizeof(uint32_t));
621 break;
622 case BT_L2CAP_INFO_FIXED_CHAN:
623 rsp->type = sys_cpu_to_le16(BT_L2CAP_INFO_FIXED_CHAN);
624 rsp->result = sys_cpu_to_le16(BT_L2CAP_INFO_SUCCESS);
625 /* fixed channel mask protocol data is 8 octets wide */
626 (void)memset(net_buf_add(rsp_buf, 8), 0, 8);
627 rsp->data[0] = get_fixed_channels_mask();
628
629 hdr_info->len = sys_cpu_to_le16(sizeof(*rsp) + 8);
630 break;
631 default:
632 rsp->type = req->type;
633 rsp->result = sys_cpu_to_le16(BT_L2CAP_INFO_NOTSUPP);
634 hdr_info->len = sys_cpu_to_le16(sizeof(*rsp));
635 break;
636 }
637
638 l2cap_send(conn, BT_L2CAP_CID_BR_SIG, rsp_buf);
639
640 return 0;
641 }
642
/* ACL link established: instantiate every registered BR/EDR fixed channel.
 *
 * Each fixed channel with an accept() hook gets a channel allocated, the
 * fixed CID assigned in both directions and is added to the connection.
 * The signalling channel (BT_L2CAP_CID_BR_SIG) is connected immediately
 * and used to start the information exchange; other fixed channels are
 * connected only after the Information Response (see
 * connect_optional_fixed_channels()).
 */
void bt_l2cap_br_connected(struct bt_conn *conn)
{
	struct bt_l2cap_chan *chan;

	STRUCT_SECTION_FOREACH(bt_l2cap_br_fixed_chan, fchan) {
		struct bt_l2cap_br_chan *br_chan;

		if (!fchan->accept) {
			continue;
		}

		if (fchan->accept(conn, &chan) < 0) {
			continue;
		}

		br_chan = BR_CHAN(chan);

		br_chan->rx.cid = fchan->cid;
		br_chan->tx.cid = fchan->cid;

		/* NOTE(review): an add failure aborts the whole loop rather
		 * than skipping just this entry — confirm this is intended.
		 */
		if (!l2cap_br_chan_add(conn, chan, NULL)) {
			return;
		}

		/*
		 * other fixed channels will be connected after Information
		 * Response is received
		 */
		if (fchan->cid == BT_L2CAP_CID_BR_SIG) {
			struct bt_l2cap_br *sig_ch;

			connect_fixed_channel(br_chan);

			sig_ch = CONTAINER_OF(br_chan, struct bt_l2cap_br, chan);
			l2cap_br_get_info(sig_ch, BT_L2CAP_INFO_FEAT_MASK);
		}
	}
}
681
bt_l2cap_br_disconnected(struct bt_conn * conn)682 void bt_l2cap_br_disconnected(struct bt_conn *conn)
683 {
684 struct bt_l2cap_chan *chan, *next;
685
686 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
687 bt_l2cap_br_chan_del(chan);
688 }
689 }
690
l2cap_br_server_lookup_psm(uint16_t psm)691 static struct bt_l2cap_server *l2cap_br_server_lookup_psm(uint16_t psm)
692 {
693 struct bt_l2cap_server *server;
694
695 SYS_SLIST_FOR_EACH_CONTAINER(&br_servers, server, node) {
696 if (server->psm == psm) {
697 return server;
698 }
699 }
700
701 return NULL;
702 }
703
l2cap_br_conf_add_mtu(struct net_buf * buf,const uint16_t mtu)704 static void l2cap_br_conf_add_mtu(struct net_buf *buf, const uint16_t mtu)
705 {
706 net_buf_add_u8(buf, BT_L2CAP_CONF_OPT_MTU);
707 net_buf_add_u8(buf, sizeof(mtu));
708 net_buf_add_le16(buf, mtu);
709 }
710
l2cap_br_conf_add_opt(struct net_buf * buf,const struct bt_l2cap_conf_opt * opt)711 static void l2cap_br_conf_add_opt(struct net_buf *buf, const struct bt_l2cap_conf_opt *opt)
712 {
713 net_buf_add_u8(buf, opt->type & BT_L2CAP_CONF_MASK);
714 net_buf_add_u8(buf, opt->len);
715 net_buf_add_mem(buf, opt->data, opt->len);
716 }
717
l2cap_br_conf(struct bt_l2cap_chan * chan)718 static void l2cap_br_conf(struct bt_l2cap_chan *chan)
719 {
720 struct bt_l2cap_sig_hdr *hdr;
721 struct bt_l2cap_conf_req *conf;
722 struct net_buf *buf;
723
724 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
725
726 hdr = net_buf_add(buf, sizeof(*hdr));
727 hdr->code = BT_L2CAP_CONF_REQ;
728 hdr->ident = l2cap_br_get_ident();
729 conf = net_buf_add(buf, sizeof(*conf));
730 (void)memset(conf, 0, sizeof(*conf));
731
732 conf->dcid = sys_cpu_to_le16(BR_CHAN(chan)->tx.cid);
733 /*
734 * Add MTU option if app set non default BR/EDR L2CAP MTU,
735 * otherwise sent empty configuration data meaning default MTU
736 * to be used.
737 */
738 if (BR_CHAN(chan)->rx.mtu != L2CAP_BR_DEFAULT_MTU) {
739 l2cap_br_conf_add_mtu(buf, BR_CHAN(chan)->rx.mtu);
740 }
741
742 hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
743
744 /*
745 * TODO:
746 * might be needed to start tracking number of configuration iterations
747 * on both directions
748 */
749 l2cap_br_chan_send_req(BR_CHAN(chan), buf, L2CAP_BR_CFG_TIMEOUT);
750 }
751
/* Outcome of the connection-time security gate applied to an incoming
 * channel (see l2cap_br_conn_security()).
 */
enum l2cap_br_conn_security_result {
	L2CAP_CONN_SECURITY_PASSED,
	L2CAP_CONN_SECURITY_REJECT,
	L2CAP_CONN_SECURITY_PENDING
};
757
758 /*
759 * Security helper against channel connection.
760 * Returns L2CAP_CONN_SECURITY_PASSED if:
761 * - existing security on link is applicable for requested PSM in connection,
762 * - legacy (non SSP) devices connecting with low security requirements,
763 * Returns L2CAP_CONN_SECURITY_PENDING if:
764 * - channel connection process is on hold since there were valid security
765 * conditions triggering authentication indirectly in subcall.
766 * Returns L2CAP_CONN_SECURITY_REJECT if:
767 * - bt_conn_set_security API returns < 0.
768 */
769
/* Security gate for an incoming channel connection; see the comment block
 * above for the meaning of each return value. May raise
 * br_chan->required_sec_level and can trigger link authentication as a
 * side effect of bt_conn_set_security().
 * NOTE(review): the psm parameter is currently unused in this body.
 */
static enum l2cap_br_conn_security_result
l2cap_br_conn_security(struct bt_l2cap_chan *chan, const uint16_t psm)
{
	int check;
	struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);

	/* For SDP PSM there's no need to change existing security on link */
	if (br_chan->required_sec_level == BT_SECURITY_L0) {
		return L2CAP_CONN_SECURITY_PASSED;
	}

	/*
	 * No link key needed for legacy devices (pre 2.1) and when low security
	 * level is required.
	 */
	if (br_chan->required_sec_level == BT_SECURITY_L1 &&
	    !BT_FEAT_HOST_SSP(chan->conn->br.features)) {
		return L2CAP_CONN_SECURITY_PASSED;
	}

	switch (br_chan->required_sec_level) {
	case BT_SECURITY_L4:
	case BT_SECURITY_L3:
	case BT_SECURITY_L2:
		break;
	default:
		/*
		 * For non SDP PSM connections GAP's Security Mode 4 requires at
		 * least unauthenticated link key and enabled encryption if
		 * remote supports SSP before any L2CAP CoC traffic. So preset
		 * local to MEDIUM security to trigger it if needed.
		 */
		if (BT_FEAT_HOST_SSP(chan->conn->br.features)) {
			br_chan->required_sec_level = BT_SECURITY_L2;
		}
		break;
	}

	check = bt_conn_set_security(chan->conn, br_chan->required_sec_level);

	/*
	 * Check case when on existing connection security level already covers
	 * channel (service) security requirements against link security and
	 * bt_conn_set_security API returns 0 what implies also there was no
	 * need to trigger authentication.
	 */
	if (check == 0 &&
	    chan->conn->sec_level >= br_chan->required_sec_level) {
		return L2CAP_CONN_SECURITY_PASSED;
	}

	/*
	 * If 'check' still holds 0, it means local host just sent HCI
	 * authentication command to start procedure to increase link security
	 * since service/profile requires that.
	 */
	if (check == 0) {
		/*
		 * General Bonding refers to the process of performing bonding
		 * during connection setup or channel establishment procedures
		 * as a precursor to accessing a service.
		 * For current case, it is dedicated bonding.
		 */
		atomic_set_bit(chan->conn->flags, BT_CONN_BR_GENERAL_BONDING);
		return L2CAP_CONN_SECURITY_PENDING;
	}

	/*
	 * For any other values in 'check' it means there was internal
	 * validation condition forbidding to start authentication at this
	 * moment.
	 */
	return L2CAP_CONN_SECURITY_REJECT;
}
844
l2cap_br_send_conn_rsp(struct bt_conn * conn,uint16_t scid,uint16_t dcid,uint8_t ident,uint16_t result)845 static void l2cap_br_send_conn_rsp(struct bt_conn *conn, uint16_t scid,
846 uint16_t dcid, uint8_t ident, uint16_t result)
847 {
848 struct net_buf *buf;
849 struct bt_l2cap_conn_rsp *rsp;
850 struct bt_l2cap_sig_hdr *hdr;
851
852 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
853
854 hdr = net_buf_add(buf, sizeof(*hdr));
855 hdr->code = BT_L2CAP_CONN_RSP;
856 hdr->ident = ident;
857 hdr->len = sys_cpu_to_le16(sizeof(*rsp));
858
859 rsp = net_buf_add(buf, sizeof(*rsp));
860 rsp->dcid = sys_cpu_to_le16(dcid);
861 rsp->scid = sys_cpu_to_le16(scid);
862 rsp->result = sys_cpu_to_le16(result);
863
864 if (result == BT_L2CAP_BR_PENDING) {
865 rsp->status = sys_cpu_to_le16(BT_L2CAP_CS_AUTHEN_PEND);
866 } else {
867 rsp->status = sys_cpu_to_le16(BT_L2CAP_CS_NO_INFO);
868 }
869
870 l2cap_send(conn, BT_L2CAP_CID_BR_SIG, buf);
871 }
872
l2cap_br_conn_req_reply(struct bt_l2cap_chan * chan,uint16_t result)873 static int l2cap_br_conn_req_reply(struct bt_l2cap_chan *chan, uint16_t result)
874 {
875 /* Send response to connection request only when in acceptor role */
876 if (!atomic_test_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_ACCEPTOR)) {
877 return -ESRCH;
878 }
879
880 l2cap_br_send_conn_rsp(chan->conn, BR_CHAN(chan)->tx.cid,
881 BR_CHAN(chan)->rx.cid, BR_CHAN(chan)->ident, result);
882 BR_CHAN(chan)->ident = 0U;
883
884 return 0;
885 }
886
887 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
888 #if defined(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
/* Debug variant of the state setter: logs the transition and warns about
 * transitions that the channel state machine does not expect. The state
 * is still applied for every known target state.
 */
void bt_l2cap_br_chan_set_state_debug(struct bt_l2cap_chan *chan,
				      bt_l2cap_chan_state_t state,
				      const char *func, int line)
{
	struct bt_l2cap_br_chan *br_chan;

	br_chan = BR_CHAN(chan);

	LOG_DBG("chan %p psm 0x%04x %s -> %s", chan, br_chan->psm,
		bt_l2cap_chan_state_str(br_chan->state), bt_l2cap_chan_state_str(state));

	/* check transitions validness */
	switch (state) {
	case BT_L2CAP_DISCONNECTED:
		/* regardless of old state always allows this state */
		break;
	case BT_L2CAP_CONNECTING:
		if (br_chan->state != BT_L2CAP_DISCONNECTED) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_CONFIG:
		if (br_chan->state != BT_L2CAP_CONNECTING) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_CONNECTED:
		if (br_chan->state != BT_L2CAP_CONFIG &&
		    br_chan->state != BT_L2CAP_CONNECTING) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_DISCONNECTING:
		if (br_chan->state != BT_L2CAP_CONFIG &&
		    br_chan->state != BT_L2CAP_CONNECTED) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	default:
		/* Unknown target state: refuse to apply it */
		LOG_ERR("%s()%d: unknown (%u) state was set", func, line, state);
		return;
	}

	br_chan->state = state;
}
934 #else
/* Non-debug build: record the new state without transition validation. */
void bt_l2cap_br_chan_set_state(struct bt_l2cap_chan *chan,
				bt_l2cap_chan_state_t state)
{
	struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);

	br_chan->state = state;
}
940 #endif /* CONFIG_BT_L2CAP_LOG_LEVEL_DBG */
941 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
942
/* Detach @p chan from its connection and release it.
 *
 * Flushes the pending TX queue, invokes the application's disconnected()
 * callback, then the destroy handler and finally released(). Safe to call
 * for channels that were never attached (chan->conn == NULL).
 */
void bt_l2cap_br_chan_del(struct bt_l2cap_chan *chan)
{
	const struct bt_l2cap_chan_ops *ops = chan->ops;
	struct bt_l2cap_br_chan *br_chan = CONTAINER_OF(chan, struct bt_l2cap_br_chan, chan);

	LOG_DBG("conn %p chan %p", chan->conn, chan);

	if (!chan->conn) {
		goto destroy;
	}

	cancel_data_ready(br_chan);

	/* Remove buffers on the PDU TX queue. */
	while (chan_has_data(br_chan)) {
		struct net_buf *buf = k_fifo_get(&br_chan->_pdu_tx_queue, K_NO_WAIT);

		net_buf_unref(buf);
	}

	if (ops->disconnected) {
		ops->disconnected(chan);
	}

	chan->conn = NULL;

destroy:
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Reset internal members of common channel */
	bt_l2cap_br_chan_set_state(chan, BT_L2CAP_DISCONNECTED);
	BR_CHAN(chan)->psm = 0U;
#endif
	if (chan->destroy) {
		chan->destroy(chan);
	}

	/* released() runs last so the app may recycle the channel memory */
	if (ops->released) {
		ops->released(chan);
	}
}
983
/* Handle an incoming L2CAP_CONNECTION_REQ on the BR/EDR signaling channel.
 *
 * Validates the request (registered PSM server, link security, source CID
 * range and uniqueness), asks the server to accept and allocate a channel,
 * then replies with the matching result code. On success the channel moves
 * straight on to the configuration phase; on a security block the whole
 * ACL link is torn down.
 */
static void l2cap_br_conn_req(struct bt_l2cap_br *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_server *server;
	struct bt_l2cap_conn_req *req = (void *)buf->data;
	uint16_t psm, scid, result;
	struct bt_l2cap_br_chan *br_chan;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small L2CAP conn req packet size");
		return;
	}

	psm = sys_le16_to_cpu(req->psm);
	scid = sys_le16_to_cpu(req->scid);

	LOG_DBG("psm 0x%02x scid 0x%04x", psm, scid);

	/* Check if there is a server registered */
	server = l2cap_br_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_BR_ERR_PSM_NOT_SUPP;
		goto no_chan;
	}

	/*
	 * Report security violation for non SDP channel without encryption when
	 * remote supports SSP.
	 */
	if (server->sec_level != BT_SECURITY_L0 &&
	    BT_FEAT_HOST_SSP(conn->br.features) && !conn->encrypt) {
		result = BT_L2CAP_BR_ERR_SEC_BLOCK;
		goto no_chan;
	}

	/* The peer's source CID must lie within the dynamic CID range */
	if (!L2CAP_BR_CID_IS_DYN(scid)) {
		result = BT_L2CAP_BR_ERR_INVALID_SCID;
		goto no_chan;
	}

	chan = bt_l2cap_br_lookup_tx_cid(conn, scid);
	if (chan) {
		/*
		 * we have a chan here but this is due to SCID being already in
		 * use so it is not channel we are suppose to pass to
		 * l2cap_br_conn_req_reply as wrong DCID would be used
		 */
		result = BT_L2CAP_BR_ERR_SCID_IN_USE;
		goto no_chan;
	}

	/*
	 * Request server to accept the new connection and allocate the
	 * channel. If no free channels available for PSM server reply with
	 * proper result and quit since chan pointer is uninitialized then.
	 */
	if (server->accept(conn, server, &chan) < 0) {
		result = BT_L2CAP_BR_ERR_NO_RESOURCES;
		goto no_chan;
	}

	br_chan = BR_CHAN(chan);
	br_chan->required_sec_level = server->sec_level;

	/* Bind the channel to the connection; ident is stored so the
	 * response can be sent later if security is still pending.
	 */
	l2cap_br_chan_add(conn, chan, l2cap_br_chan_destroy);
	BR_CHAN(chan)->tx.cid = scid;
	br_chan->ident = ident;
	bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONNECTING);
	atomic_set_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_ACCEPTOR);

	/* Disable fragmentation of l2cap rx pdu */
	BR_CHAN(chan)->rx.mtu = MIN(BR_CHAN(chan)->rx.mtu, BT_L2CAP_RX_MTU);

	/* Map the security check outcome onto a connection response code */
	switch (l2cap_br_conn_security(chan, psm)) {
	case L2CAP_CONN_SECURITY_PENDING:
		result = BT_L2CAP_BR_PENDING;
		/* TODO: auth timeout */
		break;
	case L2CAP_CONN_SECURITY_PASSED:
		result = BT_L2CAP_BR_SUCCESS;
		break;
	case L2CAP_CONN_SECURITY_REJECT:
	default:
		result = BT_L2CAP_BR_ERR_SEC_BLOCK;
		break;
	}
	/* Reply on connection request as acceptor */
	l2cap_br_conn_req_reply(chan, result);

	if (result != BT_L2CAP_BR_SUCCESS) {
		/* Disconnect link when security rules were violated */
		if (result == BT_L2CAP_BR_ERR_SEC_BLOCK) {
			bt_conn_disconnect(conn, BT_HCI_ERR_AUTH_FAIL);
		} else if (result == BT_L2CAP_BR_PENDING) {
			/* Recover the ident when conn is pending */
			br_chan->ident = ident;
		}

		return;
	}

	/* Security passed: start channel configuration right away */
	bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONFIG);
	l2cap_br_conf(chan);
	return;

no_chan:
	/* No channel was allocated: reply with DCID 0 and the error result */
	l2cap_br_send_conn_rsp(conn, scid, 0, ident, result);
}
1094
/* Handle an incoming L2CAP_CONFIGURATION_RSP.
 *
 * On success, marks the local direction as configured; once the remote
 * direction is configured too (L2CAP_FLAG_CONN_RCONF_DONE) the channel
 * transitions to connected and the owner is notified. Any non-success
 * result currently leads to channel disconnection.
 */
static void l2cap_br_conf_rsp(struct bt_l2cap_br *l2cap, uint8_t ident,
			      uint16_t len, struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_conf_rsp *rsp = (void *)buf->data;
	uint16_t flags, scid, result, opt_len;
	struct bt_l2cap_br_chan *br_chan;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small L2CAP conf rsp packet size");
		return;
	}

	flags = sys_le16_to_cpu(rsp->flags);
	scid = sys_le16_to_cpu(rsp->scid);
	result = sys_le16_to_cpu(rsp->result);
	opt_len = len - sizeof(*rsp);

	LOG_DBG("scid 0x%04x flags 0x%02x result 0x%02x len %u", scid, flags, result, opt_len);

	/* The SCID in the response is our local (RX) CID */
	chan = bt_l2cap_br_lookup_rx_cid(conn, scid);
	if (!chan) {
		LOG_ERR("channel mismatch!");
		return;
	}

	br_chan = BR_CHAN(chan);

	/* Release RTX work since got the response */
	k_work_cancel_delayable(&br_chan->rtx_work);

	/*
	 * TODO: handle other results than success and parse response data if
	 * available
	 */
	switch (result) {
	case BT_L2CAP_CONF_SUCCESS:
		/* Local configuration accepted by the peer */
		atomic_set_bit(br_chan->flags, L2CAP_FLAG_CONN_LCONF_DONE);

		/* Connected only when both directions are configured */
		if (br_chan->state == BT_L2CAP_CONFIG &&
		    atomic_test_bit(br_chan->flags,
				    L2CAP_FLAG_CONN_RCONF_DONE)) {
			LOG_DBG("scid 0x%04x rx MTU %u dcid 0x%04x tx MTU %u", br_chan->rx.cid,
				br_chan->rx.mtu, br_chan->tx.cid, br_chan->tx.mtu);

			bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONNECTED);
			if (chan->ops && chan->ops->connected) {
				chan->ops->connected(chan);
			}
		}
		break;
	default:
		/* currently disconnect channel on non success result */
		bt_l2cap_chan_disconnect(chan);
		break;
	}
}
1153
bt_l2cap_br_server_register(struct bt_l2cap_server * server)1154 int bt_l2cap_br_server_register(struct bt_l2cap_server *server)
1155 {
1156 if (server->psm < L2CAP_BR_PSM_START || !server->accept) {
1157 return -EINVAL;
1158 }
1159
1160 /* PSM must be odd and lsb of upper byte must be 0 */
1161 if ((server->psm & 0x0101) != 0x0001) {
1162 return -EINVAL;
1163 }
1164
1165 if (server->sec_level > BT_SECURITY_L4) {
1166 return -EINVAL;
1167 } else if (server->sec_level == BT_SECURITY_L0 &&
1168 server->psm != L2CAP_BR_PSM_SDP) {
1169 server->sec_level = BT_SECURITY_L1;
1170 }
1171
1172 /* Check if given PSM is already in use */
1173 if (l2cap_br_server_lookup_psm(server->psm)) {
1174 LOG_DBG("PSM already registered");
1175 return -EADDRINUSE;
1176 }
1177
1178 LOG_DBG("PSM 0x%04x", server->psm);
1179
1180 sys_slist_append(&br_servers, &server->node);
1181
1182 return 0;
1183 }
1184
l2cap_br_send_reject(struct bt_conn * conn,uint8_t ident,uint16_t reason,void * data,uint8_t data_len)1185 static void l2cap_br_send_reject(struct bt_conn *conn, uint8_t ident,
1186 uint16_t reason, void *data, uint8_t data_len)
1187 {
1188 struct bt_l2cap_cmd_reject *rej;
1189 struct bt_l2cap_sig_hdr *hdr;
1190 struct net_buf *buf;
1191
1192 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
1193
1194 hdr = net_buf_add(buf, sizeof(*hdr));
1195 hdr->code = BT_L2CAP_CMD_REJECT;
1196 hdr->ident = ident;
1197 hdr->len = sys_cpu_to_le16(sizeof(*rej) + data_len);
1198
1199 rej = net_buf_add(buf, sizeof(*rej));
1200 rej->reason = sys_cpu_to_le16(reason);
1201
1202 /*
1203 * optional data if available must be already in little-endian format
1204 * made by caller.and be compliant with Core 4.2 [Vol 3, Part A, 4.1,
1205 * table 4.4]
1206 */
1207 if (data) {
1208 net_buf_add_mem(buf, data, data_len);
1209 }
1210
1211 l2cap_send(conn, BT_L2CAP_CID_BR_SIG, buf);
1212 }
1213
l2cap_br_conf_opt_mtu(struct bt_l2cap_chan * chan,struct net_buf * buf,size_t len)1214 static uint16_t l2cap_br_conf_opt_mtu(struct bt_l2cap_chan *chan,
1215 struct net_buf *buf, size_t len)
1216 {
1217 uint16_t mtu, result = BT_L2CAP_CONF_SUCCESS;
1218 struct bt_l2cap_conf_opt_mtu *opt_mtu;
1219
1220 /* Core 4.2 [Vol 3, Part A, 5.1] MTU payload length */
1221 if (len != sizeof(*opt_mtu)) {
1222 LOG_ERR("tx MTU length %zu invalid", len);
1223 result = BT_L2CAP_CONF_REJECT;
1224 goto done;
1225 }
1226
1227 opt_mtu = (struct bt_l2cap_conf_opt_mtu *)buf->data;
1228
1229 mtu = sys_le16_to_cpu(opt_mtu->mtu);
1230 if (mtu < L2CAP_BR_MIN_MTU) {
1231 result = BT_L2CAP_CONF_UNACCEPT;
1232 BR_CHAN(chan)->tx.mtu = L2CAP_BR_MIN_MTU;
1233 opt_mtu->mtu = sys_cpu_to_le16(L2CAP_BR_MIN_MTU);
1234 LOG_DBG("tx MTU %u invalid", mtu);
1235 goto done;
1236 }
1237
1238 BR_CHAN(chan)->tx.mtu = mtu;
1239 LOG_DBG("tx MTU %u", mtu);
1240 done:
1241 return result;
1242 }
1243
l2cap_br_conf_opt_flush_timeout(struct bt_l2cap_chan * chan,struct net_buf * buf,size_t len)1244 static uint16_t l2cap_br_conf_opt_flush_timeout(struct bt_l2cap_chan *chan,
1245 struct net_buf *buf, size_t len)
1246 {
1247 uint16_t result = BT_L2CAP_CONF_SUCCESS;
1248 struct bt_l2cap_conf_opt_flush_timeout *opt_to;
1249
1250 if (len != sizeof(*opt_to)) {
1251 LOG_ERR("qos frame length %zu invalid", len);
1252 result = BT_L2CAP_CONF_REJECT;
1253 goto done;
1254 }
1255
1256 opt_to = (struct bt_l2cap_conf_opt_flush_timeout *)buf->data;
1257
1258 LOG_DBG("Flush timeout %u", opt_to->timeout);
1259
1260 opt_to->timeout = sys_cpu_to_le16(0xFFFF);
1261 result = BT_L2CAP_CONF_UNACCEPT;
1262 done:
1263 return result;
1264 }
1265
l2cap_br_conf_opt_qos(struct bt_l2cap_chan * chan,struct net_buf * buf,size_t len)1266 static uint16_t l2cap_br_conf_opt_qos(struct bt_l2cap_chan *chan,
1267 struct net_buf *buf, size_t len)
1268 {
1269 uint16_t result = BT_L2CAP_CONF_SUCCESS;
1270 struct bt_l2cap_conf_opt_qos *opt_qos;
1271
1272 if (len != sizeof(*opt_qos)) {
1273 LOG_ERR("qos frame length %zu invalid", len);
1274 result = BT_L2CAP_CONF_REJECT;
1275 goto done;
1276 }
1277
1278 opt_qos = (struct bt_l2cap_conf_opt_qos *)buf->data;
1279
1280 LOG_DBG("QOS Type %u", opt_qos->service_type);
1281
1282 if (opt_qos->service_type == BT_L2CAP_QOS_TYPE_GUARANTEED) {
1283 /* Set to default value */
1284 result = BT_L2CAP_CONF_UNACCEPT;
1285 opt_qos->flags = 0x00;
1286 /* do not care */
1287 opt_qos->token_rate = sys_cpu_to_le32(0x00000000);
1288 /* no token bucket is needed */
1289 opt_qos->token_bucket_size = sys_cpu_to_le32(0x00000000);
1290 /* do not care */
1291 opt_qos->peak_bandwidth = sys_cpu_to_le32(0x00000000);
1292 /* do not care */
1293 opt_qos->latency = sys_cpu_to_le32(0xFFFFFFFF);
1294 /* do not care */
1295 opt_qos->delay_variation = sys_cpu_to_le32(0xFFFFFFFF);
1296 }
1297
1298 done:
1299 return result;
1300 }
1301
l2cap_br_conf_opt_ret_fc(struct bt_l2cap_chan * chan,struct net_buf * buf,size_t len)1302 static uint16_t l2cap_br_conf_opt_ret_fc(struct bt_l2cap_chan *chan,
1303 struct net_buf *buf, size_t len)
1304 {
1305 uint16_t result = BT_L2CAP_CONF_SUCCESS;
1306 struct bt_l2cap_conf_opt_ret_fc *opt_ret_fc;
1307
1308 if (len != sizeof(*opt_ret_fc)) {
1309 LOG_ERR("ret_fc frame length %zu invalid", len);
1310 result = BT_L2CAP_CONF_REJECT;
1311 goto done;
1312 }
1313
1314 opt_ret_fc = (struct bt_l2cap_conf_opt_ret_fc *)buf->data;
1315
1316 LOG_DBG("ret_fc mode %u", opt_ret_fc->mode);
1317
1318 if (opt_ret_fc->mode != BT_L2CAP_RET_FC_MODE_BASIC) {
1319 /* Set to default value */
1320 result = BT_L2CAP_CONF_UNACCEPT;
1321 opt_ret_fc->mode = BT_L2CAP_RET_FC_MODE_BASIC;
1322 }
1323
1324 done:
1325 return result;
1326 }
1327
l2cap_br_conf_opt_fcs(struct bt_l2cap_chan * chan,struct net_buf * buf,size_t len)1328 static uint16_t l2cap_br_conf_opt_fcs(struct bt_l2cap_chan *chan,
1329 struct net_buf *buf, size_t len)
1330 {
1331 uint16_t result = BT_L2CAP_CONF_SUCCESS;
1332 struct bt_l2cap_conf_opt_fcs *opt_fcs;
1333
1334 if (len != sizeof(*opt_fcs)) {
1335 LOG_ERR("fcs frame length %zu invalid", len);
1336 result = BT_L2CAP_CONF_REJECT;
1337 goto done;
1338 }
1339
1340 opt_fcs = (struct bt_l2cap_conf_opt_fcs *)buf->data;
1341
1342 LOG_DBG("FCS type %u", opt_fcs->type);
1343
1344 if (opt_fcs->type != BT_L2CAP_FCS_TYPE_NO) {
1345 /* Set to default value */
1346 result = BT_L2CAP_CONF_UNACCEPT;
1347 opt_fcs->type = BT_L2CAP_FCS_TYPE_NO;
1348 }
1349
1350 done:
1351 return result;
1352 }
1353
/* Handle an incoming L2CAP_CONFIGURATION_REQ.
 *
 * Walks the option chain for the addressed channel, validating each known
 * option (MTU, flush timeout, QoS, retransmission & flow control, FCS),
 * then sends an L2CAP_CONFIGURATION_RSP with the aggregated result. When
 * both directions are configured the channel becomes connected.
 */
static void l2cap_br_conf_req(struct bt_l2cap_br *l2cap, uint8_t ident,
			      uint16_t len, struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_conf_req *req;
	struct bt_l2cap_sig_hdr *hdr;
	struct bt_l2cap_conf_rsp *rsp;
	struct bt_l2cap_conf_opt *opt = NULL;
	uint16_t flags, dcid, opt_len, hint, result = BT_L2CAP_CONF_SUCCESS;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small L2CAP conf req packet size");
		return;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));
	flags = sys_le16_to_cpu(req->flags);
	dcid = sys_le16_to_cpu(req->dcid);
	opt_len = len - sizeof(*req);

	LOG_DBG("dcid 0x%04x flags 0x%02x len %u", dcid, flags, opt_len);

	/* The DCID addressed by the peer is our local (RX) CID */
	chan = bt_l2cap_br_lookup_rx_cid(conn, dcid);
	if (!chan) {
		LOG_ERR("rx channel mismatch!");
		struct bt_l2cap_cmd_reject_cid_data data = {
			.scid = req->dcid,
			.dcid = 0,
		};

		l2cap_br_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID,
				     &data, sizeof(data));
		return;
	}

	/* An empty option chain implies the default MTU for TX */
	if (!opt_len) {
		LOG_DBG("tx default MTU %u", L2CAP_BR_DEFAULT_MTU);
		BR_CHAN(chan)->tx.mtu = L2CAP_BR_DEFAULT_MTU;
		goto send_rsp;
	}

	while (buf->len >= sizeof(*opt)) {
		opt = net_buf_pull_mem(buf, sizeof(*opt));

		/* make sure opt object can get safe dereference in iteration */
		if (buf->len < opt->len) {
			LOG_ERR("Received too short option data");
			result = BT_L2CAP_CONF_REJECT;
			break;
		}

		/* Hint options may be silently ignored when unknown */
		hint = opt->type & BT_L2CAP_CONF_HINT;

		switch (opt->type & BT_L2CAP_CONF_MASK) {
		case BT_L2CAP_CONF_OPT_MTU:
			/* getting MTU modifies buf internals */
			result = l2cap_br_conf_opt_mtu(chan, buf, opt->len);
			/*
			 * MTU is done. For now bailout the loop but later on
			 * there can be a need to continue checking next options
			 * that are after MTU value and then goto is not proper
			 * way out here.
			 */
			goto send_rsp;
		case BT_L2CAP_CONF_OPT_FLUSH_TIMEOUT:
			result = l2cap_br_conf_opt_flush_timeout(chan, buf, opt->len);
			if (result != BT_L2CAP_CONF_SUCCESS) {
				goto send_rsp;
			}
			break;
		case BT_L2CAP_CONF_OPT_QOS:
			result = l2cap_br_conf_opt_qos(chan, buf, opt->len);
			if (result != BT_L2CAP_CONF_SUCCESS) {
				goto send_rsp;
			}
			break;
		case BT_L2CAP_CONF_OPT_RET_FC:
			result = l2cap_br_conf_opt_ret_fc(chan, buf, opt->len);
			if (result != BT_L2CAP_CONF_SUCCESS) {
				goto send_rsp;
			}
			break;
		case BT_L2CAP_CONF_OPT_FCS:
			result = l2cap_br_conf_opt_fcs(chan, buf, opt->len);
			if (result != BT_L2CAP_CONF_SUCCESS) {
				goto send_rsp;
			}
			break;
		case BT_L2CAP_CONF_OPT_EXT_FLOW_SPEC:
			__fallthrough;
		case BT_L2CAP_CONF_OPT_EXT_WIN_SIZE:
			/* Extended options are not supported at all */
			result = BT_L2CAP_CONF_REJECT;
			goto send_rsp;
		default:
			if (!hint) {
				LOG_DBG("option %u not handled", opt->type);
				result = BT_L2CAP_CONF_UNKNOWN_OPT;
				goto send_rsp;
			}
			break;
		}

		/* Update buffer to point at next option */
		net_buf_pull(buf, opt->len);
	}

send_rsp:
	/* Note: buf is reused here for the outgoing response PDU */
	buf = bt_l2cap_create_pdu(&br_sig_pool, 0);

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = BT_L2CAP_CONF_RSP;
	hdr->ident = ident;
	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));

	rsp->result = sys_cpu_to_le16(result);
	rsp->scid = sys_cpu_to_le16(BR_CHAN(chan)->tx.cid);

	/*
	 * Core 5.4, Vol 3, Part A, section 4.5.
	 * When used in the L2CAP_CONFIGURATION_RSP packet,
	 * the continuation flag shall be set to one if the
	 * flag is set to one in the Request, except for
	 * those error conditions more appropriate for an
	 * L2CAP_COMMAND_REJECT_RSP packet.
	 */
	rsp->flags = sys_cpu_to_le16(flags & BT_L2CAP_CONF_FLAGS_MASK);

	/*
	 * TODO: If options other than MTU became meaningful then processing
	 * the options chain need to be modified and taken into account when
	 * sending back to peer.
	 */
	if ((result == BT_L2CAP_CONF_UNKNOWN_OPT) || (result == BT_L2CAP_CONF_UNACCEPT)) {
		if (opt) {
			/* Echo the (possibly amended) offending option back */
			l2cap_br_conf_add_opt(buf, opt);
		}
	}

	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	l2cap_send(conn, BT_L2CAP_CID_BR_SIG, buf);

	if (result != BT_L2CAP_CONF_SUCCESS) {
		return;
	}

	/* Remote configuration accepted by us */
	atomic_set_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_RCONF_DONE);

	/* Connected only when both directions are configured */
	if (atomic_test_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_LCONF_DONE) &&
	    BR_CHAN(chan)->state == BT_L2CAP_CONFIG) {
		LOG_DBG("scid 0x%04x rx MTU %u dcid 0x%04x tx MTU %u", BR_CHAN(chan)->rx.cid,
			BR_CHAN(chan)->rx.mtu, BR_CHAN(chan)->tx.cid, BR_CHAN(chan)->tx.mtu);

		bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONNECTED);
		if (chan->ops && chan->ops->connected) {
			chan->ops->connected(chan);
		}
	}
}
1515
l2cap_br_remove_tx_cid(struct bt_conn * conn,uint16_t cid)1516 static struct bt_l2cap_br_chan *l2cap_br_remove_tx_cid(struct bt_conn *conn,
1517 uint16_t cid)
1518 {
1519 struct bt_l2cap_chan *chan;
1520 sys_snode_t *prev = NULL;
1521
1522 /* Protect fixed channels against accidental removal */
1523 if (!L2CAP_BR_CID_IS_DYN(cid)) {
1524 return NULL;
1525 }
1526
1527 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
1528 if (BR_CHAN(chan)->tx.cid == cid) {
1529 sys_slist_remove(&conn->channels, prev, &chan->node);
1530 return BR_CHAN(chan);
1531 }
1532
1533 prev = &chan->node;
1534 }
1535
1536 return NULL;
1537 }
1538
/* Handle an incoming L2CAP_DISCONNECTION_REQ.
 *
 * Looks up (and unlinks) the addressed dynamic channel; if no channel
 * matches, a command reject with the invalid CID pair is returned.
 * Otherwise the channel is deleted and a disconnection response sent.
 */
static void l2cap_br_disconn_req(struct bt_l2cap_br *l2cap, uint8_t ident,
				 struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_br_chan *chan;
	struct bt_l2cap_disconn_req *req = (void *)buf->data;
	struct bt_l2cap_disconn_rsp *rsp;
	struct bt_l2cap_sig_hdr *hdr;
	uint16_t scid, dcid;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small disconn req packet size");
		return;
	}

	dcid = sys_le16_to_cpu(req->dcid);
	scid = sys_le16_to_cpu(req->scid);

	LOG_DBG("scid 0x%04x dcid 0x%04x", dcid, scid);

	/* The peer's SCID is our TX CID for this channel */
	chan = l2cap_br_remove_tx_cid(conn, scid);
	if (!chan) {
		struct bt_l2cap_cmd_reject_cid_data data;

		data.scid = req->scid;
		data.dcid = req->dcid;
		l2cap_br_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID,
				     &data, sizeof(data));
		return;
	}

	/* Note: buf is reused here for the outgoing response PDU */
	buf = bt_l2cap_create_pdu(&br_sig_pool, 0);

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = BT_L2CAP_DISCONN_RSP;
	hdr->ident = ident;
	hdr->len = sys_cpu_to_le16(sizeof(*rsp));

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->dcid = sys_cpu_to_le16(chan->rx.cid);
	rsp->scid = sys_cpu_to_le16(chan->tx.cid);

	/* Delete the channel before confirming the disconnection */
	bt_l2cap_br_chan_del(&chan->chan);

	l2cap_send(conn, BT_L2CAP_CID_BR_SIG, buf);
}
1585
l2cap_br_connected(struct bt_l2cap_chan * chan)1586 static void l2cap_br_connected(struct bt_l2cap_chan *chan)
1587 {
1588 LOG_DBG("ch %p cid 0x%04x", BR_CHAN(chan), BR_CHAN(chan)->rx.cid);
1589 }
1590
l2cap_br_disconnected(struct bt_l2cap_chan * chan)1591 static void l2cap_br_disconnected(struct bt_l2cap_chan *chan)
1592 {
1593 struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);
1594
1595 LOG_DBG("ch %p cid 0x%04x", br_chan, br_chan->rx.cid);
1596
1597 if (atomic_test_and_clear_bit(br_chan->flags,
1598 L2CAP_FLAG_SIG_INFO_PENDING)) {
1599 /* Cancel RTX work on signal channel.
1600 * Disconnected callback is always called from system workqueue
1601 * so this should always succeed.
1602 */
1603 (void)k_work_cancel_delayable(&br_chan->rtx_work);
1604 }
1605 }
1606
bt_l2cap_br_chan_disconnect(struct bt_l2cap_chan * chan)1607 int bt_l2cap_br_chan_disconnect(struct bt_l2cap_chan *chan)
1608 {
1609 struct bt_conn *conn = chan->conn;
1610 struct net_buf *buf;
1611 struct bt_l2cap_disconn_req *req;
1612 struct bt_l2cap_sig_hdr *hdr;
1613 struct bt_l2cap_br_chan *br_chan;
1614
1615 if (!conn) {
1616 return -ENOTCONN;
1617 }
1618
1619 br_chan = BR_CHAN(chan);
1620
1621 if (br_chan->state == BT_L2CAP_DISCONNECTING) {
1622 return -EALREADY;
1623 }
1624
1625 LOG_DBG("chan %p scid 0x%04x dcid 0x%04x", chan, br_chan->rx.cid, br_chan->tx.cid);
1626
1627 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
1628
1629 hdr = net_buf_add(buf, sizeof(*hdr));
1630 hdr->code = BT_L2CAP_DISCONN_REQ;
1631 hdr->ident = l2cap_br_get_ident();
1632 hdr->len = sys_cpu_to_le16(sizeof(*req));
1633
1634 req = net_buf_add(buf, sizeof(*req));
1635 req->dcid = sys_cpu_to_le16(br_chan->tx.cid);
1636 req->scid = sys_cpu_to_le16(br_chan->rx.cid);
1637
1638 l2cap_br_chan_send_req(br_chan, buf, L2CAP_BR_DISCONN_TIMEOUT);
1639 bt_l2cap_br_chan_set_state(chan, BT_L2CAP_DISCONNECTING);
1640
1641 return 0;
1642 }
1643
l2cap_br_disconn_rsp(struct bt_l2cap_br * l2cap,uint8_t ident,struct net_buf * buf)1644 static void l2cap_br_disconn_rsp(struct bt_l2cap_br *l2cap, uint8_t ident,
1645 struct net_buf *buf)
1646 {
1647 struct bt_conn *conn = l2cap->chan.chan.conn;
1648 struct bt_l2cap_br_chan *chan;
1649 struct bt_l2cap_disconn_rsp *rsp = (void *)buf->data;
1650 uint16_t dcid, scid;
1651
1652 if (buf->len < sizeof(*rsp)) {
1653 LOG_ERR("Too small disconn rsp packet size");
1654 return;
1655 }
1656
1657 dcid = sys_le16_to_cpu(rsp->dcid);
1658 scid = sys_le16_to_cpu(rsp->scid);
1659
1660 LOG_DBG("dcid 0x%04x scid 0x%04x", dcid, scid);
1661
1662 chan = l2cap_br_remove_tx_cid(conn, dcid);
1663 if (!chan) {
1664 LOG_WRN("No dcid 0x%04x channel found", dcid);
1665 return;
1666 }
1667
1668 bt_l2cap_br_chan_del(&chan->chan);
1669 }
1670
/* Initiate an outgoing BR/EDR channel connection on @p psm.
 *
 * Validates the PSM and security level, binds the channel to the
 * connection and either sends an L2CAP_CONNECTION_REQ immediately or
 * defers it until the triggered security procedure completes (the
 * request is then sent from the encrypt-change callback).
 *
 * Returns 0 on success; -EINVAL (bad PSM or security level), -EEXIST
 * (channel already bound to a PSM), -EISCONN / -EBUSY (bad channel
 * state), -ENOMEM (no free CID), or -EIO (security rejected).
 */
int bt_l2cap_br_chan_connect(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			     uint16_t psm)
{
	struct net_buf *buf;
	struct bt_l2cap_sig_hdr *hdr;
	struct bt_l2cap_conn_req *req;
	struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);

	if (!psm) {
		return -EINVAL;
	}

	if (br_chan->psm) {
		return -EEXIST;
	}

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((psm & 0x0101) != 0x0001) {
		return -EINVAL;
	}

	if (br_chan->required_sec_level > BT_SECURITY_L4) {
		return -EINVAL;
	} else if (br_chan->required_sec_level == BT_SECURITY_L0 &&
		   psm != L2CAP_BR_PSM_SDP) {
		/* Enforce at least L1 for everything except SDP */
		br_chan->required_sec_level = BT_SECURITY_L1;
	}

	switch (br_chan->state) {
	case BT_L2CAP_CONNECTED:
		/* Already connected */
		return -EISCONN;
	case BT_L2CAP_DISCONNECTED:
		/* Can connect */
		break;
	case BT_L2CAP_CONFIG:
	case BT_L2CAP_DISCONNECTING:
	default:
		/* Bad context */
		return -EBUSY;
	}

	if (!l2cap_br_chan_add(conn, chan, l2cap_br_chan_destroy)) {
		return -ENOMEM;
	}

	br_chan->psm = psm;
	bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONNECTING);
	/* Mark that an outgoing connection request is (or will be) in flight */
	atomic_set_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_PENDING);

	switch (l2cap_br_conn_security(chan, psm)) {
	case L2CAP_CONN_SECURITY_PENDING:
		/*
		 * Authentication was triggered, wait with sending request on
		 * connection security changed callback context.
		 */
		return 0;
	case L2CAP_CONN_SECURITY_PASSED:
		break;
	case L2CAP_CONN_SECURITY_REJECT:
	default:
		/* Undo the channel setup done above */
		l2cap_br_chan_cleanup(chan);
		return -EIO;
	}

	/* Security already satisfied: send the connection request now */
	buf = bt_l2cap_create_pdu(&br_sig_pool, 0);

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = BT_L2CAP_CONN_REQ;
	hdr->ident = l2cap_br_get_ident();
	hdr->len = sys_cpu_to_le16(sizeof(*req));

	req = net_buf_add(buf, sizeof(*req));
	req->psm = sys_cpu_to_le16(psm);
	req->scid = sys_cpu_to_le16(BR_CHAN(chan)->rx.cid);

	l2cap_br_chan_send_req(BR_CHAN(chan), buf, L2CAP_BR_CONN_TIMEOUT);

	return 0;
}
1751
/* Handle an incoming L2CAP_CONNECTION_RSP for our outgoing request.
 *
 * On success, stores the peer-assigned DCID and starts configuration.
 * On "pending", re-arms the RTX timer to keep waiting. Any other
 * result tears the channel down.
 */
static void l2cap_br_conn_rsp(struct bt_l2cap_br *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_conn_rsp *rsp = (void *)buf->data;
	uint16_t dcid, scid, result, status;
	struct bt_l2cap_br_chan *br_chan;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small L2CAP conn rsp packet size");
		return;
	}

	dcid = sys_le16_to_cpu(rsp->dcid);
	scid = sys_le16_to_cpu(rsp->scid);
	result = sys_le16_to_cpu(rsp->result);
	status = sys_le16_to_cpu(rsp->status);

	LOG_DBG("dcid 0x%04x scid 0x%04x result %u status %u", dcid, scid, result, status);

	/* The SCID echoed by the peer is our local (RX) CID */
	chan = bt_l2cap_br_lookup_rx_cid(conn, scid);
	if (!chan) {
		LOG_ERR("No scid 0x%04x channel found", scid);
		return;
	}

	br_chan = BR_CHAN(chan);

	/* Release RTX work since got the response */
	k_work_cancel_delayable(&br_chan->rtx_work);

	/* Only a channel awaiting a connection response may process one */
	if (br_chan->state != BT_L2CAP_CONNECTING) {
		LOG_DBG("Invalid channel %p state %s", chan,
			bt_l2cap_chan_state_str(br_chan->state));
		return;
	}

	switch (result) {
	case BT_L2CAP_BR_SUCCESS:
		br_chan->ident = 0U;
		BR_CHAN(chan)->tx.cid = dcid;
		l2cap_br_conf(chan);
		bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONFIG);
		atomic_clear_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_PENDING);
		break;
	case BT_L2CAP_BR_PENDING:
		/* Peer needs more time (e.g. authorization): keep waiting */
		k_work_reschedule(&br_chan->rtx_work, L2CAP_BR_CONN_TIMEOUT);
		break;
	default:
		l2cap_br_chan_cleanup(chan);
		break;
	}
}
1806
/* Send a buffer over a BR/EDR channel with an optional TX callback.
 *
 * Returns what the lower layer returns on success, or a negative error:
 * -EINVAL (NULL arguments), -ENOTCONN (no connected link), -ESHUTDOWN
 * (channel shut down), -EMSGSIZE (payload exceeds the TX MTU).
 */
int bt_l2cap_br_chan_send_cb(struct bt_l2cap_chan *chan, struct net_buf *buf, bt_conn_tx_cb_t cb,
			     void *user_data)
{
	struct bt_l2cap_br_chan *br_chan;

	if (!chan || !buf) {
		return -EINVAL;
	}

	br_chan = BR_CHAN(chan);

	LOG_DBG("chan %p buf %p len %zu", chan, buf, net_buf_frags_len(buf));

	if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	if (atomic_test_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN)) {
		return -ESHUTDOWN;
	}

	/* Never exceed what the peer agreed to receive */
	if (buf->len > br_chan->tx.mtu) {
		return -EMSGSIZE;
	}

	return bt_l2cap_br_send_cb(br_chan->chan.conn, br_chan->tx.cid, buf, cb, user_data);
}
1834
/* Send a buffer over a BR/EDR channel without a TX completion callback. */
int bt_l2cap_br_chan_send(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	return bt_l2cap_br_chan_send_cb(chan, buf, NULL, NULL);
}
1839
/* Dispatch one signaling command to its handler.
 *
 * The buffer parse state is saved before and restored after the handler
 * runs so that, no matter how much the handler consumed, exactly
 * hdr->len bytes are pulled afterwards. This keeps the caller's loop
 * aligned when one PDU carries several commands.
 */
static void l2cap_br_sig_handle(struct bt_l2cap_br *l2cap, struct bt_l2cap_sig_hdr *hdr,
				struct net_buf *buf)
{
	uint16_t len;
	struct net_buf_simple_state state;

	len = sys_le16_to_cpu(hdr->len);

	net_buf_simple_save(&buf->b, &state);

	switch (hdr->code) {
	case BT_L2CAP_INFO_RSP:
		l2cap_br_info_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_INFO_REQ:
		l2cap_br_info_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_REQ:
		l2cap_br_disconn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_CONN_REQ:
		l2cap_br_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_CONF_RSP:
		l2cap_br_conf_rsp(l2cap, hdr->ident, len, buf);
		break;
	case BT_L2CAP_CONF_REQ:
		l2cap_br_conf_req(l2cap, hdr->ident, len, buf);
		break;
	case BT_L2CAP_DISCONN_RSP:
		l2cap_br_disconn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_CONN_RSP:
		l2cap_br_conn_rsp(l2cap, hdr->ident, buf);
		break;
	default:
		LOG_WRN("Unknown/Unsupported L2CAP PDU code 0x%02x", hdr->code);
		l2cap_br_send_reject(l2cap->chan.chan.conn, hdr->ident,
				     BT_L2CAP_REJ_NOT_UNDERSTOOD, NULL, 0);
		break;
	}

	/* Consume exactly this command's payload regardless of the handler */
	net_buf_simple_restore(&buf->b, &state);
	(void)net_buf_pull_mem(buf, len);
}
1885
/* Receive callback of the BR/EDR signaling channel (CID 0x0001).
 *
 * A single frame may carry several signaling commands; iterate over
 * them, validating each header and declared length before dispatching.
 * Commands with the invalid ident 0 are skipped. Always returns 0
 * (the caller owns and releases the buffer).
 */
static int l2cap_br_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	struct bt_l2cap_br *l2cap = CONTAINER_OF(chan, struct bt_l2cap_br, chan.chan);
	struct bt_l2cap_sig_hdr *hdr;
	uint16_t len;

	while (buf->len > 0) {
		if (buf->len < sizeof(*hdr)) {
			LOG_ERR("Too small L2CAP signaling PDU");
			return 0;
		}

		hdr = net_buf_pull_mem(buf, sizeof(*hdr));
		len = sys_le16_to_cpu(hdr->len);

		LOG_DBG("Signaling code 0x%02x ident %u len %u", hdr->code, hdr->ident, len);

		/* The declared payload must fit in what is left of the frame */
		if (buf->len < len) {
			LOG_ERR("L2CAP length is short (%u < %u)", buf->len, len);
			return 0;
		}

		/* Ident 0 is reserved; skip this command and keep parsing */
		if (!hdr->ident) {
			LOG_ERR("Invalid ident value in L2CAP PDU");
			(void)net_buf_pull_mem(buf, len);
			continue;
		}

		l2cap_br_sig_handle(l2cap, hdr, buf);
	}

	return 0;
}
1919
/* Resume a channel whose connection was blocked on a security procedure.
 *
 * Called on encryption change for every channel in CONNECTING state.
 * On security failure the acceptor side answers with a security block
 * (and an initiator channel is released). On success the acceptor sends
 * its outstanding connection response and starts configuration, while
 * the initiator sends its deferred L2CAP_CONNECTION_REQ.
 */
static void l2cap_br_conn_pend(struct bt_l2cap_chan *chan, uint8_t status)
{
	struct net_buf *buf;
	struct bt_l2cap_sig_hdr *hdr;
	struct bt_l2cap_conn_req *req;

	if (BR_CHAN(chan)->state != BT_L2CAP_CONNECTING) {
		return;
	}

	LOG_DBG("chan %p status 0x%02x encr 0x%02x", chan, status, chan->conn->encrypt);

	if (status) {
		/*
		 * Security procedure status is non-zero so respond with
		 * security violation only as channel acceptor.
		 */
		l2cap_br_conn_req_reply(chan, BT_L2CAP_BR_ERR_SEC_BLOCK);

		/* Release channel allocated to outgoing connection request */
		if (atomic_test_bit(BR_CHAN(chan)->flags,
				    L2CAP_FLAG_CONN_PENDING)) {
			l2cap_br_chan_cleanup(chan);
		}

		return;
	}

	/* Nothing to do until the link is actually encrypted */
	if (!chan->conn->encrypt) {
		return;
	}

	/*
	 * For incoming connection state send confirming outstanding
	 * response and initiate configuration request.
	 */
	if (l2cap_br_conn_req_reply(chan, BT_L2CAP_BR_SUCCESS) == 0) {
		bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONFIG);
		/*
		 * Initialize config request since remote needs to know
		 * local MTU segmentation.
		 */
		l2cap_br_conf(chan);
	} else if (atomic_test_and_clear_bit(BR_CHAN(chan)->flags,
					     L2CAP_FLAG_CONN_PENDING)) {
		/* Initiator: send the connection request deferred earlier */
		buf = bt_l2cap_create_pdu(&br_sig_pool, 0);

		hdr = net_buf_add(buf, sizeof(*hdr));
		hdr->code = BT_L2CAP_CONN_REQ;
		hdr->ident = l2cap_br_get_ident();
		hdr->len = sys_cpu_to_le16(sizeof(*req));

		req = net_buf_add(buf, sizeof(*req));
		req->psm = sys_cpu_to_le16(BR_CHAN(chan)->psm);
		req->scid = sys_cpu_to_le16(BR_CHAN(chan)->rx.cid);

		l2cap_br_chan_send_req(BR_CHAN(chan), buf,
				       L2CAP_BR_CONN_TIMEOUT);
	}
}
1980
l2cap_br_encrypt_change(struct bt_conn * conn,uint8_t hci_status)1981 void l2cap_br_encrypt_change(struct bt_conn *conn, uint8_t hci_status)
1982 {
1983 struct bt_l2cap_chan *chan;
1984
1985 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
1986 l2cap_br_conn_pend(chan, hci_status);
1987
1988 if (chan->ops && chan->ops->encrypt_change) {
1989 chan->ops->encrypt_change(chan, hci_status);
1990 }
1991 }
1992 }
1993
check_fixed_channel(struct bt_l2cap_chan * chan)1994 static void check_fixed_channel(struct bt_l2cap_chan *chan)
1995 {
1996 struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);
1997
1998 if (br_chan->rx.cid < L2CAP_BR_CID_DYN_START) {
1999 connect_fixed_channel(br_chan);
2000 }
2001 }
2002
bt_l2cap_br_recv(struct bt_conn * conn,struct net_buf * buf)2003 void bt_l2cap_br_recv(struct bt_conn *conn, struct net_buf *buf)
2004 {
2005 struct bt_l2cap_hdr *hdr;
2006 struct bt_l2cap_chan *chan;
2007 uint16_t cid;
2008
2009 if (buf->len < sizeof(*hdr)) {
2010 LOG_ERR("Too small L2CAP PDU received");
2011 net_buf_unref(buf);
2012 return;
2013 }
2014
2015 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2016 cid = sys_le16_to_cpu(hdr->cid);
2017
2018 chan = bt_l2cap_br_lookup_rx_cid(conn, cid);
2019 if (!chan) {
2020 LOG_WRN("Ignoring data for unknown channel ID 0x%04x", cid);
2021 net_buf_unref(buf);
2022 return;
2023 }
2024
2025 /*
2026 * if data was received for fixed channel before Information
2027 * Response we connect channel here.
2028 */
2029 check_fixed_channel(chan);
2030
2031 chan->ops->recv(chan, buf);
2032 net_buf_unref(buf);
2033 }
2034
l2cap_br_accept(struct bt_conn * conn,struct bt_l2cap_chan ** chan)2035 static int l2cap_br_accept(struct bt_conn *conn, struct bt_l2cap_chan **chan)
2036 {
2037 int i;
2038 static const struct bt_l2cap_chan_ops ops = {
2039 .connected = l2cap_br_connected,
2040 .disconnected = l2cap_br_disconnected,
2041 .recv = l2cap_br_recv,
2042 };
2043
2044 LOG_DBG("conn %p handle %u", conn, conn->handle);
2045
2046 for (i = 0; i < ARRAY_SIZE(bt_l2cap_br_pool); i++) {
2047 struct bt_l2cap_br *l2cap = &bt_l2cap_br_pool[i];
2048
2049 if (l2cap->chan.chan.conn) {
2050 continue;
2051 }
2052
2053 l2cap->chan.chan.ops = &ops;
2054 *chan = &l2cap->chan.chan;
2055 atomic_set(l2cap->chan.flags, 0);
2056 return 0;
2057 }
2058
2059 LOG_ERR("No available L2CAP context for conn %p", conn);
2060
2061 return -ENOMEM;
2062 }
2063
/* Register the BR/EDR signaling channel (fixed CID 0x0001) with the host. */
BT_L2CAP_BR_CHANNEL_DEFINE(br_fixed_chan, BT_L2CAP_CID_BR_SIG, l2cap_br_accept);
2065
/* Initialize the BR/EDR L2CAP layer and all enabled Classic profiles. */
void bt_l2cap_br_init(void)
{
	sys_slist_init(&br_servers);

	if (IS_ENABLED(CONFIG_BT_RFCOMM)) {
		bt_rfcomm_init();
	}

	if (IS_ENABLED(CONFIG_BT_AVDTP)) {
		bt_avdtp_init();
	}

	if (IS_ENABLED(CONFIG_BT_AVCTP)) {
		bt_avctp_init();
	}

	/* SDP is mandatory for BR/EDR, initialized unconditionally */
	bt_sdp_init();

	/* A2DP/AVRCP depend on AVDTP/AVCTP, so come after them */
	if (IS_ENABLED(CONFIG_BT_A2DP)) {
		bt_a2dp_init();
	}

	if (IS_ENABLED(CONFIG_BT_AVRCP)) {
		bt_avrcp_init();
	}
}
2092