1 /* l2cap_br.c - L2CAP BREDR oriented handling */
2
3 /*
4 * Copyright (c) 2016 Intel Corporation
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 #include <zephyr/kernel.h>
10 #include <string.h>
11 #include <errno.h>
12 #include <zephyr/sys/atomic.h>
13 #include <zephyr/sys/byteorder.h>
14 #include <zephyr/sys/iterable_sections.h>
15 #include <zephyr/sys/util.h>
16
17 #include <zephyr/bluetooth/hci.h>
18 #include <zephyr/bluetooth/bluetooth.h>
19 #include <zephyr/bluetooth/conn.h>
20 #include <zephyr/drivers/bluetooth/hci_driver.h>
21
22 #include "host/buf_view.h"
23 #include "host/hci_core.h"
24 #include "host/conn_internal.h"
25 #include "l2cap_br_internal.h"
26 #include "avdtp_internal.h"
27 #include "a2dp_internal.h"
28 #include "rfcomm_internal.h"
29 #include "sdp_internal.h"
30
31 #include <zephyr/logging/log.h>
32 LOG_MODULE_REGISTER(bt_l2cap_br, CONFIG_BT_L2CAP_LOG_LEVEL);
33
34 #define BR_CHAN_RTX(_w) CONTAINER_OF(k_work_delayable_from_work(_w), \
35 struct bt_l2cap_br_chan, rtx_work)
36
37 #define L2CAP_BR_PSM_START 0x0001
38 #define L2CAP_BR_PSM_END 0xffff
39
40 #define L2CAP_BR_CID_DYN_START 0x0040
41 #define L2CAP_BR_CID_DYN_END 0xffff
42 #define L2CAP_BR_CID_IS_DYN(_cid) \
43 (_cid >= L2CAP_BR_CID_DYN_START && _cid <= L2CAP_BR_CID_DYN_END)
44
45 #define L2CAP_BR_MIN_MTU 48
46 #define L2CAP_BR_DEFAULT_MTU 672
47
48 #define L2CAP_BR_PSM_SDP 0x0001
49
50 #define L2CAP_BR_INFO_TIMEOUT K_SECONDS(4)
51 #define L2CAP_BR_CFG_TIMEOUT K_SECONDS(4)
52 #define L2CAP_BR_DISCONN_TIMEOUT K_SECONDS(1)
53 #define L2CAP_BR_CONN_TIMEOUT K_SECONDS(40)
54
55 /*
56 * L2CAP extended feature mask:
57 * BR/EDR fixed channel support enabled
58 */
59 #define L2CAP_FEAT_FIXED_CHAN_MASK 0x00000080
60
61 enum {
62 /* Connection oriented channels flags */
63 L2CAP_FLAG_CONN_LCONF_DONE, /* local config accepted by remote */
64 L2CAP_FLAG_CONN_RCONF_DONE, /* remote config accepted by local */
65 L2CAP_FLAG_CONN_ACCEPTOR, /* getting incoming connection req */
66 L2CAP_FLAG_CONN_PENDING, /* remote sent pending result in rsp */
67
68 /* Signaling channel flags */
69 L2CAP_FLAG_SIG_INFO_PENDING, /* retrieving remote l2cap info */
70 L2CAP_FLAG_SIG_INFO_DONE, /* remote l2cap info is done */
71
72 /* fixed channels flags */
73 L2CAP_FLAG_FIXED_CONNECTED, /* fixed connected */
74 };
75
76 static sys_slist_t br_servers;
77
78
79 /* Pool for outgoing BR/EDR signaling packets, min MTU is 48 */
80 NET_BUF_POOL_FIXED_DEFINE(br_sig_pool, CONFIG_BT_MAX_CONN,
81 BT_L2CAP_BUF_SIZE(L2CAP_BR_MIN_MTU), 8, NULL);
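
/* Every signaling PDU built from this pool starts with a
 * struct bt_l2cap_sig_hdr (code, ident, little-endian length) followed by
 * the command-specific payload; BT_L2CAP_BUF_SIZE() reserves the extra
 * headroom needed for the basic L2CAP and ACL headers.
 */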
82
83 /* BR/EDR L2CAP signalling channel specific context */
84 struct bt_l2cap_br {
85 /* The channel this context is associated with */
86 struct bt_l2cap_br_chan chan;
87 uint8_t info_ident;
88 uint8_t info_fixed_chan;
89 uint32_t info_feat_mask;
90 };
91
92 static struct bt_l2cap_br bt_l2cap_br_pool[CONFIG_BT_MAX_CONN];
93
94 struct bt_l2cap_chan *bt_l2cap_br_lookup_rx_cid(struct bt_conn *conn,
95 uint16_t cid)
96 {
97 struct bt_l2cap_chan *chan;
98
99 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
100 if (BR_CHAN(chan)->rx.cid == cid) {
101 return chan;
102 }
103 }
104
105 return NULL;
106 }
107
108 struct bt_l2cap_chan *bt_l2cap_br_lookup_tx_cid(struct bt_conn *conn,
109 uint16_t cid)
110 {
111 struct bt_l2cap_chan *chan;
112
113 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
114 if (BR_CHAN(chan)->tx.cid == cid) {
115 return chan;
116 }
117 }
118
119 return NULL;
120 }
121
122 static struct bt_l2cap_br_chan*
123 l2cap_br_chan_alloc_cid(struct bt_conn *conn, struct bt_l2cap_chan *chan)
124 {
125 struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);
126 uint16_t cid;
127
128 /*
129 * No action needed if there's already a CID allocated, e.g. in
130 * the case of a fixed channel.
131 */
132 if (br_chan->rx.cid > 0) {
133 return br_chan;
134 }
135
136 /*
137 * L2CAP_BR_CID_DYN_END is 0xffff so we don't check against it; since
138 * cid is uint16_t the loop simply ends when the value wraps to 0.
139 */
140 for (cid = L2CAP_BR_CID_DYN_START; cid; cid++) {
141 if (!bt_l2cap_br_lookup_rx_cid(conn, cid)) {
142 br_chan->rx.cid = cid;
143 return br_chan;
144 }
145 }
146
147 return NULL;
148 }
149
150 static void l2cap_br_chan_cleanup(struct bt_l2cap_chan *chan)
151 {
152 bt_l2cap_chan_remove(chan->conn, chan);
153 bt_l2cap_br_chan_del(chan);
154 }
155
156 static void l2cap_br_chan_destroy(struct bt_l2cap_chan *chan)
157 {
158 struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);
159
160 LOG_DBG("chan %p cid 0x%04x", br_chan, br_chan->rx.cid);
161
162 /* Cancel ongoing work. Since the channel can be re-used after this
163 * we need to sync to make sure that the kernel does not have it
164 * in its queue anymore.
165 *
166 * In the case where we are in the context of executing the rtx_work
167 * item, we don't sync as it will deadlock the workqueue.
168 */
169 struct k_work_q *rtx_work_queue = br_chan->rtx_work.queue;
170
171 if (rtx_work_queue == NULL || k_current_get() != &rtx_work_queue->thread) {
172 k_work_cancel_delayable_sync(&br_chan->rtx_work, &br_chan->rtx_sync);
173 } else {
174 k_work_cancel_delayable(&br_chan->rtx_work);
175 }
176
177 atomic_clear(BR_CHAN(chan)->flags);
178 }
179
180 static void l2cap_br_rtx_timeout(struct k_work *work)
181 {
182 struct bt_l2cap_br_chan *chan = BR_CHAN_RTX(work);
183
184 LOG_WRN("chan %p timeout", chan);
185
186 if (chan->rx.cid == BT_L2CAP_CID_BR_SIG) {
187 LOG_DBG("Skip BR/EDR signalling channel ");
188 atomic_clear_bit(chan->flags, L2CAP_FLAG_SIG_INFO_PENDING);
189 return;
190 }
191
192 LOG_DBG("chan %p %s scid 0x%04x", chan, bt_l2cap_chan_state_str(chan->state), chan->rx.cid);
193
194 switch (chan->state) {
195 case BT_L2CAP_CONFIG:
196 bt_l2cap_br_chan_disconnect(&chan->chan);
197 break;
198 case BT_L2CAP_DISCONNECTING:
199 case BT_L2CAP_CONNECTING:
200 l2cap_br_chan_cleanup(&chan->chan);
201 break;
202 default:
203 break;
204 }
205 }
206
207 static bool l2cap_br_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
208 bt_l2cap_chan_destroy_t destroy)
209 {
210 struct bt_l2cap_br_chan *ch = l2cap_br_chan_alloc_cid(conn, chan);
211
212 if (!ch) {
213 LOG_DBG("Unable to allocate L2CAP CID");
214 return false;
215 }
216
217 k_fifo_init(&ch->_pdu_tx_queue);
218
219 /* All dynamic channels have the destroy handler which makes sure that
220 * the RTX work structure is properly released with a cancel sync.
221 * The fixed signal channel is only removed when disconnected and the
222 * disconnected handler is always called from the workqueue itself so
223 * canceling from there should always succeed.
224 */
225 k_work_init_delayable(&ch->rtx_work, l2cap_br_rtx_timeout);
226 bt_l2cap_chan_add(conn, chan, destroy);
227
228 return true;
229 }
230
231 static uint8_t l2cap_br_get_ident(void)
232 {
233 static uint8_t ident;
234
235 ident++;
236 /* handle integer overflow (0 is not valid) */
237 if (!ident) {
238 ident++;
239 }
240
241 return ident;
242 }
243
244 static void raise_data_ready(struct bt_l2cap_br_chan *br_chan)
245 {
246 if (!atomic_set(&br_chan->_pdu_ready_lock, 1)) {
247 sys_slist_append(&br_chan->chan.conn->l2cap_data_ready,
248 &br_chan->_pdu_ready);
249 LOG_DBG("data ready raised");
250 } else {
251 LOG_DBG("data ready already");
252 }
253
254 bt_conn_data_ready(br_chan->chan.conn);
255 }
256
257 static void lower_data_ready(struct bt_l2cap_br_chan *br_chan)
258 {
259 struct bt_conn *conn = br_chan->chan.conn;
260 __maybe_unused sys_snode_t *s = sys_slist_get(&conn->l2cap_data_ready);
261
262 __ASSERT_NO_MSG(s == &br_chan->_pdu_ready);
263
264 __maybe_unused atomic_t old = atomic_set(&br_chan->_pdu_ready_lock, 0);
265
266 __ASSERT_NO_MSG(old);
267 }
268
269 static void cancel_data_ready(struct bt_l2cap_br_chan *br_chan)
270 {
271 struct bt_conn *conn = br_chan->chan.conn;
272
273 sys_slist_find_and_remove(&conn->l2cap_data_ready,
274 &br_chan->_pdu_ready);
275
276 atomic_set(&br_chan->_pdu_ready_lock, 0);
277 }
278
279 int bt_l2cap_br_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
280 bt_conn_tx_cb_t cb, void *user_data)
281 {
282 struct bt_l2cap_hdr *hdr;
283 struct bt_l2cap_chan *ch = bt_l2cap_br_lookup_tx_cid(conn, cid);
284 struct bt_l2cap_br_chan *br_chan = CONTAINER_OF(ch, struct bt_l2cap_br_chan, chan);
285
286 LOG_DBG("chan %p buf %p len %zu", br_chan, buf, buf->len);
287
288 hdr = net_buf_push(buf, sizeof(*hdr));
289 hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
290 hdr->cid = sys_cpu_to_le16(cid);
291
292 if (buf->user_data_size < sizeof(struct closure)) {
293 LOG_DBG("not enough room in user_data %d < %d pool %u",
294 buf->user_data_size,
295 CONFIG_BT_CONN_TX_USER_DATA_SIZE,
296 buf->pool_id);
297 return -EINVAL;
298 }
299
300 LOG_DBG("push PDU: cb %p userdata %p", cb, user_data);
301
302 make_closure(buf->user_data, cb, user_data);
303 net_buf_put(&br_chan->_pdu_tx_queue, buf);
304 raise_data_ready(br_chan);
305
306 return 0;
307 }
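
/* Typical caller flow (sketch; "tx_pool" and "payload" are illustrative,
 * the pool just needs BT_L2CAP_BUF_SIZE() worth of headroom):
 *
 *   struct net_buf *buf = bt_l2cap_create_pdu(&tx_pool, 0);
 *
 *   net_buf_add_mem(buf, payload, payload_len);
 *   if (bt_l2cap_br_send_cb(conn, cid, buf, NULL, NULL)) {
 *       net_buf_unref(buf);
 *   }
 *
 * The basic L2CAP header is pushed here, so callers only add the payload;
 * the PDU then sits on the channel TX FIFO until l2cap_br_data_pull()
 * hands it to the ACL layer.
 */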
308
309 /* Send the buffer, releasing it in case of failure.
310 * Any other cleanup on a failed send should be handled by the
311 * disconnected handler.
312 */
313 static inline void l2cap_send(struct bt_conn *conn, uint16_t cid,
314 struct net_buf *buf)
315 {
316 if (bt_l2cap_br_send_cb(conn, cid, buf, NULL, NULL)) {
317 net_buf_unref(buf);
318 }
319 }
320
321 static void l2cap_br_chan_send_req(struct bt_l2cap_br_chan *chan,
322 struct net_buf *buf, k_timeout_t timeout)
323 {
324
325 if (bt_l2cap_br_send_cb(chan->chan.conn, BT_L2CAP_CID_BR_SIG, buf,
326 NULL, NULL)) {
327 net_buf_unref(buf);
328 return;
329 }
330
331 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part A] page 126:
332 *
333 * The value of this timer is implementation-dependent but the minimum
334 * initial value is 1 second and the maximum initial value is 60
335 * seconds. One RTX timer shall exist for each outstanding signaling
336 * request, including each Echo Request. The timer disappears on the
337 * final expiration, when the response is received, or the physical
338 * link is lost.
339 */
340 k_work_reschedule(&chan->rtx_work, timeout);
341 }
342
343 /* L2CAP channel wants to send a PDU */
344 static bool chan_has_data(struct bt_l2cap_br_chan *br_chan)
345 {
346 return !k_fifo_is_empty(&br_chan->_pdu_tx_queue);
347 }
348
349 struct net_buf *l2cap_br_data_pull(struct bt_conn *conn,
350 size_t amount,
351 size_t *length)
352 {
353 const sys_snode_t *pdu_ready = sys_slist_peek_head(&conn->l2cap_data_ready);
354
355 if (!pdu_ready) {
356 LOG_DBG("nothing to send on this conn");
357 return NULL;
358 }
359
360 struct bt_l2cap_br_chan *br_chan = CONTAINER_OF(pdu_ready,
361 struct bt_l2cap_br_chan,
362 _pdu_ready);
363
364 /* Leave the PDU buffer in the queue until we have sent all its
365 * fragments.
366 */
367 struct net_buf *pdu = k_fifo_peek_head(&br_chan->_pdu_tx_queue);
368
369 __ASSERT(pdu, "signaled ready but no PDUs in the TX queue");
370
371 if (bt_buf_has_view(pdu)) {
372 LOG_ERR("already have view on %p", pdu);
373 return NULL;
374 }
375
376 /* We can't interleave ACL fragments from different channels for the
377 * same ACL conn -> we have to wait until a full L2 PDU is transferred
378 * before switching channels.
379 */
380 bool last_frag = amount >= pdu->len;
381
382 if (last_frag) {
383 LOG_DBG("last frag, removing %p", pdu);
384 __maybe_unused struct net_buf *b = k_fifo_get(&br_chan->_pdu_tx_queue, K_NO_WAIT);
385
386 __ASSERT_NO_MSG(b == pdu);
387
388 LOG_DBG("chan %p done", br_chan);
389 lower_data_ready(br_chan);
390
391 /* Append channel to list if it still has data */
392 if (chan_has_data(br_chan)) {
393 LOG_DBG("chan %p ready", br_chan);
394 raise_data_ready(br_chan);
395 }
396 }
397
398 *length = pdu->len;
399
400 return pdu;
401 }
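
/* TX path summary: bt_l2cap_br_send_cb() queues the PDU and raises the
 * channel's data-ready state, the connection TX processor then calls
 * l2cap_br_data_pull() for each ACL fragment, and the PDU is dequeued only
 * once its final fragment has been pulled, so fragments from different
 * channels never interleave on the same ACL connection.
 */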
402
403 static void l2cap_br_get_info(struct bt_l2cap_br *l2cap, uint16_t info_type)
404 {
405 struct bt_l2cap_info_req *info;
406 struct net_buf *buf;
407 struct bt_l2cap_sig_hdr *hdr;
408
409 LOG_DBG("info type %u", info_type);
410
411 if (atomic_test_bit(l2cap->chan.flags, L2CAP_FLAG_SIG_INFO_PENDING)) {
412 return;
413 }
414
415 switch (info_type) {
416 case BT_L2CAP_INFO_FEAT_MASK:
417 case BT_L2CAP_INFO_FIXED_CHAN:
418 break;
419 default:
420 LOG_WRN("Unsupported info type %u", info_type);
421 return;
422 }
423
424 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
425
426 atomic_set_bit(l2cap->chan.flags, L2CAP_FLAG_SIG_INFO_PENDING);
427 l2cap->info_ident = l2cap_br_get_ident();
428
429 hdr = net_buf_add(buf, sizeof(*hdr));
430 hdr->code = BT_L2CAP_INFO_REQ;
431 hdr->ident = l2cap->info_ident;
432 hdr->len = sys_cpu_to_le16(sizeof(*info));
433
434 info = net_buf_add(buf, sizeof(*info));
435 info->type = sys_cpu_to_le16(info_type);
436
437 l2cap_br_chan_send_req(&l2cap->chan, buf, L2CAP_BR_INFO_TIMEOUT);
438 }
439
440 static void connect_fixed_channel(struct bt_l2cap_br_chan *chan)
441 {
442 if (atomic_test_and_set_bit(chan->flags, L2CAP_FLAG_FIXED_CONNECTED)) {
443 return;
444 }
445
446 if (chan->chan.ops && chan->chan.ops->connected) {
447 chan->chan.ops->connected(&chan->chan);
448 }
449 }
450
451 static void connect_optional_fixed_channels(struct bt_l2cap_br *l2cap)
452 {
453 /* can be changed to a loop if more BR/EDR fixed channels are added */
454 if (l2cap->info_fixed_chan & BIT(BT_L2CAP_CID_BR_SMP)) {
455 struct bt_l2cap_chan *chan;
456
457 chan = bt_l2cap_br_lookup_rx_cid(l2cap->chan.chan.conn,
458 BT_L2CAP_CID_BR_SMP);
459 if (chan) {
460 connect_fixed_channel(BR_CHAN(chan));
461 }
462 }
463 }
464
465 static int l2cap_br_info_rsp(struct bt_l2cap_br *l2cap, uint8_t ident,
466 struct net_buf *buf)
467 {
468 struct bt_l2cap_info_rsp *rsp;
469 uint16_t type, result;
470 int err = 0;
471
472 if (atomic_test_bit(l2cap->chan.flags, L2CAP_FLAG_SIG_INFO_DONE)) {
473 return 0;
474 }
475
476 if (atomic_test_and_clear_bit(l2cap->chan.flags,
477 L2CAP_FLAG_SIG_INFO_PENDING)) {
478 /*
479 * Release the RTX timer since we got the response to the
480 * pending command request.
481 */
482 k_work_cancel_delayable(&l2cap->chan.rtx_work);
483 }
484
485 if (buf->len < sizeof(*rsp)) {
486 LOG_ERR("Too small info rsp packet size");
487 err = -EINVAL;
488 goto done;
489 }
490
491 if (ident != l2cap->info_ident) {
492 LOG_WRN("Idents mismatch");
493 err = -EINVAL;
494 goto done;
495 }
496
497 rsp = net_buf_pull_mem(buf, sizeof(*rsp));
498 result = sys_le16_to_cpu(rsp->result);
499 if (result != BT_L2CAP_INFO_SUCCESS) {
500 LOG_WRN("Result unsuccessful");
501 err = -EINVAL;
502 goto done;
503 }
504
505 type = sys_le16_to_cpu(rsp->type);
506
507 switch (type) {
508 case BT_L2CAP_INFO_FEAT_MASK:
509 if (buf->len < sizeof(uint32_t)) {
510 LOG_ERR("Invalid remote info feat mask");
511 err = -EINVAL;
512 break;
513 }
514 l2cap->info_feat_mask = net_buf_pull_le32(buf);
515 LOG_DBG("remote info mask 0x%08x", l2cap->info_feat_mask);
516
517 if (!(l2cap->info_feat_mask & L2CAP_FEAT_FIXED_CHAN_MASK)) {
518 break;
519 }
520
521 l2cap_br_get_info(l2cap, BT_L2CAP_INFO_FIXED_CHAN);
522 return 0;
523 case BT_L2CAP_INFO_FIXED_CHAN:
524 if (buf->len < sizeof(uint8_t)) {
525 LOG_ERR("Invalid remote info fixed chan");
526 err = -EINVAL;
527 break;
528 }
529 l2cap->info_fixed_chan = net_buf_pull_u8(buf);
530 LOG_DBG("remote fixed channel mask 0x%02x", l2cap->info_fixed_chan);
531
532 connect_optional_fixed_channels(l2cap);
533
534 break;
535 default:
536 LOG_WRN("type 0x%04x unsupported", type);
537 err = -EINVAL;
538 break;
539 }
540 done:
541 atomic_set_bit(l2cap->chan.flags, L2CAP_FLAG_SIG_INFO_DONE);
542 l2cap->info_ident = 0U;
543 return err;
544 }
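
/* Information exchange on a new ACL link: bt_l2cap_br_connected() first
 * requests BT_L2CAP_INFO_FEAT_MASK; if the peer advertises fixed channel
 * support (L2CAP_FEAT_FIXED_CHAN_MASK) a BT_L2CAP_INFO_FIXED_CHAN request
 * follows, and its response drives connect_optional_fixed_channels().
 */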
545
546 static uint8_t get_fixed_channels_mask(void)
547 {
548 uint8_t mask = 0U;
549
550 /* this needs to be enhanced if AMP Test Manager support is added */
551 STRUCT_SECTION_FOREACH(bt_l2cap_br_fixed_chan, fchan) {
552 mask |= BIT(fchan->cid);
553 }
554
555 return mask;
556 }
557
558 static int l2cap_br_info_req(struct bt_l2cap_br *l2cap, uint8_t ident,
559 struct net_buf *buf)
560 {
561 struct bt_conn *conn = l2cap->chan.chan.conn;
562 struct bt_l2cap_info_req *req = (void *)buf->data;
563 struct bt_l2cap_info_rsp *rsp;
564 struct net_buf *rsp_buf;
565 struct bt_l2cap_sig_hdr *hdr_info;
566 uint16_t type;
567
568 if (buf->len < sizeof(*req)) {
569 LOG_ERR("Too small info req packet size");
570 return -EINVAL;
571 }
572
573 rsp_buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
574
575 type = sys_le16_to_cpu(req->type);
576 LOG_DBG("type 0x%04x", type);
577
578 hdr_info = net_buf_add(rsp_buf, sizeof(*hdr_info));
579 hdr_info->code = BT_L2CAP_INFO_RSP;
580 hdr_info->ident = ident;
581
582 rsp = net_buf_add(rsp_buf, sizeof(*rsp));
583
584 switch (type) {
585 case BT_L2CAP_INFO_FEAT_MASK:
586 rsp->type = sys_cpu_to_le16(BT_L2CAP_INFO_FEAT_MASK);
587 rsp->result = sys_cpu_to_le16(BT_L2CAP_INFO_SUCCESS);
588 net_buf_add_le32(rsp_buf, L2CAP_FEAT_FIXED_CHAN_MASK);
589 hdr_info->len = sys_cpu_to_le16(sizeof(*rsp) + sizeof(uint32_t));
590 break;
591 case BT_L2CAP_INFO_FIXED_CHAN:
592 rsp->type = sys_cpu_to_le16(BT_L2CAP_INFO_FIXED_CHAN);
593 rsp->result = sys_cpu_to_le16(BT_L2CAP_INFO_SUCCESS);
594 /* fixed channel mask protocol data is 8 octets wide */
595 (void)memset(net_buf_add(rsp_buf, 8), 0, 8);
596 rsp->data[0] = get_fixed_channels_mask();
597
598 hdr_info->len = sys_cpu_to_le16(sizeof(*rsp) + 8);
599 break;
600 default:
601 rsp->type = req->type;
602 rsp->result = sys_cpu_to_le16(BT_L2CAP_INFO_NOTSUPP);
603 hdr_info->len = sys_cpu_to_le16(sizeof(*rsp));
604 break;
605 }
606
607 l2cap_send(conn, BT_L2CAP_CID_BR_SIG, rsp_buf);
608
609 return 0;
610 }
611
612 void bt_l2cap_br_connected(struct bt_conn *conn)
613 {
614 struct bt_l2cap_chan *chan;
615
616 STRUCT_SECTION_FOREACH(bt_l2cap_br_fixed_chan, fchan) {
617 struct bt_l2cap_br_chan *br_chan;
618
619 if (!fchan->accept) {
620 continue;
621 }
622
623 if (fchan->accept(conn, &chan) < 0) {
624 continue;
625 }
626
627 br_chan = BR_CHAN(chan);
628
629 br_chan->rx.cid = fchan->cid;
630 br_chan->tx.cid = fchan->cid;
631
632 if (!l2cap_br_chan_add(conn, chan, NULL)) {
633 return;
634 }
635
636 /*
637 * other fixed channels will be connected after Information
638 * Response is received
639 */
640 if (fchan->cid == BT_L2CAP_CID_BR_SIG) {
641 struct bt_l2cap_br *sig_ch;
642
643 connect_fixed_channel(br_chan);
644
645 sig_ch = CONTAINER_OF(br_chan, struct bt_l2cap_br, chan);
646 l2cap_br_get_info(sig_ch, BT_L2CAP_INFO_FEAT_MASK);
647 }
648 }
649 }
650
651 void bt_l2cap_br_disconnected(struct bt_conn *conn)
652 {
653 struct bt_l2cap_chan *chan, *next;
654
655 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
656 bt_l2cap_br_chan_del(chan);
657 }
658 }
659
660 static struct bt_l2cap_server *l2cap_br_server_lookup_psm(uint16_t psm)
661 {
662 struct bt_l2cap_server *server;
663
664 SYS_SLIST_FOR_EACH_CONTAINER(&br_servers, server, node) {
665 if (server->psm == psm) {
666 return server;
667 }
668 }
669
670 return NULL;
671 }
672
673 static void l2cap_br_conf_add_mtu(struct net_buf *buf, const uint16_t mtu)
674 {
675 net_buf_add_u8(buf, BT_L2CAP_CONF_OPT_MTU);
676 net_buf_add_u8(buf, sizeof(mtu));
677 net_buf_add_le16(buf, mtu);
678 }
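
/* The option is encoded as a type/length/value triplet on the wire:
 *
 *   0x01 (BT_L2CAP_CONF_OPT_MTU) | 0x02 | MTU as little-endian uint16
 *
 * i.e. a configuration request carrying only this option adds four octets
 * of option data after the bt_l2cap_conf_req header.
 */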
679
680 static void l2cap_br_conf(struct bt_l2cap_chan *chan)
681 {
682 struct bt_l2cap_sig_hdr *hdr;
683 struct bt_l2cap_conf_req *conf;
684 struct net_buf *buf;
685
686 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
687
688 hdr = net_buf_add(buf, sizeof(*hdr));
689 hdr->code = BT_L2CAP_CONF_REQ;
690 hdr->ident = l2cap_br_get_ident();
691 conf = net_buf_add(buf, sizeof(*conf));
692 (void)memset(conf, 0, sizeof(*conf));
693
694 conf->dcid = sys_cpu_to_le16(BR_CHAN(chan)->tx.cid);
695 /*
696 * Add the MTU option if the app set a non-default BR/EDR L2CAP MTU,
697 * otherwise send empty configuration data, meaning the default MTU
698 * is to be used.
699 */
700 if (BR_CHAN(chan)->rx.mtu != L2CAP_BR_DEFAULT_MTU) {
701 l2cap_br_conf_add_mtu(buf, BR_CHAN(chan)->rx.mtu);
702 }
703
704 hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
705
706 /*
707 * TODO:
708 * might be needed to start tracking number of configuration iterations
709 * on both directions
710 */
711 l2cap_br_chan_send_req(BR_CHAN(chan), buf, L2CAP_BR_CFG_TIMEOUT);
712 }
713
714 enum l2cap_br_conn_security_result {
715 L2CAP_CONN_SECURITY_PASSED,
716 L2CAP_CONN_SECURITY_REJECT,
717 L2CAP_CONN_SECURITY_PENDING
718 };
719
720 /*
721 * Security helper against channel connection.
722 * Returns L2CAP_CONN_SECURITY_PASSED if:
723 * - the existing link security is sufficient for the requested PSM, or
724 * - a legacy (non-SSP) device is connecting with low security requirements.
725 * Returns L2CAP_CONN_SECURITY_PENDING if:
726 * - the channel connection process is on hold because valid security
727 * conditions triggered authentication indirectly in a subcall.
728 * Returns L2CAP_CONN_SECURITY_REJECT if:
729 * - the bt_conn_set_security API returns < 0.
730 */
731
732 static enum l2cap_br_conn_security_result
733 l2cap_br_conn_security(struct bt_l2cap_chan *chan, const uint16_t psm)
734 {
735 int check;
736 struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);
737
738 /* For SDP PSM there's no need to change existing security on link */
739 if (br_chan->required_sec_level == BT_SECURITY_L0) {
740 return L2CAP_CONN_SECURITY_PASSED;
741 }
742
743 /*
744 * No link key needed for legacy devices (pre 2.1) and when low security
745 * level is required.
746 */
747 if (br_chan->required_sec_level == BT_SECURITY_L1 &&
748 !BT_FEAT_HOST_SSP(chan->conn->br.features)) {
749 return L2CAP_CONN_SECURITY_PASSED;
750 }
751
752 switch (br_chan->required_sec_level) {
753 case BT_SECURITY_L4:
754 case BT_SECURITY_L3:
755 case BT_SECURITY_L2:
756 break;
757 default:
758 /*
759 * For non-SDP PSM connections, GAP's Security Mode 4 requires at
760 * least an unauthenticated link key and encryption enabled before
761 * any L2CAP CoC traffic if the remote supports SSP. So preset the
762 * local requirement to MEDIUM (L2) security to trigger it if needed.
763 */
764 if (BT_FEAT_HOST_SSP(chan->conn->br.features)) {
765 br_chan->required_sec_level = BT_SECURITY_L2;
766 }
767 break;
768 }
769
770 check = bt_conn_set_security(chan->conn, br_chan->required_sec_level);
771
772 /*
773 * Handle the case where the existing connection security level
774 * already covers the channel (service) security requirements and
775 * bt_conn_set_security returns 0, which also implies there was no
776 * need to trigger authentication.
777 */
778 if (check == 0 &&
779 chan->conn->sec_level >= br_chan->required_sec_level) {
780 return L2CAP_CONN_SECURITY_PASSED;
781 }
782
783 /*
784 * If 'check' is still 0 here, the local host has just sent the HCI
785 * authentication command to start the procedure that raises link
786 * security, since the service/profile requires it.
787 */
788 if (check == 0) {
789 return L2CAP_CONN_SECURITY_PENDING;
790 }
791
792 /*
793 * Any other value of 'check' means there was an internal
794 * validation condition forbidding the start of authentication at
795 * this moment.
796 */
797 return L2CAP_CONN_SECURITY_REJECT;
798 }
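
/* Both the acceptor path (l2cap_br_conn_req) and the initiator path
 * (bt_l2cap_br_chan_connect) use this helper: PASSED lets the connection
 * proceed immediately, PENDING parks it until l2cap_br_encrypt_change()
 * runs, and REJECT results in a security block response and/or channel
 * cleanup.
 */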
799
800 static void l2cap_br_send_conn_rsp(struct bt_conn *conn, uint16_t scid,
801 uint16_t dcid, uint8_t ident, uint16_t result)
802 {
803 struct net_buf *buf;
804 struct bt_l2cap_conn_rsp *rsp;
805 struct bt_l2cap_sig_hdr *hdr;
806
807 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
808
809 hdr = net_buf_add(buf, sizeof(*hdr));
810 hdr->code = BT_L2CAP_CONN_RSP;
811 hdr->ident = ident;
812 hdr->len = sys_cpu_to_le16(sizeof(*rsp));
813
814 rsp = net_buf_add(buf, sizeof(*rsp));
815 rsp->dcid = sys_cpu_to_le16(dcid);
816 rsp->scid = sys_cpu_to_le16(scid);
817 rsp->result = sys_cpu_to_le16(result);
818
819 if (result == BT_L2CAP_BR_PENDING) {
820 rsp->status = sys_cpu_to_le16(BT_L2CAP_CS_AUTHEN_PEND);
821 } else {
822 rsp->status = sys_cpu_to_le16(BT_L2CAP_CS_NO_INFO);
823 }
824
825 l2cap_send(conn, BT_L2CAP_CID_BR_SIG, buf);
826 }
827
828 static int l2cap_br_conn_req_reply(struct bt_l2cap_chan *chan, uint16_t result)
829 {
830 /* Send response to connection request only when in acceptor role */
831 if (!atomic_test_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_ACCEPTOR)) {
832 return -ESRCH;
833 }
834
835 l2cap_br_send_conn_rsp(chan->conn, BR_CHAN(chan)->tx.cid,
836 BR_CHAN(chan)->rx.cid, BR_CHAN(chan)->ident, result);
837 BR_CHAN(chan)->ident = 0U;
838
839 return 0;
840 }
841
842 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
843 #if defined(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
844 void bt_l2cap_br_chan_set_state_debug(struct bt_l2cap_chan *chan,
845 bt_l2cap_chan_state_t state,
846 const char *func, int line)
847 {
848 struct bt_l2cap_br_chan *br_chan;
849
850 br_chan = BR_CHAN(chan);
851
852 LOG_DBG("chan %p psm 0x%04x %s -> %s", chan, br_chan->psm,
853 bt_l2cap_chan_state_str(br_chan->state), bt_l2cap_chan_state_str(state));
854
855 /* check transition validity */
856 switch (state) {
857 case BT_L2CAP_DISCONNECTED:
858 /* this state is always allowed regardless of the old state */
859 break;
860 case BT_L2CAP_CONNECTING:
861 if (br_chan->state != BT_L2CAP_DISCONNECTED) {
862 LOG_WRN("%s()%d: invalid transition", func, line);
863 }
864 break;
865 case BT_L2CAP_CONFIG:
866 if (br_chan->state != BT_L2CAP_CONNECTING) {
867 LOG_WRN("%s()%d: invalid transition", func, line);
868 }
869 break;
870 case BT_L2CAP_CONNECTED:
871 if (br_chan->state != BT_L2CAP_CONFIG &&
872 br_chan->state != BT_L2CAP_CONNECTING) {
873 LOG_WRN("%s()%d: invalid transition", func, line);
874 }
875 break;
876 case BT_L2CAP_DISCONNECTING:
877 if (br_chan->state != BT_L2CAP_CONFIG &&
878 br_chan->state != BT_L2CAP_CONNECTED) {
879 LOG_WRN("%s()%d: invalid transition", func, line);
880 }
881 break;
882 default:
883 LOG_ERR("%s()%d: unknown (%u) state was set", func, line, state);
884 return;
885 }
886
887 br_chan->state = state;
888 }
889 #else
890 void bt_l2cap_br_chan_set_state(struct bt_l2cap_chan *chan,
891 bt_l2cap_chan_state_t state)
892 {
893 BR_CHAN(chan)->state = state;
894 }
895 #endif /* CONFIG_BT_L2CAP_LOG_LEVEL_DBG */
896 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
897
898 void bt_l2cap_br_chan_del(struct bt_l2cap_chan *chan)
899 {
900 const struct bt_l2cap_chan_ops *ops = chan->ops;
901 struct bt_l2cap_br_chan *br_chan = CONTAINER_OF(chan, struct bt_l2cap_br_chan, chan);
902
903 LOG_DBG("conn %p chan %p", chan->conn, chan);
904
905 if (!chan->conn) {
906 goto destroy;
907 }
908
909 cancel_data_ready(br_chan);
910
911 /* Remove buffers on the PDU TX queue. */
912 while (chan_has_data(br_chan)) {
913 struct net_buf *buf = net_buf_get(&br_chan->_pdu_tx_queue, K_NO_WAIT);
914
915 net_buf_unref(buf);
916 }
917
918 if (ops->disconnected) {
919 ops->disconnected(chan);
920 }
921
922 chan->conn = NULL;
923
924 destroy:
925 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
926 /* Reset internal members of common channel */
927 bt_l2cap_br_chan_set_state(chan, BT_L2CAP_DISCONNECTED);
928 BR_CHAN(chan)->psm = 0U;
929 #endif
930 if (chan->destroy) {
931 chan->destroy(chan);
932 }
933
934 if (ops->released) {
935 ops->released(chan);
936 }
937 }
938
939 static void l2cap_br_conn_req(struct bt_l2cap_br *l2cap, uint8_t ident,
940 struct net_buf *buf)
941 {
942 struct bt_conn *conn = l2cap->chan.chan.conn;
943 struct bt_l2cap_chan *chan;
944 struct bt_l2cap_server *server;
945 struct bt_l2cap_conn_req *req = (void *)buf->data;
946 uint16_t psm, scid, result;
947 struct bt_l2cap_br_chan *br_chan;
948
949 if (buf->len < sizeof(*req)) {
950 LOG_ERR("Too small L2CAP conn req packet size");
951 return;
952 }
953
954 psm = sys_le16_to_cpu(req->psm);
955 scid = sys_le16_to_cpu(req->scid);
956
957 LOG_DBG("psm 0x%02x scid 0x%04x", psm, scid);
958
959 /* Check if there is a server registered */
960 server = l2cap_br_server_lookup_psm(psm);
961 if (!server) {
962 result = BT_L2CAP_BR_ERR_PSM_NOT_SUPP;
963 goto no_chan;
964 }
965
966 /*
967 * Report a security violation for a non-SDP channel without
968 * encryption when the remote supports SSP.
969 */
970 if (server->sec_level != BT_SECURITY_L0 &&
971 BT_FEAT_HOST_SSP(conn->br.features) && !conn->encrypt) {
972 result = BT_L2CAP_BR_ERR_SEC_BLOCK;
973 goto no_chan;
974 }
975
976 if (!L2CAP_BR_CID_IS_DYN(scid)) {
977 result = BT_L2CAP_BR_ERR_INVALID_SCID;
978 goto no_chan;
979 }
980
981 chan = bt_l2cap_br_lookup_tx_cid(conn, scid);
982 if (chan) {
983 /*
984 * We have a chan here, but only because the SCID is already in
985 * use, so it is not the channel we are supposed to pass to
986 * l2cap_br_conn_req_reply as the wrong DCID would be used.
987 */
988 result = BT_L2CAP_BR_ERR_SCID_IN_USE;
989 goto no_chan;
990 }
991
992 /*
993 * Request the server to accept the new connection and allocate the
994 * channel. If no free channel is available for the PSM, reply with
995 * the proper result and quit, since the chan pointer is then uninitialized.
996 */
997 if (server->accept(conn, server, &chan) < 0) {
998 result = BT_L2CAP_BR_ERR_NO_RESOURCES;
999 goto no_chan;
1000 }
1001
1002 br_chan = BR_CHAN(chan);
1003 br_chan->required_sec_level = server->sec_level;
1004
1005 l2cap_br_chan_add(conn, chan, l2cap_br_chan_destroy);
1006 BR_CHAN(chan)->tx.cid = scid;
1007 br_chan->ident = ident;
1008 bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONNECTING);
1009 atomic_set_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_ACCEPTOR);
1010
1011 /* Disable fragmentation of l2cap rx pdu */
1012 BR_CHAN(chan)->rx.mtu = MIN(BR_CHAN(chan)->rx.mtu, BT_L2CAP_RX_MTU);
1013
1014 switch (l2cap_br_conn_security(chan, psm)) {
1015 case L2CAP_CONN_SECURITY_PENDING:
1016 result = BT_L2CAP_BR_PENDING;
1017 /* TODO: auth timeout */
1018 break;
1019 case L2CAP_CONN_SECURITY_PASSED:
1020 result = BT_L2CAP_BR_SUCCESS;
1021 break;
1022 case L2CAP_CONN_SECURITY_REJECT:
1023 default:
1024 result = BT_L2CAP_BR_ERR_SEC_BLOCK;
1025 break;
1026 }
1027 /* Reply on connection request as acceptor */
1028 l2cap_br_conn_req_reply(chan, result);
1029
1030 if (result != BT_L2CAP_BR_SUCCESS) {
1031 /* Disconnect link when security rules were violated */
1032 if (result == BT_L2CAP_BR_ERR_SEC_BLOCK) {
1033 bt_conn_disconnect(conn, BT_HCI_ERR_AUTH_FAIL);
1034 }
1035
1036 return;
1037 }
1038
1039 bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONFIG);
1040 l2cap_br_conf(chan);
1041 return;
1042
1043 no_chan:
1044 l2cap_br_send_conn_rsp(conn, scid, 0, ident, result);
1045 }
1046
1047 static void l2cap_br_conf_rsp(struct bt_l2cap_br *l2cap, uint8_t ident,
1048 uint16_t len, struct net_buf *buf)
1049 {
1050 struct bt_conn *conn = l2cap->chan.chan.conn;
1051 struct bt_l2cap_chan *chan;
1052 struct bt_l2cap_conf_rsp *rsp = (void *)buf->data;
1053 uint16_t flags, scid, result, opt_len;
1054 struct bt_l2cap_br_chan *br_chan;
1055
1056 if (buf->len < sizeof(*rsp)) {
1057 LOG_ERR("Too small L2CAP conf rsp packet size");
1058 return;
1059 }
1060
1061 flags = sys_le16_to_cpu(rsp->flags);
1062 scid = sys_le16_to_cpu(rsp->scid);
1063 result = sys_le16_to_cpu(rsp->result);
1064 opt_len = len - sizeof(*rsp);
1065
1066 LOG_DBG("scid 0x%04x flags 0x%02x result 0x%02x len %u", scid, flags, result, opt_len);
1067
1068 chan = bt_l2cap_br_lookup_rx_cid(conn, scid);
1069 if (!chan) {
1070 LOG_ERR("channel mismatch!");
1071 return;
1072 }
1073
1074 br_chan = BR_CHAN(chan);
1075
1076 /* Release RTX work since got the response */
1077 k_work_cancel_delayable(&br_chan->rtx_work);
1078
1079 /*
1080 * TODO: handle other results than success and parse response data if
1081 * available
1082 */
1083 switch (result) {
1084 case BT_L2CAP_CONF_SUCCESS:
1085 atomic_set_bit(br_chan->flags, L2CAP_FLAG_CONN_LCONF_DONE);
1086
1087 if (br_chan->state == BT_L2CAP_CONFIG &&
1088 atomic_test_bit(br_chan->flags,
1089 L2CAP_FLAG_CONN_RCONF_DONE)) {
1090 LOG_DBG("scid 0x%04x rx MTU %u dcid 0x%04x tx MTU %u", br_chan->rx.cid,
1091 br_chan->rx.mtu, br_chan->tx.cid, br_chan->tx.mtu);
1092
1093 bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONNECTED);
1094 if (chan->ops && chan->ops->connected) {
1095 chan->ops->connected(chan);
1096 }
1097 }
1098 break;
1099 default:
1100 /* currently disconnect channel on non success result */
1101 bt_l2cap_chan_disconnect(chan);
1102 break;
1103 }
1104 }
1105
1106 int bt_l2cap_br_server_register(struct bt_l2cap_server *server)
1107 {
1108 if (server->psm < L2CAP_BR_PSM_START || !server->accept) {
1109 return -EINVAL;
1110 }
1111
1112 /* PSM must be odd and the LSB of the upper byte must be 0 (e.g. 0x1001 is valid, 0x0002 and 0x0101 are not) */
1113 if ((server->psm & 0x0101) != 0x0001) {
1114 return -EINVAL;
1115 }
1116
1117 if (server->sec_level > BT_SECURITY_L4) {
1118 return -EINVAL;
1119 } else if (server->sec_level == BT_SECURITY_L0 &&
1120 server->psm != L2CAP_BR_PSM_SDP) {
1121 server->sec_level = BT_SECURITY_L1;
1122 }
1123
1124 /* Check if given PSM is already in use */
1125 if (l2cap_br_server_lookup_psm(server->psm)) {
1126 LOG_DBG("PSM already registered");
1127 return -EADDRINUSE;
1128 }
1129
1130 LOG_DBG("PSM 0x%04x", server->psm);
1131
1132 sys_slist_append(&br_servers, &server->node);
1133
1134 return 0;
1135 }
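
/* Minimal registration sketch (the server struct, channel and accept
 * callback names below are illustrative, not part of this file):
 *
 *   static int app_accept(struct bt_conn *conn, struct bt_l2cap_server *server,
 *                         struct bt_l2cap_chan **chan)
 *   {
 *       *chan = &app_chan.chan;  // caller-owned bt_l2cap_br_chan with ops set
 *       return 0;
 *   }
 *
 *   static struct bt_l2cap_server app_server = {
 *       .psm = 0x1001,
 *       .sec_level = BT_SECURITY_L1,
 *       .accept = app_accept,
 *   };
 *
 *   bt_l2cap_br_server_register(&app_server);
 */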
1136
1137 static void l2cap_br_send_reject(struct bt_conn *conn, uint8_t ident,
1138 uint16_t reason, void *data, uint8_t data_len)
1139 {
1140 struct bt_l2cap_cmd_reject *rej;
1141 struct bt_l2cap_sig_hdr *hdr;
1142 struct net_buf *buf;
1143
1144 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
1145
1146 hdr = net_buf_add(buf, sizeof(*hdr));
1147 hdr->code = BT_L2CAP_CMD_REJECT;
1148 hdr->ident = ident;
1149 hdr->len = sys_cpu_to_le16(sizeof(*rej) + data_len);
1150
1151 rej = net_buf_add(buf, sizeof(*rej));
1152 rej->reason = sys_cpu_to_le16(reason);
1153
1154 /*
1155 * Optional data, if present, must already be in little-endian format
1156 * as prepared by the caller, and be compliant with Core 4.2
1157 * [Vol 3, Part A, 4.1, table 4.4].
1158 */
1159 if (data) {
1160 net_buf_add_mem(buf, data, data_len);
1161 }
1162
1163 l2cap_send(conn, BT_L2CAP_CID_BR_SIG, buf);
1164 }
1165
1166 static uint16_t l2cap_br_conf_opt_mtu(struct bt_l2cap_chan *chan,
1167 struct net_buf *buf, size_t len)
1168 {
1169 uint16_t mtu, result = BT_L2CAP_CONF_SUCCESS;
1170
1171 /* Core 4.2 [Vol 3, Part A, 5.1] MTU payload length */
1172 if (len != 2) {
1173 LOG_ERR("tx MTU length %zu invalid", len);
1174 result = BT_L2CAP_CONF_REJECT;
1175 goto done;
1176 }
1177
1178 /* pulling MTU value moves buf data to next option item */
1179 mtu = net_buf_pull_le16(buf);
1180 if (mtu < L2CAP_BR_MIN_MTU) {
1181 result = BT_L2CAP_CONF_UNACCEPT;
1182 BR_CHAN(chan)->tx.mtu = L2CAP_BR_MIN_MTU;
1183 LOG_DBG("tx MTU %u invalid", mtu);
1184 goto done;
1185 }
1186
1187 BR_CHAN(chan)->tx.mtu = mtu;
1188 LOG_DBG("tx MTU %u", mtu);
1189 done:
1190 return result;
1191 }
1192
1193 static void l2cap_br_conf_req(struct bt_l2cap_br *l2cap, uint8_t ident,
1194 uint16_t len, struct net_buf *buf)
1195 {
1196 struct bt_conn *conn = l2cap->chan.chan.conn;
1197 struct bt_l2cap_chan *chan;
1198 struct bt_l2cap_conf_req *req;
1199 struct bt_l2cap_sig_hdr *hdr;
1200 struct bt_l2cap_conf_rsp *rsp;
1201 struct bt_l2cap_conf_opt *opt;
1202 uint16_t flags, dcid, opt_len, hint, result = BT_L2CAP_CONF_SUCCESS;
1203
1204 if (buf->len < sizeof(*req)) {
1205 LOG_ERR("Too small L2CAP conf req packet size");
1206 return;
1207 }
1208
1209 req = net_buf_pull_mem(buf, sizeof(*req));
1210 flags = sys_le16_to_cpu(req->flags);
1211 dcid = sys_le16_to_cpu(req->dcid);
1212 opt_len = len - sizeof(*req);
1213
1214 LOG_DBG("dcid 0x%04x flags 0x%02x len %u", dcid, flags, opt_len);
1215
1216 chan = bt_l2cap_br_lookup_rx_cid(conn, dcid);
1217 if (!chan) {
1218 LOG_ERR("rx channel mismatch!");
1219 struct bt_l2cap_cmd_reject_cid_data data = {.scid = req->dcid,
1220 .dcid = 0,
1221 };
1222
1223 l2cap_br_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID,
1224 &data, sizeof(data));
1225 return;
1226 }
1227
1228 if (!opt_len) {
1229 LOG_DBG("tx default MTU %u", L2CAP_BR_DEFAULT_MTU);
1230 BR_CHAN(chan)->tx.mtu = L2CAP_BR_DEFAULT_MTU;
1231 goto send_rsp;
1232 }
1233
1234 while (buf->len >= sizeof(*opt)) {
1235 opt = net_buf_pull_mem(buf, sizeof(*opt));
1236
1237 /* make sure the opt object can be safely dereferenced in this iteration */
1238 if (buf->len < opt->len) {
1239 LOG_ERR("Received too short option data");
1240 result = BT_L2CAP_CONF_REJECT;
1241 break;
1242 }
1243
1244 hint = opt->type & BT_L2CAP_CONF_HINT;
1245
1246 switch (opt->type & BT_L2CAP_CONF_MASK) {
1247 case BT_L2CAP_CONF_OPT_MTU:
1248 /* getting MTU modifies buf internals */
1249 result = l2cap_br_conf_opt_mtu(chan, buf, opt->len);
1250 /*
1251 * MTU is done. For now bail out of the loop, but later on there
1252 * may be a need to continue checking the next options that come
1253 * after the MTU value, and then goto is not the proper way out
1254 * here.
1255 */
1256 goto send_rsp;
1257 default:
1258 if (!hint) {
1259 LOG_DBG("option %u not handled", opt->type);
1260 goto send_rsp;
1261 }
1262
1263 /* Update buffer to point at next option */
1264 net_buf_pull(buf, opt->len);
1265 break;
1266 }
1267 }
1268
1269 send_rsp:
1270 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
1271
1272 hdr = net_buf_add(buf, sizeof(*hdr));
1273 hdr->code = BT_L2CAP_CONF_RSP;
1274 hdr->ident = ident;
1275 rsp = net_buf_add(buf, sizeof(*rsp));
1276 (void)memset(rsp, 0, sizeof(*rsp));
1277
1278 rsp->result = sys_cpu_to_le16(result);
1279 rsp->scid = sys_cpu_to_le16(BR_CHAN(chan)->tx.cid);
1280
1281 /*
1282 * TODO: If options other than MTU became meaningful then processing
1283 * the options chain need to be modified and taken into account when
1284 * sending back to peer.
1285 */
1286 if (result == BT_L2CAP_CONF_UNACCEPT) {
1287 l2cap_br_conf_add_mtu(buf, BR_CHAN(chan)->tx.mtu);
1288 }
1289
1290 hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
1291
1292 l2cap_send(conn, BT_L2CAP_CID_BR_SIG, buf);
1293
1294 if (result != BT_L2CAP_CONF_SUCCESS) {
1295 return;
1296 }
1297
1298 atomic_set_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_RCONF_DONE);
1299
1300 if (atomic_test_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_LCONF_DONE) &&
1301 BR_CHAN(chan)->state == BT_L2CAP_CONFIG) {
1302 LOG_DBG("scid 0x%04x rx MTU %u dcid 0x%04x tx MTU %u", BR_CHAN(chan)->rx.cid,
1303 BR_CHAN(chan)->rx.mtu, BR_CHAN(chan)->tx.cid, BR_CHAN(chan)->tx.mtu);
1304
1305 bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONNECTED);
1306 if (chan->ops && chan->ops->connected) {
1307 chan->ops->connected(chan);
1308 }
1309 }
1310 }
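
/* Configuration completes only when both directions are done:
 * L2CAP_FLAG_CONN_LCONF_DONE is set once the peer accepts our request
 * (l2cap_br_conf_rsp) and L2CAP_FLAG_CONN_RCONF_DONE once we accept the
 * peer's (above); whichever side finishes last moves the channel to
 * BT_L2CAP_CONNECTED and invokes the connected() callback.
 */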
1311
1312 static struct bt_l2cap_br_chan *l2cap_br_remove_tx_cid(struct bt_conn *conn,
1313 uint16_t cid)
1314 {
1315 struct bt_l2cap_chan *chan;
1316 sys_snode_t *prev = NULL;
1317
1318 /* Protect fixed channels against accidental removal */
1319 if (!L2CAP_BR_CID_IS_DYN(cid)) {
1320 return NULL;
1321 }
1322
1323 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
1324 if (BR_CHAN(chan)->tx.cid == cid) {
1325 sys_slist_remove(&conn->channels, prev, &chan->node);
1326 return BR_CHAN(chan);
1327 }
1328
1329 prev = &chan->node;
1330 }
1331
1332 return NULL;
1333 }
1334
1335 static void l2cap_br_disconn_req(struct bt_l2cap_br *l2cap, uint8_t ident,
1336 struct net_buf *buf)
1337 {
1338 struct bt_conn *conn = l2cap->chan.chan.conn;
1339 struct bt_l2cap_br_chan *chan;
1340 struct bt_l2cap_disconn_req *req = (void *)buf->data;
1341 struct bt_l2cap_disconn_rsp *rsp;
1342 struct bt_l2cap_sig_hdr *hdr;
1343 uint16_t scid, dcid;
1344
1345 if (buf->len < sizeof(*req)) {
1346 LOG_ERR("Too small disconn req packet size");
1347 return;
1348 }
1349
1350 dcid = sys_le16_to_cpu(req->dcid);
1351 scid = sys_le16_to_cpu(req->scid);
1352
1353 LOG_DBG("scid 0x%04x dcid 0x%04x", dcid, scid);
1354
1355 chan = l2cap_br_remove_tx_cid(conn, scid);
1356 if (!chan) {
1357 struct bt_l2cap_cmd_reject_cid_data data;
1358
1359 data.scid = req->scid;
1360 data.dcid = req->dcid;
1361 l2cap_br_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID,
1362 &data, sizeof(data));
1363 return;
1364 }
1365
1366 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
1367
1368 hdr = net_buf_add(buf, sizeof(*hdr));
1369 hdr->code = BT_L2CAP_DISCONN_RSP;
1370 hdr->ident = ident;
1371 hdr->len = sys_cpu_to_le16(sizeof(*rsp));
1372
1373 rsp = net_buf_add(buf, sizeof(*rsp));
1374 rsp->dcid = sys_cpu_to_le16(chan->rx.cid);
1375 rsp->scid = sys_cpu_to_le16(chan->tx.cid);
1376
1377 bt_l2cap_br_chan_del(&chan->chan);
1378
1379 l2cap_send(conn, BT_L2CAP_CID_BR_SIG, buf);
1380 }
1381
1382 static void l2cap_br_connected(struct bt_l2cap_chan *chan)
1383 {
1384 LOG_DBG("ch %p cid 0x%04x", BR_CHAN(chan), BR_CHAN(chan)->rx.cid);
1385 }
1386
1387 static void l2cap_br_disconnected(struct bt_l2cap_chan *chan)
1388 {
1389 struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);
1390
1391 LOG_DBG("ch %p cid 0x%04x", br_chan, br_chan->rx.cid);
1392
1393 if (atomic_test_and_clear_bit(br_chan->flags,
1394 L2CAP_FLAG_SIG_INFO_PENDING)) {
1395 /* Cancel RTX work on signal channel.
1396 * Disconnected callback is always called from system workqueue
1397 * so this should always succeed.
1398 */
1399 (void)k_work_cancel_delayable(&br_chan->rtx_work);
1400 }
1401 }
1402
1403 int bt_l2cap_br_chan_disconnect(struct bt_l2cap_chan *chan)
1404 {
1405 struct bt_conn *conn = chan->conn;
1406 struct net_buf *buf;
1407 struct bt_l2cap_disconn_req *req;
1408 struct bt_l2cap_sig_hdr *hdr;
1409 struct bt_l2cap_br_chan *br_chan;
1410
1411 if (!conn) {
1412 return -ENOTCONN;
1413 }
1414
1415 br_chan = BR_CHAN(chan);
1416
1417 if (br_chan->state == BT_L2CAP_DISCONNECTING) {
1418 return -EALREADY;
1419 }
1420
1421 LOG_DBG("chan %p scid 0x%04x dcid 0x%04x", chan, br_chan->rx.cid, br_chan->tx.cid);
1422
1423 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
1424
1425 hdr = net_buf_add(buf, sizeof(*hdr));
1426 hdr->code = BT_L2CAP_DISCONN_REQ;
1427 hdr->ident = l2cap_br_get_ident();
1428 hdr->len = sys_cpu_to_le16(sizeof(*req));
1429
1430 req = net_buf_add(buf, sizeof(*req));
1431 req->dcid = sys_cpu_to_le16(br_chan->tx.cid);
1432 req->scid = sys_cpu_to_le16(br_chan->rx.cid);
1433
1434 l2cap_br_chan_send_req(br_chan, buf, L2CAP_BR_DISCONN_TIMEOUT);
1435 bt_l2cap_br_chan_set_state(chan, BT_L2CAP_DISCONNECTING);
1436
1437 return 0;
1438 }
1439
1440 static void l2cap_br_disconn_rsp(struct bt_l2cap_br *l2cap, uint8_t ident,
1441 struct net_buf *buf)
1442 {
1443 struct bt_conn *conn = l2cap->chan.chan.conn;
1444 struct bt_l2cap_br_chan *chan;
1445 struct bt_l2cap_disconn_rsp *rsp = (void *)buf->data;
1446 uint16_t dcid, scid;
1447
1448 if (buf->len < sizeof(*rsp)) {
1449 LOG_ERR("Too small disconn rsp packet size");
1450 return;
1451 }
1452
1453 dcid = sys_le16_to_cpu(rsp->dcid);
1454 scid = sys_le16_to_cpu(rsp->scid);
1455
1456 LOG_DBG("dcid 0x%04x scid 0x%04x", dcid, scid);
1457
1458 chan = l2cap_br_remove_tx_cid(conn, dcid);
1459 if (!chan) {
1460 LOG_WRN("No dcid 0x%04x channel found", dcid);
1461 return;
1462 }
1463
1464 bt_l2cap_br_chan_del(&chan->chan);
1465 }
1466
1467 int bt_l2cap_br_chan_connect(struct bt_conn *conn, struct bt_l2cap_chan *chan,
1468 uint16_t psm)
1469 {
1470 struct net_buf *buf;
1471 struct bt_l2cap_sig_hdr *hdr;
1472 struct bt_l2cap_conn_req *req;
1473 struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);
1474
1475 if (!psm) {
1476 return -EINVAL;
1477 }
1478
1479 if (br_chan->psm) {
1480 return -EEXIST;
1481 }
1482
1483 /* PSM must be odd and lsb of upper byte must be 0 */
1484 if ((psm & 0x0101) != 0x0001) {
1485 return -EINVAL;
1486 }
1487
1488 if (br_chan->required_sec_level > BT_SECURITY_L4) {
1489 return -EINVAL;
1490 } else if (br_chan->required_sec_level == BT_SECURITY_L0 &&
1491 psm != L2CAP_BR_PSM_SDP) {
1492 br_chan->required_sec_level = BT_SECURITY_L1;
1493 }
1494
1495 switch (br_chan->state) {
1496 case BT_L2CAP_CONNECTED:
1497 /* Already connected */
1498 return -EISCONN;
1499 case BT_L2CAP_DISCONNECTED:
1500 /* Can connect */
1501 break;
1502 case BT_L2CAP_CONFIG:
1503 case BT_L2CAP_DISCONNECTING:
1504 default:
1505 /* Bad context */
1506 return -EBUSY;
1507 }
1508
1509 if (!l2cap_br_chan_add(conn, chan, l2cap_br_chan_destroy)) {
1510 return -ENOMEM;
1511 }
1512
1513 br_chan->psm = psm;
1514 bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONNECTING);
1515 atomic_set_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_PENDING);
1516
1517 switch (l2cap_br_conn_security(chan, psm)) {
1518 case L2CAP_CONN_SECURITY_PENDING:
1519 /*
1520 * Authentication was triggered; wait and send the request from
1521 * the connection security-changed callback context.
1522 */
1523 return 0;
1524 case L2CAP_CONN_SECURITY_PASSED:
1525 break;
1526 case L2CAP_CONN_SECURITY_REJECT:
1527 default:
1528 l2cap_br_chan_cleanup(chan);
1529 return -EIO;
1530 }
1531
1532 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
1533
1534 hdr = net_buf_add(buf, sizeof(*hdr));
1535 hdr->code = BT_L2CAP_CONN_REQ;
1536 hdr->ident = l2cap_br_get_ident();
1537 hdr->len = sys_cpu_to_le16(sizeof(*req));
1538
1539 req = net_buf_add(buf, sizeof(*req));
1540 req->psm = sys_cpu_to_le16(psm);
1541 req->scid = sys_cpu_to_le16(BR_CHAN(chan)->rx.cid);
1542
1543 l2cap_br_chan_send_req(BR_CHAN(chan), buf, L2CAP_BR_CONN_TIMEOUT);
1544
1545 return 0;
1546 }
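
/* Outgoing connection sketch (channel, ops and PSM below are
 * illustrative):
 *
 *   static const struct bt_l2cap_chan_ops app_ops = {
 *       .connected = app_connected,
 *       .disconnected = app_disconnected,
 *       .recv = app_recv,
 *   };
 *   static struct bt_l2cap_br_chan app_chan = {
 *       .chan.ops = &app_ops,
 *       .rx.mtu = 672,
 *   };
 *
 *   err = bt_l2cap_br_chan_connect(conn, &app_chan.chan, 0x1001);
 *
 * connected() fires only after both configuration directions complete;
 * failures before that surface through disconnected() or the negative
 * return value here.
 */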
1547
1548 static void l2cap_br_conn_rsp(struct bt_l2cap_br *l2cap, uint8_t ident,
1549 struct net_buf *buf)
1550 {
1551 struct bt_conn *conn = l2cap->chan.chan.conn;
1552 struct bt_l2cap_chan *chan;
1553 struct bt_l2cap_conn_rsp *rsp = (void *)buf->data;
1554 uint16_t dcid, scid, result, status;
1555 struct bt_l2cap_br_chan *br_chan;
1556
1557 if (buf->len < sizeof(*rsp)) {
1558 LOG_ERR("Too small L2CAP conn rsp packet size");
1559 return;
1560 }
1561
1562 dcid = sys_le16_to_cpu(rsp->dcid);
1563 scid = sys_le16_to_cpu(rsp->scid);
1564 result = sys_le16_to_cpu(rsp->result);
1565 status = sys_le16_to_cpu(rsp->status);
1566
1567 LOG_DBG("dcid 0x%04x scid 0x%04x result %u status %u", dcid, scid, result, status);
1568
1569 chan = bt_l2cap_br_lookup_rx_cid(conn, scid);
1570 if (!chan) {
1571 LOG_ERR("No scid 0x%04x channel found", scid);
1572 return;
1573 }
1574
1575 br_chan = BR_CHAN(chan);
1576
1577 /* Release RTX work since got the response */
1578 k_work_cancel_delayable(&br_chan->rtx_work);
1579
1580 if (br_chan->state != BT_L2CAP_CONNECTING) {
1581 LOG_DBG("Invalid channel %p state %s", chan,
1582 bt_l2cap_chan_state_str(br_chan->state));
1583 return;
1584 }
1585
1586 switch (result) {
1587 case BT_L2CAP_BR_SUCCESS:
1588 br_chan->ident = 0U;
1589 BR_CHAN(chan)->tx.cid = dcid;
1590 l2cap_br_conf(chan);
1591 bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONFIG);
1592 atomic_clear_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_PENDING);
1593 break;
1594 case BT_L2CAP_BR_PENDING:
1595 k_work_reschedule(&br_chan->rtx_work, L2CAP_BR_CONN_TIMEOUT);
1596 break;
1597 default:
1598 l2cap_br_chan_cleanup(chan);
1599 break;
1600 }
1601 }
1602
1603 int bt_l2cap_br_chan_send_cb(struct bt_l2cap_chan *chan, struct net_buf *buf, bt_conn_tx_cb_t cb,
1604 void *user_data)
1605 {
1606 struct bt_l2cap_br_chan *br_chan;
1607
1608 if (!buf || !chan) {
1609 return -EINVAL;
1610 }
1611
1612 br_chan = BR_CHAN(chan);
1613
1614 LOG_DBG("chan %p buf %p len %zu", chan, buf, net_buf_frags_len(buf));
1615
1616 if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
1617 return -ENOTCONN;
1618 }
1619
1620 if (atomic_test_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN)) {
1621 return -ESHUTDOWN;
1622 }
1623
1624 if (buf->len > br_chan->tx.mtu) {
1625 return -EMSGSIZE;
1626 }
1627
1628 return bt_l2cap_br_send_cb(br_chan->chan.conn, br_chan->tx.cid, buf, cb, user_data);
1629 }
1630
1631 int bt_l2cap_br_chan_send(struct bt_l2cap_chan *chan, struct net_buf *buf)
1632 {
1633 return bt_l2cap_br_chan_send_cb(chan, buf, NULL, NULL);
1634 }
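
/* Data TX sketch ("data_pool" and "payload" are placeholders): allocate
 * the buffer with headroom for the ACL and basic L2CAP headers, fill it
 * and hand it over; payloads larger than the peer's MTU (tx.mtu) are
 * rejected with -EMSGSIZE.
 *
 *   struct net_buf *buf = bt_l2cap_create_pdu(&data_pool, 0);
 *
 *   net_buf_add_mem(buf, payload, payload_len);
 *   if (bt_l2cap_br_chan_send(chan, buf) < 0) {
 *       net_buf_unref(buf);
 *   }
 */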
1635
1636 static int l2cap_br_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
1637 {
1638 struct bt_l2cap_br *l2cap = CONTAINER_OF(chan, struct bt_l2cap_br, chan.chan);
1639 struct bt_l2cap_sig_hdr *hdr;
1640 uint16_t len;
1641
1642 if (buf->len < sizeof(*hdr)) {
1643 LOG_ERR("Too small L2CAP signaling PDU");
1644 return 0;
1645 }
1646
1647 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
1648 len = sys_le16_to_cpu(hdr->len);
1649
1650 LOG_DBG("Signaling code 0x%02x ident %u len %u", hdr->code, hdr->ident, len);
1651
1652 if (buf->len != len) {
1653 LOG_ERR("L2CAP length mismatch (%u != %u)", buf->len, len);
1654 return 0;
1655 }
1656
1657 if (!hdr->ident) {
1658 LOG_ERR("Invalid ident value in L2CAP PDU");
1659 return 0;
1660 }
1661
1662 switch (hdr->code) {
1663 case BT_L2CAP_INFO_RSP:
1664 l2cap_br_info_rsp(l2cap, hdr->ident, buf);
1665 break;
1666 case BT_L2CAP_INFO_REQ:
1667 l2cap_br_info_req(l2cap, hdr->ident, buf);
1668 break;
1669 case BT_L2CAP_DISCONN_REQ:
1670 l2cap_br_disconn_req(l2cap, hdr->ident, buf);
1671 break;
1672 case BT_L2CAP_CONN_REQ:
1673 l2cap_br_conn_req(l2cap, hdr->ident, buf);
1674 break;
1675 case BT_L2CAP_CONF_RSP:
1676 l2cap_br_conf_rsp(l2cap, hdr->ident, len, buf);
1677 break;
1678 case BT_L2CAP_CONF_REQ:
1679 l2cap_br_conf_req(l2cap, hdr->ident, len, buf);
1680 break;
1681 case BT_L2CAP_DISCONN_RSP:
1682 l2cap_br_disconn_rsp(l2cap, hdr->ident, buf);
1683 break;
1684 case BT_L2CAP_CONN_RSP:
1685 l2cap_br_conn_rsp(l2cap, hdr->ident, buf);
1686 break;
1687 default:
1688 LOG_WRN("Unknown/Unsupported L2CAP PDU code 0x%02x", hdr->code);
1689 l2cap_br_send_reject(chan->conn, hdr->ident,
1690 BT_L2CAP_REJ_NOT_UNDERSTOOD, NULL, 0);
1691 break;
1692 }
1693
1694 return 0;
1695 }
1696
1697 static void l2cap_br_conn_pend(struct bt_l2cap_chan *chan, uint8_t status)
1698 {
1699 struct net_buf *buf;
1700 struct bt_l2cap_sig_hdr *hdr;
1701 struct bt_l2cap_conn_req *req;
1702
1703 if (BR_CHAN(chan)->state != BT_L2CAP_CONNECTING) {
1704 return;
1705 }
1706
1707 LOG_DBG("chan %p status 0x%02x encr 0x%02x", chan, status, chan->conn->encrypt);
1708
1709 if (status) {
1710 /*
1711 * Security procedure status is non-zero, so respond with a
1712 * security violation (only done in the channel acceptor role).
1713 */
1714 l2cap_br_conn_req_reply(chan, BT_L2CAP_BR_ERR_SEC_BLOCK);
1715
1716 /* Release channel allocated to outgoing connection request */
1717 if (atomic_test_bit(BR_CHAN(chan)->flags,
1718 L2CAP_FLAG_CONN_PENDING)) {
1719 l2cap_br_chan_cleanup(chan);
1720 }
1721
1722 return;
1723 }
1724
1725 if (!chan->conn->encrypt) {
1726 return;
1727 }
1728
1729 /*
1730 * For an incoming connection, send the confirming outstanding
1731 * response and initiate the configuration request.
1732 */
1733 if (l2cap_br_conn_req_reply(chan, BT_L2CAP_BR_SUCCESS) == 0) {
1734 bt_l2cap_br_chan_set_state(chan, BT_L2CAP_CONFIG);
1735 /*
1736 * Initialize config request since remote needs to know
1737 * local MTU segmentation.
1738 */
1739 l2cap_br_conf(chan);
1740 } else if (atomic_test_and_clear_bit(BR_CHAN(chan)->flags,
1741 L2CAP_FLAG_CONN_PENDING)) {
1742 buf = bt_l2cap_create_pdu(&br_sig_pool, 0);
1743
1744 hdr = net_buf_add(buf, sizeof(*hdr));
1745 hdr->code = BT_L2CAP_CONN_REQ;
1746 hdr->ident = l2cap_br_get_ident();
1747 hdr->len = sys_cpu_to_le16(sizeof(*req));
1748
1749 req = net_buf_add(buf, sizeof(*req));
1750 req->psm = sys_cpu_to_le16(BR_CHAN(chan)->psm);
1751 req->scid = sys_cpu_to_le16(BR_CHAN(chan)->rx.cid);
1752
1753 l2cap_br_chan_send_req(BR_CHAN(chan), buf,
1754 L2CAP_BR_CONN_TIMEOUT);
1755 }
1756 }
1757
1758 void l2cap_br_encrypt_change(struct bt_conn *conn, uint8_t hci_status)
1759 {
1760 struct bt_l2cap_chan *chan;
1761
1762 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
1763 l2cap_br_conn_pend(chan, hci_status);
1764
1765 if (chan->ops && chan->ops->encrypt_change) {
1766 chan->ops->encrypt_change(chan, hci_status);
1767 }
1768 }
1769 }
1770
1771 static void check_fixed_channel(struct bt_l2cap_chan *chan)
1772 {
1773 struct bt_l2cap_br_chan *br_chan = BR_CHAN(chan);
1774
1775 if (br_chan->rx.cid < L2CAP_BR_CID_DYN_START) {
1776 connect_fixed_channel(br_chan);
1777 }
1778 }
1779
1780 void bt_l2cap_br_recv(struct bt_conn *conn, struct net_buf *buf)
1781 {
1782 struct bt_l2cap_hdr *hdr;
1783 struct bt_l2cap_chan *chan;
1784 uint16_t cid;
1785
1786 if (buf->len < sizeof(*hdr)) {
1787 LOG_ERR("Too small L2CAP PDU received");
1788 net_buf_unref(buf);
1789 return;
1790 }
1791
1792 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
1793 cid = sys_le16_to_cpu(hdr->cid);
1794
1795 chan = bt_l2cap_br_lookup_rx_cid(conn, cid);
1796 if (!chan) {
1797 LOG_WRN("Ignoring data for unknown channel ID 0x%04x", cid);
1798 net_buf_unref(buf);
1799 return;
1800 }
1801
1802 /*
1803 * If data was received on a fixed channel before the Information
1804 * Response, we connect the channel here.
1805 */
1806 check_fixed_channel(chan);
1807
1808 chan->ops->recv(chan, buf);
1809 net_buf_unref(buf);
1810 }
1811
1812 static int l2cap_br_accept(struct bt_conn *conn, struct bt_l2cap_chan **chan)
1813 {
1814 int i;
1815 static const struct bt_l2cap_chan_ops ops = {
1816 .connected = l2cap_br_connected,
1817 .disconnected = l2cap_br_disconnected,
1818 .recv = l2cap_br_recv,
1819 };
1820
1821 LOG_DBG("conn %p handle %u", conn, conn->handle);
1822
1823 for (i = 0; i < ARRAY_SIZE(bt_l2cap_br_pool); i++) {
1824 struct bt_l2cap_br *l2cap = &bt_l2cap_br_pool[i];
1825
1826 if (l2cap->chan.chan.conn) {
1827 continue;
1828 }
1829
1830 l2cap->chan.chan.ops = &ops;
1831 *chan = &l2cap->chan.chan;
1832 atomic_set(l2cap->chan.flags, 0);
1833 return 0;
1834 }
1835
1836 LOG_ERR("No available L2CAP context for conn %p", conn);
1837
1838 return -ENOMEM;
1839 }
1840
1841 BT_L2CAP_BR_CHANNEL_DEFINE(br_fixed_chan, BT_L2CAP_CID_BR_SIG, l2cap_br_accept);
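
/* Fixed channels register at build time through the bt_l2cap_br_fixed_chan
 * iterable section; other modules can hook in the same way, e.g. (accept
 * callback name illustrative, the real BR/EDR SMP channel lives in smp.c):
 *
 *   BT_L2CAP_BR_CHANNEL_DEFINE(smp_br_chan, BT_L2CAP_CID_BR_SMP,
 *                              smp_br_accept);
 */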
1842
1843 void bt_l2cap_br_init(void)
1844 {
1845 sys_slist_init(&br_servers);
1846
1847 if (IS_ENABLED(CONFIG_BT_RFCOMM)) {
1848 bt_rfcomm_init();
1849 }
1850
1851 if (IS_ENABLED(CONFIG_BT_AVDTP)) {
1852 bt_avdtp_init();
1853 }
1854
1855 bt_sdp_init();
1856
1857 if (IS_ENABLED(CONFIG_BT_A2DP)) {
1858 bt_a2dp_init();
1859 }
1860 }
1861