1 /* l2cap.c - L2CAP handling */
2
3 /*
4 * Copyright (c) 2015-2016 Intel Corporation
5 * Copyright (c) 2023 Nordic Semiconductor
6 *
7 * SPDX-License-Identifier: Apache-2.0
8 */
9
10 #include <zephyr/kernel.h>
11 #include <string.h>
12 #include <errno.h>
13 #include <zephyr/sys/atomic.h>
14 #include <zephyr/sys/iterable_sections.h>
15 #include <zephyr/sys/byteorder.h>
16 #include <zephyr/sys/math_extras.h>
17 #include <zephyr/sys/util.h>
18
19 #include <zephyr/bluetooth/hci.h>
20 #include <zephyr/bluetooth/bluetooth.h>
21 #include <zephyr/bluetooth/conn.h>
22 #include <zephyr/bluetooth/l2cap.h>
23 #include <zephyr/drivers/bluetooth/hci_driver.h>
24
25 #define LOG_DBG_ENABLED IS_ENABLED(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
26
27 #include "hci_core.h"
28 #include "conn_internal.h"
29 #include "l2cap_internal.h"
30 #include "keys.h"
31
32 #include <zephyr/logging/log.h>
33 LOG_MODULE_REGISTER(bt_l2cap, CONFIG_BT_L2CAP_LOG_LEVEL);
34
/* Recover the bt_l2cap_le_chan from its embedded delayable rtx_work item */
#define LE_CHAN_RTX(_w) CONTAINER_OF(k_work_delayable_from_work(_w), \
				     struct bt_l2cap_le_chan, rtx_work)
/* Recover the bt_l2cap_le_chan from its embedded rx_work item */
#define CHAN_RX(_w) CONTAINER_OF(_w, struct bt_l2cap_le_chan, rx_work)

/* Minimum MTU values for LE and enhanced-credit-based channels
 * (per Bluetooth Core Spec minimums — confirm against spec revision in use)
 */
#define L2CAP_LE_MIN_MTU		23
#define L2CAP_ECRED_MIN_MTU		64

/* Maximum credits granted: one less than the ACL RX buffer count */
#define L2CAP_LE_MAX_CREDITS		(CONFIG_BT_BUF_ACL_RX_COUNT - 1)

/* Dynamically allocated LE CID range */
#define L2CAP_LE_CID_DYN_START	0x0040
#define L2CAP_LE_CID_DYN_END	0x007f
#define L2CAP_LE_CID_IS_DYN(_cid) \
	(_cid >= L2CAP_LE_CID_DYN_START && _cid <= L2CAP_LE_CID_DYN_END)

/* LE PSM ranges: fixed (SIG-assigned) and dynamically allocated */
#define L2CAP_LE_PSM_FIXED_START	0x0001
#define L2CAP_LE_PSM_FIXED_END	0x007f
#define L2CAP_LE_PSM_DYN_START	0x0080
#define L2CAP_LE_PSM_DYN_END	0x00ff
#define L2CAP_LE_PSM_IS_DYN(_psm) \
	(_psm >= L2CAP_LE_PSM_DYN_START && _psm <= L2CAP_LE_PSM_DYN_END)

/* Signalling timeouts: connection requests, disconnect, and generic RTX */
#define L2CAP_CONN_TIMEOUT	K_SECONDS(40)
#define L2CAP_DISC_TIMEOUT	K_SECONDS(2)
#define L2CAP_RTX_TIMEOUT	K_SECONDS(2)
59
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Dedicated pool for disconnect buffers so they are guaranteed to be sent
 * even in case of data congestion due to flooding.
 */
NET_BUF_POOL_FIXED_DEFINE(disc_pool, 1,
			  BT_L2CAP_BUF_SIZE(
				sizeof(struct bt_l2cap_sig_hdr) +
				sizeof(struct bt_l2cap_disconn_req)),
			  CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);

#define l2cap_lookup_ident(conn, ident) __l2cap_lookup_ident(conn, ident, false)
#define l2cap_remove_ident(conn, ident) __l2cap_lookup_ident(conn, ident, true)

/* List of registered LE L2CAP servers (bt_l2cap_server_register) */
static sys_slist_t servers = SYS_SLIST_STATIC_INIT(&servers);

/* Drop a TX SDU that will not be sent. The conn and err parameters keep the
 * signature uniform across call sites but are currently unused; mark them so
 * to avoid -Wunused-parameter warnings.
 */
static void l2cap_tx_buf_destroy(struct bt_conn *conn, struct net_buf *buf, int err)
{
	ARG_UNUSED(conn);
	ARG_UNUSED(err);

	net_buf_unref(buf);
}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
80
/* L2CAP signalling channel specific context */
struct bt_l2cap {
	/* The channel this context is associated with */
	struct bt_l2cap_le_chan chan;
};

/* Application callbacks for ECRED events; may be NULL when unregistered */
static const struct bt_l2cap_ecred_cb *ecred_cb;
/* One signalling-channel context per possible ACL connection */
static struct bt_l2cap bt_l2cap_pool[CONFIG_BT_MAX_CONN];
89
/* Register (or replace) the ECRED event callback set used by the stack. */
void bt_l2cap_register_ecred_cb(const struct bt_l2cap_ecred_cb *cb)
{
	ecred_cb = cb;
}
94
get_ident(void)95 static uint8_t get_ident(void)
96 {
97 static uint8_t ident;
98
99 ident++;
100 /* handle integer overflow (0 is not valid) */
101 if (!ident) {
102 ident++;
103 }
104
105 return ident;
106 }
107
108 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_chan_alloc_cid(struct bt_conn * conn,struct bt_l2cap_chan * chan)109 static struct bt_l2cap_le_chan *l2cap_chan_alloc_cid(struct bt_conn *conn,
110 struct bt_l2cap_chan *chan)
111 {
112 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
113 uint16_t cid;
114
115 /*
116 * No action needed if there's already a CID allocated, e.g. in
117 * the case of a fixed channel.
118 */
119 if (le_chan->rx.cid > 0) {
120 return le_chan;
121 }
122
123 for (cid = L2CAP_LE_CID_DYN_START; cid <= L2CAP_LE_CID_DYN_END; cid++) {
124 if (!bt_l2cap_le_lookup_rx_cid(conn, cid)) {
125 le_chan->rx.cid = cid;
126 return le_chan;
127 }
128 }
129
130 return NULL;
131 }
132
/* Find the channel on @p conn whose pending signalling identifier equals
 * @p ident. When @p remove is true the matching channel is also unlinked
 * from the connection's channel list (used to tear down all channels that
 * belonged to the same request).
 *
 * Returns NULL when no channel uses @p ident.
 */
static struct bt_l2cap_le_chan *
__l2cap_lookup_ident(struct bt_conn *conn, uint16_t ident, bool remove)
{
	struct bt_l2cap_chan *chan;
	sys_snode_t *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (BT_L2CAP_LE_CHAN(chan)->ident == ident) {
			if (remove) {
				/* prev points at the node before the match so
				 * the singly-linked list can unlink in O(1).
				 */
				sys_slist_remove(&conn->channels, prev,
						 &chan->node);
			}
			return BT_L2CAP_LE_CHAN(chan);
		}

		prev = &chan->node;
	}

	return NULL;
}
153 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
154
bt_l2cap_chan_remove(struct bt_conn * conn,struct bt_l2cap_chan * ch)155 void bt_l2cap_chan_remove(struct bt_conn *conn, struct bt_l2cap_chan *ch)
156 {
157 struct bt_l2cap_chan *chan;
158 sys_snode_t *prev = NULL;
159
160 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
161 if (chan == ch) {
162 sys_slist_remove(&conn->channels, prev, &chan->node);
163 return;
164 }
165
166 prev = &chan->node;
167 }
168 }
169
/* Human-readable name for a channel state, for use in debug logs. */
const char *bt_l2cap_chan_state_str(bt_l2cap_chan_state_t state)
{
	static const struct {
		bt_l2cap_chan_state_t state;
		const char *name;
	} state_names[] = {
		{ BT_L2CAP_DISCONNECTED, "disconnected" },
		{ BT_L2CAP_CONNECTING, "connecting" },
		{ BT_L2CAP_CONFIG, "config" },
		{ BT_L2CAP_CONNECTED, "connected" },
		{ BT_L2CAP_DISCONNECTING, "disconnecting" },
	};

	for (size_t i = 0; i < ARRAY_SIZE(state_names); i++) {
		if (state_names[i].state == state) {
			return state_names[i].name;
		}
	}

	return "unknown";
}
187
188 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
189 #if defined(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
/* Debug variant of the state setter: logs the transition and warns when it
 * is not a legal step of the channel state machine. Invalid-but-known
 * transitions are still applied (matching the non-debug build); an unknown
 * target state is rejected outright.
 */
void bt_l2cap_chan_set_state_debug(struct bt_l2cap_chan *chan,
				   bt_l2cap_chan_state_t state,
				   const char *func, int line)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
	bool valid;

	LOG_DBG("chan %p psm 0x%04x %s -> %s", chan, le_chan->psm,
		bt_l2cap_chan_state_str(le_chan->state), bt_l2cap_chan_state_str(state));

	/* Determine whether the requested transition is legal */
	switch (state) {
	case BT_L2CAP_DISCONNECTED:
		/* Any state may fall back to disconnected */
		valid = true;
		break;
	case BT_L2CAP_CONNECTING:
		valid = (le_chan->state == BT_L2CAP_DISCONNECTED);
		break;
	case BT_L2CAP_CONFIG:
		valid = (le_chan->state == BT_L2CAP_CONNECTING);
		break;
	case BT_L2CAP_CONNECTED:
		valid = (le_chan->state == BT_L2CAP_CONFIG ||
			 le_chan->state == BT_L2CAP_CONNECTING);
		break;
	case BT_L2CAP_DISCONNECTING:
		valid = (le_chan->state == BT_L2CAP_CONFIG ||
			 le_chan->state == BT_L2CAP_CONNECTED);
		break;
	default:
		/* Unknown target state: refuse to store it */
		LOG_ERR("%s()%d: unknown (%u) state was set", func, line, state);
		return;
	}

	if (!valid) {
		LOG_WRN("%s()%d: invalid transition", func, line);
	}

	le_chan->state = state;
}
233 #else
/* Non-debug variant: store the new state without transition validation. */
void bt_l2cap_chan_set_state(struct bt_l2cap_chan *chan,
			     bt_l2cap_chan_state_t state)
{
	BT_L2CAP_LE_CHAN(chan)->state = state;
}
239 #endif /* CONFIG_BT_L2CAP_LOG_LEVEL_DBG */
240 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
241
/* Tear down @p chan: notify the owner, detach it from its connection and run
 * the destroy/released hooks. Safe to call for a channel that was never
 * attached (chan->conn == NULL); the disconnected notification is then
 * skipped.
 */
void bt_l2cap_chan_del(struct bt_l2cap_chan *chan)
{
	const struct bt_l2cap_chan_ops *ops = chan->ops;

	LOG_DBG("conn %p chan %p", chan->conn, chan);

	if (!chan->conn) {
		goto destroy;
	}

	if (ops->disconnected) {
		ops->disconnected(chan);
	}

	chan->conn = NULL;

destroy:
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Reset internal members of common channel */
	bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTED);
	BT_L2CAP_LE_CHAN(chan)->psm = 0U;
#endif
	if (chan->destroy) {
		chan->destroy(chan);
	}

	/* released runs last so the owner may recycle the channel memory */
	if (ops->released) {
		ops->released(chan);
	}
}
272
273 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* RTX (response timeout) handler: the peer did not answer a signalling
 * request in time, so drop the requesting channel and every other channel
 * that was pending on the same (e.g. ECRED) request identifier.
 */
static void l2cap_rtx_timeout(struct k_work *work)
{
	struct bt_l2cap_le_chan *chan = LE_CHAN_RTX(work);
	struct bt_conn *conn = chan->chan.conn;

	LOG_ERR("chan %p timeout", chan);

	bt_l2cap_chan_remove(conn, &chan->chan);
	bt_l2cap_chan_del(&chan->chan);

	/* Remove other channels if pending on the same ident.
	 * NOTE(review): chan->ident is read after bt_l2cap_chan_del(); this
	 * relies on del neither clearing ident nor freeing the memory behind
	 * chan — confirm against the destroy callbacks in use.
	 */
	while ((chan = l2cap_remove_ident(conn, chan->ident))) {
		bt_l2cap_chan_del(&chan->chan);
	}
}
289
290 static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
291 struct net_buf *buf);
292
l2cap_rx_process(struct k_work * work)293 static void l2cap_rx_process(struct k_work *work)
294 {
295 struct bt_l2cap_le_chan *ch = CHAN_RX(work);
296 struct net_buf *buf;
297
298 while ((buf = net_buf_get(&ch->rx_queue, K_NO_WAIT))) {
299 LOG_DBG("ch %p buf %p", ch, buf);
300 l2cap_chan_le_recv(ch, buf);
301 net_buf_unref(buf);
302 }
303 }
304 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
305
/* Attach @p chan to @p conn and record the destroy callback invoked when
 * the channel is deleted.
 */
void bt_l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
		       bt_l2cap_chan_destroy_t destroy)
{
	chan->conn = conn;
	chan->destroy = destroy;

	/* Attach channel to the connection */
	sys_slist_append(&conn->channels, &chan->node);

	LOG_DBG("conn %p chan %p", conn, chan);
}
316
/* Allocate a CID (dynamic-channel builds), attach @p chan to @p conn and
 * initialize the channel's work items. Returns false when no CID could be
 * allocated.
 */
static bool l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			   bt_l2cap_chan_destroy_t destroy)
{
	struct bt_l2cap_le_chan *le_chan;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	le_chan = l2cap_chan_alloc_cid(conn, chan);
#else
	le_chan = BT_L2CAP_LE_CHAN(chan);
#endif

	if (!le_chan) {
		LOG_ERR("Unable to allocate L2CAP channel ID");
		return false;
	}

	/* Start from a clean status word */
	atomic_clear(chan->status);

	bt_l2cap_chan_add(conn, chan, destroy);

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* All dynamic channels have the destroy handler which makes sure that
	 * the RTX work structure is properly released with a cancel sync.
	 * The fixed signal channel is only removed when disconnected and the
	 * disconnected handler is always called from the workqueue itself so
	 * canceling from there should always succeed.
	 */
	k_work_init_delayable(&le_chan->rtx_work, l2cap_rtx_timeout);

	if (L2CAP_LE_CID_IS_DYN(le_chan->rx.cid)) {
		/* Only dynamic channels receive SDUs through the RX queue */
		k_work_init(&le_chan->rx_work, l2cap_rx_process);
		k_fifo_init(&le_chan->rx_queue);
		bt_l2cap_chan_set_state(chan, BT_L2CAP_CONNECTING);
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

	return true;
}
355
/* ACL link established: instantiate every registered fixed channel on the
 * new connection. BR/EDR links are delegated to the BR-specific handler.
 */
void bt_l2cap_connected(struct bt_conn *conn)
{
	struct bt_l2cap_chan *chan;

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_connected(conn);
		return;
	}

	STRUCT_SECTION_FOREACH(bt_l2cap_fixed_chan, fchan) {
		struct bt_l2cap_le_chan *le_chan;

		/* A fixed channel may decline this connection */
		if (fchan->accept(conn, &chan) < 0) {
			continue;
		}

		le_chan = BT_L2CAP_LE_CHAN(chan);

		/* Fill up remaining fixed channel context attached in
		 * fchan->accept()
		 */
		le_chan->rx.cid = fchan->cid;
		le_chan->tx.cid = fchan->cid;

		if (!l2cap_chan_add(conn, chan, fchan->destroy)) {
			return;
		}

		if (chan->ops->connected) {
			chan->ops->connected(chan);
		}

		/* Always set output status to fixed channels */
		atomic_set_bit(chan->status, BT_L2CAP_STATUS_OUT);

		if (chan->ops->status) {
			chan->ops->status(chan, chan->status);
		}
	}
}
397
bt_l2cap_disconnected(struct bt_conn * conn)398 void bt_l2cap_disconnected(struct bt_conn *conn)
399 {
400 struct bt_l2cap_chan *chan, *next;
401
402 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
403 bt_l2cap_chan_del(chan);
404 }
405 }
406
/* Allocate a signalling PDU and fill in its command header.
 *
 * Disconnect requests draw from a dedicated pool so they can still be sent
 * while the shared pools are congested. Returns NULL if no buffer could be
 * obtained within the minimum RTX timeout.
 */
static struct net_buf *l2cap_create_le_sig_pdu(uint8_t code, uint8_t ident,
					       uint16_t len)
{
	struct net_buf_pool *pool = NULL;
	struct net_buf *buf;
	struct bt_l2cap_sig_hdr *hdr;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	if (code == BT_L2CAP_DISCONN_REQ) {
		pool = &disc_pool;
	}
#endif
	/* Don't wait more than the minimum RTX timeout of 2 seconds */
	buf = bt_l2cap_create_pdu_timeout(pool, 0, L2CAP_RTX_TIMEOUT);
	if (buf == NULL) {
		LOG_ERR("Unable to allocate buffer for op 0x%02x", code);
		return NULL;
	}

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = code;
	hdr->ident = ident;
	hdr->len = sys_cpu_to_le16(len);

	return buf;
}
436
437 /* Send the buffer and release it in case of failure.
438 * Any other cleanup in failure to send should be handled by the disconnected
439 * handler.
440 */
l2cap_send(struct bt_conn * conn,uint16_t cid,struct net_buf * buf)441 static inline int l2cap_send(struct bt_conn *conn, uint16_t cid, struct net_buf *buf)
442 {
443 int err = bt_l2cap_send(conn, cid, buf);
444
445 if (err) {
446 net_buf_unref(buf);
447 }
448
449 return err;
450 }
451
452 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Transmit a signalling request and, when it was queued successfully, arm
 * the channel's RTX timer with @p timeout.
 */
static void l2cap_chan_send_req(struct bt_l2cap_chan *chan,
				struct net_buf *buf, k_timeout_t timeout)
{
	int err = l2cap_send(chan->conn, BT_L2CAP_CID_LE_SIG, buf);

	if (err) {
		return;
	}

	/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part A] page 126:
	 *
	 * The value of this timer is implementation-dependent but the minimum
	 * initial value is 1 second and the maximum initial value is 60
	 * seconds. One RTX timer shall exist for each outstanding signaling
	 * request, including each Echo Request. The timer disappears on the
	 * final expiration, when the response is received, or the physical
	 * link is lost.
	 */
	k_work_reschedule(&(BT_L2CAP_LE_CHAN(chan)->rtx_work), timeout);
}
471
l2cap_le_conn_req(struct bt_l2cap_le_chan * ch)472 static int l2cap_le_conn_req(struct bt_l2cap_le_chan *ch)
473 {
474 struct net_buf *buf;
475 struct bt_l2cap_le_conn_req *req;
476
477 ch->ident = get_ident();
478
479 buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CONN_REQ,
480 ch->ident, sizeof(*req));
481 if (!buf) {
482 return -ENOMEM;
483 }
484
485 req = net_buf_add(buf, sizeof(*req));
486 req->psm = sys_cpu_to_le16(ch->psm);
487 req->scid = sys_cpu_to_le16(ch->rx.cid);
488 req->mtu = sys_cpu_to_le16(ch->rx.mtu);
489 req->mps = sys_cpu_to_le16(ch->rx.mps);
490 req->credits = sys_cpu_to_le16(ch->rx.credits);
491
492 l2cap_chan_send_req(&ch->chan, buf, L2CAP_CONN_TIMEOUT);
493
494 return 0;
495 }
496
497 #if defined(CONFIG_BT_L2CAP_ECRED)
l2cap_ecred_conn_req(struct bt_l2cap_chan ** chan,int channels)498 static int l2cap_ecred_conn_req(struct bt_l2cap_chan **chan, int channels)
499 {
500 struct net_buf *buf;
501 struct bt_l2cap_ecred_conn_req *req;
502 struct bt_l2cap_le_chan *ch;
503 int i;
504 uint8_t ident;
505 uint16_t req_psm;
506 uint16_t req_mtu;
507
508 if (!chan || !channels) {
509 return -EINVAL;
510 }
511
512 ident = get_ident();
513
514 buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_CONN_REQ, ident,
515 sizeof(*req) +
516 (channels * sizeof(uint16_t)));
517
518 req = net_buf_add(buf, sizeof(*req));
519
520 ch = BT_L2CAP_LE_CHAN(chan[0]);
521
522 /* Init common parameters */
523 req->psm = sys_cpu_to_le16(ch->psm);
524 req->mtu = sys_cpu_to_le16(ch->rx.mtu);
525 req->mps = sys_cpu_to_le16(ch->rx.mps);
526 req->credits = sys_cpu_to_le16(ch->rx.credits);
527 req_psm = ch->psm;
528 req_mtu = ch->tx.mtu;
529
530 for (i = 0; i < channels; i++) {
531 ch = BT_L2CAP_LE_CHAN(chan[i]);
532
533 __ASSERT(ch->psm == req_psm,
534 "The PSM shall be the same for channels in the same request.");
535 __ASSERT(ch->tx.mtu == req_mtu,
536 "The MTU shall be the same for channels in the same request.");
537
538 ch->ident = ident;
539
540 net_buf_add_le16(buf, ch->rx.cid);
541 }
542
543 l2cap_chan_send_req(*chan, buf, L2CAP_CONN_TIMEOUT);
544
545 return 0;
546 }
547 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
548
/* React to an encryption change for a channel whose connection request was
 * deferred pending security elevation: retry the (ECRED or LE) connect on
 * success, tear the channel down on failure.
 */
static void l2cap_le_encrypt_change(struct bt_l2cap_chan *chan, uint8_t status)
{
	int err;
	struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(chan);

	/* Skip channels that are not pending waiting for encryption */
	if (!atomic_test_and_clear_bit(chan->status,
				       BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
		return;
	}

	if (status) {
		goto fail;
	}

#if defined(CONFIG_BT_L2CAP_ECRED)
	if (le->ident) {
		/* Re-collect every sibling channel of the original ECRED
		 * request (they share the same ident) and retry them together.
		 */
		struct bt_l2cap_chan *echan[L2CAP_ECRED_CHAN_MAX_PER_REQ];
		struct bt_l2cap_chan *ch;
		int i = 0;

		SYS_SLIST_FOR_EACH_CONTAINER(&chan->conn->channels, ch, node) {
			if (le->ident == BT_L2CAP_LE_CHAN(ch)->ident) {
				__ASSERT(i < L2CAP_ECRED_CHAN_MAX_PER_REQ,
					 "There can only be L2CAP_ECRED_CHAN_MAX_PER_REQ channels "
					 "from the same request.");
				atomic_clear_bit(ch->status, BT_L2CAP_STATUS_ENCRYPT_PENDING);
				echan[i++] = ch;
			}
		}

		/* Retry ecred connect */
		l2cap_ecred_conn_req(echan, i);
		return;
	}
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */

	/* Retry to connect */
	err = l2cap_le_conn_req(le);
	if (err) {
		goto fail;
	}

	return;
fail:
	bt_l2cap_chan_remove(chan->conn, chan);
	bt_l2cap_chan_del(chan);
}
597 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
598
/* Propagate an encryption change on @p conn to every channel: retry pending
 * secure connects (dynamic-channel builds) and invoke each channel's
 * encrypt_change callback.
 */
void bt_l2cap_security_changed(struct bt_conn *conn, uint8_t hci_status)
{
	struct bt_l2cap_chan *chan, *next;

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		l2cap_br_encrypt_change(conn, hci_status);
		return;
	}

	/* Safe iteration: a failed retry may remove the current channel */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
		l2cap_le_encrypt_change(chan, hci_status);
#endif

		if (chan->ops->encrypt_change) {
			chan->ops->encrypt_change(chan, hci_status);
		}
	}
}
619
/* Allocate an outgoing L2CAP PDU, reserving headroom for the basic L2CAP
 * header in addition to the @p reserve bytes requested by the caller.
 */
struct net_buf *bt_l2cap_create_pdu_timeout(struct net_buf_pool *pool,
					    size_t reserve,
					    k_timeout_t timeout)
{
	return bt_conn_create_pdu_timeout(pool,
					  sizeof(struct bt_l2cap_hdr) + reserve,
					  timeout);
}
628
/* Prepend the basic L2CAP header (payload length + CID) to @p buf and hand
 * it to the connection layer; @p cb and @p user_data are forwarded to the
 * TX completion callback.
 */
int bt_l2cap_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
		     bt_conn_tx_cb_t cb, void *user_data)
{
	struct bt_l2cap_hdr *hdr;
	uint16_t payload_len = buf->len;

	LOG_DBG("conn %p cid %u len %zu", conn, cid, net_buf_frags_len(buf));

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->len = sys_cpu_to_le16(payload_len);
	hdr->cid = sys_cpu_to_le16(cid);

	return bt_conn_send_cb(conn, buf, cb, user_data);
}
642
/* Send an L2CAP Command Reject for @p ident with @p reason, optionally
 * appending @p data_len bytes of reason-specific data.
 */
static void l2cap_send_reject(struct bt_conn *conn, uint8_t ident,
			      uint16_t reason, void *data, uint8_t data_len)
{
	struct net_buf *buf;
	struct bt_l2cap_cmd_reject *rej;

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_CMD_REJECT, ident,
				      sizeof(*rej) + data_len);
	if (buf == NULL) {
		return;
	}

	rej = net_buf_add(buf, sizeof(*rej));
	rej->reason = sys_cpu_to_le16(reason);

	if (data != NULL) {
		net_buf_add_mem(buf, data, data_len);
	}

	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
}
664
/* Handle an incoming LE Connection Parameter Update Response. The result is
 * only logged; no local state changes.
 */
static void le_conn_param_rsp(struct bt_l2cap *l2cap, struct net_buf *buf)
{
	struct bt_l2cap_conn_param_rsp *rsp = (void *)buf->data;

	/* Guard against a truncated packet */
	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small LE conn param rsp");
		return;
	}

	LOG_DBG("LE conn param rsp result %u", sys_le16_to_cpu(rsp->result));
}
676
/* Handle an LE Connection Parameter Update Request from the peer.
 *
 * Only valid when we are central; otherwise the command is rejected as not
 * understood. The host (le_param_req) decides whether to accept, a response
 * is sent either way, and accepted parameters are applied to the link.
 */
static void le_conn_param_update_req(struct bt_l2cap *l2cap, uint8_t ident,
				     struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_conn_param_req *req = (void *)buf->data;
	struct bt_l2cap_conn_param_rsp *rsp;
	struct bt_le_conn_param param;
	bool accept;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn update param req");
		return;
	}

	if (conn->state != BT_CONN_CONNECTED) {
		LOG_WRN("Not connected");
		return;
	}

	if (conn->role != BT_HCI_ROLE_CENTRAL) {
		/* Only the central may apply connection parameters */
		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_NOT_UNDERSTOOD,
				  NULL, 0);
		return;
	}

	param.interval_min = sys_le16_to_cpu(req->min_interval);
	param.interval_max = sys_le16_to_cpu(req->max_interval);
	param.latency = sys_le16_to_cpu(req->latency);
	param.timeout = sys_le16_to_cpu(req->timeout);

	LOG_DBG("min 0x%04x max 0x%04x latency: 0x%04x timeout: 0x%04x", param.interval_min,
		param.interval_max, param.latency, param.timeout);

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_CONN_PARAM_RSP, ident,
				      sizeof(*rsp));
	if (buf == NULL) {
		return;
	}

	accept = le_param_req(conn, &param);

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->result = accept ? sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_ACCEPTED)
			     : sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_REJECTED);

	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);

	if (accept) {
		bt_conn_le_conn_update(conn, &param);
	}
}
731
/* Find the channel on @p conn whose TX (peer-assigned) CID equals @p cid,
 * or NULL when none matches.
 */
struct bt_l2cap_chan *bt_l2cap_le_lookup_tx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *ch;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, ch, node) {
		if (BT_L2CAP_LE_CHAN(ch)->tx.cid != cid) {
			continue;
		}

		return ch;
	}

	return NULL;
}
745
/* Find the channel on @p conn whose RX (locally-assigned) CID equals
 * @p cid, or NULL when none matches.
 */
struct bt_l2cap_chan *bt_l2cap_le_lookup_rx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *ch;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, ch, node) {
		if (BT_L2CAP_LE_CHAN(ch)->rx.cid != cid) {
			continue;
		}

		return ch;
	}

	return NULL;
}
759
760 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Find a registered LE server by PSM, or NULL when none matches. */
struct bt_l2cap_server *bt_l2cap_server_lookup_psm(uint16_t psm)
{
	struct bt_l2cap_server *srv;

	SYS_SLIST_FOR_EACH_CONTAINER(&servers, srv, node) {
		if (srv->psm == psm) {
			return srv;
		}
	}

	return NULL;
}
773
/* Register an LE L2CAP server.
 *
 * A zero PSM requests dynamic allocation from the 0x0080-0x00ff range; a
 * non-zero PSM must lie within the valid LE PSM space and be unused. The
 * security level is clamped to at least L1 (level 0 is BR/EDR only).
 *
 * Returns 0 on success, -EINVAL on bad parameters, -EADDRINUSE when the PSM
 * is taken, -EADDRNOTAVAIL when no dynamic PSM is free.
 */
int bt_l2cap_server_register(struct bt_l2cap_server *server)
{
	if (!server->accept) {
		return -EINVAL;
	}

	if (server->psm) {
		if (server->psm < L2CAP_LE_PSM_FIXED_START ||
		    server->psm > L2CAP_LE_PSM_DYN_END) {
			return -EINVAL;
		}

		/* Check if given PSM is already in use */
		if (bt_l2cap_server_lookup_psm(server->psm)) {
			LOG_DBG("PSM already registered");
			return -EADDRINUSE;
		}
	} else {
		uint16_t psm;

		/* First-fit scan of the dynamic PSM range */
		for (psm = L2CAP_LE_PSM_DYN_START;
		     psm <= L2CAP_LE_PSM_DYN_END; psm++) {
			if (!bt_l2cap_server_lookup_psm(psm)) {
				break;
			}
		}

		if (psm > L2CAP_LE_PSM_DYN_END) {
			LOG_WRN("No free dynamic PSMs available");
			return -EADDRNOTAVAIL;
		}

		LOG_DBG("Allocated PSM 0x%04x for new server", psm);
		/* NOTE(review): psm is stored before the sec_level validation
		 * below, so a failed registration can still mutate
		 * server->psm — confirm callers don't rely on it staying
		 * untouched on error.
		 */
		server->psm = psm;
	}

	if (server->sec_level > BT_SECURITY_L4) {
		return -EINVAL;
	} else if (server->sec_level < BT_SECURITY_L1) {
		/* Level 0 is only applicable for BR/EDR */
		server->sec_level = BT_SECURITY_L1;
	}

	LOG_DBG("PSM 0x%04x", server->psm);

	sys_slist_append(&servers, &server->node);

	return 0;
}
823
824 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
/* Initialize RX bookkeeping for the seg_recv (segment delivery) API. */
static void l2cap_chan_seg_recv_rx_init(struct bt_l2cap_le_chan *chan)
{
	if (chan->rx.mps > BT_L2CAP_RX_MTU) {
		/* MPS cannot exceed what fits in a stack RX buffer */
		LOG_ERR("Limiting RX MPS by stack buffer size.");
		chan->rx.mps = BT_L2CAP_RX_MTU;
	}

	/* No SDU reassembly in progress yet */
	chan->_sdu_len = 0;
	chan->_sdu_len_done = 0;
}
835 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
836
/* Initialize the RX side of a dynamic channel: pick an MTU if the owner did
 * not, derive the MPS from it, and grant the initial credit.
 */
static void l2cap_chan_rx_init(struct bt_l2cap_le_chan *chan)
{
	LOG_DBG("chan %p", chan);

	/* Redirect to experimental API. */
	IF_ENABLED(CONFIG_BT_L2CAP_SEG_RECV, ({
		if (chan->chan.ops->seg_recv) {
			l2cap_chan_seg_recv_rx_init(chan);
			return;
		}
	}))

	/* Use existing MTU if defined */
	if (!chan->rx.mtu) {
		/* If the application has not provided the incoming L2CAP SDU
		 * MTU, use an MTU that does not require segmentation.
		 */
		chan->rx.mtu = BT_L2CAP_SDU_RX_MTU;
	}

	/* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE as the
	 * remaining bytes cannot be used.
	 */
	chan->rx.mps = MIN(chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE,
			   BT_L2CAP_RX_MTU);

	/* Truncate MTU if channel have disabled segmentation but still have
	 * set an MTU which requires it.
	 */
	if (!chan->chan.ops->alloc_buf &&
	    (chan->rx.mps < chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE)) {
		LOG_WRN("Segmentation disabled but MTU > MPS, truncating MTU");
		chan->rx.mtu = chan->rx.mps - BT_L2CAP_SDU_HDR_SIZE;
	}

	/* Grant a single initial credit */
	atomic_set(&chan->rx.credits, 1);
}
874
l2cap_chan_le_get_tx_buf(struct bt_l2cap_le_chan * ch)875 static struct net_buf *l2cap_chan_le_get_tx_buf(struct bt_l2cap_le_chan *ch)
876 {
877 struct net_buf *buf;
878
879 /* Return current buffer */
880 if (ch->tx_buf) {
881 buf = ch->tx_buf;
882 ch->tx_buf = NULL;
883 return buf;
884 }
885
886 return net_buf_get(&ch->tx_queue, K_NO_WAIT);
887 }
888
889 static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
890 struct net_buf **buf);
891
/* TX workqueue handler: keep sending queued SDUs until the queue drains or
 * sending has to pause (e.g. out of credits/buffers).
 */
static void l2cap_chan_tx_process(struct k_work *work)
{
	struct bt_l2cap_le_chan *ch;
	struct net_buf *buf;
	int ret;

	ch = CONTAINER_OF(k_work_delayable_from_work(work), struct bt_l2cap_le_chan, tx_work);

	/* Resume tx in case there are buffers in the queue */
	while ((buf = l2cap_chan_le_get_tx_buf(ch))) {
		/* Here buf is either:
		 * - a partially-sent SDU le_chan->tx_buf
		 * - a new SDU from the TX queue
		 */
		LOG_DBG("chan %p buf %p", ch, buf);

		ret = l2cap_chan_le_send_sdu(ch, &buf);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/* Park the SDU so the next run resumes it */
				ch->tx_buf = buf;
				/* If we don't reschedule, and the app doesn't nudge l2cap (e.g. by
				 * sending another SDU), the channel will be stuck in limbo. To
				 * prevent this, we reschedule with a configurable delay.
				 */
				k_work_schedule(&ch->tx_work, K_MSEC(CONFIG_BT_L2CAP_RESCHED_MS));
			} else {
				LOG_WRN("Failed to send (err %d), dropping buf %p", ret, buf);
				l2cap_tx_buf_destroy(ch->chan.conn, buf, ret);
			}
			break;
		}
	}
}
925
/* Reset the TX side of @p chan: zero the parameters and credits, and set up
 * the queue and delayable work item that drive the TX path.
 */
static void l2cap_chan_tx_init(struct bt_l2cap_le_chan *chan)
{
	LOG_DBG("chan %p", chan);

	(void)memset(&chan->tx, 0, sizeof(chan->tx));
	atomic_set(&chan->tx.credits, 0);
	k_fifo_init(&chan->tx_queue);
	k_work_init_delayable(&chan->tx_work, l2cap_chan_tx_process);
}
935
/* Add TX credits and, on the unpause edge (OUT status bit transitioning
 * from clear to set), notify the channel owner via the status callback.
 */
static void l2cap_chan_tx_give_credits(struct bt_l2cap_le_chan *chan,
				       uint16_t credits)
{
	LOG_DBG("chan %p credits %u", chan, credits);

	atomic_add(&chan->tx.credits, credits);

	if (!atomic_test_and_set_bit(chan->chan.status, BT_L2CAP_STATUS_OUT)) {
		LOG_DBG("chan %p unpaused", chan);
		if (chan->chan.ops->status) {
			chan->chan.ops->status(&chan->chan, chan->chan.status);
		}
	}
}
950
/* Destroy callback for dynamic channels: cancel the RTX timer and release
 * every buffer the channel still owns (partial TX SDU, TX queue, RX queue,
 * partially reassembled RX SDU) so the channel can be re-used.
 */
static void l2cap_chan_destroy(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
	struct net_buf *buf;

	LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->rx.cid);

	/* Cancel ongoing work. Since the channel can be re-used after this
	 * we need to sync to make sure that the kernel does not have it
	 * in its queue anymore.
	 *
	 * In the case where we are in the context of executing the rtx_work
	 * item, we don't sync as it will deadlock the workqueue.
	 */
	struct k_work_q *rtx_work_queue = le_chan->rtx_work.queue;

	if (rtx_work_queue == NULL || k_current_get() != &rtx_work_queue->thread) {
		k_work_cancel_delayable_sync(&le_chan->rtx_work, &le_chan->rtx_sync);
	} else {
		k_work_cancel_delayable(&le_chan->rtx_work);
	}

	/* Drop a partially-sent SDU, if any */
	if (le_chan->tx_buf) {
		l2cap_tx_buf_destroy(chan->conn, le_chan->tx_buf, -ESHUTDOWN);
		le_chan->tx_buf = NULL;
	}

	/* Remove buffers on the TX queue */
	while ((buf = net_buf_get(&le_chan->tx_queue, K_NO_WAIT))) {
		l2cap_tx_buf_destroy(chan->conn, buf, -ESHUTDOWN);
	}

	/* Remove buffers on the RX queue */
	while ((buf = net_buf_get(&le_chan->rx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Destroy segmented SDU if it exists */
	if (le_chan->_sdu) {
		net_buf_unref(le_chan->_sdu);
		le_chan->_sdu = NULL;
		le_chan->_sdu_len = 0U;
	}
}
995
/* Map a POSIX-style error returned by a server accept callback onto the
 * L2CAP LE credit-based connection result code sent to the peer.
 */
static uint16_t le_err_to_result(int err)
{
	switch (err) {
	case -ENOMEM:
		return BT_L2CAP_LE_ERR_NO_RESOURCES;
	case -EACCES:
		return BT_L2CAP_LE_ERR_AUTHORIZATION;
	case -EPERM:
		return BT_L2CAP_LE_ERR_KEY_SIZE;
	case -ENOTSUP:
		/* This handles the cases where a fixed channel is registered
		 * but for some reason (e.g. controller not supporting a
		 * feature) cannot be used.
		 */
		return BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
	default:
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
}
1015
/* Validate an incoming LE credit-based connection request against
 * @p server, ask the server to accept it and, on success, fully initialize
 * the new channel (TX parameters from the request, RX parameters locally).
 *
 * Returns an L2CAP LE result code; on BT_L2CAP_LE_SUCCESS the new channel
 * is stored in @p chan.
 */
static uint16_t l2cap_chan_accept(struct bt_conn *conn,
				  struct bt_l2cap_server *server, uint16_t scid,
				  uint16_t mtu, uint16_t mps, uint16_t credits,
				  struct bt_l2cap_chan **chan)
{
	struct bt_l2cap_le_chan *le_chan;
	int err;

	LOG_DBG("conn %p scid 0x%04x chan %p", conn, scid, chan);

	if (!L2CAP_LE_CID_IS_DYN(scid)) {
		return BT_L2CAP_LE_ERR_INVALID_SCID;
	}

	/* The peer-side CID must not already be bound to a channel */
	*chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
	if (*chan) {
		return BT_L2CAP_LE_ERR_SCID_IN_USE;
	}

	/* Request server to accept the new connection and allocate the
	 * channel.
	 */
	err = server->accept(conn, server, chan);
	if (err < 0) {
		return le_err_to_result(err);
	}

#if defined(CONFIG_BT_L2CAP_SEG_RECV)
	/* recv and seg_recv are mutually exclusive delivery models */
	if (!(*chan)->ops->recv == !(*chan)->ops->seg_recv) {
		LOG_ERR("Exactly one of 'recv' or 'seg_recv' must be set");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
#else
	if (!(*chan)->ops->recv) {
		LOG_ERR("Mandatory callback 'recv' missing");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
#endif

	le_chan = BT_L2CAP_LE_CHAN(*chan);

	le_chan->required_sec_level = server->sec_level;

	if (!l2cap_chan_add(conn, *chan, l2cap_chan_destroy)) {
		return BT_L2CAP_LE_ERR_NO_RESOURCES;
	}

	/* Init TX parameters (the peer's RX capabilities from the request) */
	l2cap_chan_tx_init(le_chan);
	le_chan->tx.cid = scid;
	le_chan->tx.mps = mps;
	le_chan->tx.mtu = mtu;
	l2cap_chan_tx_give_credits(le_chan, credits);

	/* Init RX parameters */
	l2cap_chan_rx_init(le_chan);

	/* Set channel PSM */
	le_chan->psm = server->psm;

	/* Update state */
	bt_l2cap_chan_set_state(*chan, BT_L2CAP_CONNECTED);

	return BT_L2CAP_LE_SUCCESS;
}
1081
l2cap_check_security(struct bt_conn * conn,struct bt_l2cap_server * server)1082 static uint16_t l2cap_check_security(struct bt_conn *conn,
1083 struct bt_l2cap_server *server)
1084 {
1085 if (IS_ENABLED(CONFIG_BT_CONN_DISABLE_SECURITY)) {
1086 return BT_L2CAP_LE_SUCCESS;
1087 }
1088
1089 if (conn->sec_level >= server->sec_level) {
1090 return BT_L2CAP_LE_SUCCESS;
1091 }
1092
1093 if (conn->sec_level > BT_SECURITY_L1) {
1094 return BT_L2CAP_LE_ERR_AUTHENTICATION;
1095 }
1096
1097 /* If an LTK or an STK is available and encryption is required
1098 * (LE security mode 1) but encryption is not enabled, the
1099 * service request shall be rejected with the error code
1100 * "Insufficient Encryption".
1101 */
1102 if (bt_conn_ltk_present(conn)) {
1103 return BT_L2CAP_LE_ERR_ENCRYPTION;
1104 }
1105
1106 return BT_L2CAP_LE_ERR_AUTHENTICATION;
1107 }
1108
/* Handle an incoming LE credit-based connection request signaling PDU.
 *
 * Parses and validates the request, looks up the server registered for the
 * requested PSM, verifies link security, and attempts to accept the
 * channel; in all cases (except malformed input or buffer exhaustion) a
 * connection response carrying the outcome is sent back with the same
 * signaling identifier.
 */
static void le_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_le_chan *le_chan;
	struct bt_l2cap_server *server;
	struct bt_l2cap_le_conn_req *req = (void *)buf->data;
	struct bt_l2cap_le_conn_rsp *rsp;
	uint16_t psm, scid, mtu, mps, credits;
	uint16_t result;

	/* Malformed (truncated) request: silently drop, no response. */
	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		return;
	}

	psm = sys_le16_to_cpu(req->psm);
	scid = sys_le16_to_cpu(req->scid);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	LOG_DBG("psm 0x%02x scid 0x%04x mtu %u mps %u credits %u", psm, scid, mtu, mps, credits);

	/* Spec-mandated minimums for LE credit-based channels. */
	if (mtu < L2CAP_LE_MIN_MTU || mps < L2CAP_LE_MIN_MTU) {
		LOG_ERR("Invalid LE-Conn Req params: mtu %u mps %u", mtu, mps);
		return;
	}

	/* Note: `buf` is reused for the response PDU from here on. */
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CONN_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));

	/* Check if there is a server registered */
	server = bt_l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto rsp;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto rsp;
	}

	result = l2cap_chan_accept(conn, server, scid, mtu, mps, credits,
				   &chan);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto rsp;
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	/* Prepare response protocol data */
	rsp->dcid = sys_cpu_to_le16(le_chan->rx.cid);
	rsp->mps = sys_cpu_to_le16(le_chan->rx.mps);
	rsp->mtu = sys_cpu_to_le16(le_chan->rx.mtu);
	rsp->credits = sys_cpu_to_le16(le_chan->rx.credits);

	result = BT_L2CAP_LE_SUCCESS;

rsp:
	rsp->result = sys_cpu_to_le16(result);

	if (l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf)) {
		return;
	}

	/* Raise connected callback on success */
	if ((result == BT_L2CAP_LE_SUCCESS) && (chan->ops->connected != NULL)) {
		chan->ops->connected(chan);
	}
}
1189
1190 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Handle an enhanced credit-based connection request, which may ask for up
 * to L2CAP_ECRED_CHAN_MAX_PER_REQ channels in a single PDU.
 *
 * Each requested source CID is accepted or refused individually; the
 * response carries one destination CID per requested channel, with 0x0000
 * marking channels that were not established. `result` reflects the last
 * failure seen (or success if all were accepted).
 */
static void le_ecred_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan[L2CAP_ECRED_CHAN_MAX_PER_REQ];
	struct bt_l2cap_le_chan *ch = NULL;
	struct bt_l2cap_server *server;
	struct bt_l2cap_ecred_conn_req *req;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t mtu, mps, credits, result = BT_L2CAP_LE_SUCCESS;
	uint16_t psm = 0x0000;
	uint16_t scid, dcid[L2CAP_ECRED_CHAN_MAX_PER_REQ];
	int i = 0;
	uint8_t req_cid_count;
	bool rsp_queued = false;

	/* set dcid to zeros here, in case of all connections refused error */
	memset(dcid, 0, sizeof(dcid));
	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		req_cid_count = 0;
		goto response;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));
	/* Remaining payload is the list of source CIDs. */
	req_cid_count = buf->len / sizeof(scid);

	/* More CIDs than the spec allows per request. */
	if (buf->len > sizeof(dcid)) {
		LOG_ERR("Too large LE conn req packet size");
		req_cid_count = L2CAP_ECRED_CHAN_MAX_PER_REQ;
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	psm = sys_le16_to_cpu(req->psm);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	LOG_DBG("psm 0x%02x mtu %u mps %u credits %u", psm, mtu, mps, credits);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MTU) {
		LOG_ERR("Invalid ecred conn req params. mtu %u mps %u", mtu, mps);
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	/* Check if there is a server registered */
	server = bt_l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto response;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto response;
	}

	/* Try to accept each requested source CID individually. */
	while (buf->len >= sizeof(scid)) {
		uint16_t rc;
		scid = net_buf_pull_le16(buf);

		rc = l2cap_chan_accept(conn, server, scid, mtu, mps,
				       credits, &chan[i]);
		if (rc != BT_L2CAP_LE_SUCCESS) {
			result = rc;
		}
		switch (rc) {
		case BT_L2CAP_LE_SUCCESS:
			ch = BT_L2CAP_LE_CHAN(chan[i]);
			dcid[i++] = sys_cpu_to_le16(ch->rx.cid);
			continue;
		/* Some connections refused – invalid Source CID */
		/* Some connections refused – Source CID already allocated */
		/* Some connections refused – not enough resources
		 * available.
		 */
		default:
			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			dcid[i++] = 0x0000;
			continue;
		}
	}

response:
	/* Note: `buf` is reused for the response PDU from here on. */
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_CONN_RSP, ident,
				      sizeof(*rsp) +
				      (sizeof(scid) * req_cid_count));
	if (!buf) {
		goto callback;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));
	/* mps/mtu/credits are taken from the last successfully accepted
	 * channel; they are the same for all channels of the request.
	 */
	if (ch) {
		rsp->mps = sys_cpu_to_le16(ch->rx.mps);
		rsp->mtu = sys_cpu_to_le16(ch->rx.mtu);
		rsp->credits = sys_cpu_to_le16(ch->rx.credits);
	}
	rsp->result = sys_cpu_to_le16(result);

	net_buf_add_mem(buf, dcid, sizeof(scid) * req_cid_count);

	if (l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf)) {
		goto callback;
	}

	rsp_queued = true;

callback:
	if (ecred_cb && ecred_cb->ecred_conn_req) {
		ecred_cb->ecred_conn_req(conn, result, psm);
	}
	if (rsp_queued) {
		for (i = 0; i < req_cid_count; i++) {
			/* Raise connected callback for established channels */
			if ((dcid[i] != 0x00) && (chan[i]->ops->connected != NULL)) {
				chan[i]->ops->connected(chan[i]);
			}
		}
	}
}
1318
le_ecred_reconf_req(struct bt_l2cap * l2cap,uint8_t ident,struct net_buf * buf)1319 static void le_ecred_reconf_req(struct bt_l2cap *l2cap, uint8_t ident,
1320 struct net_buf *buf)
1321 {
1322 struct bt_conn *conn = l2cap->chan.chan.conn;
1323 struct bt_l2cap_chan *chans[L2CAP_ECRED_CHAN_MAX_PER_REQ];
1324 struct bt_l2cap_ecred_reconf_req *req;
1325 struct bt_l2cap_ecred_reconf_rsp *rsp;
1326 uint16_t mtu, mps;
1327 uint16_t scid, result = BT_L2CAP_RECONF_SUCCESS;
1328 int chan_count = 0;
1329 bool mps_reduced = false;
1330
1331 if (buf->len < sizeof(*req)) {
1332 LOG_ERR("Too small ecred reconf req packet size");
1333 return;
1334 }
1335
1336 req = net_buf_pull_mem(buf, sizeof(*req));
1337
1338 mtu = sys_le16_to_cpu(req->mtu);
1339 mps = sys_le16_to_cpu(req->mps);
1340
1341 if (mps < L2CAP_ECRED_MIN_MTU) {
1342 result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
1343 goto response;
1344 }
1345
1346 if (mtu < L2CAP_ECRED_MIN_MTU) {
1347 result = BT_L2CAP_RECONF_INVALID_MTU;
1348 goto response;
1349 }
1350
1351 /* The specification only allows up to 5 CIDs in this packet */
1352 if (buf->len > (L2CAP_ECRED_CHAN_MAX_PER_REQ * sizeof(scid))) {
1353 result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
1354 goto response;
1355 }
1356
1357 while (buf->len >= sizeof(scid)) {
1358 struct bt_l2cap_chan *chan;
1359 scid = net_buf_pull_le16(buf);
1360 chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
1361 if (!chan) {
1362 result = BT_L2CAP_RECONF_INVALID_CID;
1363 goto response;
1364 }
1365
1366 if (BT_L2CAP_LE_CHAN(chan)->tx.mtu > mtu) {
1367 LOG_ERR("chan %p decreased MTU %u -> %u", chan,
1368 BT_L2CAP_LE_CHAN(chan)->tx.mtu, mtu);
1369 result = BT_L2CAP_RECONF_INVALID_MTU;
1370 goto response;
1371 }
1372
1373 if (BT_L2CAP_LE_CHAN(chan)->tx.mps > mps) {
1374 mps_reduced = true;
1375 }
1376
1377 chans[chan_count] = chan;
1378 chan_count++;
1379 }
1380
1381 /* As per BT Core Spec V5.2 Vol. 3, Part A, section 7.11
1382 * The request (...) shall not decrease the MPS of a channel
1383 * if more than one channel is specified.
1384 */
1385 if (mps_reduced && chan_count > 1) {
1386 result = BT_L2CAP_RECONF_INVALID_MPS;
1387 goto response;
1388 }
1389
1390 for (int i = 0; i < chan_count; i++) {
1391 BT_L2CAP_LE_CHAN(chans[i])->tx.mtu = mtu;
1392 BT_L2CAP_LE_CHAN(chans[i])->tx.mps = mps;
1393
1394 if (chans[i]->ops->reconfigured) {
1395 chans[i]->ops->reconfigured(chans[i]);
1396 }
1397 }
1398
1399 LOG_DBG("mtu %u mps %u", mtu, mps);
1400
1401 response:
1402 buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_RECONF_RSP, ident,
1403 sizeof(*rsp));
1404
1405 rsp = net_buf_add(buf, sizeof(*rsp));
1406 rsp->result = sys_cpu_to_le16(result);
1407
1408 l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
1409 }
1410
/* Handle an enhanced credit-based reconfiguration response for a request we
 * sent earlier: on success apply the pending RX MTU to every channel that
 * was part of the request (identified by the signaling ident).
 */
static void le_ecred_reconf_rsp(struct bt_l2cap *l2cap, uint8_t ident,
				struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_ecred_reconf_rsp *rsp;
	struct bt_l2cap_le_chan *ch;
	uint16_t result;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small ecred reconf rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	result = sys_le16_to_cpu(rsp->result);

	/* Iterate every channel that carries this ident; clearing ch->ident
	 * below makes the lookup advance to the next matching channel.
	 */
	while ((ch = l2cap_lookup_ident(conn, ident))) {
		/* Stop timer started on REQ send. The timer is only set on one
		 * of the channels, but we don't want to make assumptions on
		 * which one it is.
		 */
		k_work_cancel_delayable(&ch->rtx_work);

		if (result == BT_L2CAP_LE_SUCCESS) {
			ch->rx.mtu = ch->pending_rx_mtu;
		}

		ch->pending_rx_mtu = 0;
		ch->ident = 0U;

		if (ch->chan.ops->reconfigured) {
			ch->chan.ops->reconfigured(&ch->chan);
		}
	}
}
1446 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
1447
l2cap_remove_rx_cid(struct bt_conn * conn,uint16_t cid)1448 static struct bt_l2cap_le_chan *l2cap_remove_rx_cid(struct bt_conn *conn,
1449 uint16_t cid)
1450 {
1451 struct bt_l2cap_chan *chan;
1452 sys_snode_t *prev = NULL;
1453
1454 /* Protect fixed channels against accidental removal */
1455 if (!L2CAP_LE_CID_IS_DYN(cid)) {
1456 return NULL;
1457 }
1458
1459 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
1460 if (BT_L2CAP_LE_CHAN(chan)->rx.cid == cid) {
1461 sys_slist_remove(&conn->channels, prev, &chan->node);
1462 return BT_L2CAP_LE_CHAN(chan);
1463 }
1464
1465 prev = &chan->node;
1466 }
1467
1468 return NULL;
1469 }
1470
le_disconn_req(struct bt_l2cap * l2cap,uint8_t ident,struct net_buf * buf)1471 static void le_disconn_req(struct bt_l2cap *l2cap, uint8_t ident,
1472 struct net_buf *buf)
1473 {
1474 struct bt_conn *conn = l2cap->chan.chan.conn;
1475 struct bt_l2cap_le_chan *chan;
1476 struct bt_l2cap_disconn_req *req = (void *)buf->data;
1477 struct bt_l2cap_disconn_rsp *rsp;
1478 uint16_t dcid;
1479
1480 if (buf->len < sizeof(*req)) {
1481 LOG_ERR("Too small LE conn req packet size");
1482 return;
1483 }
1484
1485 dcid = sys_le16_to_cpu(req->dcid);
1486
1487 LOG_DBG("dcid 0x%04x scid 0x%04x", dcid, sys_le16_to_cpu(req->scid));
1488
1489 chan = l2cap_remove_rx_cid(conn, dcid);
1490 if (!chan) {
1491 struct bt_l2cap_cmd_reject_cid_data data;
1492
1493 data.scid = req->scid;
1494 data.dcid = req->dcid;
1495
1496 l2cap_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID, &data,
1497 sizeof(data));
1498 return;
1499 }
1500
1501 buf = l2cap_create_le_sig_pdu(BT_L2CAP_DISCONN_RSP, ident,
1502 sizeof(*rsp));
1503 if (!buf) {
1504 return;
1505 }
1506
1507 rsp = net_buf_add(buf, sizeof(*rsp));
1508 rsp->dcid = sys_cpu_to_le16(chan->rx.cid);
1509 rsp->scid = sys_cpu_to_le16(chan->tx.cid);
1510
1511 bt_l2cap_chan_del(&chan->chan);
1512
1513 l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
1514 }
1515
l2cap_change_security(struct bt_l2cap_le_chan * chan,uint16_t err)1516 static int l2cap_change_security(struct bt_l2cap_le_chan *chan, uint16_t err)
1517 {
1518 struct bt_conn *conn = chan->chan.conn;
1519 bt_security_t sec;
1520 int ret;
1521
1522 if (atomic_test_bit(chan->chan.status,
1523 BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
1524 return -EINPROGRESS;
1525 }
1526
1527 switch (err) {
1528 case BT_L2CAP_LE_ERR_ENCRYPTION:
1529 if (conn->sec_level >= BT_SECURITY_L2) {
1530 return -EALREADY;
1531 }
1532
1533 sec = BT_SECURITY_L2;
1534 break;
1535 case BT_L2CAP_LE_ERR_AUTHENTICATION:
1536 if (conn->sec_level < BT_SECURITY_L2) {
1537 sec = BT_SECURITY_L2;
1538 } else if (conn->sec_level < BT_SECURITY_L3) {
1539 sec = BT_SECURITY_L3;
1540 } else if (conn->sec_level < BT_SECURITY_L4) {
1541 sec = BT_SECURITY_L4;
1542 } else {
1543 return -EALREADY;
1544 }
1545 break;
1546 default:
1547 return -EINVAL;
1548 }
1549
1550 ret = bt_conn_set_security(chan->chan.conn, sec);
1551 if (ret < 0) {
1552 return ret;
1553 }
1554
1555 atomic_set_bit(chan->chan.status, BT_L2CAP_STATUS_ENCRYPT_PENDING);
1556
1557 return 0;
1558 }
1559
1560 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Handle the peer's response to an enhanced credit-based connection request
 * we sent. Depending on the result, either retry after a security upgrade,
 * finalize each channel with its assigned destination CID, or tear all
 * pending channels down.
 */
static void le_ecred_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t dcid, mtu, mps, credits, result, psm;
	uint8_t attempted = 0;
	uint8_t succeeded = 0;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small ecred conn rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	LOG_DBG("mtu 0x%04x mps 0x%04x credits 0x%04x result %u", mtu, mps, credits, result);

	/* Remember the PSM for the ecred callback before channels may be
	 * removed below.
	 */
	chan = l2cap_lookup_ident(conn, ident);
	if (chan) {
		psm = chan->psm;
	} else {
		psm = 0x0000;
	}

	switch (result) {
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		while ((chan = l2cap_lookup_ident(conn, ident))) {

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->rtx_work);

			/* If security needs changing wait it to be completed */
			if (!l2cap_change_security(chan, result)) {
				return;
			}
			bt_l2cap_chan_remove(conn, &chan->chan);
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	case BT_L2CAP_LE_SUCCESS:
	/* Some connections refused – invalid Source CID */
	case BT_L2CAP_LE_ERR_INVALID_SCID:
	/* Some connections refused – Source CID already allocated */
	case BT_L2CAP_LE_ERR_SCID_IN_USE:
	/* Some connections refused – not enough resources available */
	case BT_L2CAP_LE_ERR_NO_RESOURCES:
		/* Match each pending channel (by ident) against the next
		 * destination CID in the response payload.
		 */
		while ((chan = l2cap_lookup_ident(conn, ident))) {
			struct bt_l2cap_chan *c;

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->rtx_work);

			/* Response carried fewer CIDs than we have pending
			 * channels: drop the leftover channels.
			 */
			if (buf->len < sizeof(dcid)) {
				LOG_ERR("Fewer dcid values than expected");
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			dcid = net_buf_pull_le16(buf);
			attempted++;

			LOG_DBG("dcid 0x%04x", dcid);

			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			if (!dcid) {
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			c = bt_l2cap_le_lookup_tx_cid(conn, dcid);
			if (c) {
				/* If a device receives a
				 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet
				 * with an already assigned Destination CID,
				 * then both the original channel and the new
				 * channel shall be immediately discarded and
				 * not used.
				 */
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				bt_l2cap_chan_disconnect(c);
				continue;
			}

			chan->tx.cid = dcid;

			/* Clearing ident also advances l2cap_lookup_ident */
			chan->ident = 0U;

			chan->tx.mtu = mtu;
			chan->tx.mps = mps;

			/* Update state */
			bt_l2cap_chan_set_state(&chan->chan,
						BT_L2CAP_CONNECTED);

			if (chan->chan.ops->connected) {
				chan->chan.ops->connected(&chan->chan);
			}

			/* Give credits */
			l2cap_chan_tx_give_credits(chan, credits);

			succeeded++;
		}
		break;
	case BT_L2CAP_LE_ERR_PSM_NOT_SUPP:
	default:
		/* All channels refused: remove and delete every one. */
		while ((chan = l2cap_remove_ident(conn, ident))) {
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	}

	if (ecred_cb && ecred_cb->ecred_conn_rsp) {
		ecred_cb->ecred_conn_rsp(conn, result, attempted, succeeded, psm);
	}
}
1689 #endif /* CONFIG_BT_L2CAP_ECRED */
1690
/* Handle the peer's response to an LE credit-based connection request we
 * sent. On success the channel is finalized with the peer's TX parameters;
 * on a security error an upgrade is attempted before giving up; any other
 * result tears the channel down.
 */
static void le_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_le_conn_rsp *rsp = (void *)buf->data;
	uint16_t dcid, mtu, mps, credits, result;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small LE conn rsp packet size");
		return;
	}

	dcid = sys_le16_to_cpu(rsp->dcid);
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	LOG_DBG("dcid 0x%04x mtu %u mps %u credits %u result 0x%04x", dcid, mtu, mps, credits,
		result);

	/* Keep the channel in case of security errors */
	if (result == BT_L2CAP_LE_SUCCESS ||
	    result == BT_L2CAP_LE_ERR_AUTHENTICATION ||
	    result == BT_L2CAP_LE_ERR_ENCRYPTION) {
		chan = l2cap_lookup_ident(conn, ident);
	} else {
		chan = l2cap_remove_ident(conn, ident);
	}

	if (!chan) {
		LOG_ERR("Cannot find channel for ident %u", ident);
		return;
	}

	/* Cancel RTX work */
	k_work_cancel_delayable(&chan->rtx_work);

	/* Reset ident since it got a response */
	chan->ident = 0U;

	switch (result) {
	case BT_L2CAP_LE_SUCCESS:
		/* Adopt the peer's TX parameters and destination CID. */
		chan->tx.cid = dcid;
		chan->tx.mtu = mtu;
		chan->tx.mps = mps;

		/* Update state */
		bt_l2cap_chan_set_state(&chan->chan, BT_L2CAP_CONNECTED);

		if (chan->chan.ops->connected) {
			chan->chan.ops->connected(&chan->chan);
		}

		/* Give credits */
		l2cap_chan_tx_give_credits(chan, credits);

		break;
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		/* If security needs changing wait it to be completed */
		if (l2cap_change_security(chan, result) == 0) {
			return;
		}
		bt_l2cap_chan_remove(conn, &chan->chan);
		__fallthrough;
	default:
		bt_l2cap_chan_del(&chan->chan);
	}
}
1762
le_disconn_rsp(struct bt_l2cap * l2cap,uint8_t ident,struct net_buf * buf)1763 static void le_disconn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
1764 struct net_buf *buf)
1765 {
1766 struct bt_conn *conn = l2cap->chan.chan.conn;
1767 struct bt_l2cap_le_chan *chan;
1768 struct bt_l2cap_disconn_rsp *rsp = (void *)buf->data;
1769 uint16_t scid;
1770
1771 if (buf->len < sizeof(*rsp)) {
1772 LOG_ERR("Too small LE disconn rsp packet size");
1773 return;
1774 }
1775
1776 scid = sys_le16_to_cpu(rsp->scid);
1777
1778 LOG_DBG("dcid 0x%04x scid 0x%04x", sys_le16_to_cpu(rsp->dcid), scid);
1779
1780 chan = l2cap_remove_rx_cid(conn, scid);
1781 if (!chan) {
1782 return;
1783 }
1784
1785 bt_l2cap_chan_del(&chan->chan);
1786 }
1787
l2cap_alloc_seg(struct bt_l2cap_le_chan * ch)1788 static struct net_buf *l2cap_alloc_seg(struct bt_l2cap_le_chan *ch)
1789 {
1790 struct net_buf *seg = NULL;
1791
1792 /* Use the user-defined allocator */
1793 if (ch->chan.ops->alloc_seg) {
1794 seg = ch->chan.ops->alloc_seg(&ch->chan);
1795 __ASSERT_NO_MSG(seg);
1796 }
1797
1798 /* Fallback to using global connection tx pool */
1799 if (!seg) {
1800 seg = bt_l2cap_create_pdu_timeout(NULL, 0, K_NO_WAIT);
1801 }
1802
1803 if (seg) {
1804 net_buf_reserve(seg, BT_L2CAP_CHAN_SEND_RESERVE);
1805 }
1806
1807 return seg;
1808 }
1809
l2cap_chan_tx_resume(struct bt_l2cap_le_chan * ch)1810 static void l2cap_chan_tx_resume(struct bt_l2cap_le_chan *ch)
1811 {
1812 if (!atomic_get(&ch->tx.credits) ||
1813 (k_fifo_is_empty(&ch->tx_queue) && !ch->tx_buf)) {
1814 return;
1815 }
1816
1817 k_work_reschedule(&ch->tx_work, K_NO_WAIT);
1818 }
1819
l2cap_chan_sdu_sent(struct bt_conn * conn,void * user_data,int err)1820 static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data, int err)
1821 {
1822 struct bt_l2cap_chan *chan;
1823 uint16_t cid = POINTER_TO_UINT(user_data);
1824
1825 LOG_DBG("conn %p CID 0x%04x err %d", conn, cid, err);
1826
1827 if (err) {
1828 LOG_DBG("error %d when sending SDU", err);
1829
1830 return;
1831 }
1832
1833 chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
1834 if (!chan) {
1835 LOG_DBG("got SDU sent cb for disconnected chan (CID %u)", cid);
1836
1837 return;
1838 }
1839
1840 if (chan->ops->sent) {
1841 chan->ops->sent(chan);
1842 }
1843
1844 /* Resume the current channel */
1845 l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
1846 }
1847
/* TX completion callback for an intermediate segment: no user notification,
 * just try to push the next segment out.
 */
static void l2cap_chan_seg_sent(struct bt_conn *conn, void *user_data, int err)
{
	uint16_t cid = POINTER_TO_UINT(user_data);
	struct bt_l2cap_chan *chan;

	LOG_DBG("conn %p CID 0x%04x err %d", conn, cid, err);

	if (err) {
		return;
	}

	chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
	if (chan == NULL) {
		/* Received segment sent callback for disconnected channel */
		return;
	}

	l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
}
1867
/* Lock-free decrement of @a target that refuses to go below zero.
 * Returns true if the value was decremented, false if it was already 0.
 */
static bool test_and_dec(atomic_t *target)
{
	for (;;) {
		atomic_t val = atomic_get(target);

		if (val == 0) {
			return false;
		}

		if (atomic_cas(target, val, val - 1)) {
			return true;
		}
	}
}
1883
1884 /* This returns -EAGAIN whenever a segment cannot be send immediately which can
1885 * happen under the following circuntances:
1886 *
1887 * 1. There are no credits
1888 * 2. There are no buffers
1889 * 3. There are no TX contexts
1890 *
1891 * In all cases the original buffer is unaffected so it can be pushed back to
1892 * be sent later.
1893 */
static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch,
			      struct net_buf *buf, uint16_t sdu_hdr_len)
{
	struct net_buf *seg;
	struct net_buf_simple_state state;
	int len, err;
	bt_conn_tx_cb_t cb;

	/* Consume one TX credit up front; it is given back on any failure. */
	if (!test_and_dec(&ch->tx.credits)) {
		LOG_DBG("No credits to transmit packet");
		return -EAGAIN;
	}

	/* Save state so it can be restored if we failed to send */
	net_buf_simple_save(&buf->b, &state);

	/* Fast path: the whole buffer fits in one segment and has enough
	 * headroom for the L2CAP header, so send it directly (with an
	 * extra reference) instead of copying into a fresh segment.
	 */
	if ((buf->len <= ch->tx.mps) &&
	    (net_buf_headroom(buf) >= BT_L2CAP_BUF_SIZE(0))) {
		LOG_DBG("len <= MPS, not allocating seg for %p", buf);
		seg = net_buf_ref(buf);

		len = seg->len;
	} else {
		LOG_DBG("allocating segment for %p (%u bytes left)", buf, buf->len);
		seg = l2cap_alloc_seg(ch);
		if (!seg) {
			LOG_DBG("failed to allocate seg for %p", buf);
			/* Return the credit taken above. */
			atomic_inc(&ch->tx.credits);

			return -EAGAIN;
		}

		/* Don't send more than TX MPS */
		len = MIN(net_buf_tailroom(seg), ch->tx.mps);

		/* Limit if original buffer is smaller than the segment */
		len = MIN(buf->len, len);

		/* Copy out this segment's payload; `buf` is advanced and
		 * restored via `state` if the send fails.
		 */
		net_buf_add_mem(seg, buf->data, len);
		net_buf_pull(buf, len);
	}

	LOG_DBG("ch %p cid 0x%04x len %u credits %lu", ch, ch->tx.cid, seg->len,
		atomic_get(&ch->tx.credits));

	/* Payload length excluding any SDU header the caller prepended. */
	len = seg->len - sdu_hdr_len;

	/* SDU will be considered sent when there is no data left in the
	 * buffers, or if there will be no data left, if we are sending `buf`
	 * directly.
	 */
	if (net_buf_frags_len(buf) == 0 ||
	    (buf == seg && net_buf_frags_len(buf) == len)) {
		cb = l2cap_chan_sdu_sent;
	} else {
		cb = l2cap_chan_seg_sent;
	}

	/* Forward the PDU to the lower layer.
	 *
	 * Note: after this call, anything in buf->user_data should be
	 * considered lost, as the lower layers are free to re-use it as they
	 * see fit. Reading from it later is obviously a no-no.
	 */
	err = bt_l2cap_send_cb(ch->chan.conn, ch->tx.cid, seg,
			       cb, UINT_TO_POINTER(ch->tx.cid));

	if (err) {
		LOG_DBG("Unable to send seg %d", err);
		/* Return the credit taken above. */
		atomic_inc(&ch->tx.credits);

		/* The host takes ownership of the reference in seg when
		 * bt_l2cap_send_cb is successful. The call returned an error,
		 * so we must get rid of the reference that was taken above.
		 */
		LOG_DBG("unref %p (%s)", seg,
			buf == seg ? "orig" : "seg");
		net_buf_unref(seg);

		if (err == -ENOBUFS) {
			/* Restore state since segment could not be sent */
			net_buf_simple_restore(&buf->b, &state);
			return -EAGAIN;
		}

		return err;
	}

	/* Notify channel user that it can't send anymore on this channel. */
	if (!atomic_get(&ch->tx.credits)) {
		LOG_DBG("chan %p paused", ch);
		atomic_clear_bit(ch->chan.status, BT_L2CAP_STATUS_OUT);

		if (ch->chan.ops->status) {
			ch->chan.ops->status(&ch->chan, ch->chan.status);
		}
	}

	return len;
}
1994
1995 /* return next netbuf fragment if present, also assign metadata */
/* Send as much of the SDU in @a *buf (a possibly-chained netbuf) as
 * credits and buffers allow, segment by segment.
 *
 * On failure (-EAGAIN etc.) *buf is updated to point at the fragment where
 * sending must resume. Fully-consumed fragments are destroyed as the chain
 * is walked. Returns the number of payload bytes handed to the lower layer,
 * or a negative error from l2cap_chan_le_send().
 */
static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
				  struct net_buf **buf)
{
	int ret;
	size_t sent, rem_len, frag_len;
	struct net_buf *frag;

	/* Skip an empty head fragment (e.g. one that only held the SDU
	 * header before it was consumed).
	 */
	frag = *buf;
	if (!frag->len && frag->frags) {
		frag = frag->frags;
	}

	rem_len = net_buf_frags_len(frag);
	sent = 0;
	while (frag && sent != rem_len) {
		LOG_DBG("send frag %p (orig buf %p)", frag, *buf);

		/* Remember the fragment length: a partial send below pulls
		 * data off `frag`, so frag->len changes.
		 */
		frag_len = frag->len;
		ret = l2cap_chan_le_send(ch, frag, 0);
		if (ret < 0) {
			*buf = frag;

			LOG_DBG("failed to send frag (ch %p cid 0x%04x sent %d)",
				ch, ch->tx.cid, sent);

			return ret;
		}

		sent += ret;

		/* If the current buffer has been fully consumed, destroy it and
		 * proceed to the next fragment of the netbuf chain.
		 */
		if (ret == frag_len) {
			frag = net_buf_frag_del(NULL, frag);
		}
	}

	LOG_DBG("ch %p cid 0x%04x sent %u", ch, ch->tx.cid, sent);

	return sent;
}
2038
le_credits(struct bt_l2cap * l2cap,uint8_t ident,struct net_buf * buf)2039 static void le_credits(struct bt_l2cap *l2cap, uint8_t ident,
2040 struct net_buf *buf)
2041 {
2042 struct bt_conn *conn = l2cap->chan.chan.conn;
2043 struct bt_l2cap_chan *chan;
2044 struct bt_l2cap_le_credits *ev = (void *)buf->data;
2045 struct bt_l2cap_le_chan *le_chan;
2046 uint16_t credits, cid;
2047
2048 if (buf->len < sizeof(*ev)) {
2049 LOG_ERR("Too small LE Credits packet size");
2050 return;
2051 }
2052
2053 cid = sys_le16_to_cpu(ev->cid);
2054 credits = sys_le16_to_cpu(ev->credits);
2055
2056 LOG_DBG("cid 0x%04x credits %u", cid, credits);
2057
2058 chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
2059 if (!chan) {
2060 LOG_ERR("Unable to find channel of LE Credits packet");
2061 return;
2062 }
2063
2064 le_chan = BT_L2CAP_LE_CHAN(chan);
2065
2066 if (atomic_get(&le_chan->tx.credits) + credits > UINT16_MAX) {
2067 LOG_ERR("Credits overflow");
2068 bt_l2cap_chan_disconnect(chan);
2069 return;
2070 }
2071
2072 l2cap_chan_tx_give_credits(le_chan, credits);
2073
2074 LOG_DBG("chan %p total credits %lu", le_chan, atomic_get(&le_chan->tx.credits));
2075
2076 l2cap_chan_tx_resume(le_chan);
2077 }
2078
reject_cmd(struct bt_l2cap * l2cap,uint8_t ident,struct net_buf * buf)2079 static void reject_cmd(struct bt_l2cap *l2cap, uint8_t ident,
2080 struct net_buf *buf)
2081 {
2082 struct bt_conn *conn = l2cap->chan.chan.conn;
2083 struct bt_l2cap_le_chan *chan;
2084
2085 /* Check if there is a outstanding channel */
2086 chan = l2cap_remove_ident(conn, ident);
2087 if (!chan) {
2088 return;
2089 }
2090
2091 bt_l2cap_chan_del(&chan->chan);
2092 }
2093 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2094
/* Receive callback for the LE signaling channel: validate the signaling
 * header and dispatch the PDU to the matching request/response handler.
 * Unknown or unsupported opcodes are answered with a command reject.
 * Always returns 0 (the buffer is fully consumed here).
 */
static int l2cap_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	struct bt_l2cap_le_chan *l2chan = CONTAINER_OF(chan, struct bt_l2cap_le_chan, chan);
	struct bt_l2cap *l2cap = CONTAINER_OF(l2chan, struct bt_l2cap, chan);
	struct bt_l2cap_sig_hdr *hdr;
	uint16_t len;

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Too small L2CAP signaling PDU");
		return 0;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	len = sys_le16_to_cpu(hdr->len);

	LOG_DBG("Signaling code 0x%02x ident %u len %u", hdr->code, hdr->ident, len);

	/* Header length must match the remaining payload exactly. */
	if (buf->len != len) {
		LOG_ERR("L2CAP length mismatch (%u != %u)", buf->len, len);
		return 0;
	}

	/* Ident 0 is reserved and never valid in a signaling PDU. */
	if (!hdr->ident) {
		LOG_ERR("Invalid ident value in L2CAP PDU");
		return 0;
	}

	switch (hdr->code) {
	case BT_L2CAP_CONN_PARAM_RSP:
		le_conn_param_rsp(l2cap, buf);
		break;
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	case BT_L2CAP_LE_CONN_REQ:
		le_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_LE_CONN_RSP:
		le_conn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_REQ:
		le_disconn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_RSP:
		le_disconn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_LE_CREDITS:
		le_credits(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_CMD_REJECT:
		reject_cmd(l2cap, hdr->ident, buf);
		break;
#if defined(CONFIG_BT_L2CAP_ECRED)
	case BT_L2CAP_ECRED_CONN_REQ:
		le_ecred_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_CONN_RSP:
		le_ecred_conn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_RECONF_REQ:
		le_ecred_reconf_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_RECONF_RSP:
		le_ecred_reconf_rsp(l2cap, hdr->ident, buf);
		break;
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */
#else
	case BT_L2CAP_CMD_REJECT:
		/* Ignored */
		break;
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
	case BT_L2CAP_CONN_PARAM_REQ:
		/* Only a central may receive this; otherwise fall through
		 * to the reject below.
		 */
		if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
			le_conn_param_update_req(l2cap, hdr->ident, buf);
			break;
		}
		__fallthrough;
	default:
		LOG_WRN("Rejecting unknown L2CAP PDU code 0x%02x", hdr->code);
		l2cap_send_reject(chan->conn, hdr->ident,
				  BT_L2CAP_REJ_NOT_UNDERSTOOD, NULL, 0);
		break;
	}

	return 0;
}
2179
2180 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_chan_shutdown(struct bt_l2cap_chan * chan)2181 static void l2cap_chan_shutdown(struct bt_l2cap_chan *chan)
2182 {
2183 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2184 struct net_buf *buf;
2185
2186 LOG_DBG("chan %p", chan);
2187
2188 atomic_set_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN);
2189
2190 /* Destroy segmented SDU if it exists */
2191 if (le_chan->_sdu) {
2192 net_buf_unref(le_chan->_sdu);
2193 le_chan->_sdu = NULL;
2194 le_chan->_sdu_len = 0U;
2195 }
2196
2197 /* Cleanup outstanding request */
2198 if (le_chan->tx_buf) {
2199 l2cap_tx_buf_destroy(chan->conn, le_chan->tx_buf, -ESHUTDOWN);
2200 le_chan->tx_buf = NULL;
2201 }
2202
2203 /* Remove buffers on the TX queue */
2204 while ((buf = net_buf_get(&le_chan->tx_queue, K_NO_WAIT))) {
2205 l2cap_tx_buf_destroy(chan->conn, buf, -ESHUTDOWN);
2206 }
2207
2208 /* Remove buffers on the RX queue */
2209 while ((buf = net_buf_get(&le_chan->rx_queue, K_NO_WAIT))) {
2210 net_buf_unref(buf);
2211 }
2212
2213 /* Update status */
2214 if (chan->ops->status) {
2215 chan->ops->status(chan, chan->status);
2216 }
2217 }
2218
/** @brief Get @c chan->state.
 *
 * This field does not exist when @kconfig{CONFIG_BT_L2CAP_DYNAMIC_CHANNEL} is
 * disabled. In that case, this function returns @ref BT_L2CAP_CONNECTED since
 * the struct can only represent static channels in that case and static
 * channels are always connected.
 *
 * @param chan Channel object (must reside inside a bt_l2cap_le_chan when
 *             dynamic channels are enabled).
 * @return Current channel state.
 */
static inline bt_l2cap_chan_state_t bt_l2cap_chan_get_state(struct bt_l2cap_chan *chan)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	return BT_L2CAP_LE_CHAN(chan)->state;
#else
	return BT_L2CAP_CONNECTED;
#endif
}
2234
l2cap_chan_send_credits(struct bt_l2cap_le_chan * chan,uint16_t credits)2235 static void l2cap_chan_send_credits(struct bt_l2cap_le_chan *chan,
2236 uint16_t credits)
2237 {
2238 struct bt_l2cap_le_credits *ev;
2239 struct net_buf *buf;
2240
2241 __ASSERT_NO_MSG(bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED);
2242
2243 buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CREDITS, get_ident(),
2244 sizeof(*ev));
2245 if (!buf) {
2246 LOG_ERR("Unable to send credits update");
2247 /* Disconnect would probably not work either so the only
2248 * option left is to shutdown the channel.
2249 */
2250 l2cap_chan_shutdown(&chan->chan);
2251 return;
2252 }
2253
2254 __ASSERT_NO_MSG(atomic_get(&chan->rx.credits) == 0);
2255 atomic_set(&chan->rx.credits, credits);
2256
2257 ev = net_buf_add(buf, sizeof(*ev));
2258 ev->cid = sys_cpu_to_le16(chan->rx.cid);
2259 ev->credits = sys_cpu_to_le16(credits);
2260
2261 l2cap_send(chan->chan.conn, BT_L2CAP_CID_LE_SIG, buf);
2262
2263 LOG_DBG("chan %p credits %lu", chan, atomic_get(&chan->rx.credits));
2264 }
2265
2266 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
l2cap_chan_send_credits_pdu(struct bt_conn * conn,uint16_t cid,uint16_t credits)2267 static int l2cap_chan_send_credits_pdu(struct bt_conn *conn, uint16_t cid, uint16_t credits)
2268 {
2269 struct net_buf *buf;
2270 struct bt_l2cap_le_credits *ev;
2271
2272 buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CREDITS, get_ident(), sizeof(*ev));
2273 if (!buf) {
2274 return -ENOBUFS;
2275 }
2276
2277 ev = net_buf_add(buf, sizeof(*ev));
2278 *ev = (struct bt_l2cap_le_credits){
2279 .cid = sys_cpu_to_le16(cid),
2280 .credits = sys_cpu_to_le16(credits),
2281 };
2282
2283 return l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
2284 }
2285
2286 /**
2287 * Combination of @ref atomic_add and @ref u16_add_overflow. Leaves @p
2288 * target unchanged if an overflow would occur. Assumes the current
2289 * value of @p target is representable by uint16_t.
2290 */
atomic_add_safe_u16(atomic_t * target,uint16_t addition)2291 static bool atomic_add_safe_u16(atomic_t *target, uint16_t addition)
2292 {
2293 uint16_t target_old, target_new;
2294
2295 do {
2296 target_old = atomic_get(target);
2297 if (u16_add_overflow(target_old, addition, &target_new)) {
2298 return true;
2299 }
2300 } while (!atomic_cas(target, target_old, target_new));
2301
2302 return false;
2303 }
2304
bt_l2cap_chan_give_credits(struct bt_l2cap_chan * chan,uint16_t additional_credits)2305 int bt_l2cap_chan_give_credits(struct bt_l2cap_chan *chan, uint16_t additional_credits)
2306 {
2307 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2308
2309 if (!chan || !chan->ops) {
2310 LOG_ERR("%s: Invalid chan object.", __func__);
2311 return -EINVAL;
2312 }
2313
2314 if (!chan->ops->seg_recv) {
2315 LOG_ERR("%s: Available only with seg_recv.", __func__);
2316 return -EINVAL;
2317 }
2318
2319 if (additional_credits == 0) {
2320 LOG_ERR("%s: Refusing to give 0.", __func__);
2321 return -EINVAL;
2322 }
2323
2324 if (bt_l2cap_chan_get_state(chan) == BT_L2CAP_CONNECTING) {
2325 LOG_ERR("%s: Cannot give credits while connecting.", __func__);
2326 return -EBUSY;
2327 }
2328
2329 if (atomic_add_safe_u16(&le_chan->rx.credits, additional_credits)) {
2330 LOG_ERR("%s: Overflow.", __func__);
2331 return -EOVERFLOW;
2332 }
2333
2334 if (bt_l2cap_chan_get_state(chan) == BT_L2CAP_CONNECTED) {
2335 int err;
2336
2337 err = l2cap_chan_send_credits_pdu(chan->conn, le_chan->rx.cid, additional_credits);
2338 if (err) {
2339 LOG_ERR("%s: PDU failed %d.", __func__, err);
2340 return err;
2341 }
2342 }
2343
2344 return 0;
2345 }
2346 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
2347
bt_l2cap_chan_recv_complete(struct bt_l2cap_chan * chan,struct net_buf * buf)2348 int bt_l2cap_chan_recv_complete(struct bt_l2cap_chan *chan, struct net_buf *buf)
2349 {
2350 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2351 struct bt_conn *conn = chan->conn;
2352
2353 __ASSERT_NO_MSG(chan);
2354 __ASSERT_NO_MSG(buf);
2355
2356 net_buf_unref(buf);
2357
2358 if (!conn) {
2359 return -ENOTCONN;
2360 }
2361
2362 if (conn->type != BT_CONN_TYPE_LE) {
2363 return -ENOTSUP;
2364 }
2365
2366 LOG_DBG("chan %p buf %p", chan, buf);
2367
2368 if (bt_l2cap_chan_get_state(&le_chan->chan) == BT_L2CAP_CONNECTED) {
2369 l2cap_chan_send_credits(le_chan, 1);
2370 }
2371
2372 return 0;
2373 }
2374
l2cap_alloc_frag(k_timeout_t timeout,void * user_data)2375 static struct net_buf *l2cap_alloc_frag(k_timeout_t timeout, void *user_data)
2376 {
2377 struct bt_l2cap_le_chan *chan = user_data;
2378 struct net_buf *frag = NULL;
2379
2380 frag = chan->chan.ops->alloc_buf(&chan->chan);
2381 if (!frag) {
2382 return NULL;
2383 }
2384
2385 LOG_DBG("frag %p tailroom %zu", frag, net_buf_tailroom(frag));
2386
2387 return frag;
2388 }
2389
l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan * chan,struct net_buf * buf,uint16_t seg)2390 static void l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan *chan,
2391 struct net_buf *buf, uint16_t seg)
2392 {
2393 int err;
2394
2395 LOG_DBG("chan %p len %zu", chan, net_buf_frags_len(buf));
2396
2397 __ASSERT_NO_MSG(bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED);
2398 __ASSERT_NO_MSG(atomic_get(&chan->rx.credits) == 0);
2399
2400 /* Receiving complete SDU, notify channel and reset SDU buf */
2401 err = chan->chan.ops->recv(&chan->chan, buf);
2402 if (err < 0) {
2403 if (err != -EINPROGRESS) {
2404 LOG_ERR("err %d", err);
2405 bt_l2cap_chan_disconnect(&chan->chan);
2406 net_buf_unref(buf);
2407 }
2408 return;
2409 } else if (bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED) {
2410 l2cap_chan_send_credits(chan, 1);
2411 }
2412
2413 net_buf_unref(buf);
2414 }
2415
l2cap_chan_le_recv_seg(struct bt_l2cap_le_chan * chan,struct net_buf * buf)2416 static void l2cap_chan_le_recv_seg(struct bt_l2cap_le_chan *chan,
2417 struct net_buf *buf)
2418 {
2419 uint16_t len;
2420 uint16_t seg = 0U;
2421
2422 len = net_buf_frags_len(chan->_sdu);
2423 if (len) {
2424 memcpy(&seg, net_buf_user_data(chan->_sdu), sizeof(seg));
2425 }
2426
2427 if (len + buf->len > chan->_sdu_len) {
2428 LOG_ERR("SDU length mismatch");
2429 bt_l2cap_chan_disconnect(&chan->chan);
2430 return;
2431 }
2432
2433 seg++;
2434 /* Store received segments in user_data */
2435 memcpy(net_buf_user_data(chan->_sdu), &seg, sizeof(seg));
2436
2437 LOG_DBG("chan %p seg %d len %zu", chan, seg, net_buf_frags_len(buf));
2438
2439 /* Append received segment to SDU */
2440 len = net_buf_append_bytes(chan->_sdu, buf->len, buf->data, K_NO_WAIT,
2441 l2cap_alloc_frag, chan);
2442 if (len != buf->len) {
2443 LOG_ERR("Unable to store SDU");
2444 bt_l2cap_chan_disconnect(&chan->chan);
2445 return;
2446 }
2447
2448 if (net_buf_frags_len(chan->_sdu) < chan->_sdu_len) {
2449 /* Give more credits if remote has run out of them, this
2450 * should only happen if the remote cannot fully utilize the
2451 * MPS for some reason.
2452 *
2453 * We can't send more than one credit, because if the remote
2454 * decides to start fully utilizing the MPS for the remainder of
2455 * the SDU, then the remote will end up with more credits than
2456 * the app has buffers.
2457 */
2458 if (atomic_get(&chan->rx.credits) == 0) {
2459 LOG_DBG("remote is not fully utilizing MPS");
2460 l2cap_chan_send_credits(chan, 1);
2461 }
2462
2463 return;
2464 }
2465
2466 buf = chan->_sdu;
2467 chan->_sdu = NULL;
2468 chan->_sdu_len = 0U;
2469
2470 l2cap_chan_le_recv_sdu(chan, buf, seg);
2471 }
2472
2473 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
l2cap_chan_le_recv_seg_direct(struct bt_l2cap_le_chan * chan,struct net_buf * seg)2474 static void l2cap_chan_le_recv_seg_direct(struct bt_l2cap_le_chan *chan, struct net_buf *seg)
2475 {
2476 uint16_t seg_offset;
2477 uint16_t sdu_remaining;
2478
2479 if (chan->_sdu_len_done == chan->_sdu_len) {
2480
2481 /* This is the first PDU in a SDU. */
2482
2483 if (seg->len < 2) {
2484 LOG_WRN("Missing SDU header");
2485 bt_l2cap_chan_disconnect(&chan->chan);
2486 return;
2487 }
2488
2489 /* Pop off the "SDU header". */
2490 chan->_sdu_len = net_buf_pull_le16(seg);
2491 chan->_sdu_len_done = 0;
2492
2493 if (chan->_sdu_len > chan->rx.mtu) {
2494 LOG_WRN("SDU exceeds MTU");
2495 bt_l2cap_chan_disconnect(&chan->chan);
2496 return;
2497 }
2498 }
2499
2500 seg_offset = chan->_sdu_len_done;
2501 sdu_remaining = chan->_sdu_len - chan->_sdu_len_done;
2502
2503 if (seg->len > sdu_remaining) {
2504 LOG_WRN("L2CAP RX PDU total exceeds SDU");
2505 bt_l2cap_chan_disconnect(&chan->chan);
2506 }
2507
2508 /* Commit receive. */
2509 chan->_sdu_len_done += seg->len;
2510
2511 /* Tail call. */
2512 chan->chan.ops->seg_recv(&chan->chan, chan->_sdu_len, seg_offset, &seg->b);
2513 }
2514 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
2515
l2cap_chan_le_recv(struct bt_l2cap_le_chan * chan,struct net_buf * buf)2516 static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
2517 struct net_buf *buf)
2518 {
2519 uint16_t sdu_len;
2520 int err;
2521
2522 if (!test_and_dec(&chan->rx.credits)) {
2523 LOG_ERR("No credits to receive packet");
2524 bt_l2cap_chan_disconnect(&chan->chan);
2525 return;
2526 }
2527
2528 if (buf->len > chan->rx.mps) {
2529 LOG_WRN("PDU size > MPS (%u > %u)", buf->len, chan->rx.mps);
2530 bt_l2cap_chan_disconnect(&chan->chan);
2531 return;
2532 }
2533
2534 /* Redirect to experimental API. */
2535 IF_ENABLED(CONFIG_BT_L2CAP_SEG_RECV, (
2536 if (chan->chan.ops->seg_recv) {
2537 l2cap_chan_le_recv_seg_direct(chan, buf);
2538 return;
2539 }
2540 ))
2541
2542 /* Check if segments already exist */
2543 if (chan->_sdu) {
2544 l2cap_chan_le_recv_seg(chan, buf);
2545 return;
2546 }
2547
2548 if (buf->len < 2) {
2549 LOG_WRN("Too short data packet");
2550 bt_l2cap_chan_disconnect(&chan->chan);
2551 return;
2552 }
2553
2554 sdu_len = net_buf_pull_le16(buf);
2555
2556 LOG_DBG("chan %p len %u sdu_len %u", chan, buf->len, sdu_len);
2557
2558 if (sdu_len > chan->rx.mtu) {
2559 LOG_ERR("Invalid SDU length");
2560 bt_l2cap_chan_disconnect(&chan->chan);
2561 return;
2562 }
2563
2564 /* Always allocate buffer from the channel if supported. */
2565 if (chan->chan.ops->alloc_buf) {
2566 chan->_sdu = chan->chan.ops->alloc_buf(&chan->chan);
2567 if (!chan->_sdu) {
2568 LOG_ERR("Unable to allocate buffer for SDU");
2569 bt_l2cap_chan_disconnect(&chan->chan);
2570 return;
2571 }
2572 chan->_sdu_len = sdu_len;
2573
2574 /* Send sdu_len/mps worth of credits */
2575 uint16_t credits = DIV_ROUND_UP(
2576 MIN(sdu_len - buf->len, net_buf_tailroom(chan->_sdu)),
2577 chan->rx.mps);
2578
2579 if (credits) {
2580 LOG_DBG("sending %d extra credits (sdu_len %d buf_len %d mps %d)",
2581 credits,
2582 sdu_len,
2583 buf->len,
2584 chan->rx.mps);
2585 l2cap_chan_send_credits(chan, credits);
2586 }
2587
2588 l2cap_chan_le_recv_seg(chan, buf);
2589 return;
2590 }
2591
2592 err = chan->chan.ops->recv(&chan->chan, buf);
2593 if (err < 0) {
2594 if (err != -EINPROGRESS) {
2595 LOG_ERR("err %d", err);
2596 bt_l2cap_chan_disconnect(&chan->chan);
2597 }
2598 return;
2599 }
2600
2601 /* Only attempt to send credits if the channel wasn't disconnected
2602 * in the recv() callback above
2603 */
2604 if (bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED) {
2605 l2cap_chan_send_credits(chan, 1);
2606 }
2607 }
2608
l2cap_chan_recv_queue(struct bt_l2cap_le_chan * chan,struct net_buf * buf)2609 static void l2cap_chan_recv_queue(struct bt_l2cap_le_chan *chan,
2610 struct net_buf *buf)
2611 {
2612 if (chan->state == BT_L2CAP_DISCONNECTING) {
2613 LOG_WRN("Ignoring data received while disconnecting");
2614 net_buf_unref(buf);
2615 return;
2616 }
2617
2618 if (atomic_test_bit(chan->chan.status, BT_L2CAP_STATUS_SHUTDOWN)) {
2619 LOG_WRN("Ignoring data received while channel has shutdown");
2620 net_buf_unref(buf);
2621 return;
2622 }
2623
2624 if (!L2CAP_LE_PSM_IS_DYN(chan->psm)) {
2625 l2cap_chan_le_recv(chan, buf);
2626 net_buf_unref(buf);
2627 return;
2628 }
2629
2630 net_buf_put(&chan->rx_queue, buf);
2631 k_work_submit(&chan->rx_work);
2632 }
2633 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2634
l2cap_chan_recv(struct bt_l2cap_chan * chan,struct net_buf * buf,bool complete)2635 static void l2cap_chan_recv(struct bt_l2cap_chan *chan, struct net_buf *buf,
2636 bool complete)
2637 {
2638 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
2639 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2640
2641 if (L2CAP_LE_CID_IS_DYN(le_chan->rx.cid)) {
2642 if (complete) {
2643 l2cap_chan_recv_queue(le_chan, buf);
2644 } else {
2645 /* if packet was not complete this means peer device
2646 * overflowed our RX and channel shall be disconnected
2647 */
2648 bt_l2cap_chan_disconnect(chan);
2649 net_buf_unref(buf);
2650 }
2651
2652 return;
2653 }
2654 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2655
2656 LOG_DBG("chan %p len %u", chan, buf->len);
2657
2658 chan->ops->recv(chan, buf);
2659 net_buf_unref(buf);
2660 }
2661
bt_l2cap_recv(struct bt_conn * conn,struct net_buf * buf,bool complete)2662 void bt_l2cap_recv(struct bt_conn *conn, struct net_buf *buf, bool complete)
2663 {
2664 struct bt_l2cap_hdr *hdr;
2665 struct bt_l2cap_chan *chan;
2666 uint16_t cid;
2667
2668 if (IS_ENABLED(CONFIG_BT_BREDR) &&
2669 conn->type == BT_CONN_TYPE_BR) {
2670 bt_l2cap_br_recv(conn, buf);
2671 return;
2672 }
2673
2674 if (buf->len < sizeof(*hdr)) {
2675 LOG_ERR("Too small L2CAP PDU received");
2676 net_buf_unref(buf);
2677 return;
2678 }
2679
2680 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2681 cid = sys_le16_to_cpu(hdr->cid);
2682
2683 LOG_DBG("Packet for CID %u len %u", cid, buf->len);
2684
2685 chan = bt_l2cap_le_lookup_rx_cid(conn, cid);
2686 if (!chan) {
2687 LOG_WRN("Ignoring data for unknown channel ID 0x%04x", cid);
2688 net_buf_unref(buf);
2689 return;
2690 }
2691
2692 l2cap_chan_recv(chan, buf, complete);
2693 }
2694
bt_l2cap_update_conn_param(struct bt_conn * conn,const struct bt_le_conn_param * param)2695 int bt_l2cap_update_conn_param(struct bt_conn *conn,
2696 const struct bt_le_conn_param *param)
2697 {
2698 struct bt_l2cap_conn_param_req *req;
2699 struct net_buf *buf;
2700
2701 buf = l2cap_create_le_sig_pdu(BT_L2CAP_CONN_PARAM_REQ,
2702 get_ident(), sizeof(*req));
2703 if (!buf) {
2704 return -ENOMEM;
2705 }
2706
2707 req = net_buf_add(buf, sizeof(*req));
2708 req->min_interval = sys_cpu_to_le16(param->interval_min);
2709 req->max_interval = sys_cpu_to_le16(param->interval_max);
2710 req->latency = sys_cpu_to_le16(param->latency);
2711 req->timeout = sys_cpu_to_le16(param->timeout);
2712
2713 return l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
2714 }
2715
l2cap_connected(struct bt_l2cap_chan * chan)2716 static void l2cap_connected(struct bt_l2cap_chan *chan)
2717 {
2718 LOG_DBG("ch %p cid 0x%04x", BT_L2CAP_LE_CHAN(chan), BT_L2CAP_LE_CHAN(chan)->rx.cid);
2719 }
2720
l2cap_disconnected(struct bt_l2cap_chan * chan)2721 static void l2cap_disconnected(struct bt_l2cap_chan *chan)
2722 {
2723 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2724
2725 LOG_DBG("ch %p cid 0x%04x", le_chan, le_chan->rx.cid);
2726
2727 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
2728 /* Cancel RTX work on signal channel.
2729 * Disconnected callback is always called from system workqueue
2730 * so this should always succeed.
2731 */
2732 (void)k_work_cancel_delayable(&le_chan->rtx_work);
2733 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2734 }
2735
l2cap_accept(struct bt_conn * conn,struct bt_l2cap_chan ** chan)2736 static int l2cap_accept(struct bt_conn *conn, struct bt_l2cap_chan **chan)
2737 {
2738 int i;
2739 static const struct bt_l2cap_chan_ops ops = {
2740 .connected = l2cap_connected,
2741 .disconnected = l2cap_disconnected,
2742 .recv = l2cap_recv,
2743 };
2744
2745 LOG_DBG("conn %p handle %u", conn, conn->handle);
2746
2747 for (i = 0; i < ARRAY_SIZE(bt_l2cap_pool); i++) {
2748 struct bt_l2cap *l2cap = &bt_l2cap_pool[i];
2749
2750 if (l2cap->chan.chan.conn) {
2751 continue;
2752 }
2753
2754 l2cap->chan.chan.ops = &ops;
2755 *chan = &l2cap->chan.chan;
2756
2757 return 0;
2758 }
2759
2760 LOG_ERR("No available L2CAP context for conn %p", conn);
2761
2762 return -ENOMEM;
2763 }
2764
/* Register the LE signaling channel as a fixed channel served by
 * l2cap_accept/l2cap_recv above.
 */
BT_L2CAP_CHANNEL_DEFINE(le_fixed_chan, BT_L2CAP_CID_LE_SIG, l2cap_accept, NULL);
2766
bt_l2cap_init(void)2767 void bt_l2cap_init(void)
2768 {
2769 if (IS_ENABLED(CONFIG_BT_BREDR)) {
2770 bt_l2cap_br_init();
2771 }
2772 }
2773
2774 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_le_connect(struct bt_conn * conn,struct bt_l2cap_le_chan * ch,uint16_t psm)2775 static int l2cap_le_connect(struct bt_conn *conn, struct bt_l2cap_le_chan *ch,
2776 uint16_t psm)
2777 {
2778 int err;
2779
2780 if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
2781 return -EINVAL;
2782 }
2783
2784 l2cap_chan_tx_init(ch);
2785 l2cap_chan_rx_init(ch);
2786
2787 if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
2788 return -ENOMEM;
2789 }
2790
2791 ch->psm = psm;
2792
2793 if (conn->sec_level < ch->required_sec_level) {
2794 err = bt_conn_set_security(conn, ch->required_sec_level);
2795 if (err) {
2796 goto fail;
2797 }
2798
2799 atomic_set_bit(ch->chan.status,
2800 BT_L2CAP_STATUS_ENCRYPT_PENDING);
2801
2802 return 0;
2803 }
2804
2805 err = l2cap_le_conn_req(ch);
2806 if (err) {
2807 goto fail;
2808 }
2809
2810 return 0;
2811
2812 fail:
2813 bt_l2cap_chan_remove(conn, &ch->chan);
2814 bt_l2cap_chan_del(&ch->chan);
2815 return err;
2816 }
2817
2818 #if defined(CONFIG_BT_L2CAP_ECRED)
l2cap_ecred_init(struct bt_conn * conn,struct bt_l2cap_le_chan * ch,uint16_t psm)2819 static int l2cap_ecred_init(struct bt_conn *conn,
2820 struct bt_l2cap_le_chan *ch, uint16_t psm)
2821 {
2822
2823 if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
2824 return -EINVAL;
2825 }
2826
2827 l2cap_chan_tx_init(ch);
2828 l2cap_chan_rx_init(ch);
2829
2830 if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
2831 return -ENOMEM;
2832 }
2833
2834 ch->psm = psm;
2835
2836 LOG_DBG("ch %p psm 0x%02x mtu %u mps %u credits 1", ch, ch->psm, ch->rx.mtu, ch->rx.mps);
2837
2838 return 0;
2839 }
2840
bt_l2cap_ecred_chan_connect(struct bt_conn * conn,struct bt_l2cap_chan ** chan,uint16_t psm)2841 int bt_l2cap_ecred_chan_connect(struct bt_conn *conn,
2842 struct bt_l2cap_chan **chan, uint16_t psm)
2843 {
2844 int i, err;
2845
2846 LOG_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);
2847
2848 if (!conn || !chan) {
2849 return -EINVAL;
2850 }
2851
2852 /* Init non-null channels */
2853 for (i = 0; i < L2CAP_ECRED_CHAN_MAX_PER_REQ; i++) {
2854 if (!chan[i]) {
2855 break;
2856 }
2857
2858 err = l2cap_ecred_init(conn, BT_L2CAP_LE_CHAN(chan[i]), psm);
2859 if (err < 0) {
2860 i--;
2861 goto fail;
2862 }
2863 }
2864
2865 return l2cap_ecred_conn_req(chan, i);
2866 fail:
2867 /* Remove channels added */
2868 for (; i >= 0; i--) {
2869 if (!chan[i]) {
2870 continue;
2871 }
2872
2873 bt_l2cap_chan_remove(conn, chan[i]);
2874 }
2875
2876 return err;
2877 }
2878
l2cap_find_pending_reconf(struct bt_conn * conn)2879 static struct bt_l2cap_le_chan *l2cap_find_pending_reconf(struct bt_conn *conn)
2880 {
2881 struct bt_l2cap_chan *chan;
2882
2883 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
2884 if (BT_L2CAP_LE_CHAN(chan)->pending_rx_mtu) {
2885 return BT_L2CAP_LE_CHAN(chan);
2886 }
2887 }
2888
2889 return NULL;
2890 }
2891
bt_l2cap_ecred_chan_reconfigure(struct bt_l2cap_chan ** chans,uint16_t mtu)2892 int bt_l2cap_ecred_chan_reconfigure(struct bt_l2cap_chan **chans, uint16_t mtu)
2893 {
2894 struct bt_l2cap_ecred_reconf_req *req;
2895 struct bt_conn *conn = NULL;
2896 struct bt_l2cap_le_chan *ch;
2897 struct net_buf *buf;
2898 uint8_t ident;
2899 int i;
2900
2901 LOG_DBG("chans %p mtu 0x%04x", chans, mtu);
2902
2903 if (!chans) {
2904 return -EINVAL;
2905 }
2906
2907 for (i = 0; i < L2CAP_ECRED_CHAN_MAX_PER_REQ; i++) {
2908 if (!chans[i]) {
2909 break;
2910 }
2911
2912 /* validate that all channels are from same connection */
2913 if (conn) {
2914 if (conn != chans[i]->conn) {
2915 return -EINVAL;
2916 }
2917 } else {
2918 conn = chans[i]->conn;
2919 }
2920
2921 /* validate MTU is not decreased */
2922 if (mtu < BT_L2CAP_LE_CHAN(chans[i])->rx.mtu) {
2923 return -EINVAL;
2924 }
2925 }
2926
2927 if (i == 0) {
2928 return -EINVAL;
2929 }
2930
2931 if (!conn) {
2932 return -ENOTCONN;
2933 }
2934
2935 if (conn->type != BT_CONN_TYPE_LE) {
2936 return -EINVAL;
2937 }
2938
2939 /* allow only 1 request at time */
2940 if (l2cap_find_pending_reconf(conn)) {
2941 return -EBUSY;
2942 }
2943
2944 ident = get_ident();
2945
2946 buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_RECONF_REQ,
2947 ident,
2948 sizeof(*req) + (i * sizeof(uint16_t)));
2949 if (!buf) {
2950 return -ENOMEM;
2951 }
2952
2953 req = net_buf_add(buf, sizeof(*req));
2954 req->mtu = sys_cpu_to_le16(mtu);
2955
2956 /* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE
2957 * as the remaining bytes cannot be used.
2958 */
2959 req->mps = sys_cpu_to_le16(MIN(mtu + BT_L2CAP_SDU_HDR_SIZE,
2960 BT_L2CAP_RX_MTU));
2961
2962 for (int j = 0; j < i; j++) {
2963 ch = BT_L2CAP_LE_CHAN(chans[j]);
2964
2965 ch->ident = ident;
2966 ch->pending_rx_mtu = mtu;
2967
2968 net_buf_add_le16(buf, ch->rx.cid);
2969 };
2970
2971 /* We set the RTX timer on one of the supplied channels, but when the
2972 * request resolves or times out we will act on all the channels in the
2973 * supplied array, using the ident field to find them.
2974 */
2975 l2cap_chan_send_req(chans[0], buf, L2CAP_CONN_TIMEOUT);
2976
2977 return 0;
2978 }
2979
2980 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
2981
bt_l2cap_chan_connect(struct bt_conn * conn,struct bt_l2cap_chan * chan,uint16_t psm)2982 int bt_l2cap_chan_connect(struct bt_conn *conn, struct bt_l2cap_chan *chan,
2983 uint16_t psm)
2984 {
2985 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2986
2987 LOG_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);
2988
2989 if (!conn || conn->state != BT_CONN_CONNECTED) {
2990 return -ENOTCONN;
2991 }
2992
2993 if (!chan) {
2994 return -EINVAL;
2995 }
2996
2997 if (IS_ENABLED(CONFIG_BT_BREDR) &&
2998 conn->type == BT_CONN_TYPE_BR) {
2999 return bt_l2cap_br_chan_connect(conn, chan, psm);
3000 }
3001
3002 if (le_chan->required_sec_level > BT_SECURITY_L4) {
3003 return -EINVAL;
3004 } else if (le_chan->required_sec_level == BT_SECURITY_L0) {
3005 le_chan->required_sec_level = BT_SECURITY_L1;
3006 }
3007
3008 return l2cap_le_connect(conn, le_chan, psm);
3009 }
3010
bt_l2cap_chan_disconnect(struct bt_l2cap_chan * chan)3011 int bt_l2cap_chan_disconnect(struct bt_l2cap_chan *chan)
3012 {
3013 struct bt_conn *conn = chan->conn;
3014 struct net_buf *buf;
3015 struct bt_l2cap_disconn_req *req;
3016 struct bt_l2cap_le_chan *le_chan;
3017
3018 if (!conn) {
3019 return -ENOTCONN;
3020 }
3021
3022 if (IS_ENABLED(CONFIG_BT_BREDR) &&
3023 conn->type == BT_CONN_TYPE_BR) {
3024 return bt_l2cap_br_chan_disconnect(chan);
3025 }
3026
3027 le_chan = BT_L2CAP_LE_CHAN(chan);
3028
3029 LOG_DBG("chan %p scid 0x%04x dcid 0x%04x", chan, le_chan->rx.cid, le_chan->tx.cid);
3030
3031 le_chan->ident = get_ident();
3032
3033 buf = l2cap_create_le_sig_pdu(BT_L2CAP_DISCONN_REQ,
3034 le_chan->ident, sizeof(*req));
3035 if (!buf) {
3036 return -ENOMEM;
3037 }
3038
3039 req = net_buf_add(buf, sizeof(*req));
3040 req->dcid = sys_cpu_to_le16(le_chan->tx.cid);
3041 req->scid = sys_cpu_to_le16(le_chan->rx.cid);
3042
3043 l2cap_chan_send_req(chan, buf, L2CAP_DISC_TIMEOUT);
3044 bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTING);
3045
3046 return 0;
3047 }
3048
bt_l2cap_dyn_chan_send(struct bt_l2cap_le_chan * le_chan,struct net_buf * buf)3049 static int bt_l2cap_dyn_chan_send(struct bt_l2cap_le_chan *le_chan, struct net_buf *buf)
3050 {
3051 uint16_t sdu_len = net_buf_frags_len(buf);
3052
3053 LOG_DBG("chan %p buf %p", le_chan, buf);
3054
3055 if (sdu_len > le_chan->tx.mtu) {
3056 LOG_ERR("attempt to send %u bytes on %u MTU chan",
3057 sdu_len, le_chan->tx.mtu);
3058 return -EMSGSIZE;
3059 }
3060
3061 if (net_buf_headroom(buf) < BT_L2CAP_SDU_CHAN_SEND_RESERVE) {
3062 /* Call `net_buf_reserve(buf, BT_L2CAP_SDU_CHAN_SEND_RESERVE)`
3063 * when allocating buffers intended for bt_l2cap_chan_send().
3064 */
3065 LOG_DBG("Not enough headroom in buf %p", buf);
3066 return -EINVAL;
3067 }
3068
3069 /* Prepend SDU "header".
3070 *
3071 * L2CAP LE CoC SDUs are segmented into PDUs and sent over so-called
3072 * K-frames that each have their own L2CAP header (ie channel, PDU
3073 * length).
3074 *
3075 * The SDU header is right before the data that will be segmented and is
3076 * only present in the first segment/PDU. Here's an example:
3077 *
3078 * Sent data payload of 50 bytes over channel 0x4040 with MPS of 30 bytes:
3079 * First PDU / segment / K-frame:
3080 * | L2CAP K-frame header | K-frame payload |
3081 * | PDU length | Channel ID | SDU header | SDU payload |
3082 * | 30 | 0x4040 | 50 | 28 bytes of data |
3083 *
3084 * Second and last PDU / segment / K-frame:
3085 * | L2CAP K-frame header | K-frame payload |
3086 * | PDU length | Channel ID | rest of SDU payload |
3087 * | 22 | 0x4040 | 22 bytes of data |
3088 */
3089 net_buf_push_le16(buf, sdu_len);
3090
3091 /* Put buffer on TX queue */
3092 net_buf_put(&le_chan->tx_queue, buf);
3093
3094 /* Always process the queue in the same context */
3095 k_work_reschedule(&le_chan->tx_work, K_NO_WAIT);
3096
3097 return 0;
3098 }
3099
bt_l2cap_chan_send(struct bt_l2cap_chan * chan,struct net_buf * buf)3100 int bt_l2cap_chan_send(struct bt_l2cap_chan *chan, struct net_buf *buf)
3101 {
3102 if (!buf || !chan) {
3103 return -EINVAL;
3104 }
3105
3106 LOG_DBG("chan %p buf %p len %zu", chan, buf, net_buf_frags_len(buf));
3107
3108 if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
3109 return -ENOTCONN;
3110 }
3111
3112 if (atomic_test_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN)) {
3113 return -ESHUTDOWN;
3114 }
3115
3116 if (IS_ENABLED(CONFIG_BT_BREDR) &&
3117 chan->conn->type == BT_CONN_TYPE_BR) {
3118 return bt_l2cap_br_chan_send_cb(chan, buf, NULL, NULL);
3119 }
3120
3121 /* Sending over static channels is not supported by this fn. Use
3122 * `bt_l2cap_send()` if external to this file, or `l2cap_send` if
3123 * internal.
3124 */
3125 if (IS_ENABLED(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)) {
3126 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3127
3128 __ASSERT_NO_MSG(le_chan);
3129 __ASSERT_NO_MSG(L2CAP_LE_CID_IS_DYN(le_chan->tx.cid));
3130
3131 return bt_l2cap_dyn_chan_send(le_chan, buf);
3132 }
3133
3134 LOG_DBG("Invalid channel type (chan %p)", chan);
3135
3136 return -EINVAL;
3137 }
3138 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
3139