1 /* l2cap.c - L2CAP handling */
2
3 /*
4 * Copyright (c) 2015-2016 Intel Corporation
5 * Copyright (c) 2023 Nordic Semiconductor
6 *
7 * SPDX-License-Identifier: Apache-2.0
8 */
9
10 #include <zephyr/kernel.h>
11 #include <string.h>
12 #include <errno.h>
13 #include <zephyr/sys/__assert.h>
14 #include <zephyr/sys/atomic.h>
15 #include <zephyr/sys/check.h>
16 #include <zephyr/sys/iterable_sections.h>
17 #include <zephyr/sys/byteorder.h>
18 #include <zephyr/sys/math_extras.h>
19 #include <zephyr/sys/util.h>
20 #include <zephyr/net_buf.h>
21
22 #include <zephyr/bluetooth/hci.h>
23 #include <zephyr/bluetooth/bluetooth.h>
24 #include <zephyr/bluetooth/conn.h>
25 #include <zephyr/bluetooth/l2cap.h>
26
27 #define LOG_DBG_ENABLED IS_ENABLED(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
28
29 #include "buf_view.h"
30 #include "hci_core.h"
31 #include "conn_internal.h"
32 #include "l2cap_internal.h"
33 #include "keys.h"
34
35 #include <zephyr/logging/log.h>
36 LOG_MODULE_REGISTER(bt_l2cap, CONFIG_BT_L2CAP_LOG_LEVEL);
37
38 #define LE_CHAN_RTX(_w) CONTAINER_OF(k_work_delayable_from_work(_w), \
39 struct bt_l2cap_le_chan, rtx_work)
40 #define CHAN_RX(_w) CONTAINER_OF(_w, struct bt_l2cap_le_chan, rx_work)
41
42 #define L2CAP_LE_MIN_MTU 23
43
44 #define L2CAP_LE_MAX_CREDITS (BT_BUF_ACL_RX_COUNT - 1)
45
46 #define L2CAP_LE_CID_DYN_START 0x0040
47 #define L2CAP_LE_CID_DYN_END 0x007f
48 #define L2CAP_LE_CID_IS_DYN(_cid) \
49 (_cid >= L2CAP_LE_CID_DYN_START && _cid <= L2CAP_LE_CID_DYN_END)
50
51 #define L2CAP_LE_PSM_FIXED_START 0x0001
52 #define L2CAP_LE_PSM_FIXED_END 0x007f
53 #define L2CAP_LE_PSM_DYN_START 0x0080
54 #define L2CAP_LE_PSM_DYN_END 0x00ff
55 #define L2CAP_LE_PSM_IS_DYN(_psm) \
56 (_psm >= L2CAP_LE_PSM_DYN_START && _psm <= L2CAP_LE_PSM_DYN_END)
57
58 #define L2CAP_CONN_TIMEOUT K_SECONDS(40)
59 #define L2CAP_DISC_TIMEOUT K_SECONDS(2)
60 /** @brief Local L2CAP RTX (Response Timeout eXpired)
61 *
62 * Specification-allowed range for the value of RTX is 1 to 60 seconds.
63 */
64 #define L2CAP_RTX_TIMEOUT K_SECONDS(2)
65
66 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
67 /* Dedicated pool for disconnect buffers so they are guaranteed to be send
68 * even in case of data congestion due to flooding.
69 */
70 NET_BUF_POOL_FIXED_DEFINE(disc_pool, 1,
71 BT_L2CAP_BUF_SIZE(
72 sizeof(struct bt_l2cap_sig_hdr) +
73 sizeof(struct bt_l2cap_disconn_req)),
74 CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);
75
76 #define l2cap_lookup_ident(conn, ident) __l2cap_lookup_ident(conn, ident, false)
77 #define l2cap_remove_ident(conn, ident) __l2cap_lookup_ident(conn, ident, true)
78
79 static sys_slist_t servers = SYS_SLIST_STATIC_INIT(&servers);
80
static void l2cap_tx_buf_destroy(struct bt_conn *conn, struct net_buf *buf, int err)
{
	/* Destroy callback for queued TX buffers. The conn and err parameters
	 * are required by the callback signature; dropping our reference on
	 * the buffer is all the cleanup needed here.
	 */
	net_buf_unref(buf);
}
85 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
86
/* L2CAP signalling channel specific context. One instance exists per ACL
 * connection (see bt_l2cap_pool).
 */
struct bt_l2cap {
	/* The channel this context is associated with */
	struct bt_l2cap_le_chan chan;
};
92
93 static const struct bt_l2cap_ecred_cb *ecred_cb;
94 static struct bt_l2cap bt_l2cap_pool[CONFIG_BT_MAX_CONN];
95
void bt_l2cap_register_ecred_cb(const struct bt_l2cap_ecred_cb *cb)
{
	/* Register (or clear, with NULL) the global enhanced-credit-based
	 * flow control callback set.
	 */
	ecred_cb = cb;
}
100
static uint8_t get_ident(void)
{
	/* Monotonically increasing signaling identifier shared by all
	 * connections. Zero is not a valid identifier, so the counter wraps
	 * from 0xff straight back to 1.
	 */
	static uint8_t ident;

	if (++ident == 0U) {
		ident = 1U;
	}

	return ident;
}
113
114 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_chan_alloc_cid(struct bt_conn * conn,struct bt_l2cap_chan * chan)115 static struct bt_l2cap_le_chan *l2cap_chan_alloc_cid(struct bt_conn *conn,
116 struct bt_l2cap_chan *chan)
117 {
118 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
119 uint16_t cid;
120
121 /*
122 * No action needed if there's already a CID allocated, e.g. in
123 * the case of a fixed channel.
124 */
125 if (le_chan->rx.cid > 0) {
126 return le_chan;
127 }
128
129 for (cid = L2CAP_LE_CID_DYN_START; cid <= L2CAP_LE_CID_DYN_END; cid++) {
130 if (!bt_l2cap_le_lookup_rx_cid(conn, cid)) {
131 le_chan->rx.cid = cid;
132 return le_chan;
133 }
134 }
135
136 return NULL;
137 }
138
static struct bt_l2cap_le_chan *
__l2cap_lookup_ident(struct bt_conn *conn, uint16_t ident, bool remove)
{
	/* Find the channel on @p conn whose pending signaling identifier
	 * matches @p ident. When @p remove is true the matching channel is
	 * also unlinked from the connection's channel list. Returns NULL if
	 * no channel matches.
	 */
	struct bt_l2cap_chan *chan;
	sys_snode_t *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (BT_L2CAP_LE_CHAN(chan)->ident == ident) {
			if (remove) {
				/* Singly-linked list: unlink needs the
				 * predecessor node tracked below.
				 */
				sys_slist_remove(&conn->channels, prev,
						 &chan->node);
			}
			return BT_L2CAP_LE_CHAN(chan);
		}

		prev = &chan->node;
	}

	return NULL;
}
159 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
160
bt_l2cap_chan_remove(struct bt_conn * conn,struct bt_l2cap_chan * ch)161 void bt_l2cap_chan_remove(struct bt_conn *conn, struct bt_l2cap_chan *ch)
162 {
163 struct bt_l2cap_chan *chan;
164 sys_snode_t *prev = NULL;
165
166 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
167 if (chan == ch) {
168 sys_slist_remove(&conn->channels, prev, &chan->node);
169 return;
170 }
171
172 prev = &chan->node;
173 }
174 }
175
const char *bt_l2cap_chan_state_str(bt_l2cap_chan_state_t state)
{
	/* Map a channel state to a human-readable name for log output. */
	switch (state) {
	case BT_L2CAP_CONNECTED:
		return "connected";
	case BT_L2CAP_CONNECTING:
		return "connecting";
	case BT_L2CAP_CONFIG:
		return "config";
	case BT_L2CAP_DISCONNECTING:
		return "disconnecting";
	case BT_L2CAP_DISCONNECTED:
		return "disconnected";
	default:
		/* Catches states added without updating this mapping. */
		return "unknown";
	}
}
193
194 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
195 #if defined(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
void bt_l2cap_chan_set_state_debug(struct bt_l2cap_chan *chan,
				   bt_l2cap_chan_state_t state,
				   const char *func, int line)
{
	/* Debug variant of the state setter: logs every transition and warns
	 * about transitions that are not part of the expected channel state
	 * machine. Invalid-but-known transitions are still applied (warn
	 * only); unknown states are rejected and the state left unchanged.
	 * @p func / @p line identify the caller in the log output.
	 */
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("chan %p psm 0x%04x %s -> %s", chan, le_chan->psm,
		bt_l2cap_chan_state_str(le_chan->state), bt_l2cap_chan_state_str(state));

	/* check transitions validness */
	switch (state) {
	case BT_L2CAP_DISCONNECTED:
		/* regardless of old state always allows this state */
		break;
	case BT_L2CAP_CONNECTING:
		if (le_chan->state != BT_L2CAP_DISCONNECTED) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_CONFIG:
		if (le_chan->state != BT_L2CAP_CONNECTING) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_CONNECTED:
		if (le_chan->state != BT_L2CAP_CONFIG &&
		    le_chan->state != BT_L2CAP_CONNECTING) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_DISCONNECTING:
		if (le_chan->state != BT_L2CAP_CONFIG &&
		    le_chan->state != BT_L2CAP_CONNECTED) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	default:
		LOG_ERR("%s()%d: unknown (%u) state was set", func, line, state);
		return;
	}

	le_chan->state = state;
}
239 #else
void bt_l2cap_chan_set_state(struct bt_l2cap_chan *chan,
			     bt_l2cap_chan_state_t state)
{
	/* Non-debug state setter: apply the new state without any
	 * transition validation or logging.
	 */
	BT_L2CAP_LE_CHAN(chan)->state = state;
}
245 #endif /* CONFIG_BT_L2CAP_LOG_LEVEL_DBG */
246 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
247
248 static void cancel_data_ready(struct bt_l2cap_le_chan *lechan);
249 static bool chan_has_data(struct bt_l2cap_le_chan *lechan);
void bt_l2cap_chan_del(struct bt_l2cap_chan *chan)
{
	/* Tear down a channel: drop it from the data-ready list, flush its
	 * TX queue, notify the owner via the `disconnected` op, detach it
	 * from the connection and finally run the destroy/released hooks.
	 * After the destroy hook runs, @p chan must not be dereferenced by
	 * the caller.
	 */
	const struct bt_l2cap_chan_ops *ops = chan->ops;
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("conn %p chan %p", chan->conn, chan);

	if (!chan->conn) {
		/* Channel already detached from any connection; only the
		 * destroy/released hooks remain to be run.
		 */
		goto destroy;
	}

	cancel_data_ready(le_chan);

	/* Remove buffers on the PDU TX queue. We can't do that in
	 * `l2cap_chan_destroy()` as it is not called for fixed channels.
	 */
	while (chan_has_data(le_chan)) {
		struct net_buf *buf = k_fifo_get(&le_chan->tx_queue, K_NO_WAIT);

		net_buf_unref(buf);
	}

	if (ops->disconnected) {
		ops->disconnected(chan);
	}

	chan->conn = NULL;

destroy:
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Reset internal members of common channel */
	bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTED);
	BT_L2CAP_LE_CHAN(chan)->psm = 0U;
#endif
	if (chan->destroy) {
		chan->destroy(chan);
	}

	if (ops->released) {
		ops->released(chan);
	}
}
292
293 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_rtx_timeout(struct k_work * work)294 static void l2cap_rtx_timeout(struct k_work *work)
295 {
296 struct bt_l2cap_le_chan *chan = LE_CHAN_RTX(work);
297 struct bt_conn *conn = chan->chan.conn;
298
299 LOG_ERR("chan %p timeout", chan);
300
301 bt_l2cap_chan_remove(conn, &chan->chan);
302 bt_l2cap_chan_del(&chan->chan);
303
304 /* Remove other channels if pending on the same ident */
305 while ((chan = l2cap_remove_ident(conn, chan->ident))) {
306 bt_l2cap_chan_del(&chan->chan);
307 }
308 }
309
310 static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
311 struct net_buf *buf);
312
l2cap_rx_process(struct k_work * work)313 static void l2cap_rx_process(struct k_work *work)
314 {
315 struct bt_l2cap_le_chan *ch = CHAN_RX(work);
316 struct net_buf *buf;
317
318 while ((buf = k_fifo_get(&ch->rx_queue, K_NO_WAIT))) {
319 LOG_DBG("ch %p buf %p", ch, buf);
320 l2cap_chan_le_recv(ch, buf);
321 net_buf_unref(buf);
322 }
323 }
324 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
325
void bt_l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
		       bt_l2cap_chan_destroy_t destroy)
{
	/* Link @p chan onto @p conn's channel list and record the destroy
	 * hook that bt_l2cap_chan_del() will invoke.
	 */
	/* Attach channel to the connection */
	sys_slist_append(&conn->channels, &chan->node);
	chan->conn = conn;
	chan->destroy = destroy;

	LOG_DBG("conn %p chan %p", conn, chan);
}
336
static void init_le_chan_private(struct bt_l2cap_le_chan *le_chan)
{
	/* Initialize private members of the struct. We can't "just memset" as
	 * some members are used as application parameters.
	 */
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* No SDU reassembly in progress */
	le_chan->_sdu = NULL;
	le_chan->_sdu_len = 0;
#if defined(CONFIG_BT_L2CAP_SEG_RECV)
	le_chan->_sdu_len_done = 0;
#endif /* CONFIG_BT_L2CAP_SEG_RECV */
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
	/* Not on any data-ready list and no PDU mid-transmission */
	memset(&le_chan->_pdu_ready, 0, sizeof(le_chan->_pdu_ready));
	le_chan->_pdu_ready_lock = 0;
	le_chan->_pdu_remaining = 0;
}
353
static bool l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			   bt_l2cap_chan_destroy_t destroy)
{
	/* Common channel setup: allocate an RX CID (dynamic channels only),
	 * reset status and private state, attach the channel to @p conn and
	 * arm the per-channel work items. Returns false when no dynamic CID
	 * could be allocated.
	 */
	struct bt_l2cap_le_chan *le_chan;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	le_chan = l2cap_chan_alloc_cid(conn, chan);
#else
	/* Fixed channels arrive with their CID already set. */
	le_chan = BT_L2CAP_LE_CHAN(chan);
#endif

	if (!le_chan) {
		LOG_ERR("Unable to allocate L2CAP channel ID");
		return false;
	}

	atomic_clear(chan->status);
	init_le_chan_private(le_chan);

	bt_l2cap_chan_add(conn, chan, destroy);

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* All dynamic channels have the destroy handler which makes sure that
	 * the RTX work structure is properly released with a cancel sync.
	 * The fixed signal channel is only removed when disconnected and the
	 * disconnected handler is always called from the workqueue itself so
	 * canceling from there should always succeed.
	 */
	k_work_init_delayable(&le_chan->rtx_work, l2cap_rtx_timeout);

	if (L2CAP_LE_CID_IS_DYN(le_chan->rx.cid)) {
		/* Only dynamic channels receive via the deferred RX work. */
		k_work_init(&le_chan->rx_work, l2cap_rx_process);
		k_fifo_init(&le_chan->rx_queue);
		bt_l2cap_chan_set_state(chan, BT_L2CAP_CONNECTING);
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

	return true;
}
393
void bt_l2cap_connected(struct bt_conn *conn)
{
	/* ACL link established: bring up every registered fixed channel on
	 * this connection. BR/EDR links are handed off to the Classic
	 * implementation.
	 */
	struct bt_l2cap_chan *chan;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_connected(conn);
		return;
	}

	STRUCT_SECTION_FOREACH(bt_l2cap_fixed_chan, fchan) {
		struct bt_l2cap_le_chan *le_chan;

		/* accept() may decline, e.g. if the channel is not
		 * applicable to this connection.
		 */
		if (fchan->accept(conn, &chan) < 0) {
			continue;
		}

		le_chan = BT_L2CAP_LE_CHAN(chan);

		/* Fill up remaining fixed channel context attached in
		 * fchan->accept()
		 */
		le_chan->rx.cid = fchan->cid;
		le_chan->tx.cid = fchan->cid;

		if (!l2cap_chan_add(conn, chan, fchan->destroy)) {
			return;
		}

		k_fifo_init(&le_chan->tx_queue);

		if (chan->ops->connected) {
			chan->ops->connected(chan);
		}

		/* Always set output status to fixed channels */
		atomic_set_bit(chan->status, BT_L2CAP_STATUS_OUT);

		if (chan->ops->status) {
			chan->ops->status(chan, chan->status);
		}
	}
}
437
void bt_l2cap_disconnected(struct bt_conn *conn)
{
	/* ACL link lost: delete every channel on the connection. The SAFE
	 * iterator is required because bt_l2cap_chan_del() may unlink the
	 * current node.
	 */
	struct bt_l2cap_chan *chan, *next;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_disconnected(conn);
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
		bt_l2cap_chan_del(chan);
	}
}
452
static struct net_buf *l2cap_create_le_sig_pdu(uint8_t code, uint8_t ident,
					       uint16_t len)
{
	/* Allocate a buffer and pre-fill the L2CAP signaling header
	 * (code/ident/len). @p len is the length of the command payload the
	 * caller will append. Returns NULL on allocation timeout.
	 */
	struct bt_l2cap_sig_hdr *hdr;
	struct net_buf_pool *pool = NULL;
	struct net_buf *buf;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Disconnect requests come from the dedicated pool so they can be
	 * sent even under TX buffer exhaustion.
	 */
	if (code == BT_L2CAP_DISCONN_REQ) {
		pool = &disc_pool;
	}
#endif
	/* Don't wait more than the minimum RTX timeout of 2 seconds */
	buf = bt_l2cap_create_pdu_timeout(pool, 0, L2CAP_RTX_TIMEOUT);
	if (!buf) {
		/* If it was not possible to allocate a buffer within the
		 * timeout return NULL.
		 */
		LOG_ERR("Unable to allocate buffer for op 0x%02x", code);
		return NULL;
	}

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = code;
	hdr->ident = ident;
	hdr->len = sys_cpu_to_le16(len);

	return buf;
}
482
483 /* Send the buffer over the signalling channel. Release it in case of failure.
484 * Any other cleanup in failure to send should be handled by the disconnected
485 * handler.
486 */
static int l2cap_send_sig(struct bt_conn *conn, struct net_buf *buf)
{
	/* Queue @p buf on the fixed LE signaling channel of @p conn. On
	 * failure the buffer reference is released here, so callers never
	 * need to clean it up themselves.
	 */
	struct bt_l2cap_chan *ch = bt_l2cap_le_lookup_tx_cid(conn, BT_L2CAP_CID_LE_SIG);
	struct bt_l2cap_le_chan *chan = BT_L2CAP_LE_CHAN(ch);

	int err = bt_l2cap_send_pdu(chan, buf, NULL, NULL);

	if (err) {
		net_buf_unref(buf);
	}

	return err;
}
500
501 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
static void l2cap_chan_send_req(struct bt_l2cap_chan *chan,
				struct net_buf *buf, k_timeout_t timeout)
{
	/* Send a signaling request on @p chan's connection and arm the RTX
	 * timer that fires if the peer never responds. If sending fails the
	 * buffer has already been released by l2cap_send_sig() and the timer
	 * is not started.
	 */
	if (l2cap_send_sig(chan->conn, buf)) {
		return;
	}

	/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part A] page 126:
	 *
	 * The value of this timer is implementation-dependent but the minimum
	 * initial value is 1 second and the maximum initial value is 60
	 * seconds. One RTX timer shall exist for each outstanding signaling
	 * request, including each Echo Request. The timer disappears on the
	 * final expiration, when the response is received, or the physical
	 * link is lost.
	 */
	k_work_reschedule(&(BT_L2CAP_LE_CHAN(chan)->rtx_work), timeout);
}
520
static int l2cap_le_conn_req(struct bt_l2cap_le_chan *ch)
{
	/* Send an LE Credit Based Connection Request for @p ch, advertising
	 * our local (RX) MTU/MPS/credits. Assigns a fresh signaling ident to
	 * the channel so the response can be matched. Returns -ENOMEM if no
	 * buffer could be allocated.
	 */
	struct net_buf *buf;
	struct bt_l2cap_le_conn_req *req;

	ch->ident = get_ident();

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CONN_REQ,
				      ch->ident, sizeof(*req));
	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));
	req->psm = sys_cpu_to_le16(ch->psm);
	req->scid = sys_cpu_to_le16(ch->rx.cid);
	req->mtu = sys_cpu_to_le16(ch->rx.mtu);
	req->mps = sys_cpu_to_le16(ch->rx.mps);
	req->credits = sys_cpu_to_le16(ch->rx.credits);

	l2cap_chan_send_req(&ch->chan, buf, L2CAP_CONN_TIMEOUT);

	return 0;
}
545
546 #if defined(CONFIG_BT_L2CAP_ECRED)
static int l2cap_ecred_conn_req(struct bt_l2cap_chan **chan, int channels)
{
	/* Send one Enhanced Credit Based Connection Request covering up to
	 * @p channels channels. All channels in a single request must share
	 * the same PSM and MTU; each gets the same signaling ident so their
	 * responses can be matched as a group.
	 */
	struct net_buf *buf;
	struct bt_l2cap_ecred_conn_req *req;
	struct bt_l2cap_le_chan *ch;
	int i;
	uint8_t ident;
	uint16_t req_psm;
	uint16_t req_mtu;

	if (!chan || !channels) {
		return -EINVAL;
	}

	ident = get_ident();

	/* Payload: fixed header plus one 16-bit source CID per channel. */
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_CONN_REQ, ident,
				      sizeof(*req) +
				      (channels * sizeof(uint16_t)));

	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));

	ch = BT_L2CAP_LE_CHAN(chan[0]);

	/* Init common parameters */
	req->psm = sys_cpu_to_le16(ch->psm);
	req->mtu = sys_cpu_to_le16(ch->rx.mtu);
	req->mps = sys_cpu_to_le16(ch->rx.mps);
	req->credits = sys_cpu_to_le16(ch->rx.credits);
	/* NOTE(review): the request carries rx.mtu but the cross-channel
	 * consistency check below uses tx.mtu — confirm this asymmetry is
	 * intended.
	 */
	req_psm = ch->psm;
	req_mtu = ch->tx.mtu;

	for (i = 0; i < channels; i++) {
		ch = BT_L2CAP_LE_CHAN(chan[i]);

		__ASSERT(ch->psm == req_psm,
			 "The PSM shall be the same for channels in the same request.");
		__ASSERT(ch->tx.mtu == req_mtu,
			 "The MTU shall be the same for channels in the same request.");

		ch->ident = ident;

		net_buf_add_le16(buf, ch->rx.cid);
	}

	l2cap_chan_send_req(*chan, buf, L2CAP_CONN_TIMEOUT);

	return 0;
}
600 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
601
static void l2cap_le_encrypt_change(struct bt_l2cap_chan *chan, uint8_t status)
{
	/* Encryption procedure finished for @p chan's connection. Channels
	 * that were waiting for encryption retry their pending connection
	 * request; on encryption failure (non-zero @p status) the channel is
	 * torn down instead.
	 */
	int err;
	struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(chan);

	/* Skip channels that are not pending waiting for encryption */
	if (!atomic_test_and_clear_bit(chan->status,
				       BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
		return;
	}

	if (status) {
		goto fail;
	}

#if defined(CONFIG_BT_L2CAP_ECRED)
	/* A non-zero ident means this channel was part of an ECRED request;
	 * regroup all channels that shared that ident and retry them in a
	 * single new request.
	 */
	if (le->ident) {
		struct bt_l2cap_chan *echan[BT_L2CAP_ECRED_CHAN_MAX_PER_REQ];
		struct bt_l2cap_chan *ch;
		int i = 0;

		SYS_SLIST_FOR_EACH_CONTAINER(&chan->conn->channels, ch, node) {
			if (le->ident == BT_L2CAP_LE_CHAN(ch)->ident) {
				__ASSERT(i < BT_L2CAP_ECRED_CHAN_MAX_PER_REQ,
					 "There can only be BT_L2CAP_ECRED_CHAN_MAX_PER_REQ "
					 "channels from the same request.");
				atomic_clear_bit(ch->status, BT_L2CAP_STATUS_ENCRYPT_PENDING);
				echan[i++] = ch;
			}
		}

		/* Retry ecred connect */
		l2cap_ecred_conn_req(echan, i);
		return;
	}
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */

	/* Retry to connect */
	err = l2cap_le_conn_req(le);
	if (err) {
		goto fail;
	}

	return;
fail:
	bt_l2cap_chan_remove(chan->conn, chan);
	bt_l2cap_chan_del(chan);
}
650 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
651
void bt_l2cap_security_changed(struct bt_conn *conn, uint8_t hci_status)
{
	/* Security level of @p conn changed (HCI encryption change event).
	 * Retry any channels paused waiting for encryption, then notify each
	 * channel's owner via the encrypt_change op. SAFE iteration because
	 * l2cap_le_encrypt_change() may delete channels on failure.
	 */
	struct bt_l2cap_chan *chan, *next;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		l2cap_br_encrypt_change(conn, hci_status);
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
		l2cap_le_encrypt_change(chan, hci_status);
#endif

		if (chan->ops->encrypt_change) {
			chan->ops->encrypt_change(chan, hci_status);
		}
	}
}
672
struct net_buf *bt_l2cap_create_pdu_timeout(struct net_buf_pool *pool,
					    size_t reserve,
					    k_timeout_t timeout)
{
	/* Allocate a PDU buffer with headroom for the L2CAP header plus
	 * @p reserve extra bytes. When called from the system workqueue the
	 * timeout is forced to K_NO_WAIT — blocking there could stall the
	 * work items responsible for freeing buffers.
	 */
	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
	    k_current_get() == k_work_queue_thread_get(&k_sys_work_q)) {
		timeout = K_NO_WAIT;
	}

	return bt_conn_create_pdu_timeout(pool,
					  sizeof(struct bt_l2cap_hdr) + reserve,
					  timeout);
}
686
static void raise_data_ready(struct bt_l2cap_le_chan *le_chan)
{
	/* Mark @p le_chan as having data to send by appending it to the
	 * connection's data-ready list. The atomic lock acts as a
	 * test-and-set so the channel is only ever linked once. The TX
	 * processor is kicked either way.
	 */
	if (!atomic_set(&le_chan->_pdu_ready_lock, 1)) {
		sys_slist_append(&le_chan->chan.conn->l2cap_data_ready,
				 &le_chan->_pdu_ready);
		LOG_DBG("data ready raised %p", le_chan);
	} else {
		LOG_DBG("data ready already %p", le_chan);
	}

	bt_conn_data_ready(le_chan->chan.conn);
}
699
static void lower_data_ready(struct bt_l2cap_le_chan *le_chan)
{
	/* Remove @p le_chan from the connection's data-ready list and clear
	 * its ready flag. The channel is asserted to be at the head of the
	 * list — callers only lower a channel they just pulled from.
	 */
	struct bt_conn *conn = le_chan->chan.conn;
	__maybe_unused sys_snode_t *s = sys_slist_get(&conn->l2cap_data_ready);

	LOG_DBG("%p", le_chan);

	__ASSERT_NO_MSG(s == &le_chan->_pdu_ready);

	__maybe_unused atomic_t old = atomic_set(&le_chan->_pdu_ready_lock, 0);

	__ASSERT_NO_MSG(old);
}
713
static void cancel_data_ready(struct bt_l2cap_le_chan *le_chan)
{
	/* Like lower_data_ready() but for teardown: the channel may be
	 * anywhere in the data-ready list (or absent), so search-and-remove
	 * instead of popping the head.
	 */
	struct bt_conn *conn = le_chan->chan.conn;

	LOG_DBG("%p", le_chan);

	sys_slist_find_and_remove(&conn->l2cap_data_ready,
				  &le_chan->_pdu_ready);
	atomic_set(&le_chan->_pdu_ready_lock, 0);
}
724
int bt_l2cap_send_pdu(struct bt_l2cap_le_chan *le_chan, struct net_buf *pdu,
		      bt_conn_tx_cb_t cb, void *user_data)
{
	/* Queue @p pdu for transmission on @p le_chan. @p cb (with
	 * @p user_data) is stored in the buffer's user data and invoked when
	 * the PDU has been sent. Ownership of the buffer passes to the stack
	 * only on success (return 0); on error the caller keeps its
	 * reference.
	 */
	if (!le_chan->chan.conn || le_chan->chan.conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	if (pdu->ref != 1) {
		/* The host may alter the buf contents when fragmenting. Higher
		 * layers cannot expect the buf contents to stay intact. Extra
		 * refs suggests a silent data corruption would occur if not for
		 * this error.
		 */
		LOG_ERR("Expecting 1 ref, got %d", pdu->ref);
		return -EINVAL;
	}

	/* NOTE(review): the comparison is against sizeof(struct closure) but
	 * the log prints CONFIG_BT_CONN_TX_USER_DATA_SIZE — confirm the two
	 * are the same quantity.
	 */
	if (pdu->user_data_size < sizeof(struct closure)) {
		LOG_DBG("not enough room in user_data %d < %d pool %u",
			pdu->user_data_size,
			CONFIG_BT_CONN_TX_USER_DATA_SIZE,
			pdu->pool_id);
		return -EINVAL;
	}

	make_closure(pdu->user_data, cb, user_data);
	LOG_DBG("push: pdu %p len %d cb %p userdata %p", pdu, pdu->len, cb, user_data);

	k_fifo_put(&le_chan->tx_queue, pdu);

	/* Flag the channel ready; actual sending happens in the TX
	 * processor via l2cap_data_pull().
	 */
	raise_data_ready(le_chan);

	return 0;
}
759
/* True when the channel has at least one PDU queued for transmission. */
static bool chan_has_data(struct bt_l2cap_le_chan *lechan)
{
	return !k_fifo_is_empty(&lechan->tx_queue);
}
765
766 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
static bool test_and_dec(atomic_t *target)
{
	/* Atomically decrement @p target unless it is already zero. Returns
	 * false (without modifying the value) when zero, true after a
	 * successful decrement. Implemented as a CAS retry loop so
	 * concurrent decrements never drive the value below zero.
	 */
	atomic_t old_value, new_value;

	do {
		old_value = atomic_get(target);
		if (!old_value) {
			return false;
		}

		new_value = old_value - 1;
	} while (atomic_cas(target, old_value, new_value) == 0);

	return true;
}
782 #endif
783
/* Consume one TX credit from the channel before sending a K-frame. */
static void chan_take_credit(struct bt_l2cap_le_chan *lechan)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Only dynamic (credit-based) channels use credits. */
	if (!L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
		return;
	}

	if (!test_and_dec(&lechan->tx.credits)) {
		/* Always ensure you have credits before calling this fn */
		__ASSERT_NO_MSG(0);
	}

	/* Notify channel user that it can't send anymore on this channel. */
	if (!atomic_get(&lechan->tx.credits)) {
		LOG_DBG("chan %p paused", lechan);
		atomic_clear_bit(lechan->chan.status, BT_L2CAP_STATUS_OUT);

		if (lechan->chan.ops->status) {
			lechan->chan.ops->status(&lechan->chan, lechan->chan.status);
		}
	}
#endif
}
808
static struct bt_l2cap_le_chan *get_ready_chan(struct bt_conn *conn)
{
	/* Pick the next channel on @p conn that actually has queued data.
	 * Channels flagged ready but with an empty queue are lowered (pruned
	 * from the data-ready list) along the way. Returns NULL when nothing
	 * is sendable.
	 */
	struct bt_l2cap_le_chan *lechan;

	sys_snode_t *pdu_ready = sys_slist_peek_head(&conn->l2cap_data_ready);

	if (!pdu_ready) {
		LOG_DBG("nothing to send on this conn");
		return NULL;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->l2cap_data_ready, lechan, _pdu_ready) {
		if (chan_has_data(lechan)) {
			LOG_DBG("sending from chan %p (%s) data %d", lechan,
				L2CAP_LE_CID_IS_DYN(lechan->tx.cid) ? "dynamic" : "static",
				chan_has_data(lechan));
			return lechan;
		}

		LOG_DBG("chan %p has no data", lechan);
		lower_data_ready(lechan);
	}

	return NULL;
}
834
static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data, int err)
{
	/* TX-complete callback for the final fragment of an SDU.
	 * @p user_data carries the TX CID (packed as a pointer) so the
	 * channel can be looked up even if it was deleted in the meantime —
	 * in which case the notification is silently dropped.
	 */
	struct bt_l2cap_chan *chan;
	uint16_t cid = POINTER_TO_UINT(user_data);

	LOG_DBG("conn %p CID 0x%04x err %d", conn, cid, err);

	if (err) {
		LOG_DBG("error %d when sending SDU", err);

		return;
	}

	chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
	if (!chan) {
		LOG_DBG("got SDU sent cb for disconnected chan (CID %u)", cid);

		return;
	}

	if (chan->ops->sent) {
		chan->ops->sent(chan);
	}
}
859
get_pdu_len(struct bt_l2cap_le_chan * lechan,struct net_buf * buf)860 static uint16_t get_pdu_len(struct bt_l2cap_le_chan *lechan,
861 struct net_buf *buf)
862 {
863 if (!L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
864 /* No segmentation shenanigans on static channels */
865 return buf->len;
866 }
867
868 return MIN(buf->len, lechan->tx.mps);
869 }
870
static bool chan_has_credits(struct bt_l2cap_le_chan *lechan)
{
	/* True when the channel may start a new K-frame. Static channels
	 * (and all channels when dynamic support is off) are not credit
	 * controlled and always may.
	 */
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	if (!L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
		return true;
	}

	LOG_DBG("chan %p credits %ld", lechan, atomic_get(&lechan->tx.credits));

	return atomic_get(&lechan->tx.credits) >= 1;
#else
	return true;
#endif
}
885
/* Test hook: weak no-op that test builds override to observe every
 * l2cap_data_pull() invocation.
 */
__weak void bt_test_l2cap_data_pull_spy(struct bt_conn *conn,
					struct bt_l2cap_le_chan *lechan,
					size_t amount,
					size_t *length)
{
}
892
struct net_buf *l2cap_data_pull(struct bt_conn *conn,
				size_t amount,
				size_t *length)
{
	/* TX processor entry point: hand out (up to) @p amount bytes of the
	 * next PDU to send on @p conn. The returned buffer stays queued on
	 * its channel until fully consumed; *length is set to the bytes
	 * remaining in the current PDU (including any header just added) so
	 * the caller can tell when the PDU is done. Returns NULL when there
	 * is nothing sendable right now.
	 */
	struct bt_l2cap_le_chan *lechan = get_ready_chan(conn);

	if (IS_ENABLED(CONFIG_BT_TESTING)) {
		/* Allow tests to snoop in */
		bt_test_l2cap_data_pull_spy(conn, lechan, amount, length);
	}

	if (!lechan) {
		LOG_DBG("no channel conn %p", conn);
		bt_tx_irq_raise();
		return NULL;
	}

	/* Leave the PDU buffer in the queue until we have sent all its
	 * fragments.
	 *
	 * For SDUs we do the same, we keep it in the queue until all the
	 * segments have been sent, adding the PDU headers just-in-time.
	 */
	struct net_buf *pdu = k_fifo_peek_head(&lechan->tx_queue);

	/* We don't have anything to send for the current channel. We could
	 * however have something to send on another channel that is attached to
	 * the same ACL connection. Re-trigger the TX processor: it will call us
	 * again and this time we will select another channel to pull data from.
	 */
	if (!pdu) {
		bt_tx_irq_raise();
		return NULL;
	}

	if (bt_buf_has_view(pdu)) {
		LOG_ERR("already have view on %p", pdu);
		return NULL;
	}

	if (lechan->_pdu_remaining == 0 && !chan_has_credits(lechan)) {
		/* We don't have credits to send a new K-frame PDU. Remove the
		 * channel from the ready-list, it will be added back later when
		 * we get more credits.
		 */
		LOG_DBG("no credits for new K-frame on %p", lechan);
		lower_data_ready(lechan);
		return NULL;
	}

	/* Add PDU header: _pdu_remaining == 0 means this is the first
	 * fragment of a new PDU, so the basic header (and a credit, for
	 * dynamic channels) is taken here.
	 */
	if (lechan->_pdu_remaining == 0) {
		struct bt_l2cap_hdr *hdr;
		uint16_t pdu_len = get_pdu_len(lechan, pdu);

		LOG_DBG("Adding L2CAP PDU header: buf %p chan %p len %u / %u",
			pdu, lechan, pdu_len, pdu->len);

		LOG_HEXDUMP_DBG(pdu->data, pdu->len, "PDU payload");

		hdr = net_buf_push(pdu, sizeof(*hdr));
		hdr->len = sys_cpu_to_le16(pdu_len);
		hdr->cid = sys_cpu_to_le16(lechan->tx.cid);

		lechan->_pdu_remaining = pdu_len + sizeof(*hdr);
		chan_take_credit(lechan);
	}

	/* Whether the data to be pulled is the last ACL fragment */
	bool last_frag = amount >= lechan->_pdu_remaining;

	/* Whether the data to be pulled is part of the last L2CAP segment. For
	 * static channels, this variable will always be true, even though
	 * static channels don't have the concept of L2CAP segments.
	 */
	bool last_seg = lechan->_pdu_remaining == pdu->len;

	if (last_frag && last_seg) {
		LOG_DBG("last frag of last seg, dequeuing %p", pdu);
		__maybe_unused struct net_buf *b = k_fifo_get(&lechan->tx_queue, K_NO_WAIT);

		__ASSERT_NO_MSG(b == pdu);
	}

	if (last_frag && L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
		bool sdu_end = last_frag && last_seg;

		LOG_DBG("adding %s callback", sdu_end ? "`sdu_sent`" : "NULL");
		/* No user callbacks for SDUs */
		make_closure(pdu->user_data,
			     sdu_end ? l2cap_chan_sdu_sent : NULL,
			     sdu_end ? UINT_TO_POINTER(lechan->tx.cid) : NULL);
	}

	if (last_frag) {
		LOG_DBG("done sending PDU");

		/* Lowering the "request to send" and raising it again allows
		 * fair scheduling of channels on an ACL link: the channel is
		 * marked as "ready to send" by adding a reference to it on a
		 * FIFO on `conn`. Adding it again will send it to the back of
		 * the queue.
		 *
		 * TODO: add a user-controlled QoS function.
		 */
		LOG_DBG("chan %p done", lechan);
		lower_data_ready(lechan);

		/* Append channel to list if it still has data */
		if (chan_has_data(lechan)) {
			LOG_DBG("chan %p ready", lechan);
			raise_data_ready(lechan);
		}
	}

	/* This is used by `conn.c` to figure out if the PDU is done sending. */
	*length = lechan->_pdu_remaining;

	if (lechan->_pdu_remaining > amount) {
		lechan->_pdu_remaining -= amount;
	} else {
		lechan->_pdu_remaining = 0;
	}

	return pdu;
}
1019
static void l2cap_send_reject(struct bt_conn *conn, uint8_t ident,
			      uint16_t reason, void *data, uint8_t data_len)
{
	/* Send an L2CAP Command Reject for the request identified by
	 * @p ident. Optional @p data (e.g. reason-specific fields) is
	 * appended after the reason code. Failure to allocate a buffer is
	 * silently ignored — the peer's RTX timer will handle it.
	 */
	struct bt_l2cap_cmd_reject *rej;
	struct net_buf *buf;

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_CMD_REJECT, ident,
				      sizeof(*rej) + data_len);
	if (!buf) {
		return;
	}

	rej = net_buf_add(buf, sizeof(*rej));
	rej->reason = sys_cpu_to_le16(reason);

	if (data) {
		net_buf_add_mem(buf, data, data_len);
	}

	l2cap_send_sig(conn, buf);
}
1041
static void le_conn_param_rsp(struct bt_l2cap *l2cap, struct net_buf *buf)
{
	/* Handle a Connection Parameter Update Response. The result is only
	 * logged; no further action is taken on acceptance or rejection.
	 */
	struct bt_l2cap_conn_param_rsp *rsp = (void *)buf->data;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small LE conn param rsp");
		return;
	}

	LOG_DBG("LE conn param rsp result %u", sys_le16_to_cpu(rsp->result));
}
1053
/* Handle an incoming L2CAP_CONNECTION_PARAMETER_UPDATE_REQ.
 *
 * Only valid when we are the central: a peripheral receiving this
 * request replies with Command Reject (Not Understood). Otherwise the
 * requested parameters are passed to le_param_req() for acceptance and
 * the accept/reject result is sent back; on acceptance the controller
 * is asked to apply the new connection parameters.
 */
static void le_conn_param_update_req(struct bt_l2cap *l2cap, uint8_t ident,
				     struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_le_conn_param param;
	struct bt_l2cap_conn_param_rsp *rsp;
	struct bt_l2cap_conn_param_req *req = (void *)buf->data;
	bool accepted;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn update param req");
		return;
	}

	if (conn->state != BT_CONN_CONNECTED) {
		LOG_WRN("Not connected");
		return;
	}

	/* Peripherals must not act on this request; reject it. */
	if (conn->role != BT_HCI_ROLE_CENTRAL) {
		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_NOT_UNDERSTOOD,
				  NULL, 0);
		return;
	}

	/* Convert the little-endian request fields to host byte order. */
	param.interval_min = sys_le16_to_cpu(req->min_interval);
	param.interval_max = sys_le16_to_cpu(req->max_interval);
	param.latency = sys_le16_to_cpu(req->latency);
	param.timeout = sys_le16_to_cpu(req->timeout);

	LOG_DBG("min 0x%04x max 0x%04x latency: 0x%04x timeout: 0x%04x", param.interval_min,
		param.interval_max, param.latency, param.timeout);

	/* Allocate the response before consulting the policy so we never
	 * decide "accepted" and then fail to answer.
	 */
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_CONN_PARAM_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	accepted = le_param_req(conn, &param);

	rsp = net_buf_add(buf, sizeof(*rsp));
	if (accepted) {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_ACCEPTED);
	} else {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_REJECTED);
	}

	l2cap_send_sig(conn, buf);

	/* Apply the accepted parameters via the controller. */
	if (accepted) {
		bt_conn_le_conn_update(conn, &param);
	}
}
1108
/* Find the channel on @p conn whose TX (peer-side) CID equals @p cid.
 *
 * @return The matching channel, or NULL if none exists.
 */
struct bt_l2cap_chan *bt_l2cap_le_lookup_tx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *ch;

	/* Linear scan over the connection's channel list. */
	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, ch, node) {
		if (BT_L2CAP_LE_CHAN(ch)->tx.cid == cid) {
			return ch;
		}
	}

	return NULL;
}
1122
/* Find the channel on @p conn whose RX (local-side) CID equals @p cid.
 *
 * @return The matching channel, or NULL if none exists.
 */
struct bt_l2cap_chan *bt_l2cap_le_lookup_rx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *ch;

	/* Linear scan over the connection's channel list. */
	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, ch, node) {
		if (BT_L2CAP_LE_CHAN(ch)->rx.cid == cid) {
			return ch;
		}
	}

	return NULL;
}
1136
1137 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Look up a registered L2CAP server by its PSM.
 *
 * @return The server registered for @p psm, or NULL if none.
 */
struct bt_l2cap_server *bt_l2cap_server_lookup_psm(uint16_t psm)
{
	struct bt_l2cap_server *srv;

	/* Linear scan of the global server registry. */
	SYS_SLIST_FOR_EACH_CONTAINER(&servers, srv, node) {
		if (srv->psm == psm) {
			return srv;
		}
	}

	return NULL;
}
1150
bt_l2cap_server_register(struct bt_l2cap_server * server)1151 int bt_l2cap_server_register(struct bt_l2cap_server *server)
1152 {
1153 if (!server->accept) {
1154 return -EINVAL;
1155 }
1156
1157 if (server->psm) {
1158 if (server->psm < L2CAP_LE_PSM_FIXED_START ||
1159 server->psm > L2CAP_LE_PSM_DYN_END) {
1160 return -EINVAL;
1161 }
1162
1163 /* Check if given PSM is already in use */
1164 if (bt_l2cap_server_lookup_psm(server->psm)) {
1165 LOG_DBG("PSM already registered");
1166 return -EADDRINUSE;
1167 }
1168 } else {
1169 uint16_t psm;
1170
1171 for (psm = L2CAP_LE_PSM_DYN_START;
1172 psm <= L2CAP_LE_PSM_DYN_END; psm++) {
1173 if (!bt_l2cap_server_lookup_psm(psm)) {
1174 break;
1175 }
1176 }
1177
1178 if (psm > L2CAP_LE_PSM_DYN_END) {
1179 LOG_WRN("No free dynamic PSMs available");
1180 return -EADDRNOTAVAIL;
1181 }
1182
1183 LOG_DBG("Allocated PSM 0x%04x for new server", psm);
1184 server->psm = psm;
1185 }
1186
1187 if (server->sec_level > BT_SECURITY_L4) {
1188 return -EINVAL;
1189 } else if (server->sec_level < BT_SECURITY_L1) {
1190 /* Level 0 is only applicable for BR/EDR */
1191 server->sec_level = BT_SECURITY_L1;
1192 }
1193
1194 LOG_DBG("PSM 0x%04x", server->psm);
1195
1196 sys_slist_append(&servers, &server->node);
1197
1198 return 0;
1199 }
1200
1201 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
l2cap_chan_seg_recv_rx_init(struct bt_l2cap_le_chan * chan)1202 static void l2cap_chan_seg_recv_rx_init(struct bt_l2cap_le_chan *chan)
1203 {
1204 if (chan->rx.mps > BT_L2CAP_RX_MTU) {
1205 LOG_ERR("Limiting RX MPS by stack buffer size.");
1206 chan->rx.mps = BT_L2CAP_RX_MTU;
1207 }
1208
1209 chan->_sdu_len = 0;
1210 chan->_sdu_len_done = 0;
1211 }
1212 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
1213
/* Initialize the RX side of a dynamic channel: pick an MTU if the
 * application did not set one, derive an MPS that fits the stack RX
 * buffers, and grant the initial credit.
 */
static void l2cap_chan_rx_init(struct bt_l2cap_le_chan *chan)
{
	LOG_DBG("chan %p", chan);

	/* Redirect to experimental API. */
	IF_ENABLED(CONFIG_BT_L2CAP_SEG_RECV, ({
		if (chan->chan.ops->seg_recv) {
			l2cap_chan_seg_recv_rx_init(chan);
			return;
		}
	}))

	/* Use existing MTU if defined */
	if (!chan->rx.mtu) {
		/* If application has not provide the incoming L2CAP SDU MTU use
		 * an MTU that does not require segmentation.
		 */
		chan->rx.mtu = BT_L2CAP_SDU_RX_MTU;
	}

	/* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE as the
	 * remaining bytes cannot be used.
	 */
	chan->rx.mps = MIN(chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE,
			   BT_L2CAP_RX_MTU);

	/* Truncate MTU if channel have disabled segmentation but still have
	 * set an MTU which requires it.
	 */
	if (!chan->chan.ops->alloc_buf &&
	    (chan->rx.mps < chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE)) {
		LOG_WRN("Segmentation disabled but MTU > MPS, truncating MTU");
		chan->rx.mtu = chan->rx.mps - BT_L2CAP_SDU_HDR_SIZE;
	}

	/* Start with a single credit; more are granted as data flows. */
	atomic_set(&chan->rx.credits, 1);
}
1251
/** @brief Get @c chan->state.
 *
 * This field does not exist when @kconfig{CONFIG_BT_L2CAP_DYNAMIC_CHANNEL} is
 * disabled. In that case, this function returns @ref BT_L2CAP_CONNECTED since
 * the struct can only represent static channels in that case and static
 * channels are always connected.
 */
static bt_l2cap_chan_state_t bt_l2cap_chan_get_state(struct bt_l2cap_chan *chan)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Dynamic channels track their signaling state explicitly. */
	return BT_L2CAP_LE_CHAN(chan)->state;
#else
	/* Static channels are connected for the lifetime of the ACL. */
	return BT_L2CAP_CONNECTED;
#endif
}
1267
l2cap_chan_tx_init(struct bt_l2cap_le_chan * chan)1268 static void l2cap_chan_tx_init(struct bt_l2cap_le_chan *chan)
1269 {
1270 LOG_DBG("chan %p", chan);
1271
1272 (void)memset(&chan->tx, 0, sizeof(chan->tx));
1273 atomic_set(&chan->tx.credits, 0);
1274 k_fifo_init(&chan->tx_queue);
1275 }
1276
/* Add @p credits TX credits to @p chan and unpause its output.
 *
 * BT_L2CAP_STATUS_OUT marks the channel as allowed to transmit; the
 * status callback and rescheduling only happen on the paused->unpaused
 * transition (test_and_set returning 0), so repeated credit grants on
 * an already-unpaused channel are cheap.
 */
static void l2cap_chan_tx_give_credits(struct bt_l2cap_le_chan *chan,
				       uint16_t credits)
{
	LOG_DBG("chan %p credits %u", chan, credits);

	atomic_add(&chan->tx.credits, credits);

	/* Only act on the 0->1 edge of the OUT status bit. */
	if (!atomic_test_and_set_bit(chan->chan.status, BT_L2CAP_STATUS_OUT)) {
		LOG_DBG("chan %p unpaused", chan);
		if (chan->chan.ops->status) {
			chan->chan.ops->status(&chan->chan, chan->chan.status);
		}
		/* Re-schedule the channel if it has queued data. */
		if (chan_has_data(chan)) {
			raise_data_ready(chan);
		}
	}
}
1294
/* Channel destroy callback: stop pending work and release all buffers
 * held by the channel so the object can be re-used.
 */
static void l2cap_chan_destroy(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
	struct net_buf *buf;

	LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->rx.cid);

	/* Cancel ongoing work. Since the channel can be re-used after this
	 * we need to sync to make sure that the kernel does not have it
	 * in its queue anymore.
	 *
	 * In the case where we are in the context of executing the rtx_work
	 * item, we don't sync as it will deadlock the workqueue.
	 */
	struct k_work_q *rtx_work_queue = le_chan->rtx_work.queue;

	/* Waiting on our own workqueue thread would never return, hence
	 * the non-blocking cancel when called from rtx_work itself.
	 */
	if (rtx_work_queue == NULL || k_current_get() != &rtx_work_queue->thread) {
		k_work_cancel_delayable_sync(&le_chan->rtx_work, &le_chan->rtx_sync);
	} else {
		k_work_cancel_delayable(&le_chan->rtx_work);
	}

	/* Remove buffers on the SDU RX queue */
	while ((buf = k_fifo_get(&le_chan->rx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Destroy segmented SDU if it exists */
	if (le_chan->_sdu) {
		net_buf_unref(le_chan->_sdu);
		le_chan->_sdu = NULL;
		le_chan->_sdu_len = 0U;
	}
}
1329
le_err_to_result(int err)1330 static uint16_t le_err_to_result(int err)
1331 {
1332 switch (err) {
1333 case -ENOMEM:
1334 return BT_L2CAP_LE_ERR_NO_RESOURCES;
1335 case -EACCES:
1336 return BT_L2CAP_LE_ERR_AUTHORIZATION;
1337 case -EPERM:
1338 return BT_L2CAP_LE_ERR_KEY_SIZE;
1339 case -ENOTSUP:
1340 /* This handle the cases where a fixed channel is registered but
1341 * for some reason (e.g. controller not supporting a feature)
1342 * cannot be used.
1343 */
1344 return BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
1345 default:
1346 return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
1347 }
1348 }
1349
/* Validate an incoming (e)CoC connection request for one CID, ask the
 * server to allocate a channel, and initialize it.
 *
 * On success, *chan points at the newly connected channel and
 * BT_L2CAP_LE_SUCCESS is returned; otherwise an L2CAP LE result code
 * describing the refusal is returned.
 */
static uint16_t l2cap_chan_accept(struct bt_conn *conn,
				  struct bt_l2cap_server *server, uint16_t scid,
				  uint16_t mtu, uint16_t mps, uint16_t credits,
				  struct bt_l2cap_chan **chan)
{
	struct bt_l2cap_le_chan *le_chan;
	int err;

	LOG_DBG("conn %p scid 0x%04x chan %p", conn, scid, chan);

	/* The peer's source CID must be in the dynamic range. */
	if (!L2CAP_LE_CID_IS_DYN(scid)) {
		return BT_L2CAP_LE_ERR_INVALID_SCID;
	}

	/* The peer must not reuse a CID that is already connected. */
	*chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
	if (*chan) {
		return BT_L2CAP_LE_ERR_SCID_IN_USE;
	}

	/* Request server to accept the new connection and allocate the
	 * channel.
	 */
	err = server->accept(conn, server, chan);
	if (err < 0) {
		return le_err_to_result(err);
	}

#if defined(CONFIG_BT_L2CAP_SEG_RECV)
	if (!(*chan)->ops->recv == !(*chan)->ops->seg_recv) {
		LOG_ERR("Exactly one of 'recv' or 'seg_recv' must be set");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
#else
	if (!(*chan)->ops->recv) {
		LOG_ERR("Mandatory callback 'recv' missing");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
#endif

	le_chan = BT_L2CAP_LE_CHAN(*chan);

	le_chan->required_sec_level = server->sec_level;

	if (!l2cap_chan_add(conn, *chan, l2cap_chan_destroy)) {
		return BT_L2CAP_LE_ERR_NO_RESOURCES;
	}

	/* Init TX parameters */
	l2cap_chan_tx_init(le_chan);
	le_chan->tx.cid = scid;
	le_chan->tx.mps = mps;
	le_chan->tx.mtu = mtu;
	l2cap_chan_tx_give_credits(le_chan, credits);

	/* Init RX parameters */
	l2cap_chan_rx_init(le_chan);

	/* Set channel PSM */
	le_chan->psm = server->psm;

	/* Update state */
	bt_l2cap_chan_set_state(*chan, BT_L2CAP_CONNECTED);

	return BT_L2CAP_LE_SUCCESS;
}
1415
/* Check whether @p conn satisfies the security level required by
 * @p server, returning an L2CAP LE result code.
 */
static uint16_t l2cap_check_security(struct bt_conn *conn,
				     struct bt_l2cap_server *server)
{
	/* Security checks disabled at build time: accept everything. */
	if (IS_ENABLED(CONFIG_BT_CONN_DISABLE_SECURITY)) {
		return BT_L2CAP_LE_SUCCESS;
	}

	if (conn->sec_level >= server->sec_level) {
		return BT_L2CAP_LE_SUCCESS;
	}

	/* Encrypted but below the required level: the link needs stronger
	 * authentication, not just encryption.
	 */
	if (conn->sec_level > BT_SECURITY_L1) {
		return BT_L2CAP_LE_ERR_AUTHENTICATION;
	}

	/* If an LTK or an STK is available and encryption is required
	 * (LE security mode 1) but encryption is not enabled, the
	 * service request shall be rejected with the error code
	 * "Insufficient Encryption".
	 */
	if (bt_conn_ltk_present(conn)) {
		return BT_L2CAP_LE_ERR_ENCRYPTION;
	}

	return BT_L2CAP_LE_ERR_AUTHENTICATION;
}
1442
/* Handle an incoming L2CAP_LE_CREDIT_BASED_CONNECTION_REQ: validate the
 * parameters, check server registration and security, accept the
 * channel and send the response. The connected() callback is raised
 * only after the response has been queued successfully.
 */
static void le_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_le_chan *le_chan;
	struct bt_l2cap_server *server;
	struct bt_l2cap_le_conn_req *req = (void *)buf->data;
	struct bt_l2cap_le_conn_rsp *rsp;
	uint16_t psm, scid, mtu, mps, credits;
	uint16_t result;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		return;
	}

	/* Convert request fields to host byte order. */
	psm = sys_le16_to_cpu(req->psm);
	scid = sys_le16_to_cpu(req->scid);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	LOG_DBG("psm 0x%02x scid 0x%04x mtu %u mps %u credits %u", psm, scid, mtu, mps, credits);

	/* MTU and MPS must be at least the LE minimum (23). */
	if (mtu < L2CAP_LE_MIN_MTU || mps < L2CAP_LE_MIN_MTU) {
		LOG_ERR("Invalid LE-Conn Req params: mtu %u mps %u", mtu, mps);
		return;
	}

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CONN_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));

	/* Check if there is a server registered */
	server = bt_l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto rsp;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto rsp;
	}

	result = l2cap_chan_accept(conn, server, scid, mtu, mps, credits,
				   &chan);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto rsp;
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	/* Prepare response protocol data */
	rsp->dcid = sys_cpu_to_le16(le_chan->rx.cid);
	rsp->mps = sys_cpu_to_le16(le_chan->rx.mps);
	rsp->mtu = sys_cpu_to_le16(le_chan->rx.mtu);
	rsp->credits = sys_cpu_to_le16(le_chan->rx.credits);

	result = BT_L2CAP_LE_SUCCESS;

rsp:
	rsp->result = sys_cpu_to_le16(result);

	/* If the response could not be queued, skip the callback. */
	if (l2cap_send_sig(conn, buf)) {
		return;
	}

	/* Raise connected callback on success */
	if ((result == BT_L2CAP_LE_SUCCESS) && (chan->ops->connected != NULL)) {
		chan->ops->connected(chan);
	}
}
1523
1524 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Handle an incoming L2CAP_CREDIT_BASED_CONNECTION_REQ (enhanced
 * credit-based, up to BT_L2CAP_ECRED_CHAN_MAX_PER_REQ channels in one
 * request). Each requested source CID is accepted or refused
 * individually; a refused channel reports destination CID 0x0000 in the
 * response. The connected() callbacks fire only after the response has
 * been queued.
 */
static void le_ecred_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan[BT_L2CAP_ECRED_CHAN_MAX_PER_REQ];
	struct bt_l2cap_le_chan *ch = NULL;
	struct bt_l2cap_server *server;
	struct bt_l2cap_ecred_conn_req *req;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t mtu, mps, credits, result = BT_L2CAP_LE_SUCCESS;
	uint16_t psm = 0x0000;
	uint16_t scid, dcid[BT_L2CAP_ECRED_CHAN_MAX_PER_REQ];
	int i = 0;
	uint8_t req_cid_count;
	bool rsp_queued = false;

	/* set dcid to zeros here, in case of all connections refused error */
	memset(dcid, 0, sizeof(dcid));
	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		req_cid_count = 0;
		goto response;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));
	/* The remainder of the PDU is the list of source CIDs. */
	req_cid_count = buf->len / sizeof(scid);

	if (buf->len > sizeof(dcid)) {
		LOG_ERR("Too large LE conn req packet size");
		req_cid_count = BT_L2CAP_ECRED_CHAN_MAX_PER_REQ;
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	psm = sys_le16_to_cpu(req->psm);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	LOG_DBG("psm 0x%02x mtu %u mps %u credits %u", psm, mtu, mps, credits);

	/* MTU and MPS must be at least the ecred minimum (64). */
	if (mtu < BT_L2CAP_ECRED_MIN_MTU || mps < BT_L2CAP_ECRED_MIN_MTU) {
		LOG_ERR("Invalid ecred conn req params. mtu %u mps %u", mtu, mps);
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	/* Check if there is a server registered */
	server = bt_l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto response;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto response;
	}

	/* Try to accept each requested source CID independently. */
	while (buf->len >= sizeof(scid)) {
		uint16_t rc;
		scid = net_buf_pull_le16(buf);

		rc = l2cap_chan_accept(conn, server, scid, mtu, mps,
				       credits, &chan[i]);
		if (rc != BT_L2CAP_LE_SUCCESS) {
			result = rc;
		}
		switch (rc) {
		case BT_L2CAP_LE_SUCCESS:
			ch = BT_L2CAP_LE_CHAN(chan[i]);
			dcid[i++] = sys_cpu_to_le16(ch->rx.cid);
			continue;
		/* Some connections refused - invalid Source CID */
		/* Some connections refused - Source CID already allocated */
		/* Some connections refused - not enough resources
		 * available.
		 */
		default:
			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			dcid[i++] = 0x0000;
			continue;
		}
	}

response:
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_CONN_RSP, ident,
				      sizeof(*rsp) +
				      (sizeof(scid) * req_cid_count));
	if (!buf) {
		goto callback;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));
	/* Report RX parameters from a successfully accepted channel, if
	 * any; otherwise they stay zero.
	 */
	if (ch) {
		rsp->mps = sys_cpu_to_le16(ch->rx.mps);
		rsp->mtu = sys_cpu_to_le16(ch->rx.mtu);
		rsp->credits = sys_cpu_to_le16(ch->rx.credits);
	}
	rsp->result = sys_cpu_to_le16(result);

	net_buf_add_mem(buf, dcid, sizeof(scid) * req_cid_count);

	if (l2cap_send_sig(conn, buf)) {
		goto callback;
	}

	rsp_queued = true;

callback:
	if (ecred_cb && ecred_cb->ecred_conn_req) {
		ecred_cb->ecred_conn_req(conn, result, psm);
	}
	/* Only raise connected() if the peer will actually see the
	 * response; dcid[i] == 0 marks refused channels.
	 */
	if (rsp_queued) {
		for (i = 0; i < req_cid_count; i++) {
			/* Raise connected callback for established channels */
			if ((dcid[i] != 0x00) && (chan[i]->ops->connected != NULL)) {
				chan[i]->ops->connected(chan[i]);
			}
		}
	}
}
1652
/* Handle an incoming L2CAP_CREDIT_BASED_RECONFIGURE_REQ: validate the
 * new MTU/MPS for every listed channel, then apply them atomically (all
 * channels or none) and send the result.
 */
static void le_ecred_reconf_req(struct bt_l2cap *l2cap, uint8_t ident,
				struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chans[BT_L2CAP_ECRED_CHAN_MAX_PER_REQ];
	struct bt_l2cap_ecred_reconf_req *req;
	struct bt_l2cap_ecred_reconf_rsp *rsp;
	uint16_t mtu, mps;
	uint16_t scid, result = BT_L2CAP_RECONF_SUCCESS;
	int chan_count = 0;
	bool mps_reduced = false;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small ecred reconf req packet size");
		return;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));

	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);

	if (mps < BT_L2CAP_ECRED_MIN_MTU) {
		result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
		goto response;
	}

	if (mtu < BT_L2CAP_ECRED_MIN_MTU) {
		result = BT_L2CAP_RECONF_INVALID_MTU;
		goto response;
	}

	/* The specification only allows up to 5 CIDs in this packet */
	if (buf->len > (BT_L2CAP_ECRED_CHAN_MAX_PER_REQ * sizeof(scid))) {
		result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
		goto response;
	}

	/* First pass: validate every channel before changing anything. */
	while (buf->len >= sizeof(scid)) {
		struct bt_l2cap_chan *chan;
		scid = net_buf_pull_le16(buf);
		chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
		if (!chan) {
			result = BT_L2CAP_RECONF_INVALID_CID;
			goto response;
		}

		/* Reconfiguration must never shrink the peer-visible MTU. */
		if (BT_L2CAP_LE_CHAN(chan)->tx.mtu > mtu) {
			LOG_ERR("chan %p decreased MTU %u -> %u", chan,
				BT_L2CAP_LE_CHAN(chan)->tx.mtu, mtu);
			result = BT_L2CAP_RECONF_INVALID_MTU;
			goto response;
		}

		if (BT_L2CAP_LE_CHAN(chan)->tx.mps > mps) {
			mps_reduced = true;
		}

		chans[chan_count] = chan;
		chan_count++;
	}

	/* As per BT Core Spec V5.2 Vol. 3, Part A, section 7.11
	 * The request (...) shall not decrease the MPS of a channel
	 * if more than one channel is specified.
	 */
	if (mps_reduced && chan_count > 1) {
		result = BT_L2CAP_RECONF_INVALID_MPS;
		goto response;
	}

	/* Second pass: all channels validated, apply the new values. */
	for (int i = 0; i < chan_count; i++) {
		BT_L2CAP_LE_CHAN(chans[i])->tx.mtu = mtu;
		BT_L2CAP_LE_CHAN(chans[i])->tx.mps = mps;

		if (chans[i]->ops->reconfigured) {
			chans[i]->ops->reconfigured(chans[i]);
		}
	}

	LOG_DBG("mtu %u mps %u", mtu, mps);

response:
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_RECONF_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->result = sys_cpu_to_le16(result);

	l2cap_send_sig(conn, buf);
}
1747
/* Handle an incoming L2CAP_CREDIT_BASED_RECONFIGURE_RSP: finalize the
 * pending RX MTU change on every channel that was part of the request
 * identified by @p ident.
 */
static void le_ecred_reconf_rsp(struct bt_l2cap *l2cap, uint8_t ident,
				struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_ecred_reconf_rsp *rsp;
	struct bt_l2cap_le_chan *ch;
	uint16_t result;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small ecred reconf rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	result = sys_le16_to_cpu(rsp->result);

	/* Clearing ch->ident below makes the lookup converge: each
	 * iteration handles one channel of the request.
	 */
	while ((ch = l2cap_lookup_ident(conn, ident))) {
		/* Stop timer started on REQ send. The timer is only set on one
		 * of the channels, but we don't want to make assumptions on
		 * which one it is.
		 */
		k_work_cancel_delayable(&ch->rtx_work);

		/* Commit the new RX MTU only if the peer accepted. */
		if (result == BT_L2CAP_LE_SUCCESS) {
			ch->rx.mtu = ch->pending_rx_mtu;
		}

		ch->pending_rx_mtu = 0;
		ch->ident = 0U;

		if (ch->chan.ops->reconfigured) {
			ch->chan.ops->reconfigured(&ch->chan);
		}
	}
}
1783 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
1784
/* Unlink and return the channel on @p conn whose RX CID is @p cid.
 *
 * Fixed channels (CIDs outside the dynamic range) are never removed.
 *
 * @return The removed channel, or NULL if no dynamic channel matched.
 */
static struct bt_l2cap_le_chan *l2cap_remove_rx_cid(struct bt_conn *conn,
						    uint16_t cid)
{
	struct bt_l2cap_chan *ch;
	sys_snode_t *prev_node = NULL;

	/* Protect fixed channels against accidental removal */
	if (!L2CAP_LE_CID_IS_DYN(cid)) {
		return NULL;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, ch, node) {
		if (BT_L2CAP_LE_CHAN(ch)->rx.cid != cid) {
			prev_node = &ch->node;
			continue;
		}

		/* Unlink the match and hand it back to the caller. */
		sys_slist_remove(&conn->channels, prev_node, &ch->node);
		return BT_L2CAP_LE_CHAN(ch);
	}

	return NULL;
}
1807
/* Handle an incoming L2CAP_DISCONNECTION_REQ: remove the addressed
 * channel, answer with a disconnection response, or reject with
 * Invalid CID if the channel does not exist.
 */
static void le_disconn_req(struct bt_l2cap *l2cap, uint8_t ident,
			   struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_disconn_req *req = (void *)buf->data;
	struct bt_l2cap_disconn_rsp *rsp;
	uint16_t dcid;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		return;
	}

	/* The peer's DCID is our RX (local) CID. */
	dcid = sys_le16_to_cpu(req->dcid);

	LOG_DBG("dcid 0x%04x scid 0x%04x", dcid, sys_le16_to_cpu(req->scid));

	chan = l2cap_remove_rx_cid(conn, dcid);
	if (!chan) {
		struct bt_l2cap_cmd_reject_cid_data data;

		/* CIDs are echoed back still in little-endian order. */
		data.scid = req->scid;
		data.dcid = req->dcid;

		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID, &data,
				  sizeof(data));
		return;
	}

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_DISCONN_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->dcid = sys_cpu_to_le16(chan->rx.cid);
	rsp->scid = sys_cpu_to_le16(chan->tx.cid);

	/* Tear the channel down before queuing the response. */
	bt_l2cap_chan_del(&chan->chan);

	l2cap_send_sig(conn, buf);
}
1852
/* Try to raise the link security level in response to an L2CAP
 * connection error (@p err is BT_L2CAP_LE_ERR_ENCRYPTION or
 * BT_L2CAP_LE_ERR_AUTHENTICATION).
 *
 * @retval 0 security upgrade started; ENCRYPT_PENDING is set and the
 *         connection attempt will be retried once it completes.
 * @retval -EINPROGRESS an upgrade is already pending.
 * @retval -EALREADY the link is already at the maximum level reachable
 *         for this error, so retrying cannot help.
 * @retval Other negative values from bt_conn_set_security().
 */
static int l2cap_change_security(struct bt_l2cap_le_chan *chan, uint16_t err)
{
	struct bt_conn *conn = chan->chan.conn;
	bt_security_t sec;
	int ret;

	if (atomic_test_bit(chan->chan.status,
			    BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
		return -EINPROGRESS;
	}

	switch (err) {
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		/* Encryption only requires L2; anything at or above it
		 * means the error cannot be fixed by upgrading.
		 */
		if (conn->sec_level >= BT_SECURITY_L2) {
			return -EALREADY;
		}

		sec = BT_SECURITY_L2;
		break;
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
		/* Step up one level at a time until L4. */
		if (conn->sec_level < BT_SECURITY_L2) {
			sec = BT_SECURITY_L2;
		} else if (conn->sec_level < BT_SECURITY_L3) {
			sec = BT_SECURITY_L3;
		} else if (conn->sec_level < BT_SECURITY_L4) {
			sec = BT_SECURITY_L4;
		} else {
			return -EALREADY;
		}
		break;
	default:
		return -EINVAL;
	}

	ret = bt_conn_set_security(chan->chan.conn, sec);
	if (ret < 0) {
		return ret;
	}

	/* Mark the channel so the result handler retries on completion. */
	atomic_set_bit(chan->chan.status, BT_L2CAP_STATUS_ENCRYPT_PENDING);

	return 0;
}
1896
1897 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Handle an incoming L2CAP_CREDIT_BASED_CONNECTION_RSP matching the
 * request identified by @p ident. Each pending channel is completed,
 * retried after a security upgrade, or deleted depending on the result
 * code and its per-channel destination CID.
 */
static void le_ecred_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t dcid, mtu, mps, credits, result, psm;
	uint8_t attempted = 0;
	uint8_t succeeded = 0;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small ecred conn rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	LOG_DBG("mtu 0x%04x mps 0x%04x credits 0x%04x result %u", mtu, mps, credits, result);

	/* Capture the PSM for the ecred callback before any channel is
	 * removed; all channels of one request share the same PSM.
	 */
	chan = l2cap_lookup_ident(conn, ident);
	if (chan) {
		psm = chan->psm;
	} else {
		psm = 0x0000;
	}

	switch (result) {
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		while ((chan = l2cap_lookup_ident(conn, ident))) {

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->rtx_work);

			/* If security needs changing wait it to be completed */
			if (!l2cap_change_security(chan, result)) {
				return;
			}
			bt_l2cap_chan_remove(conn, &chan->chan);
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	case BT_L2CAP_LE_SUCCESS:
	/* Some connections refused - invalid Source CID */
	case BT_L2CAP_LE_ERR_INVALID_SCID:
	/* Some connections refused - Source CID already allocated */
	case BT_L2CAP_LE_ERR_SCID_IN_USE:
	/* Some connections refused - not enough resources available */
	case BT_L2CAP_LE_ERR_NO_RESOURCES:
		/* One DCID per pending channel, in request order. The
		 * loop terminates because each iteration either removes
		 * the channel or clears its ident.
		 */
		while ((chan = l2cap_lookup_ident(conn, ident))) {
			struct bt_l2cap_chan *c;

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->rtx_work);

			if (buf->len < sizeof(dcid)) {
				LOG_ERR("Fewer dcid values than expected");
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			dcid = net_buf_pull_le16(buf);
			attempted++;

			LOG_DBG("dcid 0x%04x", dcid);

			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			if (!dcid) {
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			c = bt_l2cap_le_lookup_tx_cid(conn, dcid);
			if (c) {
				/* If a device receives a
				 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet
				 * with an already assigned Destination CID,
				 * then both the original channel and the new
				 * channel shall be immediately discarded and
				 * not used.
				 */
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				bt_l2cap_chan_disconnect(c);
				continue;
			}

			chan->tx.cid = dcid;

			chan->ident = 0U;

			chan->tx.mtu = mtu;
			chan->tx.mps = mps;

			/* Update state */
			bt_l2cap_chan_set_state(&chan->chan,
						BT_L2CAP_CONNECTED);

			if (chan->chan.ops->connected) {
				chan->chan.ops->connected(&chan->chan);
			}

			/* Give credits */
			l2cap_chan_tx_give_credits(chan, credits);

			succeeded++;
		}
		break;
	case BT_L2CAP_LE_ERR_PSM_NOT_SUPP:
	default:
		/* Whole request rejected: delete all pending channels. */
		while ((chan = l2cap_remove_ident(conn, ident))) {
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	}

	if (ecred_cb && ecred_cb->ecred_conn_rsp) {
		ecred_cb->ecred_conn_rsp(conn, result, attempted, succeeded, psm);
	}
}
2026 #endif /* CONFIG_BT_L2CAP_ECRED */
2027
/* Handle an incoming L2CAP_LE_CREDIT_BASED_CONNECTION_RSP for the
 * request identified by @p ident: complete the channel on success,
 * attempt a security upgrade on security errors, otherwise delete it.
 */
static void le_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_le_conn_rsp *rsp = (void *)buf->data;
	uint16_t dcid, mtu, mps, credits, result;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small LE conn rsp packet size");
		return;
	}

	dcid = sys_le16_to_cpu(rsp->dcid);
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	LOG_DBG("dcid 0x%04x mtu %u mps %u credits %u result 0x%04x", dcid, mtu, mps, credits,
		result);

	/* Keep the channel in case of security errors */
	if (result == BT_L2CAP_LE_SUCCESS ||
	    result == BT_L2CAP_LE_ERR_AUTHENTICATION ||
	    result == BT_L2CAP_LE_ERR_ENCRYPTION) {
		chan = l2cap_lookup_ident(conn, ident);
	} else {
		chan = l2cap_remove_ident(conn, ident);
	}

	if (!chan) {
		LOG_ERR("Cannot find channel for ident %u", ident);
		return;
	}

	/* Cancel RTX work */
	k_work_cancel_delayable(&chan->rtx_work);

	/* Reset ident since it got a response */
	chan->ident = 0U;

	switch (result) {
	case BT_L2CAP_LE_SUCCESS:
		/* Adopt the peer's TX parameters and go connected. */
		chan->tx.cid = dcid;
		chan->tx.mtu = mtu;
		chan->tx.mps = mps;

		/* Update state */
		bt_l2cap_chan_set_state(&chan->chan, BT_L2CAP_CONNECTED);

		if (chan->chan.ops->connected) {
			chan->chan.ops->connected(&chan->chan);
		}

		/* Give credits */
		l2cap_chan_tx_give_credits(chan, credits);

		break;
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		/* If security needs changing wait it to be completed */
		if (l2cap_change_security(chan, result) == 0) {
			return;
		}
		/* Upgrade impossible: give up on this channel. */
		bt_l2cap_chan_remove(conn, &chan->chan);
		__fallthrough;
	default:
		bt_l2cap_chan_del(&chan->chan);
	}
}
2099
/* Handle an incoming L2CAP_DISCONNECTION_RSP: the peer confirmed our
 * disconnection request, so remove and delete the local channel.
 */
static void le_disconn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			   struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_disconn_rsp *rsp = (void *)buf->data;
	struct bt_l2cap_le_chan *removed;
	uint16_t scid;

	/* Validate minimum PDU length. */
	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small LE disconn rsp packet size");
		return;
	}

	scid = sys_le16_to_cpu(rsp->scid);

	LOG_DBG("dcid 0x%04x scid 0x%04x", sys_le16_to_cpu(rsp->dcid), scid);

	/* The SCID in the response is our local (RX) CID. */
	removed = l2cap_remove_rx_cid(conn, scid);
	if (removed != NULL) {
		bt_l2cap_chan_del(&removed->chan);
	}
}
2124
le_credits(struct bt_l2cap * l2cap,uint8_t ident,struct net_buf * buf)2125 static void le_credits(struct bt_l2cap *l2cap, uint8_t ident,
2126 struct net_buf *buf)
2127 {
2128 struct bt_conn *conn = l2cap->chan.chan.conn;
2129 struct bt_l2cap_chan *chan;
2130 struct bt_l2cap_le_credits *ev = (void *)buf->data;
2131 struct bt_l2cap_le_chan *le_chan;
2132 uint16_t credits, cid;
2133
2134 if (buf->len < sizeof(*ev)) {
2135 LOG_ERR("Too small LE Credits packet size");
2136 return;
2137 }
2138
2139 cid = sys_le16_to_cpu(ev->cid);
2140 credits = sys_le16_to_cpu(ev->credits);
2141
2142 LOG_DBG("cid 0x%04x credits %u", cid, credits);
2143
2144 chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
2145 if (!chan) {
2146 LOG_ERR("Unable to find channel of LE Credits packet");
2147 return;
2148 }
2149
2150 le_chan = BT_L2CAP_LE_CHAN(chan);
2151
2152 if (atomic_get(&le_chan->tx.credits) + credits > UINT16_MAX) {
2153 LOG_ERR("Credits overflow");
2154 bt_l2cap_chan_disconnect(chan);
2155 return;
2156 }
2157
2158 l2cap_chan_tx_give_credits(le_chan, credits);
2159
2160 LOG_DBG("chan %p total credits %lu", le_chan, atomic_get(&le_chan->tx.credits));
2161 }
2162
reject_cmd(struct bt_l2cap * l2cap,uint8_t ident,struct net_buf * buf)2163 static void reject_cmd(struct bt_l2cap *l2cap, uint8_t ident,
2164 struct net_buf *buf)
2165 {
2166 struct bt_conn *conn = l2cap->chan.chan.conn;
2167 struct bt_l2cap_le_chan *chan;
2168
2169 while ((chan = l2cap_remove_ident(conn, ident))) {
2170 bt_l2cap_chan_del(&chan->chan);
2171 }
2172 }
2173 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2174
l2cap_recv(struct bt_l2cap_chan * chan,struct net_buf * buf)2175 static int l2cap_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
2176 {
2177 struct bt_l2cap_le_chan *l2chan = CONTAINER_OF(chan, struct bt_l2cap_le_chan, chan);
2178 struct bt_l2cap *l2cap = CONTAINER_OF(l2chan, struct bt_l2cap, chan);
2179 struct bt_l2cap_sig_hdr *hdr;
2180 uint16_t len;
2181
2182 if (buf->len < sizeof(*hdr)) {
2183 LOG_ERR("Too small L2CAP signaling PDU");
2184 return 0;
2185 }
2186
2187 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2188 len = sys_le16_to_cpu(hdr->len);
2189
2190 LOG_DBG("Signaling code 0x%02x ident %u len %u", hdr->code, hdr->ident, len);
2191
2192 if (buf->len != len) {
2193 LOG_ERR("L2CAP length mismatch (%u != %u)", buf->len, len);
2194 return 0;
2195 }
2196
2197 if (!hdr->ident) {
2198 LOG_ERR("Invalid ident value in L2CAP PDU");
2199 return 0;
2200 }
2201
2202 switch (hdr->code) {
2203 case BT_L2CAP_CONN_PARAM_RSP:
2204 le_conn_param_rsp(l2cap, buf);
2205 break;
2206 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
2207 case BT_L2CAP_LE_CONN_REQ:
2208 le_conn_req(l2cap, hdr->ident, buf);
2209 break;
2210 case BT_L2CAP_LE_CONN_RSP:
2211 le_conn_rsp(l2cap, hdr->ident, buf);
2212 break;
2213 case BT_L2CAP_DISCONN_REQ:
2214 le_disconn_req(l2cap, hdr->ident, buf);
2215 break;
2216 case BT_L2CAP_DISCONN_RSP:
2217 le_disconn_rsp(l2cap, hdr->ident, buf);
2218 break;
2219 case BT_L2CAP_LE_CREDITS:
2220 le_credits(l2cap, hdr->ident, buf);
2221 break;
2222 case BT_L2CAP_CMD_REJECT:
2223 reject_cmd(l2cap, hdr->ident, buf);
2224 break;
2225 #if defined(CONFIG_BT_L2CAP_ECRED)
2226 case BT_L2CAP_ECRED_CONN_REQ:
2227 le_ecred_conn_req(l2cap, hdr->ident, buf);
2228 break;
2229 case BT_L2CAP_ECRED_CONN_RSP:
2230 le_ecred_conn_rsp(l2cap, hdr->ident, buf);
2231 break;
2232 case BT_L2CAP_ECRED_RECONF_REQ:
2233 le_ecred_reconf_req(l2cap, hdr->ident, buf);
2234 break;
2235 case BT_L2CAP_ECRED_RECONF_RSP:
2236 le_ecred_reconf_rsp(l2cap, hdr->ident, buf);
2237 break;
2238 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
2239 #else
2240 case BT_L2CAP_CMD_REJECT:
2241 /* Ignored */
2242 break;
2243 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2244 case BT_L2CAP_CONN_PARAM_REQ:
2245 if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
2246 le_conn_param_update_req(l2cap, hdr->ident, buf);
2247 break;
2248 }
2249 __fallthrough;
2250 default:
2251 LOG_WRN("Rejecting unknown L2CAP PDU code 0x%02x", hdr->code);
2252 l2cap_send_reject(chan->conn, hdr->ident,
2253 BT_L2CAP_REJ_NOT_UNDERSTOOD, NULL, 0);
2254 break;
2255 }
2256
2257 return 0;
2258 }
2259
2260 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_chan_shutdown(struct bt_l2cap_chan * chan)2261 static void l2cap_chan_shutdown(struct bt_l2cap_chan *chan)
2262 {
2263 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2264 struct net_buf *buf;
2265
2266 LOG_DBG("chan %p", chan);
2267
2268 atomic_set_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN);
2269
2270 /* Destroy segmented SDU if it exists */
2271 if (le_chan->_sdu) {
2272 net_buf_unref(le_chan->_sdu);
2273 le_chan->_sdu = NULL;
2274 le_chan->_sdu_len = 0U;
2275 }
2276
2277 /* Remove buffers on the TX queue */
2278 while ((buf = k_fifo_get(&le_chan->tx_queue, K_NO_WAIT))) {
2279 l2cap_tx_buf_destroy(chan->conn, buf, -ESHUTDOWN);
2280 }
2281
2282 /* Remove buffers on the RX queue */
2283 while ((buf = k_fifo_get(&le_chan->rx_queue, K_NO_WAIT))) {
2284 net_buf_unref(buf);
2285 }
2286
2287 /* Update status */
2288 if (chan->ops->status) {
2289 chan->ops->status(chan, chan->status);
2290 }
2291 }
2292
l2cap_chan_send_credits(struct bt_l2cap_le_chan * chan,uint16_t credits)2293 static void l2cap_chan_send_credits(struct bt_l2cap_le_chan *chan,
2294 uint16_t credits)
2295 {
2296 struct bt_l2cap_le_credits *ev;
2297 struct net_buf *buf;
2298
2299 __ASSERT_NO_MSG(bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED);
2300
2301 buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CREDITS, get_ident(),
2302 sizeof(*ev));
2303 if (!buf) {
2304 LOG_ERR("Unable to send credits update");
2305 /* Disconnect would probably not work either so the only
2306 * option left is to shutdown the channel.
2307 */
2308 l2cap_chan_shutdown(&chan->chan);
2309 return;
2310 }
2311
2312 __ASSERT_NO_MSG(atomic_get(&chan->rx.credits) == 0);
2313 atomic_set(&chan->rx.credits, credits);
2314
2315 ev = net_buf_add(buf, sizeof(*ev));
2316 ev->cid = sys_cpu_to_le16(chan->rx.cid);
2317 ev->credits = sys_cpu_to_le16(credits);
2318
2319 l2cap_send_sig(chan->chan.conn, buf);
2320
2321 LOG_DBG("chan %p credits %lu", chan, atomic_get(&chan->rx.credits));
2322 }
2323
2324 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
l2cap_chan_send_credits_pdu(struct bt_conn * conn,uint16_t cid,uint16_t credits)2325 static int l2cap_chan_send_credits_pdu(struct bt_conn *conn, uint16_t cid, uint16_t credits)
2326 {
2327 struct net_buf *buf;
2328 struct bt_l2cap_le_credits *ev;
2329
2330 buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CREDITS, get_ident(), sizeof(*ev));
2331 if (!buf) {
2332 return -ENOBUFS;
2333 }
2334
2335 ev = net_buf_add(buf, sizeof(*ev));
2336 *ev = (struct bt_l2cap_le_credits){
2337 .cid = sys_cpu_to_le16(cid),
2338 .credits = sys_cpu_to_le16(credits),
2339 };
2340
2341 return l2cap_send_sig(conn, buf);
2342 }
2343
2344 /**
2345 * Combination of @ref atomic_add and @ref u16_add_overflow. Leaves @p
2346 * target unchanged if an overflow would occur. Assumes the current
2347 * value of @p target is representable by uint16_t.
2348 */
atomic_add_safe_u16(atomic_t * target,uint16_t addition)2349 static bool atomic_add_safe_u16(atomic_t *target, uint16_t addition)
2350 {
2351 uint16_t target_old, target_new;
2352
2353 do {
2354 target_old = atomic_get(target);
2355 if (u16_add_overflow(target_old, addition, &target_new)) {
2356 return true;
2357 }
2358 } while (!atomic_cas(target, target_old, target_new));
2359
2360 return false;
2361 }
2362
bt_l2cap_chan_give_credits(struct bt_l2cap_chan * chan,uint16_t additional_credits)2363 int bt_l2cap_chan_give_credits(struct bt_l2cap_chan *chan, uint16_t additional_credits)
2364 {
2365 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2366
2367 if (!chan || !chan->ops) {
2368 LOG_ERR("%s: Invalid chan object.", __func__);
2369 return -EINVAL;
2370 }
2371
2372 if (!chan->ops->seg_recv) {
2373 LOG_ERR("%s: Available only with seg_recv.", __func__);
2374 return -EINVAL;
2375 }
2376
2377 if (additional_credits == 0) {
2378 LOG_ERR("%s: Refusing to give 0.", __func__);
2379 return -EINVAL;
2380 }
2381
2382 if (bt_l2cap_chan_get_state(chan) == BT_L2CAP_CONNECTING) {
2383 LOG_ERR("%s: Cannot give credits while connecting.", __func__);
2384 return -EBUSY;
2385 }
2386
2387 if (atomic_add_safe_u16(&le_chan->rx.credits, additional_credits)) {
2388 LOG_ERR("%s: Overflow.", __func__);
2389 return -EOVERFLOW;
2390 }
2391
2392 if (bt_l2cap_chan_get_state(chan) == BT_L2CAP_CONNECTED) {
2393 int err;
2394
2395 err = l2cap_chan_send_credits_pdu(chan->conn, le_chan->rx.cid, additional_credits);
2396 if (err) {
2397 LOG_ERR("%s: PDU failed %d.", __func__, err);
2398 return err;
2399 }
2400 }
2401
2402 return 0;
2403 }
2404 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
2405
bt_l2cap_chan_recv_complete(struct bt_l2cap_chan * chan,struct net_buf * buf)2406 int bt_l2cap_chan_recv_complete(struct bt_l2cap_chan *chan, struct net_buf *buf)
2407 {
2408 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2409 struct bt_conn *conn = chan->conn;
2410
2411 __ASSERT_NO_MSG(chan);
2412 __ASSERT_NO_MSG(buf);
2413
2414 net_buf_unref(buf);
2415
2416 if (!conn) {
2417 return -ENOTCONN;
2418 }
2419
2420 if (conn->type != BT_CONN_TYPE_LE) {
2421 return -ENOTSUP;
2422 }
2423
2424 LOG_DBG("chan %p buf %p", chan, buf);
2425
2426 if (bt_l2cap_chan_get_state(&le_chan->chan) == BT_L2CAP_CONNECTED) {
2427 l2cap_chan_send_credits(le_chan, 1);
2428 }
2429
2430 return 0;
2431 }
2432
l2cap_alloc_frag(k_timeout_t timeout,void * user_data)2433 static struct net_buf *l2cap_alloc_frag(k_timeout_t timeout, void *user_data)
2434 {
2435 struct bt_l2cap_le_chan *chan = user_data;
2436 struct net_buf *frag = NULL;
2437
2438 frag = chan->chan.ops->alloc_buf(&chan->chan);
2439 if (!frag) {
2440 return NULL;
2441 }
2442
2443 LOG_DBG("frag %p tailroom %zu", frag, net_buf_tailroom(frag));
2444
2445 return frag;
2446 }
2447
l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan * chan,struct net_buf * buf,uint16_t seg)2448 static void l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan *chan,
2449 struct net_buf *buf, uint16_t seg)
2450 {
2451 int err;
2452
2453 LOG_DBG("chan %p len %u", chan, buf->len);
2454
2455 __ASSERT_NO_MSG(bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED);
2456 __ASSERT_NO_MSG(atomic_get(&chan->rx.credits) == 0);
2457
2458 /* Receiving complete SDU, notify channel and reset SDU buf */
2459 err = chan->chan.ops->recv(&chan->chan, buf);
2460 if (err < 0) {
2461 if (err != -EINPROGRESS) {
2462 LOG_ERR("err %d", err);
2463 bt_l2cap_chan_disconnect(&chan->chan);
2464 net_buf_unref(buf);
2465 }
2466 return;
2467 } else if (bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED) {
2468 l2cap_chan_send_credits(chan, 1);
2469 }
2470
2471 net_buf_unref(buf);
2472 }
2473
l2cap_chan_le_recv_seg(struct bt_l2cap_le_chan * chan,struct net_buf * buf)2474 static void l2cap_chan_le_recv_seg(struct bt_l2cap_le_chan *chan,
2475 struct net_buf *buf)
2476 {
2477 uint16_t len;
2478 uint16_t seg = 0U;
2479
2480 len = chan->_sdu->len;
2481 if (len) {
2482 memcpy(&seg, net_buf_user_data(chan->_sdu), sizeof(seg));
2483 }
2484
2485 if (len + buf->len > chan->_sdu_len) {
2486 LOG_ERR("SDU length mismatch");
2487 bt_l2cap_chan_disconnect(&chan->chan);
2488 return;
2489 }
2490
2491 seg++;
2492 /* Store received segments in user_data */
2493 memcpy(net_buf_user_data(chan->_sdu), &seg, sizeof(seg));
2494
2495 LOG_DBG("chan %p seg %d len %u", chan, seg, buf->len);
2496
2497 /* Append received segment to SDU */
2498 len = net_buf_append_bytes(chan->_sdu, buf->len, buf->data, K_NO_WAIT,
2499 l2cap_alloc_frag, chan);
2500 if (len != buf->len) {
2501 LOG_ERR("Unable to store SDU");
2502 bt_l2cap_chan_disconnect(&chan->chan);
2503 return;
2504 }
2505
2506 if (chan->_sdu->len < chan->_sdu_len) {
2507 /* Give more credits if remote has run out of them, this
2508 * should only happen if the remote cannot fully utilize the
2509 * MPS for some reason.
2510 *
2511 * We can't send more than one credit, because if the remote
2512 * decides to start fully utilizing the MPS for the remainder of
2513 * the SDU, then the remote will end up with more credits than
2514 * the app has buffers.
2515 */
2516 if (atomic_get(&chan->rx.credits) == 0) {
2517 LOG_DBG("remote is not fully utilizing MPS");
2518 l2cap_chan_send_credits(chan, 1);
2519 }
2520
2521 return;
2522 }
2523
2524 buf = chan->_sdu;
2525 chan->_sdu = NULL;
2526 chan->_sdu_len = 0U;
2527
2528 l2cap_chan_le_recv_sdu(chan, buf, seg);
2529 }
2530
2531 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
l2cap_chan_le_recv_seg_direct(struct bt_l2cap_le_chan * chan,struct net_buf * seg)2532 static void l2cap_chan_le_recv_seg_direct(struct bt_l2cap_le_chan *chan, struct net_buf *seg)
2533 {
2534 uint16_t seg_offset;
2535 uint16_t sdu_remaining;
2536
2537 if (chan->_sdu_len_done == chan->_sdu_len) {
2538
2539 /* This is the first PDU in a SDU. */
2540
2541 if (seg->len < 2) {
2542 LOG_WRN("Missing SDU header");
2543 bt_l2cap_chan_disconnect(&chan->chan);
2544 return;
2545 }
2546
2547 /* Pop off the "SDU header". */
2548 chan->_sdu_len = net_buf_pull_le16(seg);
2549 chan->_sdu_len_done = 0;
2550
2551 if (chan->_sdu_len > chan->rx.mtu) {
2552 LOG_WRN("SDU exceeds MTU");
2553 bt_l2cap_chan_disconnect(&chan->chan);
2554 return;
2555 }
2556 }
2557
2558 seg_offset = chan->_sdu_len_done;
2559 sdu_remaining = chan->_sdu_len - chan->_sdu_len_done;
2560
2561 if (seg->len > sdu_remaining) {
2562 LOG_WRN("L2CAP RX PDU total exceeds SDU");
2563 bt_l2cap_chan_disconnect(&chan->chan);
2564 return;
2565 }
2566
2567 /* Commit receive. */
2568 chan->_sdu_len_done += seg->len;
2569
2570 /* Tail call. */
2571 chan->chan.ops->seg_recv(&chan->chan, chan->_sdu_len, seg_offset, &seg->b);
2572 }
2573 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
2574
l2cap_chan_le_recv(struct bt_l2cap_le_chan * chan,struct net_buf * buf)2575 static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
2576 struct net_buf *buf)
2577 {
2578 struct net_buf *owned_ref;
2579 uint16_t sdu_len;
2580 int err;
2581
2582 if (!test_and_dec(&chan->rx.credits)) {
2583 LOG_ERR("No credits to receive packet");
2584 bt_l2cap_chan_disconnect(&chan->chan);
2585 return;
2586 }
2587
2588 if (buf->len > chan->rx.mps) {
2589 LOG_WRN("PDU size > MPS (%u > %u)", buf->len, chan->rx.mps);
2590 bt_l2cap_chan_disconnect(&chan->chan);
2591 return;
2592 }
2593
2594 /* Redirect to experimental API. */
2595 IF_ENABLED(CONFIG_BT_L2CAP_SEG_RECV, (
2596 if (chan->chan.ops->seg_recv) {
2597 l2cap_chan_le_recv_seg_direct(chan, buf);
2598 return;
2599 }
2600 ))
2601
2602 /* Check if segments already exist */
2603 if (chan->_sdu) {
2604 l2cap_chan_le_recv_seg(chan, buf);
2605 return;
2606 }
2607
2608 if (buf->len < 2) {
2609 LOG_WRN("Too short data packet");
2610 bt_l2cap_chan_disconnect(&chan->chan);
2611 return;
2612 }
2613
2614 sdu_len = net_buf_pull_le16(buf);
2615
2616 LOG_DBG("chan %p len %u sdu_len %u", chan, buf->len, sdu_len);
2617
2618 if (sdu_len > chan->rx.mtu) {
2619 LOG_ERR("Invalid SDU length");
2620 bt_l2cap_chan_disconnect(&chan->chan);
2621 return;
2622 }
2623
2624 /* Always allocate buffer from the channel if supported. */
2625 if (chan->chan.ops->alloc_buf) {
2626 chan->_sdu = chan->chan.ops->alloc_buf(&chan->chan);
2627 if (!chan->_sdu) {
2628 LOG_ERR("Unable to allocate buffer for SDU");
2629 bt_l2cap_chan_disconnect(&chan->chan);
2630 return;
2631 }
2632 chan->_sdu_len = sdu_len;
2633
2634 /* Send sdu_len/mps worth of credits */
2635 uint16_t credits = DIV_ROUND_UP(
2636 MIN(sdu_len - buf->len, net_buf_tailroom(chan->_sdu)),
2637 chan->rx.mps);
2638
2639 if (credits) {
2640 LOG_DBG("sending %d extra credits (sdu_len %d buf_len %d mps %d)",
2641 credits,
2642 sdu_len,
2643 buf->len,
2644 chan->rx.mps);
2645 l2cap_chan_send_credits(chan, credits);
2646 }
2647
2648 l2cap_chan_le_recv_seg(chan, buf);
2649 return;
2650 }
2651
2652 owned_ref = net_buf_ref(buf);
2653 err = chan->chan.ops->recv(&chan->chan, owned_ref);
2654 if (err != -EINPROGRESS) {
2655 net_buf_unref(owned_ref);
2656 owned_ref = NULL;
2657 }
2658
2659 if (err < 0) {
2660 if (err != -EINPROGRESS) {
2661 LOG_ERR("err %d", err);
2662 bt_l2cap_chan_disconnect(&chan->chan);
2663 }
2664 return;
2665 }
2666
2667 /* Only attempt to send credits if the channel wasn't disconnected
2668 * in the recv() callback above
2669 */
2670 if (bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED) {
2671 l2cap_chan_send_credits(chan, 1);
2672 }
2673 }
2674
l2cap_chan_recv_queue(struct bt_l2cap_le_chan * chan,struct net_buf * buf)2675 static void l2cap_chan_recv_queue(struct bt_l2cap_le_chan *chan,
2676 struct net_buf *buf)
2677 {
2678 if (chan->state == BT_L2CAP_DISCONNECTING) {
2679 LOG_WRN("Ignoring data received while disconnecting");
2680 net_buf_unref(buf);
2681 return;
2682 }
2683
2684 if (atomic_test_bit(chan->chan.status, BT_L2CAP_STATUS_SHUTDOWN)) {
2685 LOG_WRN("Ignoring data received while channel has shutdown");
2686 net_buf_unref(buf);
2687 return;
2688 }
2689
2690 if (!L2CAP_LE_PSM_IS_DYN(chan->psm)) {
2691 l2cap_chan_le_recv(chan, buf);
2692 net_buf_unref(buf);
2693 return;
2694 }
2695
2696 k_fifo_put(&chan->rx_queue, buf);
2697 k_work_submit(&chan->rx_work);
2698 }
2699 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2700
l2cap_chan_recv(struct bt_l2cap_chan * chan,struct net_buf * buf,bool complete)2701 static void l2cap_chan_recv(struct bt_l2cap_chan *chan, struct net_buf *buf,
2702 bool complete)
2703 {
2704 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
2705 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2706
2707 if (L2CAP_LE_CID_IS_DYN(le_chan->rx.cid)) {
2708 if (complete) {
2709 l2cap_chan_recv_queue(le_chan, buf);
2710 } else {
2711 /* if packet was not complete this means peer device
2712 * overflowed our RX and channel shall be disconnected
2713 */
2714 bt_l2cap_chan_disconnect(chan);
2715 net_buf_unref(buf);
2716 }
2717
2718 return;
2719 }
2720 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2721
2722 LOG_DBG("chan %p len %u", chan, buf->len);
2723
2724 chan->ops->recv(chan, buf);
2725 net_buf_unref(buf);
2726 }
2727
bt_l2cap_recv(struct bt_conn * conn,struct net_buf * buf,bool complete)2728 void bt_l2cap_recv(struct bt_conn *conn, struct net_buf *buf, bool complete)
2729 {
2730 struct bt_l2cap_hdr *hdr;
2731 struct bt_l2cap_chan *chan;
2732 uint16_t cid;
2733
2734 if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
2735 conn->type == BT_CONN_TYPE_BR) {
2736 bt_l2cap_br_recv(conn, buf);
2737 return;
2738 }
2739
2740 if (buf->len < sizeof(*hdr)) {
2741 LOG_ERR("Too small L2CAP PDU received");
2742 net_buf_unref(buf);
2743 return;
2744 }
2745
2746 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2747 cid = sys_le16_to_cpu(hdr->cid);
2748
2749 LOG_DBG("Packet for CID %u len %u", cid, buf->len);
2750
2751 chan = bt_l2cap_le_lookup_rx_cid(conn, cid);
2752 if (!chan) {
2753 LOG_WRN("Ignoring data for unknown channel ID 0x%04x", cid);
2754 net_buf_unref(buf);
2755 return;
2756 }
2757
2758 l2cap_chan_recv(chan, buf, complete);
2759 }
2760
bt_l2cap_update_conn_param(struct bt_conn * conn,const struct bt_le_conn_param * param)2761 int bt_l2cap_update_conn_param(struct bt_conn *conn,
2762 const struct bt_le_conn_param *param)
2763 {
2764 struct bt_l2cap_conn_param_req *req;
2765 struct net_buf *buf;
2766
2767 buf = l2cap_create_le_sig_pdu(BT_L2CAP_CONN_PARAM_REQ,
2768 get_ident(), sizeof(*req));
2769 if (!buf) {
2770 return -ENOMEM;
2771 }
2772
2773 req = net_buf_add(buf, sizeof(*req));
2774 req->min_interval = sys_cpu_to_le16(param->interval_min);
2775 req->max_interval = sys_cpu_to_le16(param->interval_max);
2776 req->latency = sys_cpu_to_le16(param->latency);
2777 req->timeout = sys_cpu_to_le16(param->timeout);
2778
2779 return l2cap_send_sig(conn, buf);
2780 }
2781
l2cap_connected(struct bt_l2cap_chan * chan)2782 static void l2cap_connected(struct bt_l2cap_chan *chan)
2783 {
2784 LOG_DBG("ch %p cid 0x%04x", BT_L2CAP_LE_CHAN(chan), BT_L2CAP_LE_CHAN(chan)->rx.cid);
2785 }
2786
l2cap_disconnected(struct bt_l2cap_chan * chan)2787 static void l2cap_disconnected(struct bt_l2cap_chan *chan)
2788 {
2789 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2790
2791 LOG_DBG("ch %p cid 0x%04x", le_chan, le_chan->rx.cid);
2792
2793 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
2794 /* Cancel RTX work on signal channel.
2795 * Disconnected callback is always called from system workqueue
2796 * so this should always succeed.
2797 */
2798 (void)k_work_cancel_delayable(&le_chan->rtx_work);
2799 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2800 }
2801
l2cap_accept(struct bt_conn * conn,struct bt_l2cap_chan ** chan)2802 static int l2cap_accept(struct bt_conn *conn, struct bt_l2cap_chan **chan)
2803 {
2804 int i;
2805 static const struct bt_l2cap_chan_ops ops = {
2806 .connected = l2cap_connected,
2807 .disconnected = l2cap_disconnected,
2808 .recv = l2cap_recv,
2809 };
2810
2811 LOG_DBG("conn %p handle %u", conn, conn->handle);
2812
2813 for (i = 0; i < ARRAY_SIZE(bt_l2cap_pool); i++) {
2814 struct bt_l2cap *l2cap = &bt_l2cap_pool[i];
2815
2816 if (l2cap->chan.chan.conn) {
2817 continue;
2818 }
2819
2820 l2cap->chan.chan.ops = &ops;
2821 *chan = &l2cap->chan.chan;
2822
2823 return 0;
2824 }
2825
2826 LOG_ERR("No available L2CAP context for conn %p", conn);
2827
2828 return -ENOMEM;
2829 }
2830
2831 BT_L2CAP_CHANNEL_DEFINE(le_fixed_chan, BT_L2CAP_CID_LE_SIG, l2cap_accept, NULL);
2832
bt_l2cap_init(void)2833 void bt_l2cap_init(void)
2834 {
2835 if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
2836 bt_l2cap_br_init();
2837 }
2838 }
2839
2840 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_le_connect(struct bt_conn * conn,struct bt_l2cap_le_chan * ch,uint16_t psm)2841 static int l2cap_le_connect(struct bt_conn *conn, struct bt_l2cap_le_chan *ch,
2842 uint16_t psm)
2843 {
2844 int err;
2845
2846 if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
2847 return -EINVAL;
2848 }
2849
2850 l2cap_chan_tx_init(ch);
2851 l2cap_chan_rx_init(ch);
2852
2853 if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
2854 return -ENOMEM;
2855 }
2856
2857 ch->psm = psm;
2858
2859 if (conn->sec_level < ch->required_sec_level) {
2860 err = bt_conn_set_security(conn, ch->required_sec_level);
2861 if (err) {
2862 goto fail;
2863 }
2864
2865 atomic_set_bit(ch->chan.status,
2866 BT_L2CAP_STATUS_ENCRYPT_PENDING);
2867
2868 return 0;
2869 }
2870
2871 err = l2cap_le_conn_req(ch);
2872 if (err) {
2873 goto fail;
2874 }
2875
2876 return 0;
2877
2878 fail:
2879 bt_l2cap_chan_remove(conn, &ch->chan);
2880 bt_l2cap_chan_del(&ch->chan);
2881 return err;
2882 }
2883
2884 #if defined(CONFIG_BT_L2CAP_ECRED)
l2cap_ecred_init(struct bt_conn * conn,struct bt_l2cap_le_chan * ch,uint16_t psm)2885 static int l2cap_ecred_init(struct bt_conn *conn,
2886 struct bt_l2cap_le_chan *ch, uint16_t psm)
2887 {
2888
2889 if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
2890 return -EINVAL;
2891 }
2892
2893 l2cap_chan_tx_init(ch);
2894 l2cap_chan_rx_init(ch);
2895
2896 if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
2897 return -ENOMEM;
2898 }
2899
2900 ch->psm = psm;
2901
2902 LOG_DBG("ch %p psm 0x%02x mtu %u mps %u credits 1", ch, ch->psm, ch->rx.mtu, ch->rx.mps);
2903
2904 return 0;
2905 }
2906
bt_l2cap_ecred_chan_connect(struct bt_conn * conn,struct bt_l2cap_chan ** chan,uint16_t psm)2907 int bt_l2cap_ecred_chan_connect(struct bt_conn *conn,
2908 struct bt_l2cap_chan **chan, uint16_t psm)
2909 {
2910 int i, err;
2911
2912 LOG_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);
2913
2914 if (!conn || !chan) {
2915 return -EINVAL;
2916 }
2917
2918 /* Init non-null channels */
2919 for (i = 0; i < BT_L2CAP_ECRED_CHAN_MAX_PER_REQ; i++) {
2920 if (!chan[i]) {
2921 break;
2922 }
2923
2924 err = l2cap_ecred_init(conn, BT_L2CAP_LE_CHAN(chan[i]), psm);
2925 if (err < 0) {
2926 i--;
2927 goto fail;
2928 }
2929 }
2930
2931 return l2cap_ecred_conn_req(chan, i);
2932 fail:
2933 /* Remove channels added */
2934 for (; i >= 0; i--) {
2935 if (!chan[i]) {
2936 continue;
2937 }
2938
2939 bt_l2cap_chan_remove(conn, chan[i]);
2940 }
2941
2942 return err;
2943 }
2944
l2cap_find_pending_reconf(struct bt_conn * conn)2945 static struct bt_l2cap_le_chan *l2cap_find_pending_reconf(struct bt_conn *conn)
2946 {
2947 struct bt_l2cap_chan *chan;
2948
2949 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
2950 if (BT_L2CAP_LE_CHAN(chan)->pending_rx_mtu) {
2951 return BT_L2CAP_LE_CHAN(chan);
2952 }
2953 }
2954
2955 return NULL;
2956 }
2957
bt_l2cap_ecred_chan_reconfigure(struct bt_l2cap_chan ** chans,uint16_t mtu)2958 int bt_l2cap_ecred_chan_reconfigure(struct bt_l2cap_chan **chans, uint16_t mtu)
2959 {
2960 struct bt_l2cap_ecred_reconf_req *req;
2961 struct bt_conn *conn = NULL;
2962 struct bt_l2cap_le_chan *ch;
2963 struct net_buf *buf;
2964 uint8_t ident;
2965 int i;
2966
2967 LOG_DBG("chans %p mtu 0x%04x", chans, mtu);
2968
2969 if (!chans) {
2970 return -EINVAL;
2971 }
2972
2973 for (i = 0; i < BT_L2CAP_ECRED_CHAN_MAX_PER_REQ; i++) {
2974 if (!chans[i]) {
2975 break;
2976 }
2977
2978 /* validate that all channels are from same connection */
2979 if (conn) {
2980 if (conn != chans[i]->conn) {
2981 return -EINVAL;
2982 }
2983 } else {
2984 conn = chans[i]->conn;
2985 }
2986
2987 /* validate MTU is not decreased */
2988 if (mtu < BT_L2CAP_LE_CHAN(chans[i])->rx.mtu) {
2989 return -EINVAL;
2990 }
2991 }
2992
2993 if (i == 0) {
2994 return -EINVAL;
2995 }
2996
2997 if (!conn) {
2998 return -ENOTCONN;
2999 }
3000
3001 if (conn->type != BT_CONN_TYPE_LE) {
3002 return -EINVAL;
3003 }
3004
3005 /* allow only 1 request at time */
3006 if (l2cap_find_pending_reconf(conn)) {
3007 return -EBUSY;
3008 }
3009
3010 ident = get_ident();
3011
3012 buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_RECONF_REQ,
3013 ident,
3014 sizeof(*req) + (i * sizeof(uint16_t)));
3015 if (!buf) {
3016 return -ENOMEM;
3017 }
3018
3019 req = net_buf_add(buf, sizeof(*req));
3020 req->mtu = sys_cpu_to_le16(mtu);
3021
3022 /* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE
3023 * as the remaining bytes cannot be used.
3024 */
3025 req->mps = sys_cpu_to_le16(MIN(mtu + BT_L2CAP_SDU_HDR_SIZE,
3026 BT_L2CAP_RX_MTU));
3027
3028 for (int j = 0; j < i; j++) {
3029 ch = BT_L2CAP_LE_CHAN(chans[j]);
3030
3031 ch->ident = ident;
3032 ch->pending_rx_mtu = mtu;
3033
3034 net_buf_add_le16(buf, ch->rx.cid);
3035 };
3036
3037 /* We set the RTX timer on one of the supplied channels, but when the
3038 * request resolves or times out we will act on all the channels in the
3039 * supplied array, using the ident field to find them.
3040 */
3041 l2cap_chan_send_req(chans[0], buf, L2CAP_CONN_TIMEOUT);
3042
3043 return 0;
3044 }
3045
3046 #if defined(CONFIG_BT_L2CAP_RECONFIGURE_EXPLICIT)
bt_l2cap_ecred_chan_reconfigure_explicit(struct bt_l2cap_chan ** chans,size_t chan_count,uint16_t mtu,uint16_t mps)3047 int bt_l2cap_ecred_chan_reconfigure_explicit(struct bt_l2cap_chan **chans, size_t chan_count,
3048 uint16_t mtu, uint16_t mps)
3049 {
3050 struct bt_l2cap_ecred_reconf_req *req;
3051 struct bt_conn *conn = NULL;
3052 struct net_buf *buf;
3053 uint8_t ident;
3054
3055 LOG_DBG("chans %p chan_count %u mtu 0x%04x mps 0x%04x", chans, chan_count, mtu, mps);
3056
3057 if (!chans || !IN_RANGE(chan_count, 1, BT_L2CAP_ECRED_CHAN_MAX_PER_REQ)) {
3058 return -EINVAL;
3059 }
3060
3061 if (!IN_RANGE(mps, BT_L2CAP_ECRED_MIN_MPS, BT_L2CAP_RX_MTU)) {
3062 return -EINVAL;
3063 }
3064
3065 for (size_t i = 0; i < chan_count; i++) {
3066 /* validate that all channels are from same connection */
3067 if (conn) {
3068 if (conn != chans[i]->conn) {
3069 return -EINVAL;
3070 }
3071 } else {
3072 conn = chans[i]->conn;
3073 }
3074
3075 /* validate MTU is not decreased */
3076 if (mtu < BT_L2CAP_LE_CHAN(chans[i])->rx.mtu) {
3077 return -EINVAL;
3078 }
3079
3080 /* MPS is not allowed to decrease when reconfiguring multiple channels.
3081 * Core Specification 3.A.4.27 v6.0
3082 */
3083 if (chan_count > 1 && mps < BT_L2CAP_LE_CHAN(chans[i])->rx.mps) {
3084 return -EINVAL;
3085 }
3086 }
3087
3088 if (!conn) {
3089 return -ENOTCONN;
3090 }
3091
3092 if (conn->type != BT_CONN_TYPE_LE) {
3093 return -EINVAL;
3094 }
3095
3096 /* allow only 1 request at time */
3097 if (l2cap_find_pending_reconf(conn)) {
3098 return -EBUSY;
3099 }
3100
3101 ident = get_ident();
3102
3103 buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_RECONF_REQ, ident,
3104 sizeof(*req) + (chan_count * sizeof(uint16_t)));
3105 if (!buf) {
3106 return -ENOMEM;
3107 }
3108
3109 req = net_buf_add(buf, sizeof(*req));
3110 req->mtu = sys_cpu_to_le16(mtu);
3111 req->mps = sys_cpu_to_le16(mps);
3112
3113 for (size_t i = 0; i < chan_count; i++) {
3114 struct bt_l2cap_le_chan *ch;
3115
3116 ch = BT_L2CAP_LE_CHAN(chans[i]);
3117
3118 ch->ident = ident;
3119 ch->pending_rx_mtu = mtu;
3120
3121 net_buf_add_le16(buf, ch->rx.cid);
3122 };
3123
3124 /* We set the RTX timer on one of the supplied channels, but when the
3125 * request resolves or times out we will act on all the channels in the
3126 * supplied array, using the ident field to find them.
3127 */
3128 l2cap_chan_send_req(chans[0], buf, L2CAP_CONN_TIMEOUT);
3129
3130 return 0;
3131 }
3132 #endif /* defined(CONFIG_BT_L2CAP_RECONFIGURE_EXPLICIT) */
3133
3134 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
3135
bt_l2cap_chan_connect(struct bt_conn * conn,struct bt_l2cap_chan * chan,uint16_t psm)3136 int bt_l2cap_chan_connect(struct bt_conn *conn, struct bt_l2cap_chan *chan,
3137 uint16_t psm)
3138 {
3139 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3140
3141 LOG_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);
3142
3143 if (!conn || conn->state != BT_CONN_CONNECTED) {
3144 return -ENOTCONN;
3145 }
3146
3147 if (!chan) {
3148 return -EINVAL;
3149 }
3150
3151 if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
3152 conn->type == BT_CONN_TYPE_BR) {
3153 return bt_l2cap_br_chan_connect(conn, chan, psm);
3154 }
3155
3156 if (le_chan->required_sec_level > BT_SECURITY_L4) {
3157 return -EINVAL;
3158 } else if (le_chan->required_sec_level == BT_SECURITY_L0) {
3159 le_chan->required_sec_level = BT_SECURITY_L1;
3160 }
3161
3162 return l2cap_le_connect(conn, le_chan, psm);
3163 }
3164
bt_l2cap_chan_disconnect(struct bt_l2cap_chan * chan)3165 int bt_l2cap_chan_disconnect(struct bt_l2cap_chan *chan)
3166 {
3167 struct bt_conn *conn = chan->conn;
3168 struct net_buf *buf;
3169 struct bt_l2cap_disconn_req *req;
3170 struct bt_l2cap_le_chan *le_chan;
3171
3172 if (!conn) {
3173 return -ENOTCONN;
3174 }
3175
3176 if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
3177 conn->type == BT_CONN_TYPE_BR) {
3178 return bt_l2cap_br_chan_disconnect(chan);
3179 }
3180
3181 le_chan = BT_L2CAP_LE_CHAN(chan);
3182
3183 LOG_DBG("chan %p scid 0x%04x dcid 0x%04x", chan, le_chan->rx.cid, le_chan->tx.cid);
3184
3185 le_chan->ident = get_ident();
3186
3187 buf = l2cap_create_le_sig_pdu(BT_L2CAP_DISCONN_REQ,
3188 le_chan->ident, sizeof(*req));
3189 if (!buf) {
3190 return -ENOMEM;
3191 }
3192
3193 req = net_buf_add(buf, sizeof(*req));
3194 req->dcid = sys_cpu_to_le16(le_chan->tx.cid);
3195 req->scid = sys_cpu_to_le16(le_chan->rx.cid);
3196
3197 l2cap_chan_send_req(chan, buf, L2CAP_DISC_TIMEOUT);
3198 bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTING);
3199
3200 return 0;
3201 }
3202
user_data_not_empty(const struct net_buf * buf)3203 __maybe_unused static bool user_data_not_empty(const struct net_buf *buf)
3204 {
3205 size_t ud_len = sizeof(struct closure);
3206 const uint8_t *ud = net_buf_user_data(buf);
3207
3208 for (size_t i = 0; i < ud_len; i++) {
3209 if (ud[i] != 0) {
3210 return true;
3211 }
3212 }
3213
3214 return false;
3215 }
3216
bt_l2cap_dyn_chan_send(struct bt_l2cap_le_chan * le_chan,struct net_buf * buf)3217 static int bt_l2cap_dyn_chan_send(struct bt_l2cap_le_chan *le_chan, struct net_buf *buf)
3218 {
3219 uint16_t sdu_len = buf->len;
3220
3221 LOG_DBG("chan %p buf %p", le_chan, buf);
3222
3223 /* Frags are not supported. */
3224 __ASSERT_NO_MSG(buf->frags == NULL);
3225
3226 if (sdu_len > le_chan->tx.mtu) {
3227 LOG_ERR("attempt to send %u bytes on %u MTU chan",
3228 sdu_len, le_chan->tx.mtu);
3229 return -EMSGSIZE;
3230 }
3231
3232 if (buf->ref != 1) {
3233 /* The host may alter the buf contents when segmenting. Higher
3234 * layers cannot expect the buf contents to stay intact. Extra
3235 * refs suggests a silent data corruption would occur if not for
3236 * this error.
3237 */
3238 LOG_ERR("buf given to l2cap has other refs");
3239 return -EINVAL;
3240 }
3241
3242 if (net_buf_headroom(buf) < BT_L2CAP_SDU_CHAN_SEND_RESERVE) {
3243 /* Call `net_buf_reserve(buf, BT_L2CAP_SDU_CHAN_SEND_RESERVE)`
3244 * when allocating buffers intended for bt_l2cap_chan_send().
3245 */
3246 LOG_ERR("Not enough headroom in buf %p", buf);
3247 return -EINVAL;
3248 }
3249
3250 if (user_data_not_empty(buf)) {
3251 /* There may be issues if user_data is not empty. */
3252 LOG_WRN("user_data is not empty");
3253 }
3254
3255 /* Prepend SDU length.
3256 *
3257 * L2CAP LE CoC SDUs are segmented and put into K-frames PDUs which have
3258 * their own L2CAP header (i.e. PDU length, channel id).
3259 *
3260 * The SDU length is right before the data that will be segmented and is
3261 * only present in the first PDU. Here's an example:
3262 *
3263 * Sent data payload of 50 bytes over channel 0x4040 with MPS of 30 bytes:
3264 * First PDU (K-frame):
3265 * | L2CAP K-frame header | K-frame payload |
3266 * | PDU length | Channel ID | SDU length | SDU payload |
3267 * | 0x001e | 0x4040 | 0x0032 | 28 bytes of data |
3268 *
3269 * Second and last PDU (K-frame):
3270 * | L2CAP K-frame header | K-frame payload |
3271 * | PDU length | Channel ID | rest of SDU payload |
3272 * | 0x0016 | 0x4040 | 22 bytes of data |
3273 */
3274 net_buf_push_le16(buf, sdu_len);
3275
3276 /* Put buffer on TX queue */
3277 k_fifo_put(&le_chan->tx_queue, buf);
3278
3279 /* Always process the queue in the same context */
3280 raise_data_ready(le_chan);
3281
3282 return 0;
3283 }
3284
bt_l2cap_chan_send(struct bt_l2cap_chan * chan,struct net_buf * buf)3285 int bt_l2cap_chan_send(struct bt_l2cap_chan *chan, struct net_buf *buf)
3286 {
3287 if (!buf || !chan) {
3288 return -EINVAL;
3289 }
3290
3291 LOG_DBG("chan %p buf %p len %u", chan, buf, buf->len);
3292
3293 if (buf->ref != 1) {
3294 LOG_WRN("Expecting 1 ref, got %d", buf->ref);
3295 return -EINVAL;
3296 }
3297
3298 if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
3299 return -ENOTCONN;
3300 }
3301
3302 if (atomic_test_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN)) {
3303 return -ESHUTDOWN;
3304 }
3305
3306 if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
3307 chan->conn->type == BT_CONN_TYPE_BR) {
3308 return bt_l2cap_br_chan_send_cb(chan, buf, NULL, NULL);
3309 }
3310
3311 /* Sending over static channels is not supported by this fn. Use
3312 * `bt_l2cap_send_pdu()` instead.
3313 */
3314 if (IS_ENABLED(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)) {
3315 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3316
3317 __ASSERT_NO_MSG(le_chan);
3318 __ASSERT_NO_MSG(L2CAP_LE_CID_IS_DYN(le_chan->tx.cid));
3319
3320 return bt_l2cap_dyn_chan_send(le_chan, buf);
3321 }
3322
3323 LOG_DBG("Invalid channel type (chan %p)", chan);
3324
3325 return -EINVAL;
3326 }
3327 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
3328