1 /* l2cap.c - L2CAP handling */
2
3 /*
4 * Copyright (c) 2015-2016 Intel Corporation
5 * Copyright (c) 2023 Nordic Semiconductor
6 *
7 * SPDX-License-Identifier: Apache-2.0
8 */
9
10 #include <zephyr/kernel.h>
11 #include <string.h>
12 #include <errno.h>
13 #include <zephyr/sys/__assert.h>
14 #include <zephyr/sys/atomic.h>
15 #include <zephyr/sys/check.h>
16 #include <zephyr/sys/iterable_sections.h>
17 #include <zephyr/sys/byteorder.h>
18 #include <zephyr/sys/math_extras.h>
19 #include <zephyr/sys/util.h>
20 #include <zephyr/net_buf.h>
21
22 #include <zephyr/bluetooth/hci.h>
23 #include <zephyr/bluetooth/bluetooth.h>
24 #include <zephyr/bluetooth/conn.h>
25 #include <zephyr/bluetooth/l2cap.h>
26
27 #define LOG_DBG_ENABLED IS_ENABLED(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
28
29 #include "buf_view.h"
30 #include "hci_core.h"
31 #include "conn_internal.h"
32 #include "l2cap_internal.h"
33 #include "keys.h"
34
35 #include <zephyr/logging/log.h>
36 LOG_MODULE_REGISTER(bt_l2cap, CONFIG_BT_L2CAP_LOG_LEVEL);
37
38 #define LE_CHAN_RTX(_w) CONTAINER_OF(k_work_delayable_from_work(_w), \
39 struct bt_l2cap_le_chan, rtx_work)
40 #define CHAN_RX(_w) CONTAINER_OF(_w, struct bt_l2cap_le_chan, rx_work)
41
42 #define L2CAP_LE_MIN_MTU 23
43
44 #define L2CAP_LE_MAX_CREDITS (BT_BUF_ACL_RX_COUNT - 1)
45
46 #define L2CAP_LE_CID_DYN_START 0x0040
47 #define L2CAP_LE_CID_DYN_END 0x007f
48 #define L2CAP_LE_CID_IS_DYN(_cid) \
49 (_cid >= L2CAP_LE_CID_DYN_START && _cid <= L2CAP_LE_CID_DYN_END)
50
51 #define L2CAP_LE_PSM_FIXED_START 0x0001
52 #define L2CAP_LE_PSM_FIXED_END 0x007f
53 #define L2CAP_LE_PSM_DYN_START 0x0080
54 #define L2CAP_LE_PSM_DYN_END 0x00ff
55 #define L2CAP_LE_PSM_IS_DYN(_psm) \
56 (_psm >= L2CAP_LE_PSM_DYN_START && _psm <= L2CAP_LE_PSM_DYN_END)
57
58 #define L2CAP_CONN_TIMEOUT K_SECONDS(40)
59 #define L2CAP_DISC_TIMEOUT K_SECONDS(2)
60 /** @brief Local L2CAP RTX (Response Timeout eXpired)
61 *
62 * Specification-allowed range for the value of RTX is 1 to 60 seconds.
63 */
64 #define L2CAP_RTX_TIMEOUT K_SECONDS(2)
65
66 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Dedicated pool for disconnect buffers so they are guaranteed to be sent
 * even in case of data congestion due to flooding.
 */
70 NET_BUF_POOL_FIXED_DEFINE(disc_pool, 1,
71 BT_L2CAP_BUF_SIZE(
72 sizeof(struct bt_l2cap_sig_hdr) +
73 sizeof(struct bt_l2cap_disconn_req)),
74 CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);
75
76 #define l2cap_lookup_ident(conn, ident) __l2cap_lookup_ident(conn, ident, false)
77 #define l2cap_remove_ident(conn, ident) __l2cap_lookup_ident(conn, ident, true)
78
79 static sys_slist_t servers = SYS_SLIST_STATIC_INIT(&servers);
80
/* Destroy callback for queued TX buffers: just drop the reference.
 *
 * The connection and error code are irrelevant here; the buffer only has to
 * be returned to its pool. Mark the unused parameters explicitly to keep
 * -Wunused-parameter builds quiet (Zephyr convention).
 */
static void l2cap_tx_buf_destroy(struct bt_conn *conn, struct net_buf *buf, int err)
{
	ARG_UNUSED(conn);
	ARG_UNUSED(err);

	net_buf_unref(buf);
}
85 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
86
87 /* L2CAP signalling channel specific context */
/* L2CAP signalling channel specific context.
 * One instance per ACL connection (see bt_l2cap_pool below).
 */
struct bt_l2cap {
	/* The channel this context is associated with */
	struct bt_l2cap_le_chan chan;
};
92
93 static const struct bt_l2cap_ecred_cb *ecred_cb;
94 static struct bt_l2cap bt_l2cap_pool[CONFIG_BT_MAX_CONN];
95
/* Store the user-provided ECRED (enhanced credit based flow control)
 * callback set. Overwrites any previously registered callbacks; passing
 * NULL unregisters them.
 */
void bt_l2cap_register_ecred_cb(const struct bt_l2cap_ecred_cb *cb)
{
	ecred_cb = cb;
}
100
/* Produce the next L2CAP signalling identifier.
 *
 * Identifiers are handed out sequentially from 1 to 255; the value 0 is
 * reserved (it is not a valid signalling ident), so it is skipped when the
 * counter wraps around.
 */
static uint8_t get_ident(void)
{
	static uint8_t next_ident;

	if (++next_ident == 0U) {
		/* 0 is reserved; skip it on wrap-around */
		next_ident = 1U;
	}

	return next_ident;
}
113
114 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_chan_alloc_cid(struct bt_conn * conn,struct bt_l2cap_chan * chan)115 static struct bt_l2cap_le_chan *l2cap_chan_alloc_cid(struct bt_conn *conn,
116 struct bt_l2cap_chan *chan)
117 {
118 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
119 uint16_t cid;
120
121 /*
122 * No action needed if there's already a CID allocated, e.g. in
123 * the case of a fixed channel.
124 */
125 if (le_chan->rx.cid > 0) {
126 return le_chan;
127 }
128
129 for (cid = L2CAP_LE_CID_DYN_START; cid <= L2CAP_LE_CID_DYN_END; cid++) {
130 if (!bt_l2cap_le_lookup_rx_cid(conn, cid)) {
131 le_chan->rx.cid = cid;
132 return le_chan;
133 }
134 }
135
136 return NULL;
137 }
138
139 static struct bt_l2cap_le_chan *
__l2cap_lookup_ident(struct bt_conn * conn,uint16_t ident,bool remove)140 __l2cap_lookup_ident(struct bt_conn *conn, uint16_t ident, bool remove)
141 {
142 struct bt_l2cap_chan *chan;
143 sys_snode_t *prev = NULL;
144
145 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
146 if (BT_L2CAP_LE_CHAN(chan)->ident == ident) {
147 if (remove) {
148 sys_slist_remove(&conn->channels, prev,
149 &chan->node);
150 }
151 return BT_L2CAP_LE_CHAN(chan);
152 }
153
154 prev = &chan->node;
155 }
156
157 return NULL;
158 }
159 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
160
bt_l2cap_chan_remove(struct bt_conn * conn,struct bt_l2cap_chan * ch)161 void bt_l2cap_chan_remove(struct bt_conn *conn, struct bt_l2cap_chan *ch)
162 {
163 struct bt_l2cap_chan *chan;
164 sys_snode_t *prev = NULL;
165
166 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
167 if (chan == ch) {
168 sys_slist_remove(&conn->channels, prev, &chan->node);
169 return;
170 }
171
172 prev = &chan->node;
173 }
174 }
175
/* Human-readable name for a channel state, for logging. Unrecognized
 * values map to "unknown".
 */
const char *bt_l2cap_chan_state_str(bt_l2cap_chan_state_t state)
{
	if (state == BT_L2CAP_DISCONNECTED) {
		return "disconnected";
	} else if (state == BT_L2CAP_CONNECTING) {
		return "connecting";
	} else if (state == BT_L2CAP_CONFIG) {
		return "config";
	} else if (state == BT_L2CAP_CONNECTED) {
		return "connected";
	} else if (state == BT_L2CAP_DISCONNECTING) {
		return "disconnecting";
	}

	return "unknown";
}
193
194 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
195 #if defined(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
/* Debug variant of the state setter: logs every transition and warns about
 * transitions that are not part of the expected channel state machine.
 * Note that the new state is applied even for an invalid-but-known
 * transition; only an unknown state value is rejected outright.
 */
void bt_l2cap_chan_set_state_debug(struct bt_l2cap_chan *chan,
				   bt_l2cap_chan_state_t state,
				   const char *func, int line)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("chan %p psm 0x%04x %s -> %s", chan, le_chan->psm,
		bt_l2cap_chan_state_str(le_chan->state), bt_l2cap_chan_state_str(state));

	/* Validate the state transition against the channel state machine. */
	switch (state) {
	case BT_L2CAP_DISCONNECTED:
		/* Any old state may transition to disconnected. */
		break;
	case BT_L2CAP_CONNECTING:
		if (le_chan->state != BT_L2CAP_DISCONNECTED) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_CONFIG:
		if (le_chan->state != BT_L2CAP_CONNECTING) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_CONNECTED:
		if (le_chan->state != BT_L2CAP_CONFIG &&
		    le_chan->state != BT_L2CAP_CONNECTING) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_DISCONNECTING:
		if (le_chan->state != BT_L2CAP_CONFIG &&
		    le_chan->state != BT_L2CAP_CONNECTED) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	default:
		LOG_ERR("%s()%d: unknown (%u) state was set", func, line, state);
		return;
	}

	le_chan->state = state;
}
239 #else
/* Release (non-debug) state setter: just record the new state. */
void bt_l2cap_chan_set_state(struct bt_l2cap_chan *chan,
			     bt_l2cap_chan_state_t state)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	le_chan->state = state;
}
245 #endif /* CONFIG_BT_L2CAP_LOG_LEVEL_DBG */
246 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
247
248 static void cancel_data_ready(struct bt_l2cap_le_chan *lechan);
249 static bool chan_has_data(struct bt_l2cap_le_chan *lechan);
/* Detach a channel from its connection and run its teardown callbacks.
 *
 * Flushes any PDUs still queued for TX (dropping their references), invokes
 * the `disconnected` op, then the channel's `destroy` handler and finally
 * the `released` op. Safe to call for channels that were never attached to
 * a connection (chan->conn == NULL).
 */
void bt_l2cap_chan_del(struct bt_l2cap_chan *chan)
{
	const struct bt_l2cap_chan_ops *ops = chan->ops;
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("conn %p chan %p", chan->conn, chan);

	if (!chan->conn) {
		goto destroy;
	}

	/* Make sure the TX processor no longer considers this channel. */
	cancel_data_ready(le_chan);

	/* Remove buffers on the PDU TX queue. We can't do that in
	 * `l2cap_chan_destroy()` as it is not called for fixed channels.
	 */
	while (chan_has_data(le_chan)) {
		struct net_buf *buf = k_fifo_get(&le_chan->tx_queue, K_NO_WAIT);

		net_buf_unref(buf);
	}

	if (ops->disconnected) {
		ops->disconnected(chan);
	}

	chan->conn = NULL;

destroy:
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Reset internal members of common channel */
	bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTED);
	BT_L2CAP_LE_CHAN(chan)->psm = 0U;
#endif
	if (chan->destroy) {
		chan->destroy(chan);
	}

	if (ops->released) {
		ops->released(chan);
	}
}
292
293 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* RTX (Response Timeout eXpired) handler: the peer did not answer a
 * signalling request in time. Tear down the requesting channel, plus any
 * other channels pending on the same signalling ident (ECRED requests can
 * cover several channels at once).
 */
static void l2cap_rtx_timeout(struct k_work *work)
{
	struct bt_l2cap_le_chan *chan = LE_CHAN_RTX(work);
	struct bt_conn *conn = chan->chan.conn;

	LOG_ERR("chan %p timeout", chan);

	bt_l2cap_chan_remove(conn, &chan->chan);
	bt_l2cap_chan_del(&chan->chan);

	/* Remove other channels if pending on the same ident.
	 * NOTE(review): this reads chan->ident after bt_l2cap_chan_del();
	 * the visible reset in chan_del only clears state/psm, so ident is
	 * assumed to survive — confirm the destroy handler doesn't wipe it.
	 */
	while ((chan = l2cap_remove_ident(conn, chan->ident))) {
		bt_l2cap_chan_del(&chan->chan);
	}
}
309
310 static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
311 struct net_buf *buf);
312
l2cap_rx_process(struct k_work * work)313 static void l2cap_rx_process(struct k_work *work)
314 {
315 struct bt_l2cap_le_chan *ch = CHAN_RX(work);
316 struct net_buf *buf;
317
318 while ((buf = k_fifo_get(&ch->rx_queue, K_NO_WAIT))) {
319 LOG_DBG("ch %p buf %p", ch, buf);
320 l2cap_chan_le_recv(ch, buf);
321 net_buf_unref(buf);
322 }
323 }
324 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
325
/* Attach a channel to a connection: record the owning connection and the
 * destroy handler, then link the channel into the connection's list.
 */
void bt_l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
		       bt_l2cap_chan_destroy_t destroy)
{
	chan->conn = conn;
	chan->destroy = destroy;
	sys_slist_append(&conn->channels, &chan->node);

	LOG_DBG("conn %p chan %p", conn, chan);
}
336
/* Reset the stack-internal (underscore-prefixed) members of the channel.
 * We can't "just memset" the whole struct as some members are used as
 * application parameters.
 */
static void init_le_chan_private(struct bt_l2cap_le_chan *le_chan)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* No SDU reassembly in progress. */
	le_chan->_sdu = NULL;
	le_chan->_sdu_len = 0;
#if defined(CONFIG_BT_L2CAP_SEG_RECV)
	le_chan->_sdu_len_done = 0;
#endif /* CONFIG_BT_L2CAP_SEG_RECV */
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
	/* Clear the TX ready-list linkage and bookkeeping. */
	memset(&le_chan->_pdu_ready, 0, sizeof(le_chan->_pdu_ready));
	le_chan->_pdu_ready_lock = 0;
	le_chan->_pdu_remaining = 0;
}
353
/* Common channel attach path: allocate an RX CID if needed, reset the
 * internal state, link the channel to the connection and — for dynamic
 * channels — prepare RX work and enter the CONNECTING state. Returns false
 * if no free dynamic CID was available.
 */
static bool l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			   bt_l2cap_chan_destroy_t destroy)
{
	struct bt_l2cap_le_chan *le_chan;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	le_chan = l2cap_chan_alloc_cid(conn, chan);
#else
	le_chan = BT_L2CAP_LE_CHAN(chan);
#endif

	if (!le_chan) {
		LOG_ERR("Unable to allocate L2CAP channel ID");
		return false;
	}

	atomic_clear(chan->status);
	init_le_chan_private(le_chan);

	bt_l2cap_chan_add(conn, chan, destroy);

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* All dynamic channels have the destroy handler which makes sure that
	 * the RTX work structure is properly released with a cancel sync.
	 * The fixed signal channel is only removed when disconnected and the
	 * disconnected handler is always called from the workqueue itself so
	 * canceling from there should always succeed.
	 */
	k_work_init_delayable(&le_chan->rtx_work, l2cap_rtx_timeout);

	if (L2CAP_LE_CID_IS_DYN(le_chan->rx.cid)) {
		k_work_init(&le_chan->rx_work, l2cap_rx_process);
		k_fifo_init(&le_chan->rx_queue);
		bt_l2cap_chan_set_state(chan, BT_L2CAP_CONNECTING);
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

	return true;
}
393
/* ACL connection established: instantiate every registered fixed channel on
 * the new connection. BR/EDR links are handed off to the Classic L2CAP
 * implementation instead. Stops early if a channel fails to get a CID.
 */
void bt_l2cap_connected(struct bt_conn *conn)
{
	struct bt_l2cap_chan *chan;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_connected(conn);
		return;
	}

	STRUCT_SECTION_FOREACH(bt_l2cap_fixed_chan, fchan) {
		struct bt_l2cap_le_chan *le_chan;

		/* The accept callback may decline this connection. */
		if (fchan->accept(conn, &chan) < 0) {
			continue;
		}

		le_chan = BT_L2CAP_LE_CHAN(chan);

		/* Fill up remaining fixed channel context attached in
		 * fchan->accept()
		 */
		le_chan->rx.cid = fchan->cid;
		le_chan->tx.cid = fchan->cid;

		if (!l2cap_chan_add(conn, chan, fchan->destroy)) {
			return;
		}

		k_fifo_init(&le_chan->tx_queue);

		if (chan->ops->connected) {
			chan->ops->connected(chan);
		}

		/* Always set output status to fixed channels */
		atomic_set_bit(chan->status, BT_L2CAP_STATUS_OUT);

		if (chan->ops->status) {
			chan->ops->status(chan, chan->status);
		}
	}
}
437
/* ACL link gone: tear down every channel attached to the connection.
 * BR/EDR links are handled by the Classic implementation.
 */
void bt_l2cap_disconnected(struct bt_conn *conn)
{
	struct bt_l2cap_chan *chan, *next;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_disconnected(conn);
		return;
	}

	/* SAFE iteration: bt_l2cap_chan_del() unlinks nodes as we walk. */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
		bt_l2cap_chan_del(chan);
	}
}
452
/* Allocate a signalling PDU and pre-fill its header with the given opcode,
 * identifier and payload length. Returns NULL if no buffer could be
 * obtained within the minimum RTX timeout.
 */
static struct net_buf *l2cap_create_le_sig_pdu(uint8_t code, uint8_t ident,
					       uint16_t len)
{
	struct net_buf_pool *pool = NULL;
	struct net_buf *buf;
	struct bt_l2cap_sig_hdr *hdr;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Disconnect requests come from a dedicated pool so they can go out
	 * even when the data pools are congested.
	 */
	if (code == BT_L2CAP_DISCONN_REQ) {
		pool = &disc_pool;
	}
#endif
	/* Don't wait more than the minimum RTX timeout of 2 seconds */
	buf = bt_l2cap_create_pdu_timeout(pool, 0, L2CAP_RTX_TIMEOUT);
	if (buf == NULL) {
		LOG_ERR("Unable to allocate buffer for op 0x%02x", code);
		return NULL;
	}

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = code;
	hdr->ident = ident;
	hdr->len = sys_cpu_to_le16(len);

	return buf;
}
482
483 /* Send the buffer over the signalling channel. Release it in case of failure.
484 * Any other cleanup in failure to send should be handled by the disconnected
485 * handler.
486 */
l2cap_send_sig(struct bt_conn * conn,struct net_buf * buf)487 static int l2cap_send_sig(struct bt_conn *conn, struct net_buf *buf)
488 {
489 struct bt_l2cap_chan *ch = bt_l2cap_le_lookup_tx_cid(conn, BT_L2CAP_CID_LE_SIG);
490 struct bt_l2cap_le_chan *chan = BT_L2CAP_LE_CHAN(ch);
491
492 int err = bt_l2cap_send_pdu(chan, buf, NULL, NULL);
493
494 if (err) {
495 net_buf_unref(buf);
496 }
497
498 return err;
499 }
500
501 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Send a signalling request on the channel's connection and, if it was
 * queued successfully, arm the RTX timer for the response.
 */
static void l2cap_chan_send_req(struct bt_l2cap_chan *chan,
				struct net_buf *buf, k_timeout_t timeout)
{
	int err = l2cap_send_sig(chan->conn, buf);

	if (err) {
		/* Buffer already released; further cleanup is handled by the
		 * disconnected handler.
		 */
		return;
	}

	/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part A] page 126:
	 *
	 * The value of this timer is implementation-dependent but the minimum
	 * initial value is 1 second and the maximum initial value is 60
	 * seconds. One RTX timer shall exist for each outstanding signaling
	 * request, including each Echo Request. The timer disappears on the
	 * final expiration, when the response is received, or the physical
	 * link is lost.
	 */
	k_work_reschedule(&(BT_L2CAP_LE_CHAN(chan)->rtx_work), timeout);
}
520
l2cap_le_conn_req(struct bt_l2cap_le_chan * ch)521 static int l2cap_le_conn_req(struct bt_l2cap_le_chan *ch)
522 {
523 struct net_buf *buf;
524 struct bt_l2cap_le_conn_req *req;
525
526 ch->ident = get_ident();
527
528 buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CONN_REQ,
529 ch->ident, sizeof(*req));
530 if (!buf) {
531 return -ENOMEM;
532 }
533
534 req = net_buf_add(buf, sizeof(*req));
535 req->psm = sys_cpu_to_le16(ch->psm);
536 req->scid = sys_cpu_to_le16(ch->rx.cid);
537 req->mtu = sys_cpu_to_le16(ch->rx.mtu);
538 req->mps = sys_cpu_to_le16(ch->rx.mps);
539 req->credits = sys_cpu_to_le16(ch->rx.credits);
540
541 l2cap_chan_send_req(&ch->chan, buf, L2CAP_CONN_TIMEOUT);
542
543 return 0;
544 }
545
546 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Build and send an ECRED (enhanced credit based) connection request for a
 * group of channels that share one PSM and MTU. Every channel is stamped
 * with the same signalling ident so responses and timeouts can find the
 * whole group. Returns 0 on success or a negative errno.
 */
static int l2cap_ecred_conn_req(struct bt_l2cap_chan **chan, int channels)
{
	struct net_buf *buf;
	struct bt_l2cap_ecred_conn_req *req;
	struct bt_l2cap_le_chan *ch;
	int i;
	uint8_t ident;
	uint16_t req_psm;
	uint16_t req_mtu;

	if (!chan || !channels) {
		return -EINVAL;
	}

	ident = get_ident();

	/* Fixed part plus one 16-bit source CID per channel. */
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_CONN_REQ, ident,
				      sizeof(*req) +
				      (channels * sizeof(uint16_t)));

	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));

	ch = BT_L2CAP_LE_CHAN(chan[0]);

	/* Init common parameters */
	req->psm = sys_cpu_to_le16(ch->psm);
	req->mtu = sys_cpu_to_le16(ch->rx.mtu);
	req->mps = sys_cpu_to_le16(ch->rx.mps);
	req->credits = sys_cpu_to_le16(ch->rx.credits);
	req_psm = ch->psm;
	/* NOTE(review): the request advertises rx.mtu above, but the
	 * consistency assert below compares tx.mtu — confirm this asymmetry
	 * is intended.
	 */
	req_mtu = ch->tx.mtu;

	for (i = 0; i < channels; i++) {
		ch = BT_L2CAP_LE_CHAN(chan[i]);

		__ASSERT(ch->psm == req_psm,
			 "The PSM shall be the same for channels in the same request.");
		__ASSERT(ch->tx.mtu == req_mtu,
			 "The MTU shall be the same for channels in the same request.");

		ch->ident = ident;

		net_buf_add_le16(buf, ch->rx.cid);
	}

	l2cap_chan_send_req(*chan, buf, L2CAP_CONN_TIMEOUT);

	return 0;
}
600 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
601
/* Security level changed on the connection: retry any channel connection
 * that was parked waiting for encryption. On failure (non-zero HCI status
 * or a failed retry) the channel is removed and deleted.
 */
static void l2cap_le_encrypt_change(struct bt_l2cap_chan *chan, uint8_t status)
{
	int err;
	struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(chan);

	/* Skip channels that are not pending waiting for encryption */
	if (!atomic_test_and_clear_bit(chan->status,
				       BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
		return;
	}

	if (status) {
		goto fail;
	}

#if defined(CONFIG_BT_L2CAP_ECRED)
	/* A non-zero ident means the channel was part of an ECRED request:
	 * gather every sibling with the same ident and retry them together.
	 */
	if (le->ident) {
		struct bt_l2cap_chan *echan[BT_L2CAP_ECRED_CHAN_MAX_PER_REQ];
		struct bt_l2cap_chan *ch;
		int i = 0;

		SYS_SLIST_FOR_EACH_CONTAINER(&chan->conn->channels, ch, node) {
			if (le->ident == BT_L2CAP_LE_CHAN(ch)->ident) {
				__ASSERT(i < BT_L2CAP_ECRED_CHAN_MAX_PER_REQ,
					 "There can only be BT_L2CAP_ECRED_CHAN_MAX_PER_REQ "
					 "channels from the same request.");
				atomic_clear_bit(ch->status, BT_L2CAP_STATUS_ENCRYPT_PENDING);
				echan[i++] = ch;
			}
		}

		/* Retry ecred connect */
		l2cap_ecred_conn_req(echan, i);
		return;
	}
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */

	/* Retry to connect */
	err = l2cap_le_conn_req(le);
	if (err) {
		goto fail;
	}

	return;
fail:
	bt_l2cap_chan_remove(chan->conn, chan);
	bt_l2cap_chan_del(chan);
}
650 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
651
/* HCI encryption-change handler for L2CAP: BR/EDR links are dispatched to
 * the Classic implementation; on LE, pending dynamic-channel connections
 * are retried and every channel's encrypt_change op is notified.
 */
void bt_l2cap_security_changed(struct bt_conn *conn, uint8_t hci_status)
{
	struct bt_l2cap_chan *chan, *next;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		l2cap_br_encrypt_change(conn, hci_status);
		return;
	}

	/* SAFE iteration: l2cap_le_encrypt_change() may remove channels. */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
		l2cap_le_encrypt_change(chan, hci_status);
#endif

		if (chan->ops->encrypt_change) {
			chan->ops->encrypt_change(chan, hci_status);
		}
	}
}
672
/* Allocate a TX buffer with headroom reserved for the basic L2CAP header on
 * top of whatever extra `reserve` the caller requested.
 */
struct net_buf *bt_l2cap_create_pdu_timeout(struct net_buf_pool *pool,
					    size_t reserve,
					    k_timeout_t timeout)
{
	size_t headroom = sizeof(struct bt_l2cap_hdr) + reserve;

	return bt_conn_create_pdu_timeout(pool, headroom, timeout);
}
681
/* Mark the channel as having data to send: link it once on the connection's
 * ready-list (guarded by the `_pdu_ready_lock` flag) and kick the
 * connection's TX processing.
 */
static void raise_data_ready(struct bt_l2cap_le_chan *le_chan)
{
	/* atomic_set() returns the previous value: append the node only on
	 * the 0 -> 1 transition so it can't end up on the list twice.
	 */
	if (!atomic_set(&le_chan->_pdu_ready_lock, 1)) {
		sys_slist_append(&le_chan->chan.conn->l2cap_data_ready,
				 &le_chan->_pdu_ready);
		LOG_DBG("data ready raised %p", le_chan);
	} else {
		LOG_DBG("data ready already %p", le_chan);
	}

	bt_conn_data_ready(le_chan->chan.conn);
}
694
/* Remove the channel from the connection's ready-list and clear its ready
 * flag. Must only be called while the channel is at the head of the list
 * (asserted below).
 */
static void lower_data_ready(struct bt_l2cap_le_chan *le_chan)
{
	struct bt_conn *conn = le_chan->chan.conn;
	__maybe_unused sys_snode_t *s = sys_slist_get(&conn->l2cap_data_ready);

	LOG_DBG("%p", le_chan);

	__ASSERT_NO_MSG(s == &le_chan->_pdu_ready);

	/* The flag must have been raised for it to be lowered. */
	__maybe_unused atomic_t old = atomic_set(&le_chan->_pdu_ready_lock, 0);

	__ASSERT_NO_MSG(old);
}
708
cancel_data_ready(struct bt_l2cap_le_chan * le_chan)709 static void cancel_data_ready(struct bt_l2cap_le_chan *le_chan)
710 {
711 struct bt_conn *conn = le_chan->chan.conn;
712
713 LOG_DBG("%p", le_chan);
714
715 sys_slist_find_and_remove(&conn->l2cap_data_ready,
716 &le_chan->_pdu_ready);
717 atomic_set(&le_chan->_pdu_ready_lock, 0);
718 }
719
/* Queue a complete PDU for transmission on `le_chan`.
 *
 * The PDU is appended to the channel's TX FIFO together with a completion
 * closure (stored in the buffer's user data) and the channel is flagged
 * ready on the ACL connection; actual sending happens later in the TX
 * processor. Returns 0 on success, -ENOTCONN if the channel has no
 * connected ACL link, or -EINVAL for a buffer the stack cannot safely take
 * ownership of.
 */
int bt_l2cap_send_pdu(struct bt_l2cap_le_chan *le_chan, struct net_buf *pdu,
		      bt_conn_tx_cb_t cb, void *user_data)
{
	if (!le_chan->chan.conn || le_chan->chan.conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	if (pdu->ref != 1) {
		/* The host may alter the buf contents when fragmenting. Higher
		 * layers cannot expect the buf contents to stay intact. Extra
		 * refs suggests a silent data corruption would occur if not for
		 * this error.
		 */
		LOG_ERR("Expecting 1 ref, got %d", pdu->ref);
		return -EINVAL;
	}

	if (pdu->user_data_size < sizeof(struct closure)) {
		/* NOTE(review): the log prints the configured user-data size,
		 * not the compared-against sizeof(struct closure) — confirm
		 * this is intended.
		 */
		LOG_DBG("not enough room in user_data %d < %d pool %u",
			pdu->user_data_size,
			CONFIG_BT_CONN_TX_USER_DATA_SIZE,
			pdu->pool_id);
		return -EINVAL;
	}

	make_closure(pdu->user_data, cb, user_data);
	LOG_DBG("push: pdu %p len %d cb %p userdata %p", pdu, pdu->len, cb, user_data);

	k_fifo_put(&le_chan->tx_queue, pdu);

	/* Only flags the channel as ready; nothing is sent synchronously. */
	raise_data_ready(le_chan);

	return 0;
}
754
755 /* L2CAP channel wants to send a PDU */
chan_has_data(struct bt_l2cap_le_chan * lechan)756 static bool chan_has_data(struct bt_l2cap_le_chan *lechan)
757 {
758 return !k_fifo_is_empty(&lechan->tx_queue);
759 }
760
761 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
test_and_dec(atomic_t * target)762 static bool test_and_dec(atomic_t *target)
763 {
764 atomic_t old_value, new_value;
765
766 do {
767 old_value = atomic_get(target);
768 if (!old_value) {
769 return false;
770 }
771
772 new_value = old_value - 1;
773 } while (atomic_cas(target, old_value, new_value) == 0);
774
775 return true;
776 }
777 #endif
778
779 /* Just like in group projects :p */
/* Consume one TX credit on a dynamic channel (no-op for fixed channels).
 * Must only be called when a credit is known to be available. When the last
 * credit is spent, the OUT status bit is cleared and the channel owner is
 * notified that sending is paused.
 */
static void chan_take_credit(struct bt_l2cap_le_chan *lechan)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	if (!L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
		return;
	}

	if (!test_and_dec(&lechan->tx.credits)) {
		/* Always ensure you have credits before calling this fn */
		__ASSERT_NO_MSG(0);
	}

	/* Notify channel user that it can't send anymore on this channel. */
	if (!atomic_get(&lechan->tx.credits)) {
		LOG_DBG("chan %p paused", lechan);
		atomic_clear_bit(lechan->chan.status, BT_L2CAP_STATUS_OUT);

		if (lechan->chan.ops->status) {
			lechan->chan.ops->status(&lechan->chan, lechan->chan.status);
		}
	}
#endif
}
803
/* Pick the next channel on this connection that is flagged ready AND
 * actually has queued data. Channels found ready but empty are lowered
 * (removed from the ready-list) as a side effect. Returns NULL when nothing
 * is sendable.
 */
static struct bt_l2cap_le_chan *get_ready_chan(struct bt_conn *conn)
{
	struct bt_l2cap_le_chan *lechan;

	sys_snode_t *pdu_ready = sys_slist_peek_head(&conn->l2cap_data_ready);

	if (!pdu_ready) {
		LOG_DBG("nothing to send on this conn");
		return NULL;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->l2cap_data_ready, lechan, _pdu_ready) {
		if (chan_has_data(lechan)) {
			LOG_DBG("sending from chan %p (%s) data %d", lechan,
				L2CAP_LE_CID_IS_DYN(lechan->tx.cid) ? "dynamic" : "static",
				chan_has_data(lechan));
			return lechan;
		}

		LOG_DBG("chan %p has no data", lechan);
		/* NOTE(review): this removes the list head while iterating;
		 * lower_data_ready() asserts the channel is at the head, so
		 * the walk effectively always operates on the current head.
		 */
		lower_data_ready(lechan);
	}

	return NULL;
}
829
l2cap_chan_sdu_sent(struct bt_conn * conn,void * user_data,int err)830 static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data, int err)
831 {
832 struct bt_l2cap_chan *chan;
833 uint16_t cid = POINTER_TO_UINT(user_data);
834
835 LOG_DBG("conn %p CID 0x%04x err %d", conn, cid, err);
836
837 if (err) {
838 LOG_DBG("error %d when sending SDU", err);
839
840 return;
841 }
842
843 chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
844 if (!chan) {
845 LOG_DBG("got SDU sent cb for disconnected chan (CID %u)", cid);
846
847 return;
848 }
849
850 if (chan->ops->sent) {
851 chan->ops->sent(chan);
852 }
853 }
854
get_pdu_len(struct bt_l2cap_le_chan * lechan,struct net_buf * buf)855 static uint16_t get_pdu_len(struct bt_l2cap_le_chan *lechan,
856 struct net_buf *buf)
857 {
858 if (!L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
859 /* No segmentation shenanigans on static channels */
860 return buf->len;
861 }
862
863 return MIN(buf->len, lechan->tx.mps);
864 }
865
chan_has_credits(struct bt_l2cap_le_chan * lechan)866 static bool chan_has_credits(struct bt_l2cap_le_chan *lechan)
867 {
868 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
869 if (!L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
870 return true;
871 }
872
873 LOG_DBG("chan %p credits %ld", lechan, atomic_get(&lechan->tx.credits));
874
875 return atomic_get(&lechan->tx.credits) >= 1;
876 #else
877 return true;
878 #endif
879 }
880
/* Test hook: weak no-op that test builds can override to observe every
 * data pull on a connection. `lechan` may be NULL when no channel is ready.
 */
__weak void bt_test_l2cap_data_pull_spy(struct bt_conn *conn,
					struct bt_l2cap_le_chan *lechan,
					size_t amount,
					size_t *length)
{
}
887
/* TX processor entry point: hand out the next PDU to send on `conn`, to be
 * consumed up to `amount` bytes.
 *
 * On the first pull of a PDU the L2CAP header is pushed and — for dynamic
 * channels — one TX credit is consumed. `*length` is set to the number of
 * bytes remaining in the current PDU (header included) before this pull is
 * accounted. Returns NULL when there is nothing sendable right now.
 */
struct net_buf *l2cap_data_pull(struct bt_conn *conn,
				size_t amount,
				size_t *length)
{
	struct bt_l2cap_le_chan *lechan = get_ready_chan(conn);

	if (IS_ENABLED(CONFIG_BT_TESTING)) {
		/* Allow tests to snoop in */
		bt_test_l2cap_data_pull_spy(conn, lechan, amount, length);
	}

	if (!lechan) {
		LOG_DBG("no channel conn %p", conn);
		bt_tx_irq_raise();
		return NULL;
	}

	/* Leave the PDU buffer in the queue until we have sent all its
	 * fragments.
	 *
	 * For SDUs we do the same, we keep it in the queue until all the
	 * segments have been sent, adding the PDU headers just-in-time.
	 */
	struct net_buf *pdu = k_fifo_peek_head(&lechan->tx_queue);

	/* We don't have anything to send for the current channel. We could
	 * however have something to send on another channel that is attached to
	 * the same ACL connection. Re-trigger the TX processor: it will call us
	 * again and this time we will select another channel to pull data from.
	 */
	if (!pdu) {
		bt_tx_irq_raise();
		return NULL;
	}

	if (bt_buf_has_view(pdu)) {
		LOG_ERR("already have view on %p", pdu);
		return NULL;
	}

	if (lechan->_pdu_remaining == 0 && !chan_has_credits(lechan)) {
		/* We don't have credits to send a new K-frame PDU. Remove the
		 * channel from the ready-list, it will be added back later when
		 * we get more credits.
		 */
		LOG_DBG("no credits for new K-frame on %p", lechan);
		lower_data_ready(lechan);
		return NULL;
	}

	/* Add PDU header: only done once per PDU, i.e. when no bytes of it
	 * have been pulled yet.
	 */
	if (lechan->_pdu_remaining == 0) {
		struct bt_l2cap_hdr *hdr;
		uint16_t pdu_len = get_pdu_len(lechan, pdu);

		LOG_DBG("Adding L2CAP PDU header: buf %p chan %p len %u / %u",
			pdu, lechan, pdu_len, pdu->len);

		LOG_HEXDUMP_DBG(pdu->data, pdu->len, "PDU payload");

		hdr = net_buf_push(pdu, sizeof(*hdr));
		hdr->len = sys_cpu_to_le16(pdu_len);
		hdr->cid = sys_cpu_to_le16(lechan->tx.cid);

		lechan->_pdu_remaining = pdu_len + sizeof(*hdr);
		chan_take_credit(lechan);
	}

	/* Whether the data to be pulled is the last ACL fragment */
	bool last_frag = amount >= lechan->_pdu_remaining;

	/* Whether the data to be pulled is part of the last L2CAP segment. For
	 * static channels, this variable will always be true, even though
	 * static channels don't have the concept of L2CAP segments.
	 */
	bool last_seg = lechan->_pdu_remaining == pdu->len;

	if (last_frag && last_seg) {
		LOG_DBG("last frag of last seg, dequeuing %p", pdu);
		__maybe_unused struct net_buf *b = k_fifo_get(&lechan->tx_queue, K_NO_WAIT);

		__ASSERT_NO_MSG(b == pdu);
	}

	if (last_frag && L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
		bool sdu_end = last_frag && last_seg;

		LOG_DBG("adding %s callback", sdu_end ? "`sdu_sent`" : "NULL");
		/* No user callbacks for SDUs */
		make_closure(pdu->user_data,
			     sdu_end ? l2cap_chan_sdu_sent : NULL,
			     sdu_end ? UINT_TO_POINTER(lechan->tx.cid) : NULL);
	}

	if (last_frag) {
		LOG_DBG("done sending PDU");

		/* Lowering the "request to send" and raising it again allows
		 * fair scheduling of channels on an ACL link: the channel is
		 * marked as "ready to send" by adding a reference to it on a
		 * FIFO on `conn`. Adding it again will send it to the back of
		 * the queue.
		 *
		 * TODO: add a user-controlled QoS function.
		 */
		LOG_DBG("chan %p done", lechan);
		lower_data_ready(lechan);

		/* Append channel to list if it still has data */
		if (chan_has_data(lechan)) {
			LOG_DBG("chan %p ready", lechan);
			raise_data_ready(lechan);
		}
	}

	/* This is used by `conn.c` to figure out if the PDU is done sending. */
	*length = lechan->_pdu_remaining;

	if (lechan->_pdu_remaining > amount) {
		lechan->_pdu_remaining -= amount;
	} else {
		lechan->_pdu_remaining = 0;
	}

	return pdu;
}
1014
/* Send an L2CAP Command Reject for signalling ident `ident` with the given
 * reason and optional reason-specific data. Allocation failure is silently
 * dropped (the peer's RTX timer will cover it).
 */
static void l2cap_send_reject(struct bt_conn *conn, uint8_t ident,
			      uint16_t reason, void *data, uint8_t data_len)
{
	struct net_buf *buf;
	struct bt_l2cap_cmd_reject *rej;

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_CMD_REJECT, ident,
				      sizeof(*rej) + data_len);
	if (buf == NULL) {
		return;
	}

	rej = net_buf_add(buf, sizeof(*rej));
	rej->reason = sys_cpu_to_le16(reason);

	if (data != NULL) {
		net_buf_add_mem(buf, data, data_len);
	}

	l2cap_send_sig(conn, buf);
}
1036
le_conn_param_rsp(struct bt_l2cap * l2cap,struct net_buf * buf)1037 static void le_conn_param_rsp(struct bt_l2cap *l2cap, struct net_buf *buf)
1038 {
1039 struct bt_l2cap_conn_param_rsp *rsp = (void *)buf->data;
1040
1041 if (buf->len < sizeof(*rsp)) {
1042 LOG_ERR("Too small LE conn param rsp");
1043 return;
1044 }
1045
1046 LOG_DBG("LE conn param rsp result %u", sys_le16_to_cpu(rsp->result));
1047 }
1048
/* Handle an incoming LE Connection Parameter Update Request.
 *
 * Only valid when we are the central; a peripheral-received request is
 * rejected with "command not understood". The requested parameters are
 * forwarded to the application via le_param_req() and, if accepted, the
 * controller connection update is initiated.
 *
 * Fix: restores `&param` where mojibake had corrupted the address-of
 * expressions (`¶m`), which did not compile.
 */
static void le_conn_param_update_req(struct bt_l2cap *l2cap, uint8_t ident,
				     struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_le_conn_param param;
	struct bt_l2cap_conn_param_rsp *rsp;
	struct bt_l2cap_conn_param_req *req = (void *)buf->data;
	bool accepted;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn update param req");
		return;
	}

	if (conn->state != BT_CONN_CONNECTED) {
		LOG_WRN("Not connected");
		return;
	}

	/* Only the central may act on this request (Core Spec Vol 3,
	 * Part A, 4.20); otherwise reject it as not understood.
	 */
	if (conn->role != BT_HCI_ROLE_CENTRAL) {
		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_NOT_UNDERSTOOD,
				  NULL, 0);
		return;
	}

	param.interval_min = sys_le16_to_cpu(req->min_interval);
	param.interval_max = sys_le16_to_cpu(req->max_interval);
	param.latency = sys_le16_to_cpu(req->latency);
	param.timeout = sys_le16_to_cpu(req->timeout);

	LOG_DBG("min 0x%04x max 0x%04x latency: 0x%04x timeout: 0x%04x", param.interval_min,
		param.interval_max, param.latency, param.timeout);

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_CONN_PARAM_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	/* Ask the application whether the parameters are acceptable. */
	accepted = le_param_req(conn, &param);

	rsp = net_buf_add(buf, sizeof(*rsp));
	if (accepted) {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_ACCEPTED);
	} else {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_REJECTED);
	}

	l2cap_send_sig(conn, buf);

	/* Apply the new parameters only after the response is queued. */
	if (accepted) {
		bt_conn_le_conn_update(conn, &param);
	}
}
1103
/* Find the channel on @p conn whose TX (peer-side) CID equals @p cid.
 *
 * @return The matching channel, or NULL if no channel uses @p cid.
 */
struct bt_l2cap_chan *bt_l2cap_le_lookup_tx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *it;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, it, node) {
		struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(it);

		if (le->tx.cid == cid) {
			return it;
		}
	}

	return NULL;
}
1117
/* Find the channel on @p conn whose RX (local) CID equals @p cid.
 *
 * @return The matching channel, or NULL if no channel uses @p cid.
 */
struct bt_l2cap_chan *bt_l2cap_le_lookup_rx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *it;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, it, node) {
		struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(it);

		if (le->rx.cid == cid) {
			return it;
		}
	}

	return NULL;
}
1131
1132 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Look up a registered L2CAP server by PSM.
 *
 * @return The server registered for @p psm, or NULL if none exists.
 */
struct bt_l2cap_server *bt_l2cap_server_lookup_psm(uint16_t psm)
{
	struct bt_l2cap_server *srv;

	SYS_SLIST_FOR_EACH_CONTAINER(&servers, srv, node) {
		if (srv->psm == psm) {
			return srv;
		}
	}

	return NULL;
}
1145
/* Register an L2CAP server.
 *
 * A non-zero @c server->psm must lie in the valid LE PSM range and be
 * unused; a zero PSM requests dynamic allocation from the dynamic range,
 * with the allocated value written back to @c server->psm. The security
 * level is clamped to at least L1 (L0 is BR/EDR-only).
 *
 * @return 0 on success, -EINVAL on bad arguments, -EADDRINUSE if the PSM
 *         is taken, -EADDRNOTAVAIL if no dynamic PSM is free.
 */
int bt_l2cap_server_register(struct bt_l2cap_server *server)
{
	/* The accept callback is mandatory: it creates the channel. */
	if (!server->accept) {
		return -EINVAL;
	}

	if (server->psm) {
		if (server->psm < L2CAP_LE_PSM_FIXED_START ||
		    server->psm > L2CAP_LE_PSM_DYN_END) {
			return -EINVAL;
		}

		/* Check if given PSM is already in use */
		if (bt_l2cap_server_lookup_psm(server->psm)) {
			LOG_DBG("PSM already registered");
			return -EADDRINUSE;
		}
	} else {
		uint16_t psm;

		/* Linear scan for the first free PSM in the dynamic range. */
		for (psm = L2CAP_LE_PSM_DYN_START;
		     psm <= L2CAP_LE_PSM_DYN_END; psm++) {
			if (!bt_l2cap_server_lookup_psm(psm)) {
				break;
			}
		}

		if (psm > L2CAP_LE_PSM_DYN_END) {
			LOG_WRN("No free dynamic PSMs available");
			return -EADDRNOTAVAIL;
		}

		LOG_DBG("Allocated PSM 0x%04x for new server", psm);
		server->psm = psm;
	}

	if (server->sec_level > BT_SECURITY_L4) {
		return -EINVAL;
	} else if (server->sec_level < BT_SECURITY_L1) {
		/* Level 0 is only applicable for BR/EDR */
		server->sec_level = BT_SECURITY_L1;
	}

	LOG_DBG("PSM 0x%04x", server->psm);

	sys_slist_append(&servers, &server->node);

	return 0;
}
1195
1196 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
l2cap_chan_seg_recv_rx_init(struct bt_l2cap_le_chan * chan)1197 static void l2cap_chan_seg_recv_rx_init(struct bt_l2cap_le_chan *chan)
1198 {
1199 if (chan->rx.mps > BT_L2CAP_RX_MTU) {
1200 LOG_ERR("Limiting RX MPS by stack buffer size.");
1201 chan->rx.mps = BT_L2CAP_RX_MTU;
1202 }
1203
1204 chan->_sdu_len = 0;
1205 chan->_sdu_len_done = 0;
1206 }
1207 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
1208
/* Initialize the receive direction of a dynamic channel.
 *
 * Chooses MTU/MPS values bounded by the stack RX buffer size and grants a
 * single initial credit. Channels using the seg_recv API are delegated to
 * l2cap_chan_seg_recv_rx_init().
 */
static void l2cap_chan_rx_init(struct bt_l2cap_le_chan *chan)
{
	LOG_DBG("chan %p", chan);

	/* Redirect to experimental API. */
	IF_ENABLED(CONFIG_BT_L2CAP_SEG_RECV, ({
		if (chan->chan.ops->seg_recv) {
			l2cap_chan_seg_recv_rx_init(chan);
			return;
		}
	}))

	/* Use existing MTU if defined */
	if (!chan->rx.mtu) {
		/* If application has not provide the incoming L2CAP SDU MTU use
		 * an MTU that does not require segmentation.
		 */
		chan->rx.mtu = BT_L2CAP_SDU_RX_MTU;
	}

	/* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE as the
	 * remaining bytes cannot be used.
	 */
	chan->rx.mps = MIN(chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE,
			   BT_L2CAP_RX_MTU);

	/* Truncate MTU if channel have disabled segmentation but still have
	 * set an MTU which requires it.
	 */
	if (!chan->chan.ops->alloc_buf &&
	    (chan->rx.mps < chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE)) {
		LOG_WRN("Segmentation disabled but MTU > MPS, truncating MTU");
		chan->rx.mtu = chan->rx.mps - BT_L2CAP_SDU_HDR_SIZE;
	}

	/* One credit is enough to receive the first PDU. */
	atomic_set(&chan->rx.credits, 1);
}
1246
1247 /** @brief Get @c chan->state.
1248 *
1249 * This field does not exist when @kconfig{CONFIG_BT_L2CAP_DYNAMIC_CHANNEL} is
1250 * disabled. In that case, this function returns @ref BT_L2CAP_CONNECTED since
1251 * the struct can only represent static channels in that case and static
1252 * channels are always connected.
1253 */
static bt_l2cap_chan_state_t bt_l2cap_chan_get_state(struct bt_l2cap_chan *chan)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	return BT_L2CAP_LE_CHAN(chan)->state;
#else
	/* Only static channels exist, and those are always connected. */
	return BT_L2CAP_CONNECTED;
#endif
}
1262
l2cap_chan_tx_init(struct bt_l2cap_le_chan * chan)1263 static void l2cap_chan_tx_init(struct bt_l2cap_le_chan *chan)
1264 {
1265 LOG_DBG("chan %p", chan);
1266
1267 (void)memset(&chan->tx, 0, sizeof(chan->tx));
1268 atomic_set(&chan->tx.credits, 0);
1269 k_fifo_init(&chan->tx_queue);
1270 }
1271
/* Add @p credits to the channel's TX credit pool.
 *
 * If the channel transitioned from paused (BT_L2CAP_STATUS_OUT clear) to
 * active, notify the application through the status callback and re-queue
 * the channel for transmission if it has pending data.
 */
static void l2cap_chan_tx_give_credits(struct bt_l2cap_le_chan *chan,
				       uint16_t credits)
{
	LOG_DBG("chan %p credits %u", chan, credits);

	atomic_add(&chan->tx.credits, credits);

	if (!atomic_test_and_set_bit(chan->chan.status, BT_L2CAP_STATUS_OUT)) {
		LOG_DBG("chan %p unpaused", chan);
		if (chan->chan.ops->status) {
			chan->chan.ops->status(&chan->chan, chan->chan.status);
		}
		if (chan_has_data(chan)) {
			raise_data_ready(chan);
		}
	}
}
1289
/* Tear down per-channel state so the channel object can be re-used:
 * cancel the RTX timer, drain the SDU RX queue and release any partially
 * reassembled SDU.
 */
static void l2cap_chan_destroy(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
	struct net_buf *buf;

	LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->rx.cid);

	/* Cancel ongoing work. Since the channel can be re-used after this
	 * we need to sync to make sure that the kernel does not have it
	 * in its queue anymore.
	 *
	 * In the case where we are in the context of executing the rtx_work
	 * item, we don't sync as it will deadlock the workqueue.
	 */
	struct k_work_q *rtx_work_queue = le_chan->rtx_work.queue;

	if (rtx_work_queue == NULL || k_current_get() != &rtx_work_queue->thread) {
		k_work_cancel_delayable_sync(&le_chan->rtx_work, &le_chan->rtx_sync);
	} else {
		k_work_cancel_delayable(&le_chan->rtx_work);
	}

	/* Remove buffers on the SDU RX queue */
	while ((buf = k_fifo_get(&le_chan->rx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Destroy segmented SDU if it exists */
	if (le_chan->_sdu) {
		net_buf_unref(le_chan->_sdu);
		le_chan->_sdu = NULL;
		le_chan->_sdu_len = 0U;
	}
}
1324
le_err_to_result(int err)1325 static uint16_t le_err_to_result(int err)
1326 {
1327 switch (err) {
1328 case -ENOMEM:
1329 return BT_L2CAP_LE_ERR_NO_RESOURCES;
1330 case -EACCES:
1331 return BT_L2CAP_LE_ERR_AUTHORIZATION;
1332 case -EPERM:
1333 return BT_L2CAP_LE_ERR_KEY_SIZE;
1334 case -ENOTSUP:
1335 /* This handle the cases where a fixed channel is registered but
1336 * for some reason (e.g. controller not supporting a feature)
1337 * cannot be used.
1338 */
1339 return BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
1340 default:
1341 return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
1342 }
1343 }
1344
/* Accept an incoming credit-based connection for @p server.
 *
 * Validates the peer's source CID, asks the server to allocate a channel,
 * checks that the channel ops are consistent, registers the channel on
 * @p conn and initializes both directions. On success *chan points to the
 * new channel, connected and ready.
 *
 * @return BT_L2CAP_LE_SUCCESS or an L2CAP LE error result code.
 */
static uint16_t l2cap_chan_accept(struct bt_conn *conn,
				  struct bt_l2cap_server *server, uint16_t scid,
				  uint16_t mtu, uint16_t mps, uint16_t credits,
				  struct bt_l2cap_chan **chan)
{
	struct bt_l2cap_le_chan *le_chan;
	int err;

	LOG_DBG("conn %p scid 0x%04x chan %p", conn, scid, chan);

	/* The peer's CID must come from the dynamic range. */
	if (!L2CAP_LE_CID_IS_DYN(scid)) {
		return BT_L2CAP_LE_ERR_INVALID_SCID;
	}

	*chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
	if (*chan) {
		return BT_L2CAP_LE_ERR_SCID_IN_USE;
	}

	/* Request server to accept the new connection and allocate the
	 * channel.
	 */
	err = server->accept(conn, server, chan);
	if (err < 0) {
		return le_err_to_result(err);
	}

#if defined(CONFIG_BT_L2CAP_SEG_RECV)
	if (!(*chan)->ops->recv == !(*chan)->ops->seg_recv) {
		LOG_ERR("Exactly one of 'recv' or 'seg_recv' must be set");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
#else
	if (!(*chan)->ops->recv) {
		LOG_ERR("Mandatory callback 'recv' missing");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
#endif

	le_chan = BT_L2CAP_LE_CHAN(*chan);

	le_chan->required_sec_level = server->sec_level;

	if (!l2cap_chan_add(conn, *chan, l2cap_chan_destroy)) {
		return BT_L2CAP_LE_ERR_NO_RESOURCES;
	}

	/* Init TX parameters */
	l2cap_chan_tx_init(le_chan);
	le_chan->tx.cid = scid;
	le_chan->tx.mps = mps;
	le_chan->tx.mtu = mtu;
	l2cap_chan_tx_give_credits(le_chan, credits);

	/* Init RX parameters */
	l2cap_chan_rx_init(le_chan);

	/* Set channel PSM */
	le_chan->psm = server->psm;

	/* Update state */
	bt_l2cap_chan_set_state(*chan, BT_L2CAP_CONNECTED);

	return BT_L2CAP_LE_SUCCESS;
}
1410
/* Check whether @p conn meets the security level required by @p server.
 *
 * @return BT_L2CAP_LE_SUCCESS when sufficient, otherwise the appropriate
 *         "insufficient authentication/encryption" result code.
 */
static uint16_t l2cap_check_security(struct bt_conn *conn,
				     struct bt_l2cap_server *server)
{
	if (IS_ENABLED(CONFIG_BT_CONN_DISABLE_SECURITY)) {
		return BT_L2CAP_LE_SUCCESS;
	}

	if (conn->sec_level >= server->sec_level) {
		return BT_L2CAP_LE_SUCCESS;
	}

	/* Already authenticated/encrypted but still below the required
	 * level: authentication is what is lacking.
	 */
	if (conn->sec_level > BT_SECURITY_L1) {
		return BT_L2CAP_LE_ERR_AUTHENTICATION;
	}

	/* If an LTK or an STK is available and encryption is required
	 * (LE security mode 1) but encryption is not enabled, the
	 * service request shall be rejected with the error code
	 * "Insufficient Encryption".
	 */
	if (bt_conn_ltk_present(conn)) {
		return BT_L2CAP_LE_ERR_ENCRYPTION;
	}

	return BT_L2CAP_LE_ERR_AUTHENTICATION;
}
1437
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the request parameters, locates the server registered for the
 * PSM, verifies link security and, on success, creates the channel via
 * l2cap_chan_accept(). A response carrying either the new channel's
 * parameters or an error result is always sent unless the request is
 * malformed or no buffer is available.
 */
static void le_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_le_chan *le_chan;
	struct bt_l2cap_server *server;
	struct bt_l2cap_le_conn_req *req = (void *)buf->data;
	struct bt_l2cap_le_conn_rsp *rsp;
	uint16_t psm, scid, mtu, mps, credits;
	uint16_t result;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		return;
	}

	psm = sys_le16_to_cpu(req->psm);
	scid = sys_le16_to_cpu(req->scid);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	LOG_DBG("psm 0x%02x scid 0x%04x mtu %u mps %u credits %u", psm, scid, mtu, mps, credits);

	/* Values below the spec minimum are dropped without a response. */
	if (mtu < L2CAP_LE_MIN_MTU || mps < L2CAP_LE_MIN_MTU) {
		LOG_ERR("Invalid LE-Conn Req params: mtu %u mps %u", mtu, mps);
		return;
	}

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CONN_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));

	/* Check if there is a server registered */
	server = bt_l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto rsp;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto rsp;
	}

	result = l2cap_chan_accept(conn, server, scid, mtu, mps, credits,
				   &chan);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto rsp;
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	/* Prepare response protocol data */
	rsp->dcid = sys_cpu_to_le16(le_chan->rx.cid);
	rsp->mps = sys_cpu_to_le16(le_chan->rx.mps);
	rsp->mtu = sys_cpu_to_le16(le_chan->rx.mtu);
	rsp->credits = sys_cpu_to_le16(le_chan->rx.credits);

	result = BT_L2CAP_LE_SUCCESS;

rsp:
	rsp->result = sys_cpu_to_le16(result);

	if (l2cap_send_sig(conn, buf)) {
		return;
	}

	/* Raise connected callback on success */
	if ((result == BT_L2CAP_LE_SUCCESS) && (chan->ops->connected != NULL)) {
		chan->ops->connected(chan);
	}
}
1518
1519 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Handle an incoming Enhanced Credit Based Connection Request, which can
 * open up to BT_L2CAP_ECRED_CHAN_MAX_PER_REQ channels at once.
 *
 * Each requested source CID is accepted or refused individually; the
 * response carries one destination CID per requested channel, 0x0000
 * marking refused ones. The connected callbacks are raised only after the
 * response has been queued successfully.
 */
static void le_ecred_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan[BT_L2CAP_ECRED_CHAN_MAX_PER_REQ];
	struct bt_l2cap_le_chan *ch = NULL;
	struct bt_l2cap_server *server;
	struct bt_l2cap_ecred_conn_req *req;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t mtu, mps, credits, result = BT_L2CAP_LE_SUCCESS;
	uint16_t psm = 0x0000;
	uint16_t scid, dcid[BT_L2CAP_ECRED_CHAN_MAX_PER_REQ];
	int i = 0;
	uint8_t req_cid_count;
	bool rsp_queued = false;

	/* set dcid to zeros here, in case of all connections refused error */
	memset(dcid, 0, sizeof(dcid));
	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		req_cid_count = 0;
		goto response;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));
	req_cid_count = buf->len / sizeof(scid);

	/* More CIDs than allowed per request: reply with the maximum count
	 * of (zeroed) DCIDs and an error result.
	 */
	if (buf->len > sizeof(dcid)) {
		LOG_ERR("Too large LE conn req packet size");
		req_cid_count = BT_L2CAP_ECRED_CHAN_MAX_PER_REQ;
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	psm = sys_le16_to_cpu(req->psm);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	LOG_DBG("psm 0x%02x mtu %u mps %u credits %u", psm, mtu, mps, credits);

	if (mtu < BT_L2CAP_ECRED_MIN_MTU || mps < BT_L2CAP_ECRED_MIN_MTU) {
		LOG_ERR("Invalid ecred conn req params. mtu %u mps %u", mtu, mps);
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	/* Check if there is a server registered */
	server = bt_l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto response;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto response;
	}

	while (buf->len >= sizeof(scid)) {
		uint16_t rc;
		scid = net_buf_pull_le16(buf);

		rc = l2cap_chan_accept(conn, server, scid, mtu, mps,
				       credits, &chan[i]);
		if (rc != BT_L2CAP_LE_SUCCESS) {
			result = rc;
		}
		switch (rc) {
		case BT_L2CAP_LE_SUCCESS:
			ch = BT_L2CAP_LE_CHAN(chan[i]);
			dcid[i++] = sys_cpu_to_le16(ch->rx.cid);
			continue;
		/* Some connections refused - invalid Source CID */
		/* Some connections refused - Source CID already allocated */
		/* Some connections refused - not enough resources
		 * available.
		 */
		default:
			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			dcid[i++] = 0x0000;
			continue;
		}
	}

response:
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_CONN_RSP, ident,
				      sizeof(*rsp) +
				      (sizeof(scid) * req_cid_count));
	if (!buf) {
		goto callback;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));
	/* All accepted channels share the same RX config; report it from
	 * the last accepted one (zeros if none were accepted).
	 */
	if (ch) {
		rsp->mps = sys_cpu_to_le16(ch->rx.mps);
		rsp->mtu = sys_cpu_to_le16(ch->rx.mtu);
		rsp->credits = sys_cpu_to_le16(ch->rx.credits);
	}
	rsp->result = sys_cpu_to_le16(result);

	net_buf_add_mem(buf, dcid, sizeof(scid) * req_cid_count);

	if (l2cap_send_sig(conn, buf)) {
		goto callback;
	}

	rsp_queued = true;

callback:
	if (ecred_cb && ecred_cb->ecred_conn_req) {
		ecred_cb->ecred_conn_req(conn, result, psm);
	}
	if (rsp_queued) {
		for (i = 0; i < req_cid_count; i++) {
			/* Raise connected callback for established channels */
			if ((dcid[i] != 0x00) && (chan[i]->ops->connected != NULL)) {
				chan[i]->ops->connected(chan[i]);
			}
		}
	}
}
1647
/* Handle an incoming Enhanced Credit Based Reconfigure Request, which may
 * update MTU/MPS of up to BT_L2CAP_ECRED_CHAN_MAX_PER_REQ channels.
 *
 * The new values are applied to all listed channels only if every check
 * passes; otherwise an error result is returned and nothing changes.
 */
static void le_ecred_reconf_req(struct bt_l2cap *l2cap, uint8_t ident,
				struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chans[BT_L2CAP_ECRED_CHAN_MAX_PER_REQ];
	struct bt_l2cap_ecred_reconf_req *req;
	struct bt_l2cap_ecred_reconf_rsp *rsp;
	uint16_t mtu, mps;
	uint16_t scid, result = BT_L2CAP_RECONF_SUCCESS;
	int chan_count = 0;
	bool mps_reduced = false;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small ecred reconf req packet size");
		return;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));

	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);

	if (mps < BT_L2CAP_ECRED_MIN_MTU) {
		result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
		goto response;
	}

	if (mtu < BT_L2CAP_ECRED_MIN_MTU) {
		result = BT_L2CAP_RECONF_INVALID_MTU;
		goto response;
	}

	/* The specification only allows up to 5 CIDs in this packet */
	if (buf->len > (BT_L2CAP_ECRED_CHAN_MAX_PER_REQ * sizeof(scid))) {
		result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
		goto response;
	}

	while (buf->len >= sizeof(scid)) {
		struct bt_l2cap_chan *chan;
		scid = net_buf_pull_le16(buf);
		chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
		if (!chan) {
			result = BT_L2CAP_RECONF_INVALID_CID;
			goto response;
		}

		/* An MTU decrease is never allowed. */
		if (BT_L2CAP_LE_CHAN(chan)->tx.mtu > mtu) {
			LOG_ERR("chan %p decreased MTU %u -> %u", chan,
				BT_L2CAP_LE_CHAN(chan)->tx.mtu, mtu);
			result = BT_L2CAP_RECONF_INVALID_MTU;
			goto response;
		}

		if (BT_L2CAP_LE_CHAN(chan)->tx.mps > mps) {
			mps_reduced = true;
		}

		chans[chan_count] = chan;
		chan_count++;
	}

	/* As per BT Core Spec V5.2 Vol. 3, Part A, section 7.11
	 * The request (...) shall not decrease the MPS of a channel
	 * if more than one channel is specified.
	 */
	if (mps_reduced && chan_count > 1) {
		result = BT_L2CAP_RECONF_INVALID_MPS;
		goto response;
	}

	/* All checks passed: apply the new values and notify the app. */
	for (int i = 0; i < chan_count; i++) {
		BT_L2CAP_LE_CHAN(chans[i])->tx.mtu = mtu;
		BT_L2CAP_LE_CHAN(chans[i])->tx.mps = mps;

		if (chans[i]->ops->reconfigured) {
			chans[i]->ops->reconfigured(chans[i]);
		}
	}

	LOG_DBG("mtu %u mps %u", mtu, mps);

response:
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_RECONF_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->result = sys_cpu_to_le16(result);

	l2cap_send_sig(conn, buf);
}
1742
/* Handle an incoming Enhanced Credit Based Reconfigure Response.
 *
 * All channels still carrying @p ident were part of the same request; for
 * each one, stop the RTX timer, commit the pending RX MTU on success and
 * notify the application.
 */
static void le_ecred_reconf_rsp(struct bt_l2cap *l2cap, uint8_t ident,
				struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_ecred_reconf_rsp *rsp;
	struct bt_l2cap_le_chan *ch;
	uint16_t result;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small ecred reconf rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	result = sys_le16_to_cpu(rsp->result);

	while ((ch = l2cap_lookup_ident(conn, ident))) {
		/* Stop timer started on REQ send. The timer is only set on one
		 * of the channels, but we don't want to make assumptions on
		 * which one it is.
		 */
		k_work_cancel_delayable(&ch->rtx_work);

		if (result == BT_L2CAP_LE_SUCCESS) {
			ch->rx.mtu = ch->pending_rx_mtu;
		}

		/* Clearing ident also ends the lookup loop for this chan. */
		ch->pending_rx_mtu = 0;
		ch->ident = 0U;

		if (ch->chan.ops->reconfigured) {
			ch->chan.ops->reconfigured(&ch->chan);
		}
	}
}
1778 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
1779
/* Unlink and return the dynamic channel on @p conn whose RX CID matches
 * @p cid. Fixed channels are never removed.
 *
 * @return The removed channel, or NULL if no dynamic channel matched.
 */
static struct bt_l2cap_le_chan *l2cap_remove_rx_cid(struct bt_conn *conn,
						    uint16_t cid)
{
	struct bt_l2cap_chan *it;
	sys_snode_t *prev = NULL;

	/* Protect fixed channels against accidental removal */
	if (!L2CAP_LE_CID_IS_DYN(cid)) {
		return NULL;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, it, node) {
		struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(it);

		if (le->rx.cid == cid) {
			sys_slist_remove(&conn->channels, prev, &it->node);
			return le;
		}

		prev = &it->node;
	}

	return NULL;
}
1802
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Removes the targeted dynamic channel and answers with a Disconnection
 * Response; an unknown or non-dynamic DCID is rejected with
 * BT_L2CAP_REJ_INVALID_CID.
 *
 * Fix: the truncated-packet log message said "LE conn req" (copy-paste
 * from le_conn_req); it now correctly identifies the disconn request.
 */
static void le_disconn_req(struct bt_l2cap *l2cap, uint8_t ident,
			   struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_disconn_req *req = (void *)buf->data;
	struct bt_l2cap_disconn_rsp *rsp;
	uint16_t dcid;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE disconn req packet size");
		return;
	}

	dcid = sys_le16_to_cpu(req->dcid);

	LOG_DBG("dcid 0x%04x scid 0x%04x", dcid, sys_le16_to_cpu(req->scid));

	chan = l2cap_remove_rx_cid(conn, dcid);
	if (!chan) {
		struct bt_l2cap_cmd_reject_cid_data data;

		/* Echo the CIDs (still little-endian) back in the reject. */
		data.scid = req->scid;
		data.dcid = req->dcid;

		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID, &data,
				  sizeof(data));
		return;
	}

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_DISCONN_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->dcid = sys_cpu_to_le16(chan->rx.cid);
	rsp->scid = sys_cpu_to_le16(chan->tx.cid);

	/* Delete the channel before queuing the response so its resources
	 * are released even if sending fails.
	 */
	bt_l2cap_chan_del(&chan->chan);

	l2cap_send_sig(conn, buf);
}
1847
/* Raise link security in response to an L2CAP error result.
 *
 * Picks the next security level to request based on @p err and the current
 * level, then starts the security procedure and marks the channel as
 * waiting for encryption.
 *
 * @return 0 when a security change was started, -EINPROGRESS if one is
 *         already pending, -EALREADY if the level cannot be raised
 *         further, -EINVAL for unhandled error codes, or the error from
 *         bt_conn_set_security().
 */
static int l2cap_change_security(struct bt_l2cap_le_chan *chan, uint16_t err)
{
	struct bt_conn *conn = chan->chan.conn;
	bt_security_t sec;
	int ret;

	if (atomic_test_bit(chan->chan.status,
			    BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
		return -EINPROGRESS;
	}

	switch (err) {
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		/* Encryption only requires L2. */
		if (conn->sec_level >= BT_SECURITY_L2) {
			return -EALREADY;
		}

		sec = BT_SECURITY_L2;
		break;
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
		/* Step up one level at a time until L4. */
		if (conn->sec_level < BT_SECURITY_L2) {
			sec = BT_SECURITY_L2;
		} else if (conn->sec_level < BT_SECURITY_L3) {
			sec = BT_SECURITY_L3;
		} else if (conn->sec_level < BT_SECURITY_L4) {
			sec = BT_SECURITY_L4;
		} else {
			return -EALREADY;
		}
		break;
	default:
		return -EINVAL;
	}

	ret = bt_conn_set_security(chan->chan.conn, sec);
	if (ret < 0) {
		return ret;
	}

	atomic_set_bit(chan->chan.status, BT_L2CAP_STATUS_ENCRYPT_PENDING);

	return 0;
}
1891
1892 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Handle an incoming Enhanced Credit Based Connection Response.
 *
 * All channels still carrying @p ident belong to the request being
 * answered. Depending on @p result: retry with raised security, complete
 * the connection per returned DCID, or tear the channels down.
 */
static void le_ecred_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t dcid, mtu, mps, credits, result, psm;
	uint8_t attempted = 0;
	uint8_t succeeded = 0;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small ecred conn rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	LOG_DBG("mtu 0x%04x mps 0x%04x credits 0x%04x result %u", mtu, mps, credits, result);

	/* All channels of the request share the same PSM; grab it before
	 * the handling below removes them.
	 */
	chan = l2cap_lookup_ident(conn, ident);
	if (chan) {
		psm = chan->psm;
	} else {
		psm = 0x0000;
	}

	switch (result) {
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		while ((chan = l2cap_lookup_ident(conn, ident))) {

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->rtx_work);

			/* If security needs changing wait it to be completed */
			if (!l2cap_change_security(chan, result)) {
				return;
			}
			bt_l2cap_chan_remove(conn, &chan->chan);
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	case BT_L2CAP_LE_SUCCESS:
	/* Some connections refused - invalid Source CID */
	case BT_L2CAP_LE_ERR_INVALID_SCID:
	/* Some connections refused - Source CID already allocated */
	case BT_L2CAP_LE_ERR_SCID_IN_USE:
	/* Some connections refused - not enough resources available */
	case BT_L2CAP_LE_ERR_NO_RESOURCES:
		while ((chan = l2cap_lookup_ident(conn, ident))) {
			struct bt_l2cap_chan *c;

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->rtx_work);

			if (buf->len < sizeof(dcid)) {
				LOG_ERR("Fewer dcid values than expected");
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			dcid = net_buf_pull_le16(buf);
			attempted++;

			LOG_DBG("dcid 0x%04x", dcid);

			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			if (!dcid) {
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			c = bt_l2cap_le_lookup_tx_cid(conn, dcid);
			if (c) {
				/* If a device receives a
				 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet
				 * with an already assigned Destination CID,
				 * then both the original channel and the new
				 * channel shall be immediately discarded and
				 * not used.
				 */
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				bt_l2cap_chan_disconnect(c);
				continue;
			}

			chan->tx.cid = dcid;

			/* Clearing ident ends this chan's lookup matches. */
			chan->ident = 0U;

			chan->tx.mtu = mtu;
			chan->tx.mps = mps;

			/* Update state */
			bt_l2cap_chan_set_state(&chan->chan,
						BT_L2CAP_CONNECTED);

			if (chan->chan.ops->connected) {
				chan->chan.ops->connected(&chan->chan);
			}

			/* Give credits */
			l2cap_chan_tx_give_credits(chan, credits);

			succeeded++;
		}
		break;
	case BT_L2CAP_LE_ERR_PSM_NOT_SUPP:
	default:
		/* All channels refused: remove and delete every one. */
		while ((chan = l2cap_remove_ident(conn, ident))) {
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	}

	if (ecred_cb && ecred_cb->ecred_conn_rsp) {
		ecred_cb->ecred_conn_rsp(conn, result, attempted, succeeded, psm);
	}
}
2021 #endif /* CONFIG_BT_L2CAP_ECRED */
2022
/* Handle an incoming LE Credit Based Connection Response.
 *
 * On success, completes the channel connection with the peer's parameters
 * and grants the initial TX credits. On security errors the channel is
 * kept while a security upgrade is attempted; any other error deletes it.
 */
static void le_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_le_conn_rsp *rsp = (void *)buf->data;
	uint16_t dcid, mtu, mps, credits, result;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small LE conn rsp packet size");
		return;
	}

	dcid = sys_le16_to_cpu(rsp->dcid);
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	LOG_DBG("dcid 0x%04x mtu %u mps %u credits %u result 0x%04x", dcid, mtu, mps, credits,
		result);

	/* Keep the channel in case of security errors */
	if (result == BT_L2CAP_LE_SUCCESS ||
	    result == BT_L2CAP_LE_ERR_AUTHENTICATION ||
	    result == BT_L2CAP_LE_ERR_ENCRYPTION) {
		chan = l2cap_lookup_ident(conn, ident);
	} else {
		chan = l2cap_remove_ident(conn, ident);
	}

	if (!chan) {
		LOG_ERR("Cannot find channel for ident %u", ident);
		return;
	}

	/* Cancel RTX work */
	k_work_cancel_delayable(&chan->rtx_work);

	/* Reset ident since it got a response */
	chan->ident = 0U;

	switch (result) {
	case BT_L2CAP_LE_SUCCESS:
		chan->tx.cid = dcid;
		chan->tx.mtu = mtu;
		chan->tx.mps = mps;

		/* Update state */
		bt_l2cap_chan_set_state(&chan->chan, BT_L2CAP_CONNECTED);

		if (chan->chan.ops->connected) {
			chan->chan.ops->connected(&chan->chan);
		}

		/* Give credits */
		l2cap_chan_tx_give_credits(chan, credits);

		break;
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		/* If security needs changing wait it to be completed */
		if (l2cap_change_security(chan, result) == 0) {
			return;
		}
		/* Security upgrade could not be started: give up. */
		bt_l2cap_chan_remove(conn, &chan->chan);
		__fallthrough;
	default:
		bt_l2cap_chan_del(&chan->chan);
	}
}
2094
/* Handle an incoming L2CAP Disconnection Response by removing and
 * deleting the channel identified by its source CID, if it still exists.
 */
static void le_disconn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			   struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_disconn_rsp *rsp = (void *)buf->data;
	struct bt_l2cap_le_chan *removed;
	uint16_t scid;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small LE disconn rsp packet size");
		return;
	}

	scid = sys_le16_to_cpu(rsp->scid);

	LOG_DBG("dcid 0x%04x scid 0x%04x", sys_le16_to_cpu(rsp->dcid), scid);

	removed = l2cap_remove_rx_cid(conn, scid);
	if (removed) {
		bt_l2cap_chan_del(&removed->chan);
	}
}
2119
/* Handle an incoming LE Flow Control Credit packet: add the peer-granted
 * credits to the matching channel's TX budget, or disconnect the channel
 * if the peer would push the total past the allowed maximum.
 */
static void le_credits(struct bt_l2cap *l2cap, uint8_t ident,
		       struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_le_credits *ev = (void *)buf->data;
	struct bt_l2cap_le_chan *le_chan;
	uint16_t credits, cid;

	if (buf->len < sizeof(*ev)) {
		LOG_ERR("Too small LE Credits packet size");
		return;
	}

	cid = sys_le16_to_cpu(ev->cid);
	credits = sys_le16_to_cpu(ev->credits);

	LOG_DBG("cid 0x%04x credits %u", cid, credits);

	chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
	if (!chan) {
		LOG_ERR("Unable to find channel of LE Credits packet");
		return;
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	/* The running credit count must stay representable in 16 bits;
	 * a peer granting past that is misbehaving, so disconnect.
	 */
	if (atomic_get(&le_chan->tx.credits) + credits > UINT16_MAX) {
		LOG_ERR("Credits overflow");
		bt_l2cap_chan_disconnect(chan);
		return;
	}

	l2cap_chan_tx_give_credits(le_chan, credits);

	LOG_DBG("chan %p total credits %lu", le_chan, atomic_get(&le_chan->tx.credits));
}
2157
/* Handle an incoming Command Reject: delete every channel still waiting
 * on the rejected request's ident (an ECRED request may have created
 * several at once).
 */
static void reject_cmd(struct bt_l2cap *l2cap, uint8_t ident,
		       struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;

	for (struct bt_l2cap_le_chan *le_chan = l2cap_remove_ident(conn, ident);
	     le_chan != NULL;
	     le_chan = l2cap_remove_ident(conn, ident)) {
		bt_l2cap_chan_del(&le_chan->chan);
	}
}
2168 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2169
/* ops->recv handler for the LE signaling channel: validate the signaling
 * header, then dispatch to the handler for the command code. Unknown or
 * unsupported commands are answered with a Command Reject. Always
 * returns 0; the caller releases the buffer.
 */
static int l2cap_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	struct bt_l2cap_le_chan *l2chan = CONTAINER_OF(chan, struct bt_l2cap_le_chan, chan);
	struct bt_l2cap *l2cap = CONTAINER_OF(l2chan, struct bt_l2cap, chan);
	struct bt_l2cap_sig_hdr *hdr;
	uint16_t len;

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Too small L2CAP signaling PDU");
		return 0;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	len = sys_le16_to_cpu(hdr->len);

	LOG_DBG("Signaling code 0x%02x ident %u len %u", hdr->code, hdr->ident, len);

	/* The header's length field must match the remaining payload exactly. */
	if (buf->len != len) {
		LOG_ERR("L2CAP length mismatch (%u != %u)", buf->len, len);
		return 0;
	}

	/* Ident 0 is reserved and never valid in signaling packets. */
	if (!hdr->ident) {
		LOG_ERR("Invalid ident value in L2CAP PDU");
		return 0;
	}

	switch (hdr->code) {
	case BT_L2CAP_CONN_PARAM_RSP:
		le_conn_param_rsp(l2cap, buf);
		break;
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	case BT_L2CAP_LE_CONN_REQ:
		le_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_LE_CONN_RSP:
		le_conn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_REQ:
		le_disconn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_RSP:
		le_disconn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_LE_CREDITS:
		le_credits(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_CMD_REJECT:
		reject_cmd(l2cap, hdr->ident, buf);
		break;
#if defined(CONFIG_BT_L2CAP_ECRED)
	case BT_L2CAP_ECRED_CONN_REQ:
		le_ecred_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_CONN_RSP:
		le_ecred_conn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_RECONF_REQ:
		le_ecred_reconf_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_RECONF_RSP:
		le_ecred_reconf_rsp(l2cap, hdr->ident, buf);
		break;
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */
#else
	case BT_L2CAP_CMD_REJECT:
		/* Ignored */
		break;
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
	case BT_L2CAP_CONN_PARAM_REQ:
		if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
			le_conn_param_update_req(l2cap, hdr->ident, buf);
			break;
		}
		/* Peripheral-only builds reject the request below. */
		__fallthrough;
	default:
		LOG_WRN("Rejecting unknown L2CAP PDU code 0x%02x", hdr->code);
		l2cap_send_reject(chan->conn, hdr->ident,
				  BT_L2CAP_REJ_NOT_UNDERSTOOD, NULL, 0);
		break;
	}

	return 0;
}
2254
2255 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_chan_shutdown(struct bt_l2cap_chan * chan)2256 static void l2cap_chan_shutdown(struct bt_l2cap_chan *chan)
2257 {
2258 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2259 struct net_buf *buf;
2260
2261 LOG_DBG("chan %p", chan);
2262
2263 atomic_set_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN);
2264
2265 /* Destroy segmented SDU if it exists */
2266 if (le_chan->_sdu) {
2267 net_buf_unref(le_chan->_sdu);
2268 le_chan->_sdu = NULL;
2269 le_chan->_sdu_len = 0U;
2270 }
2271
2272 /* Remove buffers on the TX queue */
2273 while ((buf = k_fifo_get(&le_chan->tx_queue, K_NO_WAIT))) {
2274 l2cap_tx_buf_destroy(chan->conn, buf, -ESHUTDOWN);
2275 }
2276
2277 /* Remove buffers on the RX queue */
2278 while ((buf = k_fifo_get(&le_chan->rx_queue, K_NO_WAIT))) {
2279 net_buf_unref(buf);
2280 }
2281
2282 /* Update status */
2283 if (chan->ops->status) {
2284 chan->ops->status(chan, chan->status);
2285 }
2286 }
2287
/* Replenish the peer's credits for @p chan by sending an LE Flow Control
 * Credit packet granting @p credits. Only called on a connected channel
 * whose previous credit grant is fully consumed. If the signaling PDU
 * cannot be allocated, the channel is shut down, since without signaling
 * capacity not even a disconnect request could be sent.
 */
static void l2cap_chan_send_credits(struct bt_l2cap_le_chan *chan,
				    uint16_t credits)
{
	struct bt_l2cap_le_credits *ev;
	struct net_buf *buf;

	__ASSERT_NO_MSG(bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED);

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CREDITS, get_ident(),
				      sizeof(*ev));
	if (!buf) {
		LOG_ERR("Unable to send credits update");
		/* Disconnect would probably not work either so the only
		 * option left is to shutdown the channel.
		 */
		l2cap_chan_shutdown(&chan->chan);
		return;
	}

	/* Record locally what we are about to grant; the previous grant
	 * must be exhausted at this point.
	 */
	__ASSERT_NO_MSG(atomic_get(&chan->rx.credits) == 0);
	atomic_set(&chan->rx.credits, credits);

	ev = net_buf_add(buf, sizeof(*ev));
	ev->cid = sys_cpu_to_le16(chan->rx.cid);
	ev->credits = sys_cpu_to_le16(credits);

	l2cap_send_sig(chan->chan.conn, buf);

	LOG_DBG("chan %p credits %lu", chan, atomic_get(&chan->rx.credits));
}
2318
2319 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
/* Build and send an LE Flow Control Credit packet granting @p credits on
 * channel @p cid. Returns 0 on success, -ENOBUFS if no signaling PDU
 * could be allocated, or the send error.
 */
static int l2cap_chan_send_credits_pdu(struct bt_conn *conn, uint16_t cid, uint16_t credits)
{
	struct bt_l2cap_le_credits *ev;
	struct net_buf *buf;

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CREDITS, get_ident(), sizeof(*ev));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	ev = net_buf_add(buf, sizeof(*ev));
	ev->cid = sys_cpu_to_le16(cid);
	ev->credits = sys_cpu_to_le16(credits);

	return l2cap_send_sig(conn, buf);
}
2338
2339 /**
2340 * Combination of @ref atomic_add and @ref u16_add_overflow. Leaves @p
2341 * target unchanged if an overflow would occur. Assumes the current
2342 * value of @p target is representable by uint16_t.
2343 */
atomic_add_safe_u16(atomic_t * target,uint16_t addition)2344 static bool atomic_add_safe_u16(atomic_t *target, uint16_t addition)
2345 {
2346 uint16_t target_old, target_new;
2347
2348 do {
2349 target_old = atomic_get(target);
2350 if (u16_add_overflow(target_old, addition, &target_new)) {
2351 return true;
2352 }
2353 } while (!atomic_cas(target, target_old, target_new));
2354
2355 return false;
2356 }
2357
/* Hand application receive credits to the L2CAP layer.
 *
 * Only valid for channels using the seg_recv API. The credits are
 * accumulated in rx.credits; if the channel is already connected, a
 * Flow Control Credit PDU advertising them is sent immediately.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -EBUSY while the
 * channel is connecting, -EOVERFLOW if the total would exceed 16 bits,
 * or the PDU send error.
 */
int bt_l2cap_chan_give_credits(struct bt_l2cap_chan *chan, uint16_t additional_credits)
{
	struct bt_l2cap_le_chan *le_chan;

	/* Fix: validate @p chan before converting it. The original applied
	 * BT_L2CAP_LE_CHAN() (CONTAINER_OF pointer arithmetic) to a
	 * possibly-NULL pointer ahead of this check.
	 */
	if (!chan || !chan->ops) {
		LOG_ERR("%s: Invalid chan object.", __func__);
		return -EINVAL;
	}

	if (!chan->ops->seg_recv) {
		LOG_ERR("%s: Available only with seg_recv.", __func__);
		return -EINVAL;
	}

	if (additional_credits == 0) {
		LOG_ERR("%s: Refusing to give 0.", __func__);
		return -EINVAL;
	}

	if (bt_l2cap_chan_get_state(chan) == BT_L2CAP_CONNECTING) {
		LOG_ERR("%s: Cannot give credits while connecting.", __func__);
		return -EBUSY;
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	/* Accumulate without letting rx.credits wrap past UINT16_MAX. */
	if (atomic_add_safe_u16(&le_chan->rx.credits, additional_credits)) {
		LOG_ERR("%s: Overflow.", __func__);
		return -EOVERFLOW;
	}

	if (bt_l2cap_chan_get_state(chan) == BT_L2CAP_CONNECTED) {
		int err;

		err = l2cap_chan_send_credits_pdu(chan->conn, le_chan->rx.cid, additional_credits);
		if (err) {
			LOG_ERR("%s: PDU failed %d.", __func__, err);
			return err;
		}
	}

	return 0;
}
2399 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
2400
bt_l2cap_chan_recv_complete(struct bt_l2cap_chan * chan,struct net_buf * buf)2401 int bt_l2cap_chan_recv_complete(struct bt_l2cap_chan *chan, struct net_buf *buf)
2402 {
2403 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2404 struct bt_conn *conn = chan->conn;
2405
2406 __ASSERT_NO_MSG(chan);
2407 __ASSERT_NO_MSG(buf);
2408
2409 net_buf_unref(buf);
2410
2411 if (!conn) {
2412 return -ENOTCONN;
2413 }
2414
2415 if (conn->type != BT_CONN_TYPE_LE) {
2416 return -ENOTSUP;
2417 }
2418
2419 LOG_DBG("chan %p buf %p", chan, buf);
2420
2421 if (bt_l2cap_chan_get_state(&le_chan->chan) == BT_L2CAP_CONNECTED) {
2422 l2cap_chan_send_credits(le_chan, 1);
2423 }
2424
2425 return 0;
2426 }
2427
/* net_buf_append_bytes() allocator callback: ask the channel owner for
 * the next fragment buffer. Returns NULL when the application cannot
 * provide one.
 */
static struct net_buf *l2cap_alloc_frag(k_timeout_t timeout, void *user_data)
{
	struct bt_l2cap_le_chan *le_chan = user_data;
	struct net_buf *frag;

	ARG_UNUSED(timeout);

	frag = le_chan->chan.ops->alloc_buf(&le_chan->chan);
	if (frag == NULL) {
		return NULL;
	}

	LOG_DBG("frag %p tailroom %zu", frag, net_buf_tailroom(frag));

	return frag;
}
2442
/* Deliver a fully reassembled SDU to the application via ops->recv.
 *
 * On -EINPROGRESS the buffer stays with the application, which returns
 * it (and the credit) later through bt_l2cap_chan_recv_complete(). Any
 * other negative return disconnects the channel. @p seg is the segment
 * count tracked by the caller and is not used here.
 */
static void l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan *chan,
				   struct net_buf *buf, uint16_t seg)
{
	int err;

	LOG_DBG("chan %p len %zu", chan, buf->len);

	__ASSERT_NO_MSG(bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED);
	__ASSERT_NO_MSG(atomic_get(&chan->rx.credits) == 0);

	/* Receiving complete SDU, notify channel and reset SDU buf */
	err = chan->chan.ops->recv(&chan->chan, buf);
	if (err < 0) {
		if (err != -EINPROGRESS) {
			LOG_ERR("err %d", err);
			bt_l2cap_chan_disconnect(&chan->chan);
			net_buf_unref(buf);
		}
		return;
	} else if (bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED) {
		/* SDU consumed synchronously: grant the peer a credit back. */
		l2cap_chan_send_credits(chan, 1);
	}

	net_buf_unref(buf);
}
2468
/* Append one received K-frame payload to the SDU being reassembled in
 * chan->_sdu. The running segment counter is kept in the SDU buffer's
 * user_data. When the SDU is complete it is detached from the channel
 * and handed to the application.
 */
static void l2cap_chan_le_recv_seg(struct bt_l2cap_le_chan *chan,
				   struct net_buf *buf)
{
	uint16_t len;
	uint16_t seg = 0U;

	/* Recover the segment counter stored with the partial SDU, if any. */
	len = chan->_sdu->len;
	if (len) {
		memcpy(&seg, net_buf_user_data(chan->_sdu), sizeof(seg));
	}

	/* Peer must not send more SDU bytes than the SDU header announced. */
	if (len + buf->len > chan->_sdu_len) {
		LOG_ERR("SDU length mismatch");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	seg++;
	/* Store received segments in user_data */
	memcpy(net_buf_user_data(chan->_sdu), &seg, sizeof(seg));

	LOG_DBG("chan %p seg %d len %zu", chan, seg, buf->len);

	/* Append received segment to SDU */
	len = net_buf_append_bytes(chan->_sdu, buf->len, buf->data, K_NO_WAIT,
				   l2cap_alloc_frag, chan);
	if (len != buf->len) {
		LOG_ERR("Unable to store SDU");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	if (chan->_sdu->len < chan->_sdu_len) {
		/* Give more credits if remote has run out of them, this
		 * should only happen if the remote cannot fully utilize the
		 * MPS for some reason.
		 *
		 * We can't send more than one credit, because if the remote
		 * decides to start fully utilizing the MPS for the remainder of
		 * the SDU, then the remote will end up with more credits than
		 * the app has buffers.
		 */
		if (atomic_get(&chan->rx.credits) == 0) {
			LOG_DBG("remote is not fully utilizing MPS");
			l2cap_chan_send_credits(chan, 1);
		}

		return;
	}

	/* SDU complete: detach it from the channel and deliver it. */
	buf = chan->_sdu;
	chan->_sdu = NULL;
	chan->_sdu_len = 0U;

	l2cap_chan_le_recv_sdu(chan, buf, seg);
}
2525
2526 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
/* seg_recv path: forward each K-frame payload directly to the
 * application's seg_recv callback without host-side reassembly. SDU
 * progress is tracked in chan->_sdu_len (announced total) and
 * chan->_sdu_len_done (bytes delivered so far).
 */
static void l2cap_chan_le_recv_seg_direct(struct bt_l2cap_le_chan *chan, struct net_buf *seg)
{
	uint16_t seg_offset;
	uint16_t sdu_remaining;

	if (chan->_sdu_len_done == chan->_sdu_len) {

		/* This is the first PDU in a SDU. */

		if (seg->len < 2) {
			LOG_WRN("Missing SDU header");
			bt_l2cap_chan_disconnect(&chan->chan);
			return;
		}

		/* Pop off the "SDU header". */
		chan->_sdu_len = net_buf_pull_le16(seg);
		chan->_sdu_len_done = 0;

		if (chan->_sdu_len > chan->rx.mtu) {
			LOG_WRN("SDU exceeds MTU");
			bt_l2cap_chan_disconnect(&chan->chan);
			return;
		}
	}

	seg_offset = chan->_sdu_len_done;
	sdu_remaining = chan->_sdu_len - chan->_sdu_len_done;

	/* Peer must not deliver more bytes than the SDU header announced. */
	if (seg->len > sdu_remaining) {
		LOG_WRN("L2CAP RX PDU total exceeds SDU");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Commit receive. */
	chan->_sdu_len_done += seg->len;

	/* Tail call. */
	chan->chan.ops->seg_recv(&chan->chan, chan->_sdu_len, seg_offset, &seg->b);
}
2568 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
2569
/* Credit-based receive path for a dynamic (CoC) channel. Consumes one RX
 * credit per PDU, enforces MPS/MTU limits, and then either forwards the
 * segment directly (seg_recv path), reassembles the SDU (alloc_buf
 * path), or delivers the PDU to ops->recv as-is. Does not take
 * ownership of @p buf.
 */
static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
			       struct net_buf *buf)
{
	struct net_buf *owned_ref;
	uint16_t sdu_len;
	int err;

	/* Every K-frame costs the peer one credit; receiving with none
	 * left means the peer violated flow control.
	 */
	if (!test_and_dec(&chan->rx.credits)) {
		LOG_ERR("No credits to receive packet");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	if (buf->len > chan->rx.mps) {
		LOG_WRN("PDU size > MPS (%u > %u)", buf->len, chan->rx.mps);
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Redirect to experimental API. */
	IF_ENABLED(CONFIG_BT_L2CAP_SEG_RECV, (
		if (chan->chan.ops->seg_recv) {
			l2cap_chan_le_recv_seg_direct(chan, buf);
			return;
		}
	))

	/* Check if segments already exist */
	if (chan->_sdu) {
		l2cap_chan_le_recv_seg(chan, buf);
		return;
	}

	/* First PDU of an SDU must carry the 2-byte SDU length header. */
	if (buf->len < 2) {
		LOG_WRN("Too short data packet");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	sdu_len = net_buf_pull_le16(buf);

	LOG_DBG("chan %p len %u sdu_len %u", chan, buf->len, sdu_len);

	if (sdu_len > chan->rx.mtu) {
		LOG_ERR("Invalid SDU length");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Always allocate buffer from the channel if supported. */
	if (chan->chan.ops->alloc_buf) {
		chan->_sdu = chan->chan.ops->alloc_buf(&chan->chan);
		if (!chan->_sdu) {
			LOG_ERR("Unable to allocate buffer for SDU");
			bt_l2cap_chan_disconnect(&chan->chan);
			return;
		}
		chan->_sdu_len = sdu_len;

		/* Send sdu_len/mps worth of credits */
		uint16_t credits = DIV_ROUND_UP(
			MIN(sdu_len - buf->len, net_buf_tailroom(chan->_sdu)),
			chan->rx.mps);

		if (credits) {
			LOG_DBG("sending %d extra credits (sdu_len %d buf_len %d mps %d)",
				credits,
				sdu_len,
				buf->len,
				chan->rx.mps);
			l2cap_chan_send_credits(chan, credits);
		}

		l2cap_chan_le_recv_seg(chan, buf);
		return;
	}

	/* No reassembly buffer: hand the PDU to the application. Take an
	 * extra reference so the buffer survives if recv() keeps it
	 * (-EINPROGRESS).
	 */
	owned_ref = net_buf_ref(buf);
	err = chan->chan.ops->recv(&chan->chan, owned_ref);
	if (err != -EINPROGRESS) {
		net_buf_unref(owned_ref);
		owned_ref = NULL;
	}

	if (err < 0) {
		if (err != -EINPROGRESS) {
			LOG_ERR("err %d", err);
			bt_l2cap_chan_disconnect(&chan->chan);
		}
		return;
	}

	/* Only attempt to send credits if the channel wasn't disconnected
	 * in the recv() callback above
	 */
	if (bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED) {
		l2cap_chan_send_credits(chan, 1);
	}
}
2669
l2cap_chan_recv_queue(struct bt_l2cap_le_chan * chan,struct net_buf * buf)2670 static void l2cap_chan_recv_queue(struct bt_l2cap_le_chan *chan,
2671 struct net_buf *buf)
2672 {
2673 if (chan->state == BT_L2CAP_DISCONNECTING) {
2674 LOG_WRN("Ignoring data received while disconnecting");
2675 net_buf_unref(buf);
2676 return;
2677 }
2678
2679 if (atomic_test_bit(chan->chan.status, BT_L2CAP_STATUS_SHUTDOWN)) {
2680 LOG_WRN("Ignoring data received while channel has shutdown");
2681 net_buf_unref(buf);
2682 return;
2683 }
2684
2685 if (!L2CAP_LE_PSM_IS_DYN(chan->psm)) {
2686 l2cap_chan_le_recv(chan, buf);
2687 net_buf_unref(buf);
2688 return;
2689 }
2690
2691 k_fifo_put(&chan->rx_queue, buf);
2692 k_work_submit(&chan->rx_work);
2693 }
2694 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2695
/* Route one L2CAP PDU to its channel. Dynamic (CoC) channels go through
 * the credit-based receive path; fixed channels get the buffer delivered
 * straight to ops->recv. Takes ownership of @p buf.
 */
static void l2cap_chan_recv(struct bt_l2cap_chan *chan, struct net_buf *buf,
			    bool complete)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	if (L2CAP_LE_CID_IS_DYN(le_chan->rx.cid)) {
		if (complete) {
			l2cap_chan_recv_queue(le_chan, buf);
		} else {
			/* if packet was not complete this means peer device
			 * overflowed our RX and channel shall be disconnected
			 */
			bt_l2cap_chan_disconnect(chan);
			net_buf_unref(buf);
		}

		return;
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

	LOG_DBG("chan %p len %u", chan, buf->len);

	chan->ops->recv(chan, buf);
	net_buf_unref(buf);
}
2722
/* Entry point for ACL data from the connection layer: strip the basic
 * L2CAP header, look up the destination channel by CID and dispatch.
 * Takes ownership of @p buf. BR/EDR traffic is forwarded to the classic
 * implementation unmodified.
 */
void bt_l2cap_recv(struct bt_conn *conn, struct net_buf *buf, bool complete)
{
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_hdr *hdr;
	uint16_t cid;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_recv(conn, buf);
		return;
	}

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Too small L2CAP PDU received");
		net_buf_unref(buf);
		return;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	cid = sys_le16_to_cpu(hdr->cid);

	LOG_DBG("Packet for CID %u len %u", cid, buf->len);

	chan = bt_l2cap_le_lookup_rx_cid(conn, cid);
	if (chan == NULL) {
		LOG_WRN("Ignoring data for unknown channel ID 0x%04x", cid);
		net_buf_unref(buf);
		return;
	}

	l2cap_chan_recv(chan, buf, complete);
}
2755
bt_l2cap_update_conn_param(struct bt_conn * conn,const struct bt_le_conn_param * param)2756 int bt_l2cap_update_conn_param(struct bt_conn *conn,
2757 const struct bt_le_conn_param *param)
2758 {
2759 struct bt_l2cap_conn_param_req *req;
2760 struct net_buf *buf;
2761
2762 buf = l2cap_create_le_sig_pdu(BT_L2CAP_CONN_PARAM_REQ,
2763 get_ident(), sizeof(*req));
2764 if (!buf) {
2765 return -ENOMEM;
2766 }
2767
2768 req = net_buf_add(buf, sizeof(*req));
2769 req->min_interval = sys_cpu_to_le16(param->interval_min);
2770 req->max_interval = sys_cpu_to_le16(param->interval_max);
2771 req->latency = sys_cpu_to_le16(param->latency);
2772 req->timeout = sys_cpu_to_le16(param->timeout);
2773
2774 return l2cap_send_sig(conn, buf);
2775 }
2776
l2cap_connected(struct bt_l2cap_chan * chan)2777 static void l2cap_connected(struct bt_l2cap_chan *chan)
2778 {
2779 LOG_DBG("ch %p cid 0x%04x", BT_L2CAP_LE_CHAN(chan), BT_L2CAP_LE_CHAN(chan)->rx.cid);
2780 }
2781
/* ops->disconnected handler for the LE signaling channel. */
static void l2cap_disconnected(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("ch %p cid 0x%04x", le_chan, le_chan->rx.cid);

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Cancel RTX work on signal channel.
	 * Disconnected callback is always called from system workqueue
	 * so this should always succeed.
	 */
	(void)k_work_cancel_delayable(&le_chan->rtx_work);
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
}
2796
l2cap_accept(struct bt_conn * conn,struct bt_l2cap_chan ** chan)2797 static int l2cap_accept(struct bt_conn *conn, struct bt_l2cap_chan **chan)
2798 {
2799 int i;
2800 static const struct bt_l2cap_chan_ops ops = {
2801 .connected = l2cap_connected,
2802 .disconnected = l2cap_disconnected,
2803 .recv = l2cap_recv,
2804 };
2805
2806 LOG_DBG("conn %p handle %u", conn, conn->handle);
2807
2808 for (i = 0; i < ARRAY_SIZE(bt_l2cap_pool); i++) {
2809 struct bt_l2cap *l2cap = &bt_l2cap_pool[i];
2810
2811 if (l2cap->chan.chan.conn) {
2812 continue;
2813 }
2814
2815 l2cap->chan.chan.ops = &ops;
2816 *chan = &l2cap->chan.chan;
2817
2818 return 0;
2819 }
2820
2821 LOG_ERR("No available L2CAP context for conn %p", conn);
2822
2823 return -ENOMEM;
2824 }
2825
/* Register the fixed LE signaling channel with its accept callback. */
BT_L2CAP_CHANNEL_DEFINE(le_fixed_chan, BT_L2CAP_CID_LE_SIG, l2cap_accept, NULL);
2827
bt_l2cap_init(void)2828 void bt_l2cap_init(void)
2829 {
2830 if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
2831 bt_l2cap_br_init();
2832 }
2833 }
2834
2835 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Start an LE CoC connection on @p ch towards @p psm.
 *
 * If the link's security level is below what the channel requires, a
 * security upgrade is requested first and ENCRYPT_PENDING is set —
 * presumably the connect request is then issued once encryption
 * completes (handled elsewhere in this file; verify against the
 * security-changed path). On failure the channel is removed and deleted.
 *
 * Returns 0 on success, -EINVAL for an out-of-range PSM, -ENOMEM if the
 * channel cannot be registered, or the security/connect-request error.
 */
static int l2cap_le_connect(struct bt_conn *conn, struct bt_l2cap_le_chan *ch,
			    uint16_t psm)
{
	int err;

	if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
		return -EINVAL;
	}

	l2cap_chan_tx_init(ch);
	l2cap_chan_rx_init(ch);

	if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
		return -ENOMEM;
	}

	ch->psm = psm;

	if (conn->sec_level < ch->required_sec_level) {
		err = bt_conn_set_security(conn, ch->required_sec_level);
		if (err) {
			goto fail;
		}

		/* Defer the connect request until security is in place. */
		atomic_set_bit(ch->chan.status,
			       BT_L2CAP_STATUS_ENCRYPT_PENDING);

		return 0;
	}

	err = l2cap_le_conn_req(ch);
	if (err) {
		goto fail;
	}

	return 0;

fail:
	bt_l2cap_chan_remove(conn, &ch->chan);
	bt_l2cap_chan_del(&ch->chan);
	return err;
}
2878
2879 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Prepare one LE channel for inclusion in an enhanced (ECRED) connection
 * request: validate the PSM, initialize TX/RX state, and register the
 * channel on the connection. Returns 0, -EINVAL or -ENOMEM.
 */
static int l2cap_ecred_init(struct bt_conn *conn,
			    struct bt_l2cap_le_chan *ch, uint16_t psm)
{

	if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
		return -EINVAL;
	}

	l2cap_chan_tx_init(ch);
	l2cap_chan_rx_init(ch);

	if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
		return -ENOMEM;
	}

	ch->psm = psm;

	LOG_DBG("ch %p psm 0x%02x mtu %u mps %u credits 1", ch, ch->psm, ch->rx.mtu, ch->rx.mps);

	return 0;
}
2901
/* Connect up to BT_L2CAP_ECRED_CHAN_MAX_PER_REQ channels to @p psm with
 * a single enhanced-credit-based connection request. The @p chan array
 * is scanned up to the first NULL entry; on an init failure every
 * channel added so far is removed again before returning the error.
 */
int bt_l2cap_ecred_chan_connect(struct bt_conn *conn,
				struct bt_l2cap_chan **chan, uint16_t psm)
{
	int i, err;

	LOG_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);

	if (!conn || !chan) {
		return -EINVAL;
	}

	/* Init non-null channels */
	for (i = 0; i < BT_L2CAP_ECRED_CHAN_MAX_PER_REQ; i++) {
		if (!chan[i]) {
			break;
		}

		err = l2cap_ecred_init(conn, BT_L2CAP_LE_CHAN(chan[i]), psm);
		if (err < 0) {
			/* chan[i] was not added; step back to the last
			 * successfully added channel before cleanup.
			 */
			i--;
			goto fail;
		}
	}

	return l2cap_ecred_conn_req(chan, i);
fail:
	/* Remove channels added */
	for (; i >= 0; i--) {
		if (!chan[i]) {
			continue;
		}

		bt_l2cap_chan_remove(conn, chan[i]);
	}

	return err;
}
2939
l2cap_find_pending_reconf(struct bt_conn * conn)2940 static struct bt_l2cap_le_chan *l2cap_find_pending_reconf(struct bt_conn *conn)
2941 {
2942 struct bt_l2cap_chan *chan;
2943
2944 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
2945 if (BT_L2CAP_LE_CHAN(chan)->pending_rx_mtu) {
2946 return BT_L2CAP_LE_CHAN(chan);
2947 }
2948 }
2949
2950 return NULL;
2951 }
2952
/* Request an MTU increase on up to BT_L2CAP_ECRED_CHAN_MAX_PER_REQ
 * channels (the array is scanned up to the first NULL entry) with a
 * single ECRED Reconfigure Request. All channels must belong to the same
 * LE connection and the MTU must not decrease; only one reconfiguration
 * may be pending per connection.
 *
 * Returns 0 on success or -EINVAL/-ENOTCONN/-EBUSY/-ENOMEM.
 */
int bt_l2cap_ecred_chan_reconfigure(struct bt_l2cap_chan **chans, uint16_t mtu)
{
	struct bt_l2cap_ecred_reconf_req *req;
	struct bt_conn *conn = NULL;
	struct bt_l2cap_le_chan *ch;
	struct net_buf *buf;
	uint8_t ident;
	int i;

	LOG_DBG("chans %p mtu 0x%04x", chans, mtu);

	if (!chans) {
		return -EINVAL;
	}

	for (i = 0; i < BT_L2CAP_ECRED_CHAN_MAX_PER_REQ; i++) {
		if (!chans[i]) {
			break;
		}

		/* validate that all channels are from same connection */
		if (conn) {
			if (conn != chans[i]->conn) {
				return -EINVAL;
			}
		} else {
			conn = chans[i]->conn;
		}

		/* validate MTU is not decreased */
		if (mtu < BT_L2CAP_LE_CHAN(chans[i])->rx.mtu) {
			return -EINVAL;
		}
	}

	if (i == 0) {
		return -EINVAL;
	}

	if (!conn) {
		return -ENOTCONN;
	}

	if (conn->type != BT_CONN_TYPE_LE) {
		return -EINVAL;
	}

	/* allow only 1 request at time */
	if (l2cap_find_pending_reconf(conn)) {
		return -EBUSY;
	}

	ident = get_ident();

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_RECONF_REQ,
				      ident,
				      sizeof(*req) + (i * sizeof(uint16_t)));
	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));
	req->mtu = sys_cpu_to_le16(mtu);

	/* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE
	 * as the remaining bytes cannot be used.
	 */
	req->mps = sys_cpu_to_le16(MIN(mtu + BT_L2CAP_SDU_HDR_SIZE,
				       BT_L2CAP_RX_MTU));

	/* Tag every channel with the request ident and the MTU it is
	 * waiting for, so the response handler can find and update them.
	 * (Fix: dropped the stray ';' that followed this loop.)
	 */
	for (int j = 0; j < i; j++) {
		ch = BT_L2CAP_LE_CHAN(chans[j]);

		ch->ident = ident;
		ch->pending_rx_mtu = mtu;

		net_buf_add_le16(buf, ch->rx.cid);
	}

	/* We set the RTX timer on one of the supplied channels, but when the
	 * request resolves or times out we will act on all the channels in the
	 * supplied array, using the ident field to find them.
	 */
	l2cap_chan_send_req(chans[0], buf, L2CAP_CONN_TIMEOUT);

	return 0;
}
3040
3041 #if defined(CONFIG_BT_L2CAP_RECONFIGURE_EXPLICIT)
/* Like bt_l2cap_ecred_chan_reconfigure(), but with an explicit channel
 * count and an explicit MPS. All channels must belong to the same LE
 * connection; MTU must not decrease, and when reconfiguring more than
 * one channel MPS must not decrease either (Core Spec 3.A.4.27). Only
 * one reconfiguration may be pending per connection.
 *
 * Returns 0 on success or -EINVAL/-ENOTCONN/-EBUSY/-ENOMEM.
 */
int bt_l2cap_ecred_chan_reconfigure_explicit(struct bt_l2cap_chan **chans, size_t chan_count,
					     uint16_t mtu, uint16_t mps)
{
	struct bt_l2cap_ecred_reconf_req *req;
	struct bt_conn *conn = NULL;
	struct net_buf *buf;
	uint8_t ident;

	LOG_DBG("chans %p chan_count %u mtu 0x%04x mps 0x%04x", chans, chan_count, mtu, mps);

	if (!chans || !IN_RANGE(chan_count, 1, BT_L2CAP_ECRED_CHAN_MAX_PER_REQ)) {
		return -EINVAL;
	}

	if (!IN_RANGE(mps, BT_L2CAP_ECRED_MIN_MPS, BT_L2CAP_RX_MTU)) {
		return -EINVAL;
	}

	for (size_t i = 0; i < chan_count; i++) {
		/* validate that all channels are from same connection */
		if (conn) {
			if (conn != chans[i]->conn) {
				return -EINVAL;
			}
		} else {
			conn = chans[i]->conn;
		}

		/* validate MTU is not decreased */
		if (mtu < BT_L2CAP_LE_CHAN(chans[i])->rx.mtu) {
			return -EINVAL;
		}

		/* MPS is not allowed to decrease when reconfiguring multiple channels.
		 * Core Specification 3.A.4.27 v6.0
		 */
		if (chan_count > 1 && mps < BT_L2CAP_LE_CHAN(chans[i])->rx.mps) {
			return -EINVAL;
		}
	}

	if (!conn) {
		return -ENOTCONN;
	}

	if (conn->type != BT_CONN_TYPE_LE) {
		return -EINVAL;
	}

	/* allow only 1 request at time */
	if (l2cap_find_pending_reconf(conn)) {
		return -EBUSY;
	}

	ident = get_ident();

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_RECONF_REQ, ident,
				      sizeof(*req) + (chan_count * sizeof(uint16_t)));
	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));
	req->mtu = sys_cpu_to_le16(mtu);
	req->mps = sys_cpu_to_le16(mps);

	/* Tag every channel with the request ident and the MTU it is
	 * waiting for, so the response handler can find and update them.
	 * (Fix: dropped the stray ';' that followed this loop.)
	 */
	for (size_t i = 0; i < chan_count; i++) {
		struct bt_l2cap_le_chan *ch;

		ch = BT_L2CAP_LE_CHAN(chans[i]);

		ch->ident = ident;
		ch->pending_rx_mtu = mtu;

		net_buf_add_le16(buf, ch->rx.cid);
	}

	/* We set the RTX timer on one of the supplied channels, but when the
	 * request resolves or times out we will act on all the channels in the
	 * supplied array, using the ident field to find them.
	 */
	l2cap_chan_send_req(chans[0], buf, L2CAP_CONN_TIMEOUT);

	return 0;
}
3127 #endif /* defined(CONFIG_BT_L2CAP_RECONFIGURE_EXPLICIT) */
3128
3129 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
3130
/* Connect @p chan to @p psm on @p conn.
 *
 * BR/EDR links are dispatched to the classic implementation. For LE, the
 * channel's required security level is validated (and L0 is normalized
 * to L1) before starting an LE CoC connection.
 *
 * Returns 0 on success, -ENOTCONN if the connection is not established,
 * -EINVAL on bad arguments, or the LE connect error.
 */
int bt_l2cap_chan_connect(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			  uint16_t psm)
{
	struct bt_l2cap_le_chan *le_chan;

	LOG_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);

	if (!conn || conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	if (!chan) {
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		return bt_l2cap_br_chan_connect(conn, chan, psm);
	}

	/* Fix: convert to the LE channel only after the NULL check; the
	 * original applied BT_L2CAP_LE_CHAN() (CONTAINER_OF pointer
	 * arithmetic) to a possibly-NULL chan.
	 */
	le_chan = BT_L2CAP_LE_CHAN(chan);

	if (le_chan->required_sec_level > BT_SECURITY_L4) {
		return -EINVAL;
	} else if (le_chan->required_sec_level == BT_SECURITY_L0) {
		le_chan->required_sec_level = BT_SECURITY_L1;
	}

	return l2cap_le_connect(conn, le_chan, psm);
}
3159
/* Start disconnecting @p chan.
 *
 * BR/EDR links are handled by the classic implementation. For LE, a
 * Disconnection Request is sent and the channel transitions to
 * BT_L2CAP_DISCONNECTING; the channel is deleted when the response (see
 * le_disconn_rsp()) or the RTX timeout arrives.
 *
 * Returns 0 on success, -ENOTCONN without a connection, or -ENOMEM if
 * the signaling PDU cannot be allocated.
 */
int bt_l2cap_chan_disconnect(struct bt_l2cap_chan *chan)
{
	struct bt_conn *conn = chan->conn;
	struct net_buf *buf;
	struct bt_l2cap_disconn_req *req;
	struct bt_l2cap_le_chan *le_chan;

	if (!conn) {
		return -ENOTCONN;
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		return bt_l2cap_br_chan_disconnect(chan);
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("chan %p scid 0x%04x dcid 0x%04x", chan, le_chan->rx.cid, le_chan->tx.cid);

	/* Remember the ident so the response can be matched back to us. */
	le_chan->ident = get_ident();

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_DISCONN_REQ,
				      le_chan->ident, sizeof(*req));
	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));
	req->dcid = sys_cpu_to_le16(le_chan->tx.cid);
	req->scid = sys_cpu_to_le16(le_chan->rx.cid);

	l2cap_chan_send_req(chan, buf, L2CAP_DISC_TIMEOUT);
	bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTING);

	return 0;
}
3197
user_data_not_empty(const struct net_buf * buf)3198 __maybe_unused static bool user_data_not_empty(const struct net_buf *buf)
3199 {
3200 size_t ud_len = sizeof(struct closure);
3201 const uint8_t *ud = net_buf_user_data(buf);
3202
3203 for (size_t i = 0; i < ud_len; i++) {
3204 if (ud[i] != 0) {
3205 return true;
3206 }
3207 }
3208
3209 return false;
3210 }
3211
/* Queue one SDU for transmission on a dynamic (CoC) channel.
 *
 * Validates the MTU, sole ownership of the buffer and required
 * headroom, prepends the 2-byte SDU length header, and puts the buffer
 * on the channel's TX queue; processing happens later in the TX
 * context. Returns 0 on success, -EMSGSIZE or -EINVAL.
 */
static int bt_l2cap_dyn_chan_send(struct bt_l2cap_le_chan *le_chan, struct net_buf *buf)
{
	uint16_t sdu_len = buf->len;

	LOG_DBG("chan %p buf %p", le_chan, buf);

	/* Frags are not supported. */
	__ASSERT_NO_MSG(buf->frags == NULL);

	if (sdu_len > le_chan->tx.mtu) {
		LOG_ERR("attempt to send %u bytes on %u MTU chan",
			sdu_len, le_chan->tx.mtu);
		return -EMSGSIZE;
	}

	if (buf->ref != 1) {
		/* The host may alter the buf contents when segmenting. Higher
		 * layers cannot expect the buf contents to stay intact. Extra
		 * refs suggests a silent data corruption would occur if not for
		 * this error.
		 */
		LOG_ERR("buf given to l2cap has other refs");
		return -EINVAL;
	}

	if (net_buf_headroom(buf) < BT_L2CAP_SDU_CHAN_SEND_RESERVE) {
		/* Call `net_buf_reserve(buf, BT_L2CAP_SDU_CHAN_SEND_RESERVE)`
		 * when allocating buffers intended for bt_l2cap_chan_send().
		 */
		LOG_ERR("Not enough headroom in buf %p", buf);
		return -EINVAL;
	}

	if (user_data_not_empty(buf)) {
		/* There may be issues if user_data is not empty. */
		LOG_WRN("user_data is not empty");
	}

	/* Prepend SDU length.
	 *
	 * L2CAP LE CoC SDUs are segmented and put into K-frames PDUs which have
	 * their own L2CAP header (i.e. PDU length, channel id).
	 *
	 * The SDU length is right before the data that will be segmented and is
	 * only present in the first PDU. Here's an example:
	 *
	 * Sent data payload of 50 bytes over channel 0x4040 with MPS of 30 bytes:
	 * First PDU (K-frame):
	 * | L2CAP K-frame header        | K-frame payload                |
	 * | PDU length  | Channel ID    | SDU length   | SDU payload     |
	 * | 0x001e      | 0x4040        | 0x0032       | 28 bytes of data |
	 *
	 * Second and last PDU (K-frame):
	 * | L2CAP K-frame header        | K-frame payload     |
	 * | PDU length  | Channel ID    | rest of SDU payload |
	 * | 0x0016      | 0x4040        | 22 bytes of data    |
	 */
	net_buf_push_le16(buf, sdu_len);

	/* Put buffer on TX queue */
	k_fifo_put(&le_chan->tx_queue, buf);

	/* Always process the queue in the same context */
	raise_data_ready(le_chan);

	return 0;
}
3279
bt_l2cap_chan_send(struct bt_l2cap_chan * chan,struct net_buf * buf)3280 int bt_l2cap_chan_send(struct bt_l2cap_chan *chan, struct net_buf *buf)
3281 {
3282 if (!buf || !chan) {
3283 return -EINVAL;
3284 }
3285
3286 LOG_DBG("chan %p buf %p len %zu", chan, buf, buf->len);
3287
3288 if (buf->ref != 1) {
3289 LOG_WRN("Expecting 1 ref, got %d", buf->ref);
3290 return -EINVAL;
3291 }
3292
3293 if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
3294 return -ENOTCONN;
3295 }
3296
3297 if (atomic_test_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN)) {
3298 return -ESHUTDOWN;
3299 }
3300
3301 if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
3302 chan->conn->type == BT_CONN_TYPE_BR) {
3303 return bt_l2cap_br_chan_send_cb(chan, buf, NULL, NULL);
3304 }
3305
3306 /* Sending over static channels is not supported by this fn. Use
3307 * `bt_l2cap_send_pdu()` instead.
3308 */
3309 if (IS_ENABLED(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)) {
3310 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3311
3312 __ASSERT_NO_MSG(le_chan);
3313 __ASSERT_NO_MSG(L2CAP_LE_CID_IS_DYN(le_chan->tx.cid));
3314
3315 return bt_l2cap_dyn_chan_send(le_chan, buf);
3316 }
3317
3318 LOG_DBG("Invalid channel type (chan %p)", chan);
3319
3320 return -EINVAL;
3321 }
3322 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
3323