1 /* l2cap.c - L2CAP handling */
2
3 /*
4 * Copyright (c) 2015-2016 Intel Corporation
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 #include <zephyr.h>
10 #include <string.h>
11 #include <errno.h>
12 #include <sys/atomic.h>
13 #include <sys/byteorder.h>
14 #include <sys/util.h>
15
16 #include <bluetooth/hci.h>
17 #include <bluetooth/bluetooth.h>
18 #include <bluetooth/conn.h>
19 #include <drivers/bluetooth/hci_driver.h>
20
21 #define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_L2CAP)
22 #define LOG_MODULE_NAME bt_l2cap
23 #include "common/log.h"
24
25 #include "hci_core.h"
26 #include "conn_internal.h"
27 #include "l2cap_internal.h"
28 #include "keys.h"
29
30 #define LE_CHAN_RTX(_w) CONTAINER_OF(_w, struct bt_l2cap_le_chan, chan.rtx_work)
31 #define CHAN_RX(_w) CONTAINER_OF(_w, struct bt_l2cap_le_chan, rx_work)
32
33 #define L2CAP_LE_MIN_MTU 23
34 #define L2CAP_ECRED_MIN_MTU 64
35
36 #define L2CAP_LE_MAX_CREDITS (CONFIG_BT_BUF_ACL_RX_COUNT - 1)
37
38 #define L2CAP_LE_CID_DYN_START 0x0040
39 #define L2CAP_LE_CID_DYN_END 0x007f
40 #define L2CAP_LE_CID_IS_DYN(_cid) \
41 (_cid >= L2CAP_LE_CID_DYN_START && _cid <= L2CAP_LE_CID_DYN_END)
42
43 #define L2CAP_LE_PSM_FIXED_START 0x0001
44 #define L2CAP_LE_PSM_FIXED_END 0x007f
45 #define L2CAP_LE_PSM_DYN_START 0x0080
46 #define L2CAP_LE_PSM_DYN_END 0x00ff
47 #define L2CAP_LE_PSM_IS_DYN(_psm) \
48 (_psm >= L2CAP_LE_PSM_DYN_START && _psm <= L2CAP_LE_PSM_DYN_END)
49
50 #define L2CAP_CONN_TIMEOUT K_SECONDS(40)
51 #define L2CAP_DISC_TIMEOUT K_SECONDS(2)
52 #define L2CAP_RTX_TIMEOUT K_SECONDS(2)
53
54 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
55 /* Dedicated pool for disconnect buffers so they are guaranteed to be sent
56 * even in case of data congestion due to flooding.
57 */
58 NET_BUF_POOL_FIXED_DEFINE(disc_pool, 1,
59 BT_L2CAP_BUF_SIZE(
60 sizeof(struct bt_l2cap_sig_hdr) +
61 sizeof(struct bt_l2cap_disconn_req)),
62 NULL);
63
64 #define L2CAP_ECRED_CHAN_MAX 5
65
66 #define l2cap_lookup_ident(conn, ident) __l2cap_lookup_ident(conn, ident, false)
67 #define l2cap_remove_ident(conn, ident) __l2cap_lookup_ident(conn, ident, true)
68
69 struct data_sent {
70 uint16_t len;
71 };
72
73 #define data_sent(buf) ((struct data_sent *)net_buf_user_data(buf))
74
75 static sys_slist_t servers;
76
77 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
78
79 /* L2CAP signalling channel specific context */
80 struct bt_l2cap {
81 /* The channel this context is associated with */
82 struct bt_l2cap_le_chan chan;
83 };
84
85 static struct bt_l2cap bt_l2cap_pool[CONFIG_BT_MAX_CONN];
86
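/* get_ident() hands out identifiers for signaling requests. Identifiers
 * cycle through 0x01..0xFF; 0x00 is skipped since it is not a valid
 * signaling identifier.
 */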
87 static uint8_t get_ident(void)
88 {
89 static uint8_t ident;
90
91 ident++;
92 /* handle integer overflow (0 is not valid) */
93 if (!ident) {
94 ident++;
95 }
96
97 return ident;
98 }
99
100 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
101 static struct bt_l2cap_le_chan *l2cap_chan_alloc_cid(struct bt_conn *conn,
102 struct bt_l2cap_chan *chan)
103 {
104 struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
105 uint16_t cid;
106
107 /*
108 * No action needed if there's already a CID allocated, e.g. in
109 * the case of a fixed channel.
110 */
111 if (ch->rx.cid > 0) {
112 return ch;
113 }
114
115 for (cid = L2CAP_LE_CID_DYN_START; cid <= L2CAP_LE_CID_DYN_END; cid++) {
116 if (!bt_l2cap_le_lookup_rx_cid(conn, cid)) {
117 ch->rx.cid = cid;
118 return ch;
119 }
120 }
121
122 return NULL;
123 }
124
125 static struct bt_l2cap_le_chan *
126 __l2cap_lookup_ident(struct bt_conn *conn, uint16_t ident, bool remove)
127 {
128 struct bt_l2cap_chan *chan;
129 sys_snode_t *prev = NULL;
130
131 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
132 if (chan->ident == ident) {
133 if (remove) {
134 sys_slist_remove(&conn->channels, prev,
135 &chan->node);
136 }
137 return BT_L2CAP_LE_CHAN(chan);
138 }
139
140 prev = &chan->node;
141 }
142
143 return NULL;
144 }
145 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
146
147 void bt_l2cap_chan_remove(struct bt_conn *conn, struct bt_l2cap_chan *ch)
148 {
149 struct bt_l2cap_chan *chan;
150 sys_snode_t *prev = NULL;
151
152 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
153 if (chan == ch) {
154 sys_slist_remove(&conn->channels, prev, &chan->node);
155 return;
156 }
157
158 prev = &chan->node;
159 }
160 }
161
162 const char *bt_l2cap_chan_state_str(bt_l2cap_chan_state_t state)
163 {
164 switch (state) {
165 case BT_L2CAP_DISCONNECTED:
166 return "disconnected";
167 case BT_L2CAP_CONNECT:
168 return "connect";
169 case BT_L2CAP_CONFIG:
170 return "config";
171 case BT_L2CAP_CONNECTED:
172 return "connected";
173 case BT_L2CAP_DISCONNECT:
174 return "disconnect";
175 default:
176 return "unknown";
177 }
178 }
179
180 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
181 #if defined(CONFIG_BT_DEBUG_L2CAP)
182 void bt_l2cap_chan_set_state_debug(struct bt_l2cap_chan *chan,
183 bt_l2cap_chan_state_t state,
184 const char *func, int line)
185 {
186 BT_DBG("chan %p psm 0x%04x %s -> %s", chan, chan->psm,
187 bt_l2cap_chan_state_str(chan->state),
188 bt_l2cap_chan_state_str(state));
189
190 /* check transition validity */
191 switch (state) {
192 case BT_L2CAP_DISCONNECTED:
193 /* this state is always allowed regardless of the old state */
194 break;
195 case BT_L2CAP_CONNECT:
196 if (chan->state != BT_L2CAP_DISCONNECTED) {
197 BT_WARN("%s()%d: invalid transition", func, line);
198 }
199 break;
200 case BT_L2CAP_CONFIG:
201 if (chan->state != BT_L2CAP_CONNECT) {
202 BT_WARN("%s()%d: invalid transition", func, line);
203 }
204 break;
205 case BT_L2CAP_CONNECTED:
206 if (chan->state != BT_L2CAP_CONFIG &&
207 chan->state != BT_L2CAP_CONNECT) {
208 BT_WARN("%s()%d: invalid transition", func, line);
209 }
210 break;
211 case BT_L2CAP_DISCONNECT:
212 if (chan->state != BT_L2CAP_CONFIG &&
213 chan->state != BT_L2CAP_CONNECTED) {
214 BT_WARN("%s()%d: invalid transition", func, line);
215 }
216 break;
217 default:
218 BT_ERR("%s()%d: unknown (%u) state was set", func, line, state);
219 return;
220 }
221
222 chan->state = state;
223 }
224 #else
225 void bt_l2cap_chan_set_state(struct bt_l2cap_chan *chan,
226 bt_l2cap_chan_state_t state)
227 {
228 chan->state = state;
229 }
230 #endif /* CONFIG_BT_DEBUG_L2CAP */
231 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
232
233 void bt_l2cap_chan_del(struct bt_l2cap_chan *chan)
234 {
235 const struct bt_l2cap_chan_ops *ops = chan->ops;
236
237 BT_DBG("conn %p chan %p", chan->conn, chan);
238
239 if (!chan->conn) {
240 goto destroy;
241 }
242
243 if (ops->disconnected) {
244 ops->disconnected(chan);
245 }
246
247 chan->conn = NULL;
248
249 destroy:
250 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
251 /* Reset internal members of common channel */
252 bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTED);
253 chan->psm = 0U;
254 #endif
255 if (chan->destroy) {
256 chan->destroy(chan);
257 }
258
259 if (ops->released) {
260 ops->released(chan);
261 }
262 }
263
264 static void l2cap_rtx_timeout(struct k_work *work)
265 {
266 struct bt_l2cap_le_chan *chan = LE_CHAN_RTX(work);
267 struct bt_conn *conn = chan->chan.conn;
268
269 BT_ERR("chan %p timeout", chan);
270
271 bt_l2cap_chan_remove(conn, &chan->chan);
272 bt_l2cap_chan_del(&chan->chan);
273
274 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
275 /* Remove other channels if pending on the same ident */
276 while ((chan = l2cap_remove_ident(conn, chan->chan.ident))) {
277 bt_l2cap_chan_del(&chan->chan);
278 }
279 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
280 }
281
282 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
283 static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
284 struct net_buf *buf);
285
286 static void l2cap_rx_process(struct k_work *work)
287 {
288 struct bt_l2cap_le_chan *ch = CHAN_RX(work);
289 struct net_buf *buf;
290
291 while ((buf = net_buf_get(&ch->rx_queue, K_NO_WAIT))) {
292 BT_DBG("ch %p buf %p", ch, buf);
293 l2cap_chan_le_recv(ch, buf);
294 net_buf_unref(buf);
295 }
296 }
297 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
298
299 void bt_l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
300 bt_l2cap_chan_destroy_t destroy)
301 {
302 /* Attach channel to the connection */
303 sys_slist_append(&conn->channels, &chan->node);
304 chan->conn = conn;
305 chan->destroy = destroy;
306
307 BT_DBG("conn %p chan %p", conn, chan);
308 }
309
310 static bool l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
311 bt_l2cap_chan_destroy_t destroy)
312 {
313 struct bt_l2cap_le_chan *ch;
314
315 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
316 ch = l2cap_chan_alloc_cid(conn, chan);
317 #else
318 ch = BT_L2CAP_LE_CHAN(chan);
319 #endif
320
321 if (!ch) {
322 BT_ERR("Unable to allocate L2CAP channel ID");
323 return false;
324 }
325
326 /* All dynamic channels have the destroy handler which makes sure that
327 * the RTX work structure is properly released with a cancel sync.
328 * The fixed signal channel is only removed when disconnected and the
329 * disconnected handler is always called from the workqueue itself so
330 * canceling from there should always succeed.
331 */
332 k_work_init_delayable(&chan->rtx_work, l2cap_rtx_timeout);
333 atomic_clear(chan->status);
334
335 bt_l2cap_chan_add(conn, chan, destroy);
336
337 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
338 if (L2CAP_LE_CID_IS_DYN(ch->rx.cid)) {
339 k_work_init(&ch->rx_work, l2cap_rx_process);
340 k_fifo_init(&ch->rx_queue);
341 bt_l2cap_chan_set_state(chan, BT_L2CAP_CONNECT);
342 }
343 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
344
345 return true;
346 }
347
348 void bt_l2cap_connected(struct bt_conn *conn)
349 {
350 struct bt_l2cap_chan *chan;
351
352 if (IS_ENABLED(CONFIG_BT_BREDR) &&
353 conn->type == BT_CONN_TYPE_BR) {
354 bt_l2cap_br_connected(conn);
355 return;
356 }
357
358 STRUCT_SECTION_FOREACH(bt_l2cap_fixed_chan, fchan) {
359 struct bt_l2cap_le_chan *ch;
360
361 if (fchan->accept(conn, &chan) < 0) {
362 continue;
363 }
364
365 ch = BT_L2CAP_LE_CHAN(chan);
366
367 /* Fill up remaining fixed channel context attached in
368 * fchan->accept()
369 */
370 ch->rx.cid = fchan->cid;
371 ch->tx.cid = fchan->cid;
372
373 if (!l2cap_chan_add(conn, chan, fchan->destroy)) {
374 return;
375 }
376
377 if (chan->ops->connected) {
378 chan->ops->connected(chan);
379 }
380
381 /* Always set output status for fixed channels */
382 atomic_set_bit(chan->status, BT_L2CAP_STATUS_OUT);
383
384 if (chan->ops->status) {
385 chan->ops->status(chan, chan->status);
386 }
387 }
388 }
389
390 void bt_l2cap_disconnected(struct bt_conn *conn)
391 {
392 struct bt_l2cap_chan *chan, *next;
393
394 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
395 bt_l2cap_chan_del(chan);
396 }
397 }
398
399 static struct net_buf *l2cap_create_le_sig_pdu(struct net_buf *buf,
400 uint8_t code, uint8_t ident,
401 uint16_t len)
402 {
403 struct bt_l2cap_sig_hdr *hdr;
404 struct net_buf_pool *pool = NULL;
405
406 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
407 if (code == BT_L2CAP_DISCONN_REQ) {
408 pool = &disc_pool;
409 }
410 #endif
411 /* Don't wait more than the minimum RTX timeout of 2 seconds */
412 buf = bt_l2cap_create_pdu_timeout(pool, 0, L2CAP_RTX_TIMEOUT);
413 if (!buf) {
414 /* If it was not possible to allocate a buffer within the
415 * timeout return NULL.
416 */
417 BT_ERR("Unable to allocate buffer for op 0x%02x", code);
418 return NULL;
419 }
420
421 hdr = net_buf_add(buf, sizeof(*hdr));
422 hdr->code = code;
423 hdr->ident = ident;
424 hdr->len = sys_cpu_to_le16(len);
425
426 return buf;
427 }
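/* The PDU built above starts with the 4-byte signaling header laid out as
 * [code (1 octet)] [ident (1 octet)] [len (2 octets, little-endian)],
 * followed by the command-specific payload added by the caller.
 */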
428
429 /* Send the buffer and release it in case of failure.
430 * Any other cleanup on failure to send should be handled by the disconnected
431 * handler.
432 */
433 static inline void l2cap_send(struct bt_conn *conn, uint16_t cid,
434 struct net_buf *buf)
435 {
436 if (bt_l2cap_send(conn, cid, buf)) {
437 net_buf_unref(buf);
438 }
439 }
440
441 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
442 static void l2cap_chan_send_req(struct bt_l2cap_chan *chan,
443 struct net_buf *buf, k_timeout_t timeout)
444 {
445 if (bt_l2cap_send(chan->conn, BT_L2CAP_CID_LE_SIG, buf)) {
446 net_buf_unref(buf);
447 return;
448 }
449
450 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part A] page 126:
451 *
452 * The value of this timer is implementation-dependent but the minimum
453 * initial value is 1 second and the maximum initial value is 60
454 * seconds. One RTX timer shall exist for each outstanding signaling
455 * request, including each Echo Request. The timer disappears on the
456 * final expiration, when the response is received, or the physical
457 * link is lost.
458 */
459 k_work_reschedule(&chan->rtx_work, timeout);
460 }
461
462 static int l2cap_le_conn_req(struct bt_l2cap_le_chan *ch)
463 {
464 struct net_buf *buf;
465 struct bt_l2cap_le_conn_req *req;
466
467 ch->chan.ident = get_ident();
468
469 buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_LE_CONN_REQ,
470 ch->chan.ident, sizeof(*req));
471 if (!buf) {
472 return -ENOMEM;
473 }
474
475 req = net_buf_add(buf, sizeof(*req));
476 req->psm = sys_cpu_to_le16(ch->chan.psm);
477 req->scid = sys_cpu_to_le16(ch->rx.cid);
478 req->mtu = sys_cpu_to_le16(ch->rx.mtu);
479 req->mps = sys_cpu_to_le16(ch->rx.mps);
480 req->credits = sys_cpu_to_le16(ch->rx.init_credits);
481
482 l2cap_chan_send_req(&ch->chan, buf, L2CAP_CONN_TIMEOUT);
483
484 return 0;
485 }
486
487 #if defined(CONFIG_BT_L2CAP_ECRED)
488 static int l2cap_ecred_conn_req(struct bt_l2cap_chan **chan, int channels)
489 {
490 struct net_buf *buf;
491 struct bt_l2cap_ecred_conn_req *req;
492 struct bt_l2cap_le_chan *ch;
493 int i;
494 uint8_t ident;
495
496 if (!chan || !channels) {
497 return -EINVAL;
498 }
499
500 ident = get_ident();
501
502 buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_ECRED_CONN_REQ, ident,
503 sizeof(*req) +
504 (channels * sizeof(uint16_t)));
505
506 req = net_buf_add(buf, sizeof(*req));
507
508 ch = BT_L2CAP_LE_CHAN(chan[0]);
509
510 /* Init common parameters */
511 req->psm = sys_cpu_to_le16(ch->chan.psm);
512 req->mtu = sys_cpu_to_le16(ch->rx.mtu);
513 req->mps = sys_cpu_to_le16(ch->rx.mps);
514 req->credits = sys_cpu_to_le16(ch->rx.init_credits);
515
516 for (i = 0; i < channels; i++) {
517 ch = BT_L2CAP_LE_CHAN(chan[i]);
518
519 ch->chan.ident = ident;
520
521 net_buf_add_le16(buf, ch->rx.cid);
522 }
523
524 l2cap_chan_send_req(*chan, buf, L2CAP_CONN_TIMEOUT);
525
526 return 0;
527 }
528 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
529
530 static void l2cap_le_encrypt_change(struct bt_l2cap_chan *chan, uint8_t status)
531 {
532 int err;
533
534 /* Skip channels that are not waiting for encryption */
535 if (!atomic_test_and_clear_bit(chan->status,
536 BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
537 return;
538 }
539
540 if (status) {
541 goto fail;
542 }
543
544 #if defined(CONFIG_BT_L2CAP_ECRED)
545 if (chan->ident) {
546 struct bt_l2cap_chan *echan[L2CAP_ECRED_CHAN_MAX];
547 struct bt_l2cap_le_chan *ch;
548 int i = 0;
549
550 while ((ch = l2cap_remove_ident(chan->conn, chan->ident))) {
551 echan[i++] = &ch->chan;
552 }
553
554 /* Retry ecred connect */
555 l2cap_ecred_conn_req(echan, i);
556 return;
557 }
558 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
559
560 /* Retry to connect */
561 err = l2cap_le_conn_req(BT_L2CAP_LE_CHAN(chan));
562 if (err) {
563 goto fail;
564 }
565
566 return;
567 fail:
568 bt_l2cap_chan_remove(chan->conn, chan);
569 bt_l2cap_chan_del(chan);
570 }
571 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
572
573 void bt_l2cap_security_changed(struct bt_conn *conn, uint8_t hci_status)
574 {
575 struct bt_l2cap_chan *chan, *next;
576
577 if (IS_ENABLED(CONFIG_BT_BREDR) &&
578 conn->type == BT_CONN_TYPE_BR) {
579 l2cap_br_encrypt_change(conn, hci_status);
580 return;
581 }
582
583 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
584 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
585 l2cap_le_encrypt_change(chan, hci_status);
586 #endif
587
588 if (chan->ops->encrypt_change) {
589 chan->ops->encrypt_change(chan, hci_status);
590 }
591 }
592 }
593
594 struct net_buf *bt_l2cap_create_pdu_timeout(struct net_buf_pool *pool,
595 size_t reserve,
596 k_timeout_t timeout)
597 {
598 return bt_conn_create_pdu_timeout(pool,
599 sizeof(struct bt_l2cap_hdr) + reserve,
600 timeout);
601 }
602
603 int bt_l2cap_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
604 bt_conn_tx_cb_t cb, void *user_data)
605 {
606 struct bt_l2cap_hdr *hdr;
607
608 BT_DBG("conn %p cid %u len %zu", conn, cid, net_buf_frags_len(buf));
609
610 hdr = net_buf_push(buf, sizeof(*hdr));
611 hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
612 hdr->cid = sys_cpu_to_le16(cid);
613
614 return bt_conn_send_cb(conn, buf, cb, user_data);
615 }
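/* The header pushed above is the basic L2CAP header, i.e.
 * [len (2 octets, LE)] [cid (2 octets, LE)], where 'len' counts only the
 * information payload that follows the 4-byte header.
 */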
616
617 static void l2cap_send_reject(struct bt_conn *conn, uint8_t ident,
618 uint16_t reason, void *data, uint8_t data_len)
619 {
620 struct bt_l2cap_cmd_reject *rej;
621 struct net_buf *buf;
622
623 buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_CMD_REJECT, ident,
624 sizeof(*rej) + data_len);
625 if (!buf) {
626 return;
627 }
628
629 rej = net_buf_add(buf, sizeof(*rej));
630 rej->reason = sys_cpu_to_le16(reason);
631
632 if (data) {
633 net_buf_add_mem(buf, data, data_len);
634 }
635
636 l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
637 }
638
639 static void le_conn_param_rsp(struct bt_l2cap *l2cap, struct net_buf *buf)
640 {
641 struct bt_l2cap_conn_param_rsp *rsp = (void *)buf->data;
642
643 if (buf->len < sizeof(*rsp)) {
644 BT_ERR("Too small LE conn param rsp");
645 return;
646 }
647
648 BT_DBG("LE conn param rsp result %u", sys_le16_to_cpu(rsp->result));
649 }
650
651 static void le_conn_param_update_req(struct bt_l2cap *l2cap, uint8_t ident,
652 struct net_buf *buf)
653 {
654 struct bt_conn *conn = l2cap->chan.chan.conn;
655 struct bt_le_conn_param param;
656 struct bt_l2cap_conn_param_rsp *rsp;
657 struct bt_l2cap_conn_param_req *req = (void *)buf->data;
658 bool accepted;
659
660 if (buf->len < sizeof(*req)) {
661 BT_ERR("Too small LE conn update param req");
662 return;
663 }
664
665 if (conn->role != BT_HCI_ROLE_CENTRAL) {
666 l2cap_send_reject(conn, ident, BT_L2CAP_REJ_NOT_UNDERSTOOD,
667 NULL, 0);
668 return;
669 }
670
671 param.interval_min = sys_le16_to_cpu(req->min_interval);
672 param.interval_max = sys_le16_to_cpu(req->max_interval);
673 param.latency = sys_le16_to_cpu(req->latency);
674 param.timeout = sys_le16_to_cpu(req->timeout);
675
676 BT_DBG("min 0x%04x max 0x%04x latency: 0x%04x timeout: 0x%04x",
677 param.interval_min, param.interval_max, param.latency,
678 param.timeout);
679
680 buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_CONN_PARAM_RSP, ident,
681 sizeof(*rsp));
682 if (!buf) {
683 return;
684 }
685
686 accepted = le_param_req(conn, &param);
687
688 rsp = net_buf_add(buf, sizeof(*rsp));
689 if (accepted) {
690 rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_ACCEPTED);
691 } else {
692 rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_REJECTED);
693 }
694
695 l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
696
697 if (accepted) {
698 bt_conn_le_conn_update(conn, &param);
699 }
700 }
701
702 struct bt_l2cap_chan *bt_l2cap_le_lookup_tx_cid(struct bt_conn *conn,
703 uint16_t cid)
704 {
705 struct bt_l2cap_chan *chan;
706
707 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
708 if (BT_L2CAP_LE_CHAN(chan)->tx.cid == cid) {
709 return chan;
710 }
711 }
712
713 return NULL;
714 }
715
716 struct bt_l2cap_chan *bt_l2cap_le_lookup_rx_cid(struct bt_conn *conn,
717 uint16_t cid)
718 {
719 struct bt_l2cap_chan *chan;
720
721 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
722 if (BT_L2CAP_LE_CHAN(chan)->rx.cid == cid) {
723 return chan;
724 }
725 }
726
727 return NULL;
728 }
729
730 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
731 static struct bt_l2cap_server *l2cap_server_lookup_psm(uint16_t psm)
732 {
733 struct bt_l2cap_server *server;
734
735 SYS_SLIST_FOR_EACH_CONTAINER(&servers, server, node) {
736 if (server->psm == psm) {
737 return server;
738 }
739 }
740
741 return NULL;
742 }
743
744 int bt_l2cap_server_register(struct bt_l2cap_server *server)
745 {
746 if (!server->accept) {
747 return -EINVAL;
748 }
749
750 if (server->psm) {
751 if (server->psm < L2CAP_LE_PSM_FIXED_START ||
752 server->psm > L2CAP_LE_PSM_DYN_END) {
753 return -EINVAL;
754 }
755
756 /* Check if given PSM is already in use */
757 if (l2cap_server_lookup_psm(server->psm)) {
758 BT_DBG("PSM already registered");
759 return -EADDRINUSE;
760 }
761 } else {
762 uint16_t psm;
763
764 for (psm = L2CAP_LE_PSM_DYN_START;
765 psm <= L2CAP_LE_PSM_DYN_END; psm++) {
766 if (!l2cap_server_lookup_psm(psm)) {
767 break;
768 }
769 }
770
771 if (psm > L2CAP_LE_PSM_DYN_END) {
772 BT_WARN("No free dynamic PSMs available");
773 return -EADDRNOTAVAIL;
774 }
775
776 BT_DBG("Allocated PSM 0x%04x for new server", psm);
777 server->psm = psm;
778 }
779
780 if (server->sec_level > BT_SECURITY_L4) {
781 return -EINVAL;
782 } else if (server->sec_level < BT_SECURITY_L1) {
783 /* Level 0 is only applicable for BR/EDR */
784 server->sec_level = BT_SECURITY_L1;
785 }
786
787 BT_DBG("PSM 0x%04x", server->psm);
788
789 sys_slist_append(&servers, &server->node);
790
791 return 0;
792 }
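/* Illustrative sketch (not part of this file) of how an application might
 * register a server. The names my_le_chan, my_chan_ops, my_accept and
 * my_server are hypothetical, and my_chan_ops is assumed to provide at
 * least the mandatory .recv callback:
 *
 *	static struct bt_l2cap_le_chan my_le_chan;
 *
 *	static int my_accept(struct bt_conn *conn, struct bt_l2cap_chan **chan)
 *	{
 *		my_le_chan.chan.ops = &my_chan_ops;
 *		*chan = &my_le_chan.chan;
 *		return 0;
 *	}
 *
 *	static struct bt_l2cap_server my_server = {
 *		.psm = 0,
 *		.sec_level = BT_SECURITY_L1,
 *		.accept = my_accept,
 *	};
 *
 *	bt_l2cap_server_register(&my_server);
 *
 * Leaving .psm as 0 makes the call above allocate a dynamic PSM and store it
 * back into my_server.psm.
 */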
793
794 static void l2cap_chan_rx_init(struct bt_l2cap_le_chan *chan)
795 {
796 BT_DBG("chan %p", chan);
797
798 /* Use existing MTU if defined */
799 if (!chan->rx.mtu) {
800 /* If the application has not provided the incoming L2CAP SDU MTU, use
801 * an MTU that does not require segmentation.
802 */
803 chan->rx.mtu = BT_L2CAP_SDU_RX_MTU;
804 }
805
806 /* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE as the
807 * remaining bytes cannot be used.
808 */
809 chan->rx.mps = MIN(chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE,
810 BT_L2CAP_RX_MTU);
811
812 /* Truncate the MTU if the channel has disabled segmentation but still has
813 * set an MTU which requires it.
814 */
815 if (!chan->chan.ops->alloc_buf &&
816 (chan->rx.mps < chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE)) {
817 BT_WARN("Segmentation disabled but MTU > MPS, truncating MTU");
818 chan->rx.mtu = chan->rx.mps - BT_L2CAP_SDU_HDR_SIZE;
819 }
820
821 /* Use existing credits if defined */
822 if (!chan->rx.init_credits) {
823 if (chan->chan.ops->alloc_buf) {
824 /* Auto tune credits to receive a full packet */
825 chan->rx.init_credits =
826 ceiling_fraction(chan->rx.mtu,
827 BT_L2CAP_RX_MTU);
828 } else {
829 chan->rx.init_credits = L2CAP_LE_MAX_CREDITS;
830 }
831 }
832
833 atomic_set(&chan->rx.credits, 0);
834
835 if (BT_DBG_ENABLED &&
836 chan->rx.init_credits * chan->rx.mps <
837 chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE) {
838 BT_WARN("Not enough credits for a full packet");
839 }
840 }
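/* Worked example of the credit auto-tune above (values are illustrative):
 * with rx.mtu = 247 and BT_L2CAP_RX_MTU = 65, ceiling_fraction(247, 65)
 * yields 4, so four initial credits are handed to the peer, enough for it
 * to send one full SDU before further credits are returned.
 */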
841
842 static struct net_buf *l2cap_chan_le_get_tx_buf(struct bt_l2cap_le_chan *ch)
843 {
844 struct net_buf *buf;
845
846 /* Return current buffer */
847 if (ch->tx_buf) {
848 buf = ch->tx_buf;
849 ch->tx_buf = NULL;
850 return buf;
851 }
852
853 return net_buf_get(&ch->tx_queue, K_NO_WAIT);
854 }
855
856 static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
857 struct net_buf **buf, uint16_t sent);
858
859 static void l2cap_chan_tx_process(struct k_work *work)
860 {
861 struct bt_l2cap_le_chan *ch;
862 struct net_buf *buf;
863
864 ch = CONTAINER_OF(work, struct bt_l2cap_le_chan, tx_work);
865
866 /* Resume tx in case there are buffers in the queue */
867 while ((buf = l2cap_chan_le_get_tx_buf(ch))) {
868 int sent = data_sent(buf)->len;
869
870 BT_DBG("buf %p sent %u", buf, sent);
871
872 sent = l2cap_chan_le_send_sdu(ch, &buf, sent);
873 if (sent < 0) {
874 if (sent == -EAGAIN) {
875 ch->tx_buf = buf;
876 /* If we don't reschedule, and the app doesn't nudge l2cap (e.g. by
877 * sending another SDU), the channel will be stuck in limbo. To
878 * prevent this, we attempt to re-schedule the work item for every
879 * channel on every connection when an SDU has successfully been
880 * sent.
881 */
882 } else {
883 net_buf_unref(buf);
884 }
885 break;
886 }
887 }
888 }
889
890 static void l2cap_chan_tx_init(struct bt_l2cap_le_chan *chan)
891 {
892 BT_DBG("chan %p", chan);
893
894 (void)memset(&chan->tx, 0, sizeof(chan->tx));
895 atomic_set(&chan->tx.credits, 0);
896 k_fifo_init(&chan->tx_queue);
897 k_work_init(&chan->tx_work, l2cap_chan_tx_process);
898 }
899
900 static void l2cap_chan_tx_give_credits(struct bt_l2cap_le_chan *chan,
901 uint16_t credits)
902 {
903 BT_DBG("chan %p credits %u", chan, credits);
904
905 atomic_add(&chan->tx.credits, credits);
906
907 if (!atomic_test_and_set_bit(chan->chan.status, BT_L2CAP_STATUS_OUT) &&
908 chan->chan.ops->status) {
909 chan->chan.ops->status(&chan->chan, chan->chan.status);
910 }
911 }
912
913 static void l2cap_chan_rx_give_credits(struct bt_l2cap_le_chan *chan,
914 uint16_t credits)
915 {
916 BT_DBG("chan %p credits %u", chan, credits);
917
918 atomic_add(&chan->rx.credits, credits);
919 }
920
921 static void l2cap_chan_destroy(struct bt_l2cap_chan *chan)
922 {
923 struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
924 struct net_buf *buf;
925
926 BT_DBG("chan %p cid 0x%04x", ch, ch->rx.cid);
927
928 /* Cancel ongoing work. Since the channel can be re-used after this
929 * we need to sync to make sure that the kernel does not have it
930 * in its queue anymore.
931 */
932 k_work_cancel_delayable_sync(&chan->rtx_work, &chan->rtx_sync);
933
934 if (ch->tx_buf) {
935 net_buf_unref(ch->tx_buf);
936 ch->tx_buf = NULL;
937 }
938
939 /* Remove buffers on the TX queue */
940 while ((buf = net_buf_get(&ch->tx_queue, K_NO_WAIT))) {
941 net_buf_unref(buf);
942 }
943
944 /* Remove buffers on the RX queue */
945 while ((buf = net_buf_get(&ch->rx_queue, K_NO_WAIT))) {
946 net_buf_unref(buf);
947 }
948
949 /* Destroy segmented SDU if it exists */
950 if (ch->_sdu) {
951 net_buf_unref(ch->_sdu);
952 ch->_sdu = NULL;
953 ch->_sdu_len = 0U;
954 }
955 }
956
957 static uint16_t le_err_to_result(int err)
958 {
959 switch (err) {
960 case -ENOMEM:
961 return BT_L2CAP_LE_ERR_NO_RESOURCES;
962 case -EACCES:
963 return BT_L2CAP_LE_ERR_AUTHORIZATION;
964 case -EPERM:
965 return BT_L2CAP_LE_ERR_KEY_SIZE;
966 case -ENOTSUP:
967 /* This handles the cases where a fixed channel is registered but
968 * for some reason (e.g. the controller not supporting a feature)
969 * cannot be used.
970 */
971 return BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
972 default:
973 return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
974 }
975 }
976
977 static uint16_t l2cap_chan_accept(struct bt_conn *conn,
978 struct bt_l2cap_server *server, uint16_t scid,
979 uint16_t mtu, uint16_t mps, uint16_t credits,
980 struct bt_l2cap_chan **chan)
981 {
982 struct bt_l2cap_le_chan *ch;
983 int err;
984
985 BT_DBG("conn %p scid 0x%04x chan %p", conn, scid, chan);
986
987 if (!L2CAP_LE_CID_IS_DYN(scid)) {
988 return BT_L2CAP_LE_ERR_INVALID_SCID;
989 }
990
991 *chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
992 if (*chan) {
993 return BT_L2CAP_LE_ERR_SCID_IN_USE;
994 }
995
996 /* Request server to accept the new connection and allocate the
997 * channel.
998 */
999 err = server->accept(conn, chan);
1000 if (err < 0) {
1001 return le_err_to_result(err);
1002 }
1003
1004 if (!(*chan)->ops->recv) {
1005 BT_ERR("Mandatory callback 'recv' missing");
1006 return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
1007 }
1008
1009 (*chan)->required_sec_level = server->sec_level;
1010
1011 if (!l2cap_chan_add(conn, *chan, l2cap_chan_destroy)) {
1012 return BT_L2CAP_LE_ERR_NO_RESOURCES;
1013 }
1014
1015 ch = BT_L2CAP_LE_CHAN(*chan);
1016
1017 /* Init TX parameters */
1018 l2cap_chan_tx_init(ch);
1019 ch->tx.cid = scid;
1020 ch->tx.mps = mps;
1021 ch->tx.mtu = mtu;
1022 ch->tx.init_credits = credits;
1023 l2cap_chan_tx_give_credits(ch, credits);
1024
1025 /* Init RX parameters */
1026 l2cap_chan_rx_init(ch);
1027 l2cap_chan_rx_give_credits(ch, ch->rx.init_credits);
1028
1029 /* Set channel PSM */
1030 (*chan)->psm = server->psm;
1031
1032 /* Update state */
1033 bt_l2cap_chan_set_state(*chan, BT_L2CAP_CONNECTED);
1034
1035 if ((*chan)->ops->connected) {
1036 (*chan)->ops->connected(*chan);
1037 }
1038
1039 return BT_L2CAP_LE_SUCCESS;
1040 }
1041
1042 static uint16_t l2cap_check_security(struct bt_conn *conn,
1043 struct bt_l2cap_server *server)
1044 {
1045 const struct bt_keys *keys = bt_keys_find_addr(conn->id, &conn->le.dst);
1046 bool ltk_present;
1047
1048 if (IS_ENABLED(CONFIG_BT_CONN_DISABLE_SECURITY)) {
1049 return BT_L2CAP_LE_SUCCESS;
1050 }
1051
1052 if (conn->sec_level >= server->sec_level) {
1053 return BT_L2CAP_LE_SUCCESS;
1054 }
1055
1056 if (conn->sec_level > BT_SECURITY_L1) {
1057 return BT_L2CAP_LE_ERR_AUTHENTICATION;
1058 }
1059
1060 if (keys) {
1061 if (conn->role == BT_HCI_ROLE_CENTRAL) {
1062 ltk_present = keys->keys & (BT_KEYS_LTK_P256 | BT_KEYS_PERIPH_LTK);
1063 } else {
1064 ltk_present = keys->keys & (BT_KEYS_LTK_P256 | BT_KEYS_LTK);
1065 }
1066 } else {
1067 ltk_present = false;
1068 }
1069
1070 /* If an LTK or an STK is available and encryption is required
1071 * (LE security mode 1) but encryption is not enabled, the
1072 * service request shall be rejected with the error code
1073 * "Insufficient Encryption".
1074 */
1075 if (ltk_present) {
1076 return BT_L2CAP_LE_ERR_ENCRYPTION;
1077 }
1078
1079 return BT_L2CAP_LE_ERR_AUTHENTICATION;
1080 }
1081
1082 static void le_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
1083 struct net_buf *buf)
1084 {
1085 struct bt_conn *conn = l2cap->chan.chan.conn;
1086 struct bt_l2cap_chan *chan;
1087 struct bt_l2cap_le_chan *ch;
1088 struct bt_l2cap_server *server;
1089 struct bt_l2cap_le_conn_req *req = (void *)buf->data;
1090 struct bt_l2cap_le_conn_rsp *rsp;
1091 uint16_t psm, scid, mtu, mps, credits;
1092 uint16_t result;
1093
1094 if (buf->len < sizeof(*req)) {
1095 BT_ERR("Too small LE conn req packet size");
1096 return;
1097 }
1098
1099 psm = sys_le16_to_cpu(req->psm);
1100 scid = sys_le16_to_cpu(req->scid);
1101 mtu = sys_le16_to_cpu(req->mtu);
1102 mps = sys_le16_to_cpu(req->mps);
1103 credits = sys_le16_to_cpu(req->credits);
1104
1105 BT_DBG("psm 0x%02x scid 0x%04x mtu %u mps %u credits %u", psm, scid,
1106 mtu, mps, credits);
1107
1108 if (mtu < L2CAP_LE_MIN_MTU || mps < L2CAP_LE_MIN_MTU) {
1109 BT_ERR("Invalid LE-Conn Req params");
1110 return;
1111 }
1112
1113 buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_LE_CONN_RSP, ident,
1114 sizeof(*rsp));
1115 if (!buf) {
1116 return;
1117 }
1118
1119 rsp = net_buf_add(buf, sizeof(*rsp));
1120 (void)memset(rsp, 0, sizeof(*rsp));
1121
1122 /* Check if there is a server registered */
1123 server = l2cap_server_lookup_psm(psm);
1124 if (!server) {
1125 rsp->result = sys_cpu_to_le16(BT_L2CAP_LE_ERR_PSM_NOT_SUPP);
1126 goto rsp;
1127 }
1128
1129 /* Check if connection has minimum required security level */
1130 result = l2cap_check_security(conn, server);
1131 if (result != BT_L2CAP_LE_SUCCESS) {
1132 rsp->result = sys_cpu_to_le16(result);
1133 goto rsp;
1134 }
1135
1136 result = l2cap_chan_accept(conn, server, scid, mtu, mps, credits,
1137 &chan);
1138 if (result != BT_L2CAP_LE_SUCCESS) {
1139 rsp->result = sys_cpu_to_le16(result);
1140 goto rsp;
1141 }
1142
1143 ch = BT_L2CAP_LE_CHAN(chan);
1144
1145 /* Prepare response protocol data */
1146 rsp->dcid = sys_cpu_to_le16(ch->rx.cid);
1147 rsp->mps = sys_cpu_to_le16(ch->rx.mps);
1148 rsp->mtu = sys_cpu_to_le16(ch->rx.mtu);
1149 rsp->credits = sys_cpu_to_le16(ch->rx.init_credits);
1150 rsp->result = BT_L2CAP_LE_SUCCESS;
1151
1152 rsp:
1153 l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
1154 }
1155
1156 #if defined(CONFIG_BT_L2CAP_ECRED)
1157 static void le_ecred_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
1158 struct net_buf *buf)
1159 {
1160 struct bt_conn *conn = l2cap->chan.chan.conn;
1161 struct bt_l2cap_chan *chan[L2CAP_ECRED_CHAN_MAX];
1162 struct bt_l2cap_le_chan *ch = NULL;
1163 struct bt_l2cap_server *server;
1164 struct bt_l2cap_ecred_conn_req *req;
1165 struct bt_l2cap_ecred_conn_rsp *rsp;
1166 uint16_t psm, mtu, mps, credits, result = BT_L2CAP_LE_SUCCESS;
1167 uint16_t scid, dcid[L2CAP_ECRED_CHAN_MAX];
1168 int i = 0;
1169 uint8_t req_cid_count;
1170
1171 /* Set dcid to zeros here, in case all connections are refused */
1172 memset(dcid, 0, sizeof(dcid));
1173 if (buf->len < sizeof(*req)) {
1174 BT_ERR("Too small LE ecred conn req packet size");
1175 result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
1176 req_cid_count = 0;
1177 goto response;
1178 }
1179
1180 req = net_buf_pull_mem(buf, sizeof(*req));
1181 req_cid_count = buf->len / sizeof(scid);
1182
1183 if (buf->len > sizeof(dcid)) {
1184 BT_ERR("Too large LE conn req packet size");
1185 req_cid_count = L2CAP_ECRED_CHAN_MAX;
1186 result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
1187 goto response;
1188 }
1189
1190 psm = sys_le16_to_cpu(req->psm);
1191 mtu = sys_le16_to_cpu(req->mtu);
1192 mps = sys_le16_to_cpu(req->mps);
1193 credits = sys_le16_to_cpu(req->credits);
1194
1195 BT_DBG("psm 0x%02x mtu %u mps %u credits %u", psm, mtu, mps, credits);
1196
1197 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MTU) {
1198 BT_ERR("Invalid ecred conn req params");
1199 result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
1200 goto response;
1201 }
1202
1203 /* Check if there is a server registered */
1204 server = l2cap_server_lookup_psm(psm);
1205 if (!server) {
1206 result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
1207 goto response;
1208 }
1209
1210 /* Check if connection has minimum required security level */
1211 result = l2cap_check_security(conn, server);
1212 if (result != BT_L2CAP_LE_SUCCESS) {
1213 goto response;
1214 }
1215
1216 while (buf->len >= sizeof(scid)) {
1217 uint16_t rc;
1218 scid = net_buf_pull_le16(buf);
1219
1220 rc = l2cap_chan_accept(conn, server, scid, mtu, mps,
1221 credits, &chan[i]);
1222 if (rc != BT_L2CAP_LE_SUCCESS) {
1223 result = rc;
1224 }
1225 switch (rc) {
1226 case BT_L2CAP_LE_SUCCESS:
1227 ch = BT_L2CAP_LE_CHAN(chan[i]);
1228 dcid[i++] = sys_cpu_to_le16(ch->rx.cid);
1229 continue;
1230 /* Some connections refused – invalid Source CID */
1231 /* Some connections refused – Source CID already allocated */
1232 /* Some connections refused – not enough resources
1233 * available.
1234 */
1235 default:
1236 /* If a Destination CID is 0x0000, the channel was not
1237 * established.
1238 */
1239 dcid[i++] = 0x0000;
1240 continue;
1241 }
1242 }
1243
1244 response:
1245 buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_ECRED_CONN_RSP, ident,
1246 sizeof(*rsp) +
1247 (sizeof(scid) * req_cid_count));
1248 if (!buf) {
1249 return;
1250 }
1251
1252 rsp = net_buf_add(buf, sizeof(*rsp));
1253 (void)memset(rsp, 0, sizeof(*rsp));
1254 if (ch) {
1255 rsp->mps = sys_cpu_to_le16(ch->rx.mps);
1256 rsp->mtu = sys_cpu_to_le16(ch->rx.mtu);
1257 rsp->credits = sys_cpu_to_le16(ch->rx.init_credits);
1258 }
1259 rsp->result = sys_cpu_to_le16(result);
1260
1261 net_buf_add_mem(buf, dcid, sizeof(scid) * req_cid_count);
1262
1263 l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
1264 }
1265
1266 static void le_ecred_reconf_req(struct bt_l2cap *l2cap, uint8_t ident,
1267 struct net_buf *buf)
1268 {
1269 struct bt_conn *conn = l2cap->chan.chan.conn;
1270 struct bt_l2cap_chan *chans[L2CAP_ECRED_CHAN_MAX];
1271 struct bt_l2cap_ecred_reconf_req *req;
1272 struct bt_l2cap_ecred_reconf_rsp *rsp;
1273 uint16_t mtu, mps;
1274 uint16_t scid, result = BT_L2CAP_RECONF_SUCCESS;
1275 int chan_count = 0;
1276 bool mps_reduced = false;
1277
1278 if (buf->len < sizeof(*req)) {
1279 BT_ERR("Too small ecred reconf req packet size");
1280 return;
1281 }
1282
1283 req = net_buf_pull_mem(buf, sizeof(*req));
1284
1285 mtu = sys_le16_to_cpu(req->mtu);
1286 mps = sys_le16_to_cpu(req->mps);
1287
1288 if (mps < L2CAP_ECRED_MIN_MTU) {
1289 result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
1290 goto response;
1291 }
1292
1293 if (mtu < L2CAP_ECRED_MIN_MTU) {
1294 result = BT_L2CAP_RECONF_INVALID_MTU;
1295 goto response;
1296 }
1297
1298 while (buf->len >= sizeof(scid)) {
1299 struct bt_l2cap_chan *chan;
1300 scid = net_buf_pull_le16(buf);
1301 chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
1302 if (!chan) {
1303 result = BT_L2CAP_RECONF_INVALID_CID;
1304 goto response;
1305 }
1306
1307 if (BT_L2CAP_LE_CHAN(chan)->tx.mtu > mtu) {
1308 BT_ERR("chan %p decreased MTU %u -> %u", chan,
1309 BT_L2CAP_LE_CHAN(chan)->tx.mtu, mtu);
1310 result = BT_L2CAP_RECONF_INVALID_MTU;
1311 goto response;
1312 }
1313
1314 if (BT_L2CAP_LE_CHAN(chan)->tx.mps > mps) {
1315 mps_reduced = true;
1316 }
1317
1318 chans[chan_count] = chan;
1319 chan_count++;
1320 }
1321
1322 /* As per BT Core Spec V5.2 Vol. 3, Part A, section 7.11
1323 * The request (...) shall not decrease the MPS of a channel
1324 * if more than one channel is specified.
1325 */
1326 if (mps_reduced && chan_count > 1) {
1327 result = BT_L2CAP_RECONF_INVALID_MPS;
1328 goto response;
1329 }
1330
1331 for (int i = 0; i < chan_count; i++) {
1332 BT_L2CAP_LE_CHAN(chans[i])->tx.mtu = mtu;
1333 BT_L2CAP_LE_CHAN(chans[i])->tx.mps = mps;
1334
1335 if (chans[i]->ops->reconfigured) {
1336 chans[i]->ops->reconfigured(chans[i]);
1337 }
1338 }
1339
1340 BT_DBG("mtu %u mps %u", mtu, mps);
1341
1342 response:
1343 buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_ECRED_RECONF_RSP, ident,
1344 sizeof(*rsp));
1345
1346 rsp = net_buf_add(buf, sizeof(*rsp));
1347 rsp->result = sys_cpu_to_le16(result);
1348
1349 l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
1350 }
1351
1352 static void le_ecred_reconf_rsp(struct bt_l2cap *l2cap, uint8_t ident,
1353 struct net_buf *buf)
1354 {
1355 struct bt_conn *conn = l2cap->chan.chan.conn;
1356 struct bt_l2cap_ecred_reconf_rsp *rsp;
1357 struct bt_l2cap_le_chan *ch;
1358 uint16_t result;
1359
1360 if (buf->len < sizeof(*rsp)) {
1361 BT_ERR("Too small ecred reconf rsp packet size");
1362 return;
1363 }
1364
1365 rsp = net_buf_pull_mem(buf, sizeof(*rsp));
1366 result = sys_le16_to_cpu(rsp->result);
1367
1368 while ((ch = l2cap_lookup_ident(conn, ident))) {
1369 if (result == BT_L2CAP_LE_SUCCESS) {
1370 ch->rx.mtu = ch->pending_rx_mtu;
1371 }
1372
1373 ch->pending_rx_mtu = 0;
1374 ch->chan.ident = 0U;
1375
1376 if (ch->chan.ops->reconfigured) {
1377 ch->chan.ops->reconfigured(&ch->chan);
1378 }
1379 }
1380 }
1381 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
1382
1383 static struct bt_l2cap_le_chan *l2cap_remove_rx_cid(struct bt_conn *conn,
1384 uint16_t cid)
1385 {
1386 struct bt_l2cap_chan *chan;
1387 sys_snode_t *prev = NULL;
1388
1389 /* Protect fixed channels against accidental removal */
1390 if (!L2CAP_LE_CID_IS_DYN(cid)) {
1391 return NULL;
1392 }
1393
1394 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
1395 if (BT_L2CAP_LE_CHAN(chan)->rx.cid == cid) {
1396 sys_slist_remove(&conn->channels, prev, &chan->node);
1397 return BT_L2CAP_LE_CHAN(chan);
1398 }
1399
1400 prev = &chan->node;
1401 }
1402
1403 return NULL;
1404 }
1405
1406 static void le_disconn_req(struct bt_l2cap *l2cap, uint8_t ident,
1407 struct net_buf *buf)
1408 {
1409 struct bt_conn *conn = l2cap->chan.chan.conn;
1410 struct bt_l2cap_le_chan *chan;
1411 struct bt_l2cap_disconn_req *req = (void *)buf->data;
1412 struct bt_l2cap_disconn_rsp *rsp;
1413 uint16_t dcid;
1414
1415 if (buf->len < sizeof(*req)) {
1416 BT_ERR("Too small LE disconn req packet size");
1417 return;
1418 }
1419
1420 dcid = sys_le16_to_cpu(req->dcid);
1421
1422 BT_DBG("dcid 0x%04x scid 0x%04x", dcid, sys_le16_to_cpu(req->scid));
1423
1424 chan = l2cap_remove_rx_cid(conn, dcid);
1425 if (!chan) {
1426 struct bt_l2cap_cmd_reject_cid_data data;
1427
1428 data.scid = req->scid;
1429 data.dcid = req->dcid;
1430
1431 l2cap_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID, &data,
1432 sizeof(data));
1433 return;
1434 }
1435
1436 buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_DISCONN_RSP, ident,
1437 sizeof(*rsp));
1438 if (!buf) {
1439 return;
1440 }
1441
1442 rsp = net_buf_add(buf, sizeof(*rsp));
1443 rsp->dcid = sys_cpu_to_le16(chan->rx.cid);
1444 rsp->scid = sys_cpu_to_le16(chan->tx.cid);
1445
1446 bt_l2cap_chan_del(&chan->chan);
1447
1448 l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
1449 }
1450
1451 static int l2cap_change_security(struct bt_l2cap_le_chan *chan, uint16_t err)
1452 {
1453 struct bt_conn *conn = chan->chan.conn;
1454 bt_security_t sec;
1455 int ret;
1456
1457 if (atomic_test_bit(chan->chan.status,
1458 BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
1459 return -EINPROGRESS;
1460 }
1461
1462 switch (err) {
1463 case BT_L2CAP_LE_ERR_ENCRYPTION:
1464 if (conn->sec_level >= BT_SECURITY_L2) {
1465 return -EALREADY;
1466 }
1467
1468 sec = BT_SECURITY_L2;
1469 break;
1470 case BT_L2CAP_LE_ERR_AUTHENTICATION:
1471 if (conn->sec_level < BT_SECURITY_L2) {
1472 sec = BT_SECURITY_L2;
1473 } else if (conn->sec_level < BT_SECURITY_L3) {
1474 sec = BT_SECURITY_L3;
1475 } else if (conn->sec_level < BT_SECURITY_L4) {
1476 sec = BT_SECURITY_L4;
1477 } else {
1478 return -EALREADY;
1479 }
1480 break;
1481 default:
1482 return -EINVAL;
1483 }
1484
1485 ret = bt_conn_set_security(chan->chan.conn, sec);
1486 if (ret < 0) {
1487 return ret;
1488 }
1489
1490 atomic_set_bit(chan->chan.status, BT_L2CAP_STATUS_ENCRYPT_PENDING);
1491
1492 return 0;
1493 }
1494
1495 #if defined(CONFIG_BT_L2CAP_ECRED)
1496 static void le_ecred_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
1497 struct net_buf *buf)
1498 {
1499 struct bt_conn *conn = l2cap->chan.chan.conn;
1500 struct bt_l2cap_le_chan *chan;
1501 struct bt_l2cap_ecred_conn_rsp *rsp;
1502 uint16_t dcid, mtu, mps, credits, result;
1503
1504 if (buf->len < sizeof(*rsp)) {
1505 BT_ERR("Too small ecred conn rsp packet size");
1506 return;
1507 }
1508
1509 rsp = net_buf_pull_mem(buf, sizeof(*rsp));
1510 mtu = sys_le16_to_cpu(rsp->mtu);
1511 mps = sys_le16_to_cpu(rsp->mps);
1512 credits = sys_le16_to_cpu(rsp->credits);
1513 result = sys_le16_to_cpu(rsp->result);
1514
1515 BT_DBG("mtu 0x%04x mps 0x%04x credits 0x%04x result %u", mtu,
1516 mps, credits, result);
1517
1518 switch (result) {
1519 case BT_L2CAP_LE_ERR_AUTHENTICATION:
1520 case BT_L2CAP_LE_ERR_ENCRYPTION:
1521 while ((chan = l2cap_lookup_ident(conn, ident))) {
1522 /* Cancel RTX work */
1523 k_work_cancel_delayable(&chan->chan.rtx_work);
1524
1525 /* If security needs changing, wait for it to complete */
1526 if (!l2cap_change_security(chan, result)) {
1527 return;
1528 }
1529 bt_l2cap_chan_remove(conn, &chan->chan);
1530 bt_l2cap_chan_del(&chan->chan);
1531 }
1532 break;
1533 case BT_L2CAP_LE_SUCCESS:
1534 /* Some connections refused – invalid Source CID */
1535 case BT_L2CAP_LE_ERR_INVALID_SCID:
1536 /* Some connections refused – Source CID already allocated */
1537 case BT_L2CAP_LE_ERR_SCID_IN_USE:
1538 /* Some connections refused – not enough resources available */
1539 case BT_L2CAP_LE_ERR_NO_RESOURCES:
1540 while ((chan = l2cap_lookup_ident(conn, ident))) {
1541 struct bt_l2cap_chan *c;
1542
1543 /* Cancel RTX work */
1544 k_work_cancel_delayable(&chan->chan.rtx_work);
1545
1546 dcid = net_buf_pull_le16(buf);
1547
1548 BT_DBG("dcid 0x%04x", dcid);
1549
1550 /* If a Destination CID is 0x0000, the channel was not
1551 * established.
1552 */
1553 if (!dcid) {
1554 bt_l2cap_chan_remove(conn, &chan->chan);
1555 bt_l2cap_chan_del(&chan->chan);
1556 continue;
1557 }
1558
1559 c = bt_l2cap_le_lookup_tx_cid(conn, dcid);
1560 if (c) {
1561 /* If a device receives a
1562 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet
1563 * with an already assigned Destination CID,
1564 * then both the original channel and the new
1565 * channel shall be immediately discarded and
1566 * not used.
1567 */
1568 bt_l2cap_chan_remove(conn, &chan->chan);
1569 bt_l2cap_chan_del(&chan->chan);
1570 bt_l2cap_chan_disconnect(c);
1571 continue;
1572 }
1573
1574 chan->tx.cid = dcid;
1575
1576 chan->chan.ident = 0U;
1577
1578 chan->tx.mtu = mtu;
1579 chan->tx.mps = mps;
1580
1581 /* Update state */
1582 bt_l2cap_chan_set_state(&chan->chan,
1583 BT_L2CAP_CONNECTED);
1584
1585 if (chan->chan.ops->connected) {
1586 chan->chan.ops->connected(&chan->chan);
1587 }
1588
1589 /* Give credits */
1590 l2cap_chan_tx_give_credits(chan, credits);
1591 l2cap_chan_rx_give_credits(chan, chan->rx.init_credits);
1592 }
1593 break;
1594 case BT_L2CAP_LE_ERR_PSM_NOT_SUPP:
1595 default:
1596 while ((chan = l2cap_remove_ident(conn, ident))) {
1597 bt_l2cap_chan_del(&chan->chan);
1598 }
1599 break;
1600 }
1601 }
1602 #endif /* CONFIG_BT_L2CAP_ECRED */
1603
1604 static void le_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
1605 struct net_buf *buf)
1606 {
1607 struct bt_conn *conn = l2cap->chan.chan.conn;
1608 struct bt_l2cap_le_chan *chan;
1609 struct bt_l2cap_le_conn_rsp *rsp = (void *)buf->data;
1610 uint16_t dcid, mtu, mps, credits, result;
1611
1612 if (buf->len < sizeof(*rsp)) {
1613 BT_ERR("Too small LE conn rsp packet size");
1614 return;
1615 }
1616
1617 dcid = sys_le16_to_cpu(rsp->dcid);
1618 mtu = sys_le16_to_cpu(rsp->mtu);
1619 mps = sys_le16_to_cpu(rsp->mps);
1620 credits = sys_le16_to_cpu(rsp->credits);
1621 result = sys_le16_to_cpu(rsp->result);
1622
1623 BT_DBG("dcid 0x%04x mtu %u mps %u credits %u result 0x%04x", dcid,
1624 mtu, mps, credits, result);
1625
1626 /* Keep the channel in case of security errors */
1627 if (result == BT_L2CAP_LE_SUCCESS ||
1628 result == BT_L2CAP_LE_ERR_AUTHENTICATION ||
1629 result == BT_L2CAP_LE_ERR_ENCRYPTION) {
1630 chan = l2cap_lookup_ident(conn, ident);
1631 } else {
1632 chan = l2cap_remove_ident(conn, ident);
1633 }
1634
1635 if (!chan) {
1636 BT_ERR("Cannot find channel for ident %u", ident);
1637 return;
1638 }
1639
1640 /* Cancel RTX work */
1641 k_work_cancel_delayable(&chan->chan.rtx_work);
1642
1643 /* Reset ident since it got a response */
1644 chan->chan.ident = 0U;
1645
1646 switch (result) {
1647 case BT_L2CAP_LE_SUCCESS:
1648 chan->tx.cid = dcid;
1649 chan->tx.mtu = mtu;
1650 chan->tx.mps = mps;
1651
1652 /* Update state */
1653 bt_l2cap_chan_set_state(&chan->chan, BT_L2CAP_CONNECTED);
1654
1655 if (chan->chan.ops->connected) {
1656 chan->chan.ops->connected(&chan->chan);
1657 }
1658
1659 /* Give credits */
1660 l2cap_chan_tx_give_credits(chan, credits);
1661 l2cap_chan_rx_give_credits(chan, chan->rx.init_credits);
1662
1663 break;
1664 case BT_L2CAP_LE_ERR_AUTHENTICATION:
1665 case BT_L2CAP_LE_ERR_ENCRYPTION:
1666 /* If security needs changing, wait for it to complete */
1667 if (l2cap_change_security(chan, result) == 0) {
1668 return;
1669 }
1670 bt_l2cap_chan_remove(conn, &chan->chan);
1671 __fallthrough;
1672 default:
1673 bt_l2cap_chan_del(&chan->chan);
1674 }
1675 }
1676
1677 static void le_disconn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
1678 struct net_buf *buf)
1679 {
1680 struct bt_conn *conn = l2cap->chan.chan.conn;
1681 struct bt_l2cap_le_chan *chan;
1682 struct bt_l2cap_disconn_rsp *rsp = (void *)buf->data;
1683 uint16_t scid;
1684
1685 if (buf->len < sizeof(*rsp)) {
1686 BT_ERR("Too small LE disconn rsp packet size");
1687 return;
1688 }
1689
1690 scid = sys_le16_to_cpu(rsp->scid);
1691
1692 BT_DBG("dcid 0x%04x scid 0x%04x", sys_le16_to_cpu(rsp->dcid), scid);
1693
1694 chan = l2cap_remove_rx_cid(conn, scid);
1695 if (!chan) {
1696 return;
1697 }
1698
1699 bt_l2cap_chan_del(&chan->chan);
1700 }
1701
1702 static inline struct net_buf *l2cap_alloc_seg(struct net_buf *buf, struct bt_l2cap_le_chan *ch)
1703 {
1704 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
1705 struct net_buf *seg;
1706
1707 /* Use the dedicated segment callback if registered */
1708 if (ch->chan.ops->alloc_seg) {
1709 seg = ch->chan.ops->alloc_seg(&ch->chan);
1710 __ASSERT_NO_MSG(seg);
1711 } else {
1712 /* Try to use original pool if possible */
1713 seg = net_buf_alloc(pool, K_NO_WAIT);
1714 }
1715
1716 if (seg) {
1717 net_buf_reserve(seg, BT_L2CAP_CHAN_SEND_RESERVE);
1718 return seg;
1719 }
1720
1721 /* Fall back to using the global connection tx pool */
1722 return bt_l2cap_create_pdu_timeout(NULL, 0, K_NO_WAIT);
1723 }
1724
1725 static struct net_buf *l2cap_chan_create_seg(struct bt_l2cap_le_chan *ch,
1726 struct net_buf *buf,
1727 size_t sdu_hdr_len)
1728 {
1729 struct net_buf *seg;
1730 uint16_t headroom;
1731 uint16_t len;
1732
1733 /* Segment if data (+ data headroom) is bigger than MPS */
1734 if (buf->len + sdu_hdr_len > ch->tx.mps) {
1735 goto segment;
1736 }
1737
1738 headroom = BT_L2CAP_CHAN_SEND_RESERVE + sdu_hdr_len;
1739
1740 /* Check if the original buffer has enough headroom and doesn't have any
1741 * fragments.
1742 */
1743 if (net_buf_headroom(buf) >= headroom && !buf->frags) {
1744 if (sdu_hdr_len) {
1745 /* Push SDU length if set */
1746 net_buf_push_le16(buf, net_buf_frags_len(buf));
1747 }
1748 return net_buf_ref(buf);
1749 }
1750
1751 segment:
1752 seg = l2cap_alloc_seg(buf, ch);
1753
1754 if (!seg) {
1755 return NULL;
1756 }
1757
1758 if (sdu_hdr_len) {
1759 net_buf_add_le16(seg, net_buf_frags_len(buf));
1760 }
1761
1762 /* Don't send more than TX MPS, including the SDU length */
1763 len = MIN(net_buf_tailroom(seg), ch->tx.mps - sdu_hdr_len);
1764 /* Limit if original buffer is smaller than the segment */
1765 len = MIN(buf->len, len);
1766 net_buf_add_mem(seg, buf->data, len);
1767 net_buf_pull(buf, len);
1768
1769 BT_DBG("ch %p seg %p len %u", ch, seg, seg->len);
1770
1771 return seg;
1772 }
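/* Illustrative numbers for the segmentation above: with tx.mps = 64 and a
 * 100-byte SDU, the first segment carries the 2-byte SDU length header plus
 * 62 bytes of payload, and the remaining 38 bytes are sent in a follow-up
 * segment (credits permitting) of at most tx.mps bytes.
 */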
1773
1774 static void l2cap_chan_tx_resume(struct bt_l2cap_le_chan *ch)
1775 {
1776 if (!atomic_get(&ch->tx.credits) ||
1777 (k_fifo_is_empty(&ch->tx_queue) && !ch->tx_buf)) {
1778 return;
1779 }
1780
1781 k_work_submit(&ch->tx_work);
1782 }
1783
1784 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
1785 static void resume_all_channels(struct bt_conn *conn, void *data)
1786 {
1787 struct bt_l2cap_chan *chan;
1788
1789 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
1790 l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
1791 }
1792 }
1793 #endif
1794
1795 static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data)
1796 {
1797 uint16_t cid = POINTER_TO_UINT(user_data);
1798 struct bt_l2cap_chan *chan;
1799
1800 BT_DBG("conn %p CID 0x%04x", conn, cid);
1801
1802 chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
1803 if (!chan) {
1804 /* Received SDU sent callback for disconnected channel */
1805 return;
1806 }
1807
1808 if (chan->ops->sent) {
1809 chan->ops->sent(chan);
1810 }
1811
1812 /* Resume the current channel */
1813 l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
1814
1815 if (IS_ENABLED(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)) {
1816 /* Resume all other channels in case one might be stuck.
1817 * The current channel has already been given priority.
1818 */
1819 bt_conn_foreach(BT_CONN_TYPE_LE, resume_all_channels, NULL);
1820 }
1821 }
1822
1823 static void l2cap_chan_seg_sent(struct bt_conn *conn, void *user_data)
1824 {
1825 uint16_t cid = POINTER_TO_UINT(user_data);
1826 struct bt_l2cap_chan *chan;
1827
1828 BT_DBG("conn %p CID 0x%04x", conn, cid);
1829
1830 chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
1831 if (!chan) {
1832 /* Received segment sent callback for disconnected channel */
1833 return;
1834 }
1835
1836 l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
1837 }
1838
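/* Atomically decrement the credit counter, or fail without modifying it when
 * it is already zero. Implemented as a compare-and-swap loop so concurrent
 * credit updates are not lost.
 */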
1839 static bool test_and_dec(atomic_t *target)
1840 {
1841 atomic_t old_value, new_value;
1842
1843 do {
1844 old_value = atomic_get(target);
1845 if (!old_value) {
1846 return false;
1847 }
1848
1849 new_value = old_value - 1;
1850 } while (atomic_cas(target, old_value, new_value) == 0);
1851
1852 return true;
1853 }
1854
1855 /* This returns -EAGAIN whenever a segment cannot be sent immediately, which can
1856 * happen under the following circumstances:
1857 *
1858 * 1. There are no credits
1859 * 2. There are no buffers
1860 * 3. There are no TX contexts
1861 *
1862 * In all cases the original buffer is unaffected so it can be pushed back to
1863 * be sent later.
1864 */
1865 static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch,
1866 struct net_buf *buf, uint16_t sdu_hdr_len)
1867 {
1868 struct net_buf *seg;
1869 struct net_buf_simple_state state;
1870 int len, err;
1871
1872 if (!test_and_dec(&ch->tx.credits)) {
1873 BT_WARN("No credits to transmit packet");
1874 return -EAGAIN;
1875 }
1876
1877 /* Save state so it can be restored if we failed to send */
1878 net_buf_simple_save(&buf->b, &state);
1879
1880 seg = l2cap_chan_create_seg(ch, buf, sdu_hdr_len);
1881 if (!seg) {
1882 atomic_inc(&ch->tx.credits);
1883 return -EAGAIN;
1884 }
1885
1886 BT_DBG("ch %p cid 0x%04x len %u credits %u", ch, ch->tx.cid,
1887 seg->len, atomic_get(&ch->tx.credits));
1888
1889 len = seg->len - sdu_hdr_len;
1890
1891 /* Set a callback if there is no data left in the buffer and sent
1892 * callback has been set.
1893 */
1894 if ((buf == seg || !buf->len) && ch->chan.ops->sent) {
1895 err = bt_l2cap_send_cb(ch->chan.conn, ch->tx.cid, seg,
1896 l2cap_chan_sdu_sent,
1897 UINT_TO_POINTER(ch->tx.cid));
1898 } else {
1899 err = bt_l2cap_send_cb(ch->chan.conn, ch->tx.cid, seg,
1900 l2cap_chan_seg_sent,
1901 UINT_TO_POINTER(ch->tx.cid));
1902 }
1903
1904 if (err) {
1905 BT_WARN("Unable to send seg %d", err);
1906 atomic_inc(&ch->tx.credits);
1907
1908 /* The host takes ownership of the reference in seg when
1909 * bt_l2cap_send_cb is successful. The call returned an error,
1910 * so we must get rid of the reference that was taken in
1911 * l2cap_chan_create_seg.
1912 */
1913 net_buf_unref(seg);
1914
1915 if (err == -ENOBUFS) {
1916 /* Restore state since segment could not be sent */
1917 net_buf_simple_restore(&buf->b, &state);
1918 return -EAGAIN;
1919 }
1920
1921 return err;
1922 }
1923
1924 /* If there are no credits left, clear the output status and notify the
1925 * change.
1926 */
1927 if (!atomic_get(&ch->tx.credits)) {
1928 atomic_clear_bit(ch->chan.status, BT_L2CAP_STATUS_OUT);
1929 if (ch->chan.ops->status) {
1930 ch->chan.ops->status(&ch->chan, ch->chan.status);
1931 }
1932 }
1933
1934 return len;
1935 }
1936
1937 static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
1938 struct net_buf **buf, uint16_t sent)
1939 {
1940 int ret, total_len;
1941 struct net_buf *frag;
1942
1943 total_len = net_buf_frags_len(*buf) + sent;
1944
1945 if (total_len > ch->tx.mtu) {
1946 return -EMSGSIZE;
1947 }
1948
1949 frag = *buf;
1950 if (!frag->len && frag->frags) {
1951 frag = frag->frags;
1952 }
1953
1954 if (!sent) {
1955 /* Add SDU length for the first segment */
1956 ret = l2cap_chan_le_send(ch, frag, BT_L2CAP_SDU_HDR_SIZE);
1957 if (ret < 0) {
1958 if (ret == -EAGAIN) {
1959 				/* Store the number of bytes sent in user_data */
1960 data_sent(frag)->len = sent;
1961 }
1962 *buf = frag;
1963 return ret;
1964 }
1965 sent = ret;
1966 }
1967
1968 /* Send remaining segments */
1969 for (ret = 0; sent < total_len; sent += ret) {
1970 /* Proceed to next fragment */
1971 if (!frag->len) {
1972 frag = net_buf_frag_del(NULL, frag);
1973 }
1974
1975 ret = l2cap_chan_le_send(ch, frag, 0);
1976 if (ret < 0) {
1977 if (ret == -EAGAIN) {
1978 				/* Store the number of bytes sent in user_data */
1979 data_sent(frag)->len = sent;
1980 }
1981 *buf = frag;
1982 return ret;
1983 }
1984 }
1985
1986 BT_DBG("ch %p cid 0x%04x sent %u total_len %u", ch, ch->tx.cid, sent,
1987 total_len);
1988
1989 net_buf_unref(frag);
1990
1991 return ret;
1992 }
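/* Illustrative example (not part of the build) of the credit-based
 * segmentation performed above: with tx.mps of 64 and a 100-byte SDU, the
 * first PDU carries the 2-byte SDU length header plus 62 bytes of payload and
 * the second PDU carries the remaining 38 bytes, so the transfer consumes two
 * TX credits (one per PDU).
 */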
1993
1994 static void le_credits(struct bt_l2cap *l2cap, uint8_t ident,
1995 struct net_buf *buf)
1996 {
1997 struct bt_conn *conn = l2cap->chan.chan.conn;
1998 struct bt_l2cap_chan *chan;
1999 struct bt_l2cap_le_credits *ev = (void *)buf->data;
2000 struct bt_l2cap_le_chan *ch;
2001 uint16_t credits, cid;
2002
2003 if (buf->len < sizeof(*ev)) {
2004 BT_ERR("Too small LE Credits packet size");
2005 return;
2006 }
2007
2008 cid = sys_le16_to_cpu(ev->cid);
2009 credits = sys_le16_to_cpu(ev->credits);
2010
2011 BT_DBG("cid 0x%04x credits %u", cid, credits);
2012
2013 chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
2014 if (!chan) {
2015 BT_ERR("Unable to find channel of LE Credits packet");
2016 return;
2017 }
2018
2019 ch = BT_L2CAP_LE_CHAN(chan);
2020
2021 if (atomic_get(&ch->tx.credits) + credits > UINT16_MAX) {
2022 BT_ERR("Credits overflow");
2023 bt_l2cap_chan_disconnect(chan);
2024 return;
2025 }
2026
2027 l2cap_chan_tx_give_credits(ch, credits);
2028
2029 BT_DBG("chan %p total credits %u", ch, atomic_get(&ch->tx.credits));
2030
2031 l2cap_chan_tx_resume(ch);
2032 }
2033
2034 static void reject_cmd(struct bt_l2cap *l2cap, uint8_t ident,
2035 struct net_buf *buf)
2036 {
2037 struct bt_conn *conn = l2cap->chan.chan.conn;
2038 struct bt_l2cap_le_chan *chan;
2039
2040 	/* Check if there is an outstanding channel */
2041 chan = l2cap_remove_ident(conn, ident);
2042 if (!chan) {
2043 return;
2044 }
2045
2046 bt_l2cap_chan_del(&chan->chan);
2047 }
2048 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2049
2050 static int l2cap_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
2051 {
2052 struct bt_l2cap *l2cap = CONTAINER_OF(chan, struct bt_l2cap, chan);
2053 struct bt_l2cap_sig_hdr *hdr;
2054 uint16_t len;
2055
2056 if (buf->len < sizeof(*hdr)) {
2057 BT_ERR("Too small L2CAP signaling PDU");
2058 return 0;
2059 }
2060
2061 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2062 len = sys_le16_to_cpu(hdr->len);
2063
2064 BT_DBG("Signaling code 0x%02x ident %u len %u", hdr->code,
2065 hdr->ident, len);
2066
2067 if (buf->len != len) {
2068 BT_ERR("L2CAP length mismatch (%u != %u)", buf->len, len);
2069 return 0;
2070 }
2071
2072 if (!hdr->ident) {
2073 BT_ERR("Invalid ident value in L2CAP PDU");
2074 return 0;
2075 }
2076
2077 switch (hdr->code) {
2078 case BT_L2CAP_CONN_PARAM_RSP:
2079 le_conn_param_rsp(l2cap, buf);
2080 break;
2081 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
2082 case BT_L2CAP_LE_CONN_REQ:
2083 le_conn_req(l2cap, hdr->ident, buf);
2084 break;
2085 case BT_L2CAP_LE_CONN_RSP:
2086 le_conn_rsp(l2cap, hdr->ident, buf);
2087 break;
2088 case BT_L2CAP_DISCONN_REQ:
2089 le_disconn_req(l2cap, hdr->ident, buf);
2090 break;
2091 case BT_L2CAP_DISCONN_RSP:
2092 le_disconn_rsp(l2cap, hdr->ident, buf);
2093 break;
2094 case BT_L2CAP_LE_CREDITS:
2095 le_credits(l2cap, hdr->ident, buf);
2096 break;
2097 case BT_L2CAP_CMD_REJECT:
2098 reject_cmd(l2cap, hdr->ident, buf);
2099 break;
2100 #if defined(CONFIG_BT_L2CAP_ECRED)
2101 case BT_L2CAP_ECRED_CONN_REQ:
2102 le_ecred_conn_req(l2cap, hdr->ident, buf);
2103 break;
2104 case BT_L2CAP_ECRED_CONN_RSP:
2105 le_ecred_conn_rsp(l2cap, hdr->ident, buf);
2106 break;
2107 case BT_L2CAP_ECRED_RECONF_REQ:
2108 le_ecred_reconf_req(l2cap, hdr->ident, buf);
2109 break;
2110 case BT_L2CAP_ECRED_RECONF_RSP:
2111 le_ecred_reconf_rsp(l2cap, hdr->ident, buf);
2112 break;
2113 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
2114 #else
2115 case BT_L2CAP_CMD_REJECT:
2116 /* Ignored */
2117 break;
2118 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2119 case BT_L2CAP_CONN_PARAM_REQ:
2120 if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
2121 le_conn_param_update_req(l2cap, hdr->ident, buf);
2122 break;
2123 }
2124 __fallthrough;
2125 default:
2126 BT_WARN("Unknown L2CAP PDU code 0x%02x", hdr->code);
2127 l2cap_send_reject(chan->conn, hdr->ident,
2128 BT_L2CAP_REJ_NOT_UNDERSTOOD, NULL, 0);
2129 break;
2130 }
2131
2132 return 0;
2133 }
2134
2135 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
2136 static void l2cap_chan_shutdown(struct bt_l2cap_chan *chan)
2137 {
2138 struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
2139 struct net_buf *buf;
2140
2141 BT_DBG("chan %p", chan);
2142
2143 atomic_set_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN);
2144
2145 /* Destroy segmented SDU if it exists */
2146 if (ch->_sdu) {
2147 net_buf_unref(ch->_sdu);
2148 ch->_sdu = NULL;
2149 ch->_sdu_len = 0U;
2150 }
2151
2152 /* Cleanup outstanding request */
2153 if (ch->tx_buf) {
2154 net_buf_unref(ch->tx_buf);
2155 ch->tx_buf = NULL;
2156 }
2157
2158 /* Remove buffers on the TX queue */
2159 while ((buf = net_buf_get(&ch->tx_queue, K_NO_WAIT))) {
2160 net_buf_unref(buf);
2161 }
2162
2163 /* Remove buffers on the RX queue */
2164 while ((buf = net_buf_get(&ch->rx_queue, K_NO_WAIT))) {
2165 net_buf_unref(buf);
2166 }
2167
2168 /* Update status */
2169 if (chan->ops->status) {
2170 chan->ops->status(chan, chan->status);
2171 }
2172 }
2173
2174 static void l2cap_chan_send_credits(struct bt_l2cap_le_chan *chan,
2175 struct net_buf *buf, uint16_t credits)
2176 {
2177 struct bt_l2cap_le_credits *ev;
2178 uint16_t old_credits;
2179
2180 /* Cap the number of credits given */
2181 if (credits > chan->rx.init_credits) {
2182 credits = chan->rx.init_credits;
2183 }
2184
2185 /* Don't send back more than the initial amount. */
2186 old_credits = atomic_get(&chan->rx.credits);
2187 if (credits + old_credits > chan->rx.init_credits) {
2188 credits = chan->rx.init_credits - old_credits;
2189 }
2190
2191 buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_LE_CREDITS, get_ident(),
2192 sizeof(*ev));
2193 if (!buf) {
2194 BT_ERR("Unable to send credits update");
2195 		/* Disconnecting would probably not work either, so the only
2196 		 * option left is to shut down the channel.
2197 		 */
2198 l2cap_chan_shutdown(&chan->chan);
2199 return;
2200 }
2201
2202 l2cap_chan_rx_give_credits(chan, credits);
2203
2204 ev = net_buf_add(buf, sizeof(*ev));
2205 ev->cid = sys_cpu_to_le16(chan->rx.cid);
2206 ev->credits = sys_cpu_to_le16(credits);
2207
2208 l2cap_send(chan->chan.conn, BT_L2CAP_CID_LE_SIG, buf);
2209
2210 BT_DBG("chan %p credits %u", chan, atomic_get(&chan->rx.credits));
2211 }
2212
2213 static void l2cap_chan_update_credits(struct bt_l2cap_le_chan *chan,
2214 struct net_buf *buf)
2215 {
2216 uint16_t credits;
2217 atomic_val_t old_credits = atomic_get(&chan->rx.credits);
2218
2219 	/* Restore enough credits to complete the SDU */
2220 credits = ((chan->_sdu_len - net_buf_frags_len(buf)) +
2221 (chan->rx.mps - 1)) / chan->rx.mps;
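	/* For example, with an SDU of 160 bytes, 64 bytes already received and
	 * an MPS of 64, ceil(96 / 64) = 2 more credits are needed to complete
	 * the SDU.
	 */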
2222
2223 BT_DBG("cred %d old %d", credits, (int)old_credits);
2224
2225 if (credits < old_credits) {
2226 return;
2227 }
2228
2229 credits -= old_credits;
2230
2231 l2cap_chan_send_credits(chan, buf, credits);
2232 }
2233
2234 int bt_l2cap_chan_recv_complete(struct bt_l2cap_chan *chan, struct net_buf *buf)
2235 {
2236 struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
2237 struct bt_conn *conn = chan->conn;
2238 uint16_t credits;
2239
2240 __ASSERT_NO_MSG(chan);
2241 __ASSERT_NO_MSG(buf);
2242
2243 if (!conn) {
2244 return -ENOTCONN;
2245 }
2246
2247 if (conn->type != BT_CONN_TYPE_LE) {
2248 return -ENOTSUP;
2249 }
2250
2251 BT_DBG("chan %p buf %p", chan, buf);
2252
2253 /* Restore credits used by packet */
2254 memcpy(&credits, net_buf_user_data(buf), sizeof(credits));
2255
2256 l2cap_chan_send_credits(ch, buf, credits);
2257
2258 net_buf_unref(buf);
2259
2260 return 0;
2261 }
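/* Illustrative sketch (not part of the build): an application recv callback
 * that defers processing of an SDU. Returning -EINPROGRESS keeps the buffer
 * with the application; once processing is done the buffer is handed back
 * with bt_l2cap_chan_recv_complete() so that the credits it consumed are
 * returned to the peer. `app_defer_processing` is a hypothetical helper.
 *
 *   static int app_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
 *   {
 *           app_defer_processing(chan, buf);
 *           return -EINPROGRESS;
 *   }
 *
 *   // Later, once the application is done with the buffer:
 *   bt_l2cap_chan_recv_complete(chan, buf);
 */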
2262
2263 static struct net_buf *l2cap_alloc_frag(k_timeout_t timeout, void *user_data)
2264 {
2265 struct bt_l2cap_le_chan *chan = user_data;
2266 struct net_buf *frag = NULL;
2267
2268 frag = chan->chan.ops->alloc_buf(&chan->chan);
2269 if (!frag) {
2270 return NULL;
2271 }
2272
2273 BT_DBG("frag %p tailroom %zu", frag, net_buf_tailroom(frag));
2274
2275 return frag;
2276 }
2277
2278 static void l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan *chan,
2279 struct net_buf *buf, uint16_t seg)
2280 {
2281 int err;
2282
2283 BT_DBG("chan %p len %zu", chan, net_buf_frags_len(buf));
2284
2285 /* Receiving complete SDU, notify channel and reset SDU buf */
2286 err = chan->chan.ops->recv(&chan->chan, buf);
2287 if (err < 0) {
2288 if (err != -EINPROGRESS) {
2289 BT_ERR("err %d", err);
2290 bt_l2cap_chan_disconnect(&chan->chan);
2291 net_buf_unref(buf);
2292 }
2293 return;
2294 }
2295
2296 l2cap_chan_send_credits(chan, buf, seg);
2297 net_buf_unref(buf);
2298 }
2299
2300 static void l2cap_chan_le_recv_seg(struct bt_l2cap_le_chan *chan,
2301 struct net_buf *buf)
2302 {
2303 uint16_t len;
2304 uint16_t seg = 0U;
2305
2306 len = net_buf_frags_len(chan->_sdu);
2307 if (len) {
2308 memcpy(&seg, net_buf_user_data(chan->_sdu), sizeof(seg));
2309 }
2310
2311 if (len + buf->len > chan->_sdu_len) {
2312 BT_ERR("SDU length mismatch");
2313 bt_l2cap_chan_disconnect(&chan->chan);
2314 return;
2315 }
2316
2317 seg++;
2318 	/* Store the number of received segments in user_data */
2319 memcpy(net_buf_user_data(chan->_sdu), &seg, sizeof(seg));
2320
2321 BT_DBG("chan %p seg %d len %zu", chan, seg, net_buf_frags_len(buf));
2322
2323 /* Append received segment to SDU */
2324 len = net_buf_append_bytes(chan->_sdu, buf->len, buf->data, K_NO_WAIT,
2325 l2cap_alloc_frag, chan);
2326 if (len != buf->len) {
2327 BT_ERR("Unable to store SDU");
2328 bt_l2cap_chan_disconnect(&chan->chan);
2329 return;
2330 }
2331
2332 if (net_buf_frags_len(chan->_sdu) < chan->_sdu_len) {
2333 		/* Give more credits if the remote has run out of them; this
2334 		 * should only happen if the remote cannot fully utilize the
2335 		 * MPS for some reason.
2336 		 */
2337 if (!atomic_get(&chan->rx.credits) &&
2338 seg == chan->rx.init_credits) {
2339 l2cap_chan_update_credits(chan, buf);
2340 }
2341 return;
2342 }
2343
2344 buf = chan->_sdu;
2345 chan->_sdu = NULL;
2346 chan->_sdu_len = 0U;
2347
2348 l2cap_chan_le_recv_sdu(chan, buf, seg);
2349 }
2350
2351 static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
2352 struct net_buf *buf)
2353 {
2354 uint16_t sdu_len;
2355 int err;
2356
2357 if (!test_and_dec(&chan->rx.credits)) {
2358 BT_ERR("No credits to receive packet");
2359 bt_l2cap_chan_disconnect(&chan->chan);
2360 return;
2361 }
2362
2363 /* Check if segments already exist */
2364 if (chan->_sdu) {
2365 l2cap_chan_le_recv_seg(chan, buf);
2366 return;
2367 }
2368
2369 if (buf->len < 2) {
2370 BT_WARN("Too short data packet");
2371 bt_l2cap_chan_disconnect(&chan->chan);
2372 return;
2373 }
2374
2375 sdu_len = net_buf_pull_le16(buf);
2376
2377 BT_DBG("chan %p len %u sdu_len %u", chan, buf->len, sdu_len);
2378
2379 if (sdu_len > chan->rx.mtu) {
2380 BT_ERR("Invalid SDU length");
2381 bt_l2cap_chan_disconnect(&chan->chan);
2382 return;
2383 }
2384
2385 /* Always allocate buffer from the channel if supported. */
2386 if (chan->chan.ops->alloc_buf) {
2387 chan->_sdu = chan->chan.ops->alloc_buf(&chan->chan);
2388 if (!chan->_sdu) {
2389 BT_ERR("Unable to allocate buffer for SDU");
2390 bt_l2cap_chan_disconnect(&chan->chan);
2391 return;
2392 }
2393 chan->_sdu_len = sdu_len;
2394 l2cap_chan_le_recv_seg(chan, buf);
2395 return;
2396 }
2397
2398 err = chan->chan.ops->recv(&chan->chan, buf);
2399 if (err) {
2400 if (err != -EINPROGRESS) {
2401 BT_ERR("err %d", err);
2402 bt_l2cap_chan_disconnect(&chan->chan);
2403 }
2404 return;
2405 }
2406
2407 l2cap_chan_send_credits(chan, buf, 1);
2408 }
2409
2410 static void l2cap_chan_recv_queue(struct bt_l2cap_le_chan *chan,
2411 struct net_buf *buf)
2412 {
2413 if (chan->chan.state == BT_L2CAP_DISCONNECT) {
2414 BT_WARN("Ignoring data received while disconnecting");
2415 net_buf_unref(buf);
2416 return;
2417 }
2418
2419 if (atomic_test_bit(chan->chan.status, BT_L2CAP_STATUS_SHUTDOWN)) {
2420 BT_WARN("Ignoring data received while channel has shutdown");
2421 net_buf_unref(buf);
2422 return;
2423 }
2424
2425 if (!L2CAP_LE_PSM_IS_DYN(chan->chan.psm)) {
2426 l2cap_chan_le_recv(chan, buf);
2427 net_buf_unref(buf);
2428 return;
2429 }
2430
2431 net_buf_put(&chan->rx_queue, buf);
2432 k_work_submit(&chan->rx_work);
2433 }
2434 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2435
2436 static void l2cap_chan_recv(struct bt_l2cap_chan *chan, struct net_buf *buf,
2437 bool complete)
2438 {
2439 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
2440 struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
2441
2442 if (L2CAP_LE_CID_IS_DYN(ch->rx.cid)) {
2443 if (complete) {
2444 l2cap_chan_recv_queue(ch, buf);
2445 } else {
2446 			/* If the packet was not complete, the peer device
2447 			 * overflowed our RX buffer and the channel shall be
2448 			 * disconnected.
2449 			 */
2449 bt_l2cap_chan_disconnect(chan);
2450 net_buf_unref(buf);
2451 }
2452
2453 return;
2454 }
2455 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2456
2457 BT_DBG("chan %p len %u", chan, buf->len);
2458
2459 chan->ops->recv(chan, buf);
2460 net_buf_unref(buf);
2461 }
2462
2463 void bt_l2cap_recv(struct bt_conn *conn, struct net_buf *buf, bool complete)
2464 {
2465 struct bt_l2cap_hdr *hdr;
2466 struct bt_l2cap_chan *chan;
2467 uint16_t cid;
2468
2469 if (IS_ENABLED(CONFIG_BT_BREDR) &&
2470 conn->type == BT_CONN_TYPE_BR) {
2471 bt_l2cap_br_recv(conn, buf);
2472 return;
2473 }
2474
2475 if (buf->len < sizeof(*hdr)) {
2476 BT_ERR("Too small L2CAP PDU received");
2477 net_buf_unref(buf);
2478 return;
2479 }
2480
2481 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2482 cid = sys_le16_to_cpu(hdr->cid);
2483
2484 BT_DBG("Packet for CID %u len %u", cid, buf->len);
2485
2486 chan = bt_l2cap_le_lookup_rx_cid(conn, cid);
2487 if (!chan) {
2488 BT_WARN("Ignoring data for unknown channel ID 0x%04x", cid);
2489 net_buf_unref(buf);
2490 return;
2491 }
2492
2493 l2cap_chan_recv(chan, buf, complete);
2494 }
2495
2496 int bt_l2cap_update_conn_param(struct bt_conn *conn,
2497 const struct bt_le_conn_param *param)
2498 {
2499 struct bt_l2cap_conn_param_req *req;
2500 struct net_buf *buf;
2501 int err;
2502
2503 buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_CONN_PARAM_REQ,
2504 get_ident(), sizeof(*req));
2505 if (!buf) {
2506 return -ENOMEM;
2507 }
2508
2509 req = net_buf_add(buf, sizeof(*req));
2510 req->min_interval = sys_cpu_to_le16(param->interval_min);
2511 req->max_interval = sys_cpu_to_le16(param->interval_max);
2512 req->latency = sys_cpu_to_le16(param->latency);
2513 req->timeout = sys_cpu_to_le16(param->timeout);
2514
2515 err = bt_l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
2516 if (err) {
2517 net_buf_unref(buf);
2518 return err;
2519 }
2520
2521 return 0;
2522 }
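/* Illustrative sketch (not part of the build): requesting new connection
 * parameters, typically from the peripheral side. Interval values are in
 * units of 1.25 ms and the supervision timeout in units of 10 ms; the values
 * below are only examples.
 *
 *   struct bt_le_conn_param param = {
 *           .interval_min = 24,     // 30 ms
 *           .interval_max = 40,     // 50 ms
 *           .latency = 0,
 *           .timeout = 400,         // 4 s
 *   };
 *
 *   err = bt_l2cap_update_conn_param(conn, &param);
 */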
2523
2524 static void l2cap_connected(struct bt_l2cap_chan *chan)
2525 {
2526 BT_DBG("ch %p cid 0x%04x", BT_L2CAP_LE_CHAN(chan),
2527 BT_L2CAP_LE_CHAN(chan)->rx.cid);
2528 }
2529
2530 static void l2cap_disconnected(struct bt_l2cap_chan *chan)
2531 {
2532 BT_DBG("ch %p cid 0x%04x", BT_L2CAP_LE_CHAN(chan),
2533 BT_L2CAP_LE_CHAN(chan)->rx.cid);
2534
2535 	/* Cancel RTX work on the signaling channel.
2536 	 * The disconnected callback is always called from the system
2537 	 * workqueue, so this should always succeed.
2538 	 */
2539 (void)k_work_cancel_delayable(&chan->rtx_work);
2540 }
2541
2542 static int l2cap_accept(struct bt_conn *conn, struct bt_l2cap_chan **chan)
2543 {
2544 int i;
2545 static const struct bt_l2cap_chan_ops ops = {
2546 .connected = l2cap_connected,
2547 .disconnected = l2cap_disconnected,
2548 .recv = l2cap_recv,
2549 };
2550
2551 BT_DBG("conn %p handle %u", conn, conn->handle);
2552
2553 for (i = 0; i < ARRAY_SIZE(bt_l2cap_pool); i++) {
2554 struct bt_l2cap *l2cap = &bt_l2cap_pool[i];
2555
2556 if (l2cap->chan.chan.conn) {
2557 continue;
2558 }
2559
2560 l2cap->chan.chan.ops = &ops;
2561 *chan = &l2cap->chan.chan;
2562
2563 return 0;
2564 }
2565
2566 BT_ERR("No available L2CAP context for conn %p", conn);
2567
2568 return -ENOMEM;
2569 }
2570
2571 BT_L2CAP_CHANNEL_DEFINE(le_fixed_chan, BT_L2CAP_CID_LE_SIG, l2cap_accept, NULL);
2572
2573 void bt_l2cap_init(void)
2574 {
2575 if (IS_ENABLED(CONFIG_BT_BREDR)) {
2576 bt_l2cap_br_init();
2577 }
2578 }
2579
2580 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
2581 static int l2cap_le_connect(struct bt_conn *conn, struct bt_l2cap_le_chan *ch,
2582 uint16_t psm)
2583 {
2584 int err;
2585
2586 if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
2587 return -EINVAL;
2588 }
2589
2590 l2cap_chan_tx_init(ch);
2591 l2cap_chan_rx_init(ch);
2592
2593 if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
2594 return -ENOMEM;
2595 }
2596
2597 ch->chan.psm = psm;
2598
2599 if (conn->sec_level < ch->chan.required_sec_level) {
2600 err = bt_conn_set_security(conn, ch->chan.required_sec_level);
2601 if (err) {
2602 goto fail;
2603 }
2604
2605 atomic_set_bit(ch->chan.status,
2606 BT_L2CAP_STATUS_ENCRYPT_PENDING);
2607
2608 return 0;
2609 }
2610
2611 err = l2cap_le_conn_req(ch);
2612 if (err) {
2613 goto fail;
2614 }
2615
2616 return 0;
2617
2618 fail:
2619 bt_l2cap_chan_remove(conn, &ch->chan);
2620 bt_l2cap_chan_del(&ch->chan);
2621 return err;
2622 }
2623
2624 #if defined(CONFIG_BT_L2CAP_ECRED)
2625 static int l2cap_ecred_init(struct bt_conn *conn,
2626 struct bt_l2cap_le_chan *ch, uint16_t psm)
2627 {
2628
2629 if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
2630 return -EINVAL;
2631 }
2632
2633 l2cap_chan_tx_init(ch);
2634 l2cap_chan_rx_init(ch);
2635
2636 if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
2637 return -ENOMEM;
2638 }
2639
2640 ch->chan.psm = psm;
2641
2642 BT_DBG("ch %p psm 0x%02x mtu %u mps %u credits %u", ch, ch->chan.psm,
2643 ch->rx.mtu, ch->rx.mps, ch->rx.init_credits);
2644
2645 return 0;
2646 }
2647
2648 int bt_l2cap_ecred_chan_connect(struct bt_conn *conn,
2649 struct bt_l2cap_chan **chan, uint16_t psm)
2650 {
2651 int i, err;
2652
2653 BT_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);
2654
2655 if (!conn || !chan) {
2656 return -EINVAL;
2657 }
2658
2659 	/* Initialize non-NULL channels */
2660 for (i = 0; i < L2CAP_ECRED_CHAN_MAX; i++) {
2661 if (!chan[i]) {
2662 break;
2663 }
2664
2665 err = l2cap_ecred_init(conn, BT_L2CAP_LE_CHAN(chan[i]), psm);
2666 if (err < 0) {
2667 i--;
2668 goto fail;
2669 }
2670 }
2671
2672 return l2cap_ecred_conn_req(chan, i);
2673 fail:
2674 /* Remove channels added */
2675 for (; i >= 0; i--) {
2676 if (!chan[i]) {
2677 continue;
2678 }
2679
2680 bt_l2cap_chan_remove(conn, chan[i]);
2681 }
2682
2683 return err;
2684 }
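/* Illustrative sketch (not part of the build): connecting two enhanced
 * credit based channels with a single request, assuming application-defined
 * channels `ch0` and `ch1` whose ops and rx.mtu are already set up. The loop
 * above stops at the first NULL entry, so at most L2CAP_ECRED_CHAN_MAX
 * channels are taken from the array.
 *
 *   struct bt_l2cap_chan *chans[] = { &ch0.chan, &ch1.chan, NULL };
 *
 *   err = bt_l2cap_ecred_chan_connect(conn, chans, psm);
 */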
2685
2686 static struct bt_l2cap_le_chan *l2cap_find_pending_reconf(struct bt_conn *conn)
2687 {
2688 struct bt_l2cap_chan *chan;
2689
2690 SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
2691 if (BT_L2CAP_LE_CHAN(chan)->pending_rx_mtu) {
2692 return BT_L2CAP_LE_CHAN(chan);
2693 }
2694 }
2695
2696 return NULL;
2697 }
2698
2699 int bt_l2cap_ecred_chan_reconfigure(struct bt_l2cap_chan **chans, uint16_t mtu)
2700 {
2701 struct bt_l2cap_ecred_reconf_req *req;
2702 struct bt_conn *conn = NULL;
2703 struct bt_l2cap_le_chan *ch;
2704 struct net_buf *buf;
2705 uint8_t ident;
2706 int i;
2707
2708 BT_DBG("chans %p mtu 0x%04x", chans, mtu);
2709
2710 if (!chans) {
2711 return -EINVAL;
2712 }
2713
2714 for (i = 0; i < L2CAP_ECRED_CHAN_MAX; i++) {
2715 if (!chans[i]) {
2716 break;
2717 }
2718
2719 		/* Validate that all channels are from the same connection */
2720 if (conn) {
2721 if (conn != chans[i]->conn) {
2722 return -EINVAL;
2723 }
2724 } else {
2725 conn = chans[i]->conn;
2726 }
2727
2728 /* validate MTU is not decreased */
2729 if (mtu < BT_L2CAP_LE_CHAN(chans[i])->rx.mtu) {
2730 return -EINVAL;
2731 }
2732 }
2733
2734 if (i == 0) {
2735 return -EINVAL;
2736 }
2737
2738 if (!conn) {
2739 return -ENOTCONN;
2740 }
2741
2742 if (conn->type != BT_CONN_TYPE_LE) {
2743 return -EINVAL;
2744 }
2745
2746 	/* Allow only one request at a time */
2747 if (l2cap_find_pending_reconf(conn)) {
2748 return -EBUSY;
2749 }
2750
2751 ident = get_ident();
2752
2753 buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_ECRED_RECONF_REQ,
2754 ident,
2755 sizeof(*req) + (i * sizeof(uint16_t)));
2756 if (!buf) {
2757 return -ENOMEM;
2758 }
2759
2760 req = net_buf_add(buf, sizeof(*req));
2761 req->mtu = sys_cpu_to_le16(mtu);
2762
2763 /* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE
2764 * as the remaining bytes cannot be used.
2765 */
2766 req->mps = sys_cpu_to_le16(MIN(mtu + BT_L2CAP_SDU_HDR_SIZE,
2767 BT_L2CAP_RX_MTU));
2768
2769 for (int j = 0; j < i; j++) {
2770 ch = BT_L2CAP_LE_CHAN(chans[j]);
2771
2772 ch->chan.ident = ident;
2773 ch->pending_rx_mtu = mtu;
2774
2775 net_buf_add_le16(buf, ch->rx.cid);
2776 	}
2777
2778 	/* Use the first channel for sending the request and for its response timeout */
2779 l2cap_chan_send_req(chans[0], buf, L2CAP_CONN_TIMEOUT);
2780
2781 return 0;
2782 }
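/* Illustrative sketch (not part of the build): raising the MTU of the same
 * two channels once they are connected. The new MTU must not be smaller than
 * any channel's current rx.mtu, and only one reconfiguration request may be
 * pending per connection.
 *
 *   err = bt_l2cap_ecred_chan_reconfigure(chans, 512);
 */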
2783
2784 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
2785
2786 int bt_l2cap_chan_connect(struct bt_conn *conn, struct bt_l2cap_chan *chan,
2787 uint16_t psm)
2788 {
2789 BT_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);
2790
2791 if (!conn || conn->state != BT_CONN_CONNECTED) {
2792 return -ENOTCONN;
2793 }
2794
2795 if (!chan) {
2796 return -EINVAL;
2797 }
2798
2799 if (IS_ENABLED(CONFIG_BT_BREDR) &&
2800 conn->type == BT_CONN_TYPE_BR) {
2801 return bt_l2cap_br_chan_connect(conn, chan, psm);
2802 }
2803
2804 if (chan->required_sec_level > BT_SECURITY_L4) {
2805 return -EINVAL;
2806 } else if (chan->required_sec_level == BT_SECURITY_L0) {
2807 chan->required_sec_level = BT_SECURITY_L1;
2808 }
2809
2810 return l2cap_le_connect(conn, BT_L2CAP_LE_CHAN(chan), psm);
2811 }
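/* Illustrative sketch (not part of the build): a minimal client channel,
 * assuming an application-defined receive callback `app_recv` and a PSM in
 * the dynamic range:
 *
 *   static const struct bt_l2cap_chan_ops app_ops = {
 *           .recv = app_recv,
 *   };
 *
 *   static struct bt_l2cap_le_chan app_chan = {
 *           .chan.ops = &app_ops,
 *           .rx.mtu = 256,
 *   };
 *
 *   err = bt_l2cap_chan_connect(conn, &app_chan.chan, 0x0080);
 */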
2812
2813 int bt_l2cap_chan_disconnect(struct bt_l2cap_chan *chan)
2814 {
2815 struct bt_conn *conn = chan->conn;
2816 struct net_buf *buf;
2817 struct bt_l2cap_disconn_req *req;
2818 struct bt_l2cap_le_chan *ch;
2819
2820 if (!conn) {
2821 return -ENOTCONN;
2822 }
2823
2824 if (IS_ENABLED(CONFIG_BT_BREDR) &&
2825 conn->type == BT_CONN_TYPE_BR) {
2826 return bt_l2cap_br_chan_disconnect(chan);
2827 }
2828
2829 ch = BT_L2CAP_LE_CHAN(chan);
2830
2831 BT_DBG("chan %p scid 0x%04x dcid 0x%04x", chan, ch->rx.cid,
2832 ch->tx.cid);
2833
2834 ch->chan.ident = get_ident();
2835
2836 buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_DISCONN_REQ,
2837 ch->chan.ident, sizeof(*req));
2838 if (!buf) {
2839 return -ENOMEM;
2840 }
2841
2842 req = net_buf_add(buf, sizeof(*req));
2843 req->dcid = sys_cpu_to_le16(ch->tx.cid);
2844 req->scid = sys_cpu_to_le16(ch->rx.cid);
2845
2846 l2cap_chan_send_req(chan, buf, L2CAP_DISC_TIMEOUT);
2847 bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECT);
2848
2849 return 0;
2850 }
2851
2852 int bt_l2cap_chan_send(struct bt_l2cap_chan *chan, struct net_buf *buf)
2853 {
2854 struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
2855 int err;
2856
2857 if (!buf) {
2858 return -EINVAL;
2859 }
2860
2861 BT_DBG("chan %p buf %p len %zu", chan, buf, net_buf_frags_len(buf));
2862
2863 if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
2864 return -ENOTCONN;
2865 }
2866
2867 if (atomic_test_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN)) {
2868 return -ESHUTDOWN;
2869 }
2870
2871 if (IS_ENABLED(CONFIG_BT_BREDR) &&
2872 chan->conn->type == BT_CONN_TYPE_BR) {
2873 return bt_l2cap_br_chan_send(chan, buf);
2874 }
2875
2876 	/* Queue the buffer if there are pending segments left from a previous
2877 	 * packet or if no credits are available.
2878 	 */
2879 if (ch->tx_buf || !k_fifo_is_empty(&ch->tx_queue) ||
2880 !atomic_get(&ch->tx.credits)) {
2881 data_sent(buf)->len = 0;
2882 net_buf_put(&ch->tx_queue, buf);
2883 k_work_submit(&ch->tx_work);
2884 return 0;
2885 }
2886
2887 err = l2cap_chan_le_send_sdu(ch, &buf, 0);
2888 if (err < 0) {
2889 if (err == -EAGAIN && data_sent(buf)->len) {
2890 /* Queue buffer if at least one segment could be sent */
2891 net_buf_put(&ch->tx_queue, buf);
2892 return data_sent(buf)->len;
2893 }
2894 BT_ERR("failed to send message %d", err);
2895 }
2896
2897 return err;
2898 }
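/* Illustrative sketch (not part of the build): sending an SDU on a connected
 * channel, assuming `buf` was allocated by the application with enough
 * headroom for the L2CAP headers. On a negative return the stack has not
 * taken ownership of the buffer, so the caller is expected to release it.
 *
 *   ret = bt_l2cap_chan_send(&app_chan.chan, buf);
 *   if (ret < 0) {
 *           net_buf_unref(buf);
 *   }
 */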
2899 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2900