1 /* l2cap.c - L2CAP handling */
2 
3 /*
4  * Copyright (c) 2015-2016 Intel Corporation
5  * Copyright (c) 2023 Nordic Semiconductor
6  *
7  * SPDX-License-Identifier: Apache-2.0
8  */
9 
10 #include <zephyr/kernel.h>
11 #include <string.h>
12 #include <errno.h>
13 #include <zephyr/sys/__assert.h>
14 #include <zephyr/sys/atomic.h>
15 #include <zephyr/sys/check.h>
16 #include <zephyr/sys/iterable_sections.h>
17 #include <zephyr/sys/byteorder.h>
18 #include <zephyr/sys/math_extras.h>
19 #include <zephyr/sys/util.h>
20 
21 #include <zephyr/bluetooth/hci.h>
22 #include <zephyr/bluetooth/bluetooth.h>
23 #include <zephyr/bluetooth/conn.h>
24 #include <zephyr/bluetooth/l2cap.h>
25 #include <zephyr/drivers/bluetooth/hci_driver.h>
26 
27 #define LOG_DBG_ENABLED IS_ENABLED(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
28 
29 #include "buf_view.h"
30 #include "hci_core.h"
31 #include "conn_internal.h"
32 #include "l2cap_internal.h"
33 #include "keys.h"
34 
35 #include <zephyr/logging/log.h>
36 LOG_MODULE_REGISTER(bt_l2cap, CONFIG_BT_L2CAP_LOG_LEVEL);
37 
38 #define LE_CHAN_RTX(_w) CONTAINER_OF(k_work_delayable_from_work(_w), \
39 				     struct bt_l2cap_le_chan, rtx_work)
40 #define CHAN_RX(_w) CONTAINER_OF(_w, struct bt_l2cap_le_chan, rx_work)
41 
42 #define L2CAP_LE_MIN_MTU		23
43 #define L2CAP_ECRED_MIN_MTU		64
44 
45 #define L2CAP_LE_MAX_CREDITS		(CONFIG_BT_BUF_ACL_RX_COUNT - 1)
46 
47 #define L2CAP_LE_CID_DYN_START	0x0040
48 #define L2CAP_LE_CID_DYN_END	0x007f
49 #define L2CAP_LE_CID_IS_DYN(_cid) \
50 	(_cid >= L2CAP_LE_CID_DYN_START && _cid <= L2CAP_LE_CID_DYN_END)
51 
52 #define L2CAP_LE_PSM_FIXED_START 0x0001
53 #define L2CAP_LE_PSM_FIXED_END   0x007f
54 #define L2CAP_LE_PSM_DYN_START   0x0080
55 #define L2CAP_LE_PSM_DYN_END     0x00ff
56 #define L2CAP_LE_PSM_IS_DYN(_psm) \
57 	(_psm >= L2CAP_LE_PSM_DYN_START && _psm <= L2CAP_LE_PSM_DYN_END)
58 
59 #define L2CAP_CONN_TIMEOUT	K_SECONDS(40)
60 #define L2CAP_DISC_TIMEOUT	K_SECONDS(2)
61 #define L2CAP_RTX_TIMEOUT	K_SECONDS(2)
62 
63 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
64 /* Dedicated pool for disconnect buffers so they are guaranteed to be send
65  * even in case of data congestion due to flooding.
66  */
67 NET_BUF_POOL_FIXED_DEFINE(disc_pool, 1,
68 			  BT_L2CAP_BUF_SIZE(
69 				sizeof(struct bt_l2cap_sig_hdr) +
70 				sizeof(struct bt_l2cap_disconn_req)),
71 			  CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);
72 
73 #define l2cap_lookup_ident(conn, ident) __l2cap_lookup_ident(conn, ident, false)
74 #define l2cap_remove_ident(conn, ident) __l2cap_lookup_ident(conn, ident, true)
75 
76 static sys_slist_t servers = SYS_SLIST_STATIC_INIT(&servers);
77 
/* Destroy callback for queued TX PDUs: drops the buffer's reference.
 * @p conn and @p err are unused here; the signature must match the
 * destroy-callback contract of the caller.
 */
static void l2cap_tx_buf_destroy(struct bt_conn *conn, struct net_buf *buf, int err)
{
	net_buf_unref(buf);
}
82 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
83 
/* L2CAP signalling channel specific context (one per connection, see
 * bt_l2cap_pool below).
 */
struct bt_l2cap {
	/* The channel this context is associated with */
	struct bt_l2cap_le_chan	chan;
};
89 
90 static const struct bt_l2cap_ecred_cb *ecred_cb;
91 static struct bt_l2cap bt_l2cap_pool[CONFIG_BT_MAX_CONN];
92 
/* Register the application's enhanced-credit-based (ECRED) event callbacks.
 * Overwrites any previously registered callback set.
 */
void bt_l2cap_register_ecred_cb(const struct bt_l2cap_ecred_cb *cb)
{
	ecred_cb = cb;
}
97 
/* Allocate the next signalling identifier.
 *
 * The ident is a simple wrapping counter; 0 is not a valid identifier, so
 * it is skipped on wrap-around.
 */
static uint8_t get_ident(void)
{
	static uint8_t next_ident;

	if (++next_ident == 0U) {
		/* Wrapped: 0 is reserved as invalid, start over at 1. */
		next_ident = 1U;
	}

	return next_ident;
}
110 
111 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_chan_alloc_cid(struct bt_conn * conn,struct bt_l2cap_chan * chan)112 static struct bt_l2cap_le_chan *l2cap_chan_alloc_cid(struct bt_conn *conn,
113 						     struct bt_l2cap_chan *chan)
114 {
115 	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
116 	uint16_t cid;
117 
118 	/*
119 	 * No action needed if there's already a CID allocated, e.g. in
120 	 * the case of a fixed channel.
121 	 */
122 	if (le_chan->rx.cid > 0) {
123 		return le_chan;
124 	}
125 
126 	for (cid = L2CAP_LE_CID_DYN_START; cid <= L2CAP_LE_CID_DYN_END; cid++) {
127 		if (!bt_l2cap_le_lookup_rx_cid(conn, cid)) {
128 			le_chan->rx.cid = cid;
129 			return le_chan;
130 		}
131 	}
132 
133 	return NULL;
134 }
135 
/* Find the channel on @p conn whose pending signalling identifier matches
 * @p ident. When @p remove is true the channel is also unlinked from the
 * connection's channel list (the `prev` node is tracked for the O(1)
 * slist removal).
 *
 * Returns the LE channel, or NULL if no channel is waiting on @p ident.
 */
static struct bt_l2cap_le_chan *
__l2cap_lookup_ident(struct bt_conn *conn, uint16_t ident, bool remove)
{
	struct bt_l2cap_chan *chan;
	sys_snode_t *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (BT_L2CAP_LE_CHAN(chan)->ident == ident) {
			if (remove) {
				sys_slist_remove(&conn->channels, prev,
						 &chan->node);
			}
			return BT_L2CAP_LE_CHAN(chan);
		}

		prev = &chan->node;
	}

	return NULL;
}
156 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
157 
/* Unlink @p ch from @p conn's channel list, if present.
 * Does not free or otherwise tear down the channel; see bt_l2cap_chan_del().
 */
void bt_l2cap_chan_remove(struct bt_conn *conn, struct bt_l2cap_chan *ch)
{
	struct bt_l2cap_chan *chan;
	sys_snode_t *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (chan == ch) {
			sys_slist_remove(&conn->channels, prev, &chan->node);
			return;
		}

		prev = &chan->node;
	}
}
172 
/* Map a channel state to a human-readable name for log output. */
const char *bt_l2cap_chan_state_str(bt_l2cap_chan_state_t state)
{
	if (state == BT_L2CAP_DISCONNECTED) {
		return "disconnected";
	}

	if (state == BT_L2CAP_CONNECTING) {
		return "connecting";
	}

	if (state == BT_L2CAP_CONFIG) {
		return "config";
	}

	if (state == BT_L2CAP_CONNECTED) {
		return "connected";
	}

	if (state == BT_L2CAP_DISCONNECTING) {
		return "disconnecting";
	}

	return "unknown";
}
190 
191 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
192 #if defined(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
/* Debug variant of the state setter: logs the transition and warns (but
 * still applies it) when the move is not a valid state-machine edge.
 * An unknown target state is logged as an error and NOT applied.
 */
void bt_l2cap_chan_set_state_debug(struct bt_l2cap_chan *chan,
				   bt_l2cap_chan_state_t state,
				   const char *func, int line)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
	bool valid;

	LOG_DBG("chan %p psm 0x%04x %s -> %s", chan, le_chan->psm,
		bt_l2cap_chan_state_str(le_chan->state), bt_l2cap_chan_state_str(state));

	/* Determine whether the requested transition is allowed from the
	 * current state.
	 */
	switch (state) {
	case BT_L2CAP_DISCONNECTED:
		/* Any state may always fall back to disconnected. */
		valid = true;
		break;
	case BT_L2CAP_CONNECTING:
		valid = (le_chan->state == BT_L2CAP_DISCONNECTED);
		break;
	case BT_L2CAP_CONFIG:
		valid = (le_chan->state == BT_L2CAP_CONNECTING);
		break;
	case BT_L2CAP_CONNECTED:
		valid = (le_chan->state == BT_L2CAP_CONFIG ||
			 le_chan->state == BT_L2CAP_CONNECTING);
		break;
	case BT_L2CAP_DISCONNECTING:
		valid = (le_chan->state == BT_L2CAP_CONFIG ||
			 le_chan->state == BT_L2CAP_CONNECTED);
		break;
	default:
		LOG_ERR("%s()%d: unknown (%u) state was set", func, line, state);
		return;
	}

	if (!valid) {
		LOG_WRN("%s()%d: invalid transition", func, line);
	}

	le_chan->state = state;
}
236 #else
/* Non-debug state setter: apply the new state without validation. */
void bt_l2cap_chan_set_state(struct bt_l2cap_chan *chan,
			     bt_l2cap_chan_state_t state)
{
	BT_L2CAP_LE_CHAN(chan)->state = state;
}
242 #endif /* CONFIG_BT_L2CAP_LOG_LEVEL_DBG */
243 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
244 
245 static void cancel_data_ready(struct bt_l2cap_le_chan *lechan);
246 static bool chan_has_data(struct bt_l2cap_le_chan *lechan);
/* Tear down @p chan: cancel pending TX, drain the TX queue, notify the
 * channel user and run the destroy/released hooks.
 *
 * The channel's `ops` pointer is captured first because the destroy hook
 * may invalidate the channel object before the final `released` call.
 */
void bt_l2cap_chan_del(struct bt_l2cap_chan *chan)
{
	const struct bt_l2cap_chan_ops *ops = chan->ops;
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("conn %p chan %p", chan->conn, chan);

	/* Already detached from a connection: only the destroy/released
	 * hooks remain to be run.
	 */
	if (!chan->conn) {
		goto destroy;
	}

	cancel_data_ready(le_chan);

	/* Remove buffers on the PDU TX queue. We can't do that in
	 * `l2cap_chan_destroy()` as it is not called for fixed channels.
	 */
	while (chan_has_data(le_chan)) {
		struct net_buf *buf = net_buf_get(&le_chan->tx_queue, K_NO_WAIT);

		net_buf_unref(buf);
	}

	if (ops->disconnected) {
		ops->disconnected(chan);
	}

	chan->conn = NULL;

destroy:
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Reset internal members of common channel */
	bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTED);
	BT_L2CAP_LE_CHAN(chan)->psm = 0U;
#endif
	if (chan->destroy) {
		chan->destroy(chan);
	}

	if (ops->released) {
		ops->released(chan);
	}
}
289 
290 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* RTX (response timeout) handler: the peer did not answer a signalling
 * request in time, so the channel -- and any siblings created by the same
 * ecred request (same ident) -- are torn down.
 */
static void l2cap_rtx_timeout(struct k_work *work)
{
	struct bt_l2cap_le_chan *chan = LE_CHAN_RTX(work);
	struct bt_conn *conn = chan->chan.conn;

	LOG_ERR("chan %p timeout", chan);

	bt_l2cap_chan_remove(conn, &chan->chan);
	bt_l2cap_chan_del(&chan->chan);

	/* Remove other channels if pending on the same ident
	 * NOTE(review): chan->ident is read here after bt_l2cap_chan_del();
	 * this relies on del not clearing the ident field -- confirm.
	 */
	while ((chan = l2cap_remove_ident(conn, chan->ident))) {
		bt_l2cap_chan_del(&chan->chan);
	}
}
306 
307 static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
308 			       struct net_buf *buf);
309 
l2cap_rx_process(struct k_work * work)310 static void l2cap_rx_process(struct k_work *work)
311 {
312 	struct bt_l2cap_le_chan *ch = CHAN_RX(work);
313 	struct net_buf *buf;
314 
315 	while ((buf = net_buf_get(&ch->rx_queue, K_NO_WAIT))) {
316 		LOG_DBG("ch %p buf %p", ch, buf);
317 		l2cap_chan_le_recv(ch, buf);
318 		net_buf_unref(buf);
319 	}
320 }
321 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
322 
/* Attach @p chan to @p conn's channel list and record its destroy hook. */
void bt_l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
		       bt_l2cap_chan_destroy_t destroy)
{
	chan->conn = conn;
	chan->destroy = destroy;
	sys_slist_append(&conn->channels, &chan->node);

	LOG_DBG("conn %p chan %p", conn, chan);
}
333 
/* Internal channel registration: allocate an RX CID (dynamic channels),
 * attach the channel to the connection and initialize its work items.
 *
 * Returns false when no dynamic CID could be allocated.
 */
static bool l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			   bt_l2cap_chan_destroy_t destroy)
{
	struct bt_l2cap_le_chan *le_chan;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	le_chan = l2cap_chan_alloc_cid(conn, chan);
#else
	le_chan = BT_L2CAP_LE_CHAN(chan);
#endif

	if (!le_chan) {
		LOG_ERR("Unable to allocate L2CAP channel ID");
		return false;
	}

	/* Fresh channel: no status flags set yet. */
	atomic_clear(chan->status);

	bt_l2cap_chan_add(conn, chan, destroy);

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* All dynamic channels have the destroy handler which makes sure that
	 * the RTX work structure is properly released with a cancel sync.
	 * The fixed signal channel is only removed when disconnected and the
	 * disconnected handler is always called from the workqueue itself so
	 * canceling from there should always succeed.
	 */
	k_work_init_delayable(&le_chan->rtx_work, l2cap_rtx_timeout);

	if (L2CAP_LE_CID_IS_DYN(le_chan->rx.cid)) {
		k_work_init(&le_chan->rx_work, l2cap_rx_process);
		k_fifo_init(&le_chan->rx_queue);
		bt_l2cap_chan_set_state(chan, BT_L2CAP_CONNECTING);
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

	return true;
}
372 
/* Connection-established hook: instantiate every registered fixed channel
 * on the new connection (BR/EDR connections are delegated to the classic
 * L2CAP implementation).
 */
void bt_l2cap_connected(struct bt_conn *conn)
{
	struct bt_l2cap_chan *chan;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_connected(conn);
		return;
	}

	STRUCT_SECTION_FOREACH(bt_l2cap_fixed_chan, fchan) {
		struct bt_l2cap_le_chan *le_chan;

		/* A fixed channel may decline this connection. */
		if (fchan->accept(conn, &chan) < 0) {
			continue;
		}

		le_chan = BT_L2CAP_LE_CHAN(chan);

		/* Fill up remaining fixed channel context attached in
		 * fchan->accept()
		 */
		le_chan->rx.cid = fchan->cid;
		le_chan->tx.cid = fchan->cid;

		/* NOTE(review): a CID allocation failure aborts the whole
		 * loop (return, not continue) -- confirm intended.
		 */
		if (!l2cap_chan_add(conn, chan, fchan->destroy)) {
			return;
		}

		k_fifo_init(&le_chan->tx_queue);

		if (chan->ops->connected) {
			chan->ops->connected(chan);
		}

		/* Always set output status to fixed channels */
		atomic_set_bit(chan->status, BT_L2CAP_STATUS_OUT);

		if (chan->ops->status) {
			chan->ops->status(chan, chan->status);
		}
	}
}
416 
/* Connection-lost hook: tear down every channel on @p conn. The SAFE
 * iterator is required because bt_l2cap_chan_del() may invalidate the
 * current node via the channel's destroy hook.
 */
void bt_l2cap_disconnected(struct bt_conn *conn)
{
	struct bt_l2cap_chan *chan, *next;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_disconnected(conn);
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
		bt_l2cap_chan_del(chan);
	}
}
431 
l2cap_create_le_sig_pdu(uint8_t code,uint8_t ident,uint16_t len)432 static struct net_buf *l2cap_create_le_sig_pdu(uint8_t code, uint8_t ident,
433 					       uint16_t len)
434 {
435 	struct bt_l2cap_sig_hdr *hdr;
436 	struct net_buf_pool *pool = NULL;
437 	struct net_buf *buf;
438 
439 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
440 	if (code == BT_L2CAP_DISCONN_REQ) {
441 		pool = &disc_pool;
442 	}
443 #endif
444 	/* Don't wait more than the minimum RTX timeout of 2 seconds */
445 	buf = bt_l2cap_create_pdu_timeout(pool, 0, L2CAP_RTX_TIMEOUT);
446 	if (!buf) {
447 		/* If it was not possible to allocate a buffer within the
448 		 * timeout return NULL.
449 		 */
450 		LOG_ERR("Unable to allocate buffer for op 0x%02x", code);
451 		return NULL;
452 	}
453 
454 	hdr = net_buf_add(buf, sizeof(*hdr));
455 	hdr->code = code;
456 	hdr->ident = ident;
457 	hdr->len = sys_cpu_to_le16(len);
458 
459 	return buf;
460 }
461 
462 /* Send the buffer over the signalling channel. Release it in case of failure.
463  * Any other cleanup in failure to send should be handled by the disconnected
464  * handler.
465  */
l2cap_send_sig(struct bt_conn * conn,struct net_buf * buf)466 static int l2cap_send_sig(struct bt_conn *conn, struct net_buf *buf)
467 {
468 	struct bt_l2cap_chan *ch = bt_l2cap_le_lookup_tx_cid(conn, BT_L2CAP_CID_LE_SIG);
469 	struct bt_l2cap_le_chan *chan = BT_L2CAP_LE_CHAN(ch);
470 
471 	int err = bt_l2cap_send_pdu(chan, buf, NULL, NULL);
472 
473 	if (err) {
474 		net_buf_unref(buf);
475 	}
476 
477 	return err;
478 }
479 
480 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Send a signalling request for @p chan and arm its RTX timer with
 * @p timeout. If the send itself fails the timer is left unarmed; the
 * disconnected handler performs the remaining cleanup.
 */
static void l2cap_chan_send_req(struct bt_l2cap_chan *chan,
				struct net_buf *buf, k_timeout_t timeout)
{
	if (l2cap_send_sig(chan->conn, buf)) {
		return;
	}

	/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part A] page 126:
	 *
	 * The value of this timer is implementation-dependent but the minimum
	 * initial value is 1 second and the maximum initial value is 60
	 * seconds. One RTX timer shall exist for each outstanding signaling
	 * request, including each Echo Request. The timer disappears on the
	 * final expiration, when the response is received, or the physical
	 * link is lost.
	 */
	k_work_reschedule(&(BT_L2CAP_LE_CHAN(chan)->rtx_work), timeout);
}
499 
l2cap_le_conn_req(struct bt_l2cap_le_chan * ch)500 static int l2cap_le_conn_req(struct bt_l2cap_le_chan *ch)
501 {
502 	struct net_buf *buf;
503 	struct bt_l2cap_le_conn_req *req;
504 
505 	ch->ident = get_ident();
506 
507 	buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CONN_REQ,
508 				      ch->ident, sizeof(*req));
509 	if (!buf) {
510 		return -ENOMEM;
511 	}
512 
513 	req = net_buf_add(buf, sizeof(*req));
514 	req->psm = sys_cpu_to_le16(ch->psm);
515 	req->scid = sys_cpu_to_le16(ch->rx.cid);
516 	req->mtu = sys_cpu_to_le16(ch->rx.mtu);
517 	req->mps = sys_cpu_to_le16(ch->rx.mps);
518 	req->credits = sys_cpu_to_le16(ch->rx.credits);
519 
520 	l2cap_chan_send_req(&ch->chan, buf, L2CAP_CONN_TIMEOUT);
521 
522 	return 0;
523 }
524 
525 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Send one enhanced-credit-based connection request covering up to
 * @p channels channels. All channels must share the same PSM and MTU and
 * are stamped with the same signalling ident so responses/timeouts can be
 * matched to the whole batch.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM on buffer
 * exhaustion.
 */
static int l2cap_ecred_conn_req(struct bt_l2cap_chan **chan, int channels)
{
	struct net_buf *buf;
	struct bt_l2cap_ecred_conn_req *req;
	struct bt_l2cap_le_chan *ch;
	int i;
	uint8_t ident;
	uint16_t req_psm;
	uint16_t req_mtu;

	if (!chan || !channels) {
		return -EINVAL;
	}

	ident = get_ident();

	/* One CID entry per requested channel follows the fixed header. */
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_CONN_REQ, ident,
				      sizeof(*req) +
				      (channels * sizeof(uint16_t)));

	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));

	ch = BT_L2CAP_LE_CHAN(chan[0]);

	/* Init common parameters */
	req->psm = sys_cpu_to_le16(ch->psm);
	req->mtu = sys_cpu_to_le16(ch->rx.mtu);
	req->mps = sys_cpu_to_le16(ch->rx.mps);
	req->credits = sys_cpu_to_le16(ch->rx.credits);
	/* NOTE(review): the PDU advertises rx.mtu but the consistency check
	 * below compares tx.mtu -- confirm this asymmetry is intended.
	 */
	req_psm = ch->psm;
	req_mtu = ch->tx.mtu;

	for (i = 0; i < channels; i++) {
		ch = BT_L2CAP_LE_CHAN(chan[i]);

		__ASSERT(ch->psm == req_psm,
			 "The PSM shall be the same for channels in the same request.");
		__ASSERT(ch->tx.mtu == req_mtu,
			 "The MTU shall be the same for channels in the same request.");

		ch->ident = ident;

		net_buf_add_le16(buf, ch->rx.cid);
	}

	l2cap_chan_send_req(*chan, buf, L2CAP_CONN_TIMEOUT);

	return 0;
}
579 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
580 
/* Security-change hook for a single dynamic channel: channels that were
 * waiting for encryption retry their connection request now, or are torn
 * down if the security procedure failed.
 */
static void l2cap_le_encrypt_change(struct bt_l2cap_chan *chan, uint8_t status)
{
	int err;
	struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(chan);

	/* Skip channels that are not pending waiting for encryption */
	if (!atomic_test_and_clear_bit(chan->status,
				       BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
		return;
	}

	/* Non-zero HCI status: encryption failed, give up on the channel. */
	if (status) {
		goto fail;
	}

#if defined(CONFIG_BT_L2CAP_ECRED)
	if (le->ident) {
		struct bt_l2cap_chan *echan[L2CAP_ECRED_CHAN_MAX_PER_REQ];
		struct bt_l2cap_chan *ch;
		int i = 0;

		/* Collect every sibling channel from the original ecred
		 * request (same ident) so they can be retried together.
		 */
		SYS_SLIST_FOR_EACH_CONTAINER(&chan->conn->channels, ch, node) {
			if (le->ident == BT_L2CAP_LE_CHAN(ch)->ident) {
				__ASSERT(i < L2CAP_ECRED_CHAN_MAX_PER_REQ,
					 "There can only be L2CAP_ECRED_CHAN_MAX_PER_REQ channels "
					 "from the same request.");
				atomic_clear_bit(ch->status, BT_L2CAP_STATUS_ENCRYPT_PENDING);
				echan[i++] = ch;
			}
		}

		/* Retry ecred connect */
		l2cap_ecred_conn_req(echan, i);
		return;
	}
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */

	/* Retry to connect */
	err = l2cap_le_conn_req(le);
	if (err) {
		goto fail;
	}

	return;
fail:
	bt_l2cap_chan_remove(chan->conn, chan);
	bt_l2cap_chan_del(chan);
}
629 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
630 
/* Notify all channels on @p conn about a security (encryption) change.
 * BR/EDR connections are delegated to the classic implementation.
 */
void bt_l2cap_security_changed(struct bt_conn *conn, uint8_t hci_status)
{
	struct bt_l2cap_chan *chan, *next;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		l2cap_br_encrypt_change(conn, hci_status);
		return;
	}

	/* SAFE iteration: l2cap_le_encrypt_change() may delete the current
	 * channel on failure.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
		l2cap_le_encrypt_change(chan, hci_status);
#endif

		if (chan->ops->encrypt_change) {
			chan->ops->encrypt_change(chan, hci_status);
		}
	}
}
651 
/* Allocate a PDU buffer from @p pool (or the default conn pool when NULL),
 * reserving headroom for the L2CAP header plus @p reserve extra bytes.
 * Returns NULL if no buffer becomes available within @p timeout.
 */
struct net_buf *bt_l2cap_create_pdu_timeout(struct net_buf_pool *pool,
					    size_t reserve,
					    k_timeout_t timeout)
{
	return bt_conn_create_pdu_timeout(pool,
					  sizeof(struct bt_l2cap_hdr) + reserve,
					  timeout);
}
660 
/* Mark @p le_chan as having data to send: append it (once) to the
 * connection's ready-list and kick the TX processor. The atomic lock
 * guarantees the channel's node is on the list at most once.
 */
static void raise_data_ready(struct bt_l2cap_le_chan *le_chan)
{
	if (!atomic_set(&le_chan->_pdu_ready_lock, 1)) {
		sys_slist_append(&le_chan->chan.conn->l2cap_data_ready,
				 &le_chan->_pdu_ready);
		LOG_DBG("data ready raised %p", le_chan);
	} else {
		LOG_DBG("data ready already %p", le_chan);
	}

	bt_conn_data_ready(le_chan->chan.conn);
}
673 
/* Remove @p le_chan from the connection's ready-list and clear its
 * ready flag. Asserts that the channel was at the head of the list and
 * that the flag was actually set -- callers must only lower a channel
 * they know is currently scheduled.
 */
static void lower_data_ready(struct bt_l2cap_le_chan *le_chan)
{
	struct bt_conn *conn = le_chan->chan.conn;
	__maybe_unused sys_snode_t *s = sys_slist_get(&conn->l2cap_data_ready);

	LOG_DBG("%p", le_chan);

	__ASSERT_NO_MSG(s == &le_chan->_pdu_ready);

	__maybe_unused atomic_t old = atomic_set(&le_chan->_pdu_ready_lock, 0);

	__ASSERT_NO_MSG(old);
}
687 
/* Unconditionally take @p le_chan off the connection's ready-list (it may
 * or may not be on it) and clear its ready flag. Used during teardown.
 */
static void cancel_data_ready(struct bt_l2cap_le_chan *le_chan)
{
	struct bt_conn *conn = le_chan->chan.conn;

	LOG_DBG("%p", le_chan);

	sys_slist_find_and_remove(&conn->l2cap_data_ready,
				  &le_chan->_pdu_ready);
	atomic_set(&le_chan->_pdu_ready_lock, 0);
}
698 
/* Queue @p pdu on @p le_chan's TX queue and mark the channel ready.
 *
 * The PDU must be exclusively owned (exactly one reference) and its
 * user_data area must be able to hold the TX-callback closure. Ownership of
 * @p pdu stays with the caller on error (-EINVAL); on success (0) it is
 * consumed by the TX path.
 */
int bt_l2cap_send_pdu(struct bt_l2cap_le_chan *le_chan, struct net_buf *pdu,
		      bt_conn_tx_cb_t cb, void *user_data)
{
	if (pdu->ref != 1) {
		/* The host may alter the buf contents when fragmenting. Higher
		 * layers cannot expect the buf contents to stay intact. Extra
		 * refs suggests a silent data corruption would occur if not for
		 * this error.
		 */
		LOG_ERR("Expecting 1 ref, got %d", pdu->ref);
		return -EINVAL;
	}

	if (pdu->user_data_size < sizeof(struct closure)) {
		LOG_DBG("not enough room in user_data %d < %d pool %u",
			pdu->user_data_size,
			CONFIG_BT_CONN_TX_USER_DATA_SIZE,
			pdu->pool_id);
		return -EINVAL;
	}

	/* Stash the completion callback in the buffer's user data. */
	make_closure(pdu->user_data, cb, user_data);
	LOG_DBG("push: pdu %p len %d cb %p userdata %p", pdu, pdu->len, cb, user_data);

	net_buf_put(&le_chan->tx_queue, pdu);

	raise_data_ready(le_chan); /* only flags the channel; TX happens later */

	return 0;
}
729 
/* True when the channel has at least one PDU queued for transmission. */
static bool chan_has_data(struct bt_l2cap_le_chan *lechan)
{
	return !k_fifo_is_empty(&lechan->tx_queue);
}
735 
736 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Atomically decrement @p target unless it is already zero.
 *
 * Returns true when a unit was successfully taken, false when the counter
 * was already exhausted.
 */
static bool test_and_dec(atomic_t *target)
{
	for (;;) {
		atomic_t current = atomic_get(target);

		if (current == 0) {
			return false;
		}

		if (atomic_cas(target, current, current - 1)) {
			return true;
		}
		/* CAS lost a race; reload and retry. */
	}
}
752 #endif
753 
/* Consume one TX credit from a dynamic channel; no-op for static channels.
 * When the last credit is spent, the OUT status bit is cleared and the
 * channel user is notified that it must stop sending until credits are
 * replenished by the peer.
 */
static void chan_take_credit(struct bt_l2cap_le_chan *lechan)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	if (!L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
		return;
	}

	if (!test_and_dec(&lechan->tx.credits)) {
		/* Always ensure you have credits before calling this fn */
		__ASSERT_NO_MSG(0);
	}

	/* Notify channel user that it can't send anymore on this channel. */
	if (!atomic_get(&lechan->tx.credits)) {
		LOG_DBG("chan %p paused", lechan);
		atomic_clear_bit(lechan->chan.status, BT_L2CAP_STATUS_OUT);

		if (lechan->chan.ops->status) {
			lechan->chan.ops->status(&lechan->chan, lechan->chan.status);
		}
	}
#endif
}
778 
/* Pick the next channel on @p conn that actually has data to send.
 * Channels found on the ready-list without data are lowered (removed) as
 * the list is walked. Returns NULL when nothing is ready.
 */
static struct bt_l2cap_le_chan *get_ready_chan(struct bt_conn *conn)
{
	struct bt_l2cap_le_chan *lechan;

	sys_snode_t *pdu_ready = sys_slist_peek_head(&conn->l2cap_data_ready);

	if (!pdu_ready) {
		LOG_DBG("nothing to send on this conn");
		return NULL;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->l2cap_data_ready, lechan, _pdu_ready) {
		if (chan_has_data(lechan)) {
			LOG_DBG("sending from chan %p (%s) data %d", lechan,
				L2CAP_LE_CID_IS_DYN(lechan->tx.cid) ? "dynamic" : "static",
				chan_has_data(lechan));
			return lechan;
		}

		/* Ready flag raised but queue empty: drop it from the list. */
		LOG_DBG("chan %p has no data", lechan);
		lower_data_ready(lechan);
	}

	return NULL;
}
804 
l2cap_chan_sdu_sent(struct bt_conn * conn,void * user_data,int err)805 static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data, int err)
806 {
807 	struct bt_l2cap_chan *chan;
808 	uint16_t cid = POINTER_TO_UINT(user_data);
809 
810 	LOG_DBG("conn %p CID 0x%04x err %d", conn, cid, err);
811 
812 	if (err) {
813 		LOG_DBG("error %d when sending SDU", err);
814 
815 		return;
816 	}
817 
818 	chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
819 	if (!chan) {
820 		LOG_DBG("got SDU sent cb for disconnected chan (CID %u)", cid);
821 
822 		return;
823 	}
824 
825 	if (chan->ops->sent) {
826 		chan->ops->sent(chan);
827 	}
828 }
829 
get_pdu_len(struct bt_l2cap_le_chan * lechan,struct net_buf * buf)830 static uint16_t get_pdu_len(struct bt_l2cap_le_chan *lechan,
831 			    struct net_buf *buf)
832 {
833 	if (!L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
834 		/* No segmentation shenanigans on static channels */
835 		return buf->len;
836 	}
837 
838 	return MIN(buf->len, lechan->tx.mps);
839 }
840 
/* True when the channel may send a new K-frame: static channels are never
 * credit-limited, dynamic channels need at least one TX credit.
 */
static bool chan_has_credits(struct bt_l2cap_le_chan *lechan)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	if (!L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
		return true;
	}

	LOG_DBG("chan %p credits %ld", lechan, atomic_get(&lechan->tx.credits));

	return atomic_get(&lechan->tx.credits) >= 1;
#else
	return true;
#endif
}
855 
/* Weak test hook: test builds may override this to observe each
 * l2cap_data_pull() invocation. Default implementation does nothing.
 */
__weak void bt_test_l2cap_data_pull_spy(struct bt_conn *conn,
					struct bt_l2cap_le_chan *lechan,
					size_t amount,
					size_t *length)
{
}
862 
/* Pull up to @p amount bytes of the next PDU for @p conn's TX path.
 *
 * Called by the lower (conn/HCI) layer to fetch data for one ACL fragment.
 * The PDU stays at the head of the channel's TX queue until its last
 * fragment has been pulled; the L2CAP basic header is pushed just-in-time
 * when a new PDU starts. On return, *length holds the number of bytes
 * remaining in the current PDU (including this fragment) so the caller can
 * tell when the PDU is complete. Returns NULL when there is nothing to
 * send right now.
 */
struct net_buf *l2cap_data_pull(struct bt_conn *conn,
				size_t amount,
				size_t *length)
{
	struct bt_l2cap_le_chan *lechan = get_ready_chan(conn);

	if (IS_ENABLED(CONFIG_BT_TESTING)) {
		/* Allow tests to snoop in */
		bt_test_l2cap_data_pull_spy(conn, lechan, amount, length);
	}

	if (!lechan) {
		LOG_DBG("no channel conn %p", conn);
		bt_tx_irq_raise();
		return NULL;
	}

	/* Leave the PDU buffer in the queue until we have sent all its
	 * fragments.
	 *
	 * For SDUs we do the same, we keep it in the queue until all the
	 * segments have been sent, adding the PDU headers just-in-time.
	 */
	struct net_buf *pdu = k_fifo_peek_head(&lechan->tx_queue);

	if (!pdu) {
		bt_tx_irq_raise();
		return NULL;
	}
	/* __ASSERT(pdu, "signaled ready but no PDUs in the TX queue"); */

	if (bt_buf_has_view(pdu)) {
		/* A previous fragment of this PDU is still in flight. */
		LOG_ERR("already have view on %p", pdu);
		return NULL;
	}

	if (lechan->_pdu_remaining == 0 && !chan_has_credits(lechan)) {
		/* We don't have credits to send a new K-frame PDU. Remove the
		 * channel from the ready-list, it will be added back later when
		 * we get more credits.
		 */
		LOG_DBG("no credits for new K-frame on %p", lechan);
		lower_data_ready(lechan);
		return NULL;
	}

	/* Add PDU header */
	if (lechan->_pdu_remaining == 0) {
		struct bt_l2cap_hdr *hdr;
		uint16_t pdu_len = get_pdu_len(lechan, pdu);

		LOG_DBG("Adding L2CAP PDU header: buf %p chan %p len %zu / %zu",
			pdu, lechan, pdu_len, pdu->len);

		LOG_HEXDUMP_DBG(pdu->data, pdu->len, "PDU payload");

		hdr = net_buf_push(pdu, sizeof(*hdr));
		hdr->len = sys_cpu_to_le16(pdu_len);
		hdr->cid = sys_cpu_to_le16(lechan->tx.cid);

		lechan->_pdu_remaining = pdu_len + sizeof(*hdr);
		chan_take_credit(lechan);
	}

	/* Whether the data to be pulled is the last ACL fragment */
	bool last_frag = amount >= lechan->_pdu_remaining;

	/* Whether the data to be pulled is part of the last L2CAP segment. For
	 * static channels, this variable will always be true, even though
	 * static channels don't have the concept of L2CAP segments.
	 */
	bool last_seg = lechan->_pdu_remaining == pdu->len;

	if (last_frag && last_seg) {
		LOG_DBG("last frag of last seg, dequeuing %p", pdu);
		__maybe_unused struct net_buf *b = k_fifo_get(&lechan->tx_queue, K_NO_WAIT);

		__ASSERT_NO_MSG(b == pdu);
	}

	if (last_frag && L2CAP_LE_CID_IS_DYN(lechan->tx.cid)) {
		bool sdu_end = last_frag && last_seg;

		LOG_DBG("adding %s callback", sdu_end ? "`sdu_sent`" : "NULL");
		/* No user callbacks for SDUs */
		make_closure(pdu->user_data,
			     sdu_end ? l2cap_chan_sdu_sent : NULL,
			     sdu_end ? UINT_TO_POINTER(lechan->tx.cid) : NULL);
	}

	if (last_frag) {
		LOG_DBG("done sending PDU");

		/* Lowering the "request to send" and raising it again allows
		 * fair scheduling of channels on an ACL link: the channel is
		 * marked as "ready to send" by adding a reference to it on a
		 * FIFO on `conn`. Adding it again will send it to the back of
		 * the queue.
		 *
		 * TODO: add a user-controlled QoS function.
		 */
		LOG_DBG("chan %p done", lechan);
		lower_data_ready(lechan);

		/* Append channel to list if it still has data */
		if (chan_has_data(lechan)) {
			LOG_DBG("chan %p ready", lechan);
			raise_data_ready(lechan);
		}
	}

	/* This is used by `conn.c` to figure out if the PDU is done sending. */
	*length = lechan->_pdu_remaining;

	if (lechan->_pdu_remaining > amount) {
		lechan->_pdu_remaining -= amount;
	} else {
		lechan->_pdu_remaining = 0;
	}

	return pdu;
}
985 
l2cap_send_reject(struct bt_conn * conn,uint8_t ident,uint16_t reason,void * data,uint8_t data_len)986 static void l2cap_send_reject(struct bt_conn *conn, uint8_t ident,
987 			      uint16_t reason, void *data, uint8_t data_len)
988 {
989 	struct bt_l2cap_cmd_reject *rej;
990 	struct net_buf *buf;
991 
992 	buf = l2cap_create_le_sig_pdu(BT_L2CAP_CMD_REJECT, ident,
993 				      sizeof(*rej) + data_len);
994 	if (!buf) {
995 		return;
996 	}
997 
998 	rej = net_buf_add(buf, sizeof(*rej));
999 	rej->reason = sys_cpu_to_le16(reason);
1000 
1001 	if (data) {
1002 		net_buf_add_mem(buf, data, data_len);
1003 	}
1004 
1005 	l2cap_send_sig(conn, buf);
1006 }
1007 
le_conn_param_rsp(struct bt_l2cap * l2cap,struct net_buf * buf)1008 static void le_conn_param_rsp(struct bt_l2cap *l2cap, struct net_buf *buf)
1009 {
1010 	struct bt_l2cap_conn_param_rsp *rsp = (void *)buf->data;
1011 
1012 	if (buf->len < sizeof(*rsp)) {
1013 		LOG_ERR("Too small LE conn param rsp");
1014 		return;
1015 	}
1016 
1017 	LOG_DBG("LE conn param rsp result %u", sys_le16_to_cpu(rsp->result));
1018 }
1019 
/* Handle an incoming LE Connection Parameter Update Request.
 *
 * Only the central role may apply connection parameter updates, so when
 * we are not central the request is answered with a Command Reject.
 * Otherwise the application is consulted via le_param_req() and an
 * accept/reject response is sent before the actual connection update is
 * initiated with the controller.
 */
static void le_conn_param_update_req(struct bt_l2cap *l2cap, uint8_t ident,
				     struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_le_conn_param param;
	struct bt_l2cap_conn_param_rsp *rsp;
	struct bt_l2cap_conn_param_req *req = (void *)buf->data;
	bool accepted;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn update param req");
		return;
	}

	if (conn->state != BT_CONN_CONNECTED) {
		LOG_WRN("Not connected");
		return;
	}

	/* As peripheral we cannot perform the update; reject the request. */
	if (conn->role != BT_HCI_ROLE_CENTRAL) {
		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_NOT_UNDERSTOOD,
				  NULL, 0);
		return;
	}

	/* Copy parameters out of the request before `buf` is repurposed
	 * below; `req` points into the incoming buffer.
	 */
	param.interval_min = sys_le16_to_cpu(req->min_interval);
	param.interval_max = sys_le16_to_cpu(req->max_interval);
	param.latency = sys_le16_to_cpu(req->latency);
	param.timeout = sys_le16_to_cpu(req->timeout);

	LOG_DBG("min 0x%04x max 0x%04x latency: 0x%04x timeout: 0x%04x", param.interval_min,
		param.interval_max, param.latency, param.timeout);

	/* NOTE: the local `buf` pointer is reassigned to the outgoing
	 * response PDU from here on.
	 */
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_CONN_PARAM_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	accepted = le_param_req(conn, &param);

	rsp = net_buf_add(buf, sizeof(*rsp));
	if (accepted) {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_ACCEPTED);
	} else {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_REJECTED);
	}

	l2cap_send_sig(conn, buf);

	/* Only start the controller-side update after queuing the response. */
	if (accepted) {
		bt_conn_le_conn_update(conn, &param);
	}
}
1074 
/* Find the channel on @p conn whose TX (peer-allocated) CID equals @p cid.
 *
 * @return Matching channel, or NULL when no channel uses that CID.
 */
struct bt_l2cap_chan *bt_l2cap_le_lookup_tx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *ch;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, ch, node) {
		struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(ch);

		if (le->tx.cid == cid) {
			return ch;
		}
	}

	return NULL;
}
1088 
/* Find the channel on @p conn whose RX (locally-allocated) CID equals @p cid.
 *
 * @return Matching channel, or NULL when no channel uses that CID.
 */
struct bt_l2cap_chan *bt_l2cap_le_lookup_rx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *ch;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, ch, node) {
		struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(ch);

		if (le->rx.cid == cid) {
			return ch;
		}
	}

	return NULL;
}
1102 
1103 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Look up a registered L2CAP server by PSM.
 *
 * @return The server registered for @p psm, or NULL if none exists.
 */
struct bt_l2cap_server *bt_l2cap_server_lookup_psm(uint16_t psm)
{
	struct bt_l2cap_server *srv;

	SYS_SLIST_FOR_EACH_CONTAINER(&servers, srv, node) {
		if (srv->psm == psm) {
			return srv;
		}
	}

	return NULL;
}
1116 
bt_l2cap_server_register(struct bt_l2cap_server * server)1117 int bt_l2cap_server_register(struct bt_l2cap_server *server)
1118 {
1119 	if (!server->accept) {
1120 		return -EINVAL;
1121 	}
1122 
1123 	if (server->psm) {
1124 		if (server->psm < L2CAP_LE_PSM_FIXED_START ||
1125 		    server->psm > L2CAP_LE_PSM_DYN_END) {
1126 			return -EINVAL;
1127 		}
1128 
1129 		/* Check if given PSM is already in use */
1130 		if (bt_l2cap_server_lookup_psm(server->psm)) {
1131 			LOG_DBG("PSM already registered");
1132 			return -EADDRINUSE;
1133 		}
1134 	} else {
1135 		uint16_t psm;
1136 
1137 		for (psm = L2CAP_LE_PSM_DYN_START;
1138 		     psm <= L2CAP_LE_PSM_DYN_END; psm++) {
1139 			if (!bt_l2cap_server_lookup_psm(psm)) {
1140 				break;
1141 			}
1142 		}
1143 
1144 		if (psm > L2CAP_LE_PSM_DYN_END) {
1145 			LOG_WRN("No free dynamic PSMs available");
1146 			return -EADDRNOTAVAIL;
1147 		}
1148 
1149 		LOG_DBG("Allocated PSM 0x%04x for new server", psm);
1150 		server->psm = psm;
1151 	}
1152 
1153 	if (server->sec_level > BT_SECURITY_L4) {
1154 		return -EINVAL;
1155 	} else if (server->sec_level < BT_SECURITY_L1) {
1156 		/* Level 0 is only applicable for BR/EDR */
1157 		server->sec_level = BT_SECURITY_L1;
1158 	}
1159 
1160 	LOG_DBG("PSM 0x%04x", server->psm);
1161 
1162 	sys_slist_append(&servers, &server->node);
1163 
1164 	return 0;
1165 }
1166 
1167 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
l2cap_chan_seg_recv_rx_init(struct bt_l2cap_le_chan * chan)1168 static void l2cap_chan_seg_recv_rx_init(struct bt_l2cap_le_chan *chan)
1169 {
1170 	if (chan->rx.mps > BT_L2CAP_RX_MTU) {
1171 		LOG_ERR("Limiting RX MPS by stack buffer size.");
1172 		chan->rx.mps = BT_L2CAP_RX_MTU;
1173 	}
1174 
1175 	chan->_sdu_len = 0;
1176 	chan->_sdu_len_done = 0;
1177 }
1178 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
1179 
/* Initialize the receive-direction parameters of a dynamic channel.
 *
 * Applies default MTU/MPS values when the application has not set them,
 * enforces the MPS/MTU relationship, and seeds the channel with a single
 * initial RX credit.
 */
static void l2cap_chan_rx_init(struct bt_l2cap_le_chan *chan)
{
	LOG_DBG("chan %p", chan);

	/* Redirect to experimental API. */
	IF_ENABLED(CONFIG_BT_L2CAP_SEG_RECV, ({
		if (chan->chan.ops->seg_recv) {
			l2cap_chan_seg_recv_rx_init(chan);
			return;
		}
	}))

	/* Use existing MTU if defined */
	if (!chan->rx.mtu) {
		/* If application has not provide the incoming L2CAP SDU MTU use
		 * an MTU that does not require segmentation.
		 */
		chan->rx.mtu = BT_L2CAP_SDU_RX_MTU;
	}

	/* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE as the
	 * remaining bytes cannot be used.
	 */
	chan->rx.mps = MIN(chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE,
			   BT_L2CAP_RX_MTU);

	/* Truncate MTU if channel have disabled segmentation but still have
	 * set an MTU which requires it.
	 */
	if (!chan->chan.ops->alloc_buf &&
	    (chan->rx.mps < chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE)) {
		LOG_WRN("Segmentation disabled but MTU > MPS, truncating MTU");
		chan->rx.mtu = chan->rx.mps - BT_L2CAP_SDU_HDR_SIZE;
	}

	/* Start with a single RX credit; additional credits are granted
	 * elsewhere by the credit-based flow control logic.
	 */
	atomic_set(&chan->rx.credits, 1);
}
1217 
/** @brief Get @c chan->state.
 *
 * This field does not exist when @kconfig{CONFIG_BT_L2CAP_DYNAMIC_CHANNEL} is
 * disabled. In that case, this function returns @ref BT_L2CAP_CONNECTED since
 * the struct can only represent static channels in that case and static
 * channels are always connected.
 */
static bt_l2cap_chan_state_t bt_l2cap_chan_get_state(struct bt_l2cap_chan *chan)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Dynamic channels track their signaling state explicitly. */
	return BT_L2CAP_LE_CHAN(chan)->state;
#else
	/* Static channels have no state field and are always connected. */
	return BT_L2CAP_CONNECTED;
#endif
}
1233 
l2cap_chan_tx_init(struct bt_l2cap_le_chan * chan)1234 static void l2cap_chan_tx_init(struct bt_l2cap_le_chan *chan)
1235 {
1236 	LOG_DBG("chan %p", chan);
1237 
1238 	(void)memset(&chan->tx, 0, sizeof(chan->tx));
1239 	atomic_set(&chan->tx.credits, 0);
1240 	k_fifo_init(&chan->tx_queue);
1241 }
1242 
/* Add TX credits to a channel and, on the pause -> unpause transition,
 * notify the application and re-arm the channel for transmission.
 */
static void l2cap_chan_tx_give_credits(struct bt_l2cap_le_chan *chan,
				       uint16_t credits)
{
	LOG_DBG("chan %p credits %u", chan, credits);

	atomic_add(&chan->tx.credits, credits);

	/* test_and_set returns the previous bit value, so this body only
	 * runs once per 0 -> 1 transition of BT_L2CAP_STATUS_OUT.
	 */
	if (!atomic_test_and_set_bit(chan->chan.status, BT_L2CAP_STATUS_OUT)) {
		LOG_DBG("chan %p unpaused", chan);
		/* Optional application callback reporting the status change. */
		if (chan->chan.ops->status) {
			chan->chan.ops->status(&chan->chan, chan->chan.status);
		}
		/* Queue the channel for TX if it has pending data. */
		if (chan_has_data(chan)) {
			raise_data_ready(chan);
		}
	}
}
1260 
/* Channel destroy callback: cancel pending work and release all buffers
 * held by the channel so the object can be safely re-used.
 */
static void l2cap_chan_destroy(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
	struct net_buf *buf;

	LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->rx.cid);

	/* Cancel ongoing work. Since the channel can be re-used after this
	 * we need to sync to make sure that the kernel does not have it
	 * in its queue anymore.
	 *
	 * In the case where we are in the context of executing the rtx_work
	 * item, we don't sync as it will deadlock the workqueue.
	 */
	struct k_work_q *rtx_work_queue = le_chan->rtx_work.queue;

	if (rtx_work_queue == NULL || k_current_get() != &rtx_work_queue->thread) {
		/* Safe to block until the work item has fully stopped. */
		k_work_cancel_delayable_sync(&le_chan->rtx_work, &le_chan->rtx_sync);
	} else {
		/* Running on the rtx workqueue itself: non-blocking cancel. */
		k_work_cancel_delayable(&le_chan->rtx_work);
	}

	/* Remove buffers on the SDU RX queue */
	while ((buf = net_buf_get(&le_chan->rx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Destroy segmented SDU if it exists */
	if (le_chan->_sdu) {
		net_buf_unref(le_chan->_sdu);
		le_chan->_sdu = NULL;
		le_chan->_sdu_len = 0U;
	}
}
1295 
le_err_to_result(int err)1296 static uint16_t le_err_to_result(int err)
1297 {
1298 	switch (err) {
1299 	case -ENOMEM:
1300 		return BT_L2CAP_LE_ERR_NO_RESOURCES;
1301 	case -EACCES:
1302 		return BT_L2CAP_LE_ERR_AUTHORIZATION;
1303 	case -EPERM:
1304 		return BT_L2CAP_LE_ERR_KEY_SIZE;
1305 	case -ENOTSUP:
1306 		/* This handle the cases where a fixed channel is registered but
1307 		 * for some reason (e.g. controller not supporting a feature)
1308 		 * cannot be used.
1309 		 */
1310 		return BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
1311 	default:
1312 		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
1313 	}
1314 }
1315 
/* Accept an incoming dynamic channel connection for a registered server.
 *
 * Validates the peer's source CID, asks the server's accept callback to
 * allocate a channel, verifies the channel's mandatory callbacks, then
 * initializes TX parameters from the request and RX parameters from
 * local configuration before marking the channel connected.
 *
 * @param chan Out parameter receiving the accepted channel on success.
 * @return BT_L2CAP_LE_SUCCESS or an L2CAP LE error result code.
 */
static uint16_t l2cap_chan_accept(struct bt_conn *conn,
			       struct bt_l2cap_server *server, uint16_t scid,
			       uint16_t mtu, uint16_t mps, uint16_t credits,
			       struct bt_l2cap_chan **chan)
{
	struct bt_l2cap_le_chan *le_chan;
	int err;

	LOG_DBG("conn %p scid 0x%04x chan %p", conn, scid, chan);

	/* The peer's source CID must be in the LE dynamic range. */
	if (!L2CAP_LE_CID_IS_DYN(scid)) {
		return BT_L2CAP_LE_ERR_INVALID_SCID;
	}

	/* Reject if the peer's CID is already mapped to a channel. */
	*chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
	if (*chan) {
		return BT_L2CAP_LE_ERR_SCID_IN_USE;
	}

	/* Request server to accept the new connection and allocate the
	 * channel.
	 */
	err = server->accept(conn, server, chan);
	if (err < 0) {
		return le_err_to_result(err);
	}

#if defined(CONFIG_BT_L2CAP_SEG_RECV)
	/* With the seg_recv API enabled, exactly one of the two receive
	 * callbacks must be provided.
	 */
	if (!(*chan)->ops->recv == !(*chan)->ops->seg_recv) {
		LOG_ERR("Exactly one of 'recv' or 'seg_recv' must be set");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
#else
	if (!(*chan)->ops->recv) {
		LOG_ERR("Mandatory callback 'recv' missing");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
#endif

	le_chan = BT_L2CAP_LE_CHAN(*chan);

	le_chan->required_sec_level = server->sec_level;

	if (!l2cap_chan_add(conn, *chan, l2cap_chan_destroy)) {
		return BT_L2CAP_LE_ERR_NO_RESOURCES;
	}

	/* Init TX parameters (taken from the peer's request) */
	l2cap_chan_tx_init(le_chan);
	le_chan->tx.cid = scid;
	le_chan->tx.mps = mps;
	le_chan->tx.mtu = mtu;
	l2cap_chan_tx_give_credits(le_chan, credits);

	/* Init RX parameters */
	l2cap_chan_rx_init(le_chan);

	/* Set channel PSM */
	le_chan->psm = server->psm;

	/* Update state */
	bt_l2cap_chan_set_state(*chan, BT_L2CAP_CONNECTED);

	return BT_L2CAP_LE_SUCCESS;
}
1381 
/* Check whether @p conn meets the security level required by @p server.
 *
 * @return BT_L2CAP_LE_SUCCESS when sufficient, otherwise the L2CAP error
 *         code to report in the connection response (authentication or
 *         encryption insufficient).
 */
static uint16_t l2cap_check_security(struct bt_conn *conn,
				 struct bt_l2cap_server *server)
{
	/* Security checks are compiled out entirely in this configuration. */
	if (IS_ENABLED(CONFIG_BT_CONN_DISABLE_SECURITY)) {
		return BT_L2CAP_LE_SUCCESS;
	}

	if (conn->sec_level >= server->sec_level) {
		return BT_L2CAP_LE_SUCCESS;
	}

	/* Link already authenticated/encrypted but still below the required
	 * level: report insufficient authentication.
	 */
	if (conn->sec_level > BT_SECURITY_L1) {
		return BT_L2CAP_LE_ERR_AUTHENTICATION;
	}

	/* If an LTK or an STK is available and encryption is required
	 * (LE security mode 1) but encryption is not enabled, the
	 * service request shall be rejected with the error code
	 * "Insufficient Encryption".
	 */
	if (bt_conn_ltk_present(conn)) {
		return BT_L2CAP_LE_ERR_ENCRYPTION;
	}

	return BT_L2CAP_LE_ERR_AUTHENTICATION;
}
1408 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the request parameters, looks up the server registered for
 * the PSM, checks security, and attempts to accept the channel. A
 * connection response carrying the result is always sent (unless buffer
 * allocation fails), and the channel's connected callback is raised on
 * success after the response has been queued.
 */
static void le_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_le_chan *le_chan;
	struct bt_l2cap_server *server;
	struct bt_l2cap_le_conn_req *req = (void *)buf->data;
	struct bt_l2cap_le_conn_rsp *rsp;
	uint16_t psm, scid, mtu, mps, credits;
	uint16_t result;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		return;
	}

	psm = sys_le16_to_cpu(req->psm);
	scid = sys_le16_to_cpu(req->scid);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	LOG_DBG("psm 0x%02x scid 0x%04x mtu %u mps %u credits %u", psm, scid, mtu, mps, credits);

	/* Spec-mandated minimums; malformed requests are silently dropped. */
	if (mtu < L2CAP_LE_MIN_MTU || mps < L2CAP_LE_MIN_MTU) {
		LOG_ERR("Invalid LE-Conn Req params: mtu %u mps %u", mtu, mps);
		return;
	}

	/* NOTE: `buf` is repurposed for the response; `req` (which points
	 * into the old buffer) is not referenced past this point.
	 */
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CONN_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));

	/* Check if there is a server registered */
	server = bt_l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto rsp;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto rsp;
	}

	result = l2cap_chan_accept(conn, server, scid, mtu, mps, credits,
				   &chan);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto rsp;
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	/* Prepare response protocol data */
	rsp->dcid = sys_cpu_to_le16(le_chan->rx.cid);
	rsp->mps = sys_cpu_to_le16(le_chan->rx.mps);
	rsp->mtu = sys_cpu_to_le16(le_chan->rx.mtu);
	rsp->credits = sys_cpu_to_le16(le_chan->rx.credits);

	result = BT_L2CAP_LE_SUCCESS;

rsp:
	rsp->result = sys_cpu_to_le16(result);

	/* Non-zero return means the response was not queued; skip the
	 * connected callback in that case.
	 */
	if (l2cap_send_sig(conn, buf)) {
		return;
	}

	/* Raise connected callback on success */
	if ((result == BT_L2CAP_LE_SUCCESS) && (chan->ops->connected != NULL)) {
		chan->ops->connected(chan);
	}
}
1489 
1490 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Handle an incoming L2CAP Enhanced Credit Based Connection Request.
 *
 * A single request may ask for up to L2CAP_ECRED_CHAN_MAX_PER_REQ
 * channels (one source CID each). Each CID is accepted or refused
 * individually; refused channels get a 0x0000 destination CID in the
 * response. A response is always attempted, and connected callbacks are
 * raised only for the channels that were established AND whose response
 * was successfully queued.
 */
static void le_ecred_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan[L2CAP_ECRED_CHAN_MAX_PER_REQ];
	struct bt_l2cap_le_chan *ch = NULL;
	struct bt_l2cap_server *server;
	struct bt_l2cap_ecred_conn_req *req;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t mtu, mps, credits, result = BT_L2CAP_LE_SUCCESS;
	uint16_t psm = 0x0000;
	uint16_t scid, dcid[L2CAP_ECRED_CHAN_MAX_PER_REQ];
	int i = 0;
	uint8_t req_cid_count;
	bool rsp_queued = false;

	/* set dcid to zeros here, in case of all connections refused error */
	memset(dcid, 0, sizeof(dcid));
	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		req_cid_count = 0;
		goto response;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));
	/* Remaining payload is the list of 16-bit source CIDs. */
	req_cid_count = buf->len / sizeof(scid);

	if (buf->len > sizeof(dcid)) {
		LOG_ERR("Too large LE conn req packet size");
		/* Clamp so the response carries at most the max CID count. */
		req_cid_count = L2CAP_ECRED_CHAN_MAX_PER_REQ;
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	psm = sys_le16_to_cpu(req->psm);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	LOG_DBG("psm 0x%02x mtu %u mps %u credits %u", psm, mtu, mps, credits);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MTU) {
		LOG_ERR("Invalid ecred conn req params. mtu %u mps %u", mtu, mps);
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	/* Check if there is a server registered */
	server = bt_l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto response;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto response;
	}

	/* Accept or refuse each requested CID independently. The overall
	 * `result` reflects the last failure, if any.
	 */
	while (buf->len >= sizeof(scid)) {
		uint16_t rc;
		scid = net_buf_pull_le16(buf);

		rc = l2cap_chan_accept(conn, server, scid, mtu, mps,
				credits, &chan[i]);
		if (rc != BT_L2CAP_LE_SUCCESS) {
			result = rc;
		}
		switch (rc) {
		case BT_L2CAP_LE_SUCCESS:
			ch = BT_L2CAP_LE_CHAN(chan[i]);
			dcid[i++] = sys_cpu_to_le16(ch->rx.cid);
			continue;
		/* Some connections refused – invalid Source CID */
		/* Some connections refused – Source CID already allocated */
		/* Some connections refused – not enough resources
		 * available.
		 */
		default:
			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			dcid[i++] = 0x0000;
			continue;
		}
	}

response:
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_CONN_RSP, ident,
				      sizeof(*rsp) +
				      (sizeof(scid) * req_cid_count));
	if (!buf) {
		goto callback;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));
	/* `ch` is the last successfully accepted channel; all accepted
	 * channels share the same local RX parameters.
	 */
	if (ch) {
		rsp->mps = sys_cpu_to_le16(ch->rx.mps);
		rsp->mtu = sys_cpu_to_le16(ch->rx.mtu);
		rsp->credits = sys_cpu_to_le16(ch->rx.credits);
	}
	rsp->result = sys_cpu_to_le16(result);

	net_buf_add_mem(buf, dcid, sizeof(scid) * req_cid_count);

	if (l2cap_send_sig(conn, buf)) {
		goto callback;
	}

	rsp_queued = true;

callback:
	if (ecred_cb && ecred_cb->ecred_conn_req) {
		ecred_cb->ecred_conn_req(conn, result, psm);
	}
	if (rsp_queued) {
		for (i = 0; i < req_cid_count; i++) {
			/* Raise connected callback for established channels */
			if ((dcid[i] != 0x00) && (chan[i]->ops->connected != NULL)) {
				chan[i]->ops->connected(chan[i]);
			}
		}
	}
}
1618 
/* Handle an incoming L2CAP Credit Based Reconfigure Request.
 *
 * Validates the new MTU/MPS against spec minimums and against each
 * listed channel (MTU must not decrease; MPS must not decrease when
 * more than one channel is listed), applies the new TX parameters on
 * success, and always sends a reconfigure response with the result.
 */
static void le_ecred_reconf_req(struct bt_l2cap *l2cap, uint8_t ident,
				struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chans[L2CAP_ECRED_CHAN_MAX_PER_REQ];
	struct bt_l2cap_ecred_reconf_req *req;
	struct bt_l2cap_ecred_reconf_rsp *rsp;
	uint16_t mtu, mps;
	uint16_t scid, result = BT_L2CAP_RECONF_SUCCESS;
	int chan_count = 0;
	bool mps_reduced = false;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small ecred reconf req packet size");
		return;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));

	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);

	if (mps < L2CAP_ECRED_MIN_MTU) {
		result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
		goto response;
	}

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = BT_L2CAP_RECONF_INVALID_MTU;
		goto response;
	}

	/* The specification only allows up to 5 CIDs in this packet */
	if (buf->len > (L2CAP_ECRED_CHAN_MAX_PER_REQ * sizeof(scid))) {
		result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
		goto response;
	}

	/* First pass: resolve and validate every listed channel before
	 * mutating any of them.
	 */
	while (buf->len >= sizeof(scid)) {
		struct bt_l2cap_chan *chan;
		scid = net_buf_pull_le16(buf);
		chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
		if (!chan) {
			result = BT_L2CAP_RECONF_INVALID_CID;
			goto response;
		}

		if (BT_L2CAP_LE_CHAN(chan)->tx.mtu > mtu) {
			LOG_ERR("chan %p decreased MTU %u -> %u", chan,
				BT_L2CAP_LE_CHAN(chan)->tx.mtu, mtu);
			result = BT_L2CAP_RECONF_INVALID_MTU;
			goto response;
		}

		if (BT_L2CAP_LE_CHAN(chan)->tx.mps > mps) {
			mps_reduced = true;
		}

		chans[chan_count] = chan;
		chan_count++;
	}

	/* As per BT Core Spec V5.2 Vol. 3, Part A, section 7.11
	 * The request (...) shall not decrease the MPS of a channel
	 * if more than one channel is specified.
	 */
	if (mps_reduced && chan_count > 1) {
		result = BT_L2CAP_RECONF_INVALID_MPS;
		goto response;
	}

	/* Second pass: apply new parameters and notify the application. */
	for (int i = 0; i < chan_count; i++) {
		BT_L2CAP_LE_CHAN(chans[i])->tx.mtu = mtu;
		BT_L2CAP_LE_CHAN(chans[i])->tx.mps = mps;

		if (chans[i]->ops->reconfigured) {
			chans[i]->ops->reconfigured(chans[i]);
		}
	}

	LOG_DBG("mtu %u mps %u", mtu, mps);

response:
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_RECONF_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->result = sys_cpu_to_le16(result);

	l2cap_send_sig(conn, buf);
}
1713 
/* Handle an incoming L2CAP Credit Based Reconfigure Response.
 *
 * Applies the pending RX MTU to every channel that shares the request's
 * ident on success, clears the pending state either way, and notifies
 * the application via the reconfigured callback.
 */
static void le_ecred_reconf_rsp(struct bt_l2cap *l2cap, uint8_t ident,
				struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_ecred_reconf_rsp *rsp;
	struct bt_l2cap_le_chan *ch;
	uint16_t result;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small ecred reconf rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	result = sys_le16_to_cpu(rsp->result);

	/* Loop terminates because ch->ident is cleared below, removing the
	 * channel from subsequent lookups.
	 */
	while ((ch = l2cap_lookup_ident(conn, ident))) {
		/* Stop timer started on REQ send. The timer is only set on one
		 * of the channels, but we don't want to make assumptions on
		 * which one it is.
		 */
		k_work_cancel_delayable(&ch->rtx_work);

		if (result == BT_L2CAP_LE_SUCCESS) {
			ch->rx.mtu = ch->pending_rx_mtu;
		}

		ch->pending_rx_mtu = 0;
		ch->ident = 0U;

		if (ch->chan.ops->reconfigured) {
			ch->chan.ops->reconfigured(&ch->chan);
		}
	}
}
1749 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
1750 
/* Unlink and return the channel on @p conn whose RX CID matches @p cid.
 *
 * Only channels in the dynamic CID range can be removed; fixed channels
 * are protected against accidental removal.
 *
 * @return The removed channel, or NULL when none matched.
 */
static struct bt_l2cap_le_chan *l2cap_remove_rx_cid(struct bt_conn *conn,
						    uint16_t cid)
{
	struct bt_l2cap_chan *cur;
	sys_snode_t *prev = NULL;

	/* Protect fixed channels against accidental removal */
	if (!L2CAP_LE_CID_IS_DYN(cid)) {
		return NULL;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, cur, node) {
		struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(cur);

		if (le->rx.cid == cid) {
			/* `prev` is needed for O(1) single-list unlink. */
			sys_slist_remove(&conn->channels, prev, &cur->node);
			return le;
		}

		prev = &cur->node;
	}

	return NULL;
}
1773 
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Removes the targeted channel, answers with a Disconnection Response
 * (or a Command Reject with the offending CIDs when the channel does
 * not exist), and deletes the channel.
 */
static void le_disconn_req(struct bt_l2cap *l2cap, uint8_t ident,
			   struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_disconn_req *req = (void *)buf->data;
	struct bt_l2cap_disconn_rsp *rsp;
	uint16_t dcid;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		return;
	}

	dcid = sys_le16_to_cpu(req->dcid);

	LOG_DBG("dcid 0x%04x scid 0x%04x", dcid, sys_le16_to_cpu(req->scid));

	/* The peer's dcid is our rx.cid; unlink the channel from the conn. */
	chan = l2cap_remove_rx_cid(conn, dcid);
	if (!chan) {
		struct bt_l2cap_cmd_reject_cid_data data;

		/* CIDs are already little-endian in the request. */
		data.scid = req->scid;
		data.dcid = req->dcid;

		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID, &data,
				  sizeof(data));
		return;
	}

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_DISCONN_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	/* Fill the response from the channel before it is deleted below. */
	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->dcid = sys_cpu_to_le16(chan->rx.cid);
	rsp->scid = sys_cpu_to_le16(chan->tx.cid);

	bt_l2cap_chan_del(&chan->chan);

	l2cap_send_sig(conn, buf);
}
1818 
/* Elevate link security in response to an L2CAP security error result.
 *
 * Maps the peer's error (insufficient encryption or authentication) to
 * the next security level to request and initiates the security change.
 * Marks the channel with BT_L2CAP_STATUS_ENCRYPT_PENDING so that the
 * connection attempt can be retried when encryption completes.
 *
 * @return 0 when the security change was started, -EINPROGRESS when one
 *         is already pending, -EALREADY when the level is already
 *         sufficient, -EINVAL for unexpected error codes, or a negative
 *         error from bt_conn_set_security().
 */
static int l2cap_change_security(struct bt_l2cap_le_chan *chan, uint16_t err)
{
	struct bt_conn *conn = chan->chan.conn;
	bt_security_t sec;
	int ret;

	if (atomic_test_bit(chan->chan.status,
			    BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
		return -EINPROGRESS;
	}

	switch (err) {
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		/* Encryption error: L2 (encrypted link) is sufficient. */
		if (conn->sec_level >= BT_SECURITY_L2) {
			return -EALREADY;
		}

		sec = BT_SECURITY_L2;
		break;
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
		/* Step up one level at a time until L4. */
		if (conn->sec_level < BT_SECURITY_L2) {
			sec = BT_SECURITY_L2;
		} else if (conn->sec_level < BT_SECURITY_L3) {
			sec = BT_SECURITY_L3;
		} else if (conn->sec_level < BT_SECURITY_L4) {
			sec = BT_SECURITY_L4;
		} else {
			return -EALREADY;
		}
		break;
	default:
		return -EINVAL;
	}

	ret = bt_conn_set_security(chan->chan.conn, sec);
	if (ret < 0) {
		return ret;
	}

	/* Remember that a security upgrade is in flight for this channel. */
	atomic_set_bit(chan->chan.status, BT_L2CAP_STATUS_ENCRYPT_PENDING);

	return 0;
}
1862 
1863 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Handle an incoming L2CAP Enhanced Credit Based Connection Response.
 *
 * Depending on the result, this either retries after a security upgrade
 * (authentication/encryption errors), completes each pending channel
 * from the per-channel dcid list (success / partial refusal), or tears
 * down every channel created for this ident (PSM not supported etc.).
 */
static void le_ecred_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t dcid, mtu, mps, credits, result, psm;
	uint8_t attempted = 0;
	uint8_t succeeded = 0;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small ecred conn rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	LOG_DBG("mtu 0x%04x mps 0x%04x credits 0x%04x result %u", mtu, mps, credits, result);

	/* Capture the PSM for the ecred callback before the channels are
	 * potentially removed below.
	 */
	chan = l2cap_lookup_ident(conn, ident);
	if (chan) {
		psm = chan->psm;
	} else {
		psm = 0x0000;
	}

	switch (result) {
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		while ((chan = l2cap_lookup_ident(conn, ident))) {

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->rtx_work);

			/* If security needs changing wait it to be completed */
			if (!l2cap_change_security(chan, result)) {
				return;
			}
			/* Security upgrade could not be started: drop the
			 * channel. Removal also ends the lookup loop.
			 */
			bt_l2cap_chan_remove(conn, &chan->chan);
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	case BT_L2CAP_LE_SUCCESS:
	/* Some connections refused – invalid Source CID */
	case BT_L2CAP_LE_ERR_INVALID_SCID:
	/* Some connections refused – Source CID already allocated */
	case BT_L2CAP_LE_ERR_SCID_IN_USE:
	/* Some connections refused – not enough resources available */
	case BT_L2CAP_LE_ERR_NO_RESOURCES:
		/* Channels are matched to dcid values in request order; the
		 * loop ends when every channel's ident has been cleared or
		 * the channel removed.
		 */
		while ((chan = l2cap_lookup_ident(conn, ident))) {
			struct bt_l2cap_chan *c;

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->rtx_work);

			if (buf->len < sizeof(dcid)) {
				LOG_ERR("Fewer dcid values than expected");
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			dcid = net_buf_pull_le16(buf);
			attempted++;

			LOG_DBG("dcid 0x%04x", dcid);

			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			if (!dcid) {
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			c = bt_l2cap_le_lookup_tx_cid(conn, dcid);
			if (c) {
				/* If a device receives a
				 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet
				 * with an already assigned Destination CID,
				 * then both the original channel and the new
				 * channel shall be immediately discarded and
				 * not used.
				 */
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				bt_l2cap_chan_disconnect(c);
				continue;
			}

			chan->tx.cid = dcid;

			/* Clearing ident removes this channel from further
			 * l2cap_lookup_ident() hits.
			 */
			chan->ident = 0U;

			chan->tx.mtu = mtu;
			chan->tx.mps = mps;

			/* Update state */
			bt_l2cap_chan_set_state(&chan->chan,
						BT_L2CAP_CONNECTED);

			if (chan->chan.ops->connected) {
				chan->chan.ops->connected(&chan->chan);
			}

			/* Give credits */
			l2cap_chan_tx_give_credits(chan, credits);

			succeeded++;
		}
		break;
	case BT_L2CAP_LE_ERR_PSM_NOT_SUPP:
	default:
		/* All channels refused outright: tear them all down. */
		while ((chan = l2cap_remove_ident(conn, ident))) {
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	}

	if (ecred_cb && ecred_cb->ecred_conn_rsp) {
		ecred_cb->ecred_conn_rsp(conn, result, attempted, succeeded, psm);
	}
}
1992 #endif /* CONFIG_BT_L2CAP_ECRED */
1993 
/* Handle an incoming LE Credit Based Connection Response.
 *
 * On success the channel is completed with the peer's TX parameters and
 * the connected callback is raised. On security errors a security
 * upgrade is attempted (keeping the channel for a retry); any other
 * failure deletes the channel.
 */
static void le_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_le_conn_rsp *rsp = (void *)buf->data;
	uint16_t dcid, mtu, mps, credits, result;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small LE conn rsp packet size");
		return;
	}

	dcid = sys_le16_to_cpu(rsp->dcid);
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	LOG_DBG("dcid 0x%04x mtu %u mps %u credits %u result 0x%04x", dcid, mtu, mps, credits,
		result);

	/* Keep the channel in case of security errors */
	if (result == BT_L2CAP_LE_SUCCESS ||
	    result == BT_L2CAP_LE_ERR_AUTHENTICATION ||
	    result == BT_L2CAP_LE_ERR_ENCRYPTION) {
		chan = l2cap_lookup_ident(conn, ident);
	} else {
		chan = l2cap_remove_ident(conn, ident);
	}

	if (!chan) {
		LOG_ERR("Cannot find channel for ident %u", ident);
		return;
	}

	/* Cancel RTX work */
	k_work_cancel_delayable(&chan->rtx_work);

	/* Reset ident since it got a response */
	chan->ident = 0U;

	switch (result) {
	case BT_L2CAP_LE_SUCCESS:
		/* Adopt the peer's TX parameters from the response. */
		chan->tx.cid = dcid;
		chan->tx.mtu = mtu;
		chan->tx.mps = mps;

		/* Update state */
		bt_l2cap_chan_set_state(&chan->chan, BT_L2CAP_CONNECTED);

		if (chan->chan.ops->connected) {
			chan->chan.ops->connected(&chan->chan);
		}

		/* Give credits */
		l2cap_chan_tx_give_credits(chan, credits);

		break;
	case BT_L2CAP_LE_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		/* If security needs changing wait it to be completed */
		if (l2cap_change_security(chan, result) == 0) {
			return;
		}
		/* Security upgrade failed to start: fall through to delete. */
		bt_l2cap_chan_remove(conn, &chan->chan);
		__fallthrough;
	default:
		bt_l2cap_chan_del(&chan->chan);
	}
}
2065 
/* Handle an incoming LE Disconnection Response.
 *
 * The peer has acknowledged our disconnect request: remove the channel
 * matching the returned source CID (our RX CID) and tear it down.
 */
static void le_disconn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			   struct net_buf *buf)
{
	struct bt_l2cap_disconn_rsp *rsp = (void *)buf->data;
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *le_chan;
	uint16_t source_cid;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small LE disconn rsp packet size");
		return;
	}

	source_cid = sys_le16_to_cpu(rsp->scid);

	LOG_DBG("dcid 0x%04x scid 0x%04x", sys_le16_to_cpu(rsp->dcid), source_cid);

	le_chan = l2cap_remove_rx_cid(conn, source_cid);
	if (le_chan) {
		bt_l2cap_chan_del(&le_chan->chan);
	}
}
2090 
/* Handle an LE Flow Control Credit packet from the peer.
 *
 * Adds the advertised credits to the matching channel's TX credit pool.
 * A total exceeding UINT16_MAX indicates a misbehaving peer, in which
 * case the channel is disconnected.
 */
static void le_credits(struct bt_l2cap *l2cap, uint8_t ident,
		       struct net_buf *buf)
{
	struct bt_l2cap_le_credits *pdu = (void *)buf->data;
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *le_chan;
	struct bt_l2cap_chan *chan;
	uint16_t cid;
	uint16_t credits;

	if (buf->len < sizeof(*pdu)) {
		LOG_ERR("Too small LE Credits packet size");
		return;
	}

	cid = sys_le16_to_cpu(pdu->cid);
	credits = sys_le16_to_cpu(pdu->credits);

	LOG_DBG("cid 0x%04x credits %u", cid, credits);

	chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
	if (!chan) {
		LOG_ERR("Unable to find channel of LE Credits packet");
		return;
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	/* The resulting credit count must stay representable in 16 bits */
	if (atomic_get(&le_chan->tx.credits) + credits > UINT16_MAX) {
		LOG_ERR("Credits overflow");
		bt_l2cap_chan_disconnect(chan);
		return;
	}

	l2cap_chan_tx_give_credits(le_chan, credits);

	LOG_DBG("chan %p total credits %lu", le_chan, atomic_get(&le_chan->tx.credits));
}
2128 
/* Handle a Command Reject from the peer: destroy every channel that is
 * still waiting on the rejected identifier.
 */
static void reject_cmd(struct bt_l2cap *l2cap, uint8_t ident,
		       struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *le_chan;

	for (le_chan = l2cap_remove_ident(conn, ident); le_chan;
	     le_chan = l2cap_remove_ident(conn, ident)) {
		bt_l2cap_chan_del(&le_chan->chan);
	}
}
2139 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2140 
/* Receive handler for the LE signaling channel.
 *
 * Parses the signaling header and dispatches each command/response to
 * its specific handler. Malformed PDUs (short header, length mismatch,
 * zero ident) are dropped; unknown opcodes get a Command Reject with
 * reason "Not Understood". Always returns 0 (buffer handled).
 */
static int l2cap_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	struct bt_l2cap_le_chan *l2chan = CONTAINER_OF(chan, struct bt_l2cap_le_chan, chan);
	struct bt_l2cap *l2cap = CONTAINER_OF(l2chan, struct bt_l2cap, chan);
	struct bt_l2cap_sig_hdr *hdr;
	uint16_t len;

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Too small L2CAP signaling PDU");
		return 0;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	len = sys_le16_to_cpu(hdr->len);

	LOG_DBG("Signaling code 0x%02x ident %u len %u", hdr->code, hdr->ident, len);

	/* Header length field must exactly match the remaining payload */
	if (buf->len != len) {
		LOG_ERR("L2CAP length mismatch (%u != %u)", buf->len, len);
		return 0;
	}

	/* Ident 0 is never accepted */
	if (!hdr->ident) {
		LOG_ERR("Invalid ident value in L2CAP PDU");
		return 0;
	}

	switch (hdr->code) {
	case BT_L2CAP_CONN_PARAM_RSP:
		le_conn_param_rsp(l2cap, buf);
		break;
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	case BT_L2CAP_LE_CONN_REQ:
		le_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_LE_CONN_RSP:
		le_conn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_REQ:
		le_disconn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_RSP:
		le_disconn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_LE_CREDITS:
		le_credits(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_CMD_REJECT:
		reject_cmd(l2cap, hdr->ident, buf);
		break;
#if defined(CONFIG_BT_L2CAP_ECRED)
	case BT_L2CAP_ECRED_CONN_REQ:
		le_ecred_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_CONN_RSP:
		le_ecred_conn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_RECONF_REQ:
		le_ecred_reconf_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_RECONF_RSP:
		le_ecred_reconf_rsp(l2cap, hdr->ident, buf);
		break;
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */
#else
	case BT_L2CAP_CMD_REJECT:
		/* Ignored */
		break;
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
	case BT_L2CAP_CONN_PARAM_REQ:
		/* Only a central can act on a connection parameter request;
		 * otherwise fall through to the reject path below.
		 */
		if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
			le_conn_param_update_req(l2cap, hdr->ident, buf);
			break;
		}
		__fallthrough;
	default:
		LOG_WRN("Rejecting unknown L2CAP PDU code 0x%02x", hdr->code);
		l2cap_send_reject(chan->conn, hdr->ident,
				  BT_L2CAP_REJ_NOT_UNDERSTOOD, NULL, 0);
		break;
	}

	return 0;
}
2225 
2226 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_chan_shutdown(struct bt_l2cap_chan * chan)2227 static void l2cap_chan_shutdown(struct bt_l2cap_chan *chan)
2228 {
2229 	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2230 	struct net_buf *buf;
2231 
2232 	LOG_DBG("chan %p", chan);
2233 
2234 	atomic_set_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN);
2235 
2236 	/* Destroy segmented SDU if it exists */
2237 	if (le_chan->_sdu) {
2238 		net_buf_unref(le_chan->_sdu);
2239 		le_chan->_sdu = NULL;
2240 		le_chan->_sdu_len = 0U;
2241 	}
2242 
2243 	/* Remove buffers on the TX queue */
2244 	while ((buf = net_buf_get(&le_chan->tx_queue, K_NO_WAIT))) {
2245 		l2cap_tx_buf_destroy(chan->conn, buf, -ESHUTDOWN);
2246 	}
2247 
2248 	/* Remove buffers on the RX queue */
2249 	while ((buf = net_buf_get(&le_chan->rx_queue, K_NO_WAIT))) {
2250 		net_buf_unref(buf);
2251 	}
2252 
2253 	/* Update status */
2254 	if (chan->ops->status) {
2255 		chan->ops->status(chan, chan->status);
2256 	}
2257 }
2258 
/* Replenish the peer's credits for @p chan by sending an LE Flow Control
 * Credit PDU. Must only be called on a connected channel; if no buffer
 * can be allocated for the signaling PDU, the channel is shut down.
 */
static void l2cap_chan_send_credits(struct bt_l2cap_le_chan *chan,
				    uint16_t credits)
{
	struct bt_l2cap_le_credits *payload;
	struct net_buf *pdu;

	__ASSERT_NO_MSG(bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED);

	pdu = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CREDITS, get_ident(),
				      sizeof(*payload));
	if (!pdu) {
		LOG_ERR("Unable to send credits update");
		/* Disconnect would probably not work either so the only
		 * option left is to shutdown the channel.
		 */
		l2cap_chan_shutdown(&chan->chan);
		return;
	}

	/* RX credits must have been exhausted before we replenish them */
	__ASSERT_NO_MSG(atomic_get(&chan->rx.credits) == 0);
	atomic_set(&chan->rx.credits, credits);

	payload = net_buf_add(pdu, sizeof(*payload));
	payload->cid = sys_cpu_to_le16(chan->rx.cid);
	payload->credits = sys_cpu_to_le16(credits);

	l2cap_send_sig(chan->chan.conn, pdu);

	LOG_DBG("chan %p credits %lu", chan, atomic_get(&chan->rx.credits));
}
2289 
2290 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
/* Build and send an LE Flow Control Credit PDU for @p cid granting
 * @p credits. Returns 0 on success, -ENOBUFS if no signaling buffer is
 * available, or the error from l2cap_send_sig().
 */
static int l2cap_chan_send_credits_pdu(struct bt_conn *conn, uint16_t cid, uint16_t credits)
{
	struct bt_l2cap_le_credits *ev;
	struct net_buf *buf;

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CREDITS, get_ident(), sizeof(*ev));
	if (!buf) {
		return -ENOBUFS;
	}

	ev = net_buf_add(buf, sizeof(*ev));
	ev->cid = sys_cpu_to_le16(cid);
	ev->credits = sys_cpu_to_le16(credits);

	return l2cap_send_sig(conn, buf);
}
2309 
2310 /**
2311  * Combination of @ref atomic_add and @ref u16_add_overflow. Leaves @p
2312  * target unchanged if an overflow would occur. Assumes the current
2313  * value of @p target is representable by uint16_t.
2314  */
atomic_add_safe_u16(atomic_t * target,uint16_t addition)2315 static bool atomic_add_safe_u16(atomic_t *target, uint16_t addition)
2316 {
2317 	uint16_t target_old, target_new;
2318 
2319 	do {
2320 		target_old = atomic_get(target);
2321 		if (u16_add_overflow(target_old, addition, &target_new)) {
2322 			return true;
2323 		}
2324 	} while (!atomic_cas(target, target_old, target_new));
2325 
2326 	return false;
2327 }
2328 
/* Grant the peer additional RX credits on a seg_recv-style channel.
 *
 * Validates the channel and the amount, accumulates the credits locally
 * (rejecting a 16-bit overflow) and, if the channel is already connected,
 * immediately advertises them to the peer with a credit PDU.
 */
int bt_l2cap_chan_give_credits(struct bt_l2cap_chan *chan, uint16_t additional_credits)
{
	struct bt_l2cap_le_chan *le_chan;

	if (!chan || !chan->ops) {
		LOG_ERR("%s: Invalid chan object.", __func__);
		return -EINVAL;
	}

	if (!chan->ops->seg_recv) {
		LOG_ERR("%s: Available only with seg_recv.", __func__);
		return -EINVAL;
	}

	if (additional_credits == 0) {
		LOG_ERR("%s: Refusing to give 0.", __func__);
		return -EINVAL;
	}

	if (bt_l2cap_chan_get_state(chan) == BT_L2CAP_CONNECTING) {
		LOG_ERR("%s: Cannot give credits while connecting.", __func__);
		return -EBUSY;
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	if (atomic_add_safe_u16(&le_chan->rx.credits, additional_credits)) {
		LOG_ERR("%s: Overflow.", __func__);
		return -EOVERFLOW;
	}

	if (bt_l2cap_chan_get_state(chan) == BT_L2CAP_CONNECTED) {
		int err = l2cap_chan_send_credits_pdu(chan->conn, le_chan->rx.cid,
						      additional_credits);

		if (err) {
			LOG_ERR("%s: PDU failed %d.", __func__, err);
			return err;
		}
	}

	return 0;
}
2370 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
2371 
bt_l2cap_chan_recv_complete(struct bt_l2cap_chan * chan,struct net_buf * buf)2372 int bt_l2cap_chan_recv_complete(struct bt_l2cap_chan *chan, struct net_buf *buf)
2373 {
2374 	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2375 	struct bt_conn *conn = chan->conn;
2376 
2377 	__ASSERT_NO_MSG(chan);
2378 	__ASSERT_NO_MSG(buf);
2379 
2380 	net_buf_unref(buf);
2381 
2382 	if (!conn) {
2383 		return -ENOTCONN;
2384 	}
2385 
2386 	if (conn->type != BT_CONN_TYPE_LE) {
2387 		return -ENOTSUP;
2388 	}
2389 
2390 	LOG_DBG("chan %p buf %p", chan, buf);
2391 
2392 	if (bt_l2cap_chan_get_state(&le_chan->chan) == BT_L2CAP_CONNECTED) {
2393 		l2cap_chan_send_credits(le_chan, 1);
2394 	}
2395 
2396 	return 0;
2397 }
2398 
/* net_buf_append_bytes() fragment allocator callback: delegates to the
 * channel's alloc_buf op. The timeout parameter is unused — allocation
 * either succeeds or fails immediately.
 */
static struct net_buf *l2cap_alloc_frag(k_timeout_t timeout, void *user_data)
{
	struct bt_l2cap_le_chan *le_chan = user_data;
	struct net_buf *frag;

	frag = le_chan->chan.ops->alloc_buf(&le_chan->chan);
	if (frag) {
		LOG_DBG("frag %p tailroom %zu", frag, net_buf_tailroom(frag));
	}

	return frag;
}
2413 
/* Deliver a fully reassembled SDU to the application.
 *
 * On -EINPROGRESS the application keeps the buffer reference and will
 * call bt_l2cap_chan_recv_complete() later; any other error disconnects
 * the channel. On success one credit is returned to the peer, unless the
 * recv() callback disconnected the channel.
 */
static void l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan *chan,
				   struct net_buf *buf, uint16_t seg)
{
	int err;

	LOG_DBG("chan %p len %zu", chan, buf->len);

	__ASSERT_NO_MSG(bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED);
	__ASSERT_NO_MSG(atomic_get(&chan->rx.credits) == 0);

	err = chan->chan.ops->recv(&chan->chan, buf);
	if (err == -EINPROGRESS) {
		/* Buffer ownership transferred to the application */
		return;
	}

	if (err < 0) {
		LOG_ERR("err %d", err);
		bt_l2cap_chan_disconnect(&chan->chan);
		net_buf_unref(buf);
		return;
	}

	if (bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED) {
		l2cap_chan_send_credits(chan, 1);
	}

	net_buf_unref(buf);
}
2439 
/* Append one K-frame payload to the SDU being reassembled in chan->_sdu.
 *
 * The running segment counter lives in the SDU buffer's user_data area
 * (zero while the SDU buffer is still empty). When the accumulated
 * length reaches chan->_sdu_len the SDU is detached from the channel and
 * delivered via l2cap_chan_le_recv_sdu(). Length violations or storage
 * failures disconnect the channel.
 */
static void l2cap_chan_le_recv_seg(struct bt_l2cap_le_chan *chan,
				   struct net_buf *buf)
{
	uint16_t len;
	uint16_t seg = 0U;

	/* Recover the segment counter stashed in user_data */
	len = chan->_sdu->len;
	if (len) {
		memcpy(&seg, net_buf_user_data(chan->_sdu), sizeof(seg));
	}

	/* This segment must not push us past the announced SDU length */
	if (len + buf->len > chan->_sdu_len) {
		LOG_ERR("SDU length mismatch");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	seg++;
	/* Store received segments in user_data */
	memcpy(net_buf_user_data(chan->_sdu), &seg, sizeof(seg));

	LOG_DBG("chan %p seg %d len %zu", chan, seg, buf->len);

	/* Append received segment to SDU */
	len = net_buf_append_bytes(chan->_sdu, buf->len, buf->data, K_NO_WAIT,
				   l2cap_alloc_frag, chan);
	if (len != buf->len) {
		LOG_ERR("Unable to store SDU");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	if (chan->_sdu->len < chan->_sdu_len) {
		/* Give more credits if remote has run out of them, this
		 * should only happen if the remote cannot fully utilize the
		 * MPS for some reason.
		 *
		 * We can't send more than one credit, because if the remote
		 * decides to start fully utilizing the MPS for the remainder of
		 * the SDU, then the remote will end up with more credits than
		 * the app has buffers.
		 */
		if (atomic_get(&chan->rx.credits) == 0) {
			LOG_DBG("remote is not fully utilizing MPS");
			l2cap_chan_send_credits(chan, 1);
		}

		return;
	}

	/* SDU complete: detach it from the channel before delivery */
	buf = chan->_sdu;
	chan->_sdu = NULL;
	chan->_sdu_len = 0U;

	l2cap_chan_le_recv_sdu(chan, buf, seg);
}
2496 
2497 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
l2cap_chan_le_recv_seg_direct(struct bt_l2cap_le_chan * chan,struct net_buf * seg)2498 static void l2cap_chan_le_recv_seg_direct(struct bt_l2cap_le_chan *chan, struct net_buf *seg)
2499 {
2500 	uint16_t seg_offset;
2501 	uint16_t sdu_remaining;
2502 
2503 	if (chan->_sdu_len_done == chan->_sdu_len) {
2504 
2505 		/* This is the first PDU in a SDU. */
2506 
2507 		if (seg->len < 2) {
2508 			LOG_WRN("Missing SDU header");
2509 			bt_l2cap_chan_disconnect(&chan->chan);
2510 			return;
2511 		}
2512 
2513 		/* Pop off the "SDU header". */
2514 		chan->_sdu_len = net_buf_pull_le16(seg);
2515 		chan->_sdu_len_done = 0;
2516 
2517 		if (chan->_sdu_len > chan->rx.mtu) {
2518 			LOG_WRN("SDU exceeds MTU");
2519 			bt_l2cap_chan_disconnect(&chan->chan);
2520 			return;
2521 		}
2522 	}
2523 
2524 	seg_offset = chan->_sdu_len_done;
2525 	sdu_remaining = chan->_sdu_len - chan->_sdu_len_done;
2526 
2527 	if (seg->len > sdu_remaining) {
2528 		LOG_WRN("L2CAP RX PDU total exceeds SDU");
2529 		bt_l2cap_chan_disconnect(&chan->chan);
2530 	}
2531 
2532 	/* Commit receive. */
2533 	chan->_sdu_len_done += seg->len;
2534 
2535 	/* Tail call. */
2536 	chan->chan.ops->seg_recv(&chan->chan, chan->_sdu_len, seg_offset, &seg->b);
2537 }
2538 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
2539 
/* Process one inbound K-frame PDU on a credit-based channel.
 *
 * Consumes one RX credit, enforces the negotiated MPS, then routes the
 * payload: to the seg_recv path when enabled, to SDU reassembly when a
 * partial SDU exists or the channel provides alloc_buf, or directly to
 * the application's recv() callback otherwise. Protocol violations
 * (no credits, oversized PDU/SDU, short first PDU) disconnect the
 * channel.
 */
static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
			       struct net_buf *buf)
{
	uint16_t sdu_len;
	int err;

	/* Each PDU consumes one of the credits we previously granted */
	if (!test_and_dec(&chan->rx.credits)) {
		LOG_ERR("No credits to receive packet");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	if (buf->len > chan->rx.mps) {
		LOG_WRN("PDU size > MPS (%u > %u)", buf->len, chan->rx.mps);
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Redirect to experimental API. */
	IF_ENABLED(CONFIG_BT_L2CAP_SEG_RECV, (
		if (chan->chan.ops->seg_recv) {
			l2cap_chan_le_recv_seg_direct(chan, buf);
			return;
		}
	))

	/* Check if segments already exist */
	if (chan->_sdu) {
		l2cap_chan_le_recv_seg(chan, buf);
		return;
	}

	/* First PDU of an SDU must carry the 2-byte SDU length header */
	if (buf->len < 2) {
		LOG_WRN("Too short data packet");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	sdu_len = net_buf_pull_le16(buf);

	LOG_DBG("chan %p len %u sdu_len %u", chan, buf->len, sdu_len);

	if (sdu_len > chan->rx.mtu) {
		LOG_ERR("Invalid SDU length");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Always allocate buffer from the channel if supported. */
	if (chan->chan.ops->alloc_buf) {
		chan->_sdu = chan->chan.ops->alloc_buf(&chan->chan);
		if (!chan->_sdu) {
			LOG_ERR("Unable to allocate buffer for SDU");
			bt_l2cap_chan_disconnect(&chan->chan);
			return;
		}
		chan->_sdu_len = sdu_len;

		/* Send sdu_len/mps worth of credits */
		uint16_t credits = DIV_ROUND_UP(
			MIN(sdu_len - buf->len, net_buf_tailroom(chan->_sdu)),
			chan->rx.mps);

		if (credits) {
			LOG_DBG("sending %d extra credits (sdu_len %d buf_len %d mps %d)",
				credits,
				sdu_len,
				buf->len,
				chan->rx.mps);
			l2cap_chan_send_credits(chan, credits);
		}

		l2cap_chan_le_recv_seg(chan, buf);
		return;
	}

	/* No allocator: deliver the payload to the application directly */
	err = chan->chan.ops->recv(&chan->chan, buf);
	if (err < 0) {
		if (err != -EINPROGRESS) {
			LOG_ERR("err %d", err);
			bt_l2cap_chan_disconnect(&chan->chan);
		}
		return;
	}

	/* Only attempt to send credits if the channel wasn't disconnected
	 * in the recv() callback above
	 */
	if (bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED) {
		l2cap_chan_send_credits(chan, 1);
	}
}
2632 
l2cap_chan_recv_queue(struct bt_l2cap_le_chan * chan,struct net_buf * buf)2633 static void l2cap_chan_recv_queue(struct bt_l2cap_le_chan *chan,
2634 				  struct net_buf *buf)
2635 {
2636 	if (chan->state == BT_L2CAP_DISCONNECTING) {
2637 		LOG_WRN("Ignoring data received while disconnecting");
2638 		net_buf_unref(buf);
2639 		return;
2640 	}
2641 
2642 	if (atomic_test_bit(chan->chan.status, BT_L2CAP_STATUS_SHUTDOWN)) {
2643 		LOG_WRN("Ignoring data received while channel has shutdown");
2644 		net_buf_unref(buf);
2645 		return;
2646 	}
2647 
2648 	if (!L2CAP_LE_PSM_IS_DYN(chan->psm)) {
2649 		l2cap_chan_le_recv(chan, buf);
2650 		net_buf_unref(buf);
2651 		return;
2652 	}
2653 
2654 	net_buf_put(&chan->rx_queue, buf);
2655 	k_work_submit(&chan->rx_work);
2656 }
2657 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2658 
/* Dispatch one inbound PDU to its channel.
 *
 * Dynamic (CoC) channels go through the credit-based queueing path;
 * fixed channels get the buffer delivered straight to their recv()
 * callback. @p complete false means the PDU could not be fully received,
 * which on a dynamic channel is treated as an RX overflow by the peer.
 */
static void l2cap_chan_recv(struct bt_l2cap_chan *chan, struct net_buf *buf,
			    bool complete)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	if (L2CAP_LE_CID_IS_DYN(le_chan->rx.cid)) {
		if (complete) {
			l2cap_chan_recv_queue(le_chan, buf);
		} else {
			/* if packet was not complete this means peer device
			 * overflowed our RX and channel shall be disconnected
			 */
			bt_l2cap_chan_disconnect(chan);
			net_buf_unref(buf);
		}

		return;
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

	LOG_DBG("chan %p len %u", chan, buf->len);

	chan->ops->recv(chan, buf);
	net_buf_unref(buf);
}
2685 
/* Entry point for a complete L2CAP PDU received on @p conn.
 *
 * BR/EDR traffic is handed off to the Classic stack. For LE, the basic
 * L2CAP header is parsed and the payload routed to the channel matching
 * the destination CID; PDUs that are too short or target an unknown CID
 * are dropped.
 */
void bt_l2cap_recv(struct bt_conn *conn, struct net_buf *buf, bool complete)
{
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_hdr *hdr;
	uint16_t cid;

	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_recv(conn, buf);
		return;
	}

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Too small L2CAP PDU received");
		net_buf_unref(buf);
		return;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	cid = sys_le16_to_cpu(hdr->cid);

	LOG_DBG("Packet for CID %u len %u", cid, buf->len);

	chan = bt_l2cap_le_lookup_rx_cid(conn, cid);
	if (chan) {
		l2cap_chan_recv(chan, buf, complete);
	} else {
		LOG_WRN("Ignoring data for unknown channel ID 0x%04x", cid);
		net_buf_unref(buf);
	}
}
2718 
bt_l2cap_update_conn_param(struct bt_conn * conn,const struct bt_le_conn_param * param)2719 int bt_l2cap_update_conn_param(struct bt_conn *conn,
2720 			       const struct bt_le_conn_param *param)
2721 {
2722 	struct bt_l2cap_conn_param_req *req;
2723 	struct net_buf *buf;
2724 
2725 	buf = l2cap_create_le_sig_pdu(BT_L2CAP_CONN_PARAM_REQ,
2726 				      get_ident(), sizeof(*req));
2727 	if (!buf) {
2728 		return -ENOMEM;
2729 	}
2730 
2731 	req = net_buf_add(buf, sizeof(*req));
2732 	req->min_interval = sys_cpu_to_le16(param->interval_min);
2733 	req->max_interval = sys_cpu_to_le16(param->interval_max);
2734 	req->latency = sys_cpu_to_le16(param->latency);
2735 	req->timeout = sys_cpu_to_le16(param->timeout);
2736 
2737 	return l2cap_send_sig(conn, buf);
2738 }
2739 
l2cap_connected(struct bt_l2cap_chan * chan)2740 static void l2cap_connected(struct bt_l2cap_chan *chan)
2741 {
2742 	LOG_DBG("ch %p cid 0x%04x", BT_L2CAP_LE_CHAN(chan), BT_L2CAP_LE_CHAN(chan)->rx.cid);
2743 }
2744 
/* disconnected callback for the LE signaling channel: stop any pending
 * retransmission work tied to the channel.
 */
static void l2cap_disconnected(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("ch %p cid 0x%04x", le_chan, le_chan->rx.cid);

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Cancel RTX work on signal channel.
	 * Disconnected callback is always called from system workqueue
	 * so this should always succeed.
	 */
	(void)k_work_cancel_delayable(&le_chan->rtx_work);
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
}
2759 
l2cap_accept(struct bt_conn * conn,struct bt_l2cap_chan ** chan)2760 static int l2cap_accept(struct bt_conn *conn, struct bt_l2cap_chan **chan)
2761 {
2762 	int i;
2763 	static const struct bt_l2cap_chan_ops ops = {
2764 		.connected = l2cap_connected,
2765 		.disconnected = l2cap_disconnected,
2766 		.recv = l2cap_recv,
2767 	};
2768 
2769 	LOG_DBG("conn %p handle %u", conn, conn->handle);
2770 
2771 	for (i = 0; i < ARRAY_SIZE(bt_l2cap_pool); i++) {
2772 		struct bt_l2cap *l2cap = &bt_l2cap_pool[i];
2773 
2774 		if (l2cap->chan.chan.conn) {
2775 			continue;
2776 		}
2777 
2778 		l2cap->chan.chan.ops = &ops;
2779 		*chan = &l2cap->chan.chan;
2780 
2781 		return 0;
2782 	}
2783 
2784 	LOG_ERR("No available L2CAP context for conn %p", conn);
2785 
2786 	return -ENOMEM;
2787 }
2788 
/* Register the LE signaling fixed channel; l2cap_accept() binds a pool
 * slot to each new connection.
 */
BT_L2CAP_CHANNEL_DEFINE(le_fixed_chan, BT_L2CAP_CID_LE_SIG, l2cap_accept, NULL);
2790 
bt_l2cap_init(void)2791 void bt_l2cap_init(void)
2792 {
2793 	if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
2794 		bt_l2cap_br_init();
2795 	}
2796 }
2797 
2798 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Start connecting an LE CoC channel to @p psm on @p conn.
 *
 * Initializes the channel's TX/RX state and registers it on the
 * connection. If the link's security level is below the channel's
 * requirement, security elevation is started first and the connect
 * request is deferred (BT_L2CAP_STATUS_ENCRYPT_PENDING) until it
 * completes. Returns 0 on success or a negative error; on failure the
 * channel is removed and destroyed again.
 */
static int l2cap_le_connect(struct bt_conn *conn, struct bt_l2cap_le_chan *ch,
			    uint16_t psm)
{
	int err;

	/* PSM must be within the fixed or dynamic LE range */
	if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
		return -EINVAL;
	}

	l2cap_chan_tx_init(ch);
	l2cap_chan_rx_init(ch);

	if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
		return -ENOMEM;
	}

	ch->psm = psm;

	if (conn->sec_level < ch->required_sec_level) {
		err = bt_conn_set_security(conn, ch->required_sec_level);
		if (err) {
			goto fail;
		}

		/* Defer the connect request until encryption is in place */
		atomic_set_bit(ch->chan.status,
			       BT_L2CAP_STATUS_ENCRYPT_PENDING);

		return 0;
	}

	err = l2cap_le_conn_req(ch);
	if (err) {
		goto fail;
	}

	return 0;

fail:
	bt_l2cap_chan_remove(conn, &ch->chan);
	bt_l2cap_chan_del(&ch->chan);
	return err;
}
2841 
2842 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Prepare one channel for an enhanced credit-based connection request:
 * validate the PSM range, initialize TX/RX state and register the
 * channel on the connection. Returns 0, -EINVAL or -ENOMEM.
 */
static int l2cap_ecred_init(struct bt_conn *conn,
			       struct bt_l2cap_le_chan *ch, uint16_t psm)
{
	if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
		/* PSM outside the fixed/dynamic LE range */
		return -EINVAL;
	}

	l2cap_chan_tx_init(ch);
	l2cap_chan_rx_init(ch);

	if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
		return -ENOMEM;
	}

	ch->psm = psm;

	LOG_DBG("ch %p psm 0x%02x mtu %u mps %u credits 1", ch, ch->psm, ch->rx.mtu, ch->rx.mps);

	return 0;
}
2864 
/* Connect up to L2CAP_ECRED_CHAN_MAX_PER_REQ channels to @p psm with a
 * single Enhanced Credit Based Connection Request. @p chan is a
 * NULL-terminated (or full-length) array of channels to include.
 *
 * On an init failure the channels initialized before the failing one are
 * unregistered again and the error is returned.
 */
int bt_l2cap_ecred_chan_connect(struct bt_conn *conn,
				struct bt_l2cap_chan **chan, uint16_t psm)
{
	int i, err;

	LOG_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);

	if (!conn || !chan) {
		return -EINVAL;
	}

	/* Init non-null channels */
	for (i = 0; i < L2CAP_ECRED_CHAN_MAX_PER_REQ; i++) {
		if (!chan[i]) {
			break;
		}

		err = l2cap_ecred_init(conn, BT_L2CAP_LE_CHAN(chan[i]), psm);
		if (err < 0) {
			/* Channel i was not registered; unwind starts at i-1 */
			i--;
			goto fail;
		}
	}

	return l2cap_ecred_conn_req(chan, i);
fail:
	/* Remove channels added */
	for (; i >= 0; i--) {
		if (!chan[i]) {
			continue;
		}

		bt_l2cap_chan_remove(conn, chan[i]);
	}

	return err;
}
2902 
l2cap_find_pending_reconf(struct bt_conn * conn)2903 static struct bt_l2cap_le_chan *l2cap_find_pending_reconf(struct bt_conn *conn)
2904 {
2905 	struct bt_l2cap_chan *chan;
2906 
2907 	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
2908 		if (BT_L2CAP_LE_CHAN(chan)->pending_rx_mtu) {
2909 			return BT_L2CAP_LE_CHAN(chan);
2910 		}
2911 	}
2912 
2913 	return NULL;
2914 }
2915 
/* Request a new RX MTU for up to L2CAP_ECRED_CHAN_MAX_PER_REQ channels
 * with a single Enhanced Credit Based Reconfigure Request.
 *
 * All channels must belong to the same LE connection and the MTU may
 * only grow. Only one reconfiguration may be outstanding per connection
 * at a time; the channels' ident/pending_rx_mtu fields track the request
 * until the response (or timeout) resolves it.
 */
int bt_l2cap_ecred_chan_reconfigure(struct bt_l2cap_chan **chans, uint16_t mtu)
{
	struct bt_l2cap_ecred_reconf_req *req;
	struct bt_conn *conn = NULL;
	struct bt_l2cap_le_chan *ch;
	struct net_buf *buf;
	uint8_t ident;
	int i;

	LOG_DBG("chans %p mtu 0x%04x", chans, mtu);

	if (!chans) {
		return -EINVAL;
	}

	for (i = 0; i < L2CAP_ECRED_CHAN_MAX_PER_REQ; i++) {
		if (!chans[i]) {
			break;
		}

		/* validate that all channels are from same connection */
		if (conn) {
			if (conn != chans[i]->conn) {
				return -EINVAL;
			}
		} else {
			conn = chans[i]->conn;
		}

		/* validate MTU is not decreased */
		if (mtu < BT_L2CAP_LE_CHAN(chans[i])->rx.mtu) {
			return -EINVAL;
		}
	}

	/* At least one channel is required */
	if (i == 0) {
		return -EINVAL;
	}

	if (!conn) {
		return -ENOTCONN;
	}

	if (conn->type != BT_CONN_TYPE_LE) {
		return -EINVAL;
	}

	/* allow only 1 request at time */
	if (l2cap_find_pending_reconf(conn)) {
		return -EBUSY;
	}

	ident = get_ident();

	/* PDU: fixed header plus one CID per channel being reconfigured */
	buf = l2cap_create_le_sig_pdu(BT_L2CAP_ECRED_RECONF_REQ,
				      ident,
				      sizeof(*req) + (i * sizeof(uint16_t)));
	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));
	req->mtu = sys_cpu_to_le16(mtu);

	/* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE
	 * as the remaining bytes cannot be used.
	 */
	req->mps = sys_cpu_to_le16(MIN(mtu + BT_L2CAP_SDU_HDR_SIZE,
				       BT_L2CAP_RX_MTU));

	/* Mark each channel as pending and list its CID in the request */
	for (int j = 0; j < i; j++) {
		ch = BT_L2CAP_LE_CHAN(chans[j]);

		ch->ident = ident;
		ch->pending_rx_mtu = mtu;

		net_buf_add_le16(buf, ch->rx.cid);
	};

	/* We set the RTX timer on one of the supplied channels, but when the
	 * request resolves or times out we will act on all the channels in the
	 * supplied array, using the ident field to find them.
	 */
	l2cap_chan_send_req(chans[0], buf, L2CAP_CONN_TIMEOUT);

	return 0;
}
3003 
3004 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
3005 
/* Public API: connect @p chan to @p psm on @p conn.
 *
 * BR/EDR connections are delegated to the Classic implementation. For
 * LE, the required security level is sanity-checked (L0 is promoted to
 * L1) before starting the credit-based connect procedure.
 */
int bt_l2cap_chan_connect(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			  uint16_t psm)
{
	struct bt_l2cap_le_chan *le_chan;

	LOG_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);

	if (!conn || conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	if (!chan) {
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
		return bt_l2cap_br_chan_connect(conn, chan, psm);
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	if (le_chan->required_sec_level > BT_SECURITY_L4) {
		return -EINVAL;
	}

	if (le_chan->required_sec_level == BT_SECURITY_L0) {
		/* No-security is not allowed; bump to the minimum level */
		le_chan->required_sec_level = BT_SECURITY_L1;
	}

	return l2cap_le_connect(conn, le_chan, psm);
}
3034 
bt_l2cap_chan_disconnect(struct bt_l2cap_chan * chan)3035 int bt_l2cap_chan_disconnect(struct bt_l2cap_chan *chan)
3036 {
3037 	struct bt_conn *conn = chan->conn;
3038 	struct net_buf *buf;
3039 	struct bt_l2cap_disconn_req *req;
3040 	struct bt_l2cap_le_chan *le_chan;
3041 
3042 	if (!conn) {
3043 		return -ENOTCONN;
3044 	}
3045 
3046 	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
3047 	    conn->type == BT_CONN_TYPE_BR) {
3048 		return bt_l2cap_br_chan_disconnect(chan);
3049 	}
3050 
3051 	le_chan = BT_L2CAP_LE_CHAN(chan);
3052 
3053 	LOG_DBG("chan %p scid 0x%04x dcid 0x%04x", chan, le_chan->rx.cid, le_chan->tx.cid);
3054 
3055 	le_chan->ident = get_ident();
3056 
3057 	buf = l2cap_create_le_sig_pdu(BT_L2CAP_DISCONN_REQ,
3058 				      le_chan->ident, sizeof(*req));
3059 	if (!buf) {
3060 		return -ENOMEM;
3061 	}
3062 
3063 	req = net_buf_add(buf, sizeof(*req));
3064 	req->dcid = sys_cpu_to_le16(le_chan->tx.cid);
3065 	req->scid = sys_cpu_to_le16(le_chan->rx.cid);
3066 
3067 	l2cap_chan_send_req(chan, buf, L2CAP_DISC_TIMEOUT);
3068 	bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTING);
3069 
3070 	return 0;
3071 }
3072 
user_data_not_empty(const struct net_buf * buf)3073 __maybe_unused static bool user_data_not_empty(const struct net_buf *buf)
3074 {
3075 	size_t ud_len = sizeof(struct closure);
3076 	const uint8_t *ud = net_buf_user_data(buf);
3077 
3078 	for (size_t i = 0; i < ud_len; i++) {
3079 		if (ud[i] != 0) {
3080 			return true;
3081 		}
3082 	}
3083 
3084 	return false;
3085 }
3086 
bt_l2cap_dyn_chan_send(struct bt_l2cap_le_chan * le_chan,struct net_buf * buf)3087 static int bt_l2cap_dyn_chan_send(struct bt_l2cap_le_chan *le_chan, struct net_buf *buf)
3088 {
3089 	uint16_t sdu_len = buf->len;
3090 
3091 	LOG_DBG("chan %p buf %p", le_chan, buf);
3092 
3093 	/* Frags are not supported. */
3094 	__ASSERT_NO_MSG(buf->frags == NULL);
3095 
3096 	if (sdu_len > le_chan->tx.mtu) {
3097 		LOG_ERR("attempt to send %u bytes on %u MTU chan",
3098 			sdu_len, le_chan->tx.mtu);
3099 		return -EMSGSIZE;
3100 	}
3101 
3102 	if (buf->ref != 1) {
3103 		/* The host may alter the buf contents when segmenting. Higher
3104 		 * layers cannot expect the buf contents to stay intact. Extra
3105 		 * refs suggests a silent data corruption would occur if not for
3106 		 * this error.
3107 		 */
3108 		LOG_ERR("buf given to l2cap has other refs");
3109 		return -EINVAL;
3110 	}
3111 
3112 	if (net_buf_headroom(buf) < BT_L2CAP_SDU_CHAN_SEND_RESERVE) {
3113 		/* Call `net_buf_reserve(buf, BT_L2CAP_SDU_CHAN_SEND_RESERVE)`
3114 		 * when allocating buffers intended for bt_l2cap_chan_send().
3115 		 */
3116 		LOG_DBG("Not enough headroom in buf %p", buf);
3117 		return -EINVAL;
3118 	}
3119 
3120 	CHECKIF(user_data_not_empty(buf)) {
3121 		LOG_DBG("Please clear user_data first");
3122 		return -EINVAL;
3123 	}
3124 
3125 	/* Prepend SDU length.
3126 	 *
3127 	 * L2CAP LE CoC SDUs are segmented and put into K-frames PDUs which have
3128 	 * their own L2CAP header (i.e. PDU length, channel id).
3129 	 *
3130 	 * The SDU length is right before the data that will be segmented and is
3131 	 * only present in the first PDU. Here's an example:
3132 	 *
3133 	 * Sent data payload of 50 bytes over channel 0x4040 with MPS of 30 bytes:
3134 	 * First PDU (K-frame):
3135 	 * | L2CAP K-frame header        | K-frame payload                 |
3136 	 * | PDU length  | Channel ID    | SDU length   | SDU payload      |
3137 	 * | 0x001e      | 0x4040        | 0x0032       | 28 bytes of data |
3138 	 *
3139 	 * Second and last PDU (K-frame):
3140 	 * | L2CAP K-frame header        | K-frame payload     |
3141 	 * | PDU length  | Channel ID    | rest of SDU payload |
3142 	 * | 0x0016      | 0x4040        | 22 bytes of data    |
3143 	 */
3144 	net_buf_push_le16(buf, sdu_len);
3145 
3146 	/* Put buffer on TX queue */
3147 	net_buf_put(&le_chan->tx_queue, buf);
3148 
3149 	/* Always process the queue in the same context */
3150 	raise_data_ready(le_chan);
3151 
3152 	return 0;
3153 }
3154 
bt_l2cap_chan_send(struct bt_l2cap_chan * chan,struct net_buf * buf)3155 int bt_l2cap_chan_send(struct bt_l2cap_chan *chan, struct net_buf *buf)
3156 {
3157 	if (!buf || !chan) {
3158 		return -EINVAL;
3159 	}
3160 
3161 	LOG_DBG("chan %p buf %p len %zu", chan, buf, buf->len);
3162 
3163 	if (buf->ref != 1) {
3164 		LOG_DBG("Expecting 1 ref, got %d", buf->ref);
3165 		return -EINVAL;
3166 	}
3167 
3168 	if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
3169 		return -ENOTCONN;
3170 	}
3171 
3172 	if (atomic_test_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN)) {
3173 		return -ESHUTDOWN;
3174 	}
3175 
3176 	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
3177 	    chan->conn->type == BT_CONN_TYPE_BR) {
3178 		return bt_l2cap_br_chan_send_cb(chan, buf, NULL, NULL);
3179 	}
3180 
3181 	/* Sending over static channels is not supported by this fn. Use
3182 	 * `bt_l2cap_send_pdu()` instead.
3183 	 */
3184 	if (IS_ENABLED(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)) {
3185 		struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3186 
3187 		__ASSERT_NO_MSG(le_chan);
3188 		__ASSERT_NO_MSG(L2CAP_LE_CID_IS_DYN(le_chan->tx.cid));
3189 
3190 		return bt_l2cap_dyn_chan_send(le_chan, buf);
3191 	}
3192 
3193 	LOG_DBG("Invalid channel type (chan %p)", chan);
3194 
3195 	return -EINVAL;
3196 }
3197 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
3198