1 /* l2cap.c - L2CAP handling */
2 
3 /*
4  * Copyright (c) 2015-2016 Intel Corporation
5  * Copyright (c) 2023 Nordic Semiconductor
6  *
7  * SPDX-License-Identifier: Apache-2.0
8  */
9 
10 #include <zephyr/kernel.h>
11 #include <string.h>
12 #include <errno.h>
13 #include <zephyr/sys/atomic.h>
14 #include <zephyr/sys/iterable_sections.h>
15 #include <zephyr/sys/byteorder.h>
16 #include <zephyr/sys/math_extras.h>
17 #include <zephyr/sys/util.h>
18 
19 #include <zephyr/bluetooth/hci.h>
20 #include <zephyr/bluetooth/bluetooth.h>
21 #include <zephyr/bluetooth/conn.h>
22 #include <zephyr/bluetooth/l2cap.h>
23 #include <zephyr/drivers/bluetooth/hci_driver.h>
24 
25 #define LOG_DBG_ENABLED IS_ENABLED(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
26 
27 #include "hci_core.h"
28 #include "conn_internal.h"
29 #include "l2cap_internal.h"
30 #include "keys.h"
31 
32 #include <zephyr/logging/log.h>
33 LOG_MODULE_REGISTER(bt_l2cap, CONFIG_BT_L2CAP_LOG_LEVEL);
34 
35 #define LE_CHAN_RTX(_w) CONTAINER_OF(k_work_delayable_from_work(_w), \
36 				     struct bt_l2cap_le_chan, rtx_work)
37 #define CHAN_RX(_w) CONTAINER_OF(_w, struct bt_l2cap_le_chan, rx_work)
38 
39 #define L2CAP_LE_MIN_MTU		23
40 #define L2CAP_ECRED_MIN_MTU		64
41 
42 #define L2CAP_LE_MAX_CREDITS		(CONFIG_BT_BUF_ACL_RX_COUNT - 1)
43 
44 #define L2CAP_LE_CID_DYN_START	0x0040
45 #define L2CAP_LE_CID_DYN_END	0x007f
46 #define L2CAP_LE_CID_IS_DYN(_cid) \
47 	(_cid >= L2CAP_LE_CID_DYN_START && _cid <= L2CAP_LE_CID_DYN_END)
48 
49 #define L2CAP_LE_PSM_FIXED_START 0x0001
50 #define L2CAP_LE_PSM_FIXED_END   0x007f
51 #define L2CAP_LE_PSM_DYN_START   0x0080
52 #define L2CAP_LE_PSM_DYN_END     0x00ff
53 #define L2CAP_LE_PSM_IS_DYN(_psm) \
54 	(_psm >= L2CAP_LE_PSM_DYN_START && _psm <= L2CAP_LE_PSM_DYN_END)
55 
56 #define L2CAP_CONN_TIMEOUT	K_SECONDS(40)
57 #define L2CAP_DISC_TIMEOUT	K_SECONDS(2)
58 #define L2CAP_RTX_TIMEOUT	K_SECONDS(2)
59 
60 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Dedicated pool for disconnect buffers so they are guaranteed to be sent
 * even in case of data congestion due to flooding.
 */
64 NET_BUF_POOL_FIXED_DEFINE(disc_pool, 1,
65 			  BT_L2CAP_BUF_SIZE(
66 				sizeof(struct bt_l2cap_sig_hdr) +
67 				sizeof(struct bt_l2cap_disconn_req)),
68 			  8, NULL);
69 
70 #define l2cap_lookup_ident(conn, ident) __l2cap_lookup_ident(conn, ident, false)
71 #define l2cap_remove_ident(conn, ident) __l2cap_lookup_ident(conn, ident, true)
72 
/* Per-SDU TX bookkeeping for dynamic L2CAP channels. */
struct l2cap_tx_meta_data {
	/* Number of bytes of the SDU already handed down for transmission */
	int sent;
	/* CID the SDU is being sent on */
	uint16_t cid;
	/* Optional completion callback supplied by the sender */
	bt_conn_tx_cb_t cb;
	/* Opaque pointer handed back to cb */
	void *user_data;
};

/* Lives in the net_buf user data area; points at a slot from
 * l2cap_tx_meta_data_storage, recycled through free_l2cap_tx_meta_data.
 */
struct l2cap_tx_meta {
	struct l2cap_tx_meta_data *data;
};
83 
84 static struct l2cap_tx_meta_data l2cap_tx_meta_data_storage[CONFIG_BT_CONN_TX_MAX];
85 K_FIFO_DEFINE(free_l2cap_tx_meta_data);
86 
alloc_tx_meta_data(void)87 static struct l2cap_tx_meta_data *alloc_tx_meta_data(void)
88 {
89 	return k_fifo_get(&free_l2cap_tx_meta_data, K_NO_WAIT);
90 }
91 
free_tx_meta_data(struct l2cap_tx_meta_data * data)92 static void free_tx_meta_data(struct l2cap_tx_meta_data *data)
93 {
94 	(void)memset(data, 0, sizeof(*data));
95 	k_fifo_put(&free_l2cap_tx_meta_data, data);
96 }
97 
98 #define l2cap_tx_meta_data(buf) (((struct l2cap_tx_meta *)net_buf_user_data(buf))->data)
99 
100 static sys_slist_t servers;
101 
102 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
103 
/* L2CAP signalling channel specific context.
 * One instance per connection, taken from bt_l2cap_pool below.
 */
struct bt_l2cap {
	/* The channel this context is associated with */
	struct bt_l2cap_le_chan	chan;
};
109 
110 static const struct bt_l2cap_ecred_cb *ecred_cb;
111 static struct bt_l2cap bt_l2cap_pool[CONFIG_BT_MAX_CONN];
112 
/* Register (or clear, by passing NULL) the application callbacks invoked
 * for enhanced credit-based (ECRED) channel events.
 */
void bt_l2cap_register_ecred_cb(const struct bt_l2cap_ecred_cb *cb)
{
	ecred_cb = cb;
}
117 
/* Return the next L2CAP signalling identifier.
 *
 * Identifiers come from a wrapping 8-bit counter; 0 is not a valid
 * identifier, so it is skipped when the counter overflows.
 */
static uint8_t get_ident(void)
{
	static uint8_t ident;

	if (++ident == 0U) {
		/* 0 is reserved, continue at 1 after wrap-around */
		ident = 1U;
	}

	return ident;
}
130 
131 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_chan_alloc_cid(struct bt_conn * conn,struct bt_l2cap_chan * chan)132 static struct bt_l2cap_le_chan *l2cap_chan_alloc_cid(struct bt_conn *conn,
133 						     struct bt_l2cap_chan *chan)
134 {
135 	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
136 	uint16_t cid;
137 
138 	/*
139 	 * No action needed if there's already a CID allocated, e.g. in
140 	 * the case of a fixed channel.
141 	 */
142 	if (le_chan->rx.cid > 0) {
143 		return le_chan;
144 	}
145 
146 	for (cid = L2CAP_LE_CID_DYN_START; cid <= L2CAP_LE_CID_DYN_END; cid++) {
147 		if (!bt_l2cap_le_lookup_rx_cid(conn, cid)) {
148 			le_chan->rx.cid = cid;
149 			return le_chan;
150 		}
151 	}
152 
153 	return NULL;
154 }
155 
/* Find the channel on @p conn whose pending signalling identifier equals
 * @p ident; when @p remove is true the match is also unlinked from the
 * connection's channel list. Returns NULL if no channel matches. Used via
 * the l2cap_lookup_ident()/l2cap_remove_ident() wrapper macros.
 */
static struct bt_l2cap_le_chan *
__l2cap_lookup_ident(struct bt_conn *conn, uint16_t ident, bool remove)
{
	struct bt_l2cap_chan *chan;
	sys_snode_t *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (BT_L2CAP_LE_CHAN(chan)->ident == ident) {
			if (remove) {
				/* prev lets the singly-linked list unlink
				 * without a second traversal.
				 */
				sys_slist_remove(&conn->channels, prev,
						 &chan->node);
			}
			return BT_L2CAP_LE_CHAN(chan);
		}

		prev = &chan->node;
	}

	return NULL;
}
176 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
177 
/* Unlink @p ch from @p conn's channel list, if present.
 * Silently does nothing when the channel is not on the list.
 */
void bt_l2cap_chan_remove(struct bt_conn *conn, struct bt_l2cap_chan *ch)
{
	struct bt_l2cap_chan *chan;
	sys_snode_t *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (chan == ch) {
			/* prev is required by sys_slist_remove() to unlink
			 * from the singly-linked list in one pass.
			 */
			sys_slist_remove(&conn->channels, prev, &chan->node);
			return;
		}

		prev = &chan->node;
	}
}
192 
/* Map a channel state to a human-readable string for logging. */
const char *bt_l2cap_chan_state_str(bt_l2cap_chan_state_t state)
{
	if (state == BT_L2CAP_DISCONNECTED) {
		return "disconnected";
	} else if (state == BT_L2CAP_CONNECTING) {
		return "connecting";
	} else if (state == BT_L2CAP_CONFIG) {
		return "config";
	} else if (state == BT_L2CAP_CONNECTED) {
		return "connected";
	} else if (state == BT_L2CAP_DISCONNECTING) {
		return "disconnecting";
	}

	return "unknown";
}
210 
211 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
212 #if defined(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)
/* Debug variant of bt_l2cap_chan_set_state(): logs each transition and
 * warns when it is not one the channel state machine expects. Invalid
 * transitions are logged but still applied; only unknown target states
 * are rejected. Called through a macro that supplies the caller's
 * function name and line number.
 */
void bt_l2cap_chan_set_state_debug(struct bt_l2cap_chan *chan,
				   bt_l2cap_chan_state_t state,
				   const char *func, int line)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("chan %p psm 0x%04x %s -> %s", chan, le_chan->psm,
		bt_l2cap_chan_state_str(le_chan->state), bt_l2cap_chan_state_str(state));

	/* Validate the transition against the expected state machine */
	switch (state) {
	case BT_L2CAP_DISCONNECTED:
		/* regardless of old state always allows this state */
		break;
	case BT_L2CAP_CONNECTING:
		if (le_chan->state != BT_L2CAP_DISCONNECTED) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_CONFIG:
		if (le_chan->state != BT_L2CAP_CONNECTING) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_CONNECTED:
		if (le_chan->state != BT_L2CAP_CONFIG &&
		    le_chan->state != BT_L2CAP_CONNECTING) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_DISCONNECTING:
		if (le_chan->state != BT_L2CAP_CONFIG &&
		    le_chan->state != BT_L2CAP_CONNECTED) {
			LOG_WRN("%s()%d: invalid transition", func, line);
		}
		break;
	default:
		/* Unknown states are refused: the stored state is untouched */
		LOG_ERR("%s()%d: unknown (%u) state was set", func, line, state);
		return;
	}

	le_chan->state = state;
}
256 #else
/* Release build: store the new channel state with no transition checks. */
void bt_l2cap_chan_set_state(struct bt_l2cap_chan *chan,
			     bt_l2cap_chan_state_t state)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	le_chan->state = state;
}
262 #endif /* CONFIG_BT_L2CAP_LOG_LEVEL_DBG */
263 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
264 
/* Tear a channel down: notify the owner, detach it from its connection
 * and run the destroy/released hooks, in that order.
 *
 * ops is cached up front because chan->conn is cleared (and destroy() may
 * reset further channel state) before the final callbacks run.
 */
void bt_l2cap_chan_del(struct bt_l2cap_chan *chan)
{
	const struct bt_l2cap_chan_ops *ops = chan->ops;

	LOG_DBG("conn %p chan %p", chan->conn, chan);

	if (!chan->conn) {
		/* Never attached to a connection: skip disconnected() */
		goto destroy;
	}

	if (ops->disconnected) {
		ops->disconnected(chan);
	}

	chan->conn = NULL;

destroy:
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Reset internal members of common channel */
	bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTED);
	BT_L2CAP_LE_CHAN(chan)->psm = 0U;
#endif
	if (chan->destroy) {
		chan->destroy(chan);
	}

	if (ops->released) {
		ops->released(chan);
	}
}
295 
296 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_rtx_timeout(struct k_work * work)297 static void l2cap_rtx_timeout(struct k_work *work)
298 {
299 	struct bt_l2cap_le_chan *chan = LE_CHAN_RTX(work);
300 	struct bt_conn *conn = chan->chan.conn;
301 
302 	LOG_ERR("chan %p timeout", chan);
303 
304 	bt_l2cap_chan_remove(conn, &chan->chan);
305 	bt_l2cap_chan_del(&chan->chan);
306 
307 	/* Remove other channels if pending on the same ident */
308 	while ((chan = l2cap_remove_ident(conn, chan->ident))) {
309 		bt_l2cap_chan_del(&chan->chan);
310 	}
311 }
312 
313 static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
314 			       struct net_buf *buf);
315 
l2cap_rx_process(struct k_work * work)316 static void l2cap_rx_process(struct k_work *work)
317 {
318 	struct bt_l2cap_le_chan *ch = CHAN_RX(work);
319 	struct net_buf *buf;
320 
321 	while ((buf = net_buf_get(&ch->rx_queue, K_NO_WAIT))) {
322 		LOG_DBG("ch %p buf %p", ch, buf);
323 		l2cap_chan_le_recv(ch, buf);
324 		net_buf_unref(buf);
325 	}
326 }
327 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
328 
/* Attach @p chan to @p conn's channel list and record its destroy hook. */
void bt_l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
		       bt_l2cap_chan_destroy_t destroy)
{
	LOG_DBG("conn %p chan %p", conn, chan);

	sys_slist_append(&conn->channels, &chan->node);
	chan->conn = conn;
	chan->destroy = destroy;
}
339 
/* Allocate a CID (for dynamic channels) and attach @p chan to @p conn.
 *
 * Returns false when no dynamic CID could be allocated. On success the
 * channel is appended to the connection; dynamic channels additionally
 * get their RX work/queue initialized and enter CONNECTING state.
 */
static bool l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			   bt_l2cap_chan_destroy_t destroy)
{
	struct bt_l2cap_le_chan *le_chan;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	le_chan = l2cap_chan_alloc_cid(conn, chan);
#else
	le_chan = BT_L2CAP_LE_CHAN(chan);
#endif

	if (!le_chan) {
		LOG_ERR("Unable to allocate L2CAP channel ID");
		return false;
	}

	/* Fresh channel: clear all status bits before (re)use */
	atomic_clear(chan->status);

	bt_l2cap_chan_add(conn, chan, destroy);

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* All dynamic channels have the destroy handler which makes sure that
	 * the RTX work structure is properly released with a cancel sync.
	 * The fixed signal channel is only removed when disconnected and the
	 * disconnected handler is always called from the workqueue itself so
	 * canceling from there should always succeed.
	 */
	k_work_init_delayable(&le_chan->rtx_work, l2cap_rtx_timeout);

	if (L2CAP_LE_CID_IS_DYN(le_chan->rx.cid)) {
		k_work_init(&le_chan->rx_work, l2cap_rx_process);
		k_fifo_init(&le_chan->rx_queue);
		bt_l2cap_chan_set_state(chan, BT_L2CAP_CONNECTING);
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

	return true;
}
378 
/* ACL link established: route BR/EDR links to the BR handler, then
 * instantiate every registered fixed channel (from the bt_l2cap_fixed_chan
 * iterable section) on the new LE connection.
 */
void bt_l2cap_connected(struct bt_conn *conn)
{
	struct bt_l2cap_chan *chan;

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_connected(conn);
		return;
	}

	STRUCT_SECTION_FOREACH(bt_l2cap_fixed_chan, fchan) {
		struct bt_l2cap_le_chan *le_chan;

		/* accept() supplies the channel object for this connection */
		if (fchan->accept(conn, &chan) < 0) {
			continue;
		}

		le_chan = BT_L2CAP_LE_CHAN(chan);

		/* Fill up remaining fixed channel context attached in
		 * fchan->accept()
		 */
		le_chan->rx.cid = fchan->cid;
		le_chan->tx.cid = fchan->cid;

		/* NOTE(review): a failure here aborts the remaining fixed
		 * channels as well, not just this one — confirm intended.
		 */
		if (!l2cap_chan_add(conn, chan, fchan->destroy)) {
			return;
		}

		if (chan->ops->connected) {
			chan->ops->connected(chan);
		}

		/* Always set output status to fixed channels */
		atomic_set_bit(chan->status, BT_L2CAP_STATUS_OUT);

		if (chan->ops->status) {
			chan->ops->status(chan, chan->status);
		}
	}
}
420 
bt_l2cap_disconnected(struct bt_conn * conn)421 void bt_l2cap_disconnected(struct bt_conn *conn)
422 {
423 	struct bt_l2cap_chan *chan, *next;
424 
425 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
426 		bt_l2cap_chan_del(chan);
427 	}
428 }
429 
/* Allocate a PDU and fill in an LE signalling header with @p code,
 * @p ident and payload length @p len. Disconnect requests come from the
 * dedicated disc_pool so they can still be sent under buffer exhaustion.
 * Returns NULL if no buffer could be allocated within the RTX timeout.
 *
 * NOTE(review): the @p buf parameter is never read — it is immediately
 * overwritten by the fresh allocation. Callers appear to pass it only for
 * convenience; consider dropping the parameter.
 */
static struct net_buf *l2cap_create_le_sig_pdu(struct net_buf *buf,
					       uint8_t code, uint8_t ident,
					       uint16_t len)
{
	struct bt_l2cap_sig_hdr *hdr;
	struct net_buf_pool *pool = NULL;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	if (code == BT_L2CAP_DISCONN_REQ) {
		pool = &disc_pool;
	}
#endif
	/* Don't wait more than the minimum RTX timeout of 2 seconds */
	buf = bt_l2cap_create_pdu_timeout(pool, 0, L2CAP_RTX_TIMEOUT);
	if (!buf) {
		/* If it was not possible to allocate a buffer within the
		 * timeout return NULL.
		 */
		LOG_ERR("Unable to allocate buffer for op 0x%02x", code);
		return NULL;
	}

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = code;
	hdr->ident = ident;
	hdr->len = sys_cpu_to_le16(len);

	return buf;
}
459 
460 /* Send the buffer and release it in case of failure.
461  * Any other cleanup in failure to send should be handled by the disconnected
462  * handler.
463  */
static inline void l2cap_send(struct bt_conn *conn, uint16_t cid,
			      struct net_buf *buf)
{
	int err = bt_l2cap_send(conn, cid, buf);

	if (err) {
		net_buf_unref(buf);
	}
}
471 
472 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Send a signalling request for @p chan and arm the RTX timer; the buffer
 * is released if it could not be handed to the lower layer.
 */
static void l2cap_chan_send_req(struct bt_l2cap_chan *chan,
				struct net_buf *buf, k_timeout_t timeout)
{
	int err;

	err = bt_l2cap_send(chan->conn, BT_L2CAP_CID_LE_SIG, buf);
	if (err) {
		net_buf_unref(buf);
		return;
	}

	/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part A] page 126:
	 *
	 * The value of this timer is implementation-dependent but the minimum
	 * initial value is 1 second and the maximum initial value is 60
	 * seconds. One RTX timer shall exist for each outstanding signaling
	 * request, including each Echo Request. The timer disappears on the
	 * final expiration, when the response is received, or the physical
	 * link is lost.
	 */
	k_work_reschedule(&(BT_L2CAP_LE_CHAN(chan)->rtx_work), timeout);
}
492 
/* Send an LE Credit Based Connection Request for @p ch.
 *
 * Allocates a fresh signalling identifier, fills the request from the
 * channel's RX parameters and arms the RTX timer via
 * l2cap_chan_send_req(). Returns 0 on success or -ENOMEM when no PDU
 * buffer could be allocated in time.
 */
static int l2cap_le_conn_req(struct bt_l2cap_le_chan *ch)
{
	struct net_buf *buf;
	struct bt_l2cap_le_conn_req *req;

	ch->ident = get_ident();

	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_LE_CONN_REQ,
				      ch->ident, sizeof(*req));
	if (!buf) {
		return -ENOMEM;
	}

	/* All multi-byte fields are little-endian on the wire */
	req = net_buf_add(buf, sizeof(*req));
	req->psm = sys_cpu_to_le16(ch->psm);
	req->scid = sys_cpu_to_le16(ch->rx.cid);
	req->mtu = sys_cpu_to_le16(ch->rx.mtu);
	req->mps = sys_cpu_to_le16(ch->rx.mps);
	req->credits = sys_cpu_to_le16(ch->rx.credits);

	l2cap_chan_send_req(&ch->chan, buf, L2CAP_CONN_TIMEOUT);

	return 0;
}
517 
518 #if defined(CONFIG_BT_L2CAP_ECRED)
l2cap_ecred_conn_req(struct bt_l2cap_chan ** chan,int channels)519 static int l2cap_ecred_conn_req(struct bt_l2cap_chan **chan, int channels)
520 {
521 	struct net_buf *buf;
522 	struct bt_l2cap_ecred_conn_req *req;
523 	struct bt_l2cap_le_chan *ch;
524 	int i;
525 	uint8_t ident;
526 	uint16_t req_psm;
527 	uint16_t req_mtu;
528 
529 	if (!chan || !channels) {
530 		return -EINVAL;
531 	}
532 
533 	ident = get_ident();
534 
535 	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_ECRED_CONN_REQ, ident,
536 				      sizeof(*req) +
537 				      (channels * sizeof(uint16_t)));
538 
539 	req = net_buf_add(buf, sizeof(*req));
540 
541 	ch = BT_L2CAP_LE_CHAN(chan[0]);
542 
543 	/* Init common parameters */
544 	req->psm = sys_cpu_to_le16(ch->psm);
545 	req->mtu = sys_cpu_to_le16(ch->rx.mtu);
546 	req->mps = sys_cpu_to_le16(ch->rx.mps);
547 	req->credits = sys_cpu_to_le16(ch->rx.credits);
548 	req_psm = ch->psm;
549 	req_mtu = ch->tx.mtu;
550 
551 	for (i = 0; i < channels; i++) {
552 		ch = BT_L2CAP_LE_CHAN(chan[i]);
553 
554 		__ASSERT(ch->psm == req_psm,
555 			 "The PSM shall be the same for channels in the same request.");
556 		__ASSERT(ch->tx.mtu == req_mtu,
557 			 "The MTU shall be the same for channels in the same request.");
558 
559 		ch->ident = ident;
560 
561 		net_buf_add_le16(buf, ch->rx.cid);
562 	}
563 
564 	l2cap_chan_send_req(*chan, buf, L2CAP_CONN_TIMEOUT);
565 
566 	return 0;
567 }
568 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
569 
/* Handle an encryption change for one dynamic channel.
 *
 * Channels parked with BT_L2CAP_STATUS_ENCRYPT_PENDING retry their
 * connection request once encryption comes up; on failure (non-zero HCI
 * @p status or a failed retry) the channel is removed and deleted. For
 * ECRED channels every sibling from the same original request (matching
 * ident) is collected and retried in a single enhanced request.
 */
static void l2cap_le_encrypt_change(struct bt_l2cap_chan *chan, uint8_t status)
{
	int err;
	struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(chan);

	/* Skip channels that are not pending waiting for encryption */
	if (!atomic_test_and_clear_bit(chan->status,
				       BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
		return;
	}

	if (status) {
		goto fail;
	}

#if defined(CONFIG_BT_L2CAP_ECRED)
	/* A non-zero ident marks a pending ECRED request group */
	if (le->ident) {
		struct bt_l2cap_chan *echan[L2CAP_ECRED_CHAN_MAX_PER_REQ];
		struct bt_l2cap_chan *ch;
		int i = 0;

		SYS_SLIST_FOR_EACH_CONTAINER(&chan->conn->channels, ch, node) {
			if (le->ident == BT_L2CAP_LE_CHAN(ch)->ident) {
				__ASSERT(i < L2CAP_ECRED_CHAN_MAX_PER_REQ,
					 "There can only be L2CAP_ECRED_CHAN_MAX_PER_REQ channels "
					 "from the same request.");
				/* Clear siblings' pending bit so this handler
				 * does not run again for them.
				 */
				atomic_clear_bit(ch->status, BT_L2CAP_STATUS_ENCRYPT_PENDING);
				echan[i++] = ch;
			}
		}

		/* Retry ecred connect */
		l2cap_ecred_conn_req(echan, i);
		return;
	}
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */

	/* Retry to connect */
	err = l2cap_le_conn_req(le);
	if (err) {
		goto fail;
	}

	return;
fail:
	bt_l2cap_chan_remove(chan->conn, chan);
	bt_l2cap_chan_del(chan);
}
618 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
619 
/* Security level changed on @p conn: forward to the BR/EDR handler for
 * BR links, otherwise give every channel a chance to resume a pending
 * connect and notify owners through encrypt_change().
 */
void bt_l2cap_security_changed(struct bt_conn *conn, uint8_t hci_status)
{
	struct bt_l2cap_chan *cur, *tmp;

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		l2cap_br_encrypt_change(conn, hci_status);
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, cur, tmp, node) {
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
		l2cap_le_encrypt_change(cur, hci_status);
#endif

		if (cur->ops->encrypt_change) {
			cur->ops->encrypt_change(cur, hci_status);
		}
	}
}
640 
/* Allocate an outgoing PDU, reserving headroom for the basic L2CAP header
 * on top of the caller's requested @p reserve bytes.
 */
struct net_buf *bt_l2cap_create_pdu_timeout(struct net_buf_pool *pool,
					    size_t reserve,
					    k_timeout_t timeout)
{
	size_t headroom = sizeof(struct bt_l2cap_hdr) + reserve;

	return bt_conn_create_pdu_timeout(pool, headroom, timeout);
}
649 
/* Prepend the basic L2CAP header (payload length + CID, little-endian)
 * and hand the buffer to the connection layer; @p cb is invoked on send
 * completion with @p user_data.
 */
int bt_l2cap_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
		     bt_conn_tx_cb_t cb, void *user_data)
{
	struct bt_l2cap_hdr *hdr;
	uint16_t payload_len = buf->len;

	LOG_DBG("conn %p cid %u len %zu", conn, cid, net_buf_frags_len(buf));

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->len = sys_cpu_to_le16(payload_len);
	hdr->cid = sys_cpu_to_le16(cid);

	return bt_conn_send_cb(conn, buf, cb, user_data);
}
663 
l2cap_send_reject(struct bt_conn * conn,uint8_t ident,uint16_t reason,void * data,uint8_t data_len)664 static void l2cap_send_reject(struct bt_conn *conn, uint8_t ident,
665 			      uint16_t reason, void *data, uint8_t data_len)
666 {
667 	struct bt_l2cap_cmd_reject *rej;
668 	struct net_buf *buf;
669 
670 	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_CMD_REJECT, ident,
671 				      sizeof(*rej) + data_len);
672 	if (!buf) {
673 		return;
674 	}
675 
676 	rej = net_buf_add(buf, sizeof(*rej));
677 	rej->reason = sys_cpu_to_le16(reason);
678 
679 	if (data) {
680 		net_buf_add_mem(buf, data, data_len);
681 	}
682 
683 	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
684 }
685 
le_conn_param_rsp(struct bt_l2cap * l2cap,struct net_buf * buf)686 static void le_conn_param_rsp(struct bt_l2cap *l2cap, struct net_buf *buf)
687 {
688 	struct bt_l2cap_conn_param_rsp *rsp = (void *)buf->data;
689 
690 	if (buf->len < sizeof(*rsp)) {
691 		LOG_ERR("Too small LE conn param rsp");
692 		return;
693 	}
694 
695 	LOG_DBG("LE conn param rsp result %u", sys_le16_to_cpu(rsp->result));
696 }
697 
/* Handle an LE Connection Parameter Update Request from the peer.
 *
 * Only valid when the local role is central; otherwise a Command Reject
 * is returned. The proposal is forwarded to the host via le_param_req(),
 * the verdict is sent back in a Connection Parameter Update Response,
 * and on acceptance the controller is asked to apply the parameters.
 */
static void le_conn_param_update_req(struct bt_l2cap *l2cap, uint8_t ident,
				     struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_le_conn_param param;
	struct bt_l2cap_conn_param_rsp *rsp;
	struct bt_l2cap_conn_param_req *req = (void *)buf->data;
	bool accepted;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn update param req");
		return;
	}

	if (conn->state != BT_CONN_CONNECTED) {
		LOG_WRN("Not connected");
		return;
	}

	/* Only the central can apply connection parameter updates */
	if (conn->role != BT_HCI_ROLE_CENTRAL) {
		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_NOT_UNDERSTOOD,
				  NULL, 0);
		return;
	}

	/* Convert from wire (little-endian) to host order */
	param.interval_min = sys_le16_to_cpu(req->min_interval);
	param.interval_max = sys_le16_to_cpu(req->max_interval);
	param.latency = sys_le16_to_cpu(req->latency);
	param.timeout = sys_le16_to_cpu(req->timeout);

	LOG_DBG("min 0x%04x max 0x%04x latency: 0x%04x timeout: 0x%04x", param.interval_min,
		param.interval_max, param.latency, param.timeout);

	/* Allocates a fresh response PDU (the passed buf is not reused);
	 * req must not be dereferenced after this point.
	 */
	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_CONN_PARAM_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	accepted = le_param_req(conn, &param);

	rsp = net_buf_add(buf, sizeof(*rsp));
	if (accepted) {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_ACCEPTED);
	} else {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_REJECTED);
	}

	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);

	if (accepted) {
		bt_conn_le_conn_update(conn, &param);
	}
}
752 
/* Find the channel on @p conn whose destination (TX) CID equals @p cid;
 * NULL when none matches.
 */
struct bt_l2cap_chan *bt_l2cap_le_lookup_tx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *c;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, c, node) {
		struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(c);

		if (le->tx.cid == cid) {
			return c;
		}
	}

	return NULL;
}
766 
/* Find the channel on @p conn whose source (RX) CID equals @p cid;
 * NULL when none matches.
 */
struct bt_l2cap_chan *bt_l2cap_le_lookup_rx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *c;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, c, node) {
		struct bt_l2cap_le_chan *le = BT_L2CAP_LE_CHAN(c);

		if (le->rx.cid == cid) {
			return c;
		}
	}

	return NULL;
}
780 
781 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Return the registered server bound to @p psm, or NULL if none. */
struct bt_l2cap_server *bt_l2cap_server_lookup_psm(uint16_t psm)
{
	struct bt_l2cap_server *srv;

	SYS_SLIST_FOR_EACH_CONTAINER(&servers, srv, node) {
		if (srv->psm == psm) {
			return srv;
		}
	}

	return NULL;
}
794 
/* Register an LE L2CAP server.
 *
 * A non-zero server->psm must lie in the valid LE range
 * (0x0001 - 0x00ff) and be unused. A zero PSM requests dynamic
 * allocation from 0x0080 - 0x00ff; the chosen value is written back to
 * server->psm. The security level is clamped to at least L1 because L0
 * applies to BR/EDR only.
 *
 * Returns 0 on success, -EINVAL for a missing accept callback, an
 * out-of-range PSM or an invalid security level, -EADDRINUSE when the
 * PSM is taken, -EADDRNOTAVAIL when no dynamic PSM is free.
 */
int bt_l2cap_server_register(struct bt_l2cap_server *server)
{
	if (!server->accept) {
		return -EINVAL;
	}

	if (server->psm) {
		if (server->psm < L2CAP_LE_PSM_FIXED_START ||
		    server->psm > L2CAP_LE_PSM_DYN_END) {
			return -EINVAL;
		}

		/* Check if given PSM is already in use */
		if (bt_l2cap_server_lookup_psm(server->psm)) {
			LOG_DBG("PSM already registered");
			return -EADDRINUSE;
		}
	} else {
		uint16_t psm;

		/* Scan the dynamic range for the first free PSM */
		for (psm = L2CAP_LE_PSM_DYN_START;
		     psm <= L2CAP_LE_PSM_DYN_END; psm++) {
			if (!bt_l2cap_server_lookup_psm(psm)) {
				break;
			}
		}

		if (psm > L2CAP_LE_PSM_DYN_END) {
			LOG_WRN("No free dynamic PSMs available");
			return -EADDRNOTAVAIL;
		}

		LOG_DBG("Allocated PSM 0x%04x for new server", psm);
		server->psm = psm;
	}

	if (server->sec_level > BT_SECURITY_L4) {
		return -EINVAL;
	} else if (server->sec_level < BT_SECURITY_L1) {
		/* Level 0 is only applicable for BR/EDR */
		server->sec_level = BT_SECURITY_L1;
	}

	LOG_DBG("PSM 0x%04x", server->psm);

	sys_slist_append(&servers, &server->node);

	return 0;
}
844 
845 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
l2cap_chan_seg_recv_rx_init(struct bt_l2cap_le_chan * chan)846 static void l2cap_chan_seg_recv_rx_init(struct bt_l2cap_le_chan *chan)
847 {
848 	if (chan->rx.mps > BT_L2CAP_RX_MTU) {
849 		LOG_ERR("Limiting RX MPS by stack buffer size.");
850 		chan->rx.mps = BT_L2CAP_RX_MTU;
851 	}
852 
853 	chan->_sdu_len = 0;
854 	chan->_sdu_len_done = 0;
855 }
856 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
857 
/* Initialize the RX side of a dynamic channel: pick MTU/MPS defaults,
 * reconcile them with the stack buffer size and grant one initial credit.
 * Channels using the experimental seg_recv API are delegated to
 * l2cap_chan_seg_recv_rx_init() instead.
 */
static void l2cap_chan_rx_init(struct bt_l2cap_le_chan *chan)
{
	LOG_DBG("chan %p", chan);

	/* Redirect to experimental API. */
	IF_ENABLED(CONFIG_BT_L2CAP_SEG_RECV, ({
		if (chan->chan.ops->seg_recv) {
			l2cap_chan_seg_recv_rx_init(chan);
			return;
		}
	}))

	/* Use existing MTU if defined */
	if (!chan->rx.mtu) {
		/* If application has not provide the incoming L2CAP SDU MTU use
		 * an MTU that does not require segmentation.
		 */
		chan->rx.mtu = BT_L2CAP_SDU_RX_MTU;
	}

	/* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE as the
	 * remaining bytes cannot be used.
	 */
	chan->rx.mps = MIN(chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE,
			   BT_L2CAP_RX_MTU);

	/* Truncate MTU if channel have disabled segmentation but still have
	 * set an MTU which requires it.
	 */
	if (!chan->chan.ops->alloc_buf &&
	    (chan->rx.mps < chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE)) {
		LOG_WRN("Segmentation disabled but MTU > MPS, truncating MTU");
		chan->rx.mtu = chan->rx.mps - BT_L2CAP_SDU_HDR_SIZE;
	}

	/* Grant a single initial credit; more are handed out as data flows */
	atomic_set(&chan->rx.credits, 1);
}
895 
l2cap_chan_le_get_tx_buf(struct bt_l2cap_le_chan * ch)896 static struct net_buf *l2cap_chan_le_get_tx_buf(struct bt_l2cap_le_chan *ch)
897 {
898 	struct net_buf *buf;
899 
900 	/* Return current buffer */
901 	if (ch->tx_buf) {
902 		buf = ch->tx_buf;
903 		ch->tx_buf = NULL;
904 		return buf;
905 	}
906 
907 	return net_buf_get(&ch->tx_queue, K_NO_WAIT);
908 }
909 
910 static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
911 				  struct net_buf **buf, uint16_t sent);
912 
/* TX work handler: drain the channel's SDU queue.
 *
 * Each SDU is (re)submitted through l2cap_chan_le_send_sdu() starting at
 * the byte count already sent (tracked in the buffer's TX meta data). On
 * -EAGAIN the buffer is stashed in ch->tx_buf and the work item is
 * rescheduled so progress resumes even if the application stays idle;
 * any other error drops the buffer.
 */
static void l2cap_chan_tx_process(struct k_work *work)
{
	struct bt_l2cap_le_chan *ch;
	struct net_buf *buf;

	ch = CONTAINER_OF(k_work_delayable_from_work(work), struct bt_l2cap_le_chan, tx_work);

	/* Resume tx in case there are buffers in the queue */
	while ((buf = l2cap_chan_le_get_tx_buf(ch))) {
		int sent = l2cap_tx_meta_data(buf)->sent;

		LOG_DBG("buf %p sent %u", buf, sent);

		sent = l2cap_chan_le_send_sdu(ch, &buf, sent);
		if (sent < 0) {
			if (sent == -EAGAIN) {
				/* Keep the partially-sent SDU for retry */
				ch->tx_buf = buf;
				/* If we don't reschedule, and the app doesn't nudge l2cap (e.g. by
				 * sending another SDU), the channel will be stuck in limbo. To
				 * prevent this, we reschedule with a configurable delay.
				 */
				k_work_schedule(&ch->tx_work, K_MSEC(CONFIG_BT_L2CAP_RESCHED_MS));
			} else {
				net_buf_unref(buf);
			}
			break;
		}
	}
}
942 
l2cap_chan_tx_init(struct bt_l2cap_le_chan * chan)943 static void l2cap_chan_tx_init(struct bt_l2cap_le_chan *chan)
944 {
945 	LOG_DBG("chan %p", chan);
946 
947 	(void)memset(&chan->tx, 0, sizeof(chan->tx));
948 	atomic_set(&chan->tx.credits, 0);
949 	k_fifo_init(&chan->tx_queue);
950 	k_work_init_delayable(&chan->tx_work, l2cap_chan_tx_process);
951 }
952 
/* Add @p credits to the channel's TX credit pool and, on the transition
 * of the OUT status bit from clear to set (channel becomes writable),
 * notify the owner through ops->status().
 */
static void l2cap_chan_tx_give_credits(struct bt_l2cap_le_chan *chan,
				       uint16_t credits)
{
	LOG_DBG("chan %p credits %u", chan, credits);

	atomic_add(&chan->tx.credits, credits);

	/* test_and_set ensures status() fires only on the 0 -> 1 edge */
	if (!atomic_test_and_set_bit(chan->chan.status, BT_L2CAP_STATUS_OUT) &&
	    chan->chan.ops->status) {
		chan->chan.ops->status(&chan->chan, chan->chan.status);
	}
}
965 
/* Destroy handler for dynamic channels: stop the RTX timer and release
 * every buffer the channel still owns (stashed TX buffer, TX and RX
 * queues, partially reassembled SDU) so the channel can be reused.
 */
static void l2cap_chan_destroy(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
	struct net_buf *buf;

	LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->rx.cid);

	/* Cancel ongoing work. Since the channel can be re-used after this
	 * we need to sync to make sure that the kernel does not have it
	 * in its queue anymore.
	 *
	 * In the case where we are in the context of executing the rtx_work
	 * item, we don't sync as it will deadlock the workqueue.
	 */
	if (k_current_get() != &le_chan->rtx_work.queue->thread) {
		k_work_cancel_delayable_sync(&le_chan->rtx_work, &le_chan->rtx_sync);
	} else {
		k_work_cancel_delayable(&le_chan->rtx_work);
	}

	/* Drop a buffer held back by a partial send, if any */
	if (le_chan->tx_buf) {
		net_buf_unref(le_chan->tx_buf);
		le_chan->tx_buf = NULL;
	}

	/* Remove buffers on the TX queue */
	while ((buf = net_buf_get(&le_chan->tx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Remove buffers on the RX queue */
	while ((buf = net_buf_get(&le_chan->rx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Destroy segmented SDU if it exists */
	if (le_chan->_sdu) {
		net_buf_unref(le_chan->_sdu);
		le_chan->_sdu = NULL;
		le_chan->_sdu_len = 0U;
	}
}
1008 
le_err_to_result(int err)1009 static uint16_t le_err_to_result(int err)
1010 {
1011 	switch (err) {
1012 	case -ENOMEM:
1013 		return BT_L2CAP_LE_ERR_NO_RESOURCES;
1014 	case -EACCES:
1015 		return BT_L2CAP_LE_ERR_AUTHORIZATION;
1016 	case -EPERM:
1017 		return BT_L2CAP_LE_ERR_KEY_SIZE;
1018 	case -ENOTSUP:
1019 		/* This handle the cases where a fixed channel is registered but
1020 		 * for some reason (e.g. controller not suporting a feature)
1021 		 * cannot be used.
1022 		 */
1023 		return BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
1024 	default:
1025 		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
1026 	}
1027 }
1028 
/* Accept an incoming (credit based) connection request for @p scid.
 *
 * Validates the source CID, asks the registered server to allocate a
 * channel, validates the channel's callbacks, then initializes TX/RX
 * parameters and marks the channel connected.
 *
 * On success *chan points at the newly set-up channel and
 * BT_L2CAP_LE_SUCCESS is returned; otherwise the L2CAP result code to
 * reject the request with is returned.
 */
static uint16_t l2cap_chan_accept(struct bt_conn *conn,
			       struct bt_l2cap_server *server, uint16_t scid,
			       uint16_t mtu, uint16_t mps, uint16_t credits,
			       struct bt_l2cap_chan **chan)
{
	struct bt_l2cap_le_chan *le_chan;
	int err;

	LOG_DBG("conn %p scid 0x%04x chan %p", conn, scid, chan);

	/* The peer's CID must be in the dynamically allocated range */
	if (!L2CAP_LE_CID_IS_DYN(scid)) {
		return BT_L2CAP_LE_ERR_INVALID_SCID;
	}

	/* Reject if the peer's CID is already bound to another channel */
	*chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
	if (*chan) {
		return BT_L2CAP_LE_ERR_SCID_IN_USE;
	}

	/* Request server to accept the new connection and allocate the
	 * channel.
	 */
	err = server->accept(conn, server, chan);
	if (err < 0) {
		return le_err_to_result(err);
	}

#if defined(CONFIG_BT_L2CAP_SEG_RECV)
	/* 'recv' and 'seg_recv' are mutually exclusive reception models;
	 * exactly one must be provided by the server.
	 */
	if (!(*chan)->ops->recv == !(*chan)->ops->seg_recv) {
		LOG_ERR("Exactly one of 'recv' or 'seg_recv' must be set");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
#else
	if (!(*chan)->ops->recv) {
		LOG_ERR("Mandatory callback 'recv' missing");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
#endif

	le_chan = BT_L2CAP_LE_CHAN(*chan);

	le_chan->required_sec_level = server->sec_level;

	/* Attach to the connection; fails when no RX CID can be allocated */
	if (!l2cap_chan_add(conn, *chan, l2cap_chan_destroy)) {
		return BT_L2CAP_LE_ERR_NO_RESOURCES;
	}

	/* Init TX parameters from the peer-provided values */
	l2cap_chan_tx_init(le_chan);
	le_chan->tx.cid = scid;
	le_chan->tx.mps = mps;
	le_chan->tx.mtu = mtu;
	l2cap_chan_tx_give_credits(le_chan, credits);

	/* Init RX parameters */
	l2cap_chan_rx_init(le_chan);

	/* Set channel PSM */
	le_chan->psm = server->psm;

	/* Update state */
	bt_l2cap_chan_set_state(*chan, BT_L2CAP_CONNECTED);

	return BT_L2CAP_LE_SUCCESS;
}
1094 
l2cap_check_security(struct bt_conn * conn,struct bt_l2cap_server * server)1095 static uint16_t l2cap_check_security(struct bt_conn *conn,
1096 				 struct bt_l2cap_server *server)
1097 {
1098 	if (IS_ENABLED(CONFIG_BT_CONN_DISABLE_SECURITY)) {
1099 		return BT_L2CAP_LE_SUCCESS;
1100 	}
1101 
1102 	if (conn->sec_level >= server->sec_level) {
1103 		return BT_L2CAP_LE_SUCCESS;
1104 	}
1105 
1106 	if (conn->sec_level > BT_SECURITY_L1) {
1107 		return BT_L2CAP_LE_ERR_AUTHENTICATION;
1108 	}
1109 
1110 	/* If an LTK or an STK is available and encryption is required
1111 	 * (LE security mode 1) but encryption is not enabled, the
1112 	 * service request shall be rejected with the error code
1113 	 * "Insufficient Encryption".
1114 	 */
1115 	if (bt_conn_ltk_present(conn)) {
1116 		return BT_L2CAP_LE_ERR_ENCRYPTION;
1117 	}
1118 
1119 	return BT_L2CAP_LE_ERR_AUTHENTICATION;
1120 }
1121 
/* Handle an incoming L2CAP LE Credit Based Connection Request.
 *
 * Parses and validates the request, looks up the server registered for
 * the PSM, verifies the link security level, and lets the server accept
 * the channel. Always answers with an LE Connection Response carrying
 * either our channel parameters or the failure result code.
 */
static void le_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_le_chan *le_chan;
	struct bt_l2cap_server *server;
	struct bt_l2cap_le_conn_req *req = (void *)buf->data;
	struct bt_l2cap_le_conn_rsp *rsp;
	uint16_t psm, scid, mtu, mps, credits;
	uint16_t result;

	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		return;
	}

	psm = sys_le16_to_cpu(req->psm);
	scid = sys_le16_to_cpu(req->scid);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	LOG_DBG("psm 0x%02x scid 0x%04x mtu %u mps %u credits %u", psm, scid, mtu, mps, credits);

	/* Spec-mandated minimum values; silently drop invalid requests */
	if (mtu < L2CAP_LE_MIN_MTU || mps < L2CAP_LE_MIN_MTU) {
		LOG_ERR("Invalid LE-Conn Req params: mtu %u mps %u", mtu, mps);
		return;
	}

	/* Reuse the request buffer to build the response */
	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_LE_CONN_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	/* Zeroed response doubles as the "connection refused" payload */
	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));

	/* Check if there is a server registered */
	server = bt_l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto rsp;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto rsp;
	}

	result = l2cap_chan_accept(conn, server, scid, mtu, mps, credits,
				   &chan);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto rsp;
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	/* Prepare response protocol data */
	rsp->dcid = sys_cpu_to_le16(le_chan->rx.cid);
	rsp->mps = sys_cpu_to_le16(le_chan->rx.mps);
	rsp->mtu = sys_cpu_to_le16(le_chan->rx.mtu);
	rsp->credits = sys_cpu_to_le16(le_chan->rx.credits);

	result = BT_L2CAP_LE_SUCCESS;

rsp:
	rsp->result = sys_cpu_to_le16(result);

	if (bt_l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf)) {
		net_buf_unref(buf);
		return;
	}

	/* Raise connected callback on success.
	 * Note: 'chan' is only valid when result is SUCCESS, which is why the
	 * callback is gated on the result code.
	 */
	if ((result == BT_L2CAP_LE_SUCCESS) && (chan->ops->connected != NULL)) {
		chan->ops->connected(chan);
	}
}
1203 
1204 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Handle an incoming L2CAP Enhanced Credit Based Connection Request
 * (up to L2CAP_ECRED_CHAN_MAX_PER_REQ channels in one request).
 *
 * Each requested source CID is accepted or refused individually; the
 * response carries one destination CID per requested channel, with
 * 0x0000 marking channels that were not established. 'result' reflects
 * the last failure (or overall success).
 */
static void le_ecred_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan[L2CAP_ECRED_CHAN_MAX_PER_REQ];
	struct bt_l2cap_le_chan *ch = NULL;
	struct bt_l2cap_server *server;
	struct bt_l2cap_ecred_conn_req *req;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t mtu, mps, credits, result = BT_L2CAP_LE_SUCCESS;
	uint16_t psm = 0x0000;
	uint16_t scid, dcid[L2CAP_ECRED_CHAN_MAX_PER_REQ];
	int i = 0;
	uint8_t req_cid_count;
	bool rsp_queued = false;

	/* set dcid to zeros here, in case of all connections refused error */
	memset(dcid, 0, sizeof(dcid));
	if (buf->len < sizeof(*req)) {
		LOG_ERR("Too small LE conn req packet size");
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		req_cid_count = 0;
		goto response;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));
	/* Remaining payload is the list of 16-bit source CIDs */
	req_cid_count = buf->len / sizeof(scid);

	/* More CIDs than the spec maximum of 5: reject, but still answer
	 * with a full-size (all-zero dcid) response.
	 */
	if (buf->len > sizeof(dcid)) {
		LOG_ERR("Too large LE conn req packet size");
		req_cid_count = L2CAP_ECRED_CHAN_MAX_PER_REQ;
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	psm = sys_le16_to_cpu(req->psm);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	LOG_DBG("psm 0x%02x mtu %u mps %u credits %u", psm, mtu, mps, credits);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MTU) {
		LOG_ERR("Invalid ecred conn req params. mtu %u mps %u", mtu, mps);
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	/* Check if there is a server registered */
	server = bt_l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto response;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto response;
	}

	/* Try to establish one channel per requested source CID */
	while (buf->len >= sizeof(scid)) {
		uint16_t rc;
		scid = net_buf_pull_le16(buf);

		rc = l2cap_chan_accept(conn, server, scid, mtu, mps,
				credits, &chan[i]);
		if (rc != BT_L2CAP_LE_SUCCESS) {
			result = rc;
		}
		switch (rc) {
		case BT_L2CAP_LE_SUCCESS:
			ch = BT_L2CAP_LE_CHAN(chan[i]);
			dcid[i++] = sys_cpu_to_le16(ch->rx.cid);
			continue;
		/* Some connections refused - invalid Source CID */
		/* Some connections refused - Source CID already allocated */
		/* Some connections refused - not enough resources
		 * available.
		 */
		default:
			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			dcid[i++] = 0x0000;
			continue;
		}
	}

response:
	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_ECRED_CONN_RSP, ident,
				      sizeof(*rsp) +
				      (sizeof(scid) * req_cid_count));
	if (!buf) {
		goto callback;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));
	/* 'ch' is the last successfully accepted channel; all accepted
	 * channels share the same locally chosen RX parameters.
	 */
	if (ch) {
		rsp->mps = sys_cpu_to_le16(ch->rx.mps);
		rsp->mtu = sys_cpu_to_le16(ch->rx.mtu);
		rsp->credits = sys_cpu_to_le16(ch->rx.credits);
	}
	rsp->result = sys_cpu_to_le16(result);

	net_buf_add_mem(buf, dcid, sizeof(scid) * req_cid_count);

	if (bt_l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf)) {
		net_buf_unref(buf);
		goto callback;
	}

	rsp_queued = true;

callback:
	if (ecred_cb && ecred_cb->ecred_conn_req) {
		ecred_cb->ecred_conn_req(conn, result, psm);
	}
	if (rsp_queued) {
		for (i = 0; i < req_cid_count; i++) {
			/* Raise connected callback for established channels */
			if ((dcid[i] != 0x00) && (chan[i]->ops->connected != NULL)) {
				chan[i]->ops->connected(chan[i]);
			}
		}
	}
}
1333 
le_ecred_reconf_req(struct bt_l2cap * l2cap,uint8_t ident,struct net_buf * buf)1334 static void le_ecred_reconf_req(struct bt_l2cap *l2cap, uint8_t ident,
1335 				struct net_buf *buf)
1336 {
1337 	struct bt_conn *conn = l2cap->chan.chan.conn;
1338 	struct bt_l2cap_chan *chans[L2CAP_ECRED_CHAN_MAX_PER_REQ];
1339 	struct bt_l2cap_ecred_reconf_req *req;
1340 	struct bt_l2cap_ecred_reconf_rsp *rsp;
1341 	uint16_t mtu, mps;
1342 	uint16_t scid, result = BT_L2CAP_RECONF_SUCCESS;
1343 	int chan_count = 0;
1344 	bool mps_reduced = false;
1345 
1346 	if (buf->len < sizeof(*req)) {
1347 		LOG_ERR("Too small ecred reconf req packet size");
1348 		return;
1349 	}
1350 
1351 	req = net_buf_pull_mem(buf, sizeof(*req));
1352 
1353 	mtu = sys_le16_to_cpu(req->mtu);
1354 	mps = sys_le16_to_cpu(req->mps);
1355 
1356 	if (mps < L2CAP_ECRED_MIN_MTU) {
1357 		result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
1358 		goto response;
1359 	}
1360 
1361 	if (mtu < L2CAP_ECRED_MIN_MTU) {
1362 		result = BT_L2CAP_RECONF_INVALID_MTU;
1363 		goto response;
1364 	}
1365 
1366 	/* The specification only allows up to 5 CIDs in this packet */
1367 	if (buf->len > (L2CAP_ECRED_CHAN_MAX_PER_REQ * sizeof(scid))) {
1368 		result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
1369 		goto response;
1370 	}
1371 
1372 	while (buf->len >= sizeof(scid)) {
1373 		struct bt_l2cap_chan *chan;
1374 		scid = net_buf_pull_le16(buf);
1375 		chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
1376 		if (!chan) {
1377 			result = BT_L2CAP_RECONF_INVALID_CID;
1378 			goto response;
1379 		}
1380 
1381 		if (BT_L2CAP_LE_CHAN(chan)->tx.mtu > mtu) {
1382 			LOG_ERR("chan %p decreased MTU %u -> %u", chan,
1383 				BT_L2CAP_LE_CHAN(chan)->tx.mtu, mtu);
1384 			result = BT_L2CAP_RECONF_INVALID_MTU;
1385 			goto response;
1386 		}
1387 
1388 		if (BT_L2CAP_LE_CHAN(chan)->tx.mps > mps) {
1389 			mps_reduced = true;
1390 		}
1391 
1392 		chans[chan_count] = chan;
1393 		chan_count++;
1394 	}
1395 
1396 	/* As per BT Core Spec V5.2 Vol. 3, Part A, section 7.11
1397 	 * The request (...) shall not decrease the MPS of a channel
1398 	 * if more than one channel is specified.
1399 	 */
1400 	if (mps_reduced && chan_count > 1) {
1401 		result = BT_L2CAP_RECONF_INVALID_MPS;
1402 		goto response;
1403 	}
1404 
1405 	for (int i = 0; i < chan_count; i++) {
1406 		BT_L2CAP_LE_CHAN(chans[i])->tx.mtu = mtu;
1407 		BT_L2CAP_LE_CHAN(chans[i])->tx.mps = mps;
1408 
1409 		if (chans[i]->ops->reconfigured) {
1410 			chans[i]->ops->reconfigured(chans[i]);
1411 		}
1412 	}
1413 
1414 	LOG_DBG("mtu %u mps %u", mtu, mps);
1415 
1416 response:
1417 	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_ECRED_RECONF_RSP, ident,
1418 				      sizeof(*rsp));
1419 
1420 	rsp = net_buf_add(buf, sizeof(*rsp));
1421 	rsp->result = sys_cpu_to_le16(result);
1422 
1423 	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
1424 }
1425 
/* Handle an incoming L2CAP Credit Based Reconfiguration Response.
 *
 * Applies the pending RX MTU to every channel that shares the request
 * ident (a single reconfiguration request may cover several channels)
 * and notifies each channel through the reconfigured() callback.
 */
static void le_ecred_reconf_rsp(struct bt_l2cap *l2cap, uint8_t ident,
				struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_ecred_reconf_rsp *rsp;
	struct bt_l2cap_le_chan *ch;
	uint16_t result;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small ecred reconf rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	result = sys_le16_to_cpu(rsp->result);

	while ((ch = l2cap_lookup_ident(conn, ident))) {
		/* Stop timer started on REQ send. The timer is only set on one
		 * of the channels, but we don't want to make assumptions on
		 * which one it is.
		 */
		k_work_cancel_delayable(&ch->rtx_work);

		if (result == BT_L2CAP_LE_SUCCESS) {
			ch->rx.mtu = ch->pending_rx_mtu;
		}

		/* Clear pending state; resetting ident also ends the
		 * l2cap_lookup_ident() iteration for this channel.
		 */
		ch->pending_rx_mtu = 0;
		ch->ident = 0U;

		if (ch->chan.ops->reconfigured) {
			ch->chan.ops->reconfigured(&ch->chan);
		}
	}
}
1461 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
1462 
l2cap_remove_rx_cid(struct bt_conn * conn,uint16_t cid)1463 static struct bt_l2cap_le_chan *l2cap_remove_rx_cid(struct bt_conn *conn,
1464 						    uint16_t cid)
1465 {
1466 	struct bt_l2cap_chan *chan;
1467 	sys_snode_t *prev = NULL;
1468 
1469 	/* Protect fixed channels against accidental removal */
1470 	if (!L2CAP_LE_CID_IS_DYN(cid)) {
1471 		return NULL;
1472 	}
1473 
1474 	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
1475 		if (BT_L2CAP_LE_CHAN(chan)->rx.cid == cid) {
1476 			sys_slist_remove(&conn->channels, prev, &chan->node);
1477 			return BT_L2CAP_LE_CHAN(chan);
1478 		}
1479 
1480 		prev = &chan->node;
1481 	}
1482 
1483 	return NULL;
1484 }
1485 
le_disconn_req(struct bt_l2cap * l2cap,uint8_t ident,struct net_buf * buf)1486 static void le_disconn_req(struct bt_l2cap *l2cap, uint8_t ident,
1487 			   struct net_buf *buf)
1488 {
1489 	struct bt_conn *conn = l2cap->chan.chan.conn;
1490 	struct bt_l2cap_le_chan *chan;
1491 	struct bt_l2cap_disconn_req *req = (void *)buf->data;
1492 	struct bt_l2cap_disconn_rsp *rsp;
1493 	uint16_t dcid;
1494 
1495 	if (buf->len < sizeof(*req)) {
1496 		LOG_ERR("Too small LE conn req packet size");
1497 		return;
1498 	}
1499 
1500 	dcid = sys_le16_to_cpu(req->dcid);
1501 
1502 	LOG_DBG("dcid 0x%04x scid 0x%04x", dcid, sys_le16_to_cpu(req->scid));
1503 
1504 	chan = l2cap_remove_rx_cid(conn, dcid);
1505 	if (!chan) {
1506 		struct bt_l2cap_cmd_reject_cid_data data;
1507 
1508 		data.scid = req->scid;
1509 		data.dcid = req->dcid;
1510 
1511 		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID, &data,
1512 				  sizeof(data));
1513 		return;
1514 	}
1515 
1516 	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_DISCONN_RSP, ident,
1517 				      sizeof(*rsp));
1518 	if (!buf) {
1519 		return;
1520 	}
1521 
1522 	rsp = net_buf_add(buf, sizeof(*rsp));
1523 	rsp->dcid = sys_cpu_to_le16(chan->rx.cid);
1524 	rsp->scid = sys_cpu_to_le16(chan->tx.cid);
1525 
1526 	bt_l2cap_chan_del(&chan->chan);
1527 
1528 	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
1529 }
1530 
l2cap_change_security(struct bt_l2cap_le_chan * chan,uint16_t err)1531 static int l2cap_change_security(struct bt_l2cap_le_chan *chan, uint16_t err)
1532 {
1533 	struct bt_conn *conn = chan->chan.conn;
1534 	bt_security_t sec;
1535 	int ret;
1536 
1537 	if (atomic_test_bit(chan->chan.status,
1538 			    BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
1539 		return -EINPROGRESS;
1540 	}
1541 
1542 	switch (err) {
1543 	case BT_L2CAP_LE_ERR_ENCRYPTION:
1544 		if (conn->sec_level >= BT_SECURITY_L2) {
1545 			return -EALREADY;
1546 		}
1547 
1548 		sec = BT_SECURITY_L2;
1549 		break;
1550 	case BT_L2CAP_LE_ERR_AUTHENTICATION:
1551 		if (conn->sec_level < BT_SECURITY_L2) {
1552 			sec = BT_SECURITY_L2;
1553 		} else if (conn->sec_level < BT_SECURITY_L3) {
1554 			sec = BT_SECURITY_L3;
1555 		} else if (conn->sec_level < BT_SECURITY_L4) {
1556 			sec = BT_SECURITY_L4;
1557 		} else {
1558 			return -EALREADY;
1559 		}
1560 		break;
1561 	default:
1562 		return -EINVAL;
1563 	}
1564 
1565 	ret = bt_conn_set_security(chan->chan.conn, sec);
1566 	if (ret < 0) {
1567 		return ret;
1568 	}
1569 
1570 	atomic_set_bit(chan->chan.status, BT_L2CAP_STATUS_ENCRYPT_PENDING);
1571 
1572 	return 0;
1573 }
1574 
1575 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Handle an incoming L2CAP Enhanced Credit Based Connection Response.
 *
 * Walks every local channel that carries the request ident and, based
 * on the result code, either completes the connection (consuming one
 * dcid from the response per channel), retries after a security
 * upgrade, or tears the channel down.
 */
static void le_ecred_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t dcid, mtu, mps, credits, result, psm;
	uint8_t attempted = 0;
	uint8_t succeeded = 0;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small ecred conn rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	LOG_DBG("mtu 0x%04x mps 0x%04x credits 0x%04x result %u", mtu, mps, credits, result);

	/* Grab the PSM before the channels are potentially deleted below,
	 * so the ecred callback can report it.
	 */
	chan = l2cap_lookup_ident(conn, ident);
	if (chan) {
		psm = chan->psm;
	} else {
		psm = 0x0000;
	}

	switch (result) {
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		while ((chan = l2cap_lookup_ident(conn, ident))) {

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->rtx_work);

			/* If security needs changing wait it to be completed */
			if (!l2cap_change_security(chan, result)) {
				return;
			}
			/* Security cannot be raised: drop the channel */
			bt_l2cap_chan_remove(conn, &chan->chan);
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	case BT_L2CAP_LE_SUCCESS:
	/* Some connections refused - invalid Source CID */
	case BT_L2CAP_LE_ERR_INVALID_SCID:
	/* Some connections refused - Source CID already allocated */
	case BT_L2CAP_LE_ERR_SCID_IN_USE:
	/* Some connections refused - not enough resources available */
	case BT_L2CAP_LE_ERR_NO_RESOURCES:
		while ((chan = l2cap_lookup_ident(conn, ident))) {
			struct bt_l2cap_chan *c;

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->rtx_work);

			/* Fewer dcid values than pending channels: the
			 * extra channels were never answered, drop them.
			 */
			if (buf->len < sizeof(dcid)) {
				LOG_ERR("Fewer dcid values than expected");
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			dcid = net_buf_pull_le16(buf);
			attempted++;

			LOG_DBG("dcid 0x%04x", dcid);

			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			if (!dcid) {
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			c = bt_l2cap_le_lookup_tx_cid(conn, dcid);
			if (c) {
				/* If a device receives a
				 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet
				 * with an already assigned Destination CID,
				 * then both the original channel and the new
				 * channel shall be immediately discarded and
				 * not used.
				 */
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				bt_l2cap_chan_disconnect(c);
				continue;
			}

			chan->tx.cid = dcid;

			/* Reset ident now that the request got its answer */
			chan->ident = 0U;

			chan->tx.mtu = mtu;
			chan->tx.mps = mps;

			/* Update state */
			bt_l2cap_chan_set_state(&chan->chan,
						BT_L2CAP_CONNECTED);

			if (chan->chan.ops->connected) {
				chan->chan.ops->connected(&chan->chan);
			}

			/* Give credits */
			l2cap_chan_tx_give_credits(chan, credits);

			succeeded++;
		}
		break;
	case BT_L2CAP_LE_ERR_PSM_NOT_SUPP:
	default:
		/* Whole request refused: tear down every pending channel */
		while ((chan = l2cap_remove_ident(conn, ident))) {
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	}

	if (ecred_cb && ecred_cb->ecred_conn_rsp) {
		ecred_cb->ecred_conn_rsp(conn, result, attempted, succeeded, psm);
	}
}
1704 #endif /* CONFIG_BT_L2CAP_ECRED */
1705 
/* Handle an incoming L2CAP LE Credit Based Connection Response.
 *
 * On success, stores the peer's channel parameters and raises the
 * connected() callback. On security-related rejections, attempts to
 * raise the link security and keeps the channel for a retry. All other
 * results destroy the channel.
 */
static void le_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_le_conn_rsp *rsp = (void *)buf->data;
	uint16_t dcid, mtu, mps, credits, result;

	if (buf->len < sizeof(*rsp)) {
		LOG_ERR("Too small LE conn rsp packet size");
		return;
	}

	dcid = sys_le16_to_cpu(rsp->dcid);
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	LOG_DBG("dcid 0x%04x mtu %u mps %u credits %u result 0x%04x", dcid, mtu, mps, credits,
		result);

	/* Keep the channel in case of security errors */
	if (result == BT_L2CAP_LE_SUCCESS ||
	    result == BT_L2CAP_LE_ERR_AUTHENTICATION ||
	    result == BT_L2CAP_LE_ERR_ENCRYPTION) {
		chan = l2cap_lookup_ident(conn, ident);
	} else {
		chan = l2cap_remove_ident(conn, ident);
	}

	if (!chan) {
		LOG_ERR("Cannot find channel for ident %u", ident);
		return;
	}

	/* Cancel RTX work */
	k_work_cancel_delayable(&chan->rtx_work);

	/* Reset ident since it got a response */
	chan->ident = 0U;

	switch (result) {
	case BT_L2CAP_LE_SUCCESS:
		chan->tx.cid = dcid;
		chan->tx.mtu = mtu;
		chan->tx.mps = mps;

		/* Update state */
		bt_l2cap_chan_set_state(&chan->chan, BT_L2CAP_CONNECTED);

		if (chan->chan.ops->connected) {
			chan->chan.ops->connected(&chan->chan);
		}

		/* Give credits */
		l2cap_chan_tx_give_credits(chan, credits);

		break;
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		/* If security needs changing wait it to be completed */
		if (l2cap_change_security(chan, result) == 0) {
			return;
		}
		/* Security upgrade not possible: fall through to deletion */
		bt_l2cap_chan_remove(conn, &chan->chan);
		__fallthrough;
	default:
		bt_l2cap_chan_del(&chan->chan);
	}
}
1777 
le_disconn_rsp(struct bt_l2cap * l2cap,uint8_t ident,struct net_buf * buf)1778 static void le_disconn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
1779 			   struct net_buf *buf)
1780 {
1781 	struct bt_conn *conn = l2cap->chan.chan.conn;
1782 	struct bt_l2cap_le_chan *chan;
1783 	struct bt_l2cap_disconn_rsp *rsp = (void *)buf->data;
1784 	uint16_t scid;
1785 
1786 	if (buf->len < sizeof(*rsp)) {
1787 		LOG_ERR("Too small LE disconn rsp packet size");
1788 		return;
1789 	}
1790 
1791 	scid = sys_le16_to_cpu(rsp->scid);
1792 
1793 	LOG_DBG("dcid 0x%04x scid 0x%04x", sys_le16_to_cpu(rsp->dcid), scid);
1794 
1795 	chan = l2cap_remove_rx_cid(conn, scid);
1796 	if (!chan) {
1797 		return;
1798 	}
1799 
1800 	bt_l2cap_chan_del(&chan->chan);
1801 }
1802 
l2cap_alloc_seg(struct net_buf * buf,struct bt_l2cap_le_chan * ch)1803 static inline struct net_buf *l2cap_alloc_seg(struct net_buf *buf, struct bt_l2cap_le_chan *ch)
1804 {
1805 	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
1806 	struct net_buf *seg;
1807 
1808 	/* Use the dedicated segment callback if registered */
1809 	if (ch->chan.ops->alloc_seg) {
1810 		seg = ch->chan.ops->alloc_seg(&ch->chan);
1811 		__ASSERT_NO_MSG(seg);
1812 	} else {
1813 		/* Try to use original pool if possible */
1814 		seg = net_buf_alloc(pool, K_NO_WAIT);
1815 	}
1816 
1817 	if (seg) {
1818 		net_buf_reserve(seg, BT_L2CAP_CHAN_SEND_RESERVE);
1819 		return seg;
1820 	}
1821 
1822 	/* Fallback to using global connection tx pool */
1823 	return bt_l2cap_create_pdu_timeout(NULL, 0, K_NO_WAIT);
1824 }
1825 
/* Produce the next PDU-sized segment of @p buf for channel @p ch.
 *
 * When the remaining data (plus SDU header, @p sdu_hdr_len is either 0
 * or BT_L2CAP_SDU_HDR_SIZE) fits in one MPS and the original buffer has
 * enough headroom, a new reference to @p buf itself is returned and no
 * copy is made. Otherwise a fresh buffer is allocated and up to MPS
 * bytes are copied out of @p buf (which is pulled accordingly).
 *
 * Returns NULL when no segment buffer could be allocated; @p buf is
 * then left untouched.
 */
static struct net_buf *l2cap_chan_create_seg(struct bt_l2cap_le_chan *ch,
					     struct net_buf *buf,
					     size_t sdu_hdr_len)
{
	struct net_buf *seg;
	uint16_t headroom;
	uint16_t len;

	/* Segment if data (+ data headroom) is bigger than MPS */
	if (buf->len + sdu_hdr_len > ch->tx.mps) {
		goto segment;
	}

	headroom = BT_L2CAP_CHAN_SEND_RESERVE + sdu_hdr_len;

	/* Check if original buffer has enough headroom and don't have any
	 * fragments.
	 */
	if (net_buf_headroom(buf) >= headroom && !buf->frags) {
		if (sdu_hdr_len) {
			/* Push SDU length if set */
			net_buf_push_le16(buf, net_buf_frags_len(buf));
		}
		/* Caller receives its own reference to the original buffer */
		return net_buf_ref(buf);
	} else {
		/* Unnecessary fragmentation. Ensure the source buffer has
		 * BT_L2CAP_SDU_BUF_SIZE(0) headroom.
		 */
		LOG_DBG("not enough headroom on %p", buf);
	}

segment:
	seg = l2cap_alloc_seg(buf, ch);

	if (!seg) {
		return NULL;
	}

	if (sdu_hdr_len) {
		/* First segment carries the total SDU length */
		net_buf_add_le16(seg, net_buf_frags_len(buf));
	}

	/* Don't send more that TX MPS including SDU length */
	len = MIN(net_buf_tailroom(seg), ch->tx.mps - sdu_hdr_len);
	/* Limit if original buffer is smaller than the segment */
	len = MIN(buf->len, len);
	net_buf_add_mem(seg, buf->data, len);
	/* Consume the copied bytes from the source buffer */
	net_buf_pull(buf, len);

	LOG_DBG("ch %p seg %p len %u", ch, seg, seg->len);

	return seg;
}
1879 
l2cap_chan_tx_resume(struct bt_l2cap_le_chan * ch)1880 static void l2cap_chan_tx_resume(struct bt_l2cap_le_chan *ch)
1881 {
1882 	if (!atomic_get(&ch->tx.credits) ||
1883 	    (k_fifo_is_empty(&ch->tx_queue) && !ch->tx_buf)) {
1884 		return;
1885 	}
1886 
1887 	k_work_reschedule(&ch->tx_work, K_NO_WAIT);
1888 }
1889 
/* Transmission callback for the final segment of an SDU.
 *
 * Frees the per-SDU TX meta data, raises the channel's sent() callback
 * and the user callback (if any), and resumes transmission on the
 * channel. On error only the user callback is invoked.
 */
static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data, int err)
{
	struct l2cap_tx_meta_data *data = user_data;
	struct bt_l2cap_chan *chan;
	/* Copy out of the meta data before freeing it below */
	bt_conn_tx_cb_t cb = data->cb;
	void *cb_user_data = data->user_data;
	uint16_t cid = data->cid;

	LOG_DBG("conn %p CID 0x%04x err %d", conn, cid, err);

	free_tx_meta_data(data);

	if (err) {
		if (cb) {
			cb(conn, cb_user_data, err);
		}

		return;
	}

	chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
	if (!chan) {
		/* Received SDU sent callback for disconnected channel */
		return;
	}

	if (chan->ops->sent) {
		chan->ops->sent(chan);
	}

	if (cb) {
		cb(conn, cb_user_data, 0);
	}

	/* Resume the current channel */
	l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
}
1927 
l2cap_chan_seg_sent(struct bt_conn * conn,void * user_data,int err)1928 static void l2cap_chan_seg_sent(struct bt_conn *conn, void *user_data, int err)
1929 {
1930 	struct l2cap_tx_meta_data *data = user_data;
1931 	struct bt_l2cap_chan *chan;
1932 
1933 	LOG_DBG("conn %p CID 0x%04x err %d", conn, data->cid, err);
1934 
1935 	if (err) {
1936 		return;
1937 	}
1938 
1939 	chan = bt_l2cap_le_lookup_tx_cid(conn, data->cid);
1940 	if (!chan) {
1941 		/* Received segment sent callback for disconnected channel */
1942 		return;
1943 	}
1944 
1945 	l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
1946 }
1947 
/* Atomically decrement @p target unless it is already zero.
 *
 * Lock-free compare-and-swap loop: returns true when a unit was
 * successfully consumed, false when the counter was zero.
 */
static bool test_and_dec(atomic_t *target)
{
	atomic_t val;

	do {
		val = atomic_get(target);
		if (val == 0) {
			return false;
		}
	} while (!atomic_cas(target, val, val - 1));

	return true;
}
1963 
/* This returns -EAGAIN whenever a segment cannot be sent immediately, which
 * can happen under the following circumstances:
 *
 * 1. There are no credits
 * 2. There are no buffers
 * 3. There are no TX contexts
 *
 * In all cases the original buffer is unaffected so it can be pushed back to
 * be sent later.
 */
/* Send one segment of @p buf on channel @p ch, consuming one TX credit.
 *
 * @p sdu_hdr_len is BT_L2CAP_SDU_HDR_SIZE for the first segment of an
 * SDU (the segment then carries the SDU length header) and 0 otherwise.
 *
 * Returns the number of payload bytes sent (excluding the SDU header),
 * -EAGAIN when the segment could not be sent right now (credit is
 * returned and @p buf restored), or another negative error.
 */
static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch,
			      struct net_buf *buf, uint16_t sdu_hdr_len)
{
	struct net_buf *seg;
	struct net_buf_simple_state state;
	int len, err;

	if (!test_and_dec(&ch->tx.credits)) {
		LOG_DBG("No credits to transmit packet");
		return -EAGAIN;
	}

	/* Save state so it can be restored if we failed to send */
	net_buf_simple_save(&buf->b, &state);

	seg = l2cap_chan_create_seg(ch, buf, sdu_hdr_len);
	if (!seg) {
		/* Return the credit taken above */
		atomic_inc(&ch->tx.credits);
		return -EAGAIN;
	}

	LOG_DBG("ch %p cid 0x%04x len %u credits %lu", ch, ch->tx.cid, seg->len,
		atomic_get(&ch->tx.credits));

	len = seg->len - sdu_hdr_len;

	/* Set a callback if there is no data left in the buffer:
	 * seg == buf means the whole buffer went out unsegmented, and
	 * !buf->len means this was the last segment - in both cases the
	 * SDU is complete and the sdu_sent callback fires.
	 */
	if (buf == seg || !buf->len) {
		err = bt_l2cap_send_cb(ch->chan.conn, ch->tx.cid, seg,
				       l2cap_chan_sdu_sent,
				       l2cap_tx_meta_data(buf));
	} else {
		err = bt_l2cap_send_cb(ch->chan.conn, ch->tx.cid, seg,
				       l2cap_chan_seg_sent,
				       l2cap_tx_meta_data(buf));
	}

	if (err) {
		LOG_DBG("Unable to send seg %d", err);
		atomic_inc(&ch->tx.credits);

		/* The host takes ownership of the reference in seg when
		 * bt_l2cap_send_cb is successful. The call returned an error,
		 * so we must get rid of the reference that was taken in
		 * l2cap_chan_create_seg.
		 */
		net_buf_unref(seg);

		if (err == -ENOBUFS) {
			/* Restore state since segment could not be sent */
			net_buf_simple_restore(&buf->b, &state);
			return -EAGAIN;
		}

		return err;
	}

	/* Check if there is no credits left clear output status and notify its
	 * change.
	 */
	if (!atomic_get(&ch->tx.credits)) {
		atomic_clear_bit(ch->chan.status, BT_L2CAP_STATUS_OUT);
		if (ch->chan.ops->status) {
			ch->chan.ops->status(&ch->chan, ch->chan.status);
		}
	}

	return len;
}
2043 
/* Send (the remainder of) an SDU held in the fragment chain *buf.
 *
 * @p sent is how many bytes of the SDU have already gone out in earlier
 * attempts (0 for a fresh SDU, in which case the first segment carries
 * the SDU length header).
 *
 * Returns the total bytes sent on completion, -EMSGSIZE when the SDU
 * exceeds the peer's MTU, or a negative error from l2cap_chan_le_send()
 * - on -EAGAIN the progress is stored in the TX meta data and *buf is
 * updated to the fragment to resume from.
 */
static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
				  struct net_buf **buf, uint16_t sent)
{
	int ret, total_len;
	struct net_buf *frag;

	total_len = net_buf_frags_len(*buf) + sent;

	if (total_len > ch->tx.mtu) {
		return -EMSGSIZE;
	}

	/* Skip an already-drained head fragment */
	frag = *buf;
	if (!frag->len && frag->frags) {
		frag = frag->frags;
	}

	if (!sent) {
		/* Add SDU length for the first segment */
		ret = l2cap_chan_le_send(ch, frag, BT_L2CAP_SDU_HDR_SIZE);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/* Store sent data into user_data */
				l2cap_tx_meta_data(frag)->sent = sent;
			}
			*buf = frag;
			return ret;
		}
		sent = ret;
	}

	/* Send remaining segments */
	for (ret = 0; sent < total_len; sent += ret) {
		/* Proceed to next fragment */
		if (!frag->len) {
			frag = net_buf_frag_del(NULL, frag);
		}

		ret = l2cap_chan_le_send(ch, frag, 0);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/* Store sent data into user_data */
				l2cap_tx_meta_data(frag)->sent = sent;
			}
			*buf = frag;
			return ret;
		}
	}

	LOG_DBG("ch %p cid 0x%04x sent %u total_len %u", ch, ch->tx.cid, sent, total_len);

	/* Drop our reference to the last (now fully sent) fragment */
	net_buf_unref(frag);

	return sent;
}
2099 
/* Handle an incoming LE Flow Control Credit signaling packet: validate
 * it, credit the matching TX channel, and resume any transmissions that
 * stalled waiting for credits. The channel is disconnected if the added
 * credits would push the total past UINT16_MAX.
 */
static void le_credits(struct bt_l2cap *l2cap, uint8_t ident,
		       struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_le_credits *ev = (void *)buf->data;
	struct bt_l2cap_le_chan *le_chan;
	uint16_t credits, cid;

	if (buf->len < sizeof(*ev)) {
		LOG_ERR("Too small LE Credits packet size");
		return;
	}

	cid = sys_le16_to_cpu(ev->cid);
	credits = sys_le16_to_cpu(ev->credits);

	LOG_DBG("cid 0x%04x credits %u", cid, credits);

	/* The CID in the packet refers to the peer's RX CID, i.e. our TX CID */
	chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
	if (!chan) {
		LOG_ERR("Unable to find channel of LE Credits packet");
		return;
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	if (atomic_get(&le_chan->tx.credits) + credits > UINT16_MAX) {
		LOG_ERR("Credits overflow");
		bt_l2cap_chan_disconnect(chan);
		return;
	}

	l2cap_chan_tx_give_credits(le_chan, credits);

	LOG_DBG("chan %p total credits %lu", le_chan, atomic_get(&le_chan->tx.credits));

	l2cap_chan_tx_resume(le_chan);
}
2139 
/* Handle an incoming Command Reject: if the ident matches an outstanding
 * channel request, that channel cannot be established, so tear it down.
 */
static void reject_cmd(struct bt_l2cap *l2cap, uint8_t ident,
		       struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *pending;

	/* Check if there is a outstanding channel */
	pending = l2cap_remove_ident(conn, ident);
	if (pending != NULL) {
		bt_l2cap_chan_del(&pending->chan);
	}
}
2154 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2155 
/* Receive callback for the LE signaling fixed channel (CID 0x0005).
 *
 * Validates the signaling header (size, length-field consistency,
 * non-zero ident) and dispatches to the handler for the PDU opcode.
 * Unknown/unsupported opcodes are answered with a Command Reject
 * (Command Not Understood). Always returns 0; the caller owns and
 * frees @p buf.
 */
static int l2cap_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	struct bt_l2cap_le_chan *l2chan = CONTAINER_OF(chan, struct bt_l2cap_le_chan, chan);
	struct bt_l2cap *l2cap = CONTAINER_OF(l2chan, struct bt_l2cap, chan);
	struct bt_l2cap_sig_hdr *hdr;
	uint16_t len;

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Too small L2CAP signaling PDU");
		return 0;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	len = sys_le16_to_cpu(hdr->len);

	LOG_DBG("Signaling code 0x%02x ident %u len %u", hdr->code, hdr->ident, len);

	/* The header's length field must match the remaining payload exactly */
	if (buf->len != len) {
		LOG_ERR("L2CAP length mismatch (%u != %u)", buf->len, len);
		return 0;
	}

	/* Ident 0 is rejected (reserved value, never valid in a request) */
	if (!hdr->ident) {
		LOG_ERR("Invalid ident value in L2CAP PDU");
		return 0;
	}

	switch (hdr->code) {
	case BT_L2CAP_CONN_PARAM_RSP:
		le_conn_param_rsp(l2cap, buf);
		break;
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	case BT_L2CAP_LE_CONN_REQ:
		le_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_LE_CONN_RSP:
		le_conn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_REQ:
		le_disconn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_RSP:
		le_disconn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_LE_CREDITS:
		le_credits(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_CMD_REJECT:
		reject_cmd(l2cap, hdr->ident, buf);
		break;
#if defined(CONFIG_BT_L2CAP_ECRED)
	case BT_L2CAP_ECRED_CONN_REQ:
		le_ecred_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_CONN_RSP:
		le_ecred_conn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_RECONF_REQ:
		le_ecred_reconf_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_RECONF_RSP:
		le_ecred_reconf_rsp(l2cap, hdr->ident, buf);
		break;
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */
#else
	case BT_L2CAP_CMD_REJECT:
		/* Ignored */
		break;
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
	case BT_L2CAP_CONN_PARAM_REQ:
		/* Only a central can grant a parameter update; otherwise
		 * fall through and reject the request.
		 */
		if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
			le_conn_param_update_req(l2cap, hdr->ident, buf);
			break;
		}
		__fallthrough;
	default:
		LOG_WRN("Rejecting unknown L2CAP PDU code 0x%02x", hdr->code);
		l2cap_send_reject(chan->conn, hdr->ident,
				  BT_L2CAP_REJ_NOT_UNDERSTOOD, NULL, 0);
		break;
	}

	return 0;
}
2240 
2241 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
l2cap_chan_shutdown(struct bt_l2cap_chan * chan)2242 static void l2cap_chan_shutdown(struct bt_l2cap_chan *chan)
2243 {
2244 	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2245 	struct net_buf *buf;
2246 
2247 	LOG_DBG("chan %p", chan);
2248 
2249 	atomic_set_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN);
2250 
2251 	/* Destroy segmented SDU if it exists */
2252 	if (le_chan->_sdu) {
2253 		net_buf_unref(le_chan->_sdu);
2254 		le_chan->_sdu = NULL;
2255 		le_chan->_sdu_len = 0U;
2256 	}
2257 
2258 	/* Cleanup outstanding request */
2259 	if (le_chan->tx_buf) {
2260 		net_buf_unref(le_chan->tx_buf);
2261 		le_chan->tx_buf = NULL;
2262 	}
2263 
2264 	/* Remove buffers on the TX queue */
2265 	while ((buf = net_buf_get(&le_chan->tx_queue, K_NO_WAIT))) {
2266 		net_buf_unref(buf);
2267 	}
2268 
2269 	/* Remove buffers on the RX queue */
2270 	while ((buf = net_buf_get(&le_chan->rx_queue, K_NO_WAIT))) {
2271 		net_buf_unref(buf);
2272 	}
2273 
2274 	/* Update status */
2275 	if (chan->ops->status) {
2276 		chan->ops->status(chan, chan->status);
2277 	}
2278 }
2279 
2280 /** @brief Get @c chan->state.
2281  *
2282  * This field does not exist when @kconfig{CONFIG_BT_L2CAP_DYNAMIC_CHANNEL} is
2283  * disabled. In that case, this function returns @ref BT_L2CAP_CONNECTED since
2284  * the struct can only represent static channels in that case and static
2285  * channels are always connected.
2286  */
static inline bt_l2cap_chan_state_t bt_l2cap_chan_get_state(struct bt_l2cap_chan *chan)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Dynamic channels track their connection state explicitly */
	return BT_L2CAP_LE_CHAN(chan)->state;
#else
	/* Only static channels exist, and those are always connected */
	return BT_L2CAP_CONNECTED;
#endif
}
2295 
/* Grant @p credits to the remote by sending an LE Flow Control Credit
 * PDU, and record them as our RX credits. Must only be called on a
 * connected channel with zero RX credits remaining (both asserted). If
 * the signaling PDU cannot be allocated the channel is shut down,
 * since a disconnect PDU would need an allocation too.
 */
static void l2cap_chan_send_credits(struct bt_l2cap_le_chan *chan,
				    uint16_t credits)
{
	struct bt_l2cap_le_credits *ev;
	struct net_buf *buf;

	__ASSERT_NO_MSG(bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED);

	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_LE_CREDITS, get_ident(),
				      sizeof(*ev));
	if (!buf) {
		LOG_ERR("Unable to send credits update");
		/* Disconnect would probably not work either so the only
		 * option left is to shutdown the channel.
		 */
		l2cap_chan_shutdown(&chan->chan);
		return;
	}

	__ASSERT_NO_MSG(atomic_get(&chan->rx.credits) == 0);
	atomic_set(&chan->rx.credits, credits);

	ev = net_buf_add(buf, sizeof(*ev));
	ev->cid = sys_cpu_to_le16(chan->rx.cid);
	ev->credits = sys_cpu_to_le16(credits);

	l2cap_send(chan->chan.conn, BT_L2CAP_CID_LE_SIG, buf);

	LOG_DBG("chan %p credits %lu", chan, atomic_get(&chan->rx.credits));
}
2326 
2327 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
/* Build and send an LE Flow Control Credit PDU granting @p credits for
 * the channel identified by @p cid. Returns 0 on success, -ENOBUFS if
 * the PDU cannot be allocated, or the error from bt_l2cap_send().
 */
static int l2cap_chan_send_credits_pdu(struct bt_conn *conn, uint16_t cid, uint16_t credits)
{
	struct bt_l2cap_le_credits *ev;
	struct net_buf *buf;
	int err;

	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_LE_CREDITS, get_ident(), sizeof(*ev));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	ev = net_buf_add(buf, sizeof(*ev));
	ev->cid = sys_cpu_to_le16(cid);
	ev->credits = sys_cpu_to_le16(credits);

	err = bt_l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
	if (err != 0) {
		/* On failure the buffer reference stays with us */
		net_buf_unref(buf);
	}

	return err;
}
2353 
2354 /**
2355  * Combination of @ref atomic_add and @ref u16_add_overflow. Leaves @p
2356  * target unchanged if an overflow would occur. Assumes the current
2357  * value of @p target is representable by uint16_t.
2358  */
atomic_add_safe_u16(atomic_t * target,uint16_t addition)2359 static bool atomic_add_safe_u16(atomic_t *target, uint16_t addition)
2360 {
2361 	uint16_t target_old, target_new;
2362 
2363 	do {
2364 		target_old = atomic_get(target);
2365 		if (u16_add_overflow(target_old, addition, &target_new)) {
2366 			return true;
2367 		}
2368 	} while (!atomic_cas(target, target_old, target_new));
2369 
2370 	return false;
2371 }
2372 
/* Grant additional RX credits on a channel using the seg_recv API.
 *
 * @param chan               Channel; must be valid, have ops, and use
 *                           the seg_recv callback.
 * @param additional_credits Credits to grant; must be non-zero.
 *
 * @return 0 on success, -EINVAL on invalid arguments, -EBUSY while the
 *         channel is still connecting, -EOVERFLOW if the credit count
 *         would exceed UINT16_MAX, or the error from sending the
 *         credits PDU. Credits granted before the channel is connected
 *         are only recorded locally (the initial credits are conveyed
 *         in the connection response).
 */
int bt_l2cap_chan_give_credits(struct bt_l2cap_chan *chan, uint16_t additional_credits)
{
	struct bt_l2cap_le_chan *le_chan;

	if (!chan || !chan->ops) {
		LOG_ERR("%s: Invalid chan object.", __func__);
		return -EINVAL;
	}

	/* Fix: derive the container pointer only after chan is validated;
	 * applying CONTAINER_OF to a NULL pointer is undefined behavior and
	 * lets the compiler assume the NULL check above can never be taken.
	 */
	le_chan = BT_L2CAP_LE_CHAN(chan);

	if (!chan->ops->seg_recv) {
		LOG_ERR("%s: Available only with seg_recv.", __func__);
		return -EINVAL;
	}

	if (additional_credits == 0) {
		LOG_ERR("%s: Refusing to give 0.", __func__);
		return -EINVAL;
	}

	if (bt_l2cap_chan_get_state(chan) == BT_L2CAP_CONNECTING) {
		LOG_ERR("%s: Cannot give credits while connecting.", __func__);
		return -EBUSY;
	}

	if (atomic_add_safe_u16(&le_chan->rx.credits, additional_credits)) {
		LOG_ERR("%s: Overflow.", __func__);
		return -EOVERFLOW;
	}

	if (bt_l2cap_chan_get_state(chan) == BT_L2CAP_CONNECTED) {
		int err;

		err = l2cap_chan_send_credits_pdu(chan->conn, le_chan->rx.cid, additional_credits);
		if (err) {
			LOG_ERR("%s: PDU failed %d.", __func__, err);
			return err;
		}
	}

	return 0;
}
2414 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
2415 
bt_l2cap_chan_recv_complete(struct bt_l2cap_chan * chan,struct net_buf * buf)2416 int bt_l2cap_chan_recv_complete(struct bt_l2cap_chan *chan, struct net_buf *buf)
2417 {
2418 	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
2419 	struct bt_conn *conn = chan->conn;
2420 
2421 	__ASSERT_NO_MSG(chan);
2422 	__ASSERT_NO_MSG(buf);
2423 
2424 	net_buf_unref(buf);
2425 
2426 	if (!conn) {
2427 		return -ENOTCONN;
2428 	}
2429 
2430 	if (conn->type != BT_CONN_TYPE_LE) {
2431 		return -ENOTSUP;
2432 	}
2433 
2434 	LOG_DBG("chan %p buf %p", chan, buf);
2435 
2436 	if (bt_l2cap_chan_get_state(&le_chan->chan) == BT_L2CAP_CONNECTED) {
2437 		l2cap_chan_send_credits(le_chan, 1);
2438 	}
2439 
2440 	return 0;
2441 }
2442 
/* net_buf_append_bytes() allocator callback: delegate fragment
 * allocation to the channel's own alloc_buf operation. @p timeout is
 * unused; the channel allocator decides its own waiting policy.
 */
static struct net_buf *l2cap_alloc_frag(k_timeout_t timeout, void *user_data)
{
	struct bt_l2cap_le_chan *chan = user_data;
	struct net_buf *frag;

	frag = chan->chan.ops->alloc_buf(&chan->chan);
	if (frag != NULL) {
		LOG_DBG("frag %p tailroom %zu", frag, net_buf_tailroom(frag));
	}

	return frag;
}
2457 
/* Deliver a fully reassembled SDU to the application via ops->recv.
 *
 * Consumes the @p buf reference, except when recv() returns
 * -EINPROGRESS: then the reference is retained, presumably until the
 * app calls bt_l2cap_chan_recv_complete() — confirm against that API.
 * Any other negative return disconnects the channel. If the channel is
 * still connected after delivery, one RX credit is returned to the
 * peer. @p seg (segment count) is accepted but not used here.
 */
static void l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan *chan,
				   struct net_buf *buf, uint16_t seg)
{
	int err;

	LOG_DBG("chan %p len %zu", chan, net_buf_frags_len(buf));

	__ASSERT_NO_MSG(bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED);
	__ASSERT_NO_MSG(atomic_get(&chan->rx.credits) == 0);

	/* Receiving complete SDU, notify channel and reset SDU buf */
	err = chan->chan.ops->recv(&chan->chan, buf);
	if (err < 0) {
		if (err != -EINPROGRESS) {
			LOG_ERR("err %d", err);
			bt_l2cap_chan_disconnect(&chan->chan);
			net_buf_unref(buf);
		}
		return;
	} else if (bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED) {
		l2cap_chan_send_credits(chan, 1);
	}

	net_buf_unref(buf);
}
2483 
/* Append one received segment to the SDU being reassembled in
 * chan->_sdu. The running segment count is kept in the SDU buffer's
 * user_data. Disconnects the channel on SDU-length overrun or when a
 * fragment cannot be allocated. Once the SDU is complete it is detached
 * from the channel and handed to l2cap_chan_le_recv_sdu(), which
 * consumes it. The caller retains ownership of @p buf.
 */
static void l2cap_chan_le_recv_seg(struct bt_l2cap_le_chan *chan,
				   struct net_buf *buf)
{
	uint16_t len;
	uint16_t seg = 0U;

	len = net_buf_frags_len(chan->_sdu);
	if (len) {
		/* Retrieve the segment counter left by the previous call */
		memcpy(&seg, net_buf_user_data(chan->_sdu), sizeof(seg));
	}

	/* Peer must not send more data than the announced SDU length */
	if (len + buf->len > chan->_sdu_len) {
		LOG_ERR("SDU length mismatch");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	seg++;
	/* Store received segments in user_data */
	memcpy(net_buf_user_data(chan->_sdu), &seg, sizeof(seg));

	LOG_DBG("chan %p seg %d len %zu", chan, seg, net_buf_frags_len(buf));

	/* Append received segment to SDU */
	len = net_buf_append_bytes(chan->_sdu, buf->len, buf->data, K_NO_WAIT,
				   l2cap_alloc_frag, chan);
	if (len != buf->len) {
		LOG_ERR("Unable to store SDU");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	if (net_buf_frags_len(chan->_sdu) < chan->_sdu_len) {
		/* Give more credits if remote has run out of them, this
		 * should only happen if the remote cannot fully utilize the
		 * MPS for some reason.
		 *
		 * We can't send more than one credit, because if the remote
		 * decides to start fully utilizing the MPS for the remainder of
		 * the SDU, then the remote will end up with more credits than
		 * the app has buffers.
		 */
		if (atomic_get(&chan->rx.credits) == 0) {
			LOG_DBG("remote is not fully utilizing MPS");
			l2cap_chan_send_credits(chan, 1);
		}

		return;
	}

	/* SDU complete: detach it from the channel before delivery */
	buf = chan->_sdu;
	chan->_sdu = NULL;
	chan->_sdu_len = 0U;

	l2cap_chan_le_recv_sdu(chan, buf, seg);
}
2540 
2541 #if defined(CONFIG_BT_L2CAP_SEG_RECV)
l2cap_chan_le_recv_seg_direct(struct bt_l2cap_le_chan * chan,struct net_buf * seg)2542 static void l2cap_chan_le_recv_seg_direct(struct bt_l2cap_le_chan *chan, struct net_buf *seg)
2543 {
2544 	uint16_t seg_offset;
2545 	uint16_t sdu_remaining;
2546 
2547 	if (chan->_sdu_len_done == chan->_sdu_len) {
2548 
2549 		/* This is the first PDU in a SDU. */
2550 
2551 		if (seg->len < 2) {
2552 			LOG_WRN("Missing SDU header");
2553 			bt_l2cap_chan_disconnect(&chan->chan);
2554 			return;
2555 		}
2556 
2557 		/* Pop off the "SDU header". */
2558 		chan->_sdu_len = net_buf_pull_le16(seg);
2559 		chan->_sdu_len_done = 0;
2560 
2561 		if (chan->_sdu_len > chan->rx.mtu) {
2562 			LOG_WRN("SDU exceeds MTU");
2563 			bt_l2cap_chan_disconnect(&chan->chan);
2564 			return;
2565 		}
2566 	}
2567 
2568 	seg_offset = chan->_sdu_len_done;
2569 	sdu_remaining = chan->_sdu_len - chan->_sdu_len_done;
2570 
2571 	if (seg->len > sdu_remaining) {
2572 		LOG_WRN("L2CAP RX PDU total exceeds SDU");
2573 		bt_l2cap_chan_disconnect(&chan->chan);
2574 	}
2575 
2576 	/* Commit receive. */
2577 	chan->_sdu_len_done += seg->len;
2578 
2579 	/* Tail call. */
2580 	chan->chan.ops->seg_recv(&chan->chan, chan->_sdu_len, seg_offset, &seg->b);
2581 }
2582 #endif /* CONFIG_BT_L2CAP_SEG_RECV */
2583 
/* Process one PDU received on a credit-based channel.
 *
 * Consumes one RX credit and enforces the negotiated MPS. The PDU is
 * then routed to one of: the seg_recv direct delivery path, ongoing
 * SDU reassembly, app-allocated reassembly (when ops->alloc_buf is
 * set), or direct delivery via ops->recv. Protocol violations (no
 * credits left, PDU > MPS, truncated SDU header, SDU > MTU) disconnect
 * the channel. The caller retains ownership of @p buf.
 */
static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
			       struct net_buf *buf)
{
	uint16_t sdu_len;
	int err;

	if (!test_and_dec(&chan->rx.credits)) {
		LOG_ERR("No credits to receive packet");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	if (buf->len > chan->rx.mps) {
		LOG_WRN("PDU size > MPS (%u > %u)", buf->len, chan->rx.mps);
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Redirect to experimental API. */
	IF_ENABLED(CONFIG_BT_L2CAP_SEG_RECV, (
		if (chan->chan.ops->seg_recv) {
			l2cap_chan_le_recv_seg_direct(chan, buf);
			return;
		}
	))

	/* Check if segments already exist */
	if (chan->_sdu) {
		l2cap_chan_le_recv_seg(chan, buf);
		return;
	}

	/* First PDU of an SDU must carry the 2-byte SDU length header */
	if (buf->len < 2) {
		LOG_WRN("Too short data packet");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	sdu_len = net_buf_pull_le16(buf);

	LOG_DBG("chan %p len %u sdu_len %u", chan, buf->len, sdu_len);

	if (sdu_len > chan->rx.mtu) {
		LOG_ERR("Invalid SDU length");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Always allocate buffer from the channel if supported. */
	if (chan->chan.ops->alloc_buf) {
		chan->_sdu = chan->chan.ops->alloc_buf(&chan->chan);
		if (!chan->_sdu) {
			LOG_ERR("Unable to allocate buffer for SDU");
			bt_l2cap_chan_disconnect(&chan->chan);
			return;
		}
		chan->_sdu_len = sdu_len;

		/* Send sdu_len/mps worth of credits */
		uint16_t credits = DIV_ROUND_UP(
			MIN(sdu_len - buf->len, net_buf_tailroom(chan->_sdu)),
			chan->rx.mps);

		if (credits) {
			LOG_DBG("sending %d extra credits (sdu_len %d buf_len %d mps %d)",
				credits,
				sdu_len,
				buf->len,
				chan->rx.mps);
			l2cap_chan_send_credits(chan, credits);
		}

		l2cap_chan_le_recv_seg(chan, buf);
		return;
	}

	err = chan->chan.ops->recv(&chan->chan, buf);
	if (err < 0) {
		if (err != -EINPROGRESS) {
			LOG_ERR("err %d", err);
			bt_l2cap_chan_disconnect(&chan->chan);
		}
		return;
	}

	/* Only attempt to send credits if the channel wasn't disconnected
	 * in the recv() callback above
	 */
	if (bt_l2cap_chan_get_state(&chan->chan) == BT_L2CAP_CONNECTED) {
		l2cap_chan_send_credits(chan, 1);
	}
}
2676 
l2cap_chan_recv_queue(struct bt_l2cap_le_chan * chan,struct net_buf * buf)2677 static void l2cap_chan_recv_queue(struct bt_l2cap_le_chan *chan,
2678 				  struct net_buf *buf)
2679 {
2680 	if (chan->state == BT_L2CAP_DISCONNECTING) {
2681 		LOG_WRN("Ignoring data received while disconnecting");
2682 		net_buf_unref(buf);
2683 		return;
2684 	}
2685 
2686 	if (atomic_test_bit(chan->chan.status, BT_L2CAP_STATUS_SHUTDOWN)) {
2687 		LOG_WRN("Ignoring data received while channel has shutdown");
2688 		net_buf_unref(buf);
2689 		return;
2690 	}
2691 
2692 	if (!L2CAP_LE_PSM_IS_DYN(chan->psm)) {
2693 		l2cap_chan_le_recv(chan, buf);
2694 		net_buf_unref(buf);
2695 		return;
2696 	}
2697 
2698 	net_buf_put(&chan->rx_queue, buf);
2699 	k_work_submit(&chan->rx_work);
2700 }
2701 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
2702 
/* Route an inbound PDU to its channel. Takes ownership of @p buf.
 *
 * Dynamic (credit-based) channels require a complete PDU: an incomplete
 * one means the peer overflowed our ACL RX buffers, so the channel is
 * disconnected. Fixed channels are delivered directly to ops->recv.
 */
static void l2cap_chan_recv(struct bt_l2cap_chan *chan, struct net_buf *buf,
			    bool complete)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	if (L2CAP_LE_CID_IS_DYN(le_chan->rx.cid)) {
		if (complete) {
			l2cap_chan_recv_queue(le_chan, buf);
		} else {
			/* if packet was not complete this means peer device
			 * overflowed our RX and channel shall be disconnected
			 */
			bt_l2cap_chan_disconnect(chan);
			net_buf_unref(buf);
		}

		return;
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

	LOG_DBG("chan %p len %u", chan, buf->len);

	chan->ops->recv(chan, buf);
	net_buf_unref(buf);
}
2729 
/* Entry point for ACL data from the connection layer: strip the basic
 * L2CAP header, look up the destination channel by CID and hand the
 * payload over. Unknown CIDs and runt PDUs are dropped. Takes
 * ownership of @p buf.
 */
void bt_l2cap_recv(struct bt_conn *conn, struct net_buf *buf, bool complete)
{
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_hdr *hdr;
	uint16_t cid;

	if (IS_ENABLED(CONFIG_BT_BREDR) && conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_recv(conn, buf);
		return;
	}

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Too small L2CAP PDU received");
		net_buf_unref(buf);
		return;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	cid = sys_le16_to_cpu(hdr->cid);

	LOG_DBG("Packet for CID %u len %u", cid, buf->len);

	chan = bt_l2cap_le_lookup_rx_cid(conn, cid);
	if (chan != NULL) {
		l2cap_chan_recv(chan, buf, complete);
	} else {
		LOG_WRN("Ignoring data for unknown channel ID 0x%04x", cid);
		net_buf_unref(buf);
	}
}
2762 
bt_l2cap_update_conn_param(struct bt_conn * conn,const struct bt_le_conn_param * param)2763 int bt_l2cap_update_conn_param(struct bt_conn *conn,
2764 			       const struct bt_le_conn_param *param)
2765 {
2766 	struct bt_l2cap_conn_param_req *req;
2767 	struct net_buf *buf;
2768 	int err;
2769 
2770 	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_CONN_PARAM_REQ,
2771 				      get_ident(), sizeof(*req));
2772 	if (!buf) {
2773 		return -ENOMEM;
2774 	}
2775 
2776 	req = net_buf_add(buf, sizeof(*req));
2777 	req->min_interval = sys_cpu_to_le16(param->interval_min);
2778 	req->max_interval = sys_cpu_to_le16(param->interval_max);
2779 	req->latency = sys_cpu_to_le16(param->latency);
2780 	req->timeout = sys_cpu_to_le16(param->timeout);
2781 
2782 	err = bt_l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
2783 	if (err) {
2784 		net_buf_unref(buf);
2785 		return err;
2786 	}
2787 
2788 	return 0;
2789 }
2790 
l2cap_connected(struct bt_l2cap_chan * chan)2791 static void l2cap_connected(struct bt_l2cap_chan *chan)
2792 {
2793 	LOG_DBG("ch %p cid 0x%04x", BT_L2CAP_LE_CHAN(chan), BT_L2CAP_LE_CHAN(chan)->rx.cid);
2794 }
2795 
/* Fixed signaling channel disconnected callback: stop any pending
 * response (RTX) timeout for the channel.
 */
static void l2cap_disconnected(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("ch %p cid 0x%04x", le_chan, le_chan->rx.cid);

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Cancel RTX work on signal channel.
	 * Disconnected callback is always called from system workqueue
	 * so this should always succeed.
	 */
	(void)k_work_cancel_delayable(&le_chan->rtx_work);
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
}
2810 
l2cap_accept(struct bt_conn * conn,struct bt_l2cap_chan ** chan)2811 static int l2cap_accept(struct bt_conn *conn, struct bt_l2cap_chan **chan)
2812 {
2813 	int i;
2814 	static const struct bt_l2cap_chan_ops ops = {
2815 		.connected = l2cap_connected,
2816 		.disconnected = l2cap_disconnected,
2817 		.recv = l2cap_recv,
2818 	};
2819 
2820 	LOG_DBG("conn %p handle %u", conn, conn->handle);
2821 
2822 	for (i = 0; i < ARRAY_SIZE(bt_l2cap_pool); i++) {
2823 		struct bt_l2cap *l2cap = &bt_l2cap_pool[i];
2824 
2825 		if (l2cap->chan.chan.conn) {
2826 			continue;
2827 		}
2828 
2829 		l2cap->chan.chan.ops = &ops;
2830 		*chan = &l2cap->chan.chan;
2831 
2832 		return 0;
2833 	}
2834 
2835 	LOG_ERR("No available L2CAP context for conn %p", conn);
2836 
2837 	return -ENOMEM;
2838 }
2839 
/* Register the LE signaling fixed channel (BT_L2CAP_CID_LE_SIG). */
BT_L2CAP_CHANNEL_DEFINE(le_fixed_chan, BT_L2CAP_CID_LE_SIG, l2cap_accept, NULL);
2841 
/* Initialize the L2CAP layer: the BR/EDR sub-module (if enabled) and,
 * for dynamic channels, the free-list of TX metadata contexts.
 */
void bt_l2cap_init(void)
{
	if (IS_ENABLED(CONFIG_BT_BREDR)) {
		bt_l2cap_br_init();
	}

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Zero each context and push it onto the free FIFO */
	k_fifo_init(&free_l2cap_tx_meta_data);
	for (size_t i = 0; i < ARRAY_SIZE(l2cap_tx_meta_data_storage); i++) {
		(void)memset(&l2cap_tx_meta_data_storage[i], 0,
					sizeof(l2cap_tx_meta_data_storage[i]));
		k_fifo_put(&free_l2cap_tx_meta_data, &l2cap_tx_meta_data_storage[i]);
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
}
2857 
2858 #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Start an LE credit-based connection to @p psm on @p conn.
 *
 * Initializes the channel's TX/RX state and registers it on the
 * connection. If the channel requires a higher security level than the
 * connection currently has, security elevation is started first and the
 * connection request is deferred (marked by ENCRYPT_PENDING) until
 * encryption changes. On failure the channel is removed and deleted.
 *
 * @return 0 on success, -EINVAL for an out-of-range PSM, -ENOMEM if no
 *         CID is available, or the error from security/request sending.
 */
static int l2cap_le_connect(struct bt_conn *conn, struct bt_l2cap_le_chan *ch,
			    uint16_t psm)
{
	int err;

	if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
		return -EINVAL;
	}

	l2cap_chan_tx_init(ch);
	l2cap_chan_rx_init(ch);

	if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
		return -ENOMEM;
	}

	ch->psm = psm;

	if (conn->sec_level < ch->required_sec_level) {
		/* Raise security first; the connect request is sent from the
		 * encryption-change handling once this completes.
		 */
		err = bt_conn_set_security(conn, ch->required_sec_level);
		if (err) {
			goto fail;
		}

		atomic_set_bit(ch->chan.status,
			       BT_L2CAP_STATUS_ENCRYPT_PENDING);

		return 0;
	}

	err = l2cap_le_conn_req(ch);
	if (err) {
		goto fail;
	}

	return 0;

fail:
	bt_l2cap_chan_remove(conn, &ch->chan);
	bt_l2cap_chan_del(&ch->chan);
	return err;
}
2901 
2902 #if defined(CONFIG_BT_L2CAP_ECRED)
/* Prepare one channel for an enhanced credit-based connection request:
 * validate the PSM, initialize TX/RX state and register the channel on
 * the connection. Returns 0, -EINVAL for a bad PSM, or -ENOMEM.
 */
static int l2cap_ecred_init(struct bt_conn *conn,
			       struct bt_l2cap_le_chan *ch, uint16_t psm)
{
	const bool psm_in_range = (psm >= L2CAP_LE_PSM_FIXED_START) &&
				  (psm <= L2CAP_LE_PSM_DYN_END);

	if (!psm_in_range) {
		return -EINVAL;
	}

	l2cap_chan_tx_init(ch);
	l2cap_chan_rx_init(ch);

	if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
		return -ENOMEM;
	}

	ch->psm = psm;

	LOG_DBG("ch %p psm 0x%02x mtu %u mps %u credits 1", ch, ch->psm, ch->rx.mtu, ch->rx.mps);

	return 0;
}
2924 
/* Connect up to L2CAP_ECRED_CHAN_MAX_PER_REQ channels to @p psm with a
 * single enhanced credit-based connection request. The @p chan array is
 * scanned up to its first NULL entry. If initializing any channel
 * fails, the channels initialized so far are removed again (i is
 * pre-decremented so the failing channel, which was never added, is
 * skipped) and the error is returned.
 */
int bt_l2cap_ecred_chan_connect(struct bt_conn *conn,
				struct bt_l2cap_chan **chan, uint16_t psm)
{
	int i, err;

	LOG_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);

	if (!conn || !chan) {
		return -EINVAL;
	}

	/* Init non-null channels */
	for (i = 0; i < L2CAP_ECRED_CHAN_MAX_PER_REQ; i++) {
		if (!chan[i]) {
			break;
		}

		err = l2cap_ecred_init(conn, BT_L2CAP_LE_CHAN(chan[i]), psm);
		if (err < 0) {
			i--;
			goto fail;
		}
	}

	return l2cap_ecred_conn_req(chan, i);
fail:
	/* Remove channels added */
	for (; i >= 0; i--) {
		if (!chan[i]) {
			continue;
		}

		bt_l2cap_chan_remove(conn, chan[i]);
	}

	return err;
}
2962 
l2cap_find_pending_reconf(struct bt_conn * conn)2963 static struct bt_l2cap_le_chan *l2cap_find_pending_reconf(struct bt_conn *conn)
2964 {
2965 	struct bt_l2cap_chan *chan;
2966 
2967 	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
2968 		if (BT_L2CAP_LE_CHAN(chan)->pending_rx_mtu) {
2969 			return BT_L2CAP_LE_CHAN(chan);
2970 		}
2971 	}
2972 
2973 	return NULL;
2974 }
2975 
/* Request an ECRED MTU reconfiguration for up to
 * L2CAP_ECRED_CHAN_MAX_PER_REQ channels, all on the same connection.
 * The @p chans array is scanned up to its first NULL entry.
 *
 * @param chans Array of channels to reconfigure.
 * @param mtu   New RX MTU; must not be smaller than any channel's
 *              current RX MTU (the spec forbids decreasing it).
 *
 * @return 0 on success, -EINVAL on bad arguments or non-LE connection,
 *         -ENOTCONN without a connection, -EBUSY while another
 *         reconfiguration is pending, -ENOMEM if the PDU cannot be
 *         allocated.
 */
int bt_l2cap_ecred_chan_reconfigure(struct bt_l2cap_chan **chans, uint16_t mtu)
{
	struct bt_l2cap_ecred_reconf_req *req;
	struct bt_conn *conn = NULL;
	struct bt_l2cap_le_chan *ch;
	struct net_buf *buf;
	uint8_t ident;
	int i;

	LOG_DBG("chans %p mtu 0x%04x", chans, mtu);

	if (!chans) {
		return -EINVAL;
	}

	for (i = 0; i < L2CAP_ECRED_CHAN_MAX_PER_REQ; i++) {
		if (!chans[i]) {
			break;
		}

		/* validate that all channels are from same connection */
		if (conn) {
			if (conn != chans[i]->conn) {
				return -EINVAL;
			}
		} else {
			conn = chans[i]->conn;
		}

		/* validate MTU is not decreased */
		if (mtu < BT_L2CAP_LE_CHAN(chans[i])->rx.mtu) {
			return -EINVAL;
		}
	}

	if (i == 0) {
		return -EINVAL;
	}

	if (!conn) {
		return -ENOTCONN;
	}

	if (conn->type != BT_CONN_TYPE_LE) {
		return -EINVAL;
	}

	/* allow only 1 request at time */
	if (l2cap_find_pending_reconf(conn)) {
		return -EBUSY;
	}

	ident = get_ident();

	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_ECRED_RECONF_REQ,
				      ident,
				      sizeof(*req) + (i * sizeof(uint16_t)));
	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));
	req->mtu = sys_cpu_to_le16(mtu);

	/* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE
	 * as the remaining bytes cannot be used.
	 */
	req->mps = sys_cpu_to_le16(MIN(mtu + BT_L2CAP_SDU_HDR_SIZE,
				       BT_L2CAP_RX_MTU));

	/* Tag each channel with the request ident so the response/timeout
	 * handler can find them later, and append its CID to the PDU.
	 * (Fix: dropped the stray ';' that followed this loop's block.)
	 */
	for (int j = 0; j < i; j++) {
		ch = BT_L2CAP_LE_CHAN(chans[j]);

		ch->ident = ident;
		ch->pending_rx_mtu = mtu;

		net_buf_add_le16(buf, ch->rx.cid);
	}

	/* We set the RTX timer on one of the supplied channels, but when the
	 * request resolves or times out we will act on all the channels in the
	 * supplied array, using the ident field to find them.
	 */
	l2cap_chan_send_req(chans[0], buf, L2CAP_CONN_TIMEOUT);

	return 0;
}
3063 
3064 #endif /* defined(CONFIG_BT_L2CAP_ECRED) */
3065 
/* Connect a channel to @p psm on @p conn. BR/EDR connections are
 * delegated to the BR implementation; LE connections go through the
 * credit-based connect flow. A required security level of L0 is
 * normalized up to L1; levels above L4 are rejected.
 *
 * @return 0 on success, -ENOTCONN when not connected, -EINVAL for a
 *         NULL channel or invalid security level, or the error from
 *         l2cap_le_connect().
 */
int bt_l2cap_chan_connect(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			  uint16_t psm)
{
	struct bt_l2cap_le_chan *le_chan;

	LOG_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);

	if (!conn || conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	if (!chan) {
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		return bt_l2cap_br_chan_connect(conn, chan, psm);
	}

	/* Fix: derive the container pointer only after the NULL check on
	 * chan; CONTAINER_OF on a NULL pointer is undefined behavior.
	 */
	le_chan = BT_L2CAP_LE_CHAN(chan);

	if (le_chan->required_sec_level > BT_SECURITY_L4) {
		return -EINVAL;
	} else if (le_chan->required_sec_level == BT_SECURITY_L0) {
		le_chan->required_sec_level = BT_SECURITY_L1;
	}

	return l2cap_le_connect(conn, le_chan, psm);
}
3094 
/* Initiate disconnection of a connection-oriented channel by sending an
 * L2CAP Disconnection Request and moving the channel to
 * BT_L2CAP_DISCONNECTING; the transition completes when the response
 * arrives (or the request times out). BR/EDR channels are delegated to
 * the BR implementation.
 *
 * @return 0 on success, -ENOTCONN without a connection, -ENOMEM if the
 *         request PDU cannot be allocated.
 */
int bt_l2cap_chan_disconnect(struct bt_l2cap_chan *chan)
{
	struct bt_conn *conn = chan->conn;
	struct net_buf *buf;
	struct bt_l2cap_disconn_req *req;
	struct bt_l2cap_le_chan *le_chan;

	if (!conn) {
		return -ENOTCONN;
	}

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		return bt_l2cap_br_chan_disconnect(chan);
	}

	le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("chan %p scid 0x%04x dcid 0x%04x", chan, le_chan->rx.cid, le_chan->tx.cid);

	/* Remember the ident so the response can be matched to this channel */
	le_chan->ident = get_ident();

	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_DISCONN_REQ,
				      le_chan->ident, sizeof(*req));
	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));
	req->dcid = sys_cpu_to_le16(le_chan->tx.cid);
	req->scid = sys_cpu_to_le16(le_chan->rx.cid);

	l2cap_chan_send_req(chan, buf, L2CAP_DISC_TIMEOUT);
	bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTING);

	return 0;
}
3132 
/* Send an SDU on a dynamic channel, invoking @p cb with @p user_data once
 * transmission completes.
 *
 * If previous segments are still pending, the TX queue is non-empty, or no
 * credits are available, the buffer is queued and transmitted later from the
 * channel's TX work item. Otherwise the SDU is (partially) sent inline.
 *
 * Returns 0 on success (or queue), a positive count of bytes sent when only
 * part of the SDU could go out, or a negative errno: -EINVAL for a NULL
 * buffer, -ENOTCONN when not connected, -ESHUTDOWN when the channel TX side
 * is shut down, -ENOBUFS when no TX metadata context is free.
 */
int bt_l2cap_chan_send_cb(struct bt_l2cap_chan *chan, struct net_buf *buf, bt_conn_tx_cb_t cb,
			  void *user_data)
{
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
	struct l2cap_tx_meta_data *data;
	void *old_user_data;
	int err;

	if (!buf) {
		return -EINVAL;
	}

	/* Only read the buffer's current metadata after the NULL check
	 * above; the previous code dereferenced buf before validating it.
	 */
	old_user_data = l2cap_tx_meta_data(buf);

	LOG_DBG("chan %p buf %p len %zu", chan, buf, net_buf_frags_len(buf));

	if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	if (atomic_test_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN)) {
		return -ESHUTDOWN;
	}

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    chan->conn->type == BT_CONN_TYPE_BR) {
		return bt_l2cap_br_chan_send_cb(chan, buf, cb, user_data);
	}

	data = alloc_tx_meta_data();
	if (!data) {
		LOG_WRN("Unable to allocate TX context");
		return -ENOBUFS;
	}

	data->sent = 0;
	data->cid = le_chan->tx.cid;
	data->cb = cb;
	data->user_data = user_data;
	l2cap_tx_meta_data(buf) = data;

	/* Queue if there are pending segments left from previous packet or
	 * there are no credits available.
	 */
	if (le_chan->tx_buf || !k_fifo_is_empty(&le_chan->tx_queue) ||
	    !atomic_get(&le_chan->tx.credits)) {
		l2cap_tx_meta_data(buf)->sent = 0;
		net_buf_put(&le_chan->tx_queue, buf);
		k_work_reschedule(&le_chan->tx_work, K_NO_WAIT);
		return 0;
	}

	err = l2cap_chan_le_send_sdu(le_chan, &buf, 0);
	if (err < 0) {
		if (err == -EAGAIN && l2cap_tx_meta_data(buf)->sent) {
			/* Queue buffer if at least one segment could be sent */
			net_buf_put(&le_chan->tx_queue, buf);
			return l2cap_tx_meta_data(buf)->sent;
		}

		LOG_ERR("failed to send message %d", err);

		/* Restore the caller's metadata so the buffer is unchanged
		 * on failure, then release our TX context.
		 */
		l2cap_tx_meta_data(buf) = old_user_data;
		free_tx_meta_data(data);
	}

	return err;
}
3199 
bt_l2cap_chan_send(struct bt_l2cap_chan * chan,struct net_buf * buf)3200 int bt_l2cap_chan_send(struct bt_l2cap_chan *chan, struct net_buf *buf)
3201 {
3202 	return bt_l2cap_chan_send_cb(chan, buf, NULL, NULL);
3203 }
3204 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
3205