/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
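
/* Raw-HCI tester: drives the controller directly over the HCI raw interface,
 * without the host stack. It resets the controller, configures data length
 * and event masks, starts legacy advertising, waits for the central to
 * connect, opens an LE credit-based L2CAP channel and then streams SDUs to
 * the DUT.
 */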

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/__assert.h>

#include <zephyr/net_buf.h>
#include <zephyr/bluetooth/buf.h>

#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/hci_raw.h>
#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/bluetooth/gap.h>

#include "common/bt_str.h"

#include "host/conn_internal.h"
#include "host/l2cap_internal.h"

#include "babblekit/flags.h"
#include "babblekit/device.h"
#include "babblekit/testcase.h"

/* local includes */
#include "data.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(tester, CONFIG_APP_LOG_LEVEL);

DEFINE_FLAG(is_connected);
DEFINE_FLAG(flag_l2cap_connected);

static K_FIFO_DEFINE(rx_queue);

#define CMD_BUF_SIZE MAX(BT_BUF_EVT_RX_SIZE, BT_BUF_CMD_TX_SIZE)
NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_BUF_CMD_TX_COUNT, CMD_BUF_SIZE, 8, NULL);

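/* HCI commands are issued one at a time: `cmd_sem` gates command submission,
 * `active_opcode` tracks the command in flight and `cmd_rsp` holds a
 * reference to its response. `acl_pkts` counts free controller ACL buffers
 * and `tx_credits` counts L2CAP credits granted by the peer.
 */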
static K_SEM_DEFINE(cmd_sem, 1, 1);
static struct k_sem acl_pkts;
static struct k_sem tx_credits;
static uint16_t peer_mps;
static uint16_t conn_handle;

static uint16_t active_opcode = 0xFFFF;
static struct net_buf *cmd_rsp;

struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
{
	struct bt_hci_cmd_hdr *hdr;
	struct net_buf *buf;

	LOG_DBG("opcode 0x%04x param_len %u", opcode, param_len);

	buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
	__ASSERT_NO_MSG(buf);

	LOG_DBG("buf %p", buf);

	net_buf_reserve(buf, BT_BUF_RESERVE);

	bt_buf_set_type(buf, BT_BUF_CMD);

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->opcode = sys_cpu_to_le16(opcode);
	hdr->param_len = param_len;

	return buf;
}

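/* Match a Command Complete / Command Status event against the outstanding
 * command. The response buffer is referenced and stored in `cmd_rsp` for
 * send_cmd(), and `cmd_sem` is released once the controller signals it can
 * accept another command (ncmd > 0).
 */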
static void handle_cmd_complete(struct net_buf *buf)
{
	struct bt_hci_evt_hdr *hdr;
	uint8_t status, ncmd;
	uint16_t opcode;

	struct net_buf_simple_state state;

	net_buf_simple_save(&buf->b, &state);

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));

	if (hdr->evt == BT_HCI_EVT_CMD_COMPLETE) {
		struct bt_hci_evt_cmd_complete *evt;

		evt = net_buf_pull_mem(buf, sizeof(*evt));
		status = 0;
		ncmd = evt->ncmd;
		opcode = sys_le16_to_cpu(evt->opcode);

	} else if (hdr->evt == BT_HCI_EVT_CMD_STATUS) {
		struct bt_hci_evt_cmd_status *evt;

		evt = net_buf_pull_mem(buf, sizeof(*evt));
		status = evt->status;
		ncmd = evt->ncmd;
		opcode = sys_le16_to_cpu(evt->opcode);

	} else {
		__ASSERT_NO_MSG(0);
	}

	LOG_DBG("opcode 0x%04x status %x", opcode, status);

	__ASSERT(status == 0x00, "cmd status: %x", status);

	__ASSERT(active_opcode == opcode, "unexpected opcode %x != %x", active_opcode, opcode);

	if (active_opcode) {
		active_opcode = 0xFFFF;
		cmd_rsp = net_buf_ref(buf);
		net_buf_simple_restore(&buf->b, &state);
	}

	if (ncmd) {
		k_sem_give(&cmd_sem);
	}
}

static void verify_interval(uint16_t interval)
{
	uint16_t min = EXPECTED_CONN_INTERVAL - CONN_INTERVAL_TOL;
	uint16_t max = EXPECTED_CONN_INTERVAL + CONN_INTERVAL_TOL;

	TEST_ASSERT(interval > min, "Conn interval %d < %d", interval, min);
	TEST_ASSERT(interval < max, "Conn interval %d > %d", interval, max);
}

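/* LE meta events carry their subevent code right after the two-byte event
 * header, hence the buf->data[2] lookup below.
 */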
static void handle_meta_event(struct net_buf *buf)
{
	uint8_t code = buf->data[2];

	switch (code) {
	case BT_HCI_EVT_LE_ENH_CONN_COMPLETE:
	case BT_HCI_EVT_LE_ENH_CONN_COMPLETE_V2:
		struct bt_hci_evt_le_enh_conn_complete *evt = (void *)(&buf->data[3]);

		conn_handle = evt->handle;
		LOG_DBG("connected: handle: %d interval %d", conn_handle, evt->interval);

		verify_interval(evt->interval);
		SET_FLAG(is_connected);
		break;
	case BT_HCI_EVT_LE_CHAN_SEL_ALGO:
		/* do nothing */
		break;
	default:
		LOG_ERR("unhandled meta event %x", code);
		LOG_HEXDUMP_ERR(buf->data, buf->len, "HCI META EVT");
	}
}

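/* Number of Completed Packets: each completed packet frees one controller
 * ACL buffer, so give back that many `acl_pkts` tokens. Only the first
 * handle entry is examined since this tester uses a single connection.
 */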
static void handle_ncp(struct net_buf *buf)
{
	struct bt_hci_evt_hdr *hdr;

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));

	struct bt_hci_evt_num_completed_packets *evt = (void *)buf->data;

	uint16_t handle, count;

	handle = sys_le16_to_cpu(evt->h[0].handle);
	count = sys_le16_to_cpu(evt->h[0].count);

	LOG_DBG("sent %d packets", count);

	while (count--) {
		k_sem_give(&acl_pkts);
	}
}

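/* LE Flow Control Credit packet from the peer: each credit allows us to send
 * one more K-frame (PDU) on the dynamic channel.
 */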
static void handle_l2cap_credits(struct net_buf *buf)
{
	struct bt_l2cap_le_credits *ev = (void *)buf->data;
	uint16_t credits = sys_le16_to_cpu(ev->credits);

	LOG_DBG("got credits: %d", credits);
	while (credits--) {
		k_sem_give(&tx_credits);
	}
}

static void handle_l2cap_connected(struct net_buf *buf)
{
	struct bt_l2cap_le_conn_rsp *rsp = (void *)buf->data;

	uint16_t credits = sys_le16_to_cpu(rsp->credits);
	uint16_t mtu = sys_le16_to_cpu(rsp->mtu);
	uint16_t mps = sys_le16_to_cpu(rsp->mps);

	peer_mps = mps;

	LOG_DBG("l2cap connected: mtu %d mps %d credits: %d", mtu, mps, credits);

	k_sem_init(&tx_credits, credits, credits);
	SET_FLAG(flag_l2cap_connected);
}

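/* Dispatch L2CAP LE signaling PDUs. Only the responses needed to establish
 * and feed the credit-based channel are handled; anything else fails the
 * test.
 */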
static void handle_sig(struct net_buf *buf)
{
	struct bt_l2cap_sig_hdr *hdr;

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));

	switch (hdr->code) {
	case BT_L2CAP_LE_CONN_RSP:
		handle_l2cap_connected(buf);
		return;
	case BT_L2CAP_LE_CREDITS:
		handle_l2cap_credits(buf);
		return;
	case BT_L2CAP_DISCONN_REQ:
		TEST_FAIL("channel disconnected\n");
		return;
	default:
		TEST_FAIL("unhandled opcode %x\n", hdr->code);
		return;
	}
}

static void handle_l2cap(struct net_buf *buf)
{
	struct bt_l2cap_hdr *hdr;
	uint16_t cid;

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	cid = sys_le16_to_cpu(hdr->cid);

	__ASSERT_NO_MSG(buf->len == hdr->len);
	LOG_DBG("Packet for CID %u len %u", cid, buf->len);
	LOG_HEXDUMP_DBG(buf->data, buf->len, "l2cap");

	/* signaling PDU */
	if (cid == 0x0005) {
		handle_sig(buf);
		return;
	}

	/* CoC PDU */
	if (cid == 0x0040) {
		TEST_FAIL("unexpected data rx");
	}
}

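/* Strip the HCI ACL header and pass the payload on to the L2CAP handler.
 * Every incoming ACL packet must be a complete, unfragmented PDU.
 */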
static void handle_acl(struct net_buf *buf)
{
	struct bt_hci_acl_hdr *hdr;
	uint16_t len, handle;
	uint8_t flags;

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	len = sys_le16_to_cpu(hdr->len);
	handle = sys_le16_to_cpu(hdr->handle);

	flags = bt_acl_flags(handle);
	handle = bt_acl_handle(handle);

	/* fragmentation not supported */
	__ASSERT_NO_MSG(flags == BT_ACL_START);

	LOG_DBG("ACL: conn %d len %d flags %d", handle, len, flags);
	LOG_HEXDUMP_DBG(buf->data, buf->len, "HCI ACL");

	handle_l2cap(buf);
}

static void recv(struct net_buf *buf)
{
	LOG_HEXDUMP_DBG(buf->data, buf->len, "HCI RX");

	uint8_t code = buf->data[0];

	if (bt_buf_get_type(buf) == BT_BUF_EVT) {
		switch (code) {
		case BT_HCI_EVT_CMD_COMPLETE:
		case BT_HCI_EVT_CMD_STATUS:
			handle_cmd_complete(buf);
			break;
		case BT_HCI_EVT_LE_META_EVENT:
			handle_meta_event(buf);
			break;
		case BT_HCI_EVT_DISCONN_COMPLETE:
			UNSET_FLAG(is_connected);
			break;
		case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
			handle_ncp(buf);
			break;
		default:
			LOG_ERR("unhandled msg %x", code);
			LOG_HEXDUMP_ERR(buf->data, buf->len, "HCI EVT");
		}

		/* handlers should take a ref if they want to access the buffer
		 * later
		 */
		net_buf_unref(buf);
		return;
	}

	if (bt_buf_get_type(buf) == BT_BUF_ACL_IN) {
		handle_acl(buf);
		net_buf_unref(buf);
		return;
	}

	LOG_ERR("HCI RX (not data or event)");
	net_buf_unref(buf);
}

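/* Send an HCI command and block until the controller responds. If `cmd` is
 * NULL an empty command with the given opcode is allocated. When `rsp` is
 * provided, the caller receives the reference to the response buffer and is
 * responsible for unreferencing it.
 */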
static void send_cmd(uint16_t opcode, struct net_buf *cmd, struct net_buf **rsp)
{
	LOG_DBG("opcode %x", opcode);

	if (!cmd) {
		cmd = bt_hci_cmd_create(opcode, 0);
	}

	k_sem_take(&cmd_sem, K_FOREVER);
	__ASSERT_NO_MSG(active_opcode == 0xFFFF);

	active_opcode = opcode;

	LOG_HEXDUMP_DBG(cmd->data, cmd->len, "HCI TX");
	bt_send(cmd);

	/* Wait until the command completes */
	k_sem_take(&cmd_sem, K_FOREVER);
	k_sem_give(&cmd_sem);

	net_buf_unref(cmd);

	/* return response. it's okay if cmd_rsp gets overwritten, since the app
	 * gets the ref to the underlying buffer when this fn returns.
	 */
	if (rsp) {
		*rsp = cmd_rsp;
	} else {
		net_buf_unref(cmd_rsp);
		cmd_rsp = NULL;
	}
}

static K_THREAD_STACK_DEFINE(rx_thread_stack, 1024);
static struct k_thread rx_thread_data;

static void rx_thread(void *p1, void *p2, void *p3)
{
	LOG_DBG("start HCI rx");

	while (1) {
		struct net_buf *buf;

		/* Wait until a buffer is available */
		buf = k_fifo_get(&rx_queue, K_FOREVER);
		recv(buf);
	}
}

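/* Parse the LE Read Buffer Size response and size `acl_pkts` to the number
 * of ACL buffers the controller advertises.
 */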
static void le_read_buffer_size_complete(struct net_buf *rsp)
{
	struct bt_hci_rp_le_read_buffer_size *rp = (void *)rsp->data;

	LOG_DBG("status 0x%02x", rp->status);
	LOG_DBG("max len %d max num %d", rp->le_max_len, rp->le_max_num);

	k_sem_init(&acl_pkts, rp->le_max_num, rp->le_max_num);
	net_buf_unref(rsp);
}

static void read_max_data_len(uint16_t *tx_octets, uint16_t *tx_time)
{
	struct bt_hci_rp_le_read_max_data_len *rp;
	struct net_buf *rsp;

	send_cmd(BT_HCI_OP_LE_READ_MAX_DATA_LEN, NULL, &rsp);

	rp = (void *)rsp->data;
	*tx_octets = sys_le16_to_cpu(rp->max_tx_octets);
	*tx_time = sys_le16_to_cpu(rp->max_tx_time);
	net_buf_unref(rsp);
}

static void write_default_data_len(uint16_t tx_octets, uint16_t tx_time)
{
	struct bt_hci_cp_le_write_default_data_len *cp;
	struct net_buf *buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN, sizeof(*cp));

	__ASSERT_NO_MSG(buf);

	cp = net_buf_add(buf, sizeof(*cp));
	cp->max_tx_octets = sys_cpu_to_le16(tx_octets);
	cp->max_tx_time = sys_cpu_to_le16(tx_time);

	send_cmd(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN, buf, NULL);
}

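/* Set the default (suggested) data length to the controller maximum so new
 * connections use the longest supported link-layer packets.
 */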
static void set_data_len(void)
{
	uint16_t tx_octets, tx_time;

	read_max_data_len(&tx_octets, &tx_time);
	write_default_data_len(tx_octets, tx_time);
}

static void set_event_mask(uint16_t opcode)
{
	struct bt_hci_cp_set_event_mask *cp_mask;
	struct net_buf *buf;
	uint64_t mask = 0U;

	/* The two commands have the same length/params */
	buf = bt_hci_cmd_create(opcode, sizeof(*cp_mask));
	__ASSERT_NO_MSG(buf);

	/* Forward all events */
	cp_mask = net_buf_add(buf, sizeof(*cp_mask));
	mask = UINT64_MAX;
	sys_put_le64(mask, cp_mask->events);

	send_cmd(opcode, buf, NULL);
}

static void set_random_address(void)
{
	struct net_buf *buf;
	bt_addr_le_t addr = {BT_ADDR_LE_RANDOM, {{0x0A, 0x89, 0x67, 0x45, 0x23, 0xC1}}};

	/* Allow multilink */
	addr.a.val[3] = bk_device_get_number();

	LOG_DBG("%s", bt_addr_str(&addr.a));

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, sizeof(addr.a));
	__ASSERT_NO_MSG(buf);

	net_buf_add_mem(buf, &addr.a, sizeof(addr.a));
	send_cmd(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, buf, NULL);
}

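/* Configure legacy advertising: advertising data carrying only the complete
 * device name, a fixed min/max interval, connectable (ADV_IND) PDUs on all
 * three primary channels and the random address set above, then enable
 * advertising.
 */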
static void start_adv(uint16_t interval, const char *name, size_t name_len)
{
	struct bt_hci_cp_le_set_adv_data data;
	struct bt_hci_cp_le_set_adv_param set_param;
	struct net_buf *buf;

	/* name_len must not include the trailing '\0' */
	__ASSERT(name_len < (31 - 2), "name_len must be < 29");

	(void)memset(&data, 0, sizeof(data));
	data.len = name_len + 2;
	data.data[0] = name_len + 1;
	data.data[1] = BT_DATA_NAME_COMPLETE;
	memcpy(&data.data[2], name, name_len);

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_DATA, sizeof(data));
	__ASSERT_NO_MSG(buf);
	net_buf_add_mem(buf, &data, sizeof(data));
	send_cmd(BT_HCI_OP_LE_SET_ADV_DATA, buf, NULL);

	(void)memset(&set_param, 0, sizeof(set_param));
	set_param.min_interval = sys_cpu_to_le16(interval);
	set_param.max_interval = sys_cpu_to_le16(interval);
	set_param.channel_map = 0x07;
	set_param.filter_policy = BT_LE_ADV_FP_NO_FILTER;
	set_param.type = BT_HCI_ADV_IND;
	set_param.own_addr_type = BT_HCI_OWN_ADDR_RANDOM;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_PARAM, sizeof(set_param));
	__ASSERT_NO_MSG(buf);
	net_buf_add_mem(buf, &set_param, sizeof(set_param));

	send_cmd(BT_HCI_OP_LE_SET_ADV_PARAM, buf, NULL);

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_ENABLE, 1);
	__ASSERT_NO_MSG(buf);

	net_buf_add_u8(buf, BT_HCI_LE_ADV_ENABLE);
	send_cmd(BT_HCI_OP_LE_SET_ADV_ENABLE, buf, NULL);
}

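/* Pool for outgoing ACL data. Buffers are allocated with enough headroom for
 * the headers pushed later: HCI ACL, L2CAP PDU and, for data SDUs, the
 * 2-byte SDU length.
 */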
NET_BUF_POOL_DEFINE(acl_tx_pool, 100, BT_L2CAP_SDU_BUF_SIZE(200), 8, NULL);

static struct net_buf *alloc_l2cap_pdu(void)
{
	struct net_buf *buf;
	uint16_t reserve;

	buf = net_buf_alloc(&acl_tx_pool, K_FOREVER);
	__ASSERT_NO_MSG(buf);

	reserve = sizeof(struct bt_l2cap_hdr);
	reserve += sizeof(struct bt_hci_acl_hdr) + BT_BUF_RESERVE;

	net_buf_reserve(buf, reserve);

	return buf;
}

static struct net_buf *l2cap_create_le_sig_pdu(uint8_t code, uint8_t ident, uint16_t len)
{
	struct bt_l2cap_sig_hdr *hdr;
	struct net_buf *buf;

	buf = alloc_l2cap_pdu();

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = code;
	hdr->ident = ident;
	hdr->len = sys_cpu_to_le16(len);

	return buf;
}

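/* Prepend the HCI ACL header and hand the packet to the controller. Blocks
 * on `acl_pkts` until the controller has a free ACL buffer.
 */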
static int send_acl(struct net_buf *buf, uint8_t flags)
{
	struct bt_hci_acl_hdr *hdr;

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->handle = sys_cpu_to_le16(bt_acl_handle_pack(conn_handle, flags));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	bt_buf_set_type(buf, BT_BUF_ACL_OUT);

	k_sem_take(&acl_pkts, K_FOREVER);

	return bt_send(buf);
}

static void push_l2cap_pdu_header(struct net_buf *dst, uint16_t len, uint16_t cid)
{
	struct bt_l2cap_hdr *hdr;

	hdr = net_buf_push(dst, sizeof(*hdr));
	hdr->len = sys_cpu_to_le16(len);
	hdr->cid = sys_cpu_to_le16(cid);
}

static void send_l2cap_packet(struct net_buf *buf, uint16_t cid)
{
	push_l2cap_pdu_header(buf, buf->len, cid);
	send_acl(buf, BT_ACL_START_NO_FLUSH);
}

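/* Establish an LE credit-based channel: send an L2CAP LE Connection Request
 * on the signaling channel and wait for the response handled in
 * handle_l2cap_connected().
 */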
static void open_l2cap(void)
{
	struct net_buf *buf;
	struct bt_l2cap_le_conn_req *req;

	buf = l2cap_create_le_sig_pdu(BT_L2CAP_LE_CONN_REQ, 1, sizeof(*req));

	req = net_buf_add(buf, sizeof(*req));
	req->psm = sys_cpu_to_le16(L2CAP_TEST_PSM);
	req->scid = sys_cpu_to_le16(L2CAP_TEST_CID);

	/* We don't intend to receive anything. Use the smallest allowed
	 * values and no initial credits.
	 */
	req->mtu = sys_cpu_to_le16(23);
	req->mps = sys_cpu_to_le16(23);
	req->credits = sys_cpu_to_le16(0);

	send_l2cap_packet(buf, BT_L2CAP_CID_LE_SIG);

	WAIT_FOR_FLAG(flag_l2cap_connected);
}

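/* Send a single SDU as one L2CAP K-frame, split into HCI ACL fragments of at
 * most `on_air_size` bytes. The first fragment carries both the L2CAP PDU
 * header and the 2-byte SDU length; one channel credit covers the whole PDU
 * regardless of how many ACL fragments it takes.
 */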
static void send_l2cap_sdu(uint8_t *data, uint16_t data_len, uint16_t mps, uint16_t on_air_size)
{
	uint16_t frag_len;
	uint8_t flags = BT_ACL_START_NO_FLUSH;

	/* Only MPS-sized SDUs */
	__ASSERT_NO_MSG(data_len <= (mps - BT_L2CAP_SDU_HDR_SIZE));

	/* Need to fit both headers on the first ACL fragment */
	__ASSERT_NO_MSG(on_air_size >= (BT_L2CAP_SDU_HDR_SIZE + BT_L2CAP_HDR_SIZE));

	LOG_HEXDUMP_DBG(data, data_len, "send SDU:");

	/* Since we send one PDU (but many HCI ACL fragments) we only need one
	 * (PDU) credit.
	 */
	k_sem_take(&tx_credits, K_FOREVER);

	for (int i = 0; data_len; i++) {
		struct net_buf *buf = net_buf_alloc(&acl_tx_pool, K_FOREVER);

		__ASSERT_NO_MSG(buf);
		net_buf_reserve(buf, BT_L2CAP_SDU_CHAN_SEND_RESERVE);

		frag_len = MIN(data_len, on_air_size);

		if (i == 0) {
			/* The first packet is the first part of both the SDU
			 * and the PDU. It therefore needs to contain both
			 * headers.
			 */
			net_buf_push_le16(buf, data_len);
			frag_len -= BT_L2CAP_SDU_HDR_SIZE;

			push_l2cap_pdu_header(buf, data_len + BT_L2CAP_SDU_HDR_SIZE, 0x0040);
			frag_len -= BT_L2CAP_HDR_SIZE;
		}

		/* copy data into ACL frag */
		net_buf_add_mem(buf, data, frag_len);
		data = &data[frag_len];
		data_len -= frag_len;

		LOG_DBG("send ACL frag %d (%d bytes, remaining %d)", i, buf->len, data_len);
		LOG_HEXDUMP_DBG(buf->data, buf->len, "ACL Fragment");

		send_acl(buf, flags);
		flags = BT_ACL_CONT;
	}
}

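/* Test entry point for the tester device: bring up raw HCI, initialize the
 * controller, connect, open the L2CAP channel and push SDU_NUM SDUs to the
 * DUT.
 */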
void entrypoint_tester(void)
{
	bt_enable_raw(&rx_queue);

	/* Start the RX thread */
	k_thread_create(&rx_thread_data, rx_thread_stack, K_THREAD_STACK_SIZEOF(rx_thread_stack),
			rx_thread, NULL, NULL, NULL, K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
	k_thread_name_set(&rx_thread_data, "HCI RX");

	k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(0));

	/* Initialize controller */
	struct net_buf *rsp;

	send_cmd(BT_HCI_OP_RESET, NULL, NULL);
	send_cmd(BT_HCI_OP_LE_READ_BUFFER_SIZE, NULL, &rsp);
	le_read_buffer_size_complete(rsp);

	set_data_len();
	set_event_mask(BT_HCI_OP_SET_EVENT_MASK);
	set_event_mask(BT_HCI_OP_LE_SET_EVENT_MASK);
	set_random_address();

	/* Start advertising & wait for a connection */
	start_adv(40, TESTER_NAME, sizeof(TESTER_NAME) - 1);
	WAIT_FOR_FLAG(is_connected);
	LOG_INF("connected");

	/* Connect to the central's dynamic L2CAP server */
	open_l2cap();

	/* Prepare the data for sending */
	uint8_t data[PAYLOAD_LEN];

	for (int i = 0; i < ARRAY_SIZE(data); i++) {
		data[i] = (uint8_t)i;
	}

	/* Start sending data at a set time + offset[device_num].
	 *
	 * The connection is created with ~30-50ms interval, so that should be
	 * enough to have the DUT re-assembling L2CAP PDUs from all the peers at
	 * the same time.
	 */
	int delay = bk_device_get_number() * 2 * EXPECTED_CONN_INTERVAL;

	k_msleep(delay);

	for (int i = 0; i < SDU_NUM; i++) {
		LOG_INF("Sending SDU %d / %d", i + 1, SDU_NUM);
		send_l2cap_sdu(data, sizeof(data), peer_mps, 8);
	}

	TEST_PASS("Sent all %d SDUs", SDU_NUM);
}