1 /*
2  * Copyright (c) 2023 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 #include <zephyr/sys/byteorder.h>
9 #include <zephyr/sys/__assert.h>
10 
11 #include <zephyr/net_buf.h>
12 #include <zephyr/bluetooth/buf.h>
13 
14 #include <zephyr/bluetooth/bluetooth.h>
15 #include <zephyr/bluetooth/hci.h>
16 #include <zephyr/bluetooth/hci_raw.h>
17 #include <zephyr/bluetooth/hci_types.h>
18 
19 #include "common/hci_common_internal.h"
20 #include "common/bt_str.h"
21 
22 #include "host/conn_internal.h"
23 #include "host/l2cap_internal.h"
24 
25 #include "babblekit/testcase.h"
26 #include "babblekit/flags.h"
27 
28 #include <zephyr/logging/log.h>
29 LOG_MODULE_REGISTER(bt_tinyhost, LOG_LEVEL_INF);
30 
31 #define BT_ATT_OP_MTU_REQ   0x02
32 #define BT_ATT_OP_MTU_RSP   0x03
33 #define BT_ATT_OP_WRITE_REQ 0x12
34 #define BT_ATT_OP_WRITE_RSP 0x13
35 #define BT_ATT_OP_NOTIFY    0x1b
36 #define BT_ATT_OP_INDICATE  0x1d
37 #define BT_ATT_OP_CONFIRM   0x1e
38 #define BT_ATT_OP_WRITE_CMD 0x52
39 #define BT_L2CAP_CID_ATT    0x0004
40 #define LAST_SUPPORTED_ATT_OPCODE 0x20
41 
42 /* Run for more than ATT_TIMEOUT */
43 #define PROCEDURE_1_TIMEOUT_MS (1000 * 70)
44 
45 DEFINE_FLAG_STATIC(is_connected);
46 DEFINE_FLAG_STATIC(flag_data_length_updated);
47 DEFINE_FLAG_STATIC(flag_handle);
48 DEFINE_FLAG_STATIC(flag_write_ack);
49 DEFINE_FLAG_STATIC(flag_req_in_progress);
50 
/* ATT handle (received via notification) that the tester writes to. */
static uint16_t server_write_handle;

/* Per-opcode override table: tests install custom ATT PDU handlers here. */
typedef void (*att_handler_t)(struct net_buf *buf);
static att_handler_t att_handlers[LAST_SUPPORTED_ATT_OPCODE];

/* Queue the HCI driver pushes received buffers onto (drained by rx_thread). */
static K_FIFO_DEFINE(rx_queue);

/* Command buffers double as event-response carriers, so size for both. */
#define CMD_BUF_SIZE MAX(BT_BUF_EVT_RX_SIZE, BT_BUF_CMD_TX_SIZE)
NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, BT_BUF_CMD_TX_COUNT,
			  CMD_BUF_SIZE, 8, NULL);

/* Serializes HCI commands: only one may be in flight at a time. */
static K_SEM_DEFINE(cmd_sem, 1, 1);
/* ACL flow control; initialized from the controller's buffer count. */
static struct k_sem acl_pkts;
/* Handle of the (single) active connection. */
static uint16_t conn_handle;

/* Opcode of the outstanding HCI command; 0xFFFF means "none pending". */
static volatile uint16_t active_opcode = 0xFFFF;
/* Last command response buffer, handed back to send_cmd() callers. */
static struct net_buf *cmd_rsp;
68 
bt_hci_cmd_create(uint16_t opcode,uint8_t param_len)69 struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
70 {
71 	struct bt_hci_cmd_hdr *hdr;
72 	struct net_buf *buf;
73 
74 	LOG_DBG("opcode 0x%04x param_len %u", opcode, param_len);
75 
76 	buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
77 	TEST_ASSERT(buf, "failed allocation");
78 
79 	LOG_DBG("buf %p", buf);
80 
81 	net_buf_reserve(buf, BT_BUF_RESERVE);
82 
83 	bt_buf_set_type(buf, BT_BUF_CMD);
84 
85 	hdr = net_buf_add(buf, sizeof(*hdr));
86 	hdr->opcode = sys_cpu_to_le16(opcode);
87 	hdr->param_len = param_len;
88 
89 	return buf;
90 }
91 
handle_cmd_complete(struct net_buf * buf)92 static void handle_cmd_complete(struct net_buf *buf)
93 {
94 	struct bt_hci_evt_hdr *hdr;
95 	uint8_t status, ncmd;
96 	uint16_t opcode;
97 	struct net_buf_simple_state state;
98 
99 	net_buf_simple_save(&buf->b, &state);
100 
101 	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
102 
103 	if (hdr->evt == BT_HCI_EVT_CMD_COMPLETE) {
104 		struct bt_hci_evt_cmd_complete *evt;
105 
106 		evt = net_buf_pull_mem(buf, sizeof(*evt));
107 		status = 0;
108 		ncmd = evt->ncmd;
109 		opcode = sys_le16_to_cpu(evt->opcode);
110 
111 	} else if (hdr->evt == BT_HCI_EVT_CMD_STATUS) {
112 		struct bt_hci_evt_cmd_status *evt;
113 
114 		evt = net_buf_pull_mem(buf, sizeof(*evt));
115 		status = buf->data[0];
116 		ncmd = evt->ncmd;
117 		opcode = sys_le16_to_cpu(evt->opcode);
118 
119 	} else {
120 		TEST_FAIL("unhandled event 0x%x", hdr->evt);
121 	}
122 
123 	LOG_DBG("opcode 0x%04x status %x", opcode, status);
124 
125 	TEST_ASSERT(status == 0x00, "cmd status: %x", status);
126 
127 	TEST_ASSERT(active_opcode == opcode, "unexpected opcode %x != %x", active_opcode, opcode);
128 
129 	if (active_opcode) {
130 		active_opcode = 0xFFFF;
131 		cmd_rsp = net_buf_ref(buf);
132 		net_buf_simple_restore(&buf->b, &state);
133 	}
134 
135 	if (ncmd) {
136 		k_sem_give(&cmd_sem);
137 	}
138 }
139 
handle_meta_event(struct net_buf * buf)140 static void handle_meta_event(struct net_buf *buf)
141 {
142 	uint8_t code = buf->data[2];
143 
144 	switch (code) {
145 	case BT_HCI_EVT_LE_ENH_CONN_COMPLETE:
146 	case BT_HCI_EVT_LE_ENH_CONN_COMPLETE_V2:
147 		conn_handle = sys_get_le16(&buf->data[4]);
148 		LOG_DBG("connected: handle: %d", conn_handle);
149 		SET_FLAG(is_connected);
150 		break;
151 	case BT_HCI_EVT_LE_DATA_LEN_CHANGE:
152 		SET_FLAG(flag_data_length_updated);
153 		break;
154 	case BT_HCI_EVT_LE_CHAN_SEL_ALGO:
155 		/* do nothing */
156 		break;
157 	default:
158 		LOG_ERR("unhandled meta event %x", code);
159 		LOG_HEXDUMP_ERR(buf->data, buf->len, "HCI META EVT");
160 	}
161 }
162 
handle_ncp(struct net_buf * buf)163 static void handle_ncp(struct net_buf *buf)
164 {
165 	struct bt_hci_evt_num_completed_packets *evt;
166 	struct bt_hci_evt_hdr *hdr;
167 	uint16_t handle, count;
168 
169 	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
170 
171 	evt = (void *)buf->data;
172 	handle = sys_le16_to_cpu(evt->h[0].handle);
173 	count = sys_le16_to_cpu(evt->h[0].count);
174 
175 	LOG_DBG("sent %d packets", count);
176 
177 	while (count--) {
178 		k_sem_give(&acl_pkts);
179 	}
180 }
181 
handle_att_notification(struct net_buf * buf)182 static void handle_att_notification(struct net_buf *buf)
183 {
184 	uint16_t handle = net_buf_pull_le16(buf);
185 
186 	LOG_INF("Got notification for 0x%04x len %d", handle, buf->len);
187 	LOG_HEXDUMP_DBG(buf->data, buf->len, "payload");
188 
189 	server_write_handle = net_buf_pull_le16(buf);
190 	LOG_INF("Retrieved handle to write to: 0x%x", server_write_handle);
191 	SET_FLAG(flag_handle);
192 }
193 
194 struct net_buf *alloc_l2cap_pdu(void);
195 static void send_l2cap_packet(struct net_buf *buf, uint16_t cid);
196 
send_write_rsp(void)197 static void send_write_rsp(void)
198 {
199 	struct net_buf *buf = alloc_l2cap_pdu();
200 
201 	UNSET_FLAG(flag_req_in_progress);
202 
203 	net_buf_add_u8(buf, BT_ATT_OP_WRITE_RSP);
204 	send_l2cap_packet(buf, BT_L2CAP_CID_ATT);
205 }
206 
handle_att_write_0(struct net_buf * buf)207 static void handle_att_write_0(struct net_buf *buf)
208 {
209 	uint16_t handle = net_buf_pull_le16(buf);
210 
211 	LOG_INF("Got write for 0x%04x len %d", handle, buf->len);
212 	LOG_HEXDUMP_DBG(buf->data, buf->len, "payload");
213 
214 	TEST_ASSERT(!IS_FLAG_SET(flag_req_in_progress),
215 	       "Peer is pipelining REQs. This inSIGdent will be reported.");
216 
217 	SET_FLAG(flag_req_in_progress);
218 	send_write_rsp();
219 }
220 
handle_att_write_1(struct net_buf * buf)221 static void handle_att_write_1(struct net_buf *buf)
222 {
223 	uint16_t handle = net_buf_pull_le16(buf);
224 
225 	LOG_INF("Got write for 0x%04x len %d", handle, buf->len);
226 	LOG_HEXDUMP_DBG(buf->data, buf->len, "payload");
227 
228 	TEST_ASSERT(!IS_FLAG_SET(flag_req_in_progress),
229 	       "Peer is pipelining REQs. This inSIGdent will be reported.");
230 
231 	SET_FLAG(flag_req_in_progress);
232 
233 	/* For this test procedure, the response is sent from main */
234 }
235 
handle_att(struct net_buf * buf)236 static void handle_att(struct net_buf *buf)
237 {
238 	uint8_t op = net_buf_pull_u8(buf);
239 
240 	if (ARRAY_SIZE(att_handlers) > op && att_handlers[op]) {
241 		LOG_DBG("executing custom ATT handler for op %x", op);
242 		att_handlers[op](buf);
243 		return;
244 	}
245 
246 	switch (op) {
247 	case BT_ATT_OP_NOTIFY:
248 		handle_att_notification(buf);
249 		return;
250 	case BT_ATT_OP_WRITE_RSP:
251 		LOG_INF("got ATT write RSP");
252 		SET_FLAG(flag_write_ack);
253 		return;
254 	case BT_ATT_OP_MTU_RSP:
255 		LOG_INF("got ATT MTU RSP");
256 		return;
257 	default:
258 		LOG_HEXDUMP_ERR(buf->data, buf->len, "payload");
259 		TEST_FAIL("unhandled opcode %x", op);
260 		return;
261 	}
262 }
263 
handle_l2cap(struct net_buf * buf)264 static void handle_l2cap(struct net_buf *buf)
265 {
266 	struct bt_l2cap_hdr *hdr;
267 	uint16_t cid;
268 
269 	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
270 	cid = sys_le16_to_cpu(hdr->cid);
271 
272 	LOG_DBG("Packet for CID %u len %u", cid, buf->len);
273 	LOG_HEXDUMP_DBG(buf->data, buf->len, "l2cap");
274 
275 	/* Make sure we don't have to recombine packets */
276 	TEST_ASSERT(buf->len == hdr->len, "buflen = %d != hdrlen %d", buf->len, hdr->len);
277 
278 	TEST_ASSERT(cid == BT_L2CAP_CID_ATT, "We only support (U)ATT");
279 
280 	/* (U)ATT PDU */
281 	handle_att(buf);
282 }
283 
handle_acl(struct net_buf * buf)284 static void handle_acl(struct net_buf *buf)
285 {
286 	struct bt_hci_acl_hdr *hdr;
287 	uint16_t len, handle;
288 	uint8_t flags;
289 
290 	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
291 	len = sys_le16_to_cpu(hdr->len);
292 	handle = sys_le16_to_cpu(hdr->handle);
293 
294 	flags = bt_acl_flags(handle);
295 	handle = bt_acl_handle(handle);
296 
297 	TEST_ASSERT(flags == BT_ACL_START, "Fragmentation not supported");
298 
299 	LOG_DBG("ACL: conn %d len %d flags %d", handle, len, flags);
300 	LOG_HEXDUMP_DBG(buf->data, buf->len, "HCI ACL");
301 
302 	handle_l2cap(buf);
303 }
304 
recv(struct net_buf * buf)305 static void recv(struct net_buf *buf)
306 {
307 	LOG_HEXDUMP_DBG(buf->data, buf->len, "HCI RX");
308 
309 	uint8_t code = buf->data[0];
310 
311 	if (bt_buf_get_type(buf) == BT_BUF_EVT) {
312 		switch (code) {
313 		case BT_HCI_EVT_CMD_COMPLETE:
314 		case BT_HCI_EVT_CMD_STATUS:
315 			handle_cmd_complete(buf);
316 			break;
317 		case BT_HCI_EVT_LE_META_EVENT:
318 			handle_meta_event(buf);
319 			break;
320 		case BT_HCI_EVT_DISCONN_COMPLETE:
321 			UNSET_FLAG(is_connected);
322 			break;
323 		case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
324 			handle_ncp(buf);
325 			break;
326 		default:
327 			LOG_ERR("unhandled msg %x", code);
328 			LOG_HEXDUMP_ERR(buf->data, buf->len, "HCI EVT");
329 		}
330 
331 		/* handlers should take a ref if they want to access the buffer
332 		 * later
333 		 */
334 		net_buf_unref(buf);
335 		return;
336 	}
337 
338 	if (bt_buf_get_type(buf) == BT_BUF_ACL_IN) {
339 		handle_acl(buf);
340 		net_buf_unref(buf);
341 		return;
342 	}
343 
344 	LOG_ERR("HCI RX (not data or event)");
345 	net_buf_unref(buf);
346 }
347 
/* Send an HCI command and block until the controller completes it.
 *
 * @param opcode HCI opcode being sent.
 * @param cmd    Pre-built command buffer, or NULL to send a command with
 *               no parameters (built here from @p opcode).
 * @param rsp    If non-NULL, receives a reference to the raw Command
 *               Complete/Status event buffer; the caller must unref it.
 *               If NULL, the response buffer is released here.
 *
 * Only one command may be outstanding at a time; cmd_sem enforces this.
 */
static void send_cmd(uint16_t opcode, struct net_buf *cmd, struct net_buf **rsp)
{
	LOG_DBG("opcode %x", opcode);

	if (!cmd) {
		cmd = bt_hci_cmd_create(opcode, 0);
	}

	/* First take claims the single command slot. */
	k_sem_take(&cmd_sem, K_FOREVER);
	TEST_ASSERT_NO_MSG(active_opcode == 0xFFFF);

	/* handle_cmd_complete() matches the event against this. */
	active_opcode = opcode;

	LOG_HEXDUMP_DBG(cmd->data, cmd->len, "HCI TX");
	bt_send(cmd);

	/* Wait until the command completes */
	/* Second take blocks until handle_cmd_complete() gives the
	 * semaphore; the immediate give below frees the slot again.
	 */
	k_sem_take(&cmd_sem, K_FOREVER);
	k_sem_give(&cmd_sem);

	net_buf_unref(cmd);

	/* return response. it's okay if cmd_rsp gets overwritten, since the app
	 * gets the ref to the underlying buffer when this fn returns.
	 */
	if (rsp) {
		*rsp = cmd_rsp;
	} else {
		net_buf_unref(cmd_rsp);
		cmd_rsp = NULL;
	}
}
380 
/* Stack and thread object for the dedicated HCI RX thread. */
static K_THREAD_STACK_DEFINE(rx_thread_stack, 1024);
static struct k_thread rx_thread_data;
383 
rx_thread(void * p1,void * p2,void * p3)384 static void rx_thread(void *p1, void *p2, void *p3)
385 {
386 	LOG_DBG("start HCI rx");
387 
388 	while (true) {
389 		struct net_buf *buf;
390 
391 		/* Wait until a buffer is available */
392 		buf = k_fifo_get(&rx_queue, K_FOREVER);
393 		recv(buf);
394 	}
395 }
396 
le_read_buffer_size_complete(struct net_buf * rsp)397 static void le_read_buffer_size_complete(struct net_buf *rsp)
398 {
399 	struct bt_hci_rp_le_read_buffer_size *rp = (void *)rsp->data;
400 
401 	LOG_DBG("status 0x%02x", rp->status);
402 	LOG_DBG("max len %d max num %d", rp->le_max_len, rp->le_max_num);
403 
404 	k_sem_init(&acl_pkts, rp->le_max_num, rp->le_max_num);
405 	net_buf_unref(rsp);
406 }
407 
read_max_data_len(uint16_t * tx_octets,uint16_t * tx_time)408 static void read_max_data_len(uint16_t *tx_octets, uint16_t *tx_time)
409 {
410 	struct bt_hci_rp_le_read_max_data_len *rp;
411 	struct net_buf *rsp;
412 
413 	send_cmd(BT_HCI_OP_LE_READ_MAX_DATA_LEN, NULL, &rsp);
414 
415 	rp = (void *)rsp->data;
416 	*tx_octets = sys_le16_to_cpu(rp->max_tx_octets);
417 	*tx_time = sys_le16_to_cpu(rp->max_tx_time);
418 	net_buf_unref(rsp);
419 }
420 
write_default_data_len(uint16_t tx_octets,uint16_t tx_time)421 static void write_default_data_len(uint16_t tx_octets, uint16_t tx_time)
422 {
423 	struct bt_hci_cp_le_write_default_data_len *cp;
424 	struct net_buf *buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN, sizeof(*cp));
425 
426 	TEST_ASSERT_NO_MSG(buf);
427 
428 	cp = net_buf_add(buf, sizeof(*cp));
429 	cp->max_tx_octets = sys_cpu_to_le16(tx_octets);
430 	cp->max_tx_time = sys_cpu_to_le16(tx_time);
431 
432 	send_cmd(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN, buf, NULL);
433 }
434 
/* Raise the default data length to the controller's maximum so whole
 * L2CAP PDUs fit in a single on-air packet.
 */
static void set_data_len(void)
{
	uint16_t max_tx_octets;
	uint16_t max_tx_time;

	read_max_data_len(&max_tx_octets, &max_tx_time);
	write_default_data_len(max_tx_octets, max_tx_time);
}
442 
set_event_mask(uint16_t opcode)443 static void set_event_mask(uint16_t opcode)
444 {
445 	struct bt_hci_cp_set_event_mask *cp_mask;
446 	struct net_buf *buf;
447 	uint64_t mask = 0U;
448 
449 	/* The two commands have the same length/params */
450 	buf = bt_hci_cmd_create(opcode, sizeof(*cp_mask));
451 	TEST_ASSERT_NO_MSG(buf);
452 
453 	/* Forward all events */
454 	cp_mask = net_buf_add(buf, sizeof(*cp_mask));
455 	mask = UINT64_MAX;
456 	sys_put_le64(mask, cp_mask->events);
457 
458 	send_cmd(opcode, buf, NULL);
459 }
460 
set_random_address(void)461 static void set_random_address(void)
462 {
463 	struct net_buf *buf;
464 	bt_addr_le_t addr = {BT_ADDR_LE_RANDOM, {{0x0A, 0x89, 0x67, 0x45, 0x23, 0xC1}}};
465 
466 	LOG_DBG("%s", bt_addr_str(&addr.a));
467 
468 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, sizeof(addr.a));
469 	TEST_ASSERT_NO_MSG(buf);
470 
471 	net_buf_add_mem(buf, &addr.a, sizeof(addr.a));
472 	send_cmd(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, buf, NULL);
473 }
474 
start_adv(void)475 void start_adv(void)
476 {
477 	struct bt_hci_cp_le_set_adv_param set_param;
478 	struct net_buf *buf;
479 	uint16_t interval = 60; /* Interval doesn't matter */
480 
481 	(void)memset(&set_param, 0, sizeof(set_param));
482 
483 	set_param.min_interval = sys_cpu_to_le16(interval);
484 	set_param.max_interval = sys_cpu_to_le16(interval);
485 	set_param.channel_map = 0x07;
486 	set_param.filter_policy = BT_LE_ADV_FP_NO_FILTER;
487 	set_param.type = BT_HCI_ADV_IND;
488 	set_param.own_addr_type = BT_HCI_OWN_ADDR_RANDOM;
489 
490 	/* configure */
491 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_PARAM, sizeof(set_param));
492 	net_buf_add_mem(buf, &set_param, sizeof(set_param));
493 	send_cmd(BT_HCI_OP_LE_SET_ADV_PARAM, buf, NULL);
494 
495 	/* start */
496 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_ENABLE, 1);
497 	net_buf_add_u8(buf, BT_HCI_LE_ADV_ENABLE);
498 	send_cmd(BT_HCI_OP_LE_SET_ADV_ENABLE, buf, NULL);
499 }
500 
/* TX pool for outgoing ACL data (the L2CAP/ATT PDUs this tester builds). */
NET_BUF_POOL_DEFINE(acl_tx_pool, 5, BT_L2CAP_BUF_SIZE(200), 8, NULL);
502 
alloc_l2cap_pdu(void)503 struct net_buf *alloc_l2cap_pdu(void)
504 {
505 	struct net_buf *buf;
506 	uint16_t reserve;
507 
508 	buf = net_buf_alloc(&acl_tx_pool, K_FOREVER);
509 	TEST_ASSERT(buf, "failed ACL allocation");
510 
511 	reserve = sizeof(struct bt_l2cap_hdr);
512 	reserve += sizeof(struct bt_hci_acl_hdr) + BT_BUF_RESERVE;
513 
514 	net_buf_reserve(buf, reserve);
515 
516 	return buf;
517 }
518 
send_acl(struct net_buf * buf)519 static int send_acl(struct net_buf *buf)
520 {
521 	struct bt_hci_acl_hdr *hdr;
522 	uint8_t flags = BT_ACL_START_NO_FLUSH;
523 
524 	hdr = net_buf_push(buf, sizeof(*hdr));
525 	hdr->handle = sys_cpu_to_le16(bt_acl_handle_pack(conn_handle, flags));
526 	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
527 
528 	bt_buf_set_type(buf, BT_BUF_ACL_OUT);
529 
530 	k_sem_take(&acl_pkts, K_FOREVER);
531 
532 	return bt_send(buf);
533 }
534 
send_l2cap_packet(struct net_buf * buf,uint16_t cid)535 static void send_l2cap_packet(struct net_buf *buf, uint16_t cid)
536 {
537 	struct bt_l2cap_hdr *hdr;
538 
539 	hdr = net_buf_push(buf, sizeof(*hdr));
540 	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
541 	hdr->cid = sys_cpu_to_le16(cid);
542 
543 	/* Always entire packets, no HCI fragmentation */
544 	TEST_ASSERT(buf->len <= CONFIG_BT_BUF_ACL_TX_SIZE, "Fragmentation not supported");
545 
546 	send_acl(buf);
547 }
548 
gatt_write(uint16_t op)549 static void gatt_write(uint16_t op)
550 {
551 	static uint8_t data[] = "write";
552 	uint16_t handle = server_write_handle;
553 	struct net_buf *buf = alloc_l2cap_pdu();
554 
555 	net_buf_add_u8(buf, op);
556 	net_buf_add_le16(buf, handle);
557 	net_buf_add_mem(buf, data, sizeof(data));
558 
559 	LOG_INF("send ATT write %s",
560 		op == BT_ATT_OP_WRITE_REQ ? "REQ" : "CMD");
561 
562 	send_l2cap_packet(buf, BT_L2CAP_CID_ATT);
563 }
564 
/* Bring the controller to a known, usable state: reset, size the ACL
 * flow-control semaphore from its reported buffer count, raise the
 * default data length, unmask all events and set the random address.
 * Order matters: buffer sizes must be read before any ACL traffic.
 */
static void prepare_controller(void)
{
	/* Initialize controller */
	struct net_buf *rsp;

	send_cmd(BT_HCI_OP_RESET, NULL, NULL);
	send_cmd(BT_HCI_OP_LE_READ_BUFFER_SIZE, NULL, &rsp);
	le_read_buffer_size_complete(rsp);

	set_data_len();
	set_event_mask(BT_HCI_OP_SET_EVENT_MASK);
	set_event_mask(BT_HCI_OP_LE_SET_EVENT_MASK);
	set_random_address();
}
579 
init_tinyhost(void)580 static void init_tinyhost(void)
581 {
582 	bt_enable_raw(&rx_queue);
583 
584 	/* Start the RX thread */
585 	k_thread_create(&rx_thread_data, rx_thread_stack,
586 			K_THREAD_STACK_SIZEOF(rx_thread_stack), rx_thread,
587 			NULL, NULL, NULL, K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
588 	k_thread_name_set(&rx_thread_data, "HCI RX");
589 
590 	k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(0));
591 
592 	prepare_controller();
593 }
594 
test_procedure_0(void)595 void test_procedure_0(void)
596 {
597 	att_handlers[BT_ATT_OP_WRITE_REQ] = handle_att_write_0;
598 
599 	init_tinyhost();
600 
601 	/* wait until the good peer has connected */
602 	k_msleep(1000);
603 
604 	LOG_INF("init adv");
605 
606 	/* Start advertising & wait for a connection */
607 	start_adv();
608 	WAIT_FOR_FLAG(is_connected);
609 	LOG_INF("connected");
610 
611 	/* We need this to be able to send whole L2CAP PDUs on-air. */
612 	WAIT_FOR_FLAG(flag_data_length_updated);
613 
614 	/* Get handle we will write to */
615 	WAIT_FOR_FLAG(flag_handle);
616 
617 	LOG_INF("##################### START TEST #####################");
618 
619 	uint32_t timeout_ms = PROCEDURE_1_TIMEOUT_MS;
620 	uint32_t start_time = k_uptime_get_32();
621 
622 	while (k_uptime_get_32() - start_time < timeout_ms) {
623 		gatt_write(BT_ATT_OP_WRITE_REQ);
624 	}
625 
626 	/* Verify we get at least one write */
627 	WAIT_FOR_FLAG(flag_write_ack);
628 
629 	TEST_PASS("Tester done");
630 }
631 
test_procedure_1(void)632 void test_procedure_1(void)
633 {
634 	att_handlers[BT_ATT_OP_WRITE_REQ] = handle_att_write_1;
635 
636 	init_tinyhost();
637 
638 	/* Start advertising & wait for a connection */
639 	LOG_INF("init adv");
640 	start_adv();
641 	WAIT_FOR_FLAG(is_connected);
642 	LOG_INF("connected");
643 
644 	/* We need this to be able to send whole L2CAP PDUs on-air. */
645 	WAIT_FOR_FLAG(flag_data_length_updated);
646 
647 	LOG_INF("##################### START TEST #####################");
648 
649 	/* In this testcase, DUT is the aggressor.
650 	 * Tester verifies no spec violation happens.
651 	 */
652 	while (IS_FLAG_SET(is_connected)) {
653 		/* Should be enough to allow DUT's app to batch a few requests. */
654 		k_msleep(1000);
655 		if (IS_FLAG_SET(flag_req_in_progress)) {
656 			send_write_rsp();
657 		}
658 	}
659 
660 	TEST_PASS("Tester done");
661 }
662 
/* Test table: test_id matches the -testid= argument passed by the
 * bsim test scripts; each entry runs its test_main_f.
 */
static const struct bst_test_instance test_to_add[] = {
	{
		.test_id = "tester",
		.test_main_f = test_procedure_0,
	},
	{
		.test_id = "tester_1",
		.test_main_f = test_procedure_1,
	},
	BSTEST_END_MARKER,
};
674 
install(struct bst_test_list * tests)675 static struct bst_test_list *install(struct bst_test_list *tests)
676 {
677 	return bst_add_tests(tests, test_to_add);
678 };
679 
/* NULL-terminated installer list consumed by bst_main(). */
bst_test_install_t test_installers[] = {install, NULL};
681 
682 
/* Entry point: hand control to the babblesim test runner. */
int main(void)
{
	bst_main();

	return 0;
}
689