/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/__assert.h>

#include <zephyr/net_buf.h>
#include <zephyr/bluetooth/buf.h>

#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/hci_raw.h>
#include <zephyr/bluetooth/hci_types.h>

#include "common/bt_str.h"

#include "host/conn_internal.h"
#include "host/l2cap_internal.h"

#include "babblekit/flags.h"
#include "babblekit/testcase.h"

#include "data.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_tinyhost, LOG_LEVEL_INF);

#define BT_ATT_OP_MTU_REQ   0x02
#define BT_ATT_OP_MTU_RSP   0x03
#define BT_ATT_OP_WRITE_REQ 0x12
#define BT_ATT_OP_WRITE_RSP 0x13
#define BT_ATT_OP_NOTIFY    0x1b
#define BT_ATT_OP_INDICATE  0x1d
#define BT_ATT_OP_CONFIRM   0x1e
#define BT_ATT_OP_WRITE_CMD 0x52
#define BT_L2CAP_CID_ATT    0x0004
#define LAST_SUPPORTED_ATT_OPCODE 0x20

DEFINE_FLAG(is_connected);

static K_FIFO_DEFINE(rx_queue);

#define CMD_BUF_SIZE MAX(BT_BUF_EVT_RX_SIZE, BT_BUF_CMD_TX_SIZE)
NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_BUF_CMD_TX_COUNT,
			  CMD_BUF_SIZE, 8, NULL);

#define MAX_CMD_COUNT 1
static K_SEM_DEFINE(cmd_sem, MAX_CMD_COUNT, MAX_CMD_COUNT);
static struct k_sem acl_pkts;
static uint16_t conn_handle;

static volatile uint16_t active_opcode = 0xFFFF;
static struct net_buf *cmd_rsp;

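/* Allocate an HCI command buffer from the local pool and pre-fill the command
 * header (opcode + parameter length). The caller appends the parameters.
 */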
struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
{
	struct bt_hci_cmd_hdr *hdr;
	struct net_buf *buf;

	LOG_DBG("opcode 0x%04x param_len %u", opcode, param_len);

	buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
	TEST_ASSERT(buf, "failed allocation");

	LOG_DBG("buf %p", buf);

	net_buf_reserve(buf, BT_BUF_RESERVE);

	bt_buf_set_type(buf, BT_BUF_CMD);

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->opcode = sys_cpu_to_le16(opcode);
	hdr->param_len = param_len;

	return buf;
}

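/* Process HCI Command Complete / Command Status events: check the status,
 * verify the opcode matches the command in flight, stash a reference to the
 * event buffer in `cmd_rsp`, and release `cmd_sem` when the controller allows
 * another command.
 */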
static void handle_cmd_complete(struct net_buf *buf)
{
	struct bt_hci_evt_hdr *hdr;
	uint8_t status, ncmd;
	uint16_t opcode;
	struct net_buf_simple_state state;

	net_buf_simple_save(&buf->b, &state);

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));

	if (hdr->evt == BT_HCI_EVT_CMD_COMPLETE) {
		struct bt_hci_evt_cmd_complete *evt;

		evt = net_buf_pull_mem(buf, sizeof(*evt));
		status = 0;
		ncmd = evt->ncmd;
		opcode = sys_le16_to_cpu(evt->opcode);

	} else if (hdr->evt == BT_HCI_EVT_CMD_STATUS) {
		struct bt_hci_evt_cmd_status *evt;

		evt = net_buf_pull_mem(buf, sizeof(*evt));
		status = evt->status;
		ncmd = evt->ncmd;
		opcode = sys_le16_to_cpu(evt->opcode);

	} else {
		TEST_FAIL("unhandled event 0x%x", hdr->evt);
	}

	LOG_DBG("opcode 0x%04x status %x", opcode, status);

	TEST_ASSERT(status == 0x00, "cmd 0x%x status: 0x%x", opcode, status);

	TEST_ASSERT(active_opcode == opcode, "unexpected opcode %x != %x", active_opcode, opcode);

	active_opcode = 0xFFFF;
	cmd_rsp = net_buf_ref(buf);
	net_buf_simple_restore(&buf->b, &state);

	if (ncmd) {
		k_sem_give(&cmd_sem);
	}
}

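/* Handle LE meta events. Only the connection-complete variants (to latch the
 * connection handle) and the channel-selection-algorithm event are expected.
 */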
static void handle_meta_event(struct net_buf *buf)
{
	uint8_t code = buf->data[2];

	switch (code) {
	case BT_HCI_EVT_LE_ENH_CONN_COMPLETE:
	case BT_HCI_EVT_LE_ENH_CONN_COMPLETE_V2:
		conn_handle = sys_get_le16(&buf->data[4]);
		LOG_DBG("connected: handle: %d", conn_handle);
		SET_FLAG(is_connected);
		break;
	case BT_HCI_EVT_LE_CHAN_SEL_ALGO:
		/* do nothing */
		break;
	default:
		LOG_ERR("unhandled meta event %x", code);
		LOG_HEXDUMP_ERR(buf->data, buf->len, "HCI META EVT");
	}
}

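/* Handle Number Of Completed Packets: return one ACL credit to `acl_pkts` for
 * every packet the controller reports as sent. Only the first handle entry is
 * read, which is sufficient with a single connection.
 */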
static void handle_ncp(struct net_buf *buf)
{
	struct bt_hci_evt_num_completed_packets *evt;
	struct bt_hci_evt_hdr *hdr;
	uint16_t handle, count;

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));

	evt = (void *)buf->data;
	handle = sys_le16_to_cpu(evt->h[0].handle);
	count = sys_le16_to_cpu(evt->h[0].count);

	LOG_DBG("sent %d packets", count);

	while (count--) {
		k_sem_give(&acl_pkts);
	}
}

struct net_buf *alloc_l2cap_pdu(void);
static void send_l2cap_packet(struct net_buf *buf, uint16_t cid);

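/* Handle incoming ATT PDUs. Only the responses and notifications this tester
 * expects are accepted; anything else fails the test.
 */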
static void handle_att(struct net_buf *buf)
{
	uint8_t op = net_buf_pull_u8(buf);

	switch (op) {
	case BT_ATT_OP_NOTIFY:
		LOG_INF("got ATT notification");
		return;
	case BT_ATT_OP_WRITE_RSP:
		LOG_INF("got ATT write RSP");
		return;
	case BT_ATT_OP_MTU_RSP:
		LOG_INF("got ATT MTU RSP");
		return;
	default:
		LOG_HEXDUMP_ERR(buf->data, buf->len, "payload");
		TEST_FAIL("unhandled opcode %x\n", op);
		return;
	}
}

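/* Parse the L2CAP basic header and dispatch the payload. Recombination is not
 * implemented, so every received L2CAP PDU must fit in a single ACL packet and
 * be addressed to the ATT fixed channel.
 */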
static void handle_l2cap(struct net_buf *buf)
{
	struct bt_l2cap_hdr *hdr;
	uint16_t cid;

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	cid = sys_le16_to_cpu(hdr->cid);

	LOG_DBG("Packet for CID %u len %u", cid, buf->len);
	LOG_HEXDUMP_DBG(buf->data, buf->len, "l2cap");

	/* Make sure we don't have to recombine packets */
	TEST_ASSERT(buf->len == hdr->len, "buflen = %d != hdrlen %d",
	       buf->len, hdr->len);

	TEST_ASSERT(cid == BT_L2CAP_CID_ATT, "We only support (U)ATT");

	/* (U)ATT PDU */
	handle_att(buf);
}

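/* Strip the HCI ACL header and hand the contents to the L2CAP layer. Only
 * complete (BT_ACL_START) packets are supported on the receive path.
 */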
static void handle_acl(struct net_buf *buf)
{
	struct bt_hci_acl_hdr *hdr;
	uint16_t len, handle;
	uint8_t flags;

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	len = sys_le16_to_cpu(hdr->len);
	handle = sys_le16_to_cpu(hdr->handle);

	flags = bt_acl_flags(handle);
	handle = bt_acl_handle(handle);

	TEST_ASSERT(flags == BT_ACL_START,
	       "Fragmentation not supported");

	LOG_DBG("ACL: conn %d len %d flags %d", handle, len, flags);
	LOG_HEXDUMP_DBG(buf->data, buf->len, "HCI ACL");

	handle_l2cap(buf);
}

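/* Entry point for everything coming up from the controller: dispatch HCI
 * events and ACL data to their handlers, then drop our reference.
 */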
static void recv(struct net_buf *buf)
{
	LOG_HEXDUMP_DBG(buf->data, buf->len, "HCI RX");

	uint8_t code = buf->data[0];

	if (bt_buf_get_type(buf) == BT_BUF_EVT) {
		switch (code) {
		case BT_HCI_EVT_CMD_COMPLETE:
		case BT_HCI_EVT_CMD_STATUS:
			handle_cmd_complete(buf);
			break;
		case BT_HCI_EVT_LE_META_EVENT:
			handle_meta_event(buf);
			break;
		case BT_HCI_EVT_DISCONN_COMPLETE:
			UNSET_FLAG(is_connected);
			break;
		case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
			handle_ncp(buf);
			break;
		default:
			LOG_ERR("unhandled msg %x", code);
			LOG_HEXDUMP_ERR(buf->data, buf->len, "HCI EVT");
		}

		/* handlers should take a ref if they want to access the buffer
		 * later
		 */
		net_buf_unref(buf);
		return;
	}

	if (bt_buf_get_type(buf) == BT_BUF_ACL_IN) {
		handle_acl(buf);
		net_buf_unref(buf);
		return;
	}

	LOG_ERR("HCI RX (not data or event)");
	net_buf_unref(buf);
}

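/* Send an HCI command and block until the controller responds. If `cmd` is
 * NULL, an empty command with the given opcode is allocated. If `rsp` is
 * non-NULL, the caller receives (and must unref) the Command Complete buffer.
 */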
static void send_cmd(uint16_t opcode, struct net_buf *cmd, struct net_buf **rsp)
{
	LOG_DBG("opcode %x", opcode);

	if (!cmd) {
		cmd = bt_hci_cmd_create(opcode, 0);
	}

	k_sem_take(&cmd_sem, K_FOREVER);
	TEST_ASSERT(active_opcode == 0xFFFF, "");

	__ASSERT_NO_MSG(opcode);
	active_opcode = opcode;

	LOG_HEXDUMP_DBG(cmd->data, cmd->len, "HCI TX");
	bt_send(cmd);

	/* Wait until the command completes:
	 *
	 * Use `cmd_sem` as a signal that we are able to send another command,
	 * which means that the current command (for which we took cmd_sem
	 * above) has likely gotten a response.
	 *
	 * We don't actually want to send anything more, so when we get that
	 * signal (ie the thread is un-suspended), we release the sem
	 * immediately.
	 */
	BUILD_ASSERT(MAX_CMD_COUNT == 1, "Logic depends on only 1 cmd at a time");
	k_sem_take(&cmd_sem, K_FOREVER);
	k_sem_give(&cmd_sem);

	net_buf_unref(cmd);

	/* Return the response. It's okay if cmd_rsp gets overwritten, since the
	 * app gets the ref to the underlying buffer when this fn returns.
	 */
	if (rsp) {
		*rsp = cmd_rsp;
	} else {
		net_buf_unref(cmd_rsp);
		cmd_rsp = NULL;
	}
}

static K_THREAD_STACK_DEFINE(rx_thread_stack, 1024);
static struct k_thread rx_thread_data;

static void rx_thread(void *p1, void *p2, void *p3)
{
	LOG_DBG("start HCI rx");

	while (true) {
		struct net_buf *buf;

		/* Wait until a buffer is available */
		buf = k_fifo_get(&rx_queue, K_FOREVER);
		recv(buf);
	}
}

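/* Use the LE Read Buffer Size response to initialize the ACL flow-control
 * semaphore with the controller's buffer count.
 */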
static void le_read_buffer_size_complete(struct net_buf *rsp)
{
	struct bt_hci_rp_le_read_buffer_size *rp;

	/* `cmd_rsp` still contains the event and Command Complete headers
	 * (handle_cmd_complete restores the buffer state), so skip them to get
	 * at the return parameters.
	 */
	net_buf_pull(rsp, sizeof(struct bt_hci_evt_hdr) +
			  sizeof(struct bt_hci_evt_cmd_complete));
	rp = (void *)rsp->data;

	LOG_DBG("status 0x%02x", rp->status);
	LOG_DBG("max len %d max num %d", rp->le_max_len, rp->le_max_num);

	k_sem_init(&acl_pkts, rp->le_max_num, rp->le_max_num);
	net_buf_unref(rsp);
}

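/* Enable all events for either the Set Event Mask or LE Set Event Mask
 * command (both take the same 8-byte mask parameter).
 */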
static void set_event_mask(uint16_t opcode)
{
	struct bt_hci_cp_set_event_mask *cp_mask;
	struct net_buf *buf;
	uint64_t mask = 0U;

	/* The two commands have the same length/params */
	buf = bt_hci_cmd_create(opcode, sizeof(*cp_mask));
	TEST_ASSERT(buf, "");

	/* Forward all events */
	cp_mask = net_buf_add(buf, sizeof(*cp_mask));
	mask = UINT64_MAX;
	sys_put_le64(mask, cp_mask->events);

	send_cmd(opcode, buf, NULL);
}

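/* Program a fixed static random address. Advertising below uses
 * BT_HCI_OWN_ADDR_RANDOM, so this must happen before it is enabled.
 */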
static void set_random_address(void)
{
	struct net_buf *buf;
	bt_addr_le_t addr = {BT_ADDR_LE_RANDOM, {{0x0A, 0x89, 0x67, 0x45, 0x23, 0xC1}}};

	LOG_DBG("%s", bt_addr_str(&addr.a));

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, sizeof(addr.a));
	TEST_ASSERT(buf, "");

	net_buf_add_mem(buf, &addr.a, sizeof(addr.a));
	send_cmd(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, buf, NULL);
}

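/* Start legacy connectable advertising: set the advertising data (complete
 * local name), then the advertising parameters, then enable advertising.
 */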
static void start_adv(uint16_t interval, const char *name, size_t name_len)
{
	struct bt_hci_cp_le_set_adv_data data;
	struct bt_hci_cp_le_set_adv_param set_param;
	struct net_buf *buf;

	/* name_len should not include the terminating \0 */
	__ASSERT(name_len < (31 - 2), "name_len must be < 29");

	(void)memset(&data, 0, sizeof(data));
	data.len = name_len + 2;
	data.data[0] = name_len + 1;
	data.data[1] = BT_DATA_NAME_COMPLETE;
	memcpy(&data.data[2], name, name_len);

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_DATA, sizeof(data));
	__ASSERT_NO_MSG(buf);
	net_buf_add_mem(buf, &data, sizeof(data));
	send_cmd(BT_HCI_OP_LE_SET_ADV_DATA, buf, NULL);

	(void)memset(&set_param, 0, sizeof(set_param));
	set_param.min_interval = sys_cpu_to_le16(interval);
	set_param.max_interval = sys_cpu_to_le16(interval);
	set_param.channel_map = 0x07;
	set_param.filter_policy = BT_LE_ADV_FP_NO_FILTER;
	set_param.type = BT_HCI_ADV_IND;
	set_param.own_addr_type = BT_HCI_OWN_ADDR_RANDOM;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_PARAM, sizeof(set_param));
	__ASSERT_NO_MSG(buf);
	net_buf_add_mem(buf, &set_param, sizeof(set_param));

	send_cmd(BT_HCI_OP_LE_SET_ADV_PARAM, buf, NULL);

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_ENABLE, 1);
	__ASSERT_NO_MSG(buf);

	net_buf_add_u8(buf, BT_HCI_LE_ADV_ENABLE);
	send_cmd(BT_HCI_OP_LE_SET_ADV_ENABLE, buf, NULL);
}

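/* Terminate the current connection and wait for the Disconnection Complete
 * event (which clears the is_connected flag in the RX path).
 */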
static void disconnect(void)
{
	struct net_buf *buf;
	struct bt_hci_cp_disconnect *disconn;
	uint8_t reason = BT_HCI_ERR_REMOTE_USER_TERM_CONN;
	uint16_t handle = conn_handle;

	LOG_INF("Disconnecting");

	buf = bt_hci_cmd_create(BT_HCI_OP_DISCONNECT, sizeof(*disconn));
	TEST_ASSERT(buf);

	disconn = net_buf_add(buf, sizeof(*disconn));
	disconn->handle = sys_cpu_to_le16(handle);
	disconn->reason = reason;

	send_cmd(BT_HCI_OP_DISCONNECT, buf, NULL);

	WAIT_FOR_FLAG_UNSET(is_connected);
	LOG_INF("Disconnected");
}

NET_BUF_POOL_DEFINE(acl_tx_pool, 5, BT_L2CAP_BUF_SIZE(200), 8, NULL);

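/* Allocate a TX buffer with headroom reserved for the HCI ACL and L2CAP
 * headers that get pushed in front of the payload later.
 */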
struct net_buf *alloc_l2cap_pdu(void)
{
	struct net_buf *buf;
	uint16_t reserve;

	buf = net_buf_alloc(&acl_tx_pool, K_FOREVER);
	TEST_ASSERT(buf, "failed ACL allocation");

	reserve = sizeof(struct bt_l2cap_hdr);
	reserve += sizeof(struct bt_hci_acl_hdr) + BT_BUF_RESERVE;

	net_buf_reserve(buf, reserve);

	return buf;
}

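/* Push the HCI ACL header, wait for a controller buffer credit (returned via
 * Number Of Completed Packets) and hand the packet to the controller.
 */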
static int send_acl(struct net_buf *buf, uint8_t flags)
{
	struct bt_hci_acl_hdr *hdr;

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->handle = sys_cpu_to_le16(bt_acl_handle_pack(conn_handle, flags));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	bt_buf_set_type(buf, BT_BUF_ACL_OUT);

	k_sem_take(&acl_pkts, K_FOREVER);

	return bt_send(buf);
}

static void push_l2cap_pdu_header(struct net_buf *dst, uint16_t len, uint16_t cid)
{
	struct bt_l2cap_hdr *hdr;

	hdr = net_buf_push(dst, sizeof(*hdr));
	hdr->len = sys_cpu_to_le16(len);
	hdr->cid = sys_cpu_to_le16(cid);
}

static void send_l2cap_packet(struct net_buf *buf, uint16_t cid)
{
	push_l2cap_pdu_header(buf, buf->len, cid);
	send_acl(buf, BT_ACL_START_NO_FLUSH);
}

static void prepare_controller(void)
{
	/* Initialize controller */
	struct net_buf *rsp;

	send_cmd(BT_HCI_OP_RESET, NULL, NULL);
	send_cmd(BT_HCI_OP_LE_READ_BUFFER_SIZE, NULL, &rsp);
	le_read_buffer_size_complete(rsp);

	set_event_mask(BT_HCI_OP_SET_EVENT_MASK);
	set_event_mask(BT_HCI_OP_LE_SET_EVENT_MASK);
	set_random_address();
}

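/* Bring up the raw HCI interface, start the HCI RX thread and configure the
 * controller so the test can start advertising.
 */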
static void init_tinyhost(void)
{
	bt_enable_raw(&rx_queue);

	/* Start the RX thread */
	k_thread_create(&rx_thread_data, rx_thread_stack,
			K_THREAD_STACK_SIZEOF(rx_thread_stack), rx_thread,
			NULL, NULL, NULL, K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
	k_thread_name_set(&rx_thread_data, "HCI RX");

	k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(0));

	prepare_controller();
}

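/* Send a complete ATT notification for GATT_HANDLE as a single L2CAP PDU. */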
static void gatt_notify(void)
{
	static uint8_t data[] = NOTIFICATION_PAYLOAD;
	uint16_t handle = GATT_HANDLE;
	struct net_buf *buf = alloc_l2cap_pdu();

	net_buf_add_u8(buf, BT_ATT_OP_NOTIFY);
	net_buf_add_le16(buf, handle);
	net_buf_add_mem(buf, data, sizeof(data));

	LOG_INF("Sending complete notification");
	send_l2cap_packet(buf, BT_L2CAP_CID_ATT);
}

/* Send all but the last fragment of a notification */
static void gatt_notify_without_last_fragment(void)
{
	static uint8_t data[] = NOTIFICATION_PAYLOAD;
	uint16_t handle = GATT_HANDLE;
	struct net_buf *att_packet = alloc_l2cap_pdu();

	/* Prepare (G)ATT notification packet */
	net_buf_add_u8(att_packet, BT_ATT_OP_NOTIFY);
	net_buf_add_le16(att_packet, handle);
	net_buf_add_mem(att_packet, data, sizeof(data));

	size_t on_air_size = 5;
	uint8_t flags = BT_ACL_START_NO_FLUSH;

	LOG_INF("Sending partial notification");

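	/* Split the ATT packet into ACL fragments of `on_air_size` bytes. The
	 * first fragment carries the L2CAP header (with the full PDU length),
	 * the rest are continuation fragments. The loop stops while one
	 * fragment is still pending, so the PDU is deliberately left
	 * incomplete on the DUT side.
	 */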
	for (size_t i = 0; att_packet->len > on_air_size; i++) {
		struct net_buf *buf = alloc_l2cap_pdu();

		__ASSERT_NO_MSG(buf);

		/* This is the size of the ACL payload, i.e. not including the HCI header. */
		size_t frag_len = MIN(att_packet->len, on_air_size);

		if (i == 0) {
			/* Only the first fragment should have an L2CAP PDU header */
			push_l2cap_pdu_header(buf, att_packet->len, BT_L2CAP_CID_ATT);
			frag_len -= BT_L2CAP_HDR_SIZE;
		}

		/* copy data into ACL frag */
		net_buf_add_mem(buf,
				net_buf_pull_mem(att_packet, frag_len),
				frag_len);

		LOG_DBG("send ACL frag %zu (%d bytes, remaining %d)", i, buf->len, att_packet->len);
		LOG_HEXDUMP_DBG(buf->data, buf->len, "ACL Fragment");

		send_acl(buf, flags);
		flags = BT_ACL_CONT;
	}

	net_buf_unref(att_packet);

	/* Hey! You didn't send the last frag, no fair!
	 *   - The DUT (probably)
	 */
	LOG_INF("Partial notification sent");
}

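/* One test round: advertise, wait for the DUT to connect and subscribe, send a
 * complete notification followed by a truncated one, then disconnect.
 */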
static void run_test_iteration(void)
{
	LOG_INF("advertise");

	/* Start advertising & wait for a connection */
	start_adv(40, PEER_NAME, sizeof(PEER_NAME) - 1);
	WAIT_FOR_FLAG(is_connected);
	LOG_INF("connected");

	/* Generous time allotment for the DUT to fake-subscribe */
	k_sleep(K_MSEC(100));

	gatt_notify();
	gatt_notify_without_last_fragment();
	disconnect();
}

void entrypoint_peer(void)
{
	init_tinyhost();

	LOG_INF("##################### START TEST #####################");

	for (size_t i = 0; i < TEST_ITERATIONS; i++) {
		LOG_INF("## Iteration %zu", i);
		run_test_iteration();
	}

	TEST_PASS_AND_EXIT("Peer (tester) done\n");
}