1 /*
2  * Copyright (c) 2023 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 #include <zephyr/sys/byteorder.h>
9 #include <zephyr/sys/__assert.h>
10 
11 #include <zephyr/net_buf.h>
12 #include <zephyr/bluetooth/buf.h>
13 
14 #include <zephyr/bluetooth/bluetooth.h>
15 #include <zephyr/bluetooth/hci.h>
16 #include <zephyr/bluetooth/hci_raw.h>
17 #include <zephyr/bluetooth/hci_types.h>
18 
19 #include "common/bt_str.h"
20 
21 #include "host/conn_internal.h"
22 #include "host/l2cap_internal.h"
23 
24 #include "utils.h"
25 #include "bstests.h"
26 
27 #include <zephyr/logging/log.h>
28 LOG_MODULE_REGISTER(bt_tinyhost, LOG_LEVEL_INF);
29 
30 #define BT_ATT_OP_MTU_REQ   0x02
31 #define BT_ATT_OP_MTU_RSP   0x03
32 #define BT_ATT_OP_WRITE_REQ 0x12
33 #define BT_ATT_OP_WRITE_RSP 0x13
34 #define BT_ATT_OP_NOTIFY    0x1b
35 #define BT_ATT_OP_INDICATE  0x1d
36 #define BT_ATT_OP_CONFIRM   0x1e
37 #define BT_ATT_OP_WRITE_CMD 0x52
38 #define BT_L2CAP_CID_ATT    0x0004
39 
40 DEFINE_FLAG(is_connected);
41 DEFINE_FLAG(flag_data_length_updated);
42 DEFINE_FLAG(flag_handle);
43 DEFINE_FLAG(flag_notified);
44 DEFINE_FLAG(flag_write_ack);
45 DEFINE_FLAG(flag_indication_ack);
46 
47 static uint16_t server_write_handle;
48 
49 static K_FIFO_DEFINE(rx_queue);
50 
51 #define CMD_BUF_SIZE MAX(BT_BUF_EVT_RX_SIZE, BT_BUF_CMD_TX_SIZE)
52 NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_BUF_CMD_TX_COUNT,
53 			  CMD_BUF_SIZE, 8, NULL);
54 
55 static K_SEM_DEFINE(cmd_sem, 1, 1);
56 static struct k_sem acl_pkts;
57 static uint16_t conn_handle;
58 
59 static volatile uint16_t active_opcode = 0xFFFF;
60 static struct net_buf *cmd_rsp;
61 
/* Allocate and prime an HCI command buffer.
 *
 * Reserves driver headroom, tags the buffer as a command, and writes
 * the HCI command header (little-endian opcode + parameter length).
 * The caller appends the parameters and hands the buffer to bt_send().
 */
struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
{
	struct net_buf *buf;
	struct bt_hci_cmd_hdr *hdr;

	LOG_DBG("opcode 0x%04x param_len %u", opcode, param_len);

	buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
	ASSERT(buf, "failed allocation");

	LOG_DBG("buf %p", buf);

	net_buf_reserve(buf, BT_BUF_RESERVE);
	bt_buf_set_type(buf, BT_BUF_CMD);

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->param_len = param_len;
	hdr->opcode = sys_cpu_to_le16(opcode);

	return buf;
}
84 
/* Handle HCI Command Complete / Command Status events.
 *
 * Validates that the event matches the command the test is currently
 * waiting on (active_opcode), stashes a reference to the full event in
 * cmd_rsp so send_cmd() can hand it back to its caller, and releases
 * cmd_sem when the controller allows more commands (ncmd != 0).
 */
static void handle_cmd_complete(struct net_buf *buf)
{
	struct bt_hci_evt_hdr *hdr;
	uint8_t status, ncmd;
	uint16_t opcode;
	struct net_buf_simple_state state;

	/* Save the unparsed state: cmd_rsp must reference the whole
	 * event, not just what remains after pulling the headers.
	 */
	net_buf_simple_save(&buf->b, &state);

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));

	if (hdr->evt == BT_HCI_EVT_CMD_COMPLETE) {
		struct bt_hci_evt_cmd_complete *evt;

		evt = net_buf_pull_mem(buf, sizeof(*evt));
		/* Command Complete carries no status of its own; any
		 * per-command status lives in the return parameters and
		 * is left for the caller to inspect.
		 */
		status = 0;
		ncmd = evt->ncmd;
		opcode = sys_le16_to_cpu(evt->opcode);

	} else if (hdr->evt == BT_HCI_EVT_CMD_STATUS) {
		struct bt_hci_evt_cmd_status *evt;

		evt = net_buf_pull_mem(buf, sizeof(*evt));
		status = buf->data[0];
		ncmd = evt->ncmd;
		opcode = sys_le16_to_cpu(evt->opcode);

	} else {
		/* FAIL() aborts the test run; status/ncmd/opcode are
		 * never read on this path.
		 */
		FAIL("unhandled event 0x%x", hdr->evt);
	}

	LOG_DBG("opcode 0x%04x status %x", opcode, status);

	ASSERT(status == 0x00, "cmd status: %x", status);

	ASSERT(active_opcode == opcode, "unexpected opcode %x != %x", active_opcode, opcode);

	if (active_opcode) {
		active_opcode = 0xFFFF;
		/* Keep a ref: recv() unrefs the buffer after we return. */
		cmd_rsp = net_buf_ref(buf);
		net_buf_simple_restore(&buf->b, &state);
	}

	if (ncmd) {
		/* Controller can accept another command. */
		k_sem_give(&cmd_sem);
	}
}
132 
handle_meta_event(struct net_buf * buf)133 static void handle_meta_event(struct net_buf *buf)
134 {
135 	uint8_t code = buf->data[2];
136 
137 	switch (code) {
138 	case BT_HCI_EVT_LE_ENH_CONN_COMPLETE:
139 	case BT_HCI_EVT_LE_ENH_CONN_COMPLETE_V2:
140 		conn_handle = sys_get_le16(&buf->data[4]);
141 		LOG_DBG("connected: handle: %d", conn_handle);
142 		SET_FLAG(is_connected);
143 		break;
144 	case BT_HCI_EVT_LE_DATA_LEN_CHANGE:
145 		SET_FLAG(flag_data_length_updated);
146 		break;
147 	case BT_HCI_EVT_LE_CHAN_SEL_ALGO:
148 		/* do nothing */
149 		break;
150 	default:
151 		LOG_ERR("unhandled meta event %x", code);
152 		LOG_HEXDUMP_ERR(buf->data, buf->len, "HCI META EVT");
153 	}
154 }
155 
handle_ncp(struct net_buf * buf)156 static void handle_ncp(struct net_buf *buf)
157 {
158 	struct bt_hci_evt_num_completed_packets *evt;
159 	struct bt_hci_evt_hdr *hdr;
160 	uint16_t handle, count;
161 
162 	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
163 
164 	evt = (void *)buf->data;
165 	handle = sys_le16_to_cpu(evt->h[0].handle);
166 	count = sys_le16_to_cpu(evt->h[0].count);
167 
168 	LOG_DBG("sent %d packets", count);
169 
170 	while (count--) {
171 		k_sem_give(&acl_pkts);
172 	}
173 }
174 
handle_att_notification(struct net_buf * buf)175 static void handle_att_notification(struct net_buf *buf)
176 {
177 	uint16_t handle = net_buf_pull_le16(buf);
178 
179 	LOG_INF("Got notification for 0x%04x len %d", handle, buf->len);
180 	LOG_HEXDUMP_DBG(buf->data, buf->len, "payload");
181 
182 	server_write_handle = net_buf_pull_le16(buf);
183 	LOG_INF("Retrieved handle to write to: 0x%x", server_write_handle);
184 	SET_FLAG(flag_handle);
185 }
186 
/* Defined below; forward-declared so the ATT handlers can send replies. */
struct net_buf *alloc_l2cap_pdu(void);
static void send_l2cap_packet(struct net_buf *buf, uint16_t cid);
189 
send_write_rsp(void)190 static void send_write_rsp(void)
191 {
192 	struct net_buf *buf = alloc_l2cap_pdu();
193 
194 	net_buf_add_u8(buf, BT_ATT_OP_WRITE_RSP);
195 	send_l2cap_packet(buf, BT_L2CAP_CID_ATT);
196 }
197 
handle_att_write(struct net_buf * buf)198 static void handle_att_write(struct net_buf *buf)
199 {
200 	uint16_t handle = net_buf_pull_le16(buf);
201 
202 	LOG_INF("Got write for 0x%04x len %d", handle, buf->len);
203 	LOG_HEXDUMP_DBG(buf->data, buf->len, "payload");
204 
205 	send_write_rsp();
206 }
207 
handle_att(struct net_buf * buf)208 static void handle_att(struct net_buf *buf)
209 {
210 	uint8_t op = net_buf_pull_u8(buf);
211 
212 	switch (op) {
213 	case BT_ATT_OP_NOTIFY:
214 		handle_att_notification(buf);
215 		return;
216 	case BT_ATT_OP_WRITE_REQ:
217 		handle_att_write(buf);
218 		return;
219 	case BT_ATT_OP_WRITE_RSP:
220 		LOG_INF("got ATT write RSP");
221 		SET_FLAG(flag_write_ack);
222 		return;
223 	case BT_ATT_OP_CONFIRM:
224 		LOG_INF("got ATT indication confirm");
225 		SET_FLAG(flag_indication_ack);
226 		return;
227 	case BT_ATT_OP_MTU_RSP:
228 		LOG_INF("got ATT MTU RSP");
229 		return;
230 	default:
231 		LOG_HEXDUMP_ERR(buf->data, buf->len, "payload");
232 		FAIL("unhandled opcode %x\n", op);
233 		return;
234 	}
235 }
236 
handle_l2cap(struct net_buf * buf)237 static void handle_l2cap(struct net_buf *buf)
238 {
239 	struct bt_l2cap_hdr *hdr;
240 	uint16_t cid;
241 
242 	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
243 	cid = sys_le16_to_cpu(hdr->cid);
244 
245 	LOG_DBG("Packet for CID %u len %u", cid, buf->len);
246 	LOG_HEXDUMP_DBG(buf->data, buf->len, "l2cap");
247 
248 	/* Make sure we don't have to recombine packets */
249 	ASSERT(buf->len == hdr->len, "buflen = %d != hdrlen %d",
250 	       buf->len, hdr->len);
251 
252 	ASSERT(cid == BT_L2CAP_CID_ATT, "We only support (U)ATT");
253 
254 	/* (U)ATT PDU */
255 	handle_att(buf);
256 }
257 
handle_acl(struct net_buf * buf)258 static void handle_acl(struct net_buf *buf)
259 {
260 	struct bt_hci_acl_hdr *hdr;
261 	uint16_t len, handle;
262 	uint8_t flags;
263 
264 	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
265 	len = sys_le16_to_cpu(hdr->len);
266 	handle = sys_le16_to_cpu(hdr->handle);
267 
268 	flags = bt_acl_flags(handle);
269 	handle = bt_acl_handle(handle);
270 
271 	ASSERT(flags == BT_ACL_START,
272 	       "Fragmentation not supported");
273 
274 	LOG_DBG("ACL: conn %d len %d flags %d", handle, len, flags);
275 	LOG_HEXDUMP_DBG(buf->data, buf->len, "HCI ACL");
276 
277 	handle_l2cap(buf);
278 }
279 
recv(struct net_buf * buf)280 static void recv(struct net_buf *buf)
281 {
282 	LOG_HEXDUMP_DBG(buf->data, buf->len, "HCI RX");
283 
284 	uint8_t code = buf->data[0];
285 
286 	if (bt_buf_get_type(buf) == BT_BUF_EVT) {
287 		switch (code) {
288 		case BT_HCI_EVT_CMD_COMPLETE:
289 		case BT_HCI_EVT_CMD_STATUS:
290 			handle_cmd_complete(buf);
291 			break;
292 		case BT_HCI_EVT_LE_META_EVENT:
293 			handle_meta_event(buf);
294 			break;
295 		case BT_HCI_EVT_DISCONN_COMPLETE:
296 			UNSET_FLAG(is_connected);
297 			break;
298 		case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
299 			handle_ncp(buf);
300 			break;
301 		default:
302 			LOG_ERR("unhandled msg %x", code);
303 			LOG_HEXDUMP_ERR(buf->data, buf->len, "HCI EVT");
304 		}
305 
306 		/* handlers should take a ref if they want to access the buffer
307 		 * later
308 		 */
309 		net_buf_unref(buf);
310 		return;
311 	}
312 
313 	if (bt_buf_get_type(buf) == BT_BUF_ACL_IN) {
314 		handle_acl(buf);
315 		net_buf_unref(buf);
316 		return;
317 	}
318 
319 	LOG_ERR("HCI RX (not data or event)");
320 	net_buf_unref(buf);
321 }
322 
/* Send an HCI command and synchronously wait for its completion event.
 *
 * @param opcode HCI opcode; also used to validate the response.
 * @param cmd    Pre-built command buffer, or NULL to send the command
 *               with no parameters. Ownership is taken (unref'd here).
 * @param rsp    If non-NULL, receives a reference to the full response
 *               event (caller must unref); if NULL, the response is
 *               dropped here.
 *
 * Serialization: cmd_sem starts at 1. We take it to claim the single
 * command slot; the RX thread gives it back from handle_cmd_complete()
 * once the controller reports ncmd > 0, which unblocks the second take
 * below. The final give restores the slot for the next caller.
 */
static void send_cmd(uint16_t opcode, struct net_buf *cmd, struct net_buf **rsp)
{
	LOG_DBG("opcode %x", opcode);

	/* Parameter-less commands can be built on the fly. */
	if (!cmd) {
		cmd = bt_hci_cmd_create(opcode, 0);
	}

	k_sem_take(&cmd_sem, K_FOREVER);
	/* 0xFFFF means "no command in flight". */
	ASSERT(active_opcode == 0xFFFF, "");

	active_opcode = opcode;

	LOG_HEXDUMP_DBG(cmd->data, cmd->len, "HCI TX");
	bt_send(cmd);

	/* Wait until the command completes */
	k_sem_take(&cmd_sem, K_FOREVER);
	k_sem_give(&cmd_sem);

	net_buf_unref(cmd);

	/* return response. it's okay if cmd_rsp gets overwritten, since the app
	 * gets the ref to the underlying buffer when this fn returns.
	 */
	if (rsp) {
		*rsp = cmd_rsp;
	} else {
		net_buf_unref(cmd_rsp);
		cmd_rsp = NULL;
	}
}
355 
/* Dedicated thread draining the HCI RX queue filled by the controller. */
static K_THREAD_STACK_DEFINE(rx_thread_stack, 1024);
static struct k_thread rx_thread_data;
358 
rx_thread(void * p1,void * p2,void * p3)359 static void rx_thread(void *p1, void *p2, void *p3)
360 {
361 	LOG_DBG("start HCI rx");
362 
363 	while (true) {
364 		struct net_buf *buf;
365 
366 		/* Wait until a buffer is available */
367 		buf = k_fifo_get(&rx_queue, K_FOREVER);
368 		recv(buf);
369 	}
370 }
371 
le_read_buffer_size_complete(struct net_buf * rsp)372 static void le_read_buffer_size_complete(struct net_buf *rsp)
373 {
374 	struct bt_hci_rp_le_read_buffer_size *rp = (void *)rsp->data;
375 
376 	LOG_DBG("status 0x%02x", rp->status);
377 	LOG_DBG("max len %d max num %d", rp->le_max_len, rp->le_max_num);
378 
379 	k_sem_init(&acl_pkts, rp->le_max_num, rp->le_max_num);
380 	net_buf_unref(rsp);
381 }
382 
/* Query the controller's maximum supported data length.
 *
 * @param tx_octets Out: max TX payload octets (host byte order).
 * @param tx_time   Out: max TX time in microseconds (host byte order).
 */
static void read_max_data_len(uint16_t *tx_octets, uint16_t *tx_time)
{
	struct net_buf *rsp;
	struct bt_hci_rp_le_read_max_data_len *rp;

	send_cmd(BT_HCI_OP_LE_READ_MAX_DATA_LEN, NULL, &rsp);

	rp = (void *)rsp->data;
	*tx_octets = sys_le16_to_cpu(rp->max_tx_octets);
	*tx_time = sys_le16_to_cpu(rp->max_tx_time);

	net_buf_unref(rsp);
}
395 
/* Set the controller's default suggested data length for new
 * connections (values in host byte order).
 */
static void write_default_data_len(uint16_t tx_octets, uint16_t tx_time)
{
	struct net_buf *buf;
	struct bt_hci_cp_le_write_default_data_len *cp;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN, sizeof(*cp));
	ASSERT(buf, "");

	cp = net_buf_add(buf, sizeof(*cp));
	cp->max_tx_time = sys_cpu_to_le16(tx_time);
	cp->max_tx_octets = sys_cpu_to_le16(tx_octets);

	send_cmd(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN, buf, NULL);
}
409 
set_data_len(void)410 static void set_data_len(void)
411 {
412 	uint16_t tx_octets, tx_time;
413 
414 	read_max_data_len(&tx_octets, &tx_time);
415 	write_default_data_len(tx_octets, tx_time);
416 }
417 
/* Enable every event bit for either Set Event Mask or LE Set Event
 * Mask (both commands share the same 8-byte parameter layout).
 *
 * @param opcode BT_HCI_OP_SET_EVENT_MASK or BT_HCI_OP_LE_SET_EVENT_MASK.
 */
static void set_event_mask(uint16_t opcode)
{
	struct net_buf *buf;
	struct bt_hci_cp_set_event_mask *cp_mask;

	buf = bt_hci_cmd_create(opcode, sizeof(*cp_mask));
	ASSERT(buf, "");

	/* Forward all events */
	cp_mask = net_buf_add(buf, sizeof(*cp_mask));
	sys_put_le64(UINT64_MAX, cp_mask->events);

	send_cmd(opcode, buf, NULL);
}
435 
set_random_address(void)436 static void set_random_address(void)
437 {
438 	struct net_buf *buf;
439 	bt_addr_le_t addr = {BT_ADDR_LE_RANDOM, {{0x0A, 0x89, 0x67, 0x45, 0x23, 0xC1}}};
440 
441 	LOG_DBG("%s", bt_addr_str(&addr.a));
442 
443 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, sizeof(addr.a));
444 	ASSERT(buf, "");
445 
446 	net_buf_add_mem(buf, &addr.a, sizeof(addr.a));
447 	send_cmd(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, buf, NULL);
448 }
449 
start_adv(void)450 void start_adv(void)
451 {
452 	struct bt_hci_cp_le_set_adv_param set_param;
453 	struct net_buf *buf;
454 	uint16_t interval = 60; /* Interval doesn't matter */
455 
456 	(void)memset(&set_param, 0, sizeof(set_param));
457 
458 	set_param.min_interval = sys_cpu_to_le16(interval);
459 	set_param.max_interval = sys_cpu_to_le16(interval);
460 	set_param.channel_map = 0x07;
461 	set_param.filter_policy = BT_LE_ADV_FP_NO_FILTER;
462 	set_param.type = BT_HCI_ADV_IND;
463 	set_param.own_addr_type = BT_HCI_OWN_ADDR_RANDOM;
464 
465 	/* configure */
466 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_PARAM, sizeof(set_param));
467 	net_buf_add_mem(buf, &set_param, sizeof(set_param));
468 	send_cmd(BT_HCI_OP_LE_SET_ADV_PARAM, buf, NULL);
469 
470 	/* start */
471 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_ENABLE, 1);
472 	net_buf_add_u8(buf, BT_HCI_LE_ADV_ENABLE);
473 	send_cmd(BT_HCI_OP_LE_SET_ADV_ENABLE, buf, NULL);
474 }
475 
476 NET_BUF_POOL_DEFINE(acl_tx_pool, 5, BT_L2CAP_BUF_SIZE(200), 8, NULL);
477 
alloc_l2cap_pdu(void)478 struct net_buf *alloc_l2cap_pdu(void)
479 {
480 	struct net_buf *buf;
481 	uint16_t reserve;
482 
483 	buf = net_buf_alloc(&acl_tx_pool, K_FOREVER);
484 	ASSERT(buf, "failed ACL allocation");
485 
486 	reserve = sizeof(struct bt_l2cap_hdr);
487 	reserve += sizeof(struct bt_hci_acl_hdr) + BT_BUF_RESERVE;
488 
489 	net_buf_reserve(buf, reserve);
490 
491 	return buf;
492 }
493 
send_acl(struct net_buf * buf)494 static int send_acl(struct net_buf *buf)
495 {
496 	struct bt_hci_acl_hdr *hdr;
497 	uint8_t flags = BT_ACL_START_NO_FLUSH;
498 
499 	hdr = net_buf_push(buf, sizeof(*hdr));
500 	hdr->handle = sys_cpu_to_le16(bt_acl_handle_pack(conn_handle, flags));
501 	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
502 
503 	bt_buf_set_type(buf, BT_BUF_ACL_OUT);
504 
505 	k_sem_take(&acl_pkts, K_FOREVER);
506 
507 	return bt_send(buf);
508 }
509 
/* Prepend the L2CAP basic header and hand the PDU to the ACL layer.
 * The whole PDU must fit in one HCI ACL packet (no fragmentation).
 */
static void send_l2cap_packet(struct net_buf *buf, uint16_t cid)
{
	struct bt_l2cap_hdr *hdr = net_buf_push(buf, sizeof(*hdr));

	hdr->cid = sys_cpu_to_le16(cid);
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	/* Always entire packets, no HCI fragmentation */
	ASSERT(buf->len <= CONFIG_BT_BUF_ACL_TX_SIZE,
	       "Fragmentation not supported");

	send_acl(buf);
}
524 
/* Send an ATT write ("write" payload) to the handle learned from the
 * peer's notification.
 *
 * @param op BT_ATT_OP_WRITE_REQ (expects a response) or
 *           BT_ATT_OP_WRITE_CMD (no response).
 */
static void gatt_write(uint16_t op)
{
	static uint8_t payload[] = "write";
	struct net_buf *pdu = alloc_l2cap_pdu();

	net_buf_add_u8(pdu, op);
	net_buf_add_le16(pdu, server_write_handle);
	net_buf_add_mem(pdu, payload, sizeof(payload));

	LOG_INF("send ATT write %s",
		op == BT_ATT_OP_WRITE_REQ ? "REQ" : "CMD");

	send_l2cap_packet(pdu, BT_L2CAP_CID_ATT);
}
540 
gatt_notify(void)541 static void gatt_notify(void)
542 {
543 	static uint8_t data[] = NOTIFICATION_PAYLOAD;
544 	uint16_t handle = HVX_HANDLE;
545 	struct net_buf *buf = alloc_l2cap_pdu();
546 
547 	net_buf_add_u8(buf, BT_ATT_OP_NOTIFY);
548 	net_buf_add_le16(buf, handle);
549 	net_buf_add_mem(buf, data, sizeof(data));
550 
551 	LOG_INF("send ATT notification");
552 	send_l2cap_packet(buf, BT_L2CAP_CID_ATT);
553 }
554 
gatt_indicate(void)555 static void gatt_indicate(void)
556 {
557 	static uint8_t data[] = INDICATION_PAYLOAD;
558 	uint16_t handle = HVX_HANDLE;
559 	struct net_buf *buf = alloc_l2cap_pdu();
560 
561 	net_buf_add_u8(buf, BT_ATT_OP_INDICATE);
562 	net_buf_add_le16(buf, handle);
563 	net_buf_add_mem(buf, data, sizeof(data));
564 
565 	LOG_INF("send ATT indication");
566 	send_l2cap_packet(buf, BT_L2CAP_CID_ATT);
567 }
568 
prepare_controller(void)569 static void prepare_controller(void)
570 {
571 	/* Initialize controller */
572 	struct net_buf *rsp;
573 
574 	send_cmd(BT_HCI_OP_RESET, NULL, NULL);
575 	send_cmd(BT_HCI_OP_LE_READ_BUFFER_SIZE, NULL, &rsp);
576 	le_read_buffer_size_complete(rsp);
577 
578 	set_data_len();
579 	set_event_mask(BT_HCI_OP_SET_EVENT_MASK);
580 	set_event_mask(BT_HCI_OP_LE_SET_EVENT_MASK);
581 	set_random_address();
582 }
583 
/* Initialize the tiny host: open the raw HCI transport, start the RX
 * thread, and configure the controller.
 */
static void init_tinyhost(void)
{
	/* Controller will push HCI buffers into rx_queue. */
	bt_enable_raw(&rx_queue);

	/* Start the RX thread */
	k_thread_create(&rx_thread_data, rx_thread_stack,
			K_THREAD_STACK_SIZEOF(rx_thread_stack), rx_thread,
			NULL, NULL, NULL, K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
	k_thread_name_set(&rx_thread_data, "HCI RX");

	/* Run the test at the same priority as the RX thread. */
	k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(0));

	prepare_controller();
}
598 
/* Main test procedure ("tester" role): advertise, wait for the DUT to
 * connect and tell us which handle to write, then exchange interleaved
 * notifications, writes and an indication, and wait for the acks.
 */
void test_procedure_0(void)
{
	init_tinyhost();

	/* Start advertising & wait for a connection */
	start_adv();
	WAIT_FOR_FLAG(is_connected);
	LOG_INF("connected");

	/* We need this to be able to send whole L2CAP PDUs on-air. */
	WAIT_FOR_FLAG(flag_data_length_updated);

	/* Get handle we will write to */
	WAIT_FOR_FLAG(flag_handle);

	LOG_INF("##################### START TEST #####################");

	gatt_write(BT_ATT_OP_WRITE_REQ);	/* will prompt a response PDU */
	gatt_indicate();			/* will prompt a confirmation PDU */

	gatt_notify();
	gatt_write(BT_ATT_OP_WRITE_CMD);

	gatt_notify();
	gatt_write(BT_ATT_OP_WRITE_CMD);

	/* Both acks may arrive in any order relative to our sends. */
	WAIT_FOR_FLAG(flag_write_ack);
	WAIT_FOR_FLAG(flag_indication_ack);

	PASS("Tester done\n");
}
630 
/* Simulation-timeout hook: fail the test if it has not passed by the
 * time the simulated clock reaches the configured deadline.
 */
void test_tick(bs_time_t HW_device_time)
{
	bs_trace_debug_time(0, "Simulation ends now.\n");

	if (bst_result == Passed) {
		return;
	}

	bst_result = Failed;
	bs_trace_error("Test did not pass before simulation ended.\n");
}
639 
test_init(void)640 void test_init(void)
641 {
642 	bst_ticker_set_next_tick_absolute(TEST_TIMEOUT_SIMULATED);
643 	bst_result = In_progress;
644 }
645 
/* Babblesim test instance table: a single "tester" role. */
static const struct bst_test_instance test_to_add[] = {
	{
		.test_id = "tester",
		.test_pre_init_f = test_init,
		.test_tick_f = test_tick,
		.test_main_f = test_procedure_0,
	},
	BSTEST_END_MARKER,
};
655 
install(struct bst_test_list * tests)656 static struct bst_test_list *install(struct bst_test_list *tests)
657 {
658 	return bst_add_tests(tests, test_to_add);
659 };
660 
661 bst_test_install_t test_installers[] = {install, NULL};
662 
663 
/* Entry point: hand control to the babblesim test runner. */
int main(void)
{
	bst_main();
	return 0;
}
670