1 /*
2 * Copyright (c) 2023 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <zephyr/sys/byteorder.h>
9 #include <zephyr/sys/__assert.h>
10
11 #include <zephyr/net_buf.h>
12 #include <zephyr/bluetooth/buf.h>
13
14 #include <zephyr/bluetooth/bluetooth.h>
15 #include <zephyr/bluetooth/hci.h>
16 #include <zephyr/bluetooth/hci_raw.h>
17 #include <zephyr/bluetooth/hci_types.h>
18
19 #include "common/bt_str.h"
20
21 #include "host/conn_internal.h"
22 #include "host/l2cap_internal.h"
23
24 #include "utils.h"
25 #include "bstests.h"
26
27 #include <zephyr/logging/log.h>
28 LOG_MODULE_REGISTER(bt_tinyhost, LOG_LEVEL_INF);
29
/* ATT protocol opcodes used by this minimal host (Bluetooth Core Spec,
 * Vol 3, Part F). Only the opcodes exercised by this test are defined.
 */
#define BT_ATT_OP_MTU_REQ 0x02
#define BT_ATT_OP_MTU_RSP 0x03
#define BT_ATT_OP_WRITE_REQ 0x12
#define BT_ATT_OP_WRITE_RSP 0x13
#define BT_ATT_OP_NOTIFY 0x1b
#define BT_ATT_OP_INDICATE 0x1d
#define BT_ATT_OP_CONFIRM 0x1e
#define BT_ATT_OP_WRITE_CMD 0x52
/* Fixed L2CAP channel ID for the unenhanced ATT bearer */
#define BT_L2CAP_CID_ATT 0x0004
/* Size of the custom ATT handler dispatch table below */
#define LAST_SUPPORTED_ATT_OPCODE 0x20

/* Test synchronization flags, set/cleared from the HCI RX thread and main */
DEFINE_FLAG(is_connected);
DEFINE_FLAG(flag_data_length_updated);
DEFINE_FLAG(flag_handle);
DEFINE_FLAG(flag_notified);
DEFINE_FLAG(flag_write_ack);
DEFINE_FLAG(flag_req_in_progress);

/* Attribute handle the DUT sends us (via ATT notification) to write to */
static uint16_t server_write_handle;

/* Per-opcode override table: test procedures install custom ATT handlers */
typedef void (*att_handler_t)(struct net_buf *buf);
static att_handler_t att_handlers[LAST_SUPPORTED_ATT_OPCODE];

/* Queue of HCI buffers coming from the controller (filled by hci_raw) */
static K_FIFO_DEFINE(rx_queue);

#define CMD_BUF_SIZE MAX(BT_BUF_EVT_RX_SIZE, BT_BUF_CMD_TX_SIZE)
NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_BUF_CMD_TX_COUNT,
			  CMD_BUF_SIZE, 8, NULL);

/* Serializes HCI commands: only one command outstanding at a time */
static K_SEM_DEFINE(cmd_sem, 1, 1);
/* Counts free controller ACL buffers. Initialized in
 * le_read_buffer_size_complete(), replenished on Number Of Completed Packets.
 */
static struct k_sem acl_pkts;
static uint16_t conn_handle;

/* Opcode of the in-flight HCI command; 0xFFFF means "none in flight" */
static volatile uint16_t active_opcode = 0xFFFF;
/* Response buffer reference handed from the RX thread to send_cmd() */
static struct net_buf *cmd_rsp;
65
/* Allocate an HCI command buffer and fill in its command header.
 * Blocks until a buffer is available; never returns NULL.
 */
struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
{
	struct net_buf *buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
	struct bt_hci_cmd_hdr *hdr;

	LOG_DBG("opcode 0x%04x param_len %u", opcode, param_len);

	ASSERT(buf, "failed allocation");

	LOG_DBG("buf %p", buf);

	/* Leave headroom for the HCI transport driver. */
	net_buf_reserve(buf, BT_BUF_RESERVE);
	bt_buf_set_type(buf, BT_BUF_CMD);

	/* Command header: little-endian opcode + parameter length. */
	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->opcode = sys_cpu_to_le16(opcode);
	hdr->param_len = param_len;

	return buf;
}
88
handle_cmd_complete(struct net_buf * buf)89 static void handle_cmd_complete(struct net_buf *buf)
90 {
91 struct bt_hci_evt_hdr *hdr;
92 uint8_t status, ncmd;
93 uint16_t opcode;
94 struct net_buf_simple_state state;
95
96 net_buf_simple_save(&buf->b, &state);
97
98 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
99
100 if (hdr->evt == BT_HCI_EVT_CMD_COMPLETE) {
101 struct bt_hci_evt_cmd_complete *evt;
102
103 evt = net_buf_pull_mem(buf, sizeof(*evt));
104 status = 0;
105 ncmd = evt->ncmd;
106 opcode = sys_le16_to_cpu(evt->opcode);
107
108 } else if (hdr->evt == BT_HCI_EVT_CMD_STATUS) {
109 struct bt_hci_evt_cmd_status *evt;
110
111 evt = net_buf_pull_mem(buf, sizeof(*evt));
112 status = buf->data[0];
113 ncmd = evt->ncmd;
114 opcode = sys_le16_to_cpu(evt->opcode);
115
116 } else {
117 FAIL("unhandled event 0x%x", hdr->evt);
118 }
119
120 LOG_DBG("opcode 0x%04x status %x", opcode, status);
121
122 ASSERT(status == 0x00, "cmd status: %x", status);
123
124 ASSERT(active_opcode == opcode, "unexpected opcode %x != %x", active_opcode, opcode);
125
126 if (active_opcode) {
127 active_opcode = 0xFFFF;
128 cmd_rsp = net_buf_ref(buf);
129 net_buf_simple_restore(&buf->b, &state);
130 }
131
132 if (ncmd) {
133 k_sem_give(&cmd_sem);
134 }
135 }
136
handle_meta_event(struct net_buf * buf)137 static void handle_meta_event(struct net_buf *buf)
138 {
139 uint8_t code = buf->data[2];
140
141 switch (code) {
142 case BT_HCI_EVT_LE_ENH_CONN_COMPLETE:
143 case BT_HCI_EVT_LE_ENH_CONN_COMPLETE_V2:
144 conn_handle = sys_get_le16(&buf->data[4]);
145 LOG_DBG("connected: handle: %d", conn_handle);
146 SET_FLAG(is_connected);
147 break;
148 case BT_HCI_EVT_LE_DATA_LEN_CHANGE:
149 SET_FLAG(flag_data_length_updated);
150 break;
151 case BT_HCI_EVT_LE_CHAN_SEL_ALGO:
152 /* do nothing */
153 break;
154 default:
155 LOG_ERR("unhandled meta event %x", code);
156 LOG_HEXDUMP_ERR(buf->data, buf->len, "HCI META EVT");
157 }
158 }
159
handle_ncp(struct net_buf * buf)160 static void handle_ncp(struct net_buf *buf)
161 {
162 struct bt_hci_evt_num_completed_packets *evt;
163 struct bt_hci_evt_hdr *hdr;
164 uint16_t handle, count;
165
166 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
167
168 evt = (void *)buf->data;
169 handle = sys_le16_to_cpu(evt->h[0].handle);
170 count = sys_le16_to_cpu(evt->h[0].count);
171
172 LOG_DBG("sent %d packets", count);
173
174 while (count--) {
175 k_sem_give(&acl_pkts);
176 }
177 }
178
handle_att_notification(struct net_buf * buf)179 static void handle_att_notification(struct net_buf *buf)
180 {
181 uint16_t handle = net_buf_pull_le16(buf);
182
183 LOG_INF("Got notification for 0x%04x len %d", handle, buf->len);
184 LOG_HEXDUMP_DBG(buf->data, buf->len, "payload");
185
186 server_write_handle = net_buf_pull_le16(buf);
187 LOG_INF("Retrieved handle to write to: 0x%x", server_write_handle);
188 SET_FLAG(flag_handle);
189 }
190
191 struct net_buf *alloc_l2cap_pdu(void);
192 static void send_l2cap_packet(struct net_buf *buf, uint16_t cid);
193
send_write_rsp(void)194 static void send_write_rsp(void)
195 {
196 struct net_buf *buf = alloc_l2cap_pdu();
197
198 UNSET_FLAG(flag_req_in_progress);
199
200 net_buf_add_u8(buf, BT_ATT_OP_WRITE_RSP);
201 send_l2cap_packet(buf, BT_L2CAP_CID_ATT);
202 }
203
handle_att_write_0(struct net_buf * buf)204 static void handle_att_write_0(struct net_buf *buf)
205 {
206 uint16_t handle = net_buf_pull_le16(buf);
207
208 LOG_INF("Got write for 0x%04x len %d", handle, buf->len);
209 LOG_HEXDUMP_DBG(buf->data, buf->len, "payload");
210
211 ASSERT(!TEST_FLAG(flag_req_in_progress),
212 "Peer is pipelining REQs. This inSIGdent will be reported.\n");
213
214 SET_FLAG(flag_req_in_progress);
215 send_write_rsp();
216 }
217
handle_att_write_1(struct net_buf * buf)218 static void handle_att_write_1(struct net_buf *buf)
219 {
220 uint16_t handle = net_buf_pull_le16(buf);
221
222 LOG_INF("Got write for 0x%04x len %d", handle, buf->len);
223 LOG_HEXDUMP_DBG(buf->data, buf->len, "payload");
224
225 ASSERT(!TEST_FLAG(flag_req_in_progress),
226 "Peer is pipelining REQs. This inSIGdent will be reported.\n");
227
228 SET_FLAG(flag_req_in_progress);
229
230 /* For this test procedure, the response is sent from main */
231 }
232
handle_att(struct net_buf * buf)233 static void handle_att(struct net_buf *buf)
234 {
235 uint8_t op = net_buf_pull_u8(buf);
236
237 if (ARRAY_SIZE(att_handlers) > op && att_handlers[op]) {
238 LOG_DBG("executing custom ATT handler for op %x", op);
239 att_handlers[op](buf);
240 return;
241 }
242
243 switch (op) {
244 case BT_ATT_OP_NOTIFY:
245 handle_att_notification(buf);
246 return;
247 case BT_ATT_OP_WRITE_RSP:
248 LOG_INF("got ATT write RSP");
249 SET_FLAG(flag_write_ack);
250 return;
251 case BT_ATT_OP_MTU_RSP:
252 LOG_INF("got ATT MTU RSP");
253 return;
254 default:
255 LOG_HEXDUMP_ERR(buf->data, buf->len, "payload");
256 FAIL("unhandled opcode %x\n", op);
257 return;
258 }
259 }
260
handle_l2cap(struct net_buf * buf)261 static void handle_l2cap(struct net_buf *buf)
262 {
263 struct bt_l2cap_hdr *hdr;
264 uint16_t cid;
265
266 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
267 cid = sys_le16_to_cpu(hdr->cid);
268
269 LOG_DBG("Packet for CID %u len %u", cid, buf->len);
270 LOG_HEXDUMP_DBG(buf->data, buf->len, "l2cap");
271
272 /* Make sure we don't have to recombine packets */
273 ASSERT(buf->len == hdr->len, "buflen = %d != hdrlen %d",
274 buf->len, hdr->len);
275
276 ASSERT(cid == BT_L2CAP_CID_ATT, "We only support (U)ATT");
277
278 /* (U)ATT PDU */
279 handle_att(buf);
280 }
281
handle_acl(struct net_buf * buf)282 static void handle_acl(struct net_buf *buf)
283 {
284 struct bt_hci_acl_hdr *hdr;
285 uint16_t len, handle;
286 uint8_t flags;
287
288 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
289 len = sys_le16_to_cpu(hdr->len);
290 handle = sys_le16_to_cpu(hdr->handle);
291
292 flags = bt_acl_flags(handle);
293 handle = bt_acl_handle(handle);
294
295 ASSERT(flags == BT_ACL_START,
296 "Fragmentation not supported");
297
298 LOG_DBG("ACL: conn %d len %d flags %d", handle, len, flags);
299 LOG_HEXDUMP_DBG(buf->data, buf->len, "HCI ACL");
300
301 handle_l2cap(buf);
302 }
303
recv(struct net_buf * buf)304 static void recv(struct net_buf *buf)
305 {
306 LOG_HEXDUMP_DBG(buf->data, buf->len, "HCI RX");
307
308 uint8_t code = buf->data[0];
309
310 if (bt_buf_get_type(buf) == BT_BUF_EVT) {
311 switch (code) {
312 case BT_HCI_EVT_CMD_COMPLETE:
313 case BT_HCI_EVT_CMD_STATUS:
314 handle_cmd_complete(buf);
315 break;
316 case BT_HCI_EVT_LE_META_EVENT:
317 handle_meta_event(buf);
318 break;
319 case BT_HCI_EVT_DISCONN_COMPLETE:
320 UNSET_FLAG(is_connected);
321 break;
322 case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
323 handle_ncp(buf);
324 break;
325 default:
326 LOG_ERR("unhandled msg %x", code);
327 LOG_HEXDUMP_ERR(buf->data, buf->len, "HCI EVT");
328 }
329
330 /* handlers should take a ref if they want to access the buffer
331 * later
332 */
333 net_buf_unref(buf);
334 return;
335 }
336
337 if (bt_buf_get_type(buf) == BT_BUF_ACL_IN) {
338 handle_acl(buf);
339 net_buf_unref(buf);
340 return;
341 }
342
343 LOG_ERR("HCI RX (not data or event)");
344 net_buf_unref(buf);
345 }
346
/* Send an HCI command and block until it completes.
 *
 * @param opcode HCI command opcode. If @p cmd is NULL a zero-parameter
 *               command buffer is created for it.
 * @param cmd    Pre-filled command buffer (ownership transferred), or NULL.
 * @param rsp    If non-NULL, receives a reference to the raw completion
 *               event (caller must net_buf_unref it). If NULL, the
 *               response is dropped here.
 *
 * Synchronization: cmd_sem starts at 1. The first take claims the "one
 * command in flight" slot; handle_cmd_complete() (RX thread) gives it
 * back when the controller signals it can accept more commands, which
 * unblocks the second take below.
 */
static void send_cmd(uint16_t opcode, struct net_buf *cmd, struct net_buf **rsp)
{
	LOG_DBG("opcode %x", opcode);

	if (!cmd) {
		cmd = bt_hci_cmd_create(opcode, 0);
	}

	k_sem_take(&cmd_sem, K_FOREVER);
	/* No other command may be in flight at this point. */
	ASSERT(active_opcode == 0xFFFF, "");

	active_opcode = opcode;

	LOG_HEXDUMP_DBG(cmd->data, cmd->len, "HCI TX");
	bt_send(cmd);

	/* Wait until the command completes */
	k_sem_take(&cmd_sem, K_FOREVER);
	k_sem_give(&cmd_sem);

	net_buf_unref(cmd);

	/* return response. it's okay if cmd_rsp gets overwritten, since the app
	 * gets the ref to the underlying buffer when this fn returns.
	 */
	if (rsp) {
		*rsp = cmd_rsp;
	} else {
		net_buf_unref(cmd_rsp);
		cmd_rsp = NULL;
	}
}
379
380 static K_THREAD_STACK_DEFINE(rx_thread_stack, 1024);
381 static struct k_thread rx_thread_data;
382
rx_thread(void * p1,void * p2,void * p3)383 static void rx_thread(void *p1, void *p2, void *p3)
384 {
385 LOG_DBG("start HCI rx");
386
387 while (true) {
388 struct net_buf *buf;
389
390 /* Wait until a buffer is available */
391 buf = k_fifo_get(&rx_queue, K_FOREVER);
392 recv(buf);
393 }
394 }
395
le_read_buffer_size_complete(struct net_buf * rsp)396 static void le_read_buffer_size_complete(struct net_buf *rsp)
397 {
398 struct bt_hci_rp_le_read_buffer_size *rp = (void *)rsp->data;
399
400 LOG_DBG("status 0x%02x", rp->status);
401 LOG_DBG("max len %d max num %d", rp->le_max_len, rp->le_max_num);
402
403 k_sem_init(&acl_pkts, rp->le_max_num, rp->le_max_num);
404 net_buf_unref(rsp);
405 }
406
/* Query the controller's maximum supported TX octets/time (LE Read
 * Maximum Data Length) and return them through the out-parameters.
 */
static void read_max_data_len(uint16_t *tx_octets, uint16_t *tx_time)
{
	struct net_buf *rsp;
	struct bt_hci_rp_le_read_max_data_len *params;

	send_cmd(BT_HCI_OP_LE_READ_MAX_DATA_LEN, NULL, &rsp);

	params = (void *)rsp->data;
	*tx_octets = sys_le16_to_cpu(params->max_tx_octets);
	*tx_time = sys_le16_to_cpu(params->max_tx_time);

	/* Done with the command response. */
	net_buf_unref(rsp);
}
419
/* Set the controller's default data length (LE Write Suggested Default
 * Data Length) to the given TX octets/time.
 */
static void write_default_data_len(uint16_t tx_octets, uint16_t tx_time)
{
	struct bt_hci_cp_le_write_default_data_len *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN, sizeof(*cp));
	ASSERT(buf, "");

	cp = net_buf_add(buf, sizeof(*cp));
	cp->max_tx_octets = sys_cpu_to_le16(tx_octets);
	cp->max_tx_time = sys_cpu_to_le16(tx_time);

	send_cmd(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN, buf, NULL);
}
433
set_data_len(void)434 static void set_data_len(void)
435 {
436 uint16_t tx_octets, tx_time;
437
438 read_max_data_len(&tx_octets, &tx_time);
439 write_default_data_len(tx_octets, tx_time);
440 }
441
/* Enable every event bit for the given mask command (works for both
 * Set Event Mask and LE Set Event Mask: same parameter layout).
 */
static void set_event_mask(uint16_t opcode)
{
	struct bt_hci_cp_set_event_mask *cp_mask;
	struct net_buf *buf = bt_hci_cmd_create(opcode, sizeof(*cp_mask));

	ASSERT(buf, "");

	/* Forward all events */
	cp_mask = net_buf_add(buf, sizeof(*cp_mask));
	sys_put_le64(UINT64_MAX, cp_mask->events);

	send_cmd(opcode, buf, NULL);
}
459
set_random_address(void)460 static void set_random_address(void)
461 {
462 struct net_buf *buf;
463 bt_addr_le_t addr = {BT_ADDR_LE_RANDOM, {{0x0A, 0x89, 0x67, 0x45, 0x23, 0xC1}}};
464
465 LOG_DBG("%s", bt_addr_str(&addr.a));
466
467 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, sizeof(addr.a));
468 ASSERT(buf, "");
469
470 net_buf_add_mem(buf, &addr.a, sizeof(addr.a));
471 send_cmd(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, buf, NULL);
472 }
473
start_adv(void)474 void start_adv(void)
475 {
476 struct bt_hci_cp_le_set_adv_param set_param;
477 struct net_buf *buf;
478 uint16_t interval = 60; /* Interval doesn't matter */
479
480 (void)memset(&set_param, 0, sizeof(set_param));
481
482 set_param.min_interval = sys_cpu_to_le16(interval);
483 set_param.max_interval = sys_cpu_to_le16(interval);
484 set_param.channel_map = 0x07;
485 set_param.filter_policy = BT_LE_ADV_FP_NO_FILTER;
486 set_param.type = BT_HCI_ADV_IND;
487 set_param.own_addr_type = BT_HCI_OWN_ADDR_RANDOM;
488
489 /* configure */
490 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_PARAM, sizeof(set_param));
491 net_buf_add_mem(buf, &set_param, sizeof(set_param));
492 send_cmd(BT_HCI_OP_LE_SET_ADV_PARAM, buf, NULL);
493
494 /* start */
495 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_ENABLE, 1);
496 net_buf_add_u8(buf, BT_HCI_LE_ADV_ENABLE);
497 send_cmd(BT_HCI_OP_LE_SET_ADV_ENABLE, buf, NULL);
498 }
499
500 NET_BUF_POOL_DEFINE(acl_tx_pool, 5, BT_L2CAP_BUF_SIZE(200), 8, NULL);
501
alloc_l2cap_pdu(void)502 struct net_buf *alloc_l2cap_pdu(void)
503 {
504 struct net_buf *buf;
505 uint16_t reserve;
506
507 buf = net_buf_alloc(&acl_tx_pool, K_FOREVER);
508 ASSERT(buf, "failed ACL allocation");
509
510 reserve = sizeof(struct bt_l2cap_hdr);
511 reserve += sizeof(struct bt_hci_acl_hdr) + BT_BUF_RESERVE;
512
513 net_buf_reserve(buf, reserve);
514
515 return buf;
516 }
517
send_acl(struct net_buf * buf)518 static int send_acl(struct net_buf *buf)
519 {
520 struct bt_hci_acl_hdr *hdr;
521 uint8_t flags = BT_ACL_START_NO_FLUSH;
522
523 hdr = net_buf_push(buf, sizeof(*hdr));
524 hdr->handle = sys_cpu_to_le16(bt_acl_handle_pack(conn_handle, flags));
525 hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
526
527 bt_buf_set_type(buf, BT_BUF_ACL_OUT);
528
529 k_sem_take(&acl_pkts, K_FOREVER);
530
531 return bt_send(buf);
532 }
533
/* Prepend the L2CAP basic header for the given channel and send the
 * resulting packet as a single ACL PDU.
 */
static void send_l2cap_packet(struct net_buf *buf, uint16_t cid)
{
	struct bt_l2cap_hdr *hdr = net_buf_push(buf, sizeof(*hdr));

	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
	hdr->cid = sys_cpu_to_le16(cid);

	/* Always entire packets, no HCI fragmentation */
	ASSERT(buf->len <= CONFIG_BT_BUF_ACL_TX_SIZE,
	       "Fragmentation not supported");

	send_acl(buf);
}
548
/* Send an ATT write (REQ or CMD, selected by @p op) with a fixed payload
 * to the handle the DUT notified us about.
 */
static void gatt_write(uint16_t op)
{
	static uint8_t data[] = "write";
	struct net_buf *pdu = alloc_l2cap_pdu();

	/* ATT write PDU: opcode, attribute handle, value. */
	net_buf_add_u8(pdu, op);
	net_buf_add_le16(pdu, server_write_handle);
	net_buf_add_mem(pdu, data, sizeof(data));

	LOG_INF("send ATT write %s",
		op == BT_ATT_OP_WRITE_REQ ? "REQ" : "CMD");

	send_l2cap_packet(pdu, BT_L2CAP_CID_ATT);
}
564
prepare_controller(void)565 static void prepare_controller(void)
566 {
567 /* Initialize controller */
568 struct net_buf *rsp;
569
570 send_cmd(BT_HCI_OP_RESET, NULL, NULL);
571 send_cmd(BT_HCI_OP_LE_READ_BUFFER_SIZE, NULL, &rsp);
572 le_read_buffer_size_complete(rsp);
573
574 set_data_len();
575 set_event_mask(BT_HCI_OP_SET_EVENT_MASK);
576 set_event_mask(BT_HCI_OP_LE_SET_EVENT_MASK);
577 set_random_address();
578 }
579
init_tinyhost(void)580 static void init_tinyhost(void)
581 {
582 bt_enable_raw(&rx_queue);
583
584 /* Start the RX thread */
585 k_thread_create(&rx_thread_data, rx_thread_stack,
586 K_THREAD_STACK_SIZEOF(rx_thread_stack), rx_thread,
587 NULL, NULL, NULL, K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
588 k_thread_name_set(&rx_thread_data, "HCI RX");
589
590 k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(0));
591
592 prepare_controller();
593 }
594
test_procedure_0(void)595 void test_procedure_0(void)
596 {
597 att_handlers[BT_ATT_OP_WRITE_REQ] = handle_att_write_0;
598
599 init_tinyhost();
600
601 /* wait until the good peer has connected */
602 k_msleep(1000);
603
604 LOG_INF("init adv");
605
606 /* Start advertising & wait for a connection */
607 start_adv();
608 WAIT_FOR_FLAG(is_connected);
609 LOG_INF("connected");
610
611 /* We need this to be able to send whole L2CAP PDUs on-air. */
612 WAIT_FOR_FLAG(flag_data_length_updated);
613
614 /* Get handle we will write to */
615 WAIT_FOR_FLAG(flag_handle);
616
617 LOG_INF("##################### START TEST #####################");
618
619 uint32_t timeout_ms = PROCEDURE_1_TIMEOUT_MS;
620 uint32_t start_time = k_uptime_get_32();
621
622 while (k_uptime_get_32() - start_time < timeout_ms) {
623 gatt_write(BT_ATT_OP_WRITE_REQ);
624 }
625
626 /* Verify we get at least one write */
627 WAIT_FOR_FLAG(flag_write_ack);
628
629 PASS("Tester done\n");
630 }
631
test_procedure_1(void)632 void test_procedure_1(void)
633 {
634 att_handlers[BT_ATT_OP_WRITE_REQ] = handle_att_write_1;
635
636 init_tinyhost();
637
638 /* Start advertising & wait for a connection */
639 LOG_INF("init adv");
640 start_adv();
641 WAIT_FOR_FLAG(is_connected);
642 LOG_INF("connected");
643
644 /* We need this to be able to send whole L2CAP PDUs on-air. */
645 WAIT_FOR_FLAG(flag_data_length_updated);
646
647 LOG_INF("##################### START TEST #####################");
648
649 /* In this testcase, DUT is the aggressor.
650 * Tester verifies no spec violation happens.
651 */
652 while (TEST_FLAG(is_connected)) {
653 /* Should be enough to allow DUT's app to batch a few requests. */
654 k_msleep(1000);
655 if (TEST_FLAG(flag_req_in_progress)) {
656 send_write_rsp();
657 }
658 }
659
660 PASS("Tester done\n");
661 }
662
/* Simulation-end callback: fail the test if it has not passed by now. */
void test_tick(bs_time_t HW_device_time)
{
	bs_trace_debug_time(0, "Simulation ends now.\n");

	if (bst_result == Passed) {
		return;
	}

	bst_result = Failed;
	bs_trace_error("Test did not pass before simulation ended.\n");
}
671
/* Per-test setup: arm the simulation-end tick and mark the test as
 * running so test_tick() can fail it if it never passes.
 */
void test_init(void)
{
	bst_ticker_set_next_tick_absolute(TEST_TIMEOUT_SIMULATED);
	bst_result = In_progress;
}
677
/* Test instances registered with the bsim test framework. */
static const struct bst_test_instance test_to_add[] = {
	{
		/* Procedure 0: tester spams ATT Write Requests at the DUT. */
		.test_id = "tester",
		.test_pre_init_f = test_init,
		.test_tick_f = test_tick,
		.test_main_f = test_procedure_0,
	},
	{
		/* Procedure 1: DUT is the aggressor; tester responds slowly
		 * and asserts the DUT never pipelines requests.
		 */
		.test_id = "tester_1",
		.test_pre_init_f = test_init,
		.test_tick_f = test_tick,
		.test_main_f = test_procedure_1,
	},
	BSTEST_END_MARKER,
};
693
install(struct bst_test_list * tests)694 static struct bst_test_list *install(struct bst_test_list *tests)
695 {
696 return bst_add_tests(tests, test_to_add);
697 };
698
699 bst_test_install_t test_installers[] = {install, NULL};
700
701
int main(void)
{
	/* Hand control to the bsim test framework, which runs the selected
	 * test instance's pre-init/main/tick callbacks.
	 */
	bst_main();

	return 0;
}
708