/* hci_da1469x.c - DA1469x CMAC IPC Bluetooth driver */

/*
 * Copyright (c) 2023 Renesas Electronics Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/init.h>
#include <zephyr/sys/util.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/drivers/bluetooth.h>
#include <zephyr/irq.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/random/random.h>

#include <common/bt_str.h>

#include <DA1469xAB.h>
#include <mbox.h>
#include <shm.h>
#include <rand.h>

#define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(hci_da1469x);

#define DT_DRV_COMPAT renesas_bt_hci_da1469x

struct hci_data {
	bt_hci_recv_t recv;
};

static K_KERNEL_STACK_DEFINE(rng_thread_stack, CONFIG_BT_RX_STACK_SIZE);
static struct k_thread rng_thread_data;
struct k_sem rng_sem;

static K_KERNEL_STACK_DEFINE(rx_thread_stack, CONFIG_BT_RX_STACK_SIZE);
static struct k_thread rx_thread_data;

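/*
 * H:4 receive state shared between the CMAC2SYS interrupt path and the RX
 * thread: the interrupt path reassembles the packet currently being received
 * and queues completed buffers on rx.fifo for the RX thread to pass on to
 * the host.
 */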
static struct {
	struct net_buf *buf;
	struct k_fifo   fifo;

	uint16_t    remaining;
	uint16_t    discard;

	bool     have_hdr;
	bool     discardable;
	bool     deferred;

	uint8_t     hdr_len;

	uint8_t     type;
	union {
		struct bt_hci_evt_hdr evt;
		struct bt_hci_acl_hdr acl;
		struct bt_hci_iso_hdr iso;
		uint8_t hdr[4];
	};
} rx = {
	.fifo = Z_FIFO_INITIALIZER(rx.fifo),
};

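/* Read the H:4 packet type indicator from the CMAC mailbox and set up the
 * expected header length for the packet that follows.
 */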
static void h4_get_type(void)
{
	/* Get packet type */
	if (cmac_mbox_read(&rx.type, 1) != 1) {
		LOG_WRN("Unable to read H:4 packet type");
		rx.type = BT_HCI_H4_NONE;
		return;
	}

	switch (rx.type) {
	case BT_HCI_H4_EVT:
		rx.remaining = sizeof(rx.evt);
		rx.hdr_len = rx.remaining;
		break;
	case BT_HCI_H4_ACL:
		rx.remaining = sizeof(rx.acl);
		rx.hdr_len = rx.remaining;
		break;
	case BT_HCI_H4_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			rx.remaining = sizeof(rx.iso);
			rx.hdr_len = rx.remaining;
			break;
		}
		__fallthrough;
	default:
		LOG_ERR("Unknown H:4 type 0x%02x", rx.type);
		rx.type = BT_HCI_H4_NONE;
	}
}

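/* Read the outstanding HCI header bytes from the mailbox into rx.hdr */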
static void h4_read_hdr(void)
{
	int bytes_read = rx.hdr_len - rx.remaining;
	int ret;

	ret = cmac_mbox_read(rx.hdr + bytes_read, rx.remaining);
	if (unlikely(ret < 0)) {
		LOG_ERR("Unable to read from mailbox (ret %d)", ret);
	} else {
		rx.remaining -= ret;
	}
}

static inline void get_acl_hdr(void)
{
	h4_read_hdr();

	if (!rx.remaining) {
		struct bt_hci_acl_hdr *hdr = &rx.acl;

		rx.remaining = sys_le16_to_cpu(hdr->len);
		LOG_DBG("Got ACL header. Payload %u bytes", rx.remaining);
		rx.have_hdr = true;
	}
}

static inline void get_iso_hdr(void)
{
	h4_read_hdr();

	if (!rx.remaining) {
		struct bt_hci_iso_hdr *hdr = &rx.iso;

		rx.remaining = bt_iso_hdr_len(sys_le16_to_cpu(hdr->len));
		LOG_DBG("Got ISO header. Payload %u bytes", rx.remaining);
		rx.have_hdr = true;
	}
}

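/* Read the event header. For LE meta events the header is extended by one
 * byte so the subevent code can be inspected and advertising reports marked
 * as discardable.
 */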
static inline void get_evt_hdr(void)
{
	struct bt_hci_evt_hdr *hdr = &rx.evt;

	h4_read_hdr();

	if (rx.hdr_len == sizeof(*hdr) && rx.remaining < sizeof(*hdr)) {
		switch (rx.evt.evt) {
		case BT_HCI_EVT_LE_META_EVENT:
			rx.remaining++;
			rx.hdr_len++;
			break;
		}
	}

	if (!rx.remaining) {
		if (rx.evt.evt == BT_HCI_EVT_LE_META_EVENT &&
		    (rx.hdr[sizeof(*hdr)] == BT_HCI_EVT_LE_ADVERTISING_REPORT)) {
			LOG_DBG("Marking adv report as discardable");
			rx.discardable = true;
		}

		rx.remaining = hdr->len - (rx.hdr_len - sizeof(*hdr));
		LOG_DBG("Got event header. Payload %u bytes", hdr->len);
		rx.have_hdr = true;
	}
}

static inline void copy_hdr(struct net_buf *buf)
{
	net_buf_add_mem(buf, rx.hdr, rx.hdr_len);
}

static void reset_rx(void)
{
	rx.type = BT_HCI_H4_NONE;
	rx.remaining = 0U;
	rx.have_hdr = false;
	rx.hdr_len = 0U;
	rx.discardable = false;
}

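/* Allocate a host buffer matching the packet type currently being received */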
static struct net_buf *get_rx(k_timeout_t timeout)
{
	LOG_DBG("type 0x%02x, evt 0x%02x", rx.type, rx.evt.evt);

	switch (rx.type) {
	case BT_HCI_H4_EVT:
		return bt_buf_get_evt(rx.evt.evt, rx.discardable, timeout);
	case BT_HCI_H4_ACL:
		return bt_buf_get_rx(BT_BUF_ACL_IN, timeout);
	case BT_HCI_H4_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			return bt_buf_get_rx(BT_BUF_ISO_IN, timeout);
		}
	}

	return NULL;
}

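/* Resume mailbox processing; if a packet was deferred because no buffer was
 * available, re-trigger the CMAC2SYS IRQ so reception picks up where it
 * left off.
 */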
static void rx_isr_start(void)
{
	if (rx.deferred) {
		rx.deferred = false;
		NVIC_SetPendingIRQ(CMAC2SYS_IRQn);
	}

	irq_enable(CMAC2SYS_IRQn);
}

static void rx_isr_stop(void)
{
	irq_disable(CMAC2SYS_IRQn);
}

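/* RX thread: performs deferred buffer allocation and hands completed packets
 * from rx.fifo to the host via the registered receive callback.
 */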
static void rx_thread(void *p1, void *p2, void *p3)
{
	const struct device *dev = p1;
	struct hci_data *hci = dev->data;
	struct net_buf *buf;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	LOG_DBG("started");

	while (1) {
		LOG_DBG("rx.buf %p", rx.buf);

		/* We can only do the allocation if we know the initial
		 * header, since Command Complete/Status events must use the
		 * original command buffer (if available).
		 */
		if (rx.have_hdr && !rx.buf) {
			rx.buf = get_rx(K_FOREVER);
			LOG_DBG("Got rx.buf %p", rx.buf);
			if (rx.remaining > net_buf_tailroom(rx.buf)) {
				LOG_ERR("Not enough space in buffer");
				rx.discard = rx.remaining;
				reset_rx();
			} else {
				copy_hdr(rx.buf);
			}
		}

		/* Let the ISR continue receiving new packets */
		rx_isr_start();

		buf = k_fifo_get(&rx.fifo, K_FOREVER);
		do {
			rx_isr_start();

			LOG_DBG("Calling bt_recv(%p)", buf);
			hci->recv(dev, buf);

			/* Give other threads a chance to run if the ISR
			 * is receiving data so fast that rx.fifo never
			 * or very rarely goes empty.
			 */
			k_yield();

			rx_isr_stop();

			buf = k_fifo_get(&rx.fifo, K_NO_WAIT);
		} while (buf);
	}
}

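/* Discard up to len bytes from the mailbox and return the number discarded */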
static size_t h4_discard(size_t len)
{
	uint8_t buf[33];
	int err;

	err = cmac_mbox_read(buf, MIN(len, sizeof(buf)));
	if (unlikely(err < 0)) {
		LOG_ERR("Unable to read from mailbox (err %d)", err);
		return 0;
	}

	return err;
}

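/* Read payload bytes into rx.buf, allocating the buffer first if needed.
 * Completed packets are queued on rx.fifo; if no buffer is available the
 * packet is either dropped (if discardable) or reception is deferred to the
 * RX thread.
 */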
static inline void read_payload(void)
{
	struct net_buf *buf;
	int read;

	if (!rx.buf) {
		size_t buf_tailroom;

		rx.buf = get_rx(K_NO_WAIT);
		if (!rx.buf) {
			if (rx.discardable) {
				LOG_WRN("Discarding event 0x%02x", rx.evt.evt);
				rx.discard = rx.remaining;
				reset_rx();
				return;
			}

			LOG_WRN("Failed to allocate, deferring to rx_thread");
			rx.deferred = true;
			return;
		}

		LOG_DBG("Allocated rx.buf %p", rx.buf);

		buf_tailroom = net_buf_tailroom(rx.buf);
		if (buf_tailroom < rx.remaining) {
			LOG_ERR("Not enough space in buffer %u/%zu", rx.remaining, buf_tailroom);
			rx.discard = rx.remaining;
			reset_rx();
			return;
		}

		copy_hdr(rx.buf);
	}

	read = cmac_mbox_read(net_buf_tail(rx.buf), rx.remaining);
	if (unlikely(read < 0)) {
		LOG_ERR("Failed to read mailbox (err %d)", read);
		return;
	}

	net_buf_add(rx.buf, read);
	rx.remaining -= read;

	LOG_DBG("got %d bytes, remaining %u", read, rx.remaining);
	LOG_DBG("Payload (len %u): %s", rx.buf->len, bt_hex(rx.buf->data, rx.buf->len));

	if (rx.remaining) {
		return;
	}

	buf = rx.buf;
	rx.buf = NULL;

	if (rx.type == BT_HCI_H4_EVT) {
		bt_buf_set_type(buf, BT_BUF_EVT);
	} else if (IS_ENABLED(CONFIG_BT_ISO) && rx.type == BT_HCI_H4_ISO) {
		bt_buf_set_type(buf, BT_BUF_ISO_IN);
	} else {
		bt_buf_set_type(buf, BT_BUF_ACL_IN);
	}

	reset_rx();

	LOG_DBG("Putting buf %p to rx fifo", buf);
	k_fifo_put(&rx.fifo, buf);
}

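/* Header stage of the RX state machine: read the packet type first, then the
 * matching HCI header, copying it into rx.buf once complete if a buffer is
 * already available.
 */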
static inline void read_header(void)
{
	switch (rx.type) {
	case BT_HCI_H4_NONE:
		h4_get_type();
		return;
	case BT_HCI_H4_EVT:
		get_evt_hdr();
		break;
	case BT_HCI_H4_ACL:
		get_acl_hdr();
		break;
	case BT_HCI_H4_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			get_iso_hdr();
			break;
		}
		__fallthrough;
	default:
		CODE_UNREACHABLE;
		return;
	}

	if (rx.have_hdr && rx.buf) {
		if (rx.remaining > net_buf_tailroom(rx.buf)) {
			LOG_ERR("Not enough space in buffer");
			rx.discard = rx.remaining;
			reset_rx();
		} else {
			copy_hdr(rx.buf);
		}
	}
}

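/* Process one step of mailbox data: discard, read header or read payload */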
static inline void process_rx(void)
{
	LOG_DBG("remaining %u discard %u have_hdr %u rx.buf %p len %u", rx.remaining, rx.discard,
		rx.have_hdr, rx.buf, rx.buf ? rx.buf->len : 0);

	if (rx.discard) {
		rx.discard -= h4_discard(rx.discard);
		return;
	}

	if (rx.have_hdr) {
		read_payload();
	} else {
		read_header();
	}
}

/* Called by HAL when data in CMAC mailbox is available to read */
void cmac_read_req(void)
{
	while (!rx.deferred && cmac_mbox_has_data()) {
		process_rx();
	}
}

/* Called by HAL when CMAC requests host to put more data in rng buffer */
void cmac_rng_req(void)
{
	k_sem_give(&rng_sem);
}

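/* RNG thread: refills the CMAC random number buffer with random words
 * generated on the host, then signals CMAC that new data is available.
 */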
static void rng_thread(void *p1, void *p2, void *p3)
{
	uint32_t word;

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		k_sem_take(&rng_sem, K_FOREVER);

		while (cmac_rand_needs_data()) {
			word = sys_rand32_get();
			cmac_rand_fill(&word, 1);
		}

		cmac_signal();
	}
}

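/* Start the RX and RNG threads, register the host receive callback and
 * enable the CMAC core and its interrupt.
 */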
static int bt_da1469x_open(const struct device *dev, bt_hci_recv_t recv)
{
	struct hci_data *hci = dev->data;
	k_tid_t tid;

	tid = k_thread_create(&rx_thread_data, rx_thread_stack,
			      K_KERNEL_STACK_SIZEOF(rx_thread_stack),
			      rx_thread, (void *)dev, NULL, NULL,
			      K_PRIO_COOP(CONFIG_BT_RX_PRIO),
			      0, K_NO_WAIT);
	k_thread_name_set(tid, "bt_rx_thread");

	k_sem_init(&rng_sem, 0, 1);

	tid = k_thread_create(&rng_thread_data, rng_thread_stack,
			      K_KERNEL_STACK_SIZEOF(rng_thread_stack),
			      rng_thread, NULL, NULL, NULL,
			      K_PRIO_COOP(CONFIG_BT_RX_PRIO),
			      0, K_NO_WAIT);
	k_thread_name_set(tid, "bt_rng_thread");

	hci->recv = recv;

	cmac_enable();
	irq_enable(CMAC2SYS_IRQn);

	return 0;
}

#ifdef CONFIG_BT_HCI_HOST
static int bt_da1469x_close(const struct device *dev)
{
	struct hci_data *hci = dev->data;

	irq_disable(CMAC2SYS_IRQn);
	cmac_disable();

	hci->recv = NULL;

	return 0;
}
#endif /* CONFIG_BT_HCI_HOST */

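/* Prepend the H:4 packet type indicator and write the packet to the CMAC
 * mailbox. Only command and ACL buffers are supported in the TX direction.
 */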
static int bt_da1469x_send(const struct device *dev, struct net_buf *buf)
{
	ARG_UNUSED(dev);

	switch (bt_buf_get_type(buf)) {
	case BT_BUF_ACL_OUT:
		LOG_DBG("ACL: buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);
		net_buf_push_u8(buf, BT_HCI_H4_ACL);
		break;
	case BT_BUF_CMD:
		LOG_DBG("CMD: buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);
		net_buf_push_u8(buf, BT_HCI_H4_CMD);
		break;
	default:
		LOG_ERR("Unsupported type");
		return -EINVAL;
	}

	cmac_mbox_write(buf->data, buf->len);

	net_buf_unref(buf);

	return 0;
}

static DEVICE_API(bt_hci, drv) = {
	.open           = bt_da1469x_open,
#ifdef CONFIG_BT_HCI_HOST
	.close          = bt_da1469x_close,
#endif
	.send           = bt_da1469x_send,
};

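/* Driver init: load the CMAC firmware image and configure the PDC, shared
 * memory and the CMAC2SYS interrupt while the CMAC core is kept disabled;
 * the core is started later in bt_da1469x_open().
 */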
static int bt_da1469x_init(const struct device *dev)
{
	irq_disable(CMAC2SYS_IRQn);

	cmac_disable();
	cmac_load_image();
	cmac_configure_pdc();
	cmac_configure_shm();

	IRQ_CONNECT(CMAC2SYS_IRQn, 0, cmac_cmac2sys_isr, NULL, 0);

	return 0;
}

#define HCI_DEVICE_INIT(inst) \
	static struct hci_data hci_data_##inst = { \
	}; \
	DEVICE_DT_INST_DEFINE(inst, bt_da1469x_init, NULL, &hci_data_##inst, NULL, \
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &drv)

/* Only one instance supported right now */
HCI_DEVICE_INIT(0)