1 /*
2 * Copyright (c) 2020 PHYTEC Messtechnik GmbH
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/logging/log.h>
8 LOG_MODULE_REGISTER(dw1000, LOG_LEVEL_INF);
9
10 #include <errno.h>
11 #include <zephyr/kernel.h>
12 #include <zephyr/arch/cpu.h>
13 #include <zephyr/debug/stack.h>
14 #include <zephyr/device.h>
15 #include <zephyr/init.h>
16 #include <zephyr/net/net_if.h>
17 #include <zephyr/net/net_pkt.h>
18
19 #include <zephyr/sys/byteorder.h>
20 #include <string.h>
21 #include <zephyr/random/random.h>
23 #include <math.h>
24
25 #include <zephyr/drivers/gpio.h>
26 #include <zephyr/drivers/spi.h>
27
28 #include <zephyr/net/ieee802154_radio.h>
29 #include "ieee802154_dw1000_regs.h"
30
31 #define DT_DRV_COMPAT decawave_dw1000
32
33 #define DWT_FCS_LENGTH 2U
34 #define DWT_SPI_CSWAKEUP_FREQ 500000U
35 #define DWT_SPI_SLOW_FREQ 2000000U
36 #define DWT_SPI_TRANS_MAX_HDR_LEN 3
37 #define DWT_SPI_TRANS_REG_MAX_RANGE 0x3F
38 #define DWT_SPI_TRANS_SHORT_MAX_OFFSET 0x7F
39 #define DWT_SPI_TRANS_WRITE_OP BIT(7)
40 #define DWT_SPI_TRANS_SUB_ADDR BIT(6)
41 #define DWT_SPI_TRANS_EXTEND_ADDR BIT(7)
42
43 #define DWT_TS_TIME_UNITS_FS 15650U /* DWT_TIME_UNITS in fs */
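/* One device time unit is 1 / (128 * 499.2 MHz) = ~15.65 ps, i.e. 15650 fs. */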
44
45 #define DW1000_TX_ANT_DLY 16450
46 #define DW1000_RX_ANT_DLY 16450
47
48 /* SHR Symbol Duration in ns */
49 #define UWB_PHY_TPSYM_PRF64 IEEE802154_PHY_HRP_UWB_PRF64_TPSYM_SYMBOL_PERIOD_NS
50 #define UWB_PHY_TPSYM_PRF16 IEEE802154_PHY_HRP_UWB_PRF16_TPSYM_SYMBOL_PERIOD_NS
51
52 #define UWB_PHY_NUMOF_SYM_SHR_SFD 8
53
54 /* PHR Symbol Duration Tdsym in ns */
55 #define UWB_PHY_TDSYM_PHR_110K 8205.13
56 #define UWB_PHY_TDSYM_PHR_850K 1025.64
57 #define UWB_PHY_TDSYM_PHR_6M8 1025.64
58
59 #define UWB_PHY_NUMOF_SYM_PHR 18
60
61 /* Data Symbol Duration Tdsym in ns */
62 #define UWB_PHY_TDSYM_DATA_110K 8205.13
63 #define UWB_PHY_TDSYM_DATA_850K 1025.64
64 #define UWB_PHY_TDSYM_DATA_6M8 128.21
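/*
 * The SHR/PHR/data symbol durations above feed the frame duration estimate
 * in dwt_get_pkt_duration_ns(): T_frame = t_shr + t_phr + t_psdu, with
 * t_psdu = t_dsym * 8 * PSDU length (t_dsym already includes the FEC rate,
 * see dwt_configure_rf_phy()).
 */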
65
66 #define DWT_WORK_QUEUE_STACK_SIZE 512
67
68 static struct k_work_q dwt_work_queue;
69 static K_KERNEL_STACK_DEFINE(dwt_work_queue_stack,
70 DWT_WORK_QUEUE_STACK_SIZE);
71
72 struct dwt_phy_config {
73 uint8_t channel; /* Channel 1, 2, 3, 4, 5, 7 */
74 uint8_t dr; /* Data rate DWT_BR_110K, DWT_BR_850K, DWT_BR_6M8 */
75 uint8_t prf; /* PRF DWT_PRF_16M or DWT_PRF_64M */
76
77 uint8_t rx_pac_l; /* DWT_PAC8..DWT_PAC64 */
78 uint8_t rx_shr_code; /* RX SHR preamble code */
79 uint8_t rx_ns_sfd; /* non-standard SFD */
80 uint16_t rx_sfd_to; /* SFD timeout value (in symbols)
81 * (tx_shr_nsync + 1 + SFD_length - rx_pac_l)
82 */
83
84 uint8_t tx_shr_code; /* TX SHR preamble code */
85 uint32_t tx_shr_nsync; /* PLEN index, e.g. DWT_PLEN_64 */
86
87 float t_shr;
88 float t_phr;
89 float t_dsym;
90 };
91
92 struct dwt_hi_cfg {
93 struct spi_dt_spec bus;
94 struct gpio_dt_spec irq_gpio;
95 struct gpio_dt_spec rst_gpio;
96 };
97
98 #define DWT_STATE_TX 0
99 #define DWT_STATE_CCA 1
100 #define DWT_STATE_RX_DEF_ON 2
101
102 struct dwt_context {
103 const struct device *dev;
104 struct net_if *iface;
105 const struct spi_config *spi_cfg;
106 struct spi_config spi_cfg_slow;
107 struct gpio_callback gpio_cb;
108 struct k_sem dev_lock;
109 struct k_sem phy_sem;
110 struct k_work irq_cb_work;
111 struct k_thread thread;
112 struct dwt_phy_config rf_cfg;
113 atomic_t state;
114 bool cca_busy;
115 uint16_t sleep_mode;
116 uint8_t mac_addr[8];
117 };
118
119 static const struct dwt_hi_cfg dw1000_0_config = {
120 .bus = SPI_DT_SPEC_INST_GET(0, SPI_WORD_SET(8), 0),
121 .irq_gpio = GPIO_DT_SPEC_INST_GET(0, int_gpios),
122 .rst_gpio = GPIO_DT_SPEC_INST_GET(0, reset_gpios),
123 };
124
125 static struct dwt_context dwt_0_context = {
126 .dev_lock = Z_SEM_INITIALIZER(dwt_0_context.dev_lock, 1, 1),
127 .phy_sem = Z_SEM_INITIALIZER(dwt_0_context.phy_sem, 0, 1),
128 .rf_cfg = {
129 .channel = 5,
130 .dr = DWT_BR_6M8,
131 .prf = DWT_PRF_64M,
132
133 .rx_pac_l = DWT_PAC8,
134 .rx_shr_code = 10,
135 .rx_ns_sfd = 0,
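/* SFD timeout: (128 + 1) preamble symbols + 8 SFD symbols - PAC8 */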
136 .rx_sfd_to = (129 + 8 - 8),
137
138 .tx_shr_code = 10,
139 .tx_shr_nsync = DWT_PLEN_128,
140 },
141 };
142
143 /* This struct is used to read all additional RX frame info with a single register read */
144 struct dwt_rx_info_regs {
145 uint8_t rx_fqual[DWT_RX_FQUAL_LEN];
146 uint8_t rx_ttcki[DWT_RX_TTCKI_LEN];
147 uint8_t rx_ttcko[DWT_RX_TTCKO_LEN];
148 /* RX_TIME without RX_RAWST */
149 uint8_t rx_time[DWT_RX_TIME_FP_RAWST_OFFSET];
150 } __packed;
151
152 static int dwt_configure_rf_phy(const struct device *dev);
153
154 static int dwt_spi_read(const struct device *dev,
155 uint16_t hdr_len, const uint8_t *hdr_buf,
156 uint32_t data_len, uint8_t *data)
157 {
158 struct dwt_context *ctx = dev->data;
159 const struct dwt_hi_cfg *hi_cfg = dev->config;
160 const struct spi_buf tx_buf = {
161 .buf = (uint8_t *)hdr_buf,
162 .len = hdr_len
163 };
164 const struct spi_buf_set tx = {
165 .buffers = &tx_buf,
166 .count = 1
167 };
168 struct spi_buf rx_buf[2] = {
169 {
170 .buf = NULL,
171 .len = hdr_len,
172 },
173 {
174 .buf = (uint8_t *)data,
175 .len = data_len,
176 },
177 };
178 const struct spi_buf_set rx = {
179 .buffers = rx_buf,
180 .count = 2
181 };
182
183 LOG_DBG("spi read, header length %u, data length %u",
184 (uint16_t)hdr_len, (uint32_t)data_len);
185 LOG_HEXDUMP_DBG(hdr_buf, (uint16_t)hdr_len, "rd: header");
186
187 if (spi_transceive(hi_cfg->bus.bus, ctx->spi_cfg, &tx, &rx)) {
188 LOG_ERR("SPI transfer failed");
189 return -EIO;
190 }
191
192 LOG_HEXDUMP_DBG(data, (uint32_t)data_len, "rd: data");
193
194 return 0;
195 }
196
197
198 static int dwt_spi_write(const struct device *dev,
199 uint16_t hdr_len, const uint8_t *hdr_buf,
200 uint32_t data_len, const uint8_t *data)
201 {
202 struct dwt_context *ctx = dev->data;
203 const struct dwt_hi_cfg *hi_cfg = dev->config;
204 struct spi_buf buf[2] = {
205 {.buf = (uint8_t *)hdr_buf, .len = hdr_len},
206 {.buf = (uint8_t *)data, .len = data_len}
207 };
208 struct spi_buf_set buf_set = {.buffers = buf, .count = 2};
209
210 LOG_DBG("spi write, header length %u, data length %u",
211 (uint16_t)hdr_len, (uint32_t)data_len);
212 LOG_HEXDUMP_DBG(hdr_buf, (uint16_t)hdr_len, "wr: header");
213 LOG_HEXDUMP_DBG(data, (uint32_t)data_len, "wr: data");
214
215 if (spi_write(hi_cfg->bus.bus, ctx->spi_cfg, &buf_set)) {
216 LOG_ERR("SPI read failed");
217 return -EIO;
218 }
219
220 return 0;
221 }
222
223 /* See 2.2.1.2 Transaction formats of the SPI interface */
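/*
 * The transaction header is one to three octets long:
 * octet 0: bit 7 write/read, bit 6 sub-address present, bits 5:0 register file ID;
 * octet 1: bit 7 extended address follows, bits 6:0 low 7 bits of the offset;
 * octet 2: remaining offset bits (offset >> 7).
 */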
224 static int dwt_spi_transfer(const struct device *dev,
225 uint8_t reg, uint16_t offset,
226 size_t buf_len, uint8_t *buf, bool write)
227 {
228 uint8_t hdr[DWT_SPI_TRANS_MAX_HDR_LEN] = {0};
229 size_t hdr_len = 0;
230
231 hdr[0] = reg & DWT_SPI_TRANS_REG_MAX_RANGE;
232 hdr_len += 1;
233
234 if (offset != 0) {
235 hdr[0] |= DWT_SPI_TRANS_SUB_ADDR;
236 hdr[1] = (uint8_t)offset & DWT_SPI_TRANS_SHORT_MAX_OFFSET;
237 hdr_len += 1;
238
239 if (offset > DWT_SPI_TRANS_SHORT_MAX_OFFSET) {
240 hdr[1] |= DWT_SPI_TRANS_EXTEND_ADDR;
241 hdr[2] = (uint8_t)(offset >> 7);
242 hdr_len += 1;
243 }
244
245 }
246
247 if (write) {
248 hdr[0] |= DWT_SPI_TRANS_WRITE_OP;
249 return dwt_spi_write(dev, hdr_len, hdr, buf_len, buf);
250 } else {
251 return dwt_spi_read(dev, hdr_len, hdr, buf_len, buf);
252 }
253 }
254
255 static int dwt_register_read(const struct device *dev,
256 uint8_t reg, uint16_t offset, size_t buf_len, uint8_t *buf)
257 {
258 return dwt_spi_transfer(dev, reg, offset, buf_len, buf, false);
259 }
260
261 static int dwt_register_write(const struct device *dev,
262 uint8_t reg, uint16_t offset, size_t buf_len, uint8_t *buf)
263 {
264 return dwt_spi_transfer(dev, reg, offset, buf_len, buf, true);
265 }
266
267 static inline uint32_t dwt_reg_read_u32(const struct device *dev,
268 uint8_t reg, uint16_t offset)
269 {
270 uint8_t buf[sizeof(uint32_t)];
271
272 dwt_spi_transfer(dev, reg, offset, sizeof(buf), buf, false);
273
274 return sys_get_le32(buf);
275 }
276
277 static inline uint16_t dwt_reg_read_u16(const struct device *dev,
278 uint8_t reg, uint16_t offset)
279 {
280 uint8_t buf[sizeof(uint16_t)];
281
282 dwt_spi_transfer(dev, reg, offset, sizeof(buf), buf, false);
283
284 return sys_get_le16(buf);
285 }
286
287 static inline uint8_t dwt_reg_read_u8(const struct device *dev,
288 uint8_t reg, uint16_t offset)
289 {
290 uint8_t buf;
291
292 dwt_spi_transfer(dev, reg, offset, sizeof(buf), &buf, false);
293
294 return buf;
295 }
296
297 static inline void dwt_reg_write_u32(const struct device *dev,
298 uint8_t reg, uint16_t offset, uint32_t val)
299 {
300 uint8_t buf[sizeof(uint32_t)];
301
302 sys_put_le32(val, buf);
303 dwt_spi_transfer(dev, reg, offset, sizeof(buf), buf, true);
304 }
305
306 static inline void dwt_reg_write_u16(const struct device *dev,
307 uint8_t reg, uint16_t offset, uint16_t val)
308 {
309 uint8_t buf[sizeof(uint16_t)];
310
311 sys_put_le16(val, buf);
312 dwt_spi_transfer(dev, reg, offset, sizeof(buf), buf, true);
313 }
314
315 static inline void dwt_reg_write_u8(const struct device *dev,
316 uint8_t reg, uint16_t offset, uint8_t val)
317 {
318 dwt_spi_transfer(dev, reg, offset, sizeof(uint8_t), &val, true);
319 }
320
321 static ALWAYS_INLINE void dwt_setup_int(const struct device *dev,
322 bool enable)
323 {
324 const struct dwt_hi_cfg *hi_cfg = dev->config;
325
326 unsigned int flags = enable
327 ? GPIO_INT_EDGE_TO_ACTIVE
328 : GPIO_INT_DISABLE;
329
330 gpio_pin_interrupt_configure_dt(&hi_cfg->irq_gpio, flags);
331 }
332
333 static void dwt_reset_rfrx(const struct device *dev)
334 {
335 /*
336 * Apply a receiver-only soft reset,
337 * see SOFTRESET field description in DW1000 User Manual.
338 */
339 dwt_reg_write_u8(dev, DWT_PMSC_ID, DWT_PMSC_CTRL0_SOFTRESET_OFFSET,
340 DWT_PMSC_CTRL0_RESET_RX);
341 dwt_reg_write_u8(dev, DWT_PMSC_ID, DWT_PMSC_CTRL0_SOFTRESET_OFFSET,
342 DWT_PMSC_CTRL0_RESET_CLEAR);
343 }
344
345 static void dwt_disable_txrx(const struct device *dev)
346 {
347 dwt_setup_int(dev, false);
348
349 dwt_reg_write_u8(dev, DWT_SYS_CTRL_ID, DWT_SYS_CTRL_OFFSET,
350 DWT_SYS_CTRL_TRXOFF);
351
352 dwt_reg_write_u32(dev, DWT_SYS_STATUS_ID, DWT_SYS_STATUS_OFFSET,
353 (DWT_SYS_STATUS_ALL_RX_GOOD |
354 DWT_SYS_STATUS_ALL_RX_TO |
355 DWT_SYS_STATUS_ALL_RX_ERR |
356 DWT_SYS_STATUS_ALL_TX));
357
358 dwt_setup_int(dev, true);
359 }
360
361 /* timeout time in units of 1.026 microseconds */
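/* One count is 512 periods of the 499.2 MHz fundamental clock, about 1.026 us. */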
362 static int dwt_enable_rx(const struct device *dev, uint16_t timeout)
363 {
364 uint32_t sys_cfg;
365 uint16_t sys_ctrl = DWT_SYS_CTRL_RXENAB;
366
367 sys_cfg = dwt_reg_read_u32(dev, DWT_SYS_CFG_ID, 0);
368
369 if (timeout != 0) {
370 dwt_reg_write_u16(dev, DWT_RX_FWTO_ID, DWT_RX_FWTO_OFFSET,
371 timeout);
372 sys_cfg |= DWT_SYS_CFG_RXWTOE;
373 } else {
374 sys_cfg &= ~DWT_SYS_CFG_RXWTOE;
375 }
376
377 dwt_reg_write_u32(dev, DWT_SYS_CFG_ID, 0, sys_cfg);
378 dwt_reg_write_u16(dev, DWT_SYS_CTRL_ID, DWT_SYS_CTRL_OFFSET, sys_ctrl);
379
380 return 0;
381 }
382
383 static inline void dwt_irq_handle_rx_cca(const struct device *dev)
384 {
385 struct dwt_context *ctx = dev->data;
386
387 k_sem_give(&ctx->phy_sem);
388 ctx->cca_busy = true;
389
390 /* Clear all RX event bits */
391 dwt_reg_write_u32(dev, DWT_SYS_STATUS_ID, 0,
392 DWT_SYS_STATUS_ALL_RX_GOOD);
393 }
394
395 static inline void dwt_irq_handle_rx(const struct device *dev, uint32_t sys_stat)
396 {
397 struct dwt_context *ctx = dev->data;
398 struct net_pkt *pkt = NULL;
399 struct dwt_rx_info_regs rx_inf_reg;
400 float a_const;
401 uint32_t rx_finfo;
402 uint32_t ttcki;
403 uint32_t rx_pacc;
404 uint32_t cir_pwr;
405 uint32_t flags_to_clear;
406 int32_t ttcko;
407 uint16_t pkt_len;
408 uint8_t *fctrl;
409 int8_t rx_level = INT8_MIN;
410
411 LOG_DBG("RX OK event, SYS_STATUS 0x%08x", sys_stat);
412 flags_to_clear = sys_stat & DWT_SYS_STATUS_ALL_RX_GOOD;
413
414 rx_finfo = dwt_reg_read_u32(dev, DWT_RX_FINFO_ID, DWT_RX_FINFO_OFFSET);
415 pkt_len = rx_finfo & DWT_RX_FINFO_RXFLEN_MASK;
416 rx_pacc = (rx_finfo & DWT_RX_FINFO_RXPACC_MASK) >>
417 DWT_RX_FINFO_RXPACC_SHIFT;
418
419 if (!(IS_ENABLED(CONFIG_IEEE802154_RAW_MODE))) {
420 pkt_len -= DWT_FCS_LENGTH;
421 }
422
423 pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, pkt_len,
424 AF_UNSPEC, 0, K_NO_WAIT);
425 if (!pkt) {
426 LOG_ERR("No buf available");
427 goto rx_out_enable_rx;
428 }
429
430 dwt_register_read(dev, DWT_RX_BUFFER_ID, 0, pkt_len, pkt->buffer->data);
431 dwt_register_read(dev, DWT_RX_FQUAL_ID, 0, sizeof(rx_inf_reg),
432 (uint8_t *)&rx_inf_reg);
433 net_buf_add(pkt->buffer, pkt_len);
434 fctrl = pkt->buffer->data;
435
436 /*
437 * Get Ranging tracking offset and tracking interval
438 * for Crystal characterization
439 */
440 ttcki = sys_get_le32(rx_inf_reg.rx_ttcki);
441 ttcko = sys_get_le32(rx_inf_reg.rx_ttcko) & DWT_RX_TTCKO_RXTOFS_MASK;
442 /* Tracking offset value is a 19-bit signed integer */
443 if (ttcko & BIT(18)) {
444 ttcko |= ~DWT_RX_TTCKO_RXTOFS_MASK;
445 }
446
447 /* TODO add:
448 * net_pkt_set_ieee802154_tcki(pkt, ttcki);
449 * net_pkt_set_ieee802154_tcko(pkt, ttcko);
450 */
451 LOG_DBG("ttcko %d ttcki: 0x%08x", ttcko, ttcki);
452
453 if (IS_ENABLED(CONFIG_NET_PKT_TIMESTAMP)) {
454 uint8_t ts_buf[sizeof(uint64_t)] = {0};
455 uint64_t ts_nsec;
456
457 memcpy(ts_buf, rx_inf_reg.rx_time, DWT_RX_TIME_RX_STAMP_LEN);
458 ts_nsec = (sys_get_le64(ts_buf) * DWT_TS_TIME_UNITS_FS) / 1000000U;
459 net_pkt_set_timestamp_ns(pkt, ts_nsec);
460 }
461
462 /* See 4.7.2 Estimating the receive signal power */
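/*
 * Estimated RX level in dBm: 10 * log10((C * 2^17) / N^2) - A,
 * where C is CIR_PWR, N is the preamble accumulation count (RXPACC)
 * and A is a constant that depends on the PRF.
 */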
463 cir_pwr = sys_get_le16(&rx_inf_reg.rx_fqual[6]);
464 if (ctx->rf_cfg.prf == DWT_PRF_16M) {
465 a_const = DWT_RX_SIG_PWR_A_CONST_PRF16;
466 } else {
467 a_const = DWT_RX_SIG_PWR_A_CONST_PRF64;
468 }
469
470 if (rx_pacc != 0) {
471 #if defined(CONFIG_NEWLIB_LIBC)
472 /* From 4.7.2 Estimating the receive signal power */
473 rx_level = 10.0 * log10f(cir_pwr * BIT(17) /
474 (rx_pacc * rx_pacc)) - a_const;
475 #endif
476 }
477
478 net_pkt_set_ieee802154_rssi_dbm(pkt, rx_level);
479
480 /*
481 * Workaround for AAT status bit issue,
482 * From 5.3.5 Host Notification in DW1000 User Manual:
483 * "Note: there is a situation that can result in the AAT bit being set
484 * for the current frame as a result of a previous frame that was
485 * received and rejected due to frame filtering."
486 */
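/*
 * fctrl[0] bit 5 (0x20) is the Ack Request bit of the frame control
 * field; if the received frame did not request an ACK, the AAT flag
 * is left over from a filtered frame and can safely be cleared.
 */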
487 if ((sys_stat & DWT_SYS_STATUS_AAT) && ((fctrl[0] & 0x20) == 0)) {
488 flags_to_clear |= DWT_SYS_STATUS_AAT;
489 }
490
491 if (ieee802154_handle_ack(ctx->iface, pkt) == NET_OK) {
492 LOG_INF("ACK packet handled");
493 goto rx_out_unref_pkt;
494 }
495
496 /* LQI not implemented */
497 LOG_DBG("Caught a packet (%u) (RSSI: %d)", pkt_len, rx_level);
498 LOG_HEXDUMP_DBG(pkt->buffer->data, pkt_len, "RX buffer:");
499
500 if (net_recv_data(ctx->iface, pkt) == NET_OK) {
501 goto rx_out_enable_rx;
502 } else {
503 LOG_DBG("Packet dropped by NET stack");
504 }
505
506 rx_out_unref_pkt:
507 if (pkt) {
508 net_pkt_unref(pkt);
509 }
510
511 rx_out_enable_rx:
512 dwt_reg_write_u32(dev, DWT_SYS_STATUS_ID, 0, flags_to_clear);
513 LOG_DBG("Cleared SYS_STATUS flags 0x%08x", flags_to_clear);
514 if (atomic_test_bit(&ctx->state, DWT_STATE_RX_DEF_ON)) {
515 /*
516 * Re-enable reception, but unlike dwt_enable_rx(),
517 * without reading SYS_STATUS or setting the delayed option.
518 */
519 dwt_reg_write_u16(dev, DWT_SYS_CTRL_ID, DWT_SYS_CTRL_OFFSET,
520 DWT_SYS_CTRL_RXENAB);
521 }
522 }
523
524 static void dwt_irq_handle_tx(const struct device *dev, uint32_t sys_stat)
525 {
526 struct dwt_context *ctx = dev->data;
527
528 /* Clear TX event bits */
529 dwt_reg_write_u32(dev, DWT_SYS_STATUS_ID, 0,
530 DWT_SYS_STATUS_ALL_TX);
531
532 LOG_DBG("TX confirmed event");
533 k_sem_give(&ctx->phy_sem);
534 }
535
536 static void dwt_irq_handle_rxto(const struct device *dev, uint32_t sys_stat)
537 {
538 struct dwt_context *ctx = dev->data;
539
540 /* Clear RX timeout event bits */
541 dwt_reg_write_u32(dev, DWT_SYS_STATUS_ID, 0,
542 DWT_SYS_STATUS_RXRFTO);
543
544 dwt_disable_txrx(dev);
545 /* Receiver reset necessary, see 4.1.6 RX Message timestamp */
546 dwt_reset_rfrx(dev);
547
548 LOG_DBG("RX timeout event");
549
550 if (atomic_test_bit(&ctx->state, DWT_STATE_CCA)) {
551 k_sem_give(&ctx->phy_sem);
552 ctx->cca_busy = false;
553 }
554 }
555
556 static void dwt_irq_handle_error(const struct device *dev, uint32_t sys_stat)
557 {
558 struct dwt_context *ctx = dev->data;
559
560 /* Clear RX error event bits */
561 dwt_reg_write_u32(dev, DWT_SYS_STATUS_ID, 0, DWT_SYS_STATUS_ALL_RX_ERR);
562
563 dwt_disable_txrx(dev);
564 /* Receiver reset necessary, see 4.1.6 RX Message timestamp */
565 dwt_reset_rfrx(dev);
566
567 LOG_INF("RX error event");
568 if (atomic_test_bit(&ctx->state, DWT_STATE_CCA)) {
569 k_sem_give(&ctx->phy_sem);
570 ctx->cca_busy = true;
571 return;
572 }
573
574 if (atomic_test_bit(&ctx->state, DWT_STATE_RX_DEF_ON)) {
575 dwt_enable_rx(dev, 0);
576 }
577 }
578
579 static void dwt_irq_work_handler(struct k_work *item)
580 {
581 struct dwt_context *ctx = CONTAINER_OF(item, struct dwt_context,
582 irq_cb_work);
583 const struct device *dev = ctx->dev;
584 uint32_t sys_stat;
585
586 k_sem_take(&ctx->dev_lock, K_FOREVER);
587
588 sys_stat = dwt_reg_read_u32(dev, DWT_SYS_STATUS_ID, 0);
589
590 if (sys_stat & DWT_SYS_STATUS_RXFCG) {
591 if (atomic_test_bit(&ctx->state, DWT_STATE_CCA)) {
592 dwt_irq_handle_rx_cca(dev);
593 } else {
594 dwt_irq_handle_rx(dev, sys_stat);
595 }
596 }
597
598 if (sys_stat & DWT_SYS_STATUS_TXFRS) {
599 dwt_irq_handle_tx(dev, sys_stat);
600 }
601
602 if (sys_stat & DWT_SYS_STATUS_ALL_RX_TO) {
603 dwt_irq_handle_rxto(dev, sys_stat);
604 }
605
606 if (sys_stat & DWT_SYS_STATUS_ALL_RX_ERR) {
607 dwt_irq_handle_error(dev, sys_stat);
608 }
609
610 k_sem_give(&ctx->dev_lock);
611 }
612
613 static void dwt_gpio_callback(const struct device *dev,
614 struct gpio_callback *cb, uint32_t pins)
615 {
616 struct dwt_context *ctx = CONTAINER_OF(cb, struct dwt_context, gpio_cb);
617
618 LOG_DBG("IRQ callback triggered %p", ctx);
619 k_work_submit_to_queue(&dwt_work_queue, &ctx->irq_cb_work);
620 }
621
622 static enum ieee802154_hw_caps dwt_get_capabilities(const struct device *dev)
623 {
624 /* TODO: Implement HW-supported AUTOACK + frame pending bit handling. */
625 return IEEE802154_HW_FCS | IEEE802154_HW_FILTER |
626 IEEE802154_HW_TXTIME;
627 }
628
629 static uint32_t dwt_get_pkt_duration_ns(struct dwt_context *ctx, uint8_t psdu_len)
630 {
631 struct dwt_phy_config *rf_cfg = &ctx->rf_cfg;
632 float t_psdu = rf_cfg->t_dsym * psdu_len * 8;
633
634 return (rf_cfg->t_shr + rf_cfg->t_phr + t_psdu);
635 }
636
637 static int dwt_cca(const struct device *dev)
638 {
639 struct dwt_context *ctx = dev->data;
640 uint32_t cca_dur = (dwt_get_pkt_duration_ns(ctx, 127) +
641 dwt_get_pkt_duration_ns(ctx, 5)) /
642 UWB_PHY_TDSYM_PHR_6M8;
643
644 if (atomic_test_and_set_bit(&ctx->state, DWT_STATE_CCA)) {
645 LOG_ERR("Transceiver busy");
646 return -EBUSY;
647 }
648
649 /* Perform CCA Mode 5 */
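/*
 * Listen for the duration of a maximum length frame plus a 5 octet
 * (imm-ACK sized) frame, converted from ns to the ~1.026 us receiver
 * timeout units. The IRQ handlers report busy on frame reception or
 * RX error and idle on RX timeout.
 */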
650 k_sem_take(&ctx->dev_lock, K_FOREVER);
651 dwt_disable_txrx(dev);
652 LOG_DBG("CCA duration %u us", cca_dur);
653
654 dwt_enable_rx(dev, cca_dur);
655 k_sem_give(&ctx->dev_lock);
656
657 k_sem_take(&ctx->phy_sem, K_FOREVER);
658 LOG_DBG("CCA finished %p", ctx);
659
660 atomic_clear_bit(&ctx->state, DWT_STATE_CCA);
661 if (atomic_test_bit(&ctx->state, DWT_STATE_RX_DEF_ON)) {
662 k_sem_take(&ctx->dev_lock, K_FOREVER);
663 dwt_enable_rx(dev, 0);
664 k_sem_give(&ctx->dev_lock);
665 }
666
667 return ctx->cca_busy ? -EBUSY : 0;
668 }
669
670 static int dwt_ed(const struct device *dev, uint16_t duration,
671 energy_scan_done_cb_t done_cb)
672 {
673 /* TODO: see description Sub-Register 0x23:02 – AGC_CTRL1 */
674
675 return -ENOTSUP;
676 }
677
678 static int dwt_set_channel(const struct device *dev, uint16_t channel)
679 {
680 struct dwt_context *ctx = dev->data;
681 struct dwt_phy_config *rf_cfg = &ctx->rf_cfg;
682
683 if (channel > 15) {
684 return -EINVAL;
685 }
686
687 if (channel == 0 || channel == 6 || channel > 7) {
688 return -ENOTSUP;
689 }
690
691 rf_cfg->channel = channel;
692 LOG_INF("Set channel %u", channel);
693
694 k_sem_take(&ctx->dev_lock, K_FOREVER);
695
696 dwt_disable_txrx(dev);
697 dwt_configure_rf_phy(dev);
698
699 if (atomic_test_bit(&ctx->state, DWT_STATE_RX_DEF_ON)) {
700 dwt_enable_rx(dev, 0);
701 }
702
703 k_sem_give(&ctx->dev_lock);
704
705 return 0;
706 }
707
708 static int dwt_set_pan_id(const struct device *dev, uint16_t pan_id)
709 {
710 struct dwt_context *ctx = dev->data;
711
712 k_sem_take(&ctx->dev_lock, K_FOREVER);
713 dwt_reg_write_u16(dev, DWT_PANADR_ID, DWT_PANADR_PAN_ID_OFFSET, pan_id);
714 k_sem_give(&ctx->dev_lock);
715
716 LOG_INF("Set PAN ID 0x%04x %p", pan_id, ctx);
717
718 return 0;
719 }
720
721 static int dwt_set_short_addr(const struct device *dev, uint16_t short_addr)
722 {
723 struct dwt_context *ctx = dev->data;
724
725 k_sem_take(&ctx->dev_lock, K_FOREVER);
726 dwt_reg_write_u16(dev, DWT_PANADR_ID, DWT_PANADR_SHORT_ADDR_OFFSET,
727 short_addr);
728 k_sem_give(&ctx->dev_lock);
729
730 LOG_INF("Set short 0x%x %p", short_addr, ctx);
731
732 return 0;
733 }
734
735 static int dwt_set_ieee_addr(const struct device *dev,
736 const uint8_t *ieee_addr)
737 {
738 struct dwt_context *ctx = dev->data;
739
740 LOG_INF("IEEE address %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
741 ieee_addr[7], ieee_addr[6], ieee_addr[5], ieee_addr[4],
742 ieee_addr[3], ieee_addr[2], ieee_addr[1], ieee_addr[0]);
743
744 k_sem_take(&ctx->dev_lock, K_FOREVER);
745 dwt_register_write(dev, DWT_EUI_64_ID, DWT_EUI_64_OFFSET,
746 DWT_EUI_64_LEN, (uint8_t *)ieee_addr);
747 k_sem_give(&ctx->dev_lock);
748
749 return 0;
750 }
751
752 static int dwt_filter(const struct device *dev,
753 bool set,
754 enum ieee802154_filter_type type,
755 const struct ieee802154_filter *filter)
756 {
757 if (!set) {
758 return -ENOTSUP;
759 }
760
761 if (type == IEEE802154_FILTER_TYPE_IEEE_ADDR) {
762 return dwt_set_ieee_addr(dev, filter->ieee_addr);
763 } else if (type == IEEE802154_FILTER_TYPE_SHORT_ADDR) {
764 return dwt_set_short_addr(dev, filter->short_addr);
765 } else if (type == IEEE802154_FILTER_TYPE_PAN_ID) {
766 return dwt_set_pan_id(dev, filter->pan_id);
767 }
768
769 return -ENOTSUP;
770 }
771
772 static int dwt_set_power(const struct device *dev, int16_t dbm)
773 {
774 struct dwt_context *ctx = dev->data;
775
776 LOG_INF("set_txpower not supported %p", ctx);
777
778 return 0;
779 }
780
781 static int dwt_tx(const struct device *dev, enum ieee802154_tx_mode tx_mode,
782 struct net_pkt *pkt, struct net_buf *frag)
783 {
784 struct dwt_context *ctx = dev->data;
785 size_t len = frag->len;
786 uint32_t tx_time = 0;
787 uint64_t tmp_fs;
788 uint32_t tx_fctrl;
789 uint8_t sys_ctrl = DWT_SYS_CTRL_TXSTRT;
790
791 if (atomic_test_and_set_bit(&ctx->state, DWT_STATE_TX)) {
792 LOG_ERR("Transceiver busy");
793 return -EBUSY;
794 }
795
796 k_sem_reset(&ctx->phy_sem);
797 k_sem_take(&ctx->dev_lock, K_FOREVER);
798
799 switch (tx_mode) {
800 case IEEE802154_TX_MODE_DIRECT:
801 break;
802 case IEEE802154_TX_MODE_TXTIME:
803 /*
804 * tx_time is the upper 32 bits of the 40-bit system
805 * time value at which to send the message.
806 */
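/*
 * Convert the ns timestamp to fs, then to ~15.65 ps device time
 * units; the >> 8 together with the one byte offset into DX_TIME
 * keeps only the upper 32 bits of the 40-bit value.
 */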
807 tmp_fs = net_pkt_timestamp_ns(pkt);
808 tmp_fs *= 1000U * 1000U;
809
810 tx_time = (tmp_fs / DWT_TS_TIME_UNITS_FS) >> 8;
811 sys_ctrl |= DWT_SYS_CTRL_TXDLYS;
812 /* DX_TIME is 40-bit register */
813 dwt_reg_write_u32(dev, DWT_DX_TIME_ID, 1, tx_time);
814
815 LOG_DBG("ntx hi32 %x", tx_time);
816 LOG_DBG("sys hi32 %x",
817 dwt_reg_read_u32(dev, DWT_SYS_TIME_ID, 1));
818 break;
819 default:
820 LOG_ERR("TX mode %d not supported", tx_mode);
821 goto error;
822 }
823
824 LOG_HEXDUMP_DBG(frag->data, len, "TX buffer:");
825
826 /*
827 * See "3 Message Transmission" in DW1000 User Manual for
828 * more details about transmission configuration.
829 */
830 if (dwt_register_write(dev, DWT_TX_BUFFER_ID, 0, len, frag->data)) {
831 LOG_ERR("Failed to write TX data");
832 goto error;
833 }
834
835 tx_fctrl = dwt_reg_read_u32(dev, DWT_TX_FCTRL_ID, 0);
836 /* Clear TX buffer index offset, frame length, and length extension */
837 tx_fctrl &= ~(DWT_TX_FCTRL_TFLEN_MASK | DWT_TX_FCTRL_TFLE_MASK |
838 DWT_TX_FCTRL_TXBOFFS_MASK);
839 /* Set frame length and ranging flag */
840 tx_fctrl |= (len + DWT_FCS_LENGTH) & DWT_TX_FCTRL_TFLEN_MASK;
841 tx_fctrl |= DWT_TX_FCTRL_TR;
842 /* Update Transmit Frame Control register */
843 dwt_reg_write_u32(dev, DWT_TX_FCTRL_ID, 0, tx_fctrl);
844
845 dwt_disable_txrx(dev);
846
847 /* Begin transmission */
848 dwt_reg_write_u8(dev, DWT_SYS_CTRL_ID, DWT_SYS_CTRL_OFFSET, sys_ctrl);
849
850 if (sys_ctrl & DWT_SYS_CTRL_TXDLYS) {
851 uint32_t sys_stat = dwt_reg_read_u32(dev, DWT_SYS_STATUS_ID, 0);
852
853 if (sys_stat & DWT_SYS_STATUS_HPDWARN) {
854 LOG_WRN("Half Period Delay Warning");
855 }
856 }
857
858 k_sem_give(&ctx->dev_lock);
859 /* Wait for the TX confirmed event */
860 k_sem_take(&ctx->phy_sem, K_FOREVER);
861
862 if (IS_ENABLED(CONFIG_NET_PKT_TIMESTAMP)) {
863 uint8_t ts_buf[sizeof(uint64_t)] = {0};
864
865 k_sem_take(&ctx->dev_lock, K_FOREVER);
866 dwt_register_read(dev, DWT_TX_TIME_ID,
867 DWT_TX_TIME_TX_STAMP_OFFSET,
868 DWT_TX_TIME_TX_STAMP_LEN,
869 ts_buf);
870 LOG_DBG("ts hi32 %x", (uint32_t)(sys_get_le64(ts_buf) >> 8));
871 LOG_DBG("sys hi32 %x",
872 dwt_reg_read_u32(dev, DWT_SYS_TIME_ID, 1));
873 k_sem_give(&ctx->dev_lock);
874
875 tmp_fs = sys_get_le64(ts_buf) * DWT_TS_TIME_UNITS_FS;
876 net_pkt_set_timestamp_ns(pkt, tmp_fs / 1000000U);
877 }
878
879 atomic_clear_bit(&ctx->state, DWT_STATE_TX);
880
881 if (atomic_test_bit(&ctx->state, DWT_STATE_RX_DEF_ON)) {
882 k_sem_take(&ctx->dev_lock, K_FOREVER);
883 dwt_enable_rx(dev, 0);
884 k_sem_give(&ctx->dev_lock);
885 }
886
887 return 0;
888
889 error:
890 atomic_clear_bit(&ctx->state, DWT_STATE_TX);
891 k_sem_give(&ctx->dev_lock);
892
893 return -EIO;
894 }
895
896 static void dwt_set_frame_filter(const struct device *dev,
897 bool ff_enable, uint8_t ff_type)
898 {
899 uint32_t sys_cfg_ff = ff_enable ? DWT_SYS_CFG_FFE : 0;
900
901 sys_cfg_ff |= ff_type & DWT_SYS_CFG_FF_ALL_EN;
902
903 dwt_reg_write_u8(dev, DWT_SYS_CFG_ID, 0, (uint8_t)sys_cfg_ff);
904 }
905
906 static int dwt_configure(const struct device *dev,
907 enum ieee802154_config_type type,
908 const struct ieee802154_config *config)
909 {
910 struct dwt_context *ctx = dev->data;
911
912 LOG_DBG("API configure %p", ctx);
913
914 switch (type) {
915 case IEEE802154_CONFIG_AUTO_ACK_FPB:
916 LOG_DBG("IEEE802154_CONFIG_AUTO_ACK_FPB");
917 break;
918
919 case IEEE802154_CONFIG_ACK_FPB:
920 LOG_DBG("IEEE802154_CONFIG_ACK_FPB");
921 break;
922
923 case IEEE802154_CONFIG_PAN_COORDINATOR:
924 LOG_DBG("IEEE802154_CONFIG_PAN_COORDINATOR");
925 break;
926
927 case IEEE802154_CONFIG_PROMISCUOUS:
928 LOG_DBG("IEEE802154_CONFIG_PROMISCUOUS");
929 break;
930
931 case IEEE802154_CONFIG_EVENT_HANDLER:
932 LOG_DBG("IEEE802154_CONFIG_EVENT_HANDLER");
933 break;
934
935 default:
936 return -EINVAL;
937 }
938
939 return -ENOTSUP;
940 }
941
942 /* driver-allocated attribute memory - constant across all driver instances */
943 static const struct {
944 const struct ieee802154_phy_channel_range phy_channel_range[2];
945 const struct ieee802154_phy_supported_channels phy_supported_channels;
946 } drv_attr = {
947 .phy_channel_range = {
948 { .from_channel = 1, .to_channel = 5 },
949 { .from_channel = 7, .to_channel = 7 },
950 },
951 .phy_supported_channels = {
952 .ranges = drv_attr.phy_channel_range,
953 .num_ranges = 2U,
954 },
955 };
956
957 static int dwt_attr_get(const struct device *dev, enum ieee802154_attr attr,
958 struct ieee802154_attr_value *value)
959 {
960 if (ieee802154_attr_get_channel_page_and_range(
961 attr, IEEE802154_ATTR_PHY_CHANNEL_PAGE_FOUR_HRP_UWB,
962 &drv_attr.phy_supported_channels, value) == 0) {
963 return 0;
964 }
965
966 switch (attr) {
967 case IEEE802154_ATTR_PHY_HRP_UWB_SUPPORTED_PRFS: {
968 struct dwt_context *ctx = dev->data;
969 struct dwt_phy_config *rf_cfg = &ctx->rf_cfg;
970
971 value->phy_hrp_uwb_supported_nominal_prfs =
972 rf_cfg->prf == DWT_PRF_64M ? IEEE802154_PHY_HRP_UWB_NOMINAL_64_M
973 : IEEE802154_PHY_HRP_UWB_NOMINAL_16_M;
974 return 0;
975 }
976
977 default:
978 return -ENOENT;
979 }
980 }
981
982 /*
983 * Note, the DW_RESET pin should not be driven high externally.
984 */
985 static int dwt_hw_reset(const struct device *dev)
986 {
987 const struct dwt_hi_cfg *hi_cfg = dev->config;
988
989 if (gpio_pin_configure_dt(&hi_cfg->rst_gpio, GPIO_OUTPUT_ACTIVE)) {
990 LOG_ERR("Failed to configure GPIO pin %u", hi_cfg->rst_gpio.pin);
991 return -EINVAL;
992 }
993
994 k_sleep(K_MSEC(1));
995 gpio_pin_set_dt(&hi_cfg->rst_gpio, 0);
996 k_sleep(K_MSEC(5));
997
998 if (gpio_pin_configure_dt(&hi_cfg->rst_gpio, GPIO_INPUT)) {
999 LOG_ERR("Failed to configure GPIO pin %u", hi_cfg->rst_gpio.pin);
1000 return -EINVAL;
1001 }
1002
1003 return 0;
1004 }
1005
1006 /*
1007 * SPI speed in INIT state or for wake-up sequence,
1008 * see 2.3.2 Overview of main operational states
1009 */
1010 static void dwt_set_spi_slow(const struct device *dev, const uint32_t freq)
1011 {
1012 struct dwt_context *ctx = dev->data;
1013
1014 ctx->spi_cfg_slow.frequency = freq;
1015 ctx->spi_cfg = &ctx->spi_cfg_slow;
1016 }
1017
1018 /* SPI speed in IDLE, RX, and TX state */
1019 static void dwt_set_spi_fast(const struct device *dev)
1020 {
1021 const struct dwt_hi_cfg *hi_cfg = dev->config;
1022 struct dwt_context *ctx = dev->data;
1023
1024 ctx->spi_cfg = &hi_cfg->bus.config;
1025 }
1026
1027 static void dwt_set_rx_mode(const struct device *dev)
1028 {
1029 struct dwt_context *ctx = dev->data;
1030 struct dwt_phy_config *rf_cfg = &ctx->rf_cfg;
1031 uint32_t pmsc_ctrl0;
1032 uint32_t t_on_us;
1033 uint8_t rx_sniff[2];
1034
1035 /* SNIFF Mode ON time in units of PAC */
1036 rx_sniff[0] = CONFIG_IEEE802154_DW1000_SNIFF_ONT &
1037 DWT_RX_SNIFF_SNIFF_ONT_MASK;
1038 /* SNIFF Mode OFF time in microseconds */
1039 rx_sniff[1] = CONFIG_IEEE802154_DW1000_SNIFF_OFFT;
1040
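/*
 * ON time is (SNIFF_ONT + 1) PACs of 8 << rx_pac_l preamble symbols;
 * approximating one preamble symbol as 1 us gives the ON time in us.
 */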
1041 t_on_us = (rx_sniff[0] + 1) * (BIT(3) << rf_cfg->rx_pac_l);
1042 LOG_INF("RX duty cycle %u%%", t_on_us * 100 / (t_on_us + rx_sniff[1]));
1043
1044 dwt_register_write(dev, DWT_RX_SNIFF_ID, DWT_RX_SNIFF_OFFSET,
1045 sizeof(rx_sniff), rx_sniff);
1046
1047 pmsc_ctrl0 = dwt_reg_read_u32(dev, DWT_PMSC_ID, DWT_PMSC_CTRL0_OFFSET);
1048 /* Enable PLL2 on/off sequencing for SNIFF mode */
1049 pmsc_ctrl0 |= DWT_PMSC_CTRL0_PLL2_SEQ_EN;
1050 dwt_reg_write_u32(dev, DWT_PMSC_ID, DWT_PMSC_CTRL0_OFFSET, pmsc_ctrl0);
1051 }
1052
1053 static int dwt_start(const struct device *dev)
1054 {
1055 struct dwt_context *ctx = dev->data;
1056 uint8_t cswakeup_buf[32] = {0};
1057
1058 k_sem_take(&ctx->dev_lock, K_FOREVER);
1059
1060 /* Set SPI clock to lowest frequency */
1061 dwt_set_spi_slow(dev, DWT_SPI_CSWAKEUP_FREQ);
1062
1063 if (dwt_reg_read_u32(dev, DWT_DEV_ID_ID, 0) != DWT_DEVICE_ID) {
1064 /* Keep SPI CS line low for 500 microseconds */
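/*
 * A dummy read of 32 octets at DWT_SPI_CSWAKEUP_FREQ (500 kHz) keeps
 * CSn asserted for roughly 32 * 8 / 500 kHz = ~512 us, which triggers
 * the SPI CSn wake-up enabled via DWT_AON_CFG0_WAKE_SPI.
 */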
1065 dwt_register_read(dev, 0, 0, sizeof(cswakeup_buf),
1066 cswakeup_buf);
1067 /* Give device time to initialize */
1068 k_sleep(K_MSEC(5));
1069
1070 if (dwt_reg_read_u32(dev, DWT_DEV_ID_ID, 0) != DWT_DEVICE_ID) {
1071 LOG_ERR("Failed to wake-up %p", dev);
1072 k_sem_give(&ctx->dev_lock);
1073 return -1;
1074 }
1075 } else {
1076 LOG_WRN("Device not in a sleep mode");
1077 }
1078
1079 /* Restore SPI clock settings */
1080 dwt_set_spi_slow(dev, DWT_SPI_SLOW_FREQ);
1081 dwt_set_spi_fast(dev);
1082
1083 dwt_setup_int(dev, true);
1084 dwt_disable_txrx(dev);
1085 dwt_reset_rfrx(dev);
1086
1087 if (CONFIG_IEEE802154_DW1000_SNIFF_ONT != 0) {
1088 dwt_set_rx_mode(dev);
1089 }
1090
1091 /* Re-enable RX after packet reception */
1092 atomic_set_bit(&ctx->state, DWT_STATE_RX_DEF_ON);
1093 dwt_enable_rx(dev, 0);
1094 k_sem_give(&ctx->dev_lock);
1095
1096 LOG_INF("Started %p", dev);
1097
1098 return 0;
1099 }
1100
1101 static int dwt_stop(const struct device *dev)
1102 {
1103 struct dwt_context *ctx = dev->data;
1104
1105 k_sem_take(&ctx->dev_lock, K_FOREVER);
1106 dwt_disable_txrx(dev);
1107 dwt_reset_rfrx(dev);
1108 dwt_setup_int(dev, false);
1109
1110 /* Copy the user configuration and enter sleep mode */
1111 dwt_reg_write_u8(dev, DWT_AON_ID, DWT_AON_CTRL_OFFSET,
1112 DWT_AON_CTRL_SAVE);
1113 k_sem_give(&ctx->dev_lock);
1114
1115 LOG_INF("Stopped %p", dev);
1116
1117 return 0;
1118 }
1119
1120 static inline void dwt_set_sysclks_xti(const struct device *dev, bool ldeload)
1121 {
1122 uint16_t clks = BIT(9) | DWT_PMSC_CTRL0_SYSCLKS_19M;
1123
1124 /*
1125 * See Table 4: Register accesses required to load LDE microcode,
1126 * set PMSC_CTRL0 0x0301, load LDE, set PMSC_CTRL0 0x0200.
1127 */
1128 if (ldeload) {
1129 clks |= BIT(8);
1130 }
1131
1132 /* Force system clock to be the 19.2 MHz XTI clock */
1133 dwt_reg_write_u16(dev, DWT_PMSC_ID, DWT_PMSC_CTRL0_OFFSET, clks);
1134 }
1135
1136 static inline void dwt_set_sysclks_auto(const struct device *dev)
1137 {
1138 uint8_t sclks = DWT_PMSC_CTRL0_SYSCLKS_AUTO |
1139 DWT_PMSC_CTRL0_RXCLKS_AUTO |
1140 DWT_PMSC_CTRL0_TXCLKS_AUTO;
1141
1142 dwt_reg_write_u8(dev, DWT_PMSC_ID, DWT_PMSC_CTRL0_OFFSET, sclks);
1143 }
1144
1145 static uint32_t dwt_otpmem_read(const struct device *dev, uint16_t otp_addr)
1146 {
1147 dwt_reg_write_u16(dev, DWT_OTP_IF_ID, DWT_OTP_ADDR, otp_addr);
1148
1149 dwt_reg_write_u8(dev, DWT_OTP_IF_ID, DWT_OTP_CTRL,
1150 DWT_OTP_CTRL_OTPREAD | DWT_OTP_CTRL_OTPRDEN);
1151 /* OTPREAD is self clearing but OTPRDEN is not */
1152 dwt_reg_write_u8(dev, DWT_OTP_IF_ID, DWT_OTP_CTRL, 0x00);
1153
1154 /* Read the data, available 40 ns after the rising edge of OTPREAD */
1155 return dwt_reg_read_u32(dev, DWT_OTP_IF_ID, DWT_OTP_RDAT);
1156 }
1157
1158 static int dwt_initialise_dev(const struct device *dev)
1159 {
1160 struct dwt_context *ctx = dev->data;
1161 uint32_t otp_val = 0;
1162 uint8_t xtal_trim;
1163
1164 dwt_set_sysclks_xti(dev, false);
1165 ctx->sleep_mode = 0;
1166
1167 /* Disable PMSC control of analog RF subsystem */
1168 dwt_reg_write_u16(dev, DWT_PMSC_ID, DWT_PMSC_CTRL1_OFFSET,
1169 DWT_PMSC_CTRL1_PKTSEQ_DISABLE);
1170
1171 /* Clear all status flags */
1172 dwt_reg_write_u32(dev, DWT_SYS_STATUS_ID, 0, DWT_SYS_STATUS_MASK_32);
1173
1174 /*
1175 * Apply soft reset,
1176 * see SOFTRESET field description in DW1000 User Manual.
1177 */
1178 dwt_reg_write_u8(dev, DWT_PMSC_ID, DWT_PMSC_CTRL0_SOFTRESET_OFFSET,
1179 DWT_PMSC_CTRL0_RESET_ALL);
1180 k_sleep(K_MSEC(1));
1181 dwt_reg_write_u8(dev, DWT_PMSC_ID, DWT_PMSC_CTRL0_SOFTRESET_OFFSET,
1182 DWT_PMSC_CTRL0_RESET_CLEAR);
1183
1184 dwt_set_sysclks_xti(dev, false);
1185
1186 /*
1187 * This bit (a.k.a PLLLDT) should be set to ensure reliable
1188 * operation of the CPLOCK bit.
1189 */
1190 dwt_reg_write_u8(dev, DWT_EXT_SYNC_ID, DWT_EC_CTRL_OFFSET,
1191 DWT_EC_CTRL_PLLLCK);
1192
1193 /* Kick LDO if there is a value programmed. */
1194 otp_val = dwt_otpmem_read(dev, DWT_OTP_LDOTUNE_ADDR);
1195 if ((otp_val & 0xFF) != 0) {
1196 dwt_reg_write_u8(dev, DWT_OTP_IF_ID, DWT_OTP_SF,
1197 DWT_OTP_SF_LDO_KICK);
1198 ctx->sleep_mode |= DWT_AON_WCFG_ONW_LLDO;
1199 LOG_INF("Load LDOTUNE_CAL parameter");
1200 }
1201
1202 otp_val = dwt_otpmem_read(dev, DWT_OTP_XTRIM_ADDR);
1203 xtal_trim = otp_val & DWT_FS_XTALT_MASK;
1204 LOG_INF("OTP Revision 0x%02x, XTAL Trim 0x%02x",
1205 (uint8_t)(otp_val >> 8), xtal_trim);
1206
1207 LOG_DBG("CHIP ID 0x%08x", dwt_otpmem_read(dev, DWT_OTP_PARTID_ADDR));
1208 LOG_DBG("LOT ID 0x%08x", dwt_otpmem_read(dev, DWT_OTP_LOTID_ADDR));
1209 LOG_DBG("Vbat 0x%02x", dwt_otpmem_read(dev, DWT_OTP_VBAT_ADDR));
1210 LOG_DBG("Vtemp 0x%02x", dwt_otpmem_read(dev, DWT_OTP_VTEMP_ADDR));
1211
1212 if (xtal_trim == 0) {
1213 /* Set to default */
1214 xtal_trim = DWT_FS_XTALT_MIDRANGE;
1215 }
1216
1217 /* For FS_XTALT bits 7:5 must always be set to binary “011” */
1218 xtal_trim |= BIT(6) | BIT(5);
1219 dwt_reg_write_u8(dev, DWT_FS_CTRL_ID, DWT_FS_XTALT_OFFSET, xtal_trim);
1220
1221 /* Load LDE microcode into RAM, see 2.5.5.10 LDELOAD */
1222 dwt_set_sysclks_xti(dev, true);
1223 dwt_reg_write_u16(dev, DWT_OTP_IF_ID, DWT_OTP_CTRL,
1224 DWT_OTP_CTRL_LDELOAD);
1225 k_sleep(K_MSEC(1));
1226 dwt_set_sysclks_xti(dev, false);
1227 ctx->sleep_mode |= DWT_AON_WCFG_ONW_LLDE;
1228
1229 dwt_set_sysclks_auto(dev);
1230
1231 if (!(dwt_reg_read_u8(dev, DWT_SYS_STATUS_ID, 0) &
1232 DWT_SYS_STATUS_CPLOCK)) {
1233 LOG_WRN("PLL has not locked");
1234 return -EIO;
1235 }
1236
1237 dwt_set_spi_fast(dev);
1238
1239 /* Setup antenna delay values */
1240 dwt_reg_write_u16(dev, DWT_LDE_IF_ID, DWT_LDE_RXANTD_OFFSET,
1241 DW1000_RX_ANT_DLY);
1242 dwt_reg_write_u16(dev, DWT_TX_ANTD_ID, DWT_TX_ANTD_OFFSET,
1243 DW1000_TX_ANT_DLY);
1244
1245 /* Clear AON_CFG1 register */
1246 dwt_reg_write_u8(dev, DWT_AON_ID, DWT_AON_CFG1_OFFSET, 0);
1247 /*
1248 * Configure sleep mode:
1249 * - On wake-up load configurations from the AON memory
1250 * - preserve sleep mode configuration
1251 * - On Wake-up load the LDE microcode
1252 * - When available, on wake-up load the LDO tune value
1253 */
1254 ctx->sleep_mode |= DWT_AON_WCFG_ONW_LDC |
1255 DWT_AON_WCFG_PRES_SLEEP;
1256 dwt_reg_write_u16(dev, DWT_AON_ID, DWT_AON_WCFG_OFFSET,
1257 ctx->sleep_mode);
1258 LOG_DBG("sleep mode 0x%04x", ctx->sleep_mode);
1259 /* Enable sleep and wake using SPI CSn */
1260 dwt_reg_write_u8(dev, DWT_AON_ID, DWT_AON_CFG0_OFFSET,
1261 DWT_AON_CFG0_WAKE_SPI | DWT_AON_CFG0_SLEEP_EN);
1262
1263 return 0;
1264 }
1265
1266 /*
1267 * RF PHY configuration. Must be carried out as part of initialization and
1268 * for every channel change. See also 2.5 Default Configuration on Power Up.
1269 */
1270 static int dwt_configure_rf_phy(const struct device *dev)
1271 {
1272 struct dwt_context *ctx = dev->data;
1273 struct dwt_phy_config *rf_cfg = &ctx->rf_cfg;
1274 uint8_t chan = rf_cfg->channel;
1275 uint8_t prf_idx = rf_cfg->prf;
1276 uint32_t chan_ctrl = 0;
1277 uint8_t rxctrlh;
1278 uint8_t pll_tune;
1279 uint8_t tune4h;
1280 uint8_t pgdelay;
1281 uint16_t lde_repc;
1282 uint16_t agc_tune1;
1283 uint16_t sfdto;
1284 uint16_t tune1a;
1285 uint16_t tune0b;
1286 uint16_t tune1b;
1287 uint32_t txctrl;
1288 uint32_t pll_cfg;
1289 uint32_t tune2;
1290 uint32_t sys_cfg;
1291 uint32_t tx_fctrl;
1292 uint32_t power;
1293
1294 if ((chan < 1) || (chan > 7) || (chan == 6)) {
1295 LOG_ERR("Channel not supported %u", chan);
1296 return -ENOTSUP;
1297 }
1298
1299 if (rf_cfg->rx_shr_code >= ARRAY_SIZE(dwt_lde_repc_defs)) {
1300 LOG_ERR("Preamble code not supported %u",
1301 rf_cfg->rx_shr_code);
1302 return -ENOTSUP;
1303 }
1304
1305 if (prf_idx >= DWT_NUMOF_PRFS) {
1306 LOG_ERR("PRF not supported %u", prf_idx);
1307 return -ENOTSUP;
1308 }
1309
1310 if (rf_cfg->rx_pac_l >= DWT_NUMOF_PACS) {
1311 LOG_ERR("RX PAC not supported %u", rf_cfg->rx_pac_l);
1312 return -ENOTSUP;
1313 }
1314
1315 if (rf_cfg->rx_ns_sfd > 1) {
1316 LOG_ERR("Wrong NS SFD configuration");
1317 return -ENOTSUP;
1318 }
1319
1320 if (rf_cfg->tx_shr_nsync >= DWT_NUM_OF_PLEN) {
1321 LOG_ERR("Wrong SHR configuration");
1322 return -ENOTSUP;
1323 }
1324
1325 lde_repc = dwt_lde_repc_defs[rf_cfg->rx_shr_code];
1326 agc_tune1 = dwt_agc_tune1_defs[prf_idx];
1327 sfdto = rf_cfg->rx_sfd_to;
1328 rxctrlh = dwt_rxctrlh_defs[dwt_ch_to_cfg[chan]];
1329 txctrl = dwt_txctrl_defs[dwt_ch_to_cfg[chan]];
1330 pll_tune = dwt_plltune_defs[dwt_ch_to_cfg[chan]];
1331 pll_cfg = dwt_pllcfg_defs[dwt_ch_to_cfg[chan]];
1332 tune2 = dwt_tune2_defs[prf_idx][rf_cfg->rx_pac_l];
1333 tune1a = dwt_tune1a_defs[prf_idx];
1334 tune0b = dwt_tune0b_defs[rf_cfg->dr][rf_cfg->rx_ns_sfd];
1335 pgdelay = dwt_pgdelay_defs[dwt_ch_to_cfg[chan]];
1336
1337 sys_cfg = dwt_reg_read_u32(dev, DWT_SYS_CFG_ID, 0);
1338 tx_fctrl = dwt_reg_read_u32(dev, DWT_TX_FCTRL_ID, 0);
1339
1340 /* Don't allow 0 - SFD timeout will always be enabled */
1341 if (sfdto == 0) {
1342 sfdto = DWT_SFDTOC_DEF;
1343 }
1344
1345 /* Set IEEE 802.15.4 compliant mode */
1346 sys_cfg &= ~DWT_SYS_CFG_PHR_MODE_11;
1347
1348 if (rf_cfg->dr == DWT_BR_110K) {
1349 /* Set Receiver Mode 110 kbps data rate */
1350 sys_cfg |= DWT_SYS_CFG_RXM110K;
1351 lde_repc = lde_repc >> 3;
1352 tune1b = DWT_DRX_TUNE1b_110K;
1353 tune4h = DWT_DRX_TUNE4H_PRE64;
1354 } else {
1355 sys_cfg &= ~DWT_SYS_CFG_RXM110K;
1356 if (rf_cfg->tx_shr_nsync == DWT_PLEN_64) {
1357 tune1b = DWT_DRX_TUNE1b_6M8_PRE64;
1358 tune4h = DWT_DRX_TUNE4H_PRE64;
1359 } else {
1360 tune1b = DWT_DRX_TUNE1b_850K_6M8;
1361 tune4h = DWT_DRX_TUNE4H_PRE128PLUS;
1362 }
1363 }
1364
1365 if (sys_cfg & DWT_SYS_CFG_DIS_STXP) {
1366 if (rf_cfg->prf == DWT_PRF_64M) {
1367 power = dwt_txpwr_stxp1_64[dwt_ch_to_cfg[chan]];
1368 } else {
1369 power = dwt_txpwr_stxp1_16[dwt_ch_to_cfg[chan]];
1370 }
1371 } else {
1372 if (rf_cfg->prf == DWT_PRF_64M) {
1373 power = dwt_txpwr_stxp0_64[dwt_ch_to_cfg[chan]];
1374 } else {
1375 power = dwt_txpwr_stxp0_16[dwt_ch_to_cfg[chan]];
1376 }
1377 }
1378
1379 dwt_reg_write_u32(dev, DWT_SYS_CFG_ID, 0, sys_cfg);
1380 LOG_DBG("SYS_CFG: 0x%08x", sys_cfg);
1381
1382 dwt_reg_write_u16(dev, DWT_LDE_IF_ID, DWT_LDE_REPC_OFFSET, lde_repc);
1383 LOG_DBG("LDE_REPC: 0x%04x", lde_repc);
1384
1385 dwt_reg_write_u8(dev, DWT_LDE_IF_ID, DWT_LDE_CFG1_OFFSET,
1386 DWT_DEFAULT_LDE_CFG1);
1387
1388 if (rf_cfg->prf == DWT_PRF_64M) {
1389 dwt_reg_write_u16(dev, DWT_LDE_IF_ID, DWT_LDE_CFG2_OFFSET,
1390 DWT_DEFAULT_LDE_CFG2_PRF64);
1391 LOG_DBG("LDE_CFG2: 0x%04x", DWT_DEFAULT_LDE_CFG2_PRF64);
1392 } else {
1393 dwt_reg_write_u16(dev, DWT_LDE_IF_ID, DWT_LDE_CFG2_OFFSET,
1394 DWT_DEFAULT_LDE_CFG2_PRF16);
1395 LOG_DBG("LDE_CFG2: 0x%04x", DWT_DEFAULT_LDE_CFG2_PRF16);
1396 }
1397
1398 /* Configure PLL2/RF PLL block CFG/TUNE (for a given channel) */
1399 dwt_reg_write_u32(dev, DWT_FS_CTRL_ID, DWT_FS_PLLCFG_OFFSET, pll_cfg);
1400 LOG_DBG("PLLCFG: 0x%08x", pll_cfg);
1401 dwt_reg_write_u8(dev, DWT_FS_CTRL_ID, DWT_FS_PLLTUNE_OFFSET, pll_tune);
1402 LOG_DBG("PLLTUNE: 0x%02x", pll_tune);
1403 /* Configure RF RX blocks (for specified channel/bandwidth) */
1404 dwt_reg_write_u8(dev, DWT_RF_CONF_ID, DWT_RF_RXCTRLH_OFFSET, rxctrlh);
1405 LOG_DBG("RXCTRLH: 0x%02x", rxctrlh);
1406 /* Configure RF/TX blocks for specified channel and PRF */
1407 dwt_reg_write_u32(dev, DWT_RF_CONF_ID, DWT_RF_TXCTRL_OFFSET, txctrl);
1408 LOG_DBG("TXCTRL: 0x%08x", txctrl);
1409
1410 /* Digital receiver configuration, DRX_CONF */
1411 dwt_reg_write_u16(dev, DWT_DRX_CONF_ID, DWT_DRX_TUNE0b_OFFSET, tune0b);
1412 LOG_DBG("DRX_TUNE0b: 0x%04x", tune0b);
1413 dwt_reg_write_u16(dev, DWT_DRX_CONF_ID, DWT_DRX_TUNE1a_OFFSET, tune1a);
1414 LOG_DBG("DRX_TUNE1a: 0x%04x", tune1a);
1415 dwt_reg_write_u16(dev, DWT_DRX_CONF_ID, DWT_DRX_TUNE1b_OFFSET, tune1b);
1416 LOG_DBG("DRX_TUNE1b: 0x%04x", tune1b);
1417 dwt_reg_write_u32(dev, DWT_DRX_CONF_ID, DWT_DRX_TUNE2_OFFSET, tune2);
1418 LOG_DBG("DRX_TUNE2: 0x%08x", tune2);
1419 dwt_reg_write_u8(dev, DWT_DRX_CONF_ID, DWT_DRX_TUNE4H_OFFSET, tune4h);
1420 LOG_DBG("DRX_TUNE4H: 0x%02x", tune4h);
1421 dwt_reg_write_u16(dev, DWT_DRX_CONF_ID, DWT_DRX_SFDTOC_OFFSET, sfdto);
1422 LOG_DBG("DRX_SFDTOC: 0x%04x", sfdto);
1423
1424 /* Automatic Gain Control configuration and control, AGC_CTRL */
1425 dwt_reg_write_u16(dev, DWT_AGC_CTRL_ID, DWT_AGC_TUNE1_OFFSET,
1426 agc_tune1);
1427 LOG_DBG("AGC_TUNE1: 0x%04x", agc_tune1);
1428 dwt_reg_write_u32(dev, DWT_AGC_CTRL_ID, DWT_AGC_TUNE2_OFFSET,
1429 DWT_AGC_TUNE2_VAL);
1430
1431 if (rf_cfg->rx_ns_sfd) {
1432 /*
1433 * SFD_LENGTH, length of the SFD sequence used when
1434 * the data rate is 850 kbps or 6.8 Mbps,
1435 * must be set to either 8 or 16.
1436 */
1437 dwt_reg_write_u8(dev, DWT_USR_SFD_ID, 0x00,
1438 dwt_ns_sfdlen[rf_cfg->dr]);
1439 LOG_DBG("USR_SFDLEN: 0x%02x", dwt_ns_sfdlen[rf_cfg->dr]);
1440 chan_ctrl |= DWT_CHAN_CTRL_DWSFD;
1441 }
1442
1443 /* Set RX_CHAN and TX_CHAN */
1444 chan_ctrl |= (chan & DWT_CHAN_CTRL_TX_CHAN_MASK) |
1445 ((chan << DWT_CHAN_CTRL_RX_CHAN_SHIFT) &
1446 DWT_CHAN_CTRL_RX_CHAN_MASK);
1447
1448 /* Set RXPRF */
1449 chan_ctrl |= (BIT(rf_cfg->prf) << DWT_CHAN_CTRL_RXFPRF_SHIFT) &
1450 DWT_CHAN_CTRL_RXFPRF_MASK;
1451
1452 /* Set TX_PCOD */
1453 chan_ctrl |= (rf_cfg->tx_shr_code << DWT_CHAN_CTRL_TX_PCOD_SHIFT) &
1454 DWT_CHAN_CTRL_TX_PCOD_MASK;
1455
1456 /* Set RX_PCOD */
1457 chan_ctrl |= (rf_cfg->rx_shr_code << DWT_CHAN_CTRL_RX_PCOD_SHIFT) &
1458 DWT_CHAN_CTRL_RX_PCOD_MASK;
1459
1460 /* Set Channel Control */
1461 dwt_reg_write_u32(dev, DWT_CHAN_CTRL_ID, 0, chan_ctrl);
1462 LOG_DBG("CHAN_CTRL 0x%08x", chan_ctrl);
1463
1464 /* Set up TX Preamble Size, PRF and Data Rate */
1465 tx_fctrl = dwt_plen_cfg[rf_cfg->tx_shr_nsync] |
1466 (BIT(rf_cfg->prf) << DWT_TX_FCTRL_TXPRF_SHFT) |
1467 (rf_cfg->dr << DWT_TX_FCTRL_TXBR_SHFT);
1468
1469 dwt_reg_write_u32(dev, DWT_TX_FCTRL_ID, 0, tx_fctrl);
1470 LOG_DBG("TX_FCTRL 0x%08x", tx_fctrl);
1471
1472 /* Set the Pulse Generator Delay */
1473 dwt_reg_write_u8(dev, DWT_TX_CAL_ID, DWT_TC_PGDELAY_OFFSET, pgdelay);
1474 LOG_DBG("PGDELAY 0x%02x", pgdelay);
1475 /* Set Transmit Power Control */
1476 dwt_reg_write_u32(dev, DWT_TX_POWER_ID, 0, power);
1477 LOG_DBG("TX_POWER 0x%08x", power);
1478
1479 /*
1480 * From 5.3.1.2 SFD Initialisation,
1481 * SFD sequence initialisation for Auto ACK frame.
1482 */
1483 dwt_reg_write_u8(dev, DWT_SYS_CTRL_ID, DWT_SYS_CTRL_OFFSET,
1484 DWT_SYS_CTRL_TXSTRT | DWT_SYS_CTRL_TRXOFF);
1485
1486 /*
1487 * Calculate PHY timing parameters
1488 *
1489 * From (9.4) Std 802.15.4-2011
1490 * Tshr = Tpsym * (NSYNC + NSFD )
1491 * Tphr = NPHR * Tdsym1m
1492 * Tpsdu = Tdsym * NPSDU * NSYMPEROCTET / Rfec
1493 *
1494 * PRF: pulse repetition frequency
1495 * PSR: preamble symbol repetitions
1496 * SFD: start of frame delimiter
1497 * SHR: synchronisation header (SYNC + SFD)
1498 * PHR: PHY header
1499 */
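/* tx_shr_nsync is a PLEN index; the preamble length in symbols is 64 << index. */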
1500 uint16_t nsync = BIT(rf_cfg->tx_shr_nsync + 6);
1501
1502 if (rf_cfg->prf == DWT_PRF_64M) {
1503 rf_cfg->t_shr = UWB_PHY_TPSYM_PRF64 *
1504 (nsync + UWB_PHY_NUMOF_SYM_SHR_SFD);
1505 } else {
1506 rf_cfg->t_shr = UWB_PHY_TPSYM_PRF16 *
1507 (nsync + UWB_PHY_NUMOF_SYM_SHR_SFD);
1508 }
1509
1510 if (rf_cfg->dr == DWT_BR_6M8) {
1511 rf_cfg->t_phr = UWB_PHY_NUMOF_SYM_PHR * UWB_PHY_TDSYM_PHR_6M8;
1512 rf_cfg->t_dsym = UWB_PHY_TDSYM_DATA_6M8 / 0.44;
1513 } else if (rf_cfg->dr == DWT_BR_850K) {
1514 rf_cfg->t_phr = UWB_PHY_NUMOF_SYM_PHR * UWB_PHY_TDSYM_PHR_850K;
1515 rf_cfg->t_dsym = UWB_PHY_TDSYM_DATA_850K / 0.44;
1516 } else {
1517 rf_cfg->t_phr = UWB_PHY_NUMOF_SYM_PHR * UWB_PHY_TDSYM_PHR_110K;
1518 rf_cfg->t_dsym = UWB_PHY_TDSYM_DATA_110K / 0.44;
1519 }
1520
1521 return 0;
1522 }
1523
1524 static int dw1000_init(const struct device *dev)
1525 {
1526 struct dwt_context *ctx = dev->data;
1527 const struct dwt_hi_cfg *hi_cfg = dev->config;
1528
1529 LOG_INF("Initialize DW1000 Transceiver");
1530 k_sem_init(&ctx->phy_sem, 0, 1);
1531
1532 /* slow SPI config */
1533 memcpy(&ctx->spi_cfg_slow, &hi_cfg->bus.config, sizeof(ctx->spi_cfg_slow));
1534 ctx->spi_cfg_slow.frequency = DWT_SPI_SLOW_FREQ;
1535
1536 if (!spi_is_ready_dt(&hi_cfg->bus)) {
1537 LOG_ERR("SPI device not ready");
1538 return -ENODEV;
1539 }
1540
1541 dwt_set_spi_slow(dev, DWT_SPI_SLOW_FREQ);
1542
1543 /* Initialize IRQ GPIO */
1544 if (!gpio_is_ready_dt(&hi_cfg->irq_gpio)) {
1545 LOG_ERR("IRQ GPIO device not ready");
1546 return -ENODEV;
1547 }
1548
1549 if (gpio_pin_configure_dt(&hi_cfg->irq_gpio, GPIO_INPUT)) {
1550 LOG_ERR("Unable to configure GPIO pin %u", hi_cfg->irq_gpio.pin);
1551 return -EINVAL;
1552 }
1553
1554 gpio_init_callback(&(ctx->gpio_cb), dwt_gpio_callback,
1555 BIT(hi_cfg->irq_gpio.pin));
1556
1557 if (gpio_add_callback(hi_cfg->irq_gpio.port, &(ctx->gpio_cb))) {
1558 LOG_ERR("Failed to add IRQ callback");
1559 return -EINVAL;
1560 }
1561
1562 /* Initialize RESET GPIO */
1563 if (!gpio_is_ready_dt(&hi_cfg->rst_gpio)) {
1564 LOG_ERR("Reset GPIO device not ready");
1565 return -ENODEV;
1566 }
1567
1568 if (gpio_pin_configure_dt(&hi_cfg->rst_gpio, GPIO_INPUT)) {
1569 LOG_ERR("Unable to configure GPIO pin %u", hi_cfg->rst_gpio.pin);
1570 return -EINVAL;
1571 }
1572
1573 LOG_INF("GPIO and SPI configured");
1574
1575 dwt_hw_reset(dev);
1576
1577 if (dwt_reg_read_u32(dev, DWT_DEV_ID_ID, 0) != DWT_DEVICE_ID) {
1578 LOG_ERR("Failed to read device ID %p", dev);
1579 return -ENODEV;
1580 }
1581
1582 if (dwt_initialise_dev(dev)) {
1583 LOG_ERR("Failed to initialize DW1000");
1584 return -EIO;
1585 }
1586
1587 if (dwt_configure_rf_phy(dev)) {
1588 LOG_ERR("Failed to configure RF PHY");
1589 return -EIO;
1590 }
1591
1592 /* Allow Beacon, Data, Acknowledgement, MAC command */
1593 dwt_set_frame_filter(dev, true, DWT_SYS_CFG_FFAB | DWT_SYS_CFG_FFAD |
1594 DWT_SYS_CFG_FFAA | DWT_SYS_CFG_FFAM);
1595
1596 /*
1597 * Enable system events:
1598 * - transmit frame sent,
1599 * - receiver FCS good,
1600 * - receiver PHY header error,
1601 * - receiver FCS error,
1602 * - receiver Reed Solomon Frame Sync Loss,
1603 * - receive Frame Wait Timeout,
1604 * - preamble detection timeout,
1605 * - receive SFD timeout
1606 */
1607 dwt_reg_write_u32(dev, DWT_SYS_MASK_ID, 0,
1608 DWT_SYS_MASK_MTXFRS |
1609 DWT_SYS_MASK_MRXFCG |
1610 DWT_SYS_MASK_MRXPHE |
1611 DWT_SYS_MASK_MRXFCE |
1612 DWT_SYS_MASK_MRXRFSL |
1613 DWT_SYS_MASK_MRXRFTO |
1614 DWT_SYS_MASK_MRXPTO |
1615 DWT_SYS_MASK_MRXSFDTO);
1616
1617 /* Initialize IRQ event work queue */
1618 ctx->dev = dev;
1619
1620 k_work_queue_start(&dwt_work_queue, dwt_work_queue_stack,
1621 K_KERNEL_STACK_SIZEOF(dwt_work_queue_stack),
1622 CONFIG_SYSTEM_WORKQUEUE_PRIORITY, NULL);
1623
1624 k_work_init(&ctx->irq_cb_work, dwt_irq_work_handler);
1625
1626 dwt_setup_int(dev, true);
1627
1628 LOG_INF("DW1000 device initialized and configured");
1629
1630 return 0;
1631 }
1632
1633 static inline uint8_t *get_mac(const struct device *dev)
1634 {
1635 struct dwt_context *dw1000 = dev->data;
1636 uint32_t *ptr = (uint32_t *)(dw1000->mac_addr);
1637
1638 UNALIGNED_PUT(sys_rand32_get(), ptr);
1639 ptr = (uint32_t *)(dw1000->mac_addr + 4);
1640 UNALIGNED_PUT(sys_rand32_get(), ptr);
1641
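/* Clear the group/multicast bit and set the locally administered bit of the EUI. */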
1642 dw1000->mac_addr[0] = (dw1000->mac_addr[0] & ~0x01) | 0x02;
1643
1644 return dw1000->mac_addr;
1645 }
1646
1647 static void dwt_iface_api_init(struct net_if *iface)
1648 {
1649 const struct device *dev = net_if_get_device(iface);
1650 struct dwt_context *dw1000 = dev->data;
1651 uint8_t *mac = get_mac(dev);
1652
1653 net_if_set_link_addr(iface, mac, 8, NET_LINK_IEEE802154);
1654
1655 dw1000->iface = iface;
1656
1657 ieee802154_init(iface);
1658
1659 LOG_INF("Iface initialized");
1660 }
1661
1662 static struct ieee802154_radio_api dwt_radio_api = {
1663 .iface_api.init = dwt_iface_api_init,
1664
1665 .get_capabilities = dwt_get_capabilities,
1666 .cca = dwt_cca,
1667 .set_channel = dwt_set_channel,
1668 .filter = dwt_filter,
1669 .set_txpower = dwt_set_power,
1670 .start = dwt_start,
1671 .stop = dwt_stop,
1672 .configure = dwt_configure,
1673 .ed_scan = dwt_ed,
1674 .tx = dwt_tx,
1675 .attr_get = dwt_attr_get,
1676 };
1677
1678 #define DWT_PSDU_LENGTH (127 - DWT_FCS_LENGTH)
1679
1680 #if defined(CONFIG_IEEE802154_RAW_MODE)
1681 DEVICE_DT_INST_DEFINE(0, dw1000_init, NULL,
1682 &dwt_0_context, &dw1000_0_config,
1683 POST_KERNEL, CONFIG_IEEE802154_DW1000_INIT_PRIO,
1684 &dwt_radio_api);
1685 #else
1686 NET_DEVICE_DT_INST_DEFINE(0,
1687 dw1000_init,
1688 NULL,
1689 &dwt_0_context,
1690 &dw1000_0_config,
1691 CONFIG_IEEE802154_DW1000_INIT_PRIO,
1692 &dwt_radio_api,
1693 IEEE802154_L2,
1694 NET_L2_GET_CTX_TYPE(IEEE802154_L2),
1695 DWT_PSDU_LENGTH);
1696 #endif
1697