1 /* ieee802154_mcr20a.c - NXP MCR20A driver */
2
3 #define DT_DRV_COMPAT nxp_mcr20a
4
5 /*
6 * Copyright (c) 2017 PHYTEC Messtechnik GmbH
7 *
8 * SPDX-License-Identifier: Apache-2.0
9 */
10
11 #define LOG_MODULE_NAME ieee802154_mcr20a
12 #define LOG_LEVEL CONFIG_IEEE802154_DRIVER_LOG_LEVEL
13
14 #include <zephyr/logging/log.h>
15 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
16
17 #include <errno.h>
18
19 #include <zephyr/kernel.h>
20 #include <zephyr/arch/cpu.h>
21 #include <zephyr/debug/stack.h>
22
23 #include <zephyr/device.h>
24 #include <zephyr/init.h>
25 #include <zephyr/net/net_if.h>
26 #include <zephyr/net/net_pkt.h>
27
28 #include <zephyr/sys/byteorder.h>
29 #include <string.h>
30 #include <zephyr/random/rand32.h>
31 #include <zephyr/debug/stack.h>
32
33 #include <zephyr/drivers/gpio.h>
34
35 #include <zephyr/net/ieee802154_radio.h>
36
37 #include "ieee802154_mcr20a.h"
38 #include "MCR20Overwrites.h"
39
40 /*
41 * max. TX duration = (PR + SFD + FLI + PDU + FCS)
42 * + RX_warmup + cca + TX_warmup
43 * TODO: Calculate the value from frame length.
44 * Invalid for the SLOTTED mode.
45 */
46 #define _MAX_PKT_TX_DURATION (133 + 9 + 8 + 9)
47
48 #if LOG_LEVEL == LOG_LEVEL_DBG
49 /* Prevent timer overflow during LOG_* output */
50 #define _MACACKWAITDURATION (864 / 16 + 11625)
51 #define MCR20A_SEQ_SYNC_TIMEOUT (200)
52 #else
53 #define MCR20A_SEQ_SYNC_TIMEOUT (20)
54 #define _MACACKWAITDURATION (864 / 16) /* 864us * 62500Hz */
55 #endif
56
57 #define MCR20A_FCS_LENGTH (2)
58 #define MCR20A_PSDU_LENGTH (125)
59 #define MCR20A_GET_SEQ_STATE_RETRIES (3)
60
61 /* Values for the clock output (CLK_OUT) configuration */
62 #ifdef CONFIG_MCR20A_CLK_OUT_DISABLED
63 #define MCR20A_CLK_OUT_CONFIG (MCR20A_CLK_OUT_HIZ)
64
65 #elif CONFIG_MCR20A_CLK_OUT_32MHZ
66 #define MCR20A_CLK_OUT_CONFIG (set_bits_clk_out_div(0) | MCR20A_CLK_OUT_DS |\
67 MCR20A_CLK_OUT_EN)
68
69 #elif CONFIG_MCR20A_CLK_OUT_16MHZ
70 #define MCR20A_CLK_OUT_CONFIG (set_bits_clk_out_div(1) | MCR20A_CLK_OUT_DS |\
71 MCR20A_CLK_OUT_EN)
72
73 #elif CONFIG_MCR20A_CLK_OUT_8MHZ
74 #define MCR20A_CLK_OUT_CONFIG (set_bits_clk_out_div(2) | MCR20A_CLK_OUT_EN)
75
76 #elif CONFIG_MCR20A_CLK_OUT_4MHZ
77 #define MCR20A_CLK_OUT_CONFIG (set_bits_clk_out_div(3) | MCR20A_CLK_OUT_EN)
78
79 #elif CONFIG_MCR20A_CLK_OUT_1MHZ
80 #define MCR20A_CLK_OUT_CONFIG (set_bits_clk_out_div(4) | MCR20A_CLK_OUT_EN)
81
82 #elif CONFIG_MCR20A_CLK_OUT_250KHZ
83 #define MCR20A_CLK_OUT_CONFIG (set_bits_clk_out_div(5) | MCR20A_CLK_OUT_EN)
84
85 #elif CONFIG_MCR20A_CLK_OUT_62500HZ
86 #define MCR20A_CLK_OUT_CONFIG (set_bits_clk_out_div(6) | MCR20A_CLK_OUT_EN)
87
88 #elif CONFIG_MCR20A_CLK_OUT_32768HZ
89 #define MCR20A_CLK_OUT_CONFIG (set_bits_clk_out_div(7) | MCR20A_CLK_OUT_EN)
90
91 #endif
92
93 #ifdef CONFIG_MCR20A_IS_PART_OF_KW2XD_SIP
94 #define PART_OF_KW2XD_SIP 1
95 #else
96 #define PART_OF_KW2XD_SIP 0
97 #endif
98
99 /* Values for the power mode (PM) configuration */
100 #define MCR20A_PM_HIBERNATE 0
101 #define MCR20A_PM_DOZE MCR20A_PWR_MODES_XTALEN
102 #define MCR20A_PM_IDLE (MCR20A_PWR_MODES_XTALEN |\
103 MCR20A_PWR_MODES_PMC_MODE)
104 #define MCR20A_PM_AUTODOZE (MCR20A_PWR_MODES_XTALEN |\
105 MCR20A_PWR_MODES_AUTODOZE)
106
107 /* Default settings for the device initialization */
108 #define MCR20A_DEFAULT_TX_POWER (0)
109 #define MCR20A_DEFAULT_CHANNEL (26)
110
111 /* RF TX power max/min values (dBm) */
112 #define MCR20A_OUTPUT_POWER_MAX (8)
113 #define MCR20A_OUTPUT_POWER_MIN (-35)
114
115 /* Lookup table for the Power Control register */
static const uint8_t pow_lt[44] = {
	/* Raw PA_PWR register values, one entry per dBm step from
	 * MCR20A_OUTPUT_POWER_MIN (-35 dBm) to MCR20A_OUTPUT_POWER_MAX
	 * (8 dBm); indexed as pow_lt[dbm - MCR20A_OUTPUT_POWER_MIN].
	 */
	3, 4, 5, 6,
	6, 7, 7, 8,
	8, 9, 9, 10,
	11, 11, 12, 13,
	13, 14, 14, 15,
	16, 16, 17, 18,
	18, 19, 20, 20,
	21, 21, 22, 23,
	23, 24, 25, 25,
	26, 27, 27, 28,
	28, 29, 30, 31
};
129
130 /* PLL integer and fractional lookup tables
131 *
132 * Fc = 2405 + 5(k - 11) , k = 11,12,...,26
133 *
134 * Equation for PLL frequency, MKW2xD Reference Manual, p.255 :
 * F = ((PLL_INT0 + 64) + (PLL_FRAC0/65536)) * 32MHz
136 *
137 */
static const uint8_t pll_int_lt[16] = {
	/* PLL_INT0 value for each channel; indexed by (channel - 11) */
	11, 11, 11, 11,
	11, 11, 12, 12,
	12, 12, 12, 12,
	13, 13, 13, 13
};
144
static const uint16_t pll_frac_lt[16] = {
	/* PLL_FRAC0 value for each channel; indexed by (channel - 11) */
	10240, 20480, 30720, 40960,
	51200, 61440, 6144, 16384,
	26624, 36864, 47104, 57344,
	2048, 12288, 22528, 32768
};
151
152 #define z_usleep(usec) k_busy_wait(usec)
153
154 /* Read direct (dreg is true) or indirect register (dreg is false) */
uint8_t z_mcr20a_read_reg(const struct device *dev, bool dreg, uint8_t addr)
{
	const struct mcr20a_config *config = dev->config;
	uint8_t xfer[3];
	uint8_t xfer_len;

	if (dreg) {
		/* Direct register: command byte, then one byte clocked in */
		xfer[0] = MCR20A_REG_READ | addr;
		xfer[1] = 0;
		xfer[2] = 0;
		xfer_len = 2;
	} else {
		/* Indirect register: select the IAR index, then read */
		xfer[0] = MCR20A_IAR_INDEX | MCR20A_REG_WRITE;
		xfer[1] = addr | MCR20A_REG_READ;
		xfer[2] = 0;
		xfer_len = 3;
	}

	const struct spi_buf buf = {
		.buf = xfer,
		.len = xfer_len
	};
	const struct spi_buf_set tx = {
		.buffers = &buf,
		.count = 1
	};
	const struct spi_buf_set rx = {
		.buffers = &buf,
		.count = 1
	};

	if (spi_transceive_dt(&config->bus, &tx, &rx) != 0) {
		LOG_ERR("Failed");
		return 0;
	}

	/* The register value arrives on the last byte of the transfer */
	return xfer[xfer_len - 1];
}
186
187 /* Write direct (dreg is true) or indirect register (dreg is false) */
bool z_mcr20a_write_reg(const struct device *dev, bool dreg, uint8_t addr,
			uint8_t value)
{
	const struct mcr20a_config *config = dev->config;
	uint8_t xfer[3];

	if (dreg) {
		/* Direct register: command byte followed by the value */
		xfer[0] = MCR20A_REG_WRITE | addr;
		xfer[1] = value;
		xfer[2] = 0;
	} else {
		/* Indirect register: IAR index, address, then the value */
		xfer[0] = MCR20A_IAR_INDEX | MCR20A_REG_WRITE;
		xfer[1] = addr | MCR20A_REG_WRITE;
		xfer[2] = value;
	}

	const struct spi_buf buf = {
		.buf = xfer,
		.len = dreg ? 2 : 3
	};
	const struct spi_buf_set tx = {
		.buffers = &buf,
		.count = 1
	};

	return spi_write_dt(&config->bus, &tx) == 0;
}
209
210 /* Write multiple bytes to direct or indirect register */
bool z_mcr20a_write_burst(const struct device *dev, bool dreg, uint16_t addr,
			  uint8_t *data_buf, uint8_t len)
{
	const struct mcr20a_config *config = dev->config;
	uint8_t hdr[2];
	uint8_t hdr_len;

	if (dreg) {
		/* Direct access needs only the command/address byte */
		hdr[0] = MCR20A_REG_WRITE | addr;
		hdr[1] = 0;
		hdr_len = 1;
	} else {
		/* Indirect access goes through the IAR index register */
		hdr[0] = MCR20A_IAR_INDEX | MCR20A_REG_WRITE;
		hdr[1] = addr | MCR20A_REG_WRITE;
		hdr_len = 2;
	}

	struct spi_buf bufs[2] = {
		{
			.buf = hdr,
			.len = hdr_len
		},
		{
			.buf = data_buf,
			.len = len
		}
	};
	const struct spi_buf_set tx = {
		.buffers = bufs,
		.count = 2
	};

	return spi_write_dt(&config->bus, &tx) == 0;
}
237
238 /* Read multiple bytes from direct or indirect register */
bool z_mcr20a_read_burst(const struct device *dev, bool dreg, uint16_t addr,
			 uint8_t *data_buf, uint8_t len)
{
	const struct mcr20a_config *config = dev->config;
	uint8_t hdr[2];
	uint8_t hdr_len;

	if (dreg) {
		hdr[0] = MCR20A_REG_READ | addr;
		hdr[1] = 0;
		hdr_len = 1;
	} else {
		hdr[0] = MCR20A_IAR_INDEX | MCR20A_REG_WRITE;
		hdr[1] = addr | MCR20A_REG_READ;
		hdr_len = 2;
	}

	struct spi_buf bufs[2] = {
		{
			.buf = hdr,
			.len = hdr_len
		},
		{
			.buf = data_buf,
			.len = len
		}
	};
	/* Only the header is transmitted; both buffers are clocked in,
	 * so the payload lands in data_buf.
	 */
	const struct spi_buf_set tx = {
		.buffers = bufs,
		.count = 1
	};
	const struct spi_buf_set rx = {
		.buffers = bufs,
		.count = 2
	};

	return spi_transceive_dt(&config->bus, &tx, &rx) == 0;
}
269
270 /* Mask (msk is true) or unmask all interrupts from asserting IRQ_B */
static bool mcr20a_mask_irqb(const struct device *dev, bool msk)
{
	/* Gate (or re-enable) all transceiver interrupts on IRQ_B */
	uint8_t ctrl4 = read_reg_phy_ctrl4(dev);
	uint8_t updated = msk ? (uint8_t)(ctrl4 | MCR20A_PHY_CTRL4_TRCV_MSK)
			      : (uint8_t)(ctrl4 & ~MCR20A_PHY_CTRL4_TRCV_MSK);

	return write_reg_phy_ctrl4(dev, updated);
}
283
/** Set a timeout value for the given compare register */
static int mcr20a_timer_set(const struct device *dev,
			    uint8_t cmp_reg,
			    uint32_t timeout)
{
	uint32_t now = 0U;
	uint32_t next;
	bool retval;

	/* Timeouts are relative: read the free-running event timer first */
	if (!read_burst_event_timer(dev, (uint8_t *)&now)) {
		goto error;
	}

	/* Timer registers are transferred LSB first over SPI */
	now = sys_le32_to_cpu(now);
	next = now + timeout;
	LOG_DBG("now: 0x%x set 0x%x", now, next);
	next = sys_cpu_to_le32(next);

	/* Write the absolute compare value into the selected T1..T4 register */
	switch (cmp_reg) {
	case 1:
		retval = write_burst_t1cmp(dev, (uint8_t *)&next);
		break;
	case 2:
		retval = write_burst_t2cmp(dev, (uint8_t *)&next);
		break;
	case 3:
		retval = write_burst_t3cmp(dev, (uint8_t *)&next);
		break;
	case 4:
		retval = write_burst_t4cmp(dev, (uint8_t *)&next);
		break;
	default:
		/* Only compare registers 1..4 exist */
		goto error;
	}

	if (!retval) {
		goto error;
	}

	return 0;

error:
	LOG_ERR("Failed");
	return -EIO;
}
329
static int mcr20a_timer_init(const struct device *dev, uint8_t tb)
{
	uint8_t zeroes[3] = {0, 0, 0};
	uint8_t ctrl4;

	/* Program the event-timer prescaler (timebase) */
	if (!write_reg_tmr_prescale(dev, set_bits_tmr_prescale(tb))) {
		goto error;
	}

	/* Zero the T1 compare register */
	if (!write_burst_t1cmp(dev, zeroes)) {
		goto error;
	}

	/* Latch the timer value via the TMRLOAD bit in PHY_CTRL4 */
	ctrl4 = read_reg_phy_ctrl4(dev);
	if (!write_reg_phy_ctrl4(dev, ctrl4 | MCR20A_PHY_CTRL4_TMRLOAD)) {
		goto error;
	}

	LOG_DBG("done, timebase %d", tb);
	return 0;

error:
	LOG_ERR("Failed");
	return -EIO;
}
357
358 /* Set Timer Comparator 4 */
static int mcr20a_t4cmp_set(const struct device *dev,
			    uint32_t timeout)
{
	uint8_t irqsts3;
	uint8_t ctrl3;

	/* Program the T4 compare value relative to the current timer */
	if (mcr20a_timer_set(dev, 4, timeout)) {
		goto error;
	}

	/* enable and clear irq for the timer 4 */
	irqsts3 = read_reg_irqsts3(dev);
	irqsts3 &= ~MCR20A_IRQSTS3_TMR4MSK;
	irqsts3 |= MCR20A_IRQSTS3_TMR4IRQ;
	if (!write_reg_irqsts3(dev, irqsts3)) {
		goto error;
	}

	/* Finally enable the T4 comparator itself */
	ctrl3 = read_reg_phy_ctrl3(dev);
	ctrl3 |= MCR20A_PHY_CTRL3_TMR4CMP_EN;
	if (!write_reg_phy_ctrl3(dev, ctrl3)) {
		goto error;
	}

	return 0;

error:
	LOG_DBG("Failed");
	return -EIO;
}
389
390 /* Clear Timer Comparator 4 */
static int mcr20a_t4cmp_clear(const struct device *dev)
{
	uint8_t irqsts3;
	uint8_t ctrl3;

	/* Disable the T4 comparator first so no new IRQ can fire */
	ctrl3 = read_reg_phy_ctrl3(dev);
	ctrl3 &= ~MCR20A_PHY_CTRL3_TMR4CMP_EN;
	if (!write_reg_phy_ctrl3(dev, ctrl3)) {
		goto error;
	}

	/* Then acknowledge any pending T4 interrupt (write-1-to-clear) */
	irqsts3 = read_reg_irqsts3(dev);
	irqsts3 |= MCR20A_IRQSTS3_TMR4IRQ;
	if (!write_reg_irqsts3(dev, irqsts3)) {
		goto error;
	}

	return 0;

error:
	LOG_DBG("Failed");
	return -EIO;
}
414
xcvseq_wait_until_idle(const struct device * dev)415 static inline void xcvseq_wait_until_idle(const struct device *dev)
416 {
417 uint8_t state;
418 uint8_t retries = MCR20A_GET_SEQ_STATE_RETRIES;
419
420 do {
421 state = read_reg_seq_state(dev);
422 retries--;
423 } while ((state & MCR20A_SEQ_STATE_MASK) && retries);
424
425 if (state & MCR20A_SEQ_STATE_MASK) {
426 LOG_ERR("Timeout");
427 }
428 }
429
static inline int mcr20a_abort_sequence(const struct device *dev,
					bool force)
{
	uint8_t ctrl1;

	ctrl1 = read_reg_phy_ctrl1(dev);
	LOG_DBG("CTRL1 0x%02x", ctrl1);

	/* Refuse to interrupt an ongoing TX / TX-RX sequence unless forced */
	if (((ctrl1 & MCR20A_PHY_CTRL1_XCVSEQ_MASK) == MCR20A_XCVSEQ_TX) ||
	    ((ctrl1 & MCR20A_PHY_CTRL1_XCVSEQ_MASK) == MCR20A_XCVSEQ_TX_RX)) {
		if (!force) {
			return -1;
		}
	}

	/* Abort ongoing sequence */
	ctrl1 &= ~MCR20A_PHY_CTRL1_XCVSEQ_MASK;
	if (!write_reg_phy_ctrl1(dev, ctrl1)) {
		return -1;
	}

	/* Wait for the sequence manager to actually reach idle */
	xcvseq_wait_until_idle(dev);

	/* Clear relevant interrupt flags */
	if (!write_reg_irqsts1(dev, MCR20A_IRQSTS1_IRQ_MASK)) {
		return -1;
	}

	return 0;
}
460
461 /* Initiate a (new) Transceiver Sequence */
/* Initiate a (new) Transceiver Sequence.
 *
 * Returns 0 on success, -EIO if a register write or the ACK-timeout
 * guard setup fails.
 */
static inline int mcr20a_set_sequence(const struct device *dev,
				      uint8_t seq)
{
	uint8_t ctrl1 = 0U;

	seq = set_bits_phy_ctrl1_xcvseq(seq);
	ctrl1 = read_reg_phy_ctrl1(dev);
	ctrl1 &= ~MCR20A_PHY_CTRL1_XCVSEQ_MASK;

	if ((seq == MCR20A_XCVSEQ_TX_RX) &&
	    (ctrl1 & MCR20A_PHY_CTRL1_RXACKRQD)) {
		/* RXACKRQD enabled, timer should be set. Propagate a
		 * failure here (previously ignored): without the T4
		 * guard a missing ACK would hang the sequence forever.
		 */
		if (mcr20a_t4cmp_set(dev, _MACACKWAITDURATION +
				     _MAX_PKT_TX_DURATION)) {
			return -EIO;
		}
	}

	ctrl1 |= seq;
	if (!write_reg_phy_ctrl1(dev, ctrl1)) {
		return -EIO;
	}

	return 0;
}
485
static inline uint32_t mcr20a_get_rssi(uint32_t lqi)
{
	/* Convert LQI (Link Quality Indicator) to the RSSI magnitude in dBm.
	 * Two datasheet equations exist:
	 *   RF = (LQI - 286.6) / 2.69333  (MKW2xD Reference Manual)
	 *   RF = (LQI - 295.4) / 2.84     (MCR20A Reference Manual)
	 * The second matches the datasheet graph (Figure 3-10) better.
	 * RSSI is always reported positive here, so the computation is
	 * done in 16.16 fixed point to avoid floating point at runtime:
	 *   -RF * 65536 = (LQI / 2.84 - 295.4 / 2.84) * 65536
	 *    RF * 65536 = (295.4 * 65536 / 2.84) - (LQI * 65536 / 2.84)
	 */
	const uint32_t offset = (uint32_t)(295.4 * 65536 / 2.84);
	const uint32_t slope = (uint32_t)(65536 / 2.84);

	return (offset - slope * lqi) >> 16;
}
504
get_mac(const struct device * dev)505 static inline uint8_t *get_mac(const struct device *dev)
506 {
507 struct mcr20a_context *mcr20a = dev->data;
508 uint32_t *ptr = (uint32_t *)(mcr20a->mac_addr);
509
510 UNALIGNED_PUT(sys_rand32_get(), ptr);
511 ptr = (uint32_t *)(mcr20a->mac_addr + 4);
512 UNALIGNED_PUT(sys_rand32_get(), ptr);
513
514 mcr20a->mac_addr[0] = (mcr20a->mac_addr[0] & ~0x01) | 0x02;
515
516 return mcr20a->mac_addr;
517 }
518
read_rxfifo_content(const struct device * dev,struct net_buf * buf,uint8_t len)519 static inline bool read_rxfifo_content(const struct device *dev,
520 struct net_buf *buf, uint8_t len)
521 {
522 const struct mcr20a_config *config = dev->config;
523 uint8_t cmd = MCR20A_BUF_READ;
524 struct spi_buf bufs[2] = {
525 {
526 .buf = &cmd,
527 .len = 1
528 },
529 {
530 .buf = buf->data,
531 .len = len
532 }
533 };
534 const struct spi_buf_set tx = {
535 .buffers = bufs,
536 .count = 1
537 };
538 const struct spi_buf_set rx = {
539 .buffers = bufs,
540 .count = 2
541 };
542
543 if (spi_transceive_dt(&config->bus, &tx, &rx) != 0) {
544 return false;
545 }
546
547 net_buf_add(buf, len);
548
549 return true;
550 }
551
/* Fetch a received frame from the RX FIFO and hand it to the net stack.
 * len is the raw frame length reported by the radio, including the FCS.
 */
static inline void mcr20a_rx(const struct device *dev, uint8_t len)
{
	struct mcr20a_context *mcr20a = dev->data;
	struct net_pkt *pkt = NULL;
	uint8_t pkt_len;

	/* Guard against a bogus frame length: len - MCR20A_FCS_LENGTH
	 * would underflow for len < 2 and lead to a huge allocation/read.
	 */
	if (len < MCR20A_FCS_LENGTH) {
		LOG_ERR("Invalid frame length %u", len);
		return;
	}

	/* The hardware checks the FCS; strip it before allocation */
	pkt_len = len - MCR20A_FCS_LENGTH;

	pkt = net_pkt_rx_alloc_with_buffer(mcr20a->iface, pkt_len,
					   AF_UNSPEC, 0, K_NO_WAIT);
	if (!pkt) {
		LOG_ERR("No buf available");
		goto out;
	}

	if (!read_rxfifo_content(dev, pkt->buffer, pkt_len)) {
		LOG_ERR("No content read");
		goto out;
	}

	if (ieee802154_radio_handle_ack(mcr20a->iface, pkt) == NET_OK) {
		LOG_DBG("ACK packet handled");
		goto out;
	}

	net_pkt_set_ieee802154_lqi(pkt, read_reg_lqi_value(dev));
	net_pkt_set_ieee802154_rssi(pkt, mcr20a_get_rssi(
					    net_pkt_ieee802154_lqi(pkt)));

	LOG_DBG("Caught a packet (%u) (LQI: %u, RSSI: %u)",
		pkt_len, net_pkt_ieee802154_lqi(pkt),
		net_pkt_ieee802154_rssi(pkt));

	if (net_recv_data(mcr20a->iface, pkt) < 0) {
		LOG_DBG("Packet dropped by NET stack");
		goto out;
	}

	log_stack_usage(&mcr20a->mcr20a_rx_thread);
	return;
out:
	if (pkt) {
		net_pkt_unref(pkt);
	}
}
597
598 /*
599 * The function checks how the XCV sequence has been completed
600 * and sets the variable seq_retval accordingly. It returns true
601 * if a new sequence is to be set. This function is only to be called
602 * when a sequence has been completed.
603 */
static inline bool irqsts1_event(const struct device *dev,
				 uint8_t *dregs)
{
	struct mcr20a_context *mcr20a = dev->data;
	/* Sequence that just completed, taken from the CTRL1 snapshot */
	uint8_t seq = dregs[MCR20A_PHY_CTRL1] & MCR20A_PHY_CTRL1_XCVSEQ_MASK;
	/* Sequence to program next; default back to receive */
	uint8_t new_seq = MCR20A_XCVSEQ_RECEIVE;
	bool retval = false;

	switch (seq) {
	case MCR20A_XCVSEQ_RECEIVE:
		if ((dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_RXIRQ)) {
			/* TXIRQ alongside RXIRQ means an ACK was sent */
			if ((dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_TXIRQ)) {
				LOG_DBG("Finished RxSeq + TxAck");
			} else {
				LOG_DBG("Finished RxSeq");
			}

			mcr20a_rx(dev, dregs[MCR20A_RX_FRM_LEN]);
			retval = true;
		}
		break;
	case MCR20A_XCVSEQ_TX:
	case MCR20A_XCVSEQ_TX_RX:
		/* CCA runs before TX: a set CCA status bit means the channel
		 * was busy and the frame was not transmitted.
		 */
		if (dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_CCAIRQ) {
			if (dregs[MCR20A_IRQSTS2] & MCR20A_IRQSTS2_CCA) {
				LOG_DBG("Finished CCA, CH busy");
				atomic_set(&mcr20a->seq_retval, -EBUSY);
				retval = true;
				break;
			}
		}

		if (dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_TXIRQ) {
			atomic_set(&mcr20a->seq_retval, 0);

			if ((dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_RXIRQ)) {
				LOG_DBG("Finished TxSeq + RxAck");
				/* Got Ack, timer should be disabled. */
				mcr20a_t4cmp_clear(dev);
			} else {
				LOG_DBG("Finished TxSeq");
			}

			retval = true;
		}
		break;
	case MCR20A_XCVSEQ_CONTINUOUS_CCA:
	case MCR20A_XCVSEQ_CCA:
		if ((dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_CCAIRQ)) {

			/* If CCCA, then timer should be disabled. */
			/* mcr20a_t4cmp_clear(dev); */

			if (dregs[MCR20A_IRQSTS2] & MCR20A_IRQSTS2_CCA) {
				LOG_DBG("Finished CCA, CH busy");
				atomic_set(&mcr20a->seq_retval, -EBUSY);
			} else {
				/*
				 * Assume that after the CCA,
				 * a transmit sequence follows and
				 * set here the sequence manager to Idle.
				 */
				LOG_DBG("Finished CCA, CH idle");
				new_seq = MCR20A_XCVSEQ_IDLE;
				atomic_set(&mcr20a->seq_retval, 0);
			}

			retval = true;
		}
		break;
	case MCR20A_XCVSEQ_IDLE:
	default:
		LOG_ERR("SEQ triggered, but XCVSEQ is in the Idle state");
		LOG_ERR("IRQSTS: 0x%02x", dregs[MCR20A_IRQSTS1]);
		break;
	}

	/* Patch the CTRL1 snapshot; when retval is true the caller writes
	 * it back, which starts new_seq.
	 */
	dregs[MCR20A_PHY_CTRL1] &= ~MCR20A_PHY_CTRL1_XCVSEQ_MASK;
	dregs[MCR20A_PHY_CTRL1] |= new_seq;

	return retval;
}
686
687 /*
688 * Check the Timer Comparator IRQ register IRQSTS3.
689 * Currently we use only T4CMP to cancel the running sequence,
690 * usually the TR.
691 */
static inline bool irqsts3_event(const struct device *dev,
				 uint8_t *dregs)
{
	struct mcr20a_context *mcr20a = dev->data;
	bool retval = false;

	if (dregs[MCR20A_IRQSTS3] & MCR20A_IRQSTS3_TMR4IRQ) {
		/* T4 expired: the guarded sequence (usually TR) timed out */
		LOG_DBG("Sequence timeout, IRQSTSs 0x%02x 0x%02x 0x%02x",
			dregs[MCR20A_IRQSTS1],
			dregs[MCR20A_IRQSTS2],
			dregs[MCR20A_IRQSTS3]);

		atomic_set(&mcr20a->seq_retval, -EBUSY);
		mcr20a_t4cmp_clear(dev);
		/* Fall back to the receive sequence in the CTRL1 snapshot */
		dregs[MCR20A_PHY_CTRL1] &= ~MCR20A_PHY_CTRL1_XCVSEQ_MASK;
		dregs[MCR20A_PHY_CTRL1] |= MCR20A_XCVSEQ_RECEIVE;

		/* Clear all interrupts */
		dregs[MCR20A_IRQSTS1] = MCR20A_IRQSTS1_IRQ_MASK;
		retval = true;
	} else {
		LOG_ERR("IRQSTS3 contains untreated IRQs: 0x%02x",
			dregs[MCR20A_IRQSTS3]);
	}

	return retval;
}
719
/* Driver thread: services IRQ_B events signalled by the GPIO ISR. */
static void mcr20a_thread_main(void *arg)
{
	const struct device *dev = arg;
	struct mcr20a_context *mcr20a = dev->data;
	/* Snapshot of the IRQSTS1..PHY_CTRL4 register window */
	uint8_t dregs[MCR20A_PHY_CTRL4 + 1];
	bool set_new_seq;
	uint8_t ctrl1 = 0U;

	while (true) {
		/* Wait until the IRQ_B ISR signals transceiver activity */
		k_sem_take(&mcr20a->isr_sem, K_FOREVER);

		k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
		set_new_seq = false;

		/* Keep IRQ_B masked while registers are read/written back */
		if (!mcr20a_mask_irqb(dev, true)) {
			LOG_ERR("Failed to mask IRQ_B");
			goto unmask_irqb;
		}

		/* Read the register from IRQSTS1 until CTRL4 */
		if (!read_burst_irqsts1_ctrl4(dev, dregs)) {
			LOG_ERR("Failed to read register");
			goto unmask_irqb;
		}
		/* make backup from PHY_CTRL1 register */
		ctrl1 = dregs[MCR20A_PHY_CTRL1];

		/* Timer (IRQSTS3) events take precedence over
		 * sequence-complete (IRQSTS1) events.
		 */
		if (dregs[MCR20A_IRQSTS3] & MCR20A_IRQSTS3_IRQ_MASK) {
			set_new_seq = irqsts3_event(dev, dregs);
		} else if (dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_SEQIRQ) {
			set_new_seq = irqsts1_event(dev, dregs);
		}

		if (dregs[MCR20A_IRQSTS2] & MCR20A_IRQSTS2_IRQ_MASK) {
			LOG_ERR("IRQSTS2 contains untreated IRQs: 0x%02x",
				dregs[MCR20A_IRQSTS2]);
		}

		LOG_DBG("WB: 0x%02x | 0x%02x | 0x%02x",
			dregs[MCR20A_IRQSTS1],
			dregs[MCR20A_IRQSTS2],
			dregs[MCR20A_IRQSTS3]);

		/* Write back register, clear IRQs and set new sequence */
		if (set_new_seq) {
			/* Reset sequence manager */
			ctrl1 &= ~MCR20A_PHY_CTRL1_XCVSEQ_MASK;
			if (!write_reg_phy_ctrl1(dev, ctrl1)) {
				LOG_ERR("Failed to reset SEQ manager");
			}

			xcvseq_wait_until_idle(dev);

			/* Write the patched snapshot incl. the new XCVSEQ */
			if (!write_burst_irqsts1_ctrl1(dev, dregs)) {
				LOG_ERR("Failed to write CTRL1");
			}
		} else {
			/* Only acknowledge the interrupt status registers */
			if (!write_burst_irqsts1_irqsts3(dev, dregs)) {
				LOG_ERR("Failed to write IRQSTS3");
			}
		}

unmask_irqb:
		if (!mcr20a_mask_irqb(dev, false)) {
			LOG_ERR("Failed to unmask IRQ_B");
		}

		k_mutex_unlock(&mcr20a->phy_mutex);

		/* Wake anyone blocked in mcr20a_cca()/mcr20a_tx() */
		if (set_new_seq) {
			k_sem_give(&mcr20a->seq_sync);
		}
	}
}
794
static inline void irqb_int_handler(const struct device *port,
				    struct gpio_callback *cb, uint32_t pins)
{
	/* Defer all IRQ_B handling to the driver thread */
	struct mcr20a_context *mcr20a =
		CONTAINER_OF(cb, struct mcr20a_context, irqb_cb);

	k_sem_give(&mcr20a->isr_sem);
}
803
static void enable_irqb_interrupt(const struct device *dev,
				  bool enable)
{
	const struct mcr20a_config *config = dev->config;

	/* Edge-to-active when enabled, fully disabled otherwise */
	if (enable) {
		gpio_pin_interrupt_configure_dt(&config->irq_gpio,
						GPIO_INT_EDGE_TO_ACTIVE);
	} else {
		gpio_pin_interrupt_configure_dt(&config->irq_gpio,
						GPIO_INT_DISABLE);
	}
}
814
setup_gpio_callbacks(const struct device * dev)815 static inline void setup_gpio_callbacks(const struct device *dev)
816 {
817 const struct mcr20a_config *config = dev->config;
818 struct mcr20a_context *mcr20a = dev->data;
819
820 gpio_init_callback(&mcr20a->irqb_cb,
821 irqb_int_handler,
822 BIT(config->irq_gpio.pin));
823 gpio_add_callback(config->irq_gpio.port, &mcr20a->irqb_cb);
824 }
825
static int mcr20a_set_cca_mode(const struct device *dev, uint8_t mode)
{
	/* Replace the CCATYPE field in PHY_CTRL4 with the new mode */
	uint8_t ctrl4 = read_reg_phy_ctrl4(dev);

	ctrl4 = (ctrl4 & ~MCR20A_PHY_CTRL4_CCATYPE_MASK) |
		set_bits_phy_ctrl4_ccatype(mode);

	if (write_reg_phy_ctrl4(dev, ctrl4)) {
		return 0;
	}

	LOG_ERR("Failed");
	return -EIO;
}
841
mcr20a_get_capabilities(const struct device * dev)842 static enum ieee802154_hw_caps mcr20a_get_capabilities(const struct device *dev)
843 {
844 return IEEE802154_HW_FCS |
845 IEEE802154_HW_2_4_GHZ |
846 IEEE802154_HW_TX_RX_ACK |
847 IEEE802154_HW_FILTER;
848 }
849
850 /* Note: CCA before TX is enabled by default */
static int mcr20a_cca(const struct device *dev)
{
	struct mcr20a_context *mcr20a = dev->data;
	int retval;

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);

	/* Keep IRQ_B masked while the sequence registers are reprogrammed */
	if (!mcr20a_mask_irqb(dev, true)) {
		LOG_ERR("Failed to mask IRQ_B");
		goto error;
	}

	/* Re-arm the completion semaphore before starting the sequence */
	k_sem_init(&mcr20a->seq_sync, 0, 1);

	if (mcr20a_abort_sequence(dev, false)) {
		LOG_ERR("Failed to reset XCV sequence");
		goto error;
	}

	LOG_DBG("start CCA sequence");

	if (mcr20a_set_sequence(dev, MCR20A_XCVSEQ_CCA)) {
		LOG_ERR("Failed to reset XCV sequence");
		goto error;
	}

	if (!mcr20a_mask_irqb(dev, false)) {
		LOG_ERR("Failed to unmask IRQ_B");
		goto error;
	}

	k_mutex_unlock(&mcr20a->phy_mutex);
	/* Block until the driver thread reports sequence completion */
	retval = k_sem_take(&mcr20a->seq_sync,
			    K_MSEC(MCR20A_SEQ_SYNC_TIMEOUT));
	if (retval) {
		LOG_ERR("Timeout occurred, %d", retval);
		return retval;
	}

	LOG_DBG("done");

	/* 0 when the channel was idle, -EBUSY otherwise (set by the
	 * driver thread in irqsts1_event()).
	 */
	return mcr20a->seq_retval;

error:
	k_mutex_unlock(&mcr20a->phy_mutex);
	return -EIO;
}
898
static int mcr20a_set_channel(const struct device *dev, uint16_t channel)
{
	struct mcr20a_context *mcr20a = dev->data;
	uint8_t buf[3];
	uint8_t ctrl1;
	int retval = -EIO;

	/* Only the 2.4 GHz O-QPSK channels 11..26 are supported */
	if (channel < 11 || channel > 26) {
		LOG_ERR("Unsupported channel %u", channel);
		return -EINVAL;
	}

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);

	if (!mcr20a_mask_irqb(dev, true)) {
		LOG_ERR("Failed to mask IRQ_B");
		goto out;
	}

	/* Remember the active sequence so it can be restored afterwards */
	ctrl1 = read_reg_phy_ctrl1(dev);

	/* The PLL may only be reprogrammed with the sequencer idle */
	if (mcr20a_abort_sequence(dev, true)) {
		LOG_ERR("Failed to reset XCV sequence");
		goto out;
	}

	LOG_DBG("%u", channel);
	channel -= 11U;
	/* PLL integer and fractional parts from the lookup tables;
	 * the fractional part is written LSB first.
	 */
	buf[0] = set_bits_pll_int0_val(pll_int_lt[channel]);
	buf[1] = (uint8_t)pll_frac_lt[channel];
	buf[2] = (uint8_t)(pll_frac_lt[channel] >> 8);

	if (!write_burst_pll_int0(dev, buf)) {
		LOG_ERR("Failed to set PLL");
		goto out;
	}

	if (mcr20a_set_sequence(dev, ctrl1)) {
		LOG_ERR("Failed to restore XCV sequence");
		goto out;
	}

	retval = 0;

out:
	if (!mcr20a_mask_irqb(dev, false)) {
		LOG_ERR("Failed to unmask IRQ_B");
		retval = -EIO;
	}

	k_mutex_unlock(&mcr20a->phy_mutex);

	return retval;
}
953
static int mcr20a_set_pan_id(const struct device *dev, uint16_t pan_id)
{
	struct mcr20a_context *mcr20a = dev->data;
	int ret = 0;

	/* The radio expects the PAN ID in little-endian byte order */
	pan_id = sys_le16_to_cpu(pan_id);
	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);

	if (!write_burst_pan_id(dev, (uint8_t *) &pan_id)) {
		LOG_ERR("Failed");
		ret = -EIO;
	}

	k_mutex_unlock(&mcr20a->phy_mutex);

	if (ret == 0) {
		LOG_DBG("0x%x", pan_id);
	}

	return ret;
}
972
static int mcr20a_set_short_addr(const struct device *dev,
				 uint16_t short_addr)
{
	struct mcr20a_context *mcr20a = dev->data;
	int ret = 0;

	/* The radio expects the short address in little-endian byte order */
	short_addr = sys_le16_to_cpu(short_addr);
	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);

	if (!write_burst_short_addr(dev, (uint8_t *) &short_addr)) {
		LOG_ERR("Failed");
		ret = -EIO;
	}

	k_mutex_unlock(&mcr20a->phy_mutex);

	if (ret == 0) {
		LOG_DBG("0x%x", short_addr);
	}

	return ret;
}
992
mcr20a_set_ieee_addr(const struct device * dev,const uint8_t * ieee_addr)993 static int mcr20a_set_ieee_addr(const struct device *dev,
994 const uint8_t *ieee_addr)
995 {
996 struct mcr20a_context *mcr20a = dev->data;
997
998 k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
999
1000 if (!write_burst_ext_addr(dev, (void *)ieee_addr)) {
1001 LOG_ERR("Failed");
1002 k_mutex_unlock(&mcr20a->phy_mutex);
1003 return -EIO;
1004 }
1005
1006 k_mutex_unlock(&mcr20a->phy_mutex);
1007 LOG_DBG("IEEE address %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1008 ieee_addr[7], ieee_addr[6], ieee_addr[5], ieee_addr[4],
1009 ieee_addr[3], ieee_addr[2], ieee_addr[1], ieee_addr[0]);
1010
1011 return 0;
1012 }
1013
static int mcr20a_filter(const struct device *dev,
			 bool set,
			 enum ieee802154_filter_type type,
			 const struct ieee802154_filter *filter)
{
	LOG_DBG("Applying filter %u", type);

	/* Removing filters is not supported by this driver */
	if (!set) {
		return -ENOTSUP;
	}

	switch (type) {
	case IEEE802154_FILTER_TYPE_IEEE_ADDR:
		return mcr20a_set_ieee_addr(dev, filter->ieee_addr);
	case IEEE802154_FILTER_TYPE_SHORT_ADDR:
		return mcr20a_set_short_addr(dev, filter->short_addr);
	case IEEE802154_FILTER_TYPE_PAN_ID:
		return mcr20a_set_pan_id(dev, filter->pan_id);
	default:
		return -ENOTSUP;
	}
}
1035
static int mcr20a_set_txpower(const struct device *dev, int16_t dbm)
{
	struct mcr20a_context *mcr20a = dev->data;

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
	LOG_DBG("%d", dbm);

	/* Map the dBm value onto a PA_PWR register setting via pow_lt */
	if (dbm >= MCR20A_OUTPUT_POWER_MIN && dbm <= MCR20A_OUTPUT_POWER_MAX) {
		uint8_t pwr = pow_lt[dbm - MCR20A_OUTPUT_POWER_MIN];

		if (write_reg_pa_pwr(dev, set_bits_pa_pwr_val(pwr))) {
			k_mutex_unlock(&mcr20a->phy_mutex);
			return 0;
		}
	}

	k_mutex_unlock(&mcr20a->phy_mutex);
	LOG_DBG("Failed");
	return -EIO;
}
1062
write_txfifo_content(const struct device * dev,struct net_pkt * pkt,struct net_buf * frag)1063 static inline bool write_txfifo_content(const struct device *dev,
1064 struct net_pkt *pkt,
1065 struct net_buf *frag)
1066 {
1067 const struct mcr20a_config *config = dev->config;
1068 size_t payload_len = frag->len;
1069 uint8_t cmd_buf[2] = {
1070 MCR20A_BUF_WRITE,
1071 payload_len + MCR20A_FCS_LENGTH
1072 };
1073 const struct spi_buf bufs[2] = {
1074 {
1075 .buf = cmd_buf,
1076 .len = 2
1077 },
1078 {
1079 .buf = frag->data,
1080 .len = payload_len
1081 }
1082 };
1083 const struct spi_buf_set tx = {
1084 .buffers = bufs,
1085 .count = 2
1086 };
1087
1088 if (payload_len > MCR20A_PSDU_LENGTH) {
1089 LOG_ERR("Payload too long");
1090 return 0;
1091 }
1092
1093 return (spi_write_dt(&config->bus, &tx) == 0);
1094 }
1095
static int mcr20a_tx(const struct device *dev,
		     enum ieee802154_tx_mode mode,
		     struct net_pkt *pkt,
		     struct net_buf *frag)
{
	struct mcr20a_context *mcr20a = dev->data;
	/* Use the TX-then-RX sequence when the frame requests an ACK */
	uint8_t seq = ieee802154_is_ar_flag_set(frag) ? MCR20A_XCVSEQ_TX_RX :
		      MCR20A_XCVSEQ_TX;
	int retval;

	if (mode != IEEE802154_TX_MODE_DIRECT) {
		NET_ERR("TX mode %d not supported", mode);
		return -ENOTSUP;
	}

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);

	LOG_DBG("%p (%u)", frag, frag->len);

	/* Keep IRQ_B masked while the sequence registers are reprogrammed */
	if (!mcr20a_mask_irqb(dev, true)) {
		LOG_ERR("Failed to mask IRQ_B");
		goto error;
	}

	if (mcr20a_abort_sequence(dev, false)) {
		LOG_ERR("Failed to reset XCV sequence");
		goto error;
	}

	if (!write_txfifo_content(dev, pkt, frag)) {
		LOG_ERR("Did not write properly into TX FIFO");
		goto error;
	}

	/* Re-arm the completion semaphore before starting the sequence */
	k_sem_init(&mcr20a->seq_sync, 0, 1);

	if (mcr20a_set_sequence(dev, seq)) {
		LOG_ERR("Cannot start transmission");
		goto error;
	}

	if (!mcr20a_mask_irqb(dev, false)) {
		LOG_ERR("Failed to unmask IRQ_B");
		goto error;
	}

	k_mutex_unlock(&mcr20a->phy_mutex);
	/* Block until the driver thread reports sequence completion */
	retval = k_sem_take(&mcr20a->seq_sync,
			    K_MSEC(MCR20A_SEQ_SYNC_TIMEOUT));
	if (retval) {
		LOG_ERR("Timeout occurred, %d", retval);
		return retval;
	}

	LOG_DBG("done");

	/* 0 on success, -EBUSY on CCA failure (set by the driver thread) */
	return mcr20a->seq_retval;

error:
	k_mutex_unlock(&mcr20a->phy_mutex);
	return -EIO;
}
1158
static int mcr20a_start(const struct device *dev)
{
	struct mcr20a_context *mcr20a = dev->data;
	uint8_t timeout = 6U;
	uint8_t status;

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
	enable_irqb_interrupt(dev, false);

	/* Leave low-power mode; AUTODOZE keeps the crystal enabled */
	if (!write_reg_pwr_modes(dev, MCR20A_PM_AUTODOZE)) {
		LOG_ERR("Error starting MCR20A");
		goto error;
	}

	/* Poll up to 6 x 50us for the crystal oscillator to become ready */
	do {
		z_usleep(50);
		timeout--;
		status = read_reg_pwr_modes(dev);
	} while (!(status & MCR20A_PWR_MODES_XTAL_READY) && timeout);

	if (!(status & MCR20A_PWR_MODES_XTAL_READY)) {
		LOG_ERR("Timeout, failed to wake up");
		goto error;
	}

	/* Clear all interrupt flags */
	write_reg_irqsts1(dev, MCR20A_IRQSTS1_IRQ_MASK);
	write_reg_irqsts2(dev, MCR20A_IRQSTS2_IRQ_MASK);
	write_reg_irqsts3(dev, MCR20A_IRQSTS3_IRQ_MASK |
			  MCR20A_IRQSTS3_TMR_MASK);

	if (mcr20a_abort_sequence(dev, true)) {
		LOG_ERR("Failed to reset XCV sequence");
		goto error;
	}

	/* Start listening */
	if (mcr20a_set_sequence(dev, MCR20A_XCVSEQ_RECEIVE)) {
		LOG_ERR("Failed to set XCV sequence");
		goto error;
	}

	enable_irqb_interrupt(dev, true);

	if (!mcr20a_mask_irqb(dev, false)) {
		LOG_ERR("Failed to unmask IRQ_B");
		goto error;
	}

	k_mutex_unlock(&mcr20a->phy_mutex);
	LOG_DBG("started");

	return 0;

error:
	k_mutex_unlock(&mcr20a->phy_mutex);
	return -EIO;
}
1216
mcr20a_stop(const struct device * dev)1217 static int mcr20a_stop(const struct device *dev)
1218 {
1219 struct mcr20a_context *mcr20a = dev->data;
1220 uint8_t power_mode;
1221
1222 k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
1223
1224 if (!mcr20a_mask_irqb(dev, true)) {
1225 LOG_ERR("Failed to mask IRQ_B");
1226 goto error;
1227 }
1228
1229 if (mcr20a_abort_sequence(dev, true)) {
1230 LOG_ERR("Failed to reset XCV sequence");
1231 goto error;
1232 }
1233
1234 enable_irqb_interrupt(dev, false);
1235
1236 if (PART_OF_KW2XD_SIP) {
1237 power_mode = MCR20A_PM_DOZE;
1238 } else {
1239 power_mode = MCR20A_PM_HIBERNATE;
1240 }
1241
1242 if (!write_reg_pwr_modes(dev, power_mode)) {
1243 goto error;
1244 }
1245
1246 LOG_DBG("stopped");
1247 k_mutex_unlock(&mcr20a->phy_mutex);
1248
1249 return 0;
1250
1251 error:
1252 k_mutex_unlock(&mcr20a->phy_mutex);
1253 LOG_ERR("Error stopping MCR20A");
1254 return -EIO;
1255 }
1256
mcr20a_update_overwrites(const struct device * dev)1257 static int mcr20a_update_overwrites(const struct device *dev)
1258 {
1259 if (!write_reg_overwrite_ver(dev, overwrites_direct[0].data)) {
1260 goto error;
1261 }
1262
1263 for (uint8_t i = 0;
1264 i < sizeof(overwrites_indirect) / sizeof(overwrites_t);
1265 i++) {
1266
1267 if (!z_mcr20a_write_reg(dev, false,
1268 overwrites_indirect[i].address,
1269 overwrites_indirect[i].data)) {
1270 goto error;
1271 }
1272 }
1273
1274 return 0;
1275
1276 error:
1277 LOG_ERR("Error update overwrites");
1278 return -EIO;
1279 }
1280
power_on_and_setup(const struct device * dev)1281 static int power_on_and_setup(const struct device *dev)
1282 {
1283 const struct mcr20a_config *config = dev->config;
1284 uint8_t timeout = 6U;
1285 int pin;
1286 uint8_t tmp = 0U;
1287
1288 if (!PART_OF_KW2XD_SIP) {
1289 gpio_pin_set_dt(&config->reset_gpio, 1);
1290 z_usleep(150);
1291 gpio_pin_set_dt(&config->reset_gpio, 0);
1292
1293 do {
1294 z_usleep(50);
1295 timeout--;
1296 pin = gpio_pin_get_dt(&config->irq_gpio);
1297 } while (pin > 0 && timeout);
1298
1299 if (pin) {
1300 LOG_ERR("Timeout, failed to get WAKE IRQ");
1301 return -EIO;
1302 }
1303
1304 }
1305
1306 tmp = MCR20A_CLK_OUT_CONFIG | MCR20A_CLK_OUT_EXTEND;
1307 write_reg_clk_out_ctrl(dev, tmp);
1308
1309 if (read_reg_clk_out_ctrl(dev) != tmp) {
1310 LOG_ERR("Failed to get device up");
1311 return -EIO;
1312 }
1313
1314 /* Clear all interrupt flags */
1315 write_reg_irqsts1(dev, MCR20A_IRQSTS1_IRQ_MASK);
1316 write_reg_irqsts2(dev, MCR20A_IRQSTS2_IRQ_MASK);
1317 write_reg_irqsts3(dev, MCR20A_IRQSTS3_IRQ_MASK |
1318 MCR20A_IRQSTS3_TMR_MASK);
1319
1320 mcr20a_update_overwrites(dev);
1321 mcr20a_timer_init(dev, MCR20A_TIMEBASE_62500HZ);
1322
1323 mcr20a_set_txpower(dev, MCR20A_DEFAULT_TX_POWER);
1324 mcr20a_set_channel(dev, MCR20A_DEFAULT_CHANNEL);
1325 mcr20a_set_cca_mode(dev, 1);
1326 write_reg_rx_wtr_mark(dev, 8);
1327
1328 /* Configure PHY behaviour */
1329 tmp = MCR20A_PHY_CTRL1_CCABFRTX |
1330 MCR20A_PHY_CTRL1_AUTOACK |
1331 MCR20A_PHY_CTRL1_RXACKRQD;
1332 write_reg_phy_ctrl1(dev, tmp);
1333
1334 /* Enable Sequence-end interrupt */
1335 tmp = MCR20A_PHY_CTRL2_SEQMSK;
1336 write_reg_phy_ctrl2(dev, ~tmp);
1337
1338 setup_gpio_callbacks(dev);
1339
1340 return 0;
1341 }
1342
1343
configure_gpios(const struct device * dev)1344 static inline int configure_gpios(const struct device *dev)
1345 {
1346 const struct mcr20a_config *config = dev->config;
1347
1348 /* setup gpio for the modem interrupt */
1349 if (!device_is_ready(config->irq_gpio.port)) {
1350 LOG_ERR("IRQ GPIO device not ready");
1351 return -ENODEV;
1352 }
1353
1354 gpio_pin_configure_dt(&config->irq_gpio, GPIO_INPUT);
1355
1356 if (!PART_OF_KW2XD_SIP) {
1357 /* setup gpio for the modems reset */
1358 if (!device_is_ready(config->reset_gpio.port)) {
1359 LOG_ERR("Reset GPIO device not ready");
1360 return -EINVAL;
1361 }
1362
1363 gpio_pin_configure_dt(&config->reset_gpio, GPIO_OUTPUT_ACTIVE);
1364 }
1365
1366 return 0;
1367 }
1368
/*
 * Device init hook: set up synchronization primitives, GPIOs, the SPI
 * bus check, power-on configuration, and spawn the RX handler thread.
 *
 * Returns 0 on success, -EIO on any setup failure.
 */
static int mcr20a_init(const struct device *dev)
{
	const struct mcr20a_config *config = dev->config;
	struct mcr20a_context *mcr20a = dev->data;

	k_mutex_init(&mcr20a->phy_mutex);
	k_sem_init(&mcr20a->isr_sem, 0, 1);

	LOG_DBG("\nInitialize MCR20A Transceiver\n");

	if (configure_gpios(dev) != 0) {
		LOG_ERR("Configuring GPIOS failed");
		return -EIO;
	}

	if (!spi_is_ready_dt(&config->bus)) {
		LOG_ERR("Configuring SPI failed");
		return -EIO;
	}

	LOG_DBG("GPIO and SPI configured");

	if (power_on_and_setup(dev) != 0) {
		LOG_ERR("Configuring MCR20A failed");
		return -EIO;
	}

	/* NOTE(review): the cast assumes mcr20a_thread_main takes fewer
	 * than the three k_thread_entry_t parameters and ignores the
	 * rest -- confirm against its definition; calling through a
	 * mismatched function-pointer type is formally UB.
	 */
	k_thread_create(&mcr20a->mcr20a_rx_thread, mcr20a->mcr20a_rx_stack,
			CONFIG_IEEE802154_MCR20A_RX_STACK_SIZE,
			(k_thread_entry_t)mcr20a_thread_main,
			(void *)dev, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT);
	k_thread_name_set(&mcr20a->mcr20a_rx_thread, "mcr20a_rx");

	return 0;
}
1404
mcr20a_iface_init(struct net_if * iface)1405 static void mcr20a_iface_init(struct net_if *iface)
1406 {
1407 const struct device *dev = net_if_get_device(iface);
1408 struct mcr20a_context *mcr20a = dev->data;
1409 uint8_t *mac = get_mac(dev);
1410
1411 net_if_set_link_addr(iface, mac, 8, NET_LINK_IEEE802154);
1412
1413 mcr20a->iface = iface;
1414
1415 ieee802154_init(iface);
1416
1417 LOG_DBG("done");
1418 }
1419
/* Devicetree-derived static configuration: SPI bus (8-bit words) plus
 * the IRQ_B and reset GPIO specs for instance 0.
 */
static const struct mcr20a_config mcr20a_config = {
	.bus = SPI_DT_SPEC_INST_GET(0, SPI_WORD_SET(8), 0),
	.irq_gpio = GPIO_DT_SPEC_INST_GET(0, irqb_gpios),
	.reset_gpio = GPIO_DT_SPEC_INST_GET(0, reset_gpios),
};

/* Mutable per-device runtime state (zero-initialized in BSS). */
static struct mcr20a_context mcr20a_context_data;
1427
/* 802.15.4 radio driver operations exported to the L2 layer. */
static struct ieee802154_radio_api mcr20a_radio_api = {
	.iface_api.init = mcr20a_iface_init,

	.get_capabilities = mcr20a_get_capabilities,
	.cca = mcr20a_cca,
	.set_channel = mcr20a_set_channel,
	.filter = mcr20a_filter,
	.set_txpower = mcr20a_set_txpower,
	.start = mcr20a_start,
	.stop = mcr20a_stop,
	.tx = mcr20a_tx,
};
1440
/* In raw mode the radio is exposed as a bare device (no L2/network
 * interface); otherwise register it as an 802.15.4 network device.
 */
#if defined(CONFIG_IEEE802154_RAW_MODE)
DEVICE_DT_INST_DEFINE(0, mcr20a_init, NULL, &mcr20a_context_data,
		      &mcr20a_config, POST_KERNEL,
		      CONFIG_IEEE802154_MCR20A_INIT_PRIO, &mcr20a_radio_api);
#else
NET_DEVICE_DT_INST_DEFINE(0, mcr20a_init, NULL, &mcr20a_context_data,
			  &mcr20a_config, CONFIG_IEEE802154_MCR20A_INIT_PRIO,
			  &mcr20a_radio_api, IEEE802154_L2,
			  NET_L2_GET_CTX_TYPE(IEEE802154_L2),
			  MCR20A_PSDU_LENGTH);
#endif