1 /* ieee802154_mcr20a.c - NXP MCR20A driver */
2 
3 #define DT_DRV_COMPAT nxp_mcr20a
4 
5 /*
6  * Copyright (c) 2017 PHYTEC Messtechnik GmbH
7  *
8  * SPDX-License-Identifier: Apache-2.0
9  */
10 
11 #define LOG_MODULE_NAME ieee802154_mcr20a
12 #define LOG_LEVEL CONFIG_IEEE802154_DRIVER_LOG_LEVEL
13 
14 #include <zephyr/logging/log.h>
15 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
16 
17 #include <errno.h>
18 
19 #include <zephyr/kernel.h>
20 #include <zephyr/arch/cpu.h>
21 #include <zephyr/debug/stack.h>
22 
23 #include <zephyr/device.h>
24 #include <zephyr/init.h>
25 #include <zephyr/net/net_if.h>
26 #include <zephyr/net/net_pkt.h>
27 
28 #include <zephyr/sys/byteorder.h>
29 #include <string.h>
30 #include <zephyr/random/random.h>
31 #include <zephyr/debug/stack.h>
32 
33 #include <zephyr/drivers/gpio.h>
34 
35 #include <zephyr/net/ieee802154_radio.h>
36 
37 #include "ieee802154_mcr20a.h"
38 #include "MCR20Overwrites.h"
39 
40 /*
41  * max. TX duration = (PR + SFD + FLI + PDU + FCS)
42  *                 + RX_warmup + cca + TX_warmup
43  * TODO: Calculate the value from frame length.
44  * Invalid for the SLOTTED mode.
45  */
46 #define _MAX_PKT_TX_DURATION		(133 + 9 + 8 + 9)
47 
48 #if LOG_LEVEL == LOG_LEVEL_DBG
49 /* Prevent timer overflow during LOG_* output */
50 #define _MACACKWAITDURATION		(864 / 16 + 11625)
51 #define MCR20A_SEQ_SYNC_TIMEOUT		(200)
52 #else
53 #define MCR20A_SEQ_SYNC_TIMEOUT		(20)
54 #define _MACACKWAITDURATION		(864 / 16) /* 864us * 62500Hz */
55 #endif
56 
57 #define MCR20A_FCS_LENGTH		(2)
58 #define MCR20A_PSDU_LENGTH		(125)
59 #define MCR20A_GET_SEQ_STATE_RETRIES	(3)
60 
61 /* Values for the clock output (CLK_OUT) configuration */
62 #ifdef CONFIG_MCR20A_CLK_OUT_DISABLED
63 #define MCR20A_CLK_OUT_CONFIG	(MCR20A_CLK_OUT_HIZ)
64 
65 #elif CONFIG_MCR20A_CLK_OUT_32MHZ
66 #define MCR20A_CLK_OUT_CONFIG	(set_bits_clk_out_div(0) | MCR20A_CLK_OUT_DS |\
67 				 MCR20A_CLK_OUT_EN)
68 
69 #elif CONFIG_MCR20A_CLK_OUT_16MHZ
70 #define MCR20A_CLK_OUT_CONFIG	(set_bits_clk_out_div(1) | MCR20A_CLK_OUT_DS |\
71 				 MCR20A_CLK_OUT_EN)
72 
73 #elif CONFIG_MCR20A_CLK_OUT_8MHZ
74 #define MCR20A_CLK_OUT_CONFIG	(set_bits_clk_out_div(2) | MCR20A_CLK_OUT_EN)
75 
76 #elif CONFIG_MCR20A_CLK_OUT_4MHZ
77 #define MCR20A_CLK_OUT_CONFIG	(set_bits_clk_out_div(3) | MCR20A_CLK_OUT_EN)
78 
79 #elif CONFIG_MCR20A_CLK_OUT_1MHZ
80 #define MCR20A_CLK_OUT_CONFIG	(set_bits_clk_out_div(4) | MCR20A_CLK_OUT_EN)
81 
82 #elif CONFIG_MCR20A_CLK_OUT_250KHZ
83 #define MCR20A_CLK_OUT_CONFIG	(set_bits_clk_out_div(5) | MCR20A_CLK_OUT_EN)
84 
85 #elif CONFIG_MCR20A_CLK_OUT_62500HZ
86 #define MCR20A_CLK_OUT_CONFIG	(set_bits_clk_out_div(6) | MCR20A_CLK_OUT_EN)
87 
88 #elif CONFIG_MCR20A_CLK_OUT_32768HZ
89 #define MCR20A_CLK_OUT_CONFIG	(set_bits_clk_out_div(7) | MCR20A_CLK_OUT_EN)
90 
91 #endif
92 
93 #ifdef CONFIG_MCR20A_IS_PART_OF_KW2XD_SIP
94 #define PART_OF_KW2XD_SIP	1
95 #else
96 #define PART_OF_KW2XD_SIP	0
97 #endif
98 
99 /* Values for the power mode (PM) configuration */
100 #define MCR20A_PM_HIBERNATE	0
101 #define MCR20A_PM_DOZE		MCR20A_PWR_MODES_XTALEN
102 #define MCR20A_PM_IDLE		(MCR20A_PWR_MODES_XTALEN |\
103 				 MCR20A_PWR_MODES_PMC_MODE)
104 #define MCR20A_PM_AUTODOZE	(MCR20A_PWR_MODES_XTALEN |\
105 				 MCR20A_PWR_MODES_AUTODOZE)
106 
107 /* Default settings for the device initialization */
108 #define MCR20A_DEFAULT_TX_POWER	(0)
109 #define MCR20A_DEFAULT_CHANNEL	(26)
110 
111 /* RF TX power max/min values (dBm) */
112 #define MCR20A_OUTPUT_POWER_MAX	(8)
113 #define MCR20A_OUTPUT_POWER_MIN	(-35)
114 
/* Lookup table for the Power Control (PA_PWR) register.
 * Indexed by (dbm - MCR20A_OUTPUT_POWER_MIN): index 0 maps to -35 dBm,
 * index 43 to +8 dBm. Used by mcr20a_set_txpower().
 */
static const uint8_t pow_lt[44] = {
	3, 4, 5, 6,
	6, 7, 7, 8,
	8, 9, 9, 10,
	11, 11, 12, 13,
	13, 14, 14, 15,
	16, 16, 17, 18,
	18, 19, 20, 20,
	21, 21, 22, 23,
	23, 24, 25, 25,
	26, 27, 27, 28,
	28, 29, 30, 31
};
129 
/* PLL integer and fractional lookup tables, indexed by (channel - 11).
 *
 * Fc = 2405 + 5(k - 11) , k = 11,12,...,26
 *
 * Equation for PLL frequency, MKW2xD Reference Manual, p.255 :
 * F = ((PLL_INT0 + 64) + (PLL_FRAC0/65536))32MHz
 *
 * mcr20a_set_channel() writes pll_int_lt[ch] into PLL_INT0 and splits
 * pll_frac_lt[ch] little-endian into the two PLL_FRAC0 bytes.
 */
static const uint8_t pll_int_lt[16] = {
	11, 11, 11, 11,
	11, 11, 12, 12,
	12, 12, 12, 12,
	13, 13, 13, 13
};

static const uint16_t pll_frac_lt[16] = {
	10240, 20480, 30720, 40960,
	51200, 61440, 6144, 16384,
	26624, 36864, 47104, 57344,
	2048, 12288, 22528, 32768
};
151 
152 #define z_usleep(usec) k_busy_wait(usec)
153 
154 /* Read direct (dreg is true) or indirect register (dreg is false) */
uint8_t z_mcr20a_read_reg(const struct device *dev, bool dreg, uint8_t addr)
{
	const struct mcr20a_config *config = dev->config;
	uint8_t xfer_len = dreg ? 2 : 3;
	uint8_t cmd_buf[3] = { 0 };
	const struct spi_buf buf = {
		.buf = cmd_buf,
		.len = xfer_len
	};
	const struct spi_buf_set tx = {
		.buffers = &buf,
		.count = 1
	};
	const struct spi_buf_set rx = {
		.buffers = &buf,
		.count = 1
	};

	if (dreg) {
		/* Direct register: the opcode byte carries the address. */
		cmd_buf[0] = MCR20A_REG_READ | addr;
	} else {
		/* Indirect register: go through the IAR index register. */
		cmd_buf[0] = MCR20A_IAR_INDEX | MCR20A_REG_WRITE;
		cmd_buf[1] = addr | MCR20A_REG_READ;
	}

	if (spi_transceive_dt(&config->bus, &tx, &rx) != 0) {
		LOG_ERR("Failed");
		return 0;
	}

	/* The register value is clocked in on the last transferred byte. */
	return cmd_buf[xfer_len - 1];
}
186 
187 /* Write direct (dreg is true) or indirect register (dreg is false) */
bool z_mcr20a_write_reg(const struct device *dev, bool dreg, uint8_t addr,
		       uint8_t value)
{
	const struct mcr20a_config *config = dev->config;
	uint8_t cmd_buf[3] = { 0 };
	const struct spi_buf buf = {
		.buf = cmd_buf,
		.len = dreg ? 2 : 3
	};
	const struct spi_buf_set tx = {
		.buffers = &buf,
		.count = 1
	};

	if (dreg) {
		/* Direct register: opcode+address, then the value. */
		cmd_buf[0] = MCR20A_REG_WRITE | addr;
		cmd_buf[1] = value;
	} else {
		/* Indirect register: IAR index, register address, value. */
		cmd_buf[0] = MCR20A_IAR_INDEX | MCR20A_REG_WRITE;
		cmd_buf[1] = addr | MCR20A_REG_WRITE;
		cmd_buf[2] = value;
	}

	return (spi_write_dt(&config->bus, &tx) == 0);
}
209 
210 /* Write multiple bytes to direct or indirect register */
bool z_mcr20a_write_burst(const struct device *dev, bool dreg, uint16_t addr,
			 uint8_t *data_buf, uint8_t len)
{
	const struct mcr20a_config *config = dev->config;
	uint8_t cmd_buf[2] = { 0 };
	struct spi_buf bufs[2] = {
		{ .buf = cmd_buf, .len = dreg ? 1 : 2 },
		{ .buf = data_buf, .len = len }
	};
	const struct spi_buf_set tx = {
		.buffers = bufs,
		.count = 2
	};

	if (dreg) {
		/* Direct access needs only the opcode+address byte. */
		cmd_buf[0] = MCR20A_REG_WRITE | addr;
	} else {
		/* Indirect access selects the IAR index first. */
		cmd_buf[0] = MCR20A_IAR_INDEX | MCR20A_REG_WRITE;
		cmd_buf[1] = addr | MCR20A_REG_WRITE;
	}

	return (spi_write_dt(&config->bus, &tx) == 0);
}
237 
238 /* Read multiple bytes from direct or indirect register */
bool z_mcr20a_read_burst(const struct device *dev, bool dreg, uint16_t addr,
			uint8_t *data_buf, uint8_t len)
{
	const struct mcr20a_config *config = dev->config;
	uint8_t cmd_buf[2] = { 0 };
	struct spi_buf bufs[2] = {
		{ .buf = cmd_buf, .len = dreg ? 1 : 2 },
		{ .buf = data_buf, .len = len }
	};
	/* Only the command bytes are transmitted ... */
	const struct spi_buf_set tx = {
		.buffers = bufs,
		.count = 1
	};
	/* ...while both buffers take part in the reception. */
	const struct spi_buf_set rx = {
		.buffers = bufs,
		.count = 2
	};

	if (dreg) {
		cmd_buf[0] = MCR20A_REG_READ | addr;
	} else {
		cmd_buf[0] = MCR20A_IAR_INDEX | MCR20A_REG_WRITE;
		cmd_buf[1] = addr | MCR20A_REG_READ;
	}

	return (spi_transceive_dt(&config->bus, &tx, &rx) == 0);
}
269 
270 /* Mask (msk is true) or unmask all interrupts from asserting IRQ_B */
static bool mcr20a_mask_irqb(const struct device *dev, bool msk)
{
	/* Set (msk) or clear the transceiver global interrupt mask bit. */
	uint8_t ctrl4 = read_reg_phy_ctrl4(dev);

	ctrl4 = msk ? (ctrl4 | MCR20A_PHY_CTRL4_TRCV_MSK)
		    : (ctrl4 & ~MCR20A_PHY_CTRL4_TRCV_MSK);

	return write_reg_phy_ctrl4(dev, ctrl4);
}
283 
284 /** Set an timeout value for the given compare register */
static int mcr20a_timer_set(const struct device *dev,
			    uint8_t cmp_reg,
			    uint32_t timeout)
{
	uint32_t tmr_now = 0U;
	uint32_t tmr_cmp;
	bool ok = false;

	/* Read the free-running event timer (little-endian on the wire). */
	if (!read_burst_event_timer(dev, (uint8_t *)&tmr_now)) {
		goto error;
	}

	tmr_now = sys_le32_to_cpu(tmr_now);
	tmr_cmp = tmr_now + timeout;
	LOG_DBG("now: 0x%x set 0x%x", tmr_now, tmr_cmp);
	tmr_cmp = sys_cpu_to_le32(tmr_cmp);

	/* Program the requested comparator with the absolute deadline. */
	switch (cmp_reg) {
	case 1:
		ok = write_burst_t1cmp(dev, (uint8_t *)&tmr_cmp);
		break;
	case 2:
		ok = write_burst_t2cmp(dev, (uint8_t *)&tmr_cmp);
		break;
	case 3:
		ok = write_burst_t3cmp(dev, (uint8_t *)&tmr_cmp);
		break;
	case 4:
		ok = write_burst_t4cmp(dev, (uint8_t *)&tmr_cmp);
		break;
	default:
		ok = false;
		break;
	}

	if (ok) {
		return 0;
	}

error:
	LOG_ERR("Failed");
	return -EIO;
}
329 
static int mcr20a_timer_init(const struct device *dev, uint8_t tb)
{
	uint8_t zeroes[3] = { 0, 0, 0 };
	uint8_t ctrl4;

	/* Select the event-timer timebase (prescaler). */
	if (!write_reg_tmr_prescale(dev, set_bits_tmr_prescale(tb))) {
		goto error;
	}

	/* Zero comparator 1... */
	if (!write_burst_t1cmp(dev, zeroes)) {
		goto error;
	}

	/* ...and latch the timer value via TMRLOAD. */
	ctrl4 = read_reg_phy_ctrl4(dev) | MCR20A_PHY_CTRL4_TMRLOAD;
	if (!write_reg_phy_ctrl4(dev, ctrl4)) {
		goto error;
	}

	LOG_DBG("done, timebase %d", tb);
	return 0;

error:
	LOG_ERR("Failed");
	return -EIO;
}
357 
358 /* Set Timer Comparator 4 */
static int mcr20a_t4cmp_set(const struct device *dev,
			    uint32_t timeout)
{
	uint8_t reg;

	if (mcr20a_timer_set(dev, 4, timeout)) {
		goto error;
	}

	/* Unmask and clear the timer 4 interrupt. */
	reg = read_reg_irqsts3(dev);
	reg &= ~MCR20A_IRQSTS3_TMR4MSK;
	reg |= MCR20A_IRQSTS3_TMR4IRQ;
	if (!write_reg_irqsts3(dev, reg)) {
		goto error;
	}

	/* Enable comparator 4. */
	reg = read_reg_phy_ctrl3(dev);
	reg |= MCR20A_PHY_CTRL3_TMR4CMP_EN;
	if (!write_reg_phy_ctrl3(dev, reg)) {
		goto error;
	}

	return 0;

error:
	LOG_DBG("Failed");
	return -EIO;
}
389 
390 /* Clear Timer Comparator 4 */
mcr20a_t4cmp_clear(const struct device * dev)391 static int mcr20a_t4cmp_clear(const struct device *dev)
392 {
393 	uint8_t irqsts3;
394 	uint8_t ctrl3;
395 
396 	ctrl3 = read_reg_phy_ctrl3(dev);
397 	ctrl3 &= ~MCR20A_PHY_CTRL3_TMR4CMP_EN;
398 	if (!write_reg_phy_ctrl3(dev, ctrl3)) {
399 		goto error;
400 	}
401 
402 	irqsts3 = read_reg_irqsts3(dev);
403 	irqsts3 |= MCR20A_IRQSTS3_TMR4IRQ;
404 	if (!write_reg_irqsts3(dev, irqsts3)) {
405 		goto error;
406 	}
407 
408 	return 0;
409 
410 error:
411 	LOG_DBG("Failed");
412 	return -EIO;
413 }
414 
xcvseq_wait_until_idle(const struct device * dev)415 static inline void xcvseq_wait_until_idle(const struct device *dev)
416 {
417 	uint8_t state;
418 	uint8_t retries = MCR20A_GET_SEQ_STATE_RETRIES;
419 
420 	do {
421 		state = read_reg_seq_state(dev);
422 		retries--;
423 	} while ((state & MCR20A_SEQ_STATE_MASK) && retries);
424 
425 	if (state & MCR20A_SEQ_STATE_MASK) {
426 		LOG_ERR("Timeout");
427 	}
428 }
429 
static inline int mcr20a_abort_sequence(const struct device *dev,
					bool force)
{
	uint8_t ctrl1 = read_reg_phy_ctrl1(dev);
	uint8_t seq = ctrl1 & MCR20A_PHY_CTRL1_XCVSEQ_MASK;

	LOG_DBG("CTRL1 0x%02x", ctrl1);

	/* Never break an ongoing TX/TX-RX sequence unless forced. */
	if (!force &&
	    (seq == MCR20A_XCVSEQ_TX || seq == MCR20A_XCVSEQ_TX_RX)) {
		return -1;
	}

	/* Request the Idle sequence. */
	ctrl1 &= ~MCR20A_PHY_CTRL1_XCVSEQ_MASK;
	if (!write_reg_phy_ctrl1(dev, ctrl1)) {
		return -1;
	}

	xcvseq_wait_until_idle(dev);

	/* Clear relevant interrupt flags */
	if (!write_reg_irqsts1(dev, MCR20A_IRQSTS1_IRQ_MASK)) {
		return -1;
	}

	return 0;
}
460 
461 /* Initiate a (new) Transceiver Sequence */
static inline int mcr20a_set_sequence(const struct device *dev,
				      uint8_t seq)
{
	uint8_t ctrl1;

	seq = set_bits_phy_ctrl1_xcvseq(seq);
	ctrl1 = read_reg_phy_ctrl1(dev) & ~MCR20A_PHY_CTRL1_XCVSEQ_MASK;

	if ((seq == MCR20A_XCVSEQ_TX_RX) &&
	    (ctrl1 & MCR20A_PHY_CTRL1_RXACKRQD)) {
		/* An ACK is awaited: arm timer 4 as a guard timeout. */
		mcr20a_t4cmp_set(dev, _MACACKWAITDURATION +
				 _MAX_PKT_TX_DURATION);
	}

	if (!write_reg_phy_ctrl1(dev, ctrl1 | seq)) {
		return -EIO;
	}

	return 0;
}
485 
#define DIV_ROUND_CLOSEST_WITH_OPPOSITE_SIGNS(n, d) (((n) - (d)/2)/(d))

static inline int16_t mcr20a_get_rssi(uint8_t lqi)
{
	/* Derive the RSSI in dBm from the LQI value.
	 *
	 * The reference manuals give two candidate equations for the
	 * RF value (used here as the RSSI):
	 *
	 * RF = (LQI - 286.6) / 2.69333 (MKW2xD Reference Manual)
	 * RF = (LQI - 295.4) / 2.84 (MCR20A Reference Manual)
	 *
	 * The second one is derived from empirical data (see Figure
	 * 3-10), so it is the one used here.
	 *
	 * To avoid floating point math, both numerator and denominator
	 * are scaled by 100 and the division is rounded to the nearest
	 * signed integer:
	 * RF = (LQI - 295.4) / 2.84
	 *    = (100 * (LQI - 295.4)) / (100 * 2.84)
	 *    = (100 * LQI - 29540) / 284
	 */
	int16_t scaled = ((int16_t)100 * lqi) - 29540; /* always negative */

	return DIV_ROUND_CLOSEST_WITH_OPPOSITE_SIGNS(scaled, 284);
}
514 
get_mac(const struct device * dev)515 static inline uint8_t *get_mac(const struct device *dev)
516 {
517 	struct mcr20a_context *mcr20a = dev->data;
518 	uint32_t *ptr = (uint32_t *)(mcr20a->mac_addr);
519 
520 	UNALIGNED_PUT(sys_rand32_get(), ptr);
521 	ptr = (uint32_t *)(mcr20a->mac_addr + 4);
522 	UNALIGNED_PUT(sys_rand32_get(), ptr);
523 
524 	mcr20a->mac_addr[0] = (mcr20a->mac_addr[0] & ~0x01) | 0x02;
525 
526 	return mcr20a->mac_addr;
527 }
528 
read_rxfifo_content(const struct device * dev,struct net_buf * buf,uint8_t len)529 static inline bool read_rxfifo_content(const struct device *dev,
530 				       struct net_buf *buf, uint8_t len)
531 {
532 	const struct mcr20a_config *config = dev->config;
533 	uint8_t cmd = MCR20A_BUF_READ;
534 	struct spi_buf bufs[2] = {
535 		{
536 			.buf = &cmd,
537 			.len = 1
538 		},
539 		{
540 			.buf = buf->data,
541 			.len = len
542 		}
543 	};
544 	const struct spi_buf_set tx = {
545 		.buffers = bufs,
546 		.count = 1
547 	};
548 	const struct spi_buf_set rx = {
549 		.buffers = bufs,
550 		.count = 2
551 	};
552 
553 	if (spi_transceive_dt(&config->bus, &tx, &rx) != 0) {
554 		return false;
555 	}
556 
557 	net_buf_add(buf, len);
558 
559 	return true;
560 }
561 
/* Fetch a received frame from the RX FIFO and hand it to the net stack.
 * len is the frame length reported by the hardware, including the FCS,
 * which the driver strips (IEEE802154_HW_FCS is advertised).
 */
static inline void mcr20a_rx(const struct device *dev, uint8_t len)
{
	struct mcr20a_context *mcr20a = dev->data;
	struct net_pkt *pkt = NULL;
	uint8_t pkt_len;
	/* Fix: mcr20a_get_rssi() returns a signed dBm value (always
	 * negative); storing it in a uint16_t made the "%d" log below
	 * print a large positive number. Keep it signed.
	 */
	int16_t rssi;
	uint8_t lqi;

	pkt_len = len - MCR20A_FCS_LENGTH;

	pkt = net_pkt_rx_alloc_with_buffer(mcr20a->iface, pkt_len,
					   AF_UNSPEC, 0, K_NO_WAIT);
	if (!pkt) {
		LOG_ERR("No buf available");
		goto out;
	}

	if (!read_rxfifo_content(dev, pkt->buffer, pkt_len)) {
		LOG_ERR("No content read");
		goto out;
	}

	/* TODO: ieee802154_handle_ack() expects an ACK package. */
	if (ieee802154_handle_ack(mcr20a->iface, pkt) == NET_OK) {
		LOG_DBG("ACK packet handled");
		goto out;
	}

	lqi = read_reg_lqi_value(dev);
	net_pkt_set_ieee802154_lqi(pkt, lqi);

	rssi = mcr20a_get_rssi(lqi);
	net_pkt_set_ieee802154_rssi_dbm(pkt, rssi);

	LOG_DBG("Caught a packet (%u) (LQI: %u, RSSI: %d)", pkt_len, lqi, rssi);

	if (net_recv_data(mcr20a->iface, pkt) < 0) {
		LOG_DBG("Packet dropped by NET stack");
		goto out;
	}

	log_stack_usage(&mcr20a->mcr20a_rx_thread);
	return;
out:
	/* Unified cleanup: release the packet on any failure path. */
	if (pkt) {
		net_pkt_unref(pkt);
	}
}
610 
611 /*
612  * The function checks how the XCV sequence has been completed
613  * and sets the variable seq_retval accordingly. It returns true
614  * if a new sequence is to be set. This function is only to be called
615  * when a sequence has been completed.
616  */
static inline bool irqsts1_event(const struct device *dev,
				  uint8_t *dregs)
{
	struct mcr20a_context *mcr20a = dev->data;
	/* Sequence that was running when the interrupt fired. */
	uint8_t seq = dregs[MCR20A_PHY_CTRL1] & MCR20A_PHY_CTRL1_XCVSEQ_MASK;
	/* Sequence to set next; default back to receive. */
	uint8_t new_seq = MCR20A_XCVSEQ_RECEIVE;
	bool retval = false;

	switch (seq) {
	case MCR20A_XCVSEQ_RECEIVE:
		/* RXIRQ set means a frame was received; TXIRQ additionally
		 * set means the hardware also sent an automatic ACK.
		 */
		if ((dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_RXIRQ)) {
			if ((dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_TXIRQ)) {
				LOG_DBG("Finished RxSeq + TxAck");
			} else {
				LOG_DBG("Finished RxSeq");
			}

			mcr20a_rx(dev, dregs[MCR20A_RX_FRM_LEN]);
			retval = true;
		}
		break;
	case MCR20A_XCVSEQ_TX:
	case MCR20A_XCVSEQ_TX_RX:
		/* CCA ran before TX; a set CCA bit means the channel was
		 * busy, so the transmission did not happen.
		 */
		if (dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_CCAIRQ) {
			if (dregs[MCR20A_IRQSTS2] & MCR20A_IRQSTS2_CCA) {
				LOG_DBG("Finished CCA, CH busy");
				atomic_set(&mcr20a->seq_retval, -EBUSY);
				retval = true;
				break;
			}
		}

		if (dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_TXIRQ) {
			/* TX completed successfully. */
			atomic_set(&mcr20a->seq_retval, 0);

			if ((dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_RXIRQ)) {
				LOG_DBG("Finished TxSeq + RxAck");
				/* Got Ack, timer should be disabled. */
				mcr20a_t4cmp_clear(dev);
			} else {
				LOG_DBG("Finished TxSeq");
			}

			retval = true;
		}
		break;
	case MCR20A_XCVSEQ_CONTINUOUS_CCA:
	case MCR20A_XCVSEQ_CCA:
		if ((dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_CCAIRQ)) {

			/* If CCCA, then timer should be disabled. */
			/* mcr20a_t4cmp_clear(dev); */

			if (dregs[MCR20A_IRQSTS2] & MCR20A_IRQSTS2_CCA) {
				LOG_DBG("Finished CCA, CH busy");
				atomic_set(&mcr20a->seq_retval, -EBUSY);
			} else {
				/**
				 * Assume that after the CCA,
				 * a transmit sequence follows and
				 * set here the sequence manager to Idle.
				 */
				LOG_DBG("Finished CCA, CH idle");
				new_seq = MCR20A_XCVSEQ_IDLE;
				atomic_set(&mcr20a->seq_retval, 0);
			}

			retval = true;
		}
		break;
	case MCR20A_XCVSEQ_IDLE:
	default:
		LOG_ERR("SEQ triggered, but XCVSEQ is in the Idle state");
		LOG_ERR("IRQSTS: 0x%02x", dregs[MCR20A_IRQSTS1]);
		break;
	}

	/* Patch the cached CTRL1 with the follow-up sequence; the caller
	 * (driver thread) writes it back to the hardware.
	 */
	dregs[MCR20A_PHY_CTRL1] &= ~MCR20A_PHY_CTRL1_XCVSEQ_MASK;
	dregs[MCR20A_PHY_CTRL1] |= new_seq;

	return retval;
}
699 
700 /*
701  * Check the Timer Comparator IRQ register IRQSTS3.
702  * Currently we use only T4CMP to cancel the running sequence,
703  * usually the TR.
704  */
irqsts3_event(const struct device * dev,uint8_t * dregs)705 static inline bool irqsts3_event(const struct device *dev,
706 				 uint8_t *dregs)
707 {
708 	struct mcr20a_context *mcr20a = dev->data;
709 	bool retval = false;
710 
711 	if (dregs[MCR20A_IRQSTS3] & MCR20A_IRQSTS3_TMR4IRQ) {
712 		LOG_DBG("Sequence timeout, IRQSTSs 0x%02x 0x%02x 0x%02x",
713 			    dregs[MCR20A_IRQSTS1],
714 			    dregs[MCR20A_IRQSTS2],
715 			    dregs[MCR20A_IRQSTS3]);
716 
717 		atomic_set(&mcr20a->seq_retval, -EBUSY);
718 		mcr20a_t4cmp_clear(dev);
719 		dregs[MCR20A_PHY_CTRL1] &= ~MCR20A_PHY_CTRL1_XCVSEQ_MASK;
720 		dregs[MCR20A_PHY_CTRL1] |= MCR20A_XCVSEQ_RECEIVE;
721 
722 		/* Clear all interrupts */
723 		dregs[MCR20A_IRQSTS1] = MCR20A_IRQSTS1_IRQ_MASK;
724 		retval = true;
725 	} else {
726 		LOG_ERR("IRQSTS3 contains untreated IRQs: 0x%02x",
727 			    dregs[MCR20A_IRQSTS3]);
728 	}
729 
730 	return retval;
731 }
732 
/* Driver thread: all interrupt handling is deferred here from the
 * IRQ_B GPIO ISR via isr_sem. The thread reads the interrupt status
 * registers, dispatches to the irqsts*_event() handlers and writes the
 * (possibly modified) registers back, setting the follow-up sequence.
 */
static void mcr20a_thread_main(void *arg)
{
	const struct device *dev = arg;
	struct mcr20a_context *mcr20a = dev->data;
	/* Cache of the IRQSTS1..PHY_CTRL4 register window. */
	uint8_t dregs[MCR20A_PHY_CTRL4 + 1];
	bool set_new_seq;
	uint8_t ctrl1 = 0U;

	while (true) {
		k_sem_take(&mcr20a->isr_sem, K_FOREVER);

		k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
		set_new_seq = false;

		/* Mask IRQ_B while the registers are manipulated. */
		if (!mcr20a_mask_irqb(dev, true)) {
			LOG_ERR("Failed to mask IRQ_B");
			goto unmask_irqb;
		}

		/* Read the register from IRQSTS1 until CTRL4 */
		if (!read_burst_irqsts1_ctrl4(dev, dregs)) {
			LOG_ERR("Failed to read register");
			goto unmask_irqb;
		}
		/* make backup from PHY_CTRL1 register */
		ctrl1 = dregs[MCR20A_PHY_CTRL1];

		/* Timer IRQs take priority over sequence-end IRQs. */
		if (dregs[MCR20A_IRQSTS3] & MCR20A_IRQSTS3_IRQ_MASK) {
			set_new_seq = irqsts3_event(dev, dregs);
		} else if (dregs[MCR20A_IRQSTS1] & MCR20A_IRQSTS1_SEQIRQ) {
			set_new_seq = irqsts1_event(dev, dregs);
		}

		if (dregs[MCR20A_IRQSTS2] & MCR20A_IRQSTS2_IRQ_MASK) {
			LOG_ERR("IRQSTS2 contains untreated IRQs: 0x%02x",
				    dregs[MCR20A_IRQSTS2]);
		}

		LOG_DBG("WB: 0x%02x | 0x%02x | 0x%02x",
			     dregs[MCR20A_IRQSTS1],
			     dregs[MCR20A_IRQSTS2],
			     dregs[MCR20A_IRQSTS3]);

		/* Write back register, clear IRQs and set new sequence */
		if (set_new_seq) {
			/* Reset sequence manager */
			ctrl1 &= ~MCR20A_PHY_CTRL1_XCVSEQ_MASK;
			if (!write_reg_phy_ctrl1(dev, ctrl1)) {
				LOG_ERR("Failed to reset SEQ manager");
			}

			xcvseq_wait_until_idle(dev);

			/* Burst up to CTRL1 also programs the new
			 * sequence chosen by the event handler.
			 */
			if (!write_burst_irqsts1_ctrl1(dev, dregs)) {
				LOG_ERR("Failed to write CTRL1");
			}
		} else {
			/* Only acknowledge the interrupt flags. */
			if (!write_burst_irqsts1_irqsts3(dev, dregs)) {
				LOG_ERR("Failed to write IRQSTS3");
			}
		}

unmask_irqb:
		if (!mcr20a_mask_irqb(dev, false)) {
			LOG_ERR("Failed to unmask IRQ_B");
		}

		k_mutex_unlock(&mcr20a->phy_mutex);

		/* Wake any caller blocked in cca()/tx() on seq_sync. */
		if (set_new_seq) {
			k_sem_give(&mcr20a->seq_sync);
		}
	}
}
807 
static inline void irqb_int_handler(const struct device *port,
				    struct gpio_callback *cb, uint32_t pins)
{
	struct mcr20a_context *mcr20a =
		CONTAINER_OF(cb, struct mcr20a_context, irqb_cb);

	/* Defer all handling to the driver thread. */
	k_sem_give(&mcr20a->isr_sem);
}
816 
static void enable_irqb_interrupt(const struct device *dev,
				 bool enable)
{
	const struct mcr20a_config *config = dev->config;

	/* Arm or disarm the edge interrupt on the IRQ_B line. */
	gpio_pin_interrupt_configure_dt(&config->irq_gpio,
					enable ? GPIO_INT_EDGE_TO_ACTIVE
					       : GPIO_INT_DISABLE);
}
827 
setup_gpio_callbacks(const struct device * dev)828 static inline void setup_gpio_callbacks(const struct device *dev)
829 {
830 	const struct mcr20a_config *config = dev->config;
831 	struct mcr20a_context *mcr20a = dev->data;
832 
833 	gpio_init_callback(&mcr20a->irqb_cb,
834 			   irqb_int_handler,
835 			   BIT(config->irq_gpio.pin));
836 	gpio_add_callback(config->irq_gpio.port, &mcr20a->irqb_cb);
837 }
838 
static int mcr20a_set_cca_mode(const struct device *dev, uint8_t mode)
{
	/* Replace the CCATYPE field in PHY_CTRL4 with the new mode. */
	uint8_t ctrl4 = read_reg_phy_ctrl4(dev);

	ctrl4 = (ctrl4 & ~MCR20A_PHY_CTRL4_CCATYPE_MASK) |
		set_bits_phy_ctrl4_ccatype(mode);

	if (!write_reg_phy_ctrl4(dev, ctrl4)) {
		LOG_ERR("Failed");
		return -EIO;
	}

	return 0;
}
854 
mcr20a_get_capabilities(const struct device * dev)855 static enum ieee802154_hw_caps mcr20a_get_capabilities(const struct device *dev)
856 {
857 	return IEEE802154_HW_FCS | IEEE802154_HW_TX_RX_ACK |
858 	       IEEE802154_HW_RX_TX_ACK | IEEE802154_HW_FILTER;
859 }
860 
861 /* Note: CCA before TX is enabled by default */
static int mcr20a_cca(const struct device *dev)
{
	struct mcr20a_context *mcr20a = dev->data;
	int retval;

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);

	/* Mask IRQ_B so the driver thread cannot race the setup below. */
	if (!mcr20a_mask_irqb(dev, true)) {
		LOG_ERR("Failed to mask IRQ_B");
		goto error;
	}

	/* Fresh sync object; given by the driver thread when the
	 * sequence completes.
	 */
	k_sem_init(&mcr20a->seq_sync, 0, 1);

	if (mcr20a_abort_sequence(dev, false)) {
		LOG_ERR("Failed to reset XCV sequence");
		goto error;
	}

	LOG_DBG("start CCA sequence");

	if (mcr20a_set_sequence(dev, MCR20A_XCVSEQ_CCA)) {
		LOG_ERR("Failed to reset XCV sequence");
		goto error;
	}

	if (!mcr20a_mask_irqb(dev, false)) {
		LOG_ERR("Failed to unmask IRQ_B");
		goto error;
	}

	k_mutex_unlock(&mcr20a->phy_mutex);
	/* Wait for the driver thread to report the sequence outcome. */
	retval = k_sem_take(&mcr20a->seq_sync,
			    K_MSEC(MCR20A_SEQ_SYNC_TIMEOUT));
	if (retval) {
		LOG_ERR("Timeout occurred, %d", retval);
		return retval;
	}

	LOG_DBG("done");

	/* 0 if the channel was idle, -EBUSY if busy (set by the
	 * driver thread in irqsts1_event()).
	 */
	return mcr20a->seq_retval;

error:
	/* NOTE(review): IRQ_B remains masked on this error path — confirm
	 * whether it should be unmasked before returning.
	 */
	k_mutex_unlock(&mcr20a->phy_mutex);
	return -EIO;
}
909 
static int mcr20a_set_channel(const struct device *dev, uint16_t channel)
{
	struct mcr20a_context *mcr20a = dev->data;
	uint8_t pll_regs[3];
	uint8_t ctrl1;
	int retval = -EIO;

	/* Only the 2.4 GHz O-QPSK channels 11..26 are supported. */
	if (channel < 11 || channel > 26) {
		LOG_ERR("Unsupported channel %u", channel);
		return channel < 11 ? -ENOTSUP : -EINVAL;
	}

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);

	if (!mcr20a_mask_irqb(dev, true)) {
		LOG_ERR("Failed to mask IRQ_B");
		goto out;
	}

	/* Remember the running sequence so it can be restored after
	 * the PLL has been retuned.
	 */
	ctrl1 = read_reg_phy_ctrl1(dev);

	if (mcr20a_abort_sequence(dev, true)) {
		LOG_ERR("Failed to reset XCV sequence");
		goto out;
	}

	LOG_DBG("%u", channel);
	channel -= 11U;
	/* PLL integer part, then the fractional part little-endian. */
	pll_regs[0] = set_bits_pll_int0_val(pll_int_lt[channel]);
	pll_regs[1] = (uint8_t)pll_frac_lt[channel];
	pll_regs[2] = (uint8_t)(pll_frac_lt[channel] >> 8);

	if (!write_burst_pll_int0(dev, pll_regs)) {
		LOG_ERR("Failed to set PLL");
		goto out;
	}

	if (mcr20a_set_sequence(dev, ctrl1)) {
		LOG_ERR("Failed to restore XCV sequence");
		goto out;
	}

	retval = 0;

out:
	if (!mcr20a_mask_irqb(dev, false)) {
		LOG_ERR("Failed to unmask IRQ_B");
		retval = -EIO;
	}

	k_mutex_unlock(&mcr20a->phy_mutex);

	return retval;
}
964 
static int mcr20a_set_pan_id(const struct device *dev, uint16_t pan_id)
{
	struct mcr20a_context *mcr20a = dev->data;
	int ret = 0;

	/* The hardware expects the PAN ID in little-endian order. */
	pan_id = sys_le16_to_cpu(pan_id);

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
	if (!write_burst_pan_id(dev, (uint8_t *) &pan_id)) {
		LOG_ERR("Failed");
		ret = -EIO;
	}
	k_mutex_unlock(&mcr20a->phy_mutex);

	if (ret == 0) {
		LOG_DBG("0x%x", pan_id);
	}

	return ret;
}
983 
static int mcr20a_set_short_addr(const struct device *dev,
				 uint16_t short_addr)
{
	struct mcr20a_context *mcr20a = dev->data;
	int ret = 0;

	/* The hardware expects the short address in little-endian order. */
	short_addr = sys_le16_to_cpu(short_addr);

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
	if (!write_burst_short_addr(dev, (uint8_t *) &short_addr)) {
		LOG_ERR("Failed");
		ret = -EIO;
	}
	k_mutex_unlock(&mcr20a->phy_mutex);

	if (ret == 0) {
		LOG_DBG("0x%x", short_addr);
	}

	return ret;
}
1003 
mcr20a_set_ieee_addr(const struct device * dev,const uint8_t * ieee_addr)1004 static int mcr20a_set_ieee_addr(const struct device *dev,
1005 				const uint8_t *ieee_addr)
1006 {
1007 	struct mcr20a_context *mcr20a = dev->data;
1008 
1009 	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
1010 
1011 	if (!write_burst_ext_addr(dev, (void *)ieee_addr)) {
1012 		LOG_ERR("Failed");
1013 		k_mutex_unlock(&mcr20a->phy_mutex);
1014 		return -EIO;
1015 	}
1016 
1017 	k_mutex_unlock(&mcr20a->phy_mutex);
1018 	LOG_DBG("IEEE address %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1019 		    ieee_addr[7], ieee_addr[6], ieee_addr[5], ieee_addr[4],
1020 		    ieee_addr[3], ieee_addr[2], ieee_addr[1], ieee_addr[0]);
1021 
1022 	return 0;
1023 }
1024 
static int mcr20a_filter(const struct device *dev,
			 bool set,
			 enum ieee802154_filter_type type,
			 const struct ieee802154_filter *filter)
{
	LOG_DBG("Applying filter %u", type);

	/* Only setting filters is supported, not clearing them. */
	if (!set) {
		return -ENOTSUP;
	}

	switch (type) {
	case IEEE802154_FILTER_TYPE_IEEE_ADDR:
		return mcr20a_set_ieee_addr(dev, filter->ieee_addr);
	case IEEE802154_FILTER_TYPE_SHORT_ADDR:
		return mcr20a_set_short_addr(dev, filter->short_addr);
	case IEEE802154_FILTER_TYPE_PAN_ID:
		return mcr20a_set_pan_id(dev, filter->pan_id);
	default:
		return -ENOTSUP;
	}
}
1046 
static int mcr20a_set_txpower(const struct device *dev, int16_t dbm)
{
	struct mcr20a_context *mcr20a = dev->data;

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
	LOG_DBG("%d", dbm);

	if (dbm >= MCR20A_OUTPUT_POWER_MIN && dbm <= MCR20A_OUTPUT_POWER_MAX) {
		/* Map the dBm value onto the PA_PWR register via the LUT. */
		uint8_t pwr = pow_lt[dbm - MCR20A_OUTPUT_POWER_MIN];

		if (write_reg_pa_pwr(dev, set_bits_pa_pwr_val(pwr))) {
			k_mutex_unlock(&mcr20a->phy_mutex);
			return 0;
		}
	}

	k_mutex_unlock(&mcr20a->phy_mutex);
	LOG_DBG("Failed");
	return -EIO;
}
1073 
write_txfifo_content(const struct device * dev,struct net_pkt * pkt,struct net_buf * frag)1074 static inline bool write_txfifo_content(const struct device *dev,
1075 					struct net_pkt *pkt,
1076 					struct net_buf *frag)
1077 {
1078 	const struct mcr20a_config *config = dev->config;
1079 	size_t payload_len = frag->len;
1080 	uint8_t cmd_buf[2] = {
1081 		MCR20A_BUF_WRITE,
1082 		payload_len + MCR20A_FCS_LENGTH
1083 	};
1084 	const struct spi_buf bufs[2] = {
1085 		{
1086 			.buf = cmd_buf,
1087 			.len = 2
1088 		},
1089 		{
1090 			.buf = frag->data,
1091 			.len = payload_len
1092 		}
1093 	};
1094 	const struct spi_buf_set tx = {
1095 		.buffers = bufs,
1096 		.count = 2
1097 	};
1098 
1099 	if (payload_len > MCR20A_PSDU_LENGTH) {
1100 		LOG_ERR("Payload too long");
1101 		return 0;
1102 	}
1103 
1104 	return (spi_write_dt(&config->bus, &tx) == 0);
1105 }
1106 
mcr20a_tx(const struct device * dev,enum ieee802154_tx_mode mode,struct net_pkt * pkt,struct net_buf * frag)1107 static int mcr20a_tx(const struct device *dev,
1108 		     enum ieee802154_tx_mode mode,
1109 		     struct net_pkt *pkt,
1110 		     struct net_buf *frag)
1111 {
1112 	struct mcr20a_context *mcr20a = dev->data;
1113 	uint8_t seq = ieee802154_is_ar_flag_set(frag) ? MCR20A_XCVSEQ_TX_RX :
1114 						     MCR20A_XCVSEQ_TX;
1115 	int retval;
1116 
1117 	if (mode != IEEE802154_TX_MODE_DIRECT) {
1118 		NET_ERR("TX mode %d not supported", mode);
1119 		return -ENOTSUP;
1120 	}
1121 
1122 	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
1123 
1124 	LOG_DBG("%p (%u)", frag, frag->len);
1125 
1126 	if (!mcr20a_mask_irqb(dev, true)) {
1127 		LOG_ERR("Failed to mask IRQ_B");
1128 		goto error;
1129 	}
1130 
1131 	if (mcr20a_abort_sequence(dev, false)) {
1132 		LOG_ERR("Failed to reset XCV sequence");
1133 		goto error;
1134 	}
1135 
1136 	if (!write_txfifo_content(dev, pkt, frag)) {
1137 		LOG_ERR("Did not write properly into TX FIFO");
1138 		goto error;
1139 	}
1140 
1141 	k_sem_init(&mcr20a->seq_sync, 0, 1);
1142 
1143 	if (mcr20a_set_sequence(dev, seq)) {
1144 		LOG_ERR("Cannot start transmission");
1145 		goto error;
1146 	}
1147 
1148 	if (!mcr20a_mask_irqb(dev, false)) {
1149 		LOG_ERR("Failed to unmask IRQ_B");
1150 		goto error;
1151 	}
1152 
1153 	k_mutex_unlock(&mcr20a->phy_mutex);
1154 	retval = k_sem_take(&mcr20a->seq_sync,
1155 			    K_MSEC(MCR20A_SEQ_SYNC_TIMEOUT));
1156 	if (retval) {
1157 		LOG_ERR("Timeout occurred, %d", retval);
1158 		return retval;
1159 	}
1160 
1161 	LOG_DBG("done");
1162 
1163 	return mcr20a->seq_retval;
1164 
1165 error:
1166 	k_mutex_unlock(&mcr20a->phy_mutex);
1167 	return -EIO;
1168 }
1169 
/* Bring the transceiver out of low-power mode and start receiving.
 *
 * The statement order below is hardware-mandated: wake to AUTODOZE,
 * poll for the crystal oscillator, clear stale interrupt flags, then
 * arm the RX sequence before re-enabling the IRQ_B line.
 *
 * Returns 0 on success, -EIO on any failure.
 */
static int mcr20a_start(const struct device *dev)
{
	struct mcr20a_context *mcr20a = dev->data;
	uint8_t timeout = 6U;
	uint8_t status;

	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
	/* Keep IRQ_B disabled until the receiver is fully configured. */
	enable_irqb_interrupt(dev, false);

	if (!write_reg_pwr_modes(dev, MCR20A_PM_AUTODOZE)) {
		LOG_ERR("Error starting MCR20A");
		goto error;
	}

	/* Poll for the crystal to become ready (up to 6 * 50 us). */
	do {
		z_usleep(50);
		timeout--;
		status = read_reg_pwr_modes(dev);
	} while (!(status & MCR20A_PWR_MODES_XTAL_READY) && timeout);

	if (!(status & MCR20A_PWR_MODES_XTAL_READY)) {
		LOG_ERR("Timeout, failed to wake up");
		goto error;
	}

	/* Clear all interrupt flags */
	write_reg_irqsts1(dev, MCR20A_IRQSTS1_IRQ_MASK);
	write_reg_irqsts2(dev, MCR20A_IRQSTS2_IRQ_MASK);
	write_reg_irqsts3(dev, MCR20A_IRQSTS3_IRQ_MASK |
			  MCR20A_IRQSTS3_TMR_MASK);

	if (mcr20a_abort_sequence(dev, true)) {
		LOG_ERR("Failed to reset XCV sequence");
		goto error;
	}

	/* Put the transceiver into continuous receive. */
	if (mcr20a_set_sequence(dev, MCR20A_XCVSEQ_RECEIVE)) {
		LOG_ERR("Failed to set XCV sequence");
		goto error;
	}

	enable_irqb_interrupt(dev, true);

	if (!mcr20a_mask_irqb(dev, false)) {
		LOG_ERR("Failed to unmask IRQ_B");
		goto error;
	}

	k_mutex_unlock(&mcr20a->phy_mutex);
	LOG_DBG("started");

	return 0;

error:
	k_mutex_unlock(&mcr20a->phy_mutex);
	return -EIO;
}
1227 
mcr20a_stop(const struct device * dev)1228 static int mcr20a_stop(const struct device *dev)
1229 {
1230 	struct mcr20a_context *mcr20a = dev->data;
1231 	uint8_t power_mode;
1232 
1233 	k_mutex_lock(&mcr20a->phy_mutex, K_FOREVER);
1234 
1235 	if (!mcr20a_mask_irqb(dev, true)) {
1236 		LOG_ERR("Failed to mask IRQ_B");
1237 		goto error;
1238 	}
1239 
1240 	if (mcr20a_abort_sequence(dev, true)) {
1241 		LOG_ERR("Failed to reset XCV sequence");
1242 		goto error;
1243 	}
1244 
1245 	enable_irqb_interrupt(dev, false);
1246 
1247 	if (PART_OF_KW2XD_SIP) {
1248 		power_mode = MCR20A_PM_DOZE;
1249 	} else {
1250 		power_mode = MCR20A_PM_HIBERNATE;
1251 	}
1252 
1253 	if (!write_reg_pwr_modes(dev, power_mode)) {
1254 		goto error;
1255 	}
1256 
1257 	LOG_DBG("stopped");
1258 	k_mutex_unlock(&mcr20a->phy_mutex);
1259 
1260 	return 0;
1261 
1262 error:
1263 	k_mutex_unlock(&mcr20a->phy_mutex);
1264 	LOG_ERR("Error stopping MCR20A");
1265 	return -EIO;
1266 }
1267 
/* driver-allocated attribute memory - constant across all driver instances */
IEEE802154_DEFINE_PHY_SUPPORTED_CHANNELS(drv_attr, 11, 26);

/* Query PHY attributes. All supported queries are channel-page/range
 * lookups, delegated to the generic helper with this radio's static
 * channel table (2.4 GHz O-QPSK, channels 11-26).
 */
static int mcr20a_attr_get(const struct device *dev, enum ieee802154_attr attr,
			   struct ieee802154_attr_value *value)
{
	ARG_UNUSED(dev);

	return ieee802154_attr_get_channel_page_and_range(
		attr, IEEE802154_ATTR_PHY_CHANNEL_PAGE_ZERO_OQPSK_2450_BPSK_868_915,
		&drv_attr.phy_supported_channels, value);
}
1280 
mcr20a_update_overwrites(const struct device * dev)1281 static int mcr20a_update_overwrites(const struct device *dev)
1282 {
1283 	if (!write_reg_overwrite_ver(dev, overwrites_direct[0].data)) {
1284 		goto error;
1285 	}
1286 
1287 	for (uint8_t i = 0;
1288 	     i < sizeof(overwrites_indirect) / sizeof(overwrites_t);
1289 	     i++) {
1290 
1291 		if (!z_mcr20a_write_reg(dev, false,
1292 					overwrites_indirect[i].address,
1293 					overwrites_indirect[i].data)) {
1294 			goto error;
1295 		}
1296 	}
1297 
1298 	return 0;
1299 
1300 error:
1301 	LOG_ERR("Error update overwrites");
1302 	return -EIO;
1303 }
1304 
/* Hardware reset and one-time PHY configuration.
 *
 * The sequence (reset pulse, wake-IRQ wait, clock-out sanity check,
 * flag clearing, overwrites, timer/power/channel defaults) is
 * order-critical; do not reorder.
 *
 * Returns 0 on success, -EIO on failure.
 */
static int power_on_and_setup(const struct device *dev)
{
	const struct mcr20a_config *config = dev->config;
	uint8_t timeout = 6U;
	int pin;
	uint8_t tmp = 0U;

	/* The SiP variant shares reset with the SoC, so only discrete
	 * parts get a reset pulse here.
	 */
	if (!PART_OF_KW2XD_SIP) {
		gpio_pin_set_dt(&config->reset_gpio, 1);
		z_usleep(150);
		gpio_pin_set_dt(&config->reset_gpio, 0);

		/* Wait for the wake interrupt line to assert (low),
		 * up to 6 * 50 us.
		 */
		do {
			z_usleep(50);
			timeout--;
			pin = gpio_pin_get_dt(&config->irq_gpio);
		} while (pin > 0 && timeout);

		if (pin) {
			LOG_ERR("Timeout, failed to get WAKE IRQ");
			return -EIO;
		}

	}

	/* Write the clock-out config and read it back as a cheap
	 * "is the SPI link and chip alive" check.
	 */
	tmp = MCR20A_CLK_OUT_CONFIG | MCR20A_CLK_OUT_EXTEND;
	write_reg_clk_out_ctrl(dev, tmp);

	if (read_reg_clk_out_ctrl(dev) != tmp) {
		LOG_ERR("Failed to get device up");
		return -EIO;
	}

	/* Clear all interrupt flags */
	write_reg_irqsts1(dev, MCR20A_IRQSTS1_IRQ_MASK);
	write_reg_irqsts2(dev, MCR20A_IRQSTS2_IRQ_MASK);
	write_reg_irqsts3(dev, MCR20A_IRQSTS3_IRQ_MASK |
			  MCR20A_IRQSTS3_TMR_MASK);

	/* NOTE(review): the return values of the four setup calls below
	 * are ignored; a failure here goes undetected until first use.
	 * Consider propagating them.
	 */
	mcr20a_update_overwrites(dev);
	mcr20a_timer_init(dev, MCR20A_TIMEBASE_62500HZ);

	mcr20a_set_txpower(dev, MCR20A_DEFAULT_TX_POWER);
	mcr20a_set_channel(dev, MCR20A_DEFAULT_CHANNEL);
	mcr20a_set_cca_mode(dev, 1);
	write_reg_rx_wtr_mark(dev, 8);

	/* Configure PHY behaviour: CCA before TX, hardware auto-ACK,
	 * and require ACK reception on AR frames.
	 */
	tmp = MCR20A_PHY_CTRL1_CCABFRTX |
	      MCR20A_PHY_CTRL1_AUTOACK |
	      MCR20A_PHY_CTRL1_RXACKRQD;
	write_reg_phy_ctrl1(dev, tmp);

	/* Enable Sequence-end interrupt (mask bits are active-high,
	 * hence the complement).
	 */
	tmp = MCR20A_PHY_CTRL2_SEQMSK;
	write_reg_phy_ctrl2(dev, ~tmp);

	setup_gpio_callbacks(dev);

	return 0;
}
1366 
1367 
configure_gpios(const struct device * dev)1368 static inline int configure_gpios(const struct device *dev)
1369 {
1370 	const struct mcr20a_config *config = dev->config;
1371 
1372 	/* setup gpio for the modem interrupt */
1373 	if (!gpio_is_ready_dt(&config->irq_gpio)) {
1374 		LOG_ERR("IRQ GPIO device not ready");
1375 		return -ENODEV;
1376 	}
1377 
1378 	gpio_pin_configure_dt(&config->irq_gpio, GPIO_INPUT);
1379 
1380 	if (!PART_OF_KW2XD_SIP) {
1381 		/* setup gpio for the modems reset */
1382 		if (!gpio_is_ready_dt(&config->reset_gpio)) {
1383 			LOG_ERR("Reset GPIO device not ready");
1384 			return -EINVAL;
1385 		}
1386 
1387 		gpio_pin_configure_dt(&config->reset_gpio, GPIO_OUTPUT_ACTIVE);
1388 	}
1389 
1390 	return 0;
1391 }
1392 
/* Driver init hook: set up synchronization primitives, GPIOs, SPI,
 * the transceiver itself, and finally the RX handler thread.
 *
 * Returns 0 on success, -EIO on any setup failure.
 */
static int mcr20a_init(const struct device *dev)
{
	const struct mcr20a_config *config = dev->config;
	struct mcr20a_context *mcr20a = dev->data;

	k_mutex_init(&mcr20a->phy_mutex);
	k_sem_init(&mcr20a->isr_sem, 0, 1);

	LOG_DBG("\nInitialize MCR20A Transceiver\n");

	if (configure_gpios(dev) != 0) {
		LOG_ERR("Configuring GPIOS failed");
		return -EIO;
	}

	if (!spi_is_ready_dt(&config->bus)) {
		LOG_ERR("Configuring SPI failed");
		return -EIO;
	}

	LOG_DBG("GPIO and SPI configured");

	if (power_on_and_setup(dev) != 0) {
		LOG_ERR("Configuring MCR20A failed");
		return -EIO;
	}

	/* Spawn the RX/ISR bottom-half thread at high cooperative
	 * priority. NOTE(review): the k_thread_entry_t cast hides any
	 * signature mismatch in mcr20a_thread_main — confirm it takes
	 * the standard three void * parameters.
	 */
	k_thread_create(&mcr20a->mcr20a_rx_thread, mcr20a->mcr20a_rx_stack,
			CONFIG_IEEE802154_MCR20A_RX_STACK_SIZE,
			(k_thread_entry_t)mcr20a_thread_main,
			(void *)dev, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT);
	k_thread_name_set(&mcr20a->mcr20a_rx_thread, "mcr20a_rx");

	return 0;
}
1428 
/* Network interface init hook: bind the interface to this device and
 * hand it to the 802.15.4 L2 layer.
 */
static void mcr20a_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct mcr20a_context *mcr20a = dev->data;
	uint8_t *mac = get_mac(dev);

	/* 8-byte link-layer address — presumably an EUI-64 produced by
	 * get_mac(); confirm the buffer is at least 8 bytes.
	 */
	net_if_set_link_addr(iface, mac, 8, NET_LINK_IEEE802154);

	mcr20a->iface = iface;

	ieee802154_init(iface);

	LOG_DBG("done");
}
1443 
/* Static configuration resolved from devicetree instance 0: SPI bus,
 * IRQ_B input and reset output.
 */
static const struct mcr20a_config mcr20a_config = {
	.bus = SPI_DT_SPEC_INST_GET(0, SPI_WORD_SET(8), 0),
	.irq_gpio = GPIO_DT_SPEC_INST_GET(0, irqb_gpios),
	.reset_gpio = GPIO_DT_SPEC_INST_GET(0, reset_gpios),
};

/* Mutable per-device runtime state (single instance). */
static struct mcr20a_context mcr20a_context_data;

/* 802.15.4 radio driver API vtable. */
static struct ieee802154_radio_api mcr20a_radio_api = {
	.iface_api.init	= mcr20a_iface_init,

	.get_capabilities	= mcr20a_get_capabilities,
	.cca			= mcr20a_cca,
	.set_channel		= mcr20a_set_channel,
	.filter			= mcr20a_filter,
	.set_txpower		= mcr20a_set_txpower,
	.start			= mcr20a_start,
	.stop			= mcr20a_stop,
	.tx			= mcr20a_tx,
	.attr_get		= mcr20a_attr_api,
};

/* In raw mode the radio is exposed as a bare device; otherwise it is
 * registered as a network device bound to the 802.15.4 L2 layer.
 */
#if defined(CONFIG_IEEE802154_RAW_MODE)
DEVICE_DT_INST_DEFINE(0, mcr20a_init, NULL, &mcr20a_context_data,
		      &mcr20a_config, POST_KERNEL,
		      CONFIG_IEEE802154_MCR20A_INIT_PRIO, &mcr20a_radio_api);
#else
NET_DEVICE_DT_INST_DEFINE(0, mcr20a_init, NULL, &mcr20a_context_data,
			  &mcr20a_config, CONFIG_IEEE802154_MCR20A_INIT_PRIO,
			  &mcr20a_radio_api, IEEE802154_L2,
			  NET_L2_GET_CTX_TYPE(IEEE802154_L2),
			  MCR20A_PSDU_LENGTH);
#endif
1477