/* ieee802154_nrf5.c - nRF5 802.15.4 driver */

/*
 * Copyright (c) 2017-2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nordic_nrf_ieee802154

#define LOG_MODULE_NAME ieee802154_nrf5
#if defined(CONFIG_IEEE802154_DRIVER_LOG_LEVEL)
#define LOG_LEVEL CONFIG_IEEE802154_DRIVER_LOG_LEVEL
#else
#define LOG_LEVEL LOG_LEVEL_NONE
#endif

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <errno.h>

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/debug/stack.h>

#include <soc.h>
#include <soc_secure.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/debug/stack.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_pkt.h>

#if defined(CONFIG_NET_L2_OPENTHREAD)
#include <zephyr/net/openthread.h>
#include <zephyr/net/ieee802154_radio_openthread.h>
#endif

#include <zephyr/sys/byteorder.h>
#include <string.h>
#include <zephyr/random/random.h>

#include <zephyr/net/ieee802154_radio.h>
#include <zephyr/irq.h>

#include "ieee802154_nrf5.h"
#include "nrf_802154.h"
#include "nrf_802154_const.h"

#if defined(CONFIG_NRF_802154_SER_HOST)
#include "nrf_802154_serialization_error.h"
#endif

struct nrf5_802154_config {
	void (*irq_config_func)(const struct device *dev);
};

static struct nrf5_802154_data nrf5_data;
#if defined(CONFIG_IEEE802154_RAW_MODE)
static const struct device *nrf5_dev;
#endif

#define DRX_SLOT_RX 0 /* Delayed reception window ID */

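/* Nanoseconds per the 10-symbol unit in which CSL periods are expressed
 * (one 2.4 GHz O-QPSK symbol lasts 16 us).
 */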
#define NSEC_PER_TEN_SYMBOLS (10 * IEEE802154_PHY_OQPSK_780_TO_2450MHZ_SYMBOL_PERIOD_NS)

#if defined(CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE)
#if defined(CONFIG_SOC_NRF5340_CPUAPP)
#if defined(CONFIG_TRUSTED_EXECUTION_NONSECURE)
#error "NRF_UICR->OTP cannot be read from the non-secure domain"
#else
#define EUI64_ADDR (NRF_UICR->OTP)
#endif /* CONFIG_TRUSTED_EXECUTION_NONSECURE */
#else
#define EUI64_ADDR (NRF_UICR->CUSTOMER)
#endif /* CONFIG_SOC_NRF5340_CPUAPP */
#endif /* CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE */

#if defined(CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE)
#define EUI64_ADDR_HIGH CONFIG_IEEE802154_NRF5_UICR_EUI64_REG
#define EUI64_ADDR_LOW (CONFIG_IEEE802154_NRF5_UICR_EUI64_REG + 1)
#else
#define EUI64_ADDR_HIGH 0
#define EUI64_ADDR_LOW 1
#endif /* CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE */

/* Convenience defines for RADIO */
#define NRF5_802154_DATA(dev) \
	((struct nrf5_802154_data * const)(dev)->data)

#define NRF5_802154_CFG(dev) \
	((const struct nrf5_802154_config * const)(dev)->config)

#if CONFIG_IEEE802154_VENDOR_OUI_ENABLE
#define IEEE802154_NRF5_VENDOR_OUI CONFIG_IEEE802154_VENDOR_OUI
#else
#define IEEE802154_NRF5_VENDOR_OUI (uint32_t)0xF4CE36
#endif

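/* Return the driver's device instance: cached at init time in raw mode,
 * otherwise resolved from the network interface bound to the driver data.
 */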
static inline const struct device *nrf5_get_device(void)
{
#if defined(CONFIG_IEEE802154_RAW_MODE)
	return nrf5_dev;
#else
	return net_if_get_device(nrf5_data.iface);
#endif
}

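/* Build the 8-byte EUI-64: either the vendor OUI combined with the factory
 * device ID, or the value programmed into UICR, depending on Kconfig.
 */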
static void nrf5_get_eui64(uint8_t *mac)
{
	uint64_t factoryAddress;
	uint32_t index = 0;

#if !defined(CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE)
	uint32_t deviceid[2];

	/* Set the MAC Address Block Large (MA-L), formerly called OUI. */
	mac[index++] = (IEEE802154_NRF5_VENDOR_OUI >> 16) & 0xff;
	mac[index++] = (IEEE802154_NRF5_VENDOR_OUI >> 8) & 0xff;
	mac[index++] = IEEE802154_NRF5_VENDOR_OUI & 0xff;

	soc_secure_read_deviceid(deviceid);

	factoryAddress = (uint64_t)deviceid[EUI64_ADDR_HIGH] << 32;
	factoryAddress |= deviceid[EUI64_ADDR_LOW];
#else
	/* Use the device identifier assigned during production. */
	factoryAddress = (uint64_t)EUI64_ADDR[EUI64_ADDR_HIGH] << 32;
	factoryAddress |= EUI64_ADDR[EUI64_ADDR_LOW];
#endif
	memcpy(mac + index, &factoryAddress, sizeof(factoryAddress) - index);
}

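/* RX thread: takes frames queued by the radio driver callback from rx_fifo,
 * wraps them in net_pkt structures and hands them to the network stack.
 */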
static void nrf5_rx_thread(void *arg1, void *arg2, void *arg3)
{
	struct nrf5_802154_data *nrf5_radio = (struct nrf5_802154_data *)arg1;
	struct net_pkt *pkt;
	struct nrf5_802154_rx_frame *rx_frame;
	uint8_t pkt_len;
	uint8_t *psdu;

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	while (1) {
		pkt = NULL;
		rx_frame = NULL;

		LOG_DBG("Waiting for frame");

		rx_frame = k_fifo_get(&nrf5_radio->rx_fifo, K_FOREVER);

		__ASSERT_NO_MSG(rx_frame->psdu);

		/* rx_mpdu contains the length, the PSDU, and FCS or LQI.
		 * The last 2 bytes contain the LQI or the FCS, depending on
		 * whether automatic CRC handling is enabled or not,
		 * respectively.
		 */
		if (IS_ENABLED(CONFIG_IEEE802154_NRF5_FCS_IN_LENGTH)) {
			pkt_len = rx_frame->psdu[0];
		} else {
			pkt_len = rx_frame->psdu[0] - IEEE802154_FCS_LENGTH;
		}

#if defined(CONFIG_NET_BUF_DATA_SIZE)
		__ASSERT_NO_MSG(pkt_len <= CONFIG_NET_BUF_DATA_SIZE);
#endif

		LOG_DBG("Frame received");

		/* Block the RX thread until a net_pkt is available, so that we
		 * don't drop an already ACKed frame in case of temporary
		 * net_pkt scarcity. The nRF 802.15.4 radio driver will
		 * accumulate any incoming frames until it runs out of internal
		 * buffers (and thus stops acknowledging consecutive frames).
		 */
		pkt = net_pkt_rx_alloc_with_buffer(nrf5_radio->iface, pkt_len,
						   AF_UNSPEC, 0, K_FOREVER);

		if (net_pkt_write(pkt, rx_frame->psdu + 1, pkt_len)) {
			goto drop;
		}

		net_pkt_set_ieee802154_lqi(pkt, rx_frame->lqi);
		net_pkt_set_ieee802154_rssi_dbm(pkt, rx_frame->rssi);
		net_pkt_set_ieee802154_ack_fpb(pkt, rx_frame->ack_fpb);

#if defined(CONFIG_NET_PKT_TIMESTAMP)
		net_pkt_set_timestamp_ns(pkt, rx_frame->time * NSEC_PER_USEC);
#endif

#if defined(CONFIG_NET_L2_OPENTHREAD)
		net_pkt_set_ieee802154_ack_seb(pkt, rx_frame->ack_seb);
#endif

		LOG_DBG("Caught a packet (%u) (LQI: %u)",
			 pkt_len, rx_frame->lqi);

		if (net_recv_data(nrf5_radio->iface, pkt) < 0) {
			LOG_ERR("Packet dropped by NET stack");
			goto drop;
		}

		psdu = rx_frame->psdu;
		rx_frame->psdu = NULL;
		nrf_802154_buffer_free_raw(psdu);

		if (LOG_LEVEL >= LOG_LEVEL_DBG) {
			log_stack_usage(&nrf5_radio->rx_thread);
		}

		continue;

drop:
		psdu = rx_frame->psdu;
		rx_frame->psdu = NULL;
		nrf_802154_buffer_free_raw(psdu);

		net_pkt_unref(pkt);
	}
}

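/* Translate the radio driver's runtime capabilities into Zephyr
 * ieee802154_hw_caps flags; the result is cached in nrf5_data.capabilities.
 */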
static void nrf5_get_capabilities_at_boot(void)
{
	nrf_802154_capabilities_t caps = nrf_802154_capabilities_get();

	nrf5_data.capabilities =
		IEEE802154_HW_FCS |
		IEEE802154_HW_PROMISC |
		IEEE802154_HW_FILTER |
		((caps & NRF_802154_CAPABILITY_CSMA) ? IEEE802154_HW_CSMA : 0UL) |
		IEEE802154_HW_TX_RX_ACK |
		IEEE802154_HW_RX_TX_ACK |
		IEEE802154_HW_ENERGY_SCAN |
		((caps & NRF_802154_CAPABILITY_DELAYED_TX) ? IEEE802154_HW_TXTIME : 0UL) |
		((caps & NRF_802154_CAPABILITY_DELAYED_RX) ? IEEE802154_HW_RXTIME : 0UL) |
		IEEE802154_HW_SLEEP_TO_TX |
		IEEE802154_RX_ON_WHEN_IDLE |
		((caps & NRF_802154_CAPABILITY_SECURITY) ? IEEE802154_HW_TX_SEC : 0UL)
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
		| IEEE802154_OPENTHREAD_HW_MULTIPLE_CCA
#endif
		;
}

/* Radio device API */

static enum ieee802154_hw_caps nrf5_get_capabilities(const struct device *dev)
{
	return nrf5_data.capabilities;
}

static int nrf5_cca(const struct device *dev)
{
	struct nrf5_802154_data *nrf5_radio = NRF5_802154_DATA(dev);

	if (!nrf_802154_cca()) {
		LOG_DBG("CCA failed");
		return -EBUSY;
	}

	/* The nRF driver guarantees that a callback will be called once
	 * the CCA function is done, thus unlocking the semaphore.
	 */
	k_sem_take(&nrf5_radio->cca_wait, K_FOREVER);

	LOG_DBG("Channel free? %d", nrf5_radio->channel_free);

	return nrf5_radio->channel_free ? 0 : -EBUSY;
}

static int nrf5_set_channel(const struct device *dev, uint16_t channel)
{
	ARG_UNUSED(dev);

	LOG_DBG("%u", channel);

	if (channel < 11 || channel > 26) {
		return channel < 11 ? -ENOTSUP : -EINVAL;
	}

	nrf_802154_channel_set(channel);

	return 0;
}

static int nrf5_energy_scan_start(const struct device *dev,
				  uint16_t duration,
				  energy_scan_done_cb_t done_cb)
{
	int err = 0;

	ARG_UNUSED(dev);

	if (nrf5_data.energy_scan_done == NULL) {
		nrf5_data.energy_scan_done = done_cb;

		if (nrf_802154_energy_detection(duration * 1000) == false) {
			nrf5_data.energy_scan_done = NULL;
			err = -EBUSY;
		}
	} else {
		err = -EALREADY;
	}

	return err;
}

static int nrf5_set_pan_id(const struct device *dev, uint16_t pan_id)
{
	uint8_t pan_id_le[2];

	ARG_UNUSED(dev);

	sys_put_le16(pan_id, pan_id_le);
	nrf_802154_pan_id_set(pan_id_le);

	LOG_DBG("0x%x", pan_id);

	return 0;
}

static int nrf5_set_short_addr(const struct device *dev, uint16_t short_addr)
{
	uint8_t short_addr_le[2];

	ARG_UNUSED(dev);

	sys_put_le16(short_addr, short_addr_le);
	nrf_802154_short_address_set(short_addr_le);

	LOG_DBG("0x%x", short_addr);

	return 0;
}

static int nrf5_set_ieee_addr(const struct device *dev,
			      const uint8_t *ieee_addr)
{
	ARG_UNUSED(dev);

	LOG_DBG("IEEE address %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
		    ieee_addr[7], ieee_addr[6], ieee_addr[5], ieee_addr[4],
		    ieee_addr[3], ieee_addr[2], ieee_addr[1], ieee_addr[0]);

	nrf_802154_extended_address_set(ieee_addr);

	return 0;
}

static int nrf5_filter(const struct device *dev, bool set,
		       enum ieee802154_filter_type type,
		       const struct ieee802154_filter *filter)
{
	LOG_DBG("Applying filter %u", type);

	if (!set) {
		return -ENOTSUP;
	}

	if (type == IEEE802154_FILTER_TYPE_IEEE_ADDR) {
		return nrf5_set_ieee_addr(dev, filter->ieee_addr);
	} else if (type == IEEE802154_FILTER_TYPE_SHORT_ADDR) {
		return nrf5_set_short_addr(dev, filter->short_addr);
	} else if (type == IEEE802154_FILTER_TYPE_PAN_ID) {
		return nrf5_set_pan_id(dev, filter->pan_id);
	}

	return -ENOTSUP;
}

static int nrf5_set_txpower(const struct device *dev, int16_t dbm)
{
	ARG_UNUSED(dev);

	LOG_DBG("%d", dbm);

	nrf5_data.txpwr = dbm;

	return 0;
}

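/* Convert the ACK frame stored by the transmit-done callback into a net_pkt
 * and pass it to ieee802154_handle_ack(). The radio driver buffer is always
 * released before returning.
 */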
static int handle_ack(struct nrf5_802154_data *nrf5_radio)
{
	uint8_t ack_len;
	struct net_pkt *ack_pkt;
	int err = 0;

	if (IS_ENABLED(CONFIG_IEEE802154_NRF5_FCS_IN_LENGTH)) {
		ack_len = nrf5_radio->ack_frame.psdu[0];
	} else {
		ack_len = nrf5_radio->ack_frame.psdu[0] - IEEE802154_FCS_LENGTH;
	}

	ack_pkt = net_pkt_rx_alloc_with_buffer(nrf5_radio->iface, ack_len,
					       AF_UNSPEC, 0, K_NO_WAIT);
	if (!ack_pkt) {
		LOG_ERR("No free packet available.");
		err = -ENOMEM;
		goto free_nrf_ack;
	}

	/* Upper layers expect the frame to start at the MAC header, so skip
	 * the PHY header (1 byte).
	 */
	if (net_pkt_write(ack_pkt, nrf5_radio->ack_frame.psdu + 1,
			  ack_len) < 0) {
		LOG_ERR("Failed to write to a packet.");
		err = -ENOMEM;
		goto free_net_ack;
	}

	net_pkt_set_ieee802154_lqi(ack_pkt, nrf5_radio->ack_frame.lqi);
	net_pkt_set_ieee802154_rssi_dbm(ack_pkt, nrf5_radio->ack_frame.rssi);

#if defined(CONFIG_NET_PKT_TIMESTAMP)
	net_pkt_set_timestamp_ns(ack_pkt, nrf5_radio->ack_frame.time * NSEC_PER_USEC);
#endif

	net_pkt_cursor_init(ack_pkt);

	if (ieee802154_handle_ack(nrf5_radio->iface, ack_pkt) != NET_OK) {
		LOG_INF("ACK packet not handled - releasing.");
	}

free_net_ack:
	net_pkt_unref(ack_pkt);

free_nrf_ack:
	nrf_802154_buffer_free_raw(nrf5_radio->ack_frame.psdu);
	nrf5_radio->ack_frame.psdu = NULL;

	return err;
}

static void nrf5_tx_started(const struct device *dev,
			    struct net_pkt *pkt,
			    struct net_buf *frag)
{
	ARG_UNUSED(pkt);

	if (nrf5_data.event_handler) {
		nrf5_data.event_handler(dev, IEEE802154_EVENT_TX_STARTED,
					(void *)frag);
	}
}

static bool nrf5_tx_immediate(struct net_pkt *pkt, uint8_t *payload, bool cca)
{
	nrf_802154_transmit_metadata_t metadata = {
		.frame_props = {
			.is_secured = net_pkt_ieee802154_frame_secured(pkt),
			.dynamic_data_is_set = net_pkt_ieee802154_mac_hdr_rdy(pkt),
		},
		.cca = cca,
		.tx_power = {
			.use_metadata_value = true,
			.power = nrf5_data.txpwr,
		},
	};

	return nrf_802154_transmit_raw(payload, &metadata);
}

#if NRF_802154_CSMA_CA_ENABLED
static bool nrf5_tx_csma_ca(struct net_pkt *pkt, uint8_t *payload)
{
	nrf_802154_transmit_csma_ca_metadata_t metadata = {
		.frame_props = {
			.is_secured = net_pkt_ieee802154_frame_secured(pkt),
			.dynamic_data_is_set = net_pkt_ieee802154_mac_hdr_rdy(pkt),
		},
		.tx_power = {
			.use_metadata_value = true,
			.power = nrf5_data.txpwr,
		},
	};

	return nrf_802154_transmit_csma_ca_raw(payload, &metadata);
}
#endif

#if defined(CONFIG_NET_PKT_TXTIME)
static bool nrf5_tx_at(struct nrf5_802154_data *nrf5_radio, struct net_pkt *pkt,
		   uint8_t *payload, enum ieee802154_tx_mode mode)
{
	bool cca = false;
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
	uint8_t max_extra_cca_attempts = 0;
#endif

	switch (mode) {
	case IEEE802154_TX_MODE_TXTIME:
		break;
	case IEEE802154_TX_MODE_TXTIME_CCA:
		cca = true;
		break;
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
	case IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA:
		cca = true;
		max_extra_cca_attempts = nrf5_data.max_extra_cca_attempts;
		break;
#endif
	default:
		__ASSERT_NO_MSG(false);
		return false;
	}

	nrf_802154_transmit_at_metadata_t metadata = {
		.frame_props = {
			.is_secured = net_pkt_ieee802154_frame_secured(pkt),
			.dynamic_data_is_set = net_pkt_ieee802154_mac_hdr_rdy(pkt),
		},
		.cca = cca,
		.channel = nrf_802154_channel_get(),
		.tx_power = {
			.use_metadata_value = true,
			.power = nrf5_data.txpwr,
		},
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
		.extra_cca_attempts = max_extra_cca_attempts,
#endif
	};

	/* The timestamp points to the start of PHR but `nrf_802154_transmit_raw_at`
	 * expects a timestamp pointing to start of SHR.
	 */
	uint64_t tx_at = nrf_802154_timestamp_phr_to_shr_convert(
		net_pkt_timestamp_ns(pkt) / NSEC_PER_USEC);

	return nrf_802154_transmit_raw_at(payload, tx_at, &metadata);
}
#endif /* CONFIG_NET_PKT_TXTIME */

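/* Transmit a single frame: copy the fragment into the PSDU buffer (PHR first),
 * dispatch it according to the requested TX mode, and block on tx_wait until
 * the radio driver reports the result through its callbacks.
 */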
static int nrf5_tx(const struct device *dev,
		   enum ieee802154_tx_mode mode,
		   struct net_pkt *pkt,
		   struct net_buf *frag)
{
	struct nrf5_802154_data *nrf5_radio = NRF5_802154_DATA(dev);
	uint8_t payload_len = frag->len;
	uint8_t *payload = frag->data;
	bool ret = true;

	if (payload_len > IEEE802154_MTU) {
		LOG_ERR("Payload too large: %d", payload_len);
		return -EMSGSIZE;
	}

	LOG_DBG("%p (%u)", payload, payload_len);

	nrf5_radio->tx_psdu[0] = payload_len + IEEE802154_FCS_LENGTH;
	memcpy(nrf5_radio->tx_psdu + 1, payload, payload_len);

	/* Reset semaphore in case ACK was received after timeout */
	k_sem_reset(&nrf5_radio->tx_wait);

	switch (mode) {
	case IEEE802154_TX_MODE_DIRECT:
	case IEEE802154_TX_MODE_CCA:
		ret = nrf5_tx_immediate(pkt, nrf5_radio->tx_psdu,
					mode == IEEE802154_TX_MODE_CCA);
		break;
#if NRF_802154_CSMA_CA_ENABLED
	case IEEE802154_TX_MODE_CSMA_CA:
		ret = nrf5_tx_csma_ca(pkt, nrf5_radio->tx_psdu);
		break;
#endif
#if defined(CONFIG_NET_PKT_TXTIME)
	case IEEE802154_TX_MODE_TXTIME:
	case IEEE802154_TX_MODE_TXTIME_CCA:
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
	case IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA:
#endif
		__ASSERT_NO_MSG(pkt);
		ret = nrf5_tx_at(nrf5_radio, pkt, nrf5_radio->tx_psdu, mode);
		break;
#endif /* CONFIG_NET_PKT_TXTIME */
	default:
		NET_ERR("TX mode %d not supported", mode);
		return -ENOTSUP;
	}

	if (!ret) {
		LOG_ERR("Cannot send frame");
		return -EIO;
	}

	nrf5_tx_started(dev, pkt, frag);

	LOG_DBG("Sending frame (ch:%d, txpower:%d)",
		nrf_802154_channel_get(), nrf_802154_tx_power_get());

	/* Wait for the callback from the radio driver. */
	k_sem_take(&nrf5_radio->tx_wait, K_FOREVER);

	LOG_DBG("Result: %d", nrf5_data.tx_result);

#if defined(CONFIG_NRF_802154_ENCRYPTION)
	/*
	 * When frame encryption by the radio driver is enabled, the frame stored
	 * in the tx_psdu buffer is modified in place:
	 * 1) it is authenticated and encrypted, so after an unsuccessful TX
	 *    attempt the secured frame must be propagated back to the upper layer
	 *    for retransmission; the upper layer must ensure that the exact same
	 *    secured frame is used for the retransmission.
	 * 2) its frame counters are updated, and this information must be
	 *    propagated back to the upper layer to keep the link frame counter
	 *    up to date.
	 */
	memcpy(payload, nrf5_radio->tx_psdu + 1, payload_len);
#endif
	net_pkt_set_ieee802154_frame_secured(pkt, nrf5_radio->tx_frame_is_secured);
	net_pkt_set_ieee802154_mac_hdr_rdy(pkt, nrf5_radio->tx_frame_mac_hdr_rdy);

	switch (nrf5_radio->tx_result) {
	case NRF_802154_TX_ERROR_NONE:
		if (nrf5_radio->ack_frame.psdu == NULL) {
			/* No ACK was requested. */
			return 0;
		}
		/* Handle ACK packet. */
		return handle_ack(nrf5_radio);
	case NRF_802154_TX_ERROR_NO_MEM:
		return -ENOBUFS;
	case NRF_802154_TX_ERROR_BUSY_CHANNEL:
		return -EBUSY;
	case NRF_802154_TX_ERROR_INVALID_ACK:
	case NRF_802154_TX_ERROR_NO_ACK:
		return -ENOMSG;
	case NRF_802154_TX_ERROR_ABORTED:
	case NRF_802154_TX_ERROR_TIMESLOT_DENIED:
	case NRF_802154_TX_ERROR_TIMESLOT_ENDED:
	default:
		return -EIO;
	}
}

static net_time_t nrf5_get_time(const struct device *dev)
{
	ARG_UNUSED(dev);

	return (net_time_t)nrf_802154_time_get() * NSEC_PER_USEC;
}

static uint8_t nrf5_get_acc(const struct device *dev)
{
	ARG_UNUSED(dev);

	return CONFIG_IEEE802154_NRF5_DELAY_TRX_ACC;
}

static int nrf5_start(const struct device *dev)
{
	ARG_UNUSED(dev);

	nrf_802154_tx_power_set(nrf5_data.txpwr);

	if (!nrf_802154_receive()) {
		LOG_ERR("Failed to enter receive state");
		return -EIO;
	}

	LOG_DBG("nRF5 802154 radio started (channel: %d)",
		nrf_802154_channel_get());

	return 0;
}

static int nrf5_stop(const struct device *dev)
{
#if defined(CONFIG_IEEE802154_CSL_ENDPOINT)
	if (nrf_802154_sleep_if_idle() != NRF_802154_SLEEP_ERROR_NONE) {
		if (nrf5_data.event_handler) {
			nrf5_data.event_handler(dev, IEEE802154_EVENT_RX_OFF, NULL);
		} else {
			LOG_WRN("Transition to radio sleep cannot be handled.");
		}
		Z_SPIN_DELAY(1);
		return 0;
	}
#else
	ARG_UNUSED(dev);

	if (!nrf_802154_sleep()) {
		LOG_ERR("Error while stopping radio");
		return -EIO;
	}
#endif

	LOG_DBG("nRF5 802154 radio stopped");

	return 0;
}

#if defined(CONFIG_NRF_802154_CARRIER_FUNCTIONS)
static int nrf5_continuous_carrier(const struct device *dev)
{
	ARG_UNUSED(dev);

	nrf_802154_tx_power_set(nrf5_data.txpwr);

	if (!nrf_802154_continuous_carrier()) {
		LOG_ERR("Failed to enter continuous carrier state");
		return -EIO;
	}

	LOG_DBG("Continuous carrier wave transmission started (channel: %d)",
		nrf_802154_channel_get());

	return 0;
}
#endif

#if !IS_ENABLED(CONFIG_IEEE802154_NRF5_EXT_IRQ_MGMT)
static void nrf5_radio_irq(const void *arg)
{
	ARG_UNUSED(arg);

	nrf_802154_radio_irq_handler();
}
#endif

static void nrf5_irq_config(const struct device *dev)
{
	ARG_UNUSED(dev);

#if !IS_ENABLED(CONFIG_IEEE802154_NRF5_EXT_IRQ_MGMT)
	IRQ_CONNECT(RADIO_IRQn, NRF_802154_IRQ_PRIORITY,
		    nrf5_radio_irq, NULL, 0);
	irq_enable(RADIO_IRQn);
#endif
}

static int nrf5_init(const struct device *dev)
{
	const struct nrf5_802154_config *nrf5_radio_cfg = NRF5_802154_CFG(dev);
	struct nrf5_802154_data *nrf5_radio = NRF5_802154_DATA(dev);
#if defined(CONFIG_IEEE802154_RAW_MODE)
	nrf5_dev = dev;
#endif

	k_fifo_init(&nrf5_radio->rx_fifo);
	k_sem_init(&nrf5_radio->tx_wait, 0, 1);
	k_sem_init(&nrf5_radio->cca_wait, 0, 1);

	nrf_802154_init();

	nrf5_get_capabilities_at_boot();

	nrf5_radio->rx_on_when_idle = true;
	nrf5_radio_cfg->irq_config_func(dev);

	k_thread_create(&nrf5_radio->rx_thread, nrf5_radio->rx_stack,
			CONFIG_IEEE802154_NRF5_RX_STACK_SIZE,
			nrf5_rx_thread, nrf5_radio, NULL, NULL,
			K_PRIO_COOP(2), 0, K_NO_WAIT);

	k_thread_name_set(&nrf5_radio->rx_thread, "nrf5_rx");

	LOG_INF("nRF5 802154 radio initialized");

	return 0;
}

static void nrf5_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct nrf5_802154_data *nrf5_radio = NRF5_802154_DATA(dev);

	nrf5_get_eui64(nrf5_radio->mac);
	net_if_set_link_addr(iface, nrf5_radio->mac, sizeof(nrf5_radio->mac),
			     NET_LINK_IEEE802154);

	nrf5_radio->iface = iface;

	ieee802154_init(iface);
}

#if defined(CONFIG_NRF_802154_ENCRYPTION)
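/* Replace the radio driver's key storage with the set of cleartext MAC keys
 * provided by the upper layer.
 */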
static void nrf5_config_mac_keys(struct ieee802154_key *mac_keys)
{
	nrf_802154_security_key_remove_all();

	for (uint8_t i = 0; mac_keys->key_value
			&& i < NRF_802154_SECURITY_KEY_STORAGE_SIZE; mac_keys++, i++) {
		nrf_802154_key_t key = {
			.value.p_cleartext_key = mac_keys->key_value,
			.id.mode = mac_keys->key_id_mode,
			.id.p_key_id = mac_keys->key_id,
			.type = NRF_802154_KEY_CLEARTEXT,
			.frame_counter = 0,
			.use_global_frame_counter = !(mac_keys->frame_counter_per_key),
		};

		__ASSERT_EVAL((void)nrf_802154_security_key_store(&key),
			nrf_802154_security_error_t err = nrf_802154_security_key_store(&key),
			err == NRF_802154_SECURITY_ERROR_NONE ||
			err == NRF_802154_SECURITY_ERROR_ALREADY_PRESENT,
			"Storing key failed, err: %d", err);
	}
}
#endif /* CONFIG_NRF_802154_ENCRYPTION */

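/* Runtime configuration hook: map Zephyr ieee802154_config_type requests onto
 * the corresponding nrf_802154 API calls.
 */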
static int nrf5_configure(const struct device *dev,
			  enum ieee802154_config_type type,
			  const struct ieee802154_config *config)
{
	ARG_UNUSED(dev);

	switch (type) {
	case IEEE802154_CONFIG_AUTO_ACK_FPB:
		if (config->auto_ack_fpb.enabled) {
			switch (config->auto_ack_fpb.mode) {
			case IEEE802154_FPB_ADDR_MATCH_THREAD:
				nrf_802154_src_addr_matching_method_set(
					NRF_802154_SRC_ADDR_MATCH_THREAD);
				break;

			case IEEE802154_FPB_ADDR_MATCH_ZIGBEE:
				nrf_802154_src_addr_matching_method_set(
					NRF_802154_SRC_ADDR_MATCH_ZIGBEE);
				break;

			default:
				return -EINVAL;
			}
		}

		nrf_802154_auto_pending_bit_set(config->auto_ack_fpb.enabled);
		break;

	case IEEE802154_CONFIG_ACK_FPB:
		if (config->ack_fpb.enabled) {
			if (!nrf_802154_pending_bit_for_addr_set(
						config->ack_fpb.addr,
						config->ack_fpb.extended)) {
				return -ENOMEM;
			}

			break;
		}

		if (config->ack_fpb.addr != NULL) {
			if (!nrf_802154_pending_bit_for_addr_clear(
						config->ack_fpb.addr,
						config->ack_fpb.extended)) {
				return -ENOENT;
			}
		} else {
			nrf_802154_pending_bit_for_addr_reset(
						config->ack_fpb.extended);
		}

		break;

	case IEEE802154_CONFIG_PAN_COORDINATOR:
		nrf_802154_pan_coord_set(config->pan_coordinator);
		break;

	case IEEE802154_CONFIG_PROMISCUOUS:
		nrf_802154_promiscuous_set(config->promiscuous);
		break;

	case IEEE802154_CONFIG_EVENT_HANDLER:
		nrf5_data.event_handler = config->event_handler;
		break;

#if defined(CONFIG_NRF_802154_ENCRYPTION)
	case IEEE802154_CONFIG_MAC_KEYS:
		nrf5_config_mac_keys(config->mac_keys);
		break;

	case IEEE802154_CONFIG_FRAME_COUNTER:
		nrf_802154_security_global_frame_counter_set(config->frame_counter);
		break;

	case IEEE802154_CONFIG_FRAME_COUNTER_IF_LARGER:
		nrf_802154_security_global_frame_counter_set_if_larger(config->frame_counter);
		break;
#endif /* CONFIG_NRF_802154_ENCRYPTION */

	case IEEE802154_CONFIG_ENH_ACK_HEADER_IE: {
		uint8_t ext_addr_le[EXTENDED_ADDRESS_SIZE];
		uint8_t short_addr_le[SHORT_ADDRESS_SIZE];
		uint8_t element_id;
		bool valid_vendor_specific_ie = false;

		if (config->ack_ie.purge_ie) {
			nrf_802154_ack_data_remove_all(false, NRF_802154_ACK_DATA_IE);
			nrf_802154_ack_data_remove_all(true, NRF_802154_ACK_DATA_IE);
			break;
		}

		if (config->ack_ie.short_addr == IEEE802154_BROADCAST_ADDRESS ||
		    config->ack_ie.ext_addr == NULL) {
			return -ENOTSUP;
		}

		sys_put_le16(config->ack_ie.short_addr, short_addr_le);
		sys_memcpy_swap(ext_addr_le, config->ack_ie.ext_addr, EXTENDED_ADDRESS_SIZE);

		if (config->ack_ie.header_ie == NULL || config->ack_ie.header_ie->length == 0) {
			nrf_802154_ack_data_clear(short_addr_le, false, NRF_802154_ACK_DATA_IE);
			nrf_802154_ack_data_clear(ext_addr_le, true, NRF_802154_ACK_DATA_IE);
		} else {
			element_id = ieee802154_header_ie_get_element_id(config->ack_ie.header_ie);

#if defined(CONFIG_NET_L2_OPENTHREAD)
			uint8_t vendor_oui_le[IEEE802154_OPENTHREAD_VENDOR_OUI_LEN] =
				IEEE802154_OPENTHREAD_THREAD_IE_VENDOR_OUI;

			if (element_id == IEEE802154_HEADER_IE_ELEMENT_ID_VENDOR_SPECIFIC_IE &&
			    memcmp(config->ack_ie.header_ie->content.vendor_specific.vendor_oui,
				   vendor_oui_le, sizeof(vendor_oui_le)) == 0) {
				valid_vendor_specific_ie = true;
			}
#endif

			if (element_id != IEEE802154_HEADER_IE_ELEMENT_ID_CSL_IE &&
			    !valid_vendor_specific_ie) {
				return -ENOTSUP;
			}

			nrf_802154_ack_data_set(short_addr_le, false, config->ack_ie.header_ie,
						config->ack_ie.header_ie->length +
							IEEE802154_HEADER_IE_HEADER_LENGTH,
						NRF_802154_ACK_DATA_IE);
			nrf_802154_ack_data_set(ext_addr_le, true, config->ack_ie.header_ie,
						config->ack_ie.header_ie->length +
							IEEE802154_HEADER_IE_HEADER_LENGTH,
						NRF_802154_ACK_DATA_IE);
		}
	} break;

#if defined(CONFIG_IEEE802154_CSL_ENDPOINT)
	case IEEE802154_CONFIG_EXPECTED_RX_TIME: {

#if defined(CONFIG_NRF_802154_SER_HOST)
		net_time_t period_ns = nrf5_data.csl_period * NSEC_PER_TEN_SYMBOLS;
		bool changed = (config->expected_rx_time - nrf5_data.csl_rx_time) % period_ns;

		nrf5_data.csl_rx_time = config->expected_rx_time;

		if (changed)
#endif /* CONFIG_NRF_802154_SER_HOST */
		{
			nrf_802154_csl_writer_anchor_time_set(
				nrf_802154_timestamp_phr_to_mhr_convert(config->expected_rx_time /
									NSEC_PER_USEC));
		}
	} break;

	case IEEE802154_CONFIG_RX_SLOT: {
		/* Note that even if the nrf_802154_receive_at function is not called in time
		 * (for example due to the call being blocked by higher priority threads) and
		 * the delayed reception window is not scheduled, the CSL phase will still be
		 * calculated as if the following reception windows were at times
		 * anchor_time + n * csl_period. The previously set
		 * anchor_time will be used for calculations.
		 */
		nrf_802154_receive_at(config->rx_slot.start / NSEC_PER_USEC,
				      config->rx_slot.duration / NSEC_PER_USEC,
				      config->rx_slot.channel, DRX_SLOT_RX);
	} break;

	case IEEE802154_CONFIG_CSL_PERIOD: {
		nrf_802154_csl_writer_period_set(config->csl_period);
#if defined(CONFIG_NRF_802154_SER_HOST)
		nrf5_data.csl_period = config->csl_period;
#endif
	} break;
#endif /* CONFIG_IEEE802154_CSL_ENDPOINT */

#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
	case IEEE802154_OPENTHREAD_CONFIG_MAX_EXTRA_CCA_ATTEMPTS:
		nrf5_data.max_extra_cca_attempts =
			((const struct ieee802154_openthread_config *)config)
				->max_extra_cca_attempts;
		break;
#endif /* CONFIG_IEEE802154_NRF5_MULTIPLE_CCA */

	case IEEE802154_CONFIG_RX_ON_WHEN_IDLE:
		nrf_802154_rx_on_when_idle_set(config->rx_on_when_idle);
		nrf5_data.rx_on_when_idle = config->rx_on_when_idle;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* driver-allocated attribute memory - constant across all driver instances */
IEEE802154_DEFINE_PHY_SUPPORTED_CHANNELS(drv_attr, 11, 26);

static int nrf5_attr_get(const struct device *dev,
			 enum ieee802154_attr attr,
			 struct ieee802154_attr_value *value)
{
	ARG_UNUSED(dev);

	if (ieee802154_attr_get_channel_page_and_range(
		    attr, IEEE802154_ATTR_PHY_CHANNEL_PAGE_ZERO_OQPSK_2450_BPSK_868_915,
		    &drv_attr.phy_supported_channels, value) == 0) {
		return 0;
	}

	switch ((uint32_t)attr) {
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
	/* TODO: t_recca and t_ccatx should be provided by the public API of the
	 * nRF 802.15.4 Radio Driver.
	 */
	case IEEE802154_OPENTHREAD_ATTR_T_RECCA:
		((struct ieee802154_openthread_attr_value *)value)->t_recca = 0;
		break;
	case IEEE802154_OPENTHREAD_ATTR_T_CCATX:
		((struct ieee802154_openthread_attr_value *)value)->t_ccatx = 20;
		break;
#endif
	default:
		return -ENOENT;
	}

	return 0;
}

/* nRF5 radio driver callbacks */

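/* Called by the radio driver for every received frame: stash the frame in a
 * free rx_frames slot and queue it for the RX thread. The PSDU buffer stays
 * owned by this driver until it is returned with nrf_802154_buffer_free_raw().
 */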
void nrf_802154_received_timestamp_raw(uint8_t *data, int8_t power, uint8_t lqi, uint64_t time)
{
	for (uint32_t i = 0; i < ARRAY_SIZE(nrf5_data.rx_frames); i++) {
		if (nrf5_data.rx_frames[i].psdu != NULL) {
			continue;
		}

		nrf5_data.rx_frames[i].psdu = data;
		nrf5_data.rx_frames[i].rssi = power;
		nrf5_data.rx_frames[i].lqi = lqi;

#if defined(CONFIG_NET_PKT_TIMESTAMP)
		nrf5_data.rx_frames[i].time =
			nrf_802154_timestamp_end_to_phr_convert(time, data[0]);
#endif

		nrf5_data.rx_frames[i].ack_fpb = nrf5_data.last_frame_ack_fpb;
		nrf5_data.rx_frames[i].ack_seb = nrf5_data.last_frame_ack_seb;
		nrf5_data.last_frame_ack_fpb = false;
		nrf5_data.last_frame_ack_seb = false;

		k_fifo_put(&nrf5_data.rx_fifo, &nrf5_data.rx_frames[i]);

		return;
	}

	__ASSERT(false, "Not enough rx frames allocated for 15.4 driver");
}

void nrf_802154_receive_failed(nrf_802154_rx_error_t error, uint32_t id)
{
	const struct device *dev = nrf5_get_device();

#if defined(CONFIG_IEEE802154_CSL_ENDPOINT)
	if (id == DRX_SLOT_RX && error == NRF_802154_RX_ERROR_DELAYED_TIMEOUT) {
		if (!nrf5_data.rx_on_when_idle) {
			/* Transition to RxOff done automatically by the driver */
			return;
		} else if (nrf5_data.event_handler) {
			/* Notify the higher layer to allow it to transition if needed */
			nrf5_data.event_handler(dev, IEEE802154_EVENT_RX_OFF, NULL);
		}
	}
#else
	ARG_UNUSED(id);
#endif

	enum ieee802154_rx_fail_reason reason;

	switch (error) {
	case NRF_802154_RX_ERROR_INVALID_FRAME:
	case NRF_802154_RX_ERROR_DELAYED_TIMEOUT:
		reason = IEEE802154_RX_FAIL_NOT_RECEIVED;
		break;

	case NRF_802154_RX_ERROR_INVALID_FCS:
		reason = IEEE802154_RX_FAIL_INVALID_FCS;
		break;

	case NRF_802154_RX_ERROR_INVALID_DEST_ADDR:
		reason = IEEE802154_RX_FAIL_ADDR_FILTERED;
		break;

	default:
		reason = IEEE802154_RX_FAIL_OTHER;
		break;
	}

	if (IS_ENABLED(CONFIG_IEEE802154_NRF5_LOG_RX_FAILURES)) {
		LOG_INF("Rx failed, error = %d", error);
	}

	nrf5_data.last_frame_ack_fpb = false;
	nrf5_data.last_frame_ack_seb = false;

	if (nrf5_data.event_handler) {
		nrf5_data.event_handler(dev, IEEE802154_EVENT_RX_FAILED, (void *)&reason);
	}
}

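/* Called when the radio starts transmitting an ACK: latch the frame pending
 * and security enabled bits so they can be attached to the received frame.
 */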
void nrf_802154_tx_ack_started(const uint8_t *data)
{
	nrf5_data.last_frame_ack_fpb = data[FRAME_PENDING_OFFSET] & FRAME_PENDING_BIT;
	nrf5_data.last_frame_ack_seb = data[SECURITY_ENABLED_OFFSET] & SECURITY_ENABLED_BIT;
}

void nrf_802154_transmitted_raw(uint8_t *frame,
				const nrf_802154_transmit_done_metadata_t *metadata)
{
	ARG_UNUSED(frame);

	nrf5_data.tx_result = NRF_802154_TX_ERROR_NONE;
	nrf5_data.tx_frame_is_secured = metadata->frame_props.is_secured;
	nrf5_data.tx_frame_mac_hdr_rdy = metadata->frame_props.dynamic_data_is_set;
	nrf5_data.ack_frame.psdu = metadata->data.transmitted.p_ack;

	if (nrf5_data.ack_frame.psdu) {
		nrf5_data.ack_frame.rssi = metadata->data.transmitted.power;
		nrf5_data.ack_frame.lqi = metadata->data.transmitted.lqi;

#if defined(CONFIG_NET_PKT_TIMESTAMP)
		nrf5_data.ack_frame.time = nrf_802154_timestamp_end_to_phr_convert(
			metadata->data.transmitted.time, nrf5_data.ack_frame.psdu[0]);
#endif
	}

	k_sem_give(&nrf5_data.tx_wait);
}

void nrf_802154_transmit_failed(uint8_t *frame,
				nrf_802154_tx_error_t error,
				const nrf_802154_transmit_done_metadata_t *metadata)
{
	ARG_UNUSED(frame);

	nrf5_data.tx_result = error;
	nrf5_data.tx_frame_is_secured = metadata->frame_props.is_secured;
	nrf5_data.tx_frame_mac_hdr_rdy = metadata->frame_props.dynamic_data_is_set;

	k_sem_give(&nrf5_data.tx_wait);
}

void nrf_802154_cca_done(bool channel_free)
{
	nrf5_data.channel_free = channel_free;

	k_sem_give(&nrf5_data.cca_wait);
}

void nrf_802154_cca_failed(nrf_802154_cca_error_t error)
{
	ARG_UNUSED(error);

	nrf5_data.channel_free = false;

	k_sem_give(&nrf5_data.cca_wait);
}

void nrf_802154_energy_detected(const nrf_802154_energy_detected_t *result)
{
	if (nrf5_data.energy_scan_done != NULL) {
		energy_scan_done_cb_t callback = nrf5_data.energy_scan_done;

		nrf5_data.energy_scan_done = NULL;
		callback(nrf5_get_device(), result->ed_dbm);
	}
}

void nrf_802154_energy_detection_failed(nrf_802154_ed_error_t error)
{
	if (nrf5_data.energy_scan_done != NULL) {
		energy_scan_done_cb_t callback = nrf5_data.energy_scan_done;

		nrf5_data.energy_scan_done = NULL;
		callback(nrf5_get_device(), SHRT_MAX);
	}
}

#if defined(CONFIG_NRF_802154_SER_HOST)
void nrf_802154_serialization_error(const nrf_802154_ser_err_data_t *err)
{
	__ASSERT(false, "802.15.4 serialization error: %d", err->reason);
	k_oops();
}
#endif

static const struct nrf5_802154_config nrf5_radio_cfg = {
	.irq_config_func = nrf5_irq_config,
};

static const struct ieee802154_radio_api nrf5_radio_api = {
	.iface_api.init = nrf5_iface_init,

	.get_capabilities = nrf5_get_capabilities,
	.cca = nrf5_cca,
	.set_channel = nrf5_set_channel,
	.filter = nrf5_filter,
	.set_txpower = nrf5_set_txpower,
	.start = nrf5_start,
	.stop = nrf5_stop,
#if defined(CONFIG_NRF_802154_CARRIER_FUNCTIONS)
	.continuous_carrier = nrf5_continuous_carrier,
#endif
	.tx = nrf5_tx,
	.ed_scan = nrf5_energy_scan_start,
	.get_time = nrf5_get_time,
	.get_sch_acc = nrf5_get_acc,
	.configure = nrf5_configure,
	.attr_get = nrf5_attr_get
};

#if defined(CONFIG_NET_L2_IEEE802154)
#define L2 IEEE802154_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(IEEE802154_L2)
#define MTU IEEE802154_MTU
#elif defined(CONFIG_NET_L2_OPENTHREAD)
#define L2 OPENTHREAD_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(OPENTHREAD_L2)
#define MTU 1280
#elif defined(CONFIG_NET_L2_CUSTOM_IEEE802154)
#define L2 CUSTOM_IEEE802154_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(CUSTOM_IEEE802154_L2)
#define MTU CONFIG_NET_L2_CUSTOM_IEEE802154_MTU
#endif

#if defined(CONFIG_NET_L2_PHY_IEEE802154)
NET_DEVICE_DT_INST_DEFINE(0, nrf5_init, NULL, &nrf5_data, &nrf5_radio_cfg,
			  CONFIG_IEEE802154_NRF5_INIT_PRIO, &nrf5_radio_api, L2,
			  L2_CTX_TYPE, MTU);
#else
DEVICE_DT_INST_DEFINE(0, nrf5_init, NULL, &nrf5_data, &nrf5_radio_cfg,
		      POST_KERNEL, CONFIG_IEEE802154_NRF5_INIT_PRIO,
		      &nrf5_radio_api);
#endif