1 /*
2 * Copyright (c) 2021 Telink Semiconductor
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT telink_b91_zb
8
9 #include "rf.h"
10 #include "stimer.h"
11
12 #define LOG_MODULE_NAME ieee802154_b91
13 #if defined(CONFIG_IEEE802154_DRIVER_LOG_LEVEL)
14 #define LOG_LEVEL CONFIG_IEEE802154_DRIVER_LOG_LEVEL
15 #else
16 #define LOG_LEVEL LOG_LEVEL_NONE
17 #endif
18
19 #include <zephyr/logging/log.h>
20 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
21
22 #include <zephyr/random/random.h>
23 #include <zephyr/net/ieee802154_radio.h>
24 #include <zephyr/irq.h>
25 #if defined(CONFIG_NET_L2_OPENTHREAD)
26 #include <zephyr/net/openthread.h>
27 #endif
28
29 #include <zephyr/drivers/interrupt_controller/riscv_plic.h>
30
31 #include "ieee802154_b91.h"
32
33
/* B91 data structure: single driver instance state, shared between
 * the radio API calls and the RF interrupt handlers
 */
static struct b91_data data;
36
37 /* Set filter PAN ID */
b91_set_pan_id(uint16_t pan_id)38 static int b91_set_pan_id(uint16_t pan_id)
39 {
40 uint8_t pan_id_le[B91_PAN_ID_SIZE];
41
42 sys_put_le16(pan_id, pan_id_le);
43 memcpy(data.filter_pan_id, pan_id_le, B91_PAN_ID_SIZE);
44
45 return 0;
46 }
47
48 /* Set filter short address */
b91_set_short_addr(uint16_t short_addr)49 static int b91_set_short_addr(uint16_t short_addr)
50 {
51 uint8_t short_addr_le[B91_SHORT_ADDRESS_SIZE];
52
53 sys_put_le16(short_addr, short_addr_le);
54 memcpy(data.filter_short_addr, short_addr_le, B91_SHORT_ADDRESS_SIZE);
55
56 return 0;
57 }
58
/* Set filter IEEE address
 *
 * Copies the B91_IEEE_ADDRESS_SIZE-byte extended address into the
 * filter state used by b91_run_filter(). The caller retains ownership
 * of ieee_addr. Always returns 0.
 */
static int b91_set_ieee_addr(const uint8_t *ieee_addr)
{
	memcpy(data.filter_ieee_addr, ieee_addr, B91_IEEE_ADDRESS_SIZE);

	return 0;
}
66
/* Filter PAN ID, short address and IEEE address
 *
 * rx_buffer points at the received MAC frame. Returns true when the
 * frame is addressed to this node (or broadcast), false when it
 * should be dropped.
 *
 * NOTE(review): the destination fields are read at fixed offsets
 * (B91_PAN_ID_OFFSET / B91_DEST_ADDR_OFFSET), which assumes frames
 * with destination addressing present - confirm the caller only
 * passes such frames here.
 */
static bool b91_run_filter(uint8_t *rx_buffer)
{
	/* Check destination PAN Id: accept our PAN ID or broadcast */
	if (memcmp(&rx_buffer[B91_PAN_ID_OFFSET], data.filter_pan_id,
		   B91_PAN_ID_SIZE) != 0 &&
	    memcmp(&rx_buffer[B91_PAN_ID_OFFSET], B91_BROADCAST_ADDRESS,
		   B91_PAN_ID_SIZE) != 0) {
		return false;
	}

	/* Check destination address */
	switch (rx_buffer[B91_DEST_ADDR_TYPE_OFFSET] & B91_DEST_ADDR_TYPE_MASK) {
	case B91_DEST_ADDR_TYPE_SHORT:
		/* Accept the broadcast short address or our own short address */
		if (memcmp(&rx_buffer[B91_DEST_ADDR_OFFSET], B91_BROADCAST_ADDRESS,
			   B91_SHORT_ADDRESS_SIZE) != 0 &&
		    memcmp(&rx_buffer[B91_DEST_ADDR_OFFSET], data.filter_short_addr,
			   B91_SHORT_ADDRESS_SIZE) != 0) {
			return false;
		}
		break;

	case B91_DEST_ADDR_TYPE_IEEE:
		/* Accept only when our link address is extended and matches */
		if ((net_if_get_link_addr(data.iface)->len != B91_IEEE_ADDRESS_SIZE) ||
		    memcmp(&rx_buffer[B91_DEST_ADDR_OFFSET], data.filter_ieee_addr,
			   B91_IEEE_ADDRESS_SIZE) != 0) {
			return false;
		}
		break;

	default:
		/* No or unknown destination addressing mode */
		return false;
	}

	return true;
}
106
/* Get MAC address
 *
 * Fills b91->mac_addr and returns a pointer to it: either a freshly
 * generated random, locally administered address
 * (CONFIG_IEEE802154_B91_RANDOM_MAC) or a fixed Telink vendor prefix
 * plus the Kconfig-selected extended identifier bytes.
 */
static inline uint8_t *b91_get_mac(const struct device *dev)
{
	struct b91_data *b91 = dev->data;

#if defined(CONFIG_IEEE802154_B91_RANDOM_MAC)
	sys_rand_get(b91->mac_addr, sizeof(b91->mac_addr));

	/*
	 * Clear bit 0 to ensure it isn't a multicast address and set
	 * bit 1 to indicate address is locally administered and may
	 * not be globally unique.
	 */
	b91->mac_addr[0] = (b91->mac_addr[0] & ~0x01) | 0x02;
#else
	/* Vendor Unique Identifier */
	b91->mac_addr[0] = 0xC4;
	b91->mac_addr[1] = 0x19;
	b91->mac_addr[2] = 0xD1;
	b91->mac_addr[3] = 0x00;

	/* Extended Unique Identifier (configured via Kconfig) */
	b91->mac_addr[4] = CONFIG_IEEE802154_B91_MAC4;
	b91->mac_addr[5] = CONFIG_IEEE802154_B91_MAC5;
	b91->mac_addr[6] = CONFIG_IEEE802154_B91_MAC6;
	b91->mac_addr[7] = CONFIG_IEEE802154_B91_MAC7;
#endif

	return b91->mac_addr;
}
137
138 /* Convert RSSI to LQI */
b91_convert_rssi_to_lqi(int8_t rssi)139 static uint8_t b91_convert_rssi_to_lqi(int8_t rssi)
140 {
141 uint32_t lqi32 = 0;
142
143 /* check for MIN value */
144 if (rssi < B91_RSSI_TO_LQI_MIN) {
145 return 0;
146 }
147
148 /* convert RSSI to LQI */
149 lqi32 = B91_RSSI_TO_LQI_SCALE * (rssi - B91_RSSI_TO_LQI_MIN);
150
151 /* check for MAX value */
152 if (lqi32 > 0xFF) {
153 lqi32 = 0xFF;
154 }
155
156 return (uint8_t)lqi32;
157 }
158
/* Update RSSI and LQI parameters of a received packet
 *
 * The radio appends a raw RSSI byte after the frame data, at index
 * rx length + B91_RSSI_OFFSET. It is converted to dBm by subtracting
 * 110 (NOTE(review): offset per Telink SDK convention - confirm
 * against the SDK RSSI documentation), then mapped to an LQI value.
 */
static void b91_update_rssi_and_lqi(struct net_pkt *pkt)
{
	int8_t rssi;
	uint8_t lqi;

	/* raw RSSI byte follows the frame in the RX buffer */
	rssi = ((signed char)(data.rx_buffer
			      [data.rx_buffer[B91_LENGTH_OFFSET] + B91_RSSI_OFFSET])) - 110;
	lqi = b91_convert_rssi_to_lqi(rssi);

	net_pkt_set_ieee802154_lqi(pkt, lqi);
	net_pkt_set_ieee802154_rssi_dbm(pkt, rssi);
}
172
173 /* Prepare TX buffer */
b91_set_tx_payload(uint8_t * payload,uint8_t payload_len)174 static int b91_set_tx_payload(uint8_t *payload, uint8_t payload_len)
175 {
176 unsigned char rf_data_len;
177 unsigned int rf_tx_dma_len;
178
179 /* See Telink SDK Dev Handbook, AN-21010600, section 21.5.2.2. */
180 if (payload_len > (B91_TRX_LENGTH - B91_PAYLOAD_OFFSET - IEEE802154_FCS_LENGTH)) {
181 return -EINVAL;
182 }
183
184 rf_data_len = payload_len + 1;
185 rf_tx_dma_len = rf_tx_packet_dma_len(rf_data_len);
186 data.tx_buffer[0] = rf_tx_dma_len & 0xff;
187 data.tx_buffer[1] = (rf_tx_dma_len >> 8) & 0xff;
188 data.tx_buffer[2] = (rf_tx_dma_len >> 16) & 0xff;
189 data.tx_buffer[3] = (rf_tx_dma_len >> 24) & 0xff;
190 data.tx_buffer[4] = payload_len + IEEE802154_FCS_LENGTH;
191 memcpy(data.tx_buffer + B91_PAYLOAD_OFFSET, payload, payload_len);
192
193 return 0;
194 }
195
/* Enable ack handler: incoming ACK frames will be processed by
 * b91_handle_ack() from the RX ISR (used while b91_tx() waits for an ACK)
 */
static void b91_handle_ack_en(void)
{
	data.ack_handler_en = true;
}
201
/* Disable ack handler: incoming ACK frames are dropped by the RX ISR */
static void b91_handle_ack_dis(void)
{
	data.ack_handler_en = false;
}
207
/* Handle acknowledge packet
 *
 * Builds a net_pkt from the received ACK frame, passes it to the L2
 * layer via ieee802154_handle_ack(), and releases the ack_wait
 * semaphore so b91_tx() can complete. The allocated packet is always
 * unreferenced before returning.
 */
static void b91_handle_ack(void)
{
	struct net_pkt *ack_pkt;

	/* allocate ack packet */
	ack_pkt = net_pkt_rx_alloc_with_buffer(data.iface, B91_ACK_FRAME_LEN,
					       AF_UNSPEC, 0, K_NO_WAIT);
	if (!ack_pkt) {
		LOG_ERR("No free packet available.");
		return;
	}

	/* copy the ACK frame out of the RX DMA buffer */
	if (net_pkt_write(ack_pkt, data.rx_buffer + B91_PAYLOAD_OFFSET,
			  B91_ACK_FRAME_LEN) < 0) {
		LOG_ERR("Failed to write to a packet.");
		goto out;
	}

	/* update RSSI and LQI */
	b91_update_rssi_and_lqi(ack_pkt);

	/* rewind the cursor before handing the packet to L2 */
	net_pkt_cursor_init(ack_pkt);

	/* handle ack */
	if (ieee802154_handle_ack(data.iface, ack_pkt) != NET_OK) {
		LOG_INF("ACK packet not handled - releasing.");
	}

	/* unblock b91_tx() waiting on the ACK */
	k_sem_give(&data.ack_wait);

out:
	net_pkt_unref(ack_pkt);
}
245
/* Send acknowledge packet
 *
 * Builds a 3-byte immediate-ACK frame (frame control = B91_ACK_TYPE,
 * zero, then seq_num), switches the radio to TX and transmits it.
 * On TX-buffer setup failure the ACK is silently skipped.
 */
static void b91_send_ack(uint8_t seq_num)
{
	uint8_t ack_buf[] = { B91_ACK_TYPE, 0, seq_num };

	if (b91_set_tx_payload(ack_buf, sizeof(ack_buf))) {
		return;
	}

	rf_set_txmode();
	delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
	rf_tx_pkt(data.tx_buffer);
}
259
/* RX IRQ handler
 *
 * Runs in interrupt context. Validates the received frame (CRC,
 * length), dispatches ACK frames to b91_handle_ack() when enabled,
 * filters by PAN ID / destination address, transmits an ACK when
 * requested, and forwards accepted frames to the network stack.
 * RX DMA is paused for the duration of the handler and re-enabled
 * on every exit path.
 */
static void b91_rf_rx_isr(void)
{
	int status;
	uint8_t length;
	uint8_t *payload;
	struct net_pkt *pkt;

	/* disable DMA and clear IRQ flag */
	dma_chn_dis(DMA1);
	rf_clr_irq_status(FLD_RF_IRQ_RX);

	/* check CRC */
	if (rf_zigbee_packet_crc_ok(data.rx_buffer)) {
		/* get payload length, with or without the trailing FCS */
		if (IS_ENABLED(CONFIG_IEEE802154_L2_PKT_INCL_FCS)) {
			length = data.rx_buffer[B91_LENGTH_OFFSET];
		} else {
			length = data.rx_buffer[B91_LENGTH_OFFSET] - B91_FCS_LENGTH;
		}

		/* check length */
		if ((length < B91_PAYLOAD_MIN) || (length > B91_PAYLOAD_MAX)) {
			LOG_ERR("Invalid length\n");
			goto exit;
		}

		/* get payload */
		payload = (uint8_t *)(data.rx_buffer + B91_PAYLOAD_OFFSET);

		/* handle acknowledge packet if enabled; ACKs never reach the stack */
		if ((length == (B91_ACK_FRAME_LEN + B91_FCS_LENGTH)) &&
		    ((payload[B91_FRAME_TYPE_OFFSET] & B91_FRAME_TYPE_MASK) == B91_ACK_TYPE)) {
			if (data.ack_handler_en) {
				b91_handle_ack();
			}
			goto exit;
		}

		/* run filter (check PAN ID and destination address) */
		if (b91_run_filter(payload) == false) {
			LOG_DBG("Packet received is not addressed to me");
			goto exit;
		}

		/* send ack if requested by the frame control field */
		if (payload[B91_FRAME_TYPE_OFFSET] & B91_ACK_REQUEST) {
			b91_send_ack(payload[B91_DSN_OFFSET]);
		}

		/* get packet pointer from NET stack */
		pkt = net_pkt_rx_alloc_with_buffer(data.iface, length, AF_UNSPEC, 0, K_NO_WAIT);
		if (!pkt) {
			LOG_ERR("No pkt available");
			goto exit;
		}

		/* update packet data */
		if (net_pkt_write(pkt, payload, length)) {
			LOG_ERR("Failed to write to a packet.");
			net_pkt_unref(pkt);
			goto exit;
		}

		/* update RSSI and LQI parameters */
		b91_update_rssi_and_lqi(pkt);

		/* transfer data to NET stack; unref on rejection */
		status = net_recv_data(data.iface, pkt);
		if (status < 0) {
			LOG_ERR("RCV Packet dropped by NET stack: %d", status);
			net_pkt_unref(pkt);
		}
	}

exit:
	dma_chn_en(DMA1);
}
338
/* TX IRQ handler
 *
 * Runs in interrupt context. Signals TX completion to b91_tx() via
 * the tx_wait semaphore and returns the radio to RX mode so a
 * subsequent ACK can be received.
 */
static void b91_rf_tx_isr(void)
{
	/* clear irq status */
	rf_clr_irq_status(FLD_RF_IRQ_TX);

	/* release tx semaphore */
	k_sem_give(&data.tx_wait);

	/* set to rx mode */
	rf_set_rxmode();
}
351
352 /* IRQ handler */
b91_rf_isr(void)353 static void b91_rf_isr(void)
354 {
355 if (rf_get_irq_status(FLD_RF_IRQ_RX)) {
356 b91_rf_rx_isr();
357 } else if (rf_get_irq_status(FLD_RF_IRQ_TX)) {
358 b91_rf_tx_isr();
359 } else {
360 rf_clr_irq_status(FLD_RF_IRQ_ALL);
361 }
362 }
363
/* Driver initialization
 *
 * Initializes the TX/ACK semaphores, configures the RF core for
 * Zigbee 250K mode with TX/RX DMA, connects and enables the radio
 * interrupt, and marks the driver as started. Always returns 0.
 */
static int b91_init(const struct device *dev)
{
	struct b91_data *b91 = dev->data;

	/* init semaphores (binary: initial count 0, limit 1) */
	k_sem_init(&b91->tx_wait, 0, 1);
	k_sem_init(&b91->ack_wait, 0, 1);

	/* init rf module */
	rf_mode_init();
	rf_set_zigbee_250K_mode();
	rf_set_tx_dma(2, B91_TRX_LENGTH);
	rf_set_rx_dma(data.rx_buffer, 3, B91_TRX_LENGTH);
	rf_set_rxmode();

	/* init IRQs: RX and TX interrupts routed to b91_rf_isr() */
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), b91_rf_isr, 0, 0);
	riscv_plic_irq_enable(DT_INST_IRQN(0));
	riscv_plic_set_priority(DT_INST_IRQN(0), DT_INST_IRQ(0, priority));
	rf_set_irq_mask(FLD_RF_IRQ_RX | FLD_RF_IRQ_TX);

	/* init data variables; channel 0 forces the first set_channel to tune */
	data.is_started = true;
	data.ack_handler_en = false;
	data.current_channel = 0;

	return 0;
}
393
/* API implementation: iface_init
 *
 * Registers the device MAC address (extended, B91_IEEE_ADDRESS_SIZE
 * bytes) as the interface link address, stores the interface pointer
 * for the ISRs, and runs the generic 802.15.4 interface init.
 */
static void b91_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct b91_data *b91 = dev->data;
	uint8_t *mac = b91_get_mac(dev);

	net_if_set_link_addr(iface, mac, B91_IEEE_ADDRESS_SIZE, NET_LINK_IEEE802154);

	b91->iface = iface;

	ieee802154_init(iface);
}
407
408 /* API implementation: get_capabilities */
b91_get_capabilities(const struct device * dev)409 static enum ieee802154_hw_caps b91_get_capabilities(const struct device *dev)
410 {
411 ARG_UNUSED(dev);
412
413 return IEEE802154_HW_FCS | IEEE802154_HW_FILTER |
414 IEEE802154_HW_TX_RX_ACK | IEEE802154_HW_RX_TX_ACK;
415 }
416
/* API implementation: cca
 *
 * Busy-polls the radio RSSI for at most B91_CCA_TIME_MAX_US. Returns
 * 0 (channel clear) as soon as one reading drops below the configured
 * threshold, otherwise -EBUSY after the window expires.
 */
static int b91_cca(const struct device *dev)
{
	ARG_UNUSED(dev);

	unsigned int t1 = stimer_get_tick();

	while (!clock_time_exceed(t1, B91_CCA_TIME_MAX_US)) {
		if (rf_get_rssi() < CONFIG_IEEE802154_B91_CCA_RSSI_THRESHOLD) {
			return 0;
		}
	}

	return -EBUSY;
}
432
433 /* API implementation: set_channel */
b91_set_channel(const struct device * dev,uint16_t channel)434 static int b91_set_channel(const struct device *dev, uint16_t channel)
435 {
436 ARG_UNUSED(dev);
437
438 if (channel > 26) {
439 return -EINVAL;
440 }
441
442 if (channel < 11) {
443 return -ENOTSUP;
444 }
445
446 if (data.current_channel != channel) {
447 data.current_channel = channel;
448 rf_set_chn(B91_LOGIC_CHANNEL_TO_PHYSICAL(channel));
449 rf_set_rxmode();
450 }
451
452 return 0;
453 }
454
455 /* API implementation: filter */
b91_filter(const struct device * dev,bool set,enum ieee802154_filter_type type,const struct ieee802154_filter * filter)456 static int b91_filter(const struct device *dev,
457 bool set,
458 enum ieee802154_filter_type type,
459 const struct ieee802154_filter *filter)
460 {
461 if (!set) {
462 return -ENOTSUP;
463 }
464
465 if (type == IEEE802154_FILTER_TYPE_IEEE_ADDR) {
466 return b91_set_ieee_addr(filter->ieee_addr);
467 } else if (type == IEEE802154_FILTER_TYPE_SHORT_ADDR) {
468 return b91_set_short_addr(filter->short_addr);
469 } else if (type == IEEE802154_FILTER_TYPE_PAN_ID) {
470 return b91_set_pan_id(filter->pan_id);
471 }
472
473 return -ENOTSUP;
474 }
475
476 /* API implementation: set_txpower */
b91_set_txpower(const struct device * dev,int16_t dbm)477 static int b91_set_txpower(const struct device *dev, int16_t dbm)
478 {
479 ARG_UNUSED(dev);
480
481 /* check for supported Min/Max range */
482 if (dbm < B91_TX_POWER_MIN) {
483 dbm = B91_TX_POWER_MIN;
484 } else if (dbm > B91_TX_POWER_MAX) {
485 dbm = B91_TX_POWER_MAX;
486 }
487
488 /* set TX power */
489 rf_set_power_level(b91_tx_pwr_lt[dbm - B91_TX_POWER_MIN]);
490
491 return 0;
492 }
493
494 /* API implementation: start */
b91_start(const struct device * dev)495 static int b91_start(const struct device *dev)
496 {
497 ARG_UNUSED(dev);
498
499 /* check if RF is already started */
500 if (!data.is_started) {
501 rf_set_rxmode();
502 delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
503 riscv_plic_irq_enable(DT_INST_IRQN(0));
504 data.is_started = true;
505 }
506
507 return 0;
508 }
509
510 /* API implementation: stop */
b91_stop(const struct device * dev)511 static int b91_stop(const struct device *dev)
512 {
513 ARG_UNUSED(dev);
514
515 /* check if RF is already stopped */
516 if (data.is_started) {
517 riscv_plic_irq_disable(DT_INST_IRQN(0));
518 rf_set_tx_rx_off();
519 delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
520 data.is_started = false;
521 }
522
523 return 0;
524 }
525
/* API implementation: tx
 *
 * Transmits one frame in direct mode. Blocks until the TX-done IRQ
 * releases tx_wait (or B91_TX_WAIT_TIME_MS expires), then, when the
 * frame requests an ACK, blocks until b91_handle_ack() releases
 * ack_wait (or B91_ACK_WAIT_TIME_MS expires).
 *
 * Returns 0 on success, -ENOTSUP for unsupported TX modes, a
 * b91_set_tx_payload() error, -EIO on TX timeout, or the
 * k_sem_take() result of the ACK wait.
 */
static int b91_tx(const struct device *dev,
		  enum ieee802154_tx_mode mode,
		  struct net_pkt *pkt,
		  struct net_buf *frag)
{
	ARG_UNUSED(pkt);

	int status;
	struct b91_data *b91 = dev->data;

	/* check for supported mode */
	if (mode != IEEE802154_TX_MODE_DIRECT) {
		LOG_DBG("TX mode %d not supported", mode);
		return -ENOTSUP;
	}

	/* prepare tx buffer */
	status = b91_set_tx_payload(frag->data, frag->len);
	if (status) {
		return status;
	}

	/* reset semaphores so stale gives from earlier frames are dropped */
	k_sem_reset(&b91->tx_wait);
	k_sem_reset(&b91->ack_wait);

	/* start transmission */
	rf_set_txmode();
	delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
	rf_tx_pkt(data.tx_buffer);

	/* wait for tx done (signalled by b91_rf_tx_isr) */
	status = k_sem_take(&b91->tx_wait, K_MSEC(B91_TX_WAIT_TIME_MS));
	if (status != 0) {
		rf_set_rxmode();
		return -EIO;
	}

	/* wait for ACK if requested by the frame control field.
	 * NOTE(review): the ACK handler is enabled only after TX-done;
	 * an ACK arriving before this point would be dropped by the RX
	 * ISR - confirm the timing window is acceptable.
	 */
	if (frag->data[B91_FRAME_TYPE_OFFSET] & B91_ACK_REQUEST) {
		b91_handle_ack_en();
		status = k_sem_take(&b91->ack_wait, K_MSEC(B91_ACK_WAIT_TIME_MS));
		b91_handle_ack_dis();
	}

	return status;
}
574
/* API implementation: ed_scan
 *
 * Energy-detect scan is not implemented for this radio; always
 * returns -ENOTSUP.
 */
static int b91_ed_scan(const struct device *dev, uint16_t duration,
		       energy_scan_done_cb_t done_cb)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(duration);
	ARG_UNUSED(done_cb);

	return -ENOTSUP;
}
587
/* API implementation: configure
 *
 * No runtime configuration options are implemented; always returns
 * -ENOTSUP.
 */
static int b91_configure(const struct device *dev,
			 enum ieee802154_config_type type,
			 const struct ieee802154_config *config)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(type);
	ARG_UNUSED(config);

	return -ENOTSUP;
}
601
/* driver-allocated attribute memory - constant across all driver instances;
 * declares channels 11..26 as the supported range
 */
IEEE802154_DEFINE_PHY_SUPPORTED_CHANNELS(drv_attr, 11, 26);

/* API implementation: attr_get
 *
 * Answers the standard PHY attribute queries (channel page zero,
 * O-QPSK 2.4 GHz; supported channels 11..26) via the generic helper.
 * The return code for unrecognized attributes comes from the helper.
 */
static int b91_attr_get(const struct device *dev, enum ieee802154_attr attr,
			struct ieee802154_attr_value *value)
{
	ARG_UNUSED(dev);

	return ieee802154_attr_get_channel_page_and_range(
		attr, IEEE802154_ATTR_PHY_CHANNEL_PAGE_ZERO_OQPSK_2450_BPSK_868_915,
		&drv_attr.phy_supported_channels, value);
}
615
/* IEEE802154 driver APIs structure: binds the b91_* implementations
 * to the generic radio driver interface
 */
static const struct ieee802154_radio_api b91_radio_api = {
	.iface_api.init = b91_iface_init,
	.get_capabilities = b91_get_capabilities,
	.cca = b91_cca,
	.set_channel = b91_set_channel,
	.filter = b91_filter,
	.set_txpower = b91_set_txpower,
	.start = b91_start,
	.stop = b91_stop,
	.tx = b91_tx,
	.ed_scan = b91_ed_scan,
	.configure = b91_configure,
	.attr_get = b91_attr_get,
};
631
632
/* L2 selection: the context type and MTU depend on which L2 is enabled */
#if defined(CONFIG_NET_L2_IEEE802154)
#define L2 IEEE802154_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(IEEE802154_L2)
#define MTU 125
#elif defined(CONFIG_NET_L2_OPENTHREAD)
#define L2 OPENTHREAD_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(OPENTHREAD_L2)
#define MTU 1280
#endif


/* IEEE802154 driver registration: as a network device when an L2 is
 * enabled, otherwise as a bare device exposing only the radio API
 */
#if defined(CONFIG_NET_L2_IEEE802154) || defined(CONFIG_NET_L2_OPENTHREAD)
NET_DEVICE_DT_INST_DEFINE(0, b91_init, NULL, &data, NULL,
			  CONFIG_IEEE802154_B91_INIT_PRIO,
			  &b91_radio_api, L2, L2_CTX_TYPE, MTU);
#else
DEVICE_DT_INST_DEFINE(0, b91_init, NULL, &data, NULL,
		      POST_KERNEL, CONFIG_IEEE802154_B91_INIT_PRIO,
		      &b91_radio_api);
#endif
654