1 /*
2  * Copyright (c) 2021 Telink Semiconductor
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT telink_b91_zb
8 
9 #include "rf.h"
10 #include "stimer.h"
11 
12 #define LOG_MODULE_NAME ieee802154_b91
13 #if defined(CONFIG_IEEE802154_DRIVER_LOG_LEVEL)
14 #define LOG_LEVEL CONFIG_IEEE802154_DRIVER_LOG_LEVEL
15 #else
16 #define LOG_LEVEL LOG_LEVEL_NONE
17 #endif
18 
19 #include <zephyr/logging/log.h>
20 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
21 
22 #include <zephyr/random/random.h>
23 #include <zephyr/net/ieee802154_radio.h>
24 #include <zephyr/irq.h>
25 #if defined(CONFIG_NET_L2_OPENTHREAD)
26 #include <zephyr/net/openthread.h>
27 #endif
28 
29 #include "ieee802154_b91.h"
30 
31 
/* B91 driver instance data (single driver instance, shared with the ISR) */
static struct b91_data data;
34 
35 /* Set filter PAN ID */
b91_set_pan_id(uint16_t pan_id)36 static int b91_set_pan_id(uint16_t pan_id)
37 {
38 	uint8_t pan_id_le[B91_PAN_ID_SIZE];
39 
40 	sys_put_le16(pan_id, pan_id_le);
41 	memcpy(data.filter_pan_id, pan_id_le, B91_PAN_ID_SIZE);
42 
43 	return 0;
44 }
45 
46 /* Set filter short address */
b91_set_short_addr(uint16_t short_addr)47 static int b91_set_short_addr(uint16_t short_addr)
48 {
49 	uint8_t short_addr_le[B91_SHORT_ADDRESS_SIZE];
50 
51 	sys_put_le16(short_addr, short_addr_le);
52 	memcpy(data.filter_short_addr, short_addr_le, B91_SHORT_ADDRESS_SIZE);
53 
54 	return 0;
55 }
56 
57 /* Set filter IEEE address */
b91_set_ieee_addr(const uint8_t * ieee_addr)58 static int b91_set_ieee_addr(const uint8_t *ieee_addr)
59 {
60 	memcpy(data.filter_ieee_addr, ieee_addr, B91_IEEE_ADDRESS_SIZE);
61 
62 	return 0;
63 }
64 
65 /* Filter PAN ID, short address and IEEE address */
b91_run_filter(uint8_t * rx_buffer)66 static bool b91_run_filter(uint8_t *rx_buffer)
67 {
68 	/* Check destination PAN Id */
69 	if (memcmp(&rx_buffer[B91_PAN_ID_OFFSET], data.filter_pan_id,
70 		   B91_PAN_ID_SIZE) != 0 &&
71 	    memcmp(&rx_buffer[B91_PAN_ID_OFFSET], B91_BROADCAST_ADDRESS,
72 		   B91_PAN_ID_SIZE) != 0) {
73 		return false;
74 	}
75 
76 	/* Check destination address */
77 	switch (rx_buffer[B91_DEST_ADDR_TYPE_OFFSET] & B91_DEST_ADDR_TYPE_MASK) {
78 	case B91_DEST_ADDR_TYPE_SHORT:
79 		/* First check if the destination is broadcast */
80 		/* If not broadcast, check if length and address matches */
81 		if (memcmp(&rx_buffer[B91_DEST_ADDR_OFFSET], B91_BROADCAST_ADDRESS,
82 			   B91_SHORT_ADDRESS_SIZE) != 0 &&
83 		    memcmp(&rx_buffer[B91_DEST_ADDR_OFFSET], data.filter_short_addr,
84 			   B91_SHORT_ADDRESS_SIZE) != 0) {
85 			return false;
86 		}
87 		break;
88 
89 	case B91_DEST_ADDR_TYPE_IEEE:
90 		/* If not broadcast, check if length and address matches */
91 		if ((net_if_get_link_addr(data.iface)->len != B91_IEEE_ADDRESS_SIZE) ||
92 		    memcmp(&rx_buffer[B91_DEST_ADDR_OFFSET], data.filter_ieee_addr,
93 			   B91_IEEE_ADDRESS_SIZE) != 0) {
94 			return false;
95 		}
96 		break;
97 
98 	default:
99 		return false;
100 	}
101 
102 	return true;
103 }
104 
/*
 * Return the 8-byte MAC address stored in the driver instance.
 *
 * With CONFIG_IEEE802154_B91_RANDOM_MAC the address is freshly randomized
 * on every call; otherwise it is built from the fixed vendor prefix plus
 * the Kconfig-supplied EUI bytes.
 */
static inline uint8_t *b91_get_mac(const struct device *dev)
{
	struct b91_data *b91 = dev->data;

#if defined(CONFIG_IEEE802154_B91_RANDOM_MAC)
	/* fill all 8 bytes with random data (unaligned-safe stores) */
	uint32_t *ptr = (uint32_t *)(b91->mac_addr);

	UNALIGNED_PUT(sys_rand32_get(), ptr);
	ptr = (uint32_t *)(b91->mac_addr + 4);
	UNALIGNED_PUT(sys_rand32_get(), ptr);

	/*
	 * Clear bit 0 to ensure it isn't a multicast address and set
	 * bit 1 to indicate address is locally administered and may
	 * not be globally unique.
	 */
	b91->mac_addr[0] = (b91->mac_addr[0] & ~0x01) | 0x02;
#else
	/* Vendor Unique Identifier (Telink OUI, presumably - verify) */
	b91->mac_addr[0] = 0xC4;
	b91->mac_addr[1] = 0x19;
	b91->mac_addr[2] = 0xD1;
	b91->mac_addr[3] = 0x00;

	/* Extended Unique Identifier from Kconfig */
	b91->mac_addr[4] = CONFIG_IEEE802154_B91_MAC4;
	b91->mac_addr[5] = CONFIG_IEEE802154_B91_MAC5;
	b91->mac_addr[6] = CONFIG_IEEE802154_B91_MAC6;
	b91->mac_addr[7] = CONFIG_IEEE802154_B91_MAC7;
#endif

	return b91->mac_addr;
}
139 
140 /* Convert RSSI to LQI */
b91_convert_rssi_to_lqi(int8_t rssi)141 static uint8_t b91_convert_rssi_to_lqi(int8_t rssi)
142 {
143 	uint32_t lqi32 = 0;
144 
145 	/* check for MIN value */
146 	if (rssi < B91_RSSI_TO_LQI_MIN) {
147 		return 0;
148 	}
149 
150 	/* convert RSSI to LQI */
151 	lqi32 = B91_RSSI_TO_LQI_SCALE * (rssi - B91_RSSI_TO_LQI_MIN);
152 
153 	/* check for MAX value */
154 	if (lqi32 > 0xFF) {
155 		lqi32 = 0xFF;
156 	}
157 
158 	return (uint8_t)lqi32;
159 }
160 
161 /* Update RSSI and LQI parameters */
b91_update_rssi_and_lqi(struct net_pkt * pkt)162 static void b91_update_rssi_and_lqi(struct net_pkt *pkt)
163 {
164 	int8_t rssi;
165 	uint8_t lqi;
166 
167 	rssi = ((signed char)(data.rx_buffer
168 			      [data.rx_buffer[B91_LENGTH_OFFSET] + B91_RSSI_OFFSET])) - 110;
169 	lqi = b91_convert_rssi_to_lqi(rssi);
170 
171 	net_pkt_set_ieee802154_lqi(pkt, lqi);
172 	net_pkt_set_ieee802154_rssi_dbm(pkt, rssi);
173 }
174 
175 /* Prepare TX buffer */
b91_set_tx_payload(uint8_t * payload,uint8_t payload_len)176 static int b91_set_tx_payload(uint8_t *payload, uint8_t payload_len)
177 {
178 	unsigned char rf_data_len;
179 	unsigned int rf_tx_dma_len;
180 
181 	/* See Telink SDK Dev Handbook, AN-21010600, section 21.5.2.2. */
182 	if (payload_len > (B91_TRX_LENGTH - B91_PAYLOAD_OFFSET - IEEE802154_FCS_LENGTH)) {
183 		return -EINVAL;
184 	}
185 
186 	rf_data_len = payload_len + 1;
187 	rf_tx_dma_len = rf_tx_packet_dma_len(rf_data_len);
188 	data.tx_buffer[0] = rf_tx_dma_len & 0xff;
189 	data.tx_buffer[1] = (rf_tx_dma_len >> 8) & 0xff;
190 	data.tx_buffer[2] = (rf_tx_dma_len >> 16) & 0xff;
191 	data.tx_buffer[3] = (rf_tx_dma_len >> 24) & 0xff;
192 	data.tx_buffer[4] = payload_len + IEEE802154_FCS_LENGTH;
193 	memcpy(data.tx_buffer + B91_PAYLOAD_OFFSET, payload, payload_len);
194 
195 	return 0;
196 }
197 
/* Arm the RX ISR's ACK handler (called by b91_tx() before waiting for an ACK) */
static void b91_handle_ack_en(void)
{
	data.ack_handler_en = true;
}
203 
/* Disarm the RX ISR's ACK handler (called by b91_tx() after the ACK wait) */
static void b91_handle_ack_dis(void)
{
	data.ack_handler_en = false;
}
209 
/*
 * Handle a received acknowledge frame (runs in RX IRQ context).
 *
 * Wraps the ACK bytes from the RX DMA buffer into a net_pkt, hands it to
 * the 802.15.4 L2 for matching against the pending transmission, and
 * finally wakes b91_tx(), which is blocked on data.ack_wait. The packet
 * stays owned by this function and is always released before returning.
 */
static void b91_handle_ack(void)
{
	struct net_pkt *ack_pkt;

	/* allocate ack packet - K_NO_WAIT because we are in IRQ context */
	ack_pkt = net_pkt_rx_alloc_with_buffer(data.iface, B91_ACK_FRAME_LEN,
					       AF_UNSPEC, 0, K_NO_WAIT);
	if (!ack_pkt) {
		LOG_ERR("No free packet available.");
		return;
	}

	/* copy the ACK frame bytes out of the RX DMA buffer */
	if (net_pkt_write(ack_pkt, data.rx_buffer + B91_PAYLOAD_OFFSET,
			  B91_ACK_FRAME_LEN) < 0) {
		LOG_ERR("Failed to write to a packet.");
		goto out;
	}

	/* update RSSI and LQI */
	b91_update_rssi_and_lqi(ack_pkt);

	/* rewind the cursor so the L2 reads the frame from its start */
	net_pkt_cursor_init(ack_pkt);

	/* hand the ACK to the L2 (ownership is not transferred) */
	if (ieee802154_handle_ack(data.iface, ack_pkt) != NET_OK) {
		LOG_INF("ACK packet not handled - releasing.");
	}

	/* release ack_wait semaphore - unblocks b91_tx() */
	k_sem_give(&data.ack_wait);

out:
	net_pkt_unref(ack_pkt);
}
247 
248 /* Send acknowledge packet */
b91_send_ack(uint8_t seq_num)249 static void b91_send_ack(uint8_t seq_num)
250 {
251 	uint8_t ack_buf[] = { B91_ACK_TYPE, 0, seq_num };
252 
253 	if (b91_set_tx_payload(ack_buf, sizeof(ack_buf))) {
254 		return;
255 	}
256 
257 	rf_set_txmode();
258 	delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
259 	rf_tx_pkt(data.tx_buffer);
260 }
261 
262 /* RX IRQ handler */
b91_rf_rx_isr(void)263 static void b91_rf_rx_isr(void)
264 {
265 	uint8_t status;
266 	uint8_t length;
267 	uint8_t *payload;
268 	struct net_pkt *pkt;
269 
270 	/* disable DMA and clear IRQ flag */
271 	dma_chn_dis(DMA1);
272 	rf_clr_irq_status(FLD_RF_IRQ_RX);
273 
274 	/* check CRC */
275 	if (rf_zigbee_packet_crc_ok(data.rx_buffer)) {
276 		/* get payload length */
277 		if (IS_ENABLED(CONFIG_IEEE802154_RAW_MODE) ||
278 		    IS_ENABLED(CONFIG_NET_L2_OPENTHREAD)) {
279 			length = data.rx_buffer[B91_LENGTH_OFFSET];
280 		} else {
281 			length = data.rx_buffer[B91_LENGTH_OFFSET] - B91_FCS_LENGTH;
282 		}
283 
284 		/* check length */
285 		if ((length < B91_PAYLOAD_MIN) || (length > B91_PAYLOAD_MAX)) {
286 			LOG_ERR("Invalid length\n");
287 			goto exit;
288 		}
289 
290 		/* get payload */
291 		payload = (uint8_t *)(data.rx_buffer + B91_PAYLOAD_OFFSET);
292 
293 		/* handle acknowledge packet if enabled */
294 		if ((length == (B91_ACK_FRAME_LEN + B91_FCS_LENGTH)) &&
295 		    ((payload[B91_FRAME_TYPE_OFFSET] & B91_FRAME_TYPE_MASK) == B91_ACK_TYPE)) {
296 			if (data.ack_handler_en) {
297 				b91_handle_ack();
298 			}
299 			goto exit;
300 		}
301 
302 		/* run filter (check PAN ID and destination address) */
303 		if (b91_run_filter(payload) == false) {
304 			LOG_DBG("Packet received is not addressed to me");
305 			goto exit;
306 		}
307 
308 		/* send ack if requested */
309 		if (payload[B91_FRAME_TYPE_OFFSET] & B91_ACK_REQUEST) {
310 			b91_send_ack(payload[B91_DSN_OFFSET]);
311 		}
312 
313 		/* get packet pointer from NET stack */
314 		pkt = net_pkt_rx_alloc_with_buffer(data.iface, length, AF_UNSPEC, 0, K_NO_WAIT);
315 		if (!pkt) {
316 			LOG_ERR("No pkt available");
317 			goto exit;
318 		}
319 
320 		/* update packet data */
321 		if (net_pkt_write(pkt, payload, length)) {
322 			LOG_ERR("Failed to write to a packet.");
323 			net_pkt_unref(pkt);
324 			goto exit;
325 		}
326 
327 		/* update RSSI and LQI parameters */
328 		b91_update_rssi_and_lqi(pkt);
329 
330 		/* transfer data to NET stack */
331 		status = net_recv_data(data.iface, pkt);
332 		if (status < 0) {
333 			LOG_ERR("RCV Packet dropped by NET stack: %d", status);
334 			net_pkt_unref(pkt);
335 		}
336 	}
337 
338 exit:
339 	dma_chn_en(DMA1);
340 }
341 
/*
 * TX IRQ handler: acknowledge the TX-done interrupt, wake the thread
 * blocked in b91_tx(), and return the transceiver to receive mode.
 */
static void b91_rf_tx_isr(void)
{
	/* clear irq status */
	rf_clr_irq_status(FLD_RF_IRQ_TX);

	/* release tx semaphore - unblocks b91_tx() */
	k_sem_give(&data.tx_wait);

	/* set to rx mode so we can catch the ACK (if any) */
	rf_set_rxmode();
}
354 
355 /* IRQ handler */
b91_rf_isr(void)356 static void b91_rf_isr(void)
357 {
358 	if (rf_get_irq_status(FLD_RF_IRQ_RX)) {
359 		b91_rf_rx_isr();
360 	} else if (rf_get_irq_status(FLD_RF_IRQ_TX)) {
361 		b91_rf_tx_isr();
362 	} else {
363 		rf_clr_irq_status(FLD_RF_IRQ_ALL);
364 	}
365 }
366 
367 /* Driver initialization */
b91_init(const struct device * dev)368 static int b91_init(const struct device *dev)
369 {
370 	struct b91_data *b91 = dev->data;
371 
372 	/* init semaphores */
373 	k_sem_init(&b91->tx_wait, 0, 1);
374 	k_sem_init(&b91->ack_wait, 0, 1);
375 
376 	/* init rf module */
377 	rf_mode_init();
378 	rf_set_zigbee_250K_mode();
379 	rf_set_tx_dma(2, B91_TRX_LENGTH);
380 	rf_set_rx_dma(data.rx_buffer, 3, B91_TRX_LENGTH);
381 	rf_set_rxmode();
382 
383 	/* init IRQs */
384 	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), b91_rf_isr, 0, 0);
385 	riscv_plic_irq_enable(DT_INST_IRQN(0));
386 	riscv_plic_set_priority(DT_INST_IRQN(0), DT_INST_IRQ(0, priority));
387 	rf_set_irq_mask(FLD_RF_IRQ_RX | FLD_RF_IRQ_TX);
388 
389 	/* init data variables */
390 	data.is_started = true;
391 	data.ack_handler_en = false;
392 	data.current_channel = 0;
393 
394 	return 0;
395 }
396 
397 /* API implementation: iface_init */
b91_iface_init(struct net_if * iface)398 static void b91_iface_init(struct net_if *iface)
399 {
400 	const struct device *dev = net_if_get_device(iface);
401 	struct b91_data *b91 = dev->data;
402 	uint8_t *mac = b91_get_mac(dev);
403 
404 	net_if_set_link_addr(iface, mac, B91_IEEE_ADDRESS_SIZE, NET_LINK_IEEE802154);
405 
406 	b91->iface = iface;
407 
408 	ieee802154_init(iface);
409 }
410 
/*
 * API implementation: get_capabilities - hardware FCS handling, address
 * filtering, and automatic ACK reception/transmission.
 */
static enum ieee802154_hw_caps b91_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return IEEE802154_HW_FCS | IEEE802154_HW_FILTER |
	       IEEE802154_HW_TX_RX_ACK | IEEE802154_HW_RX_TX_ACK;
}
419 
420 /* API implementation: cca */
b91_cca(const struct device * dev)421 static int b91_cca(const struct device *dev)
422 {
423 	ARG_UNUSED(dev);
424 
425 	unsigned int t1 = stimer_get_tick();
426 
427 	while (!clock_time_exceed(t1, B91_CCA_TIME_MAX_US)) {
428 		if (rf_get_rssi() < CONFIG_IEEE802154_B91_CCA_RSSI_THRESHOLD) {
429 			return 0;
430 		}
431 	}
432 
433 	return -EBUSY;
434 }
435 
436 /* API implementation: set_channel */
b91_set_channel(const struct device * dev,uint16_t channel)437 static int b91_set_channel(const struct device *dev, uint16_t channel)
438 {
439 	ARG_UNUSED(dev);
440 
441 	if (channel > 26) {
442 		return -EINVAL;
443 	}
444 
445 	if (channel < 11) {
446 		return -ENOTSUP;
447 	}
448 
449 	if (data.current_channel != channel) {
450 		data.current_channel = channel;
451 		rf_set_chn(B91_LOGIC_CHANNEL_TO_PHYSICAL(channel));
452 		rf_set_rxmode();
453 	}
454 
455 	return 0;
456 }
457 
458 /* API implementation: filter */
b91_filter(const struct device * dev,bool set,enum ieee802154_filter_type type,const struct ieee802154_filter * filter)459 static int b91_filter(const struct device *dev,
460 		      bool set,
461 		      enum ieee802154_filter_type type,
462 		      const struct ieee802154_filter *filter)
463 {
464 	if (!set) {
465 		return -ENOTSUP;
466 	}
467 
468 	if (type == IEEE802154_FILTER_TYPE_IEEE_ADDR) {
469 		return b91_set_ieee_addr(filter->ieee_addr);
470 	} else if (type == IEEE802154_FILTER_TYPE_SHORT_ADDR) {
471 		return b91_set_short_addr(filter->short_addr);
472 	} else if (type == IEEE802154_FILTER_TYPE_PAN_ID) {
473 		return b91_set_pan_id(filter->pan_id);
474 	}
475 
476 	return -ENOTSUP;
477 }
478 
479 /* API implementation: set_txpower */
b91_set_txpower(const struct device * dev,int16_t dbm)480 static int b91_set_txpower(const struct device *dev, int16_t dbm)
481 {
482 	ARG_UNUSED(dev);
483 
484 	/* check for supported Min/Max range */
485 	if (dbm < B91_TX_POWER_MIN) {
486 		dbm = B91_TX_POWER_MIN;
487 	} else if (dbm > B91_TX_POWER_MAX) {
488 		dbm = B91_TX_POWER_MAX;
489 	}
490 
491 	/* set TX power */
492 	rf_set_power_level(b91_tx_pwr_lt[dbm - B91_TX_POWER_MIN]);
493 
494 	return 0;
495 }
496 
497 /* API implementation: start */
b91_start(const struct device * dev)498 static int b91_start(const struct device *dev)
499 {
500 	ARG_UNUSED(dev);
501 
502 	/* check if RF is already started */
503 	if (!data.is_started) {
504 		rf_set_rxmode();
505 		delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
506 		riscv_plic_irq_enable(DT_INST_IRQN(0));
507 		data.is_started = true;
508 	}
509 
510 	return 0;
511 }
512 
513 /* API implementation: stop */
b91_stop(const struct device * dev)514 static int b91_stop(const struct device *dev)
515 {
516 	ARG_UNUSED(dev);
517 
518 	/* check if RF is already stopped */
519 	if (data.is_started) {
520 		riscv_plic_irq_disable(DT_INST_IRQN(0));
521 		rf_set_tx_rx_off();
522 		delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
523 		data.is_started = false;
524 	}
525 
526 	return 0;
527 }
528 
/*
 * API implementation: tx - transmit one frame (direct mode only).
 *
 * Blocks until the TX interrupt gives tx_wait (or -EIO on timeout). When
 * the frame requests an ACK, blocks again until b91_handle_ack() gives
 * ack_wait; in that case the k_sem_take() result (0 or its timeout error)
 * is returned to the caller.
 */
static int b91_tx(const struct device *dev,
		  enum ieee802154_tx_mode mode,
		  struct net_pkt *pkt,
		  struct net_buf *frag)
{
	ARG_UNUSED(pkt);

	int status;
	struct b91_data *b91 = dev->data;

	/* check for supported mode - only direct (no CSMA/CA) transmission */
	if (mode != IEEE802154_TX_MODE_DIRECT) {
		LOG_DBG("TX mode %d not supported", mode);
		return -ENOTSUP;
	}

	/* prepare tx buffer (fails with -EINVAL on oversized payload) */
	status = b91_set_tx_payload(frag->data, frag->len);
	if (status) {
		return status;
	}

	/* reset semaphores - discard any stale TX-done / ACK signals */
	k_sem_reset(&b91->tx_wait);
	k_sem_reset(&b91->ack_wait);

	/* start transmission */
	rf_set_txmode();
	delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
	rf_tx_pkt(data.tx_buffer);

	/* wait for tx done (signalled by b91_rf_tx_isr) */
	status = k_sem_take(&b91->tx_wait, K_MSEC(B91_TX_WAIT_TIME_MS));
	if (status != 0) {
		rf_set_rxmode();
		return -EIO;
	}

	/* wait for ACK if requested; the handler is armed only for this
	 * window so stray ACKs are otherwise ignored
	 */
	if (frag->data[B91_FRAME_TYPE_OFFSET] & B91_ACK_REQUEST) {
		b91_handle_ack_en();
		status = k_sem_take(&b91->ack_wait, K_MSEC(B91_ACK_WAIT_TIME_MS));
		b91_handle_ack_dis();
	}

	return status;
}
577 
/*
 * API implementation: ed_scan - energy detection scan is not implemented
 * by this driver.
 */
static int b91_ed_scan(const struct device *dev, uint16_t duration,
		       energy_scan_done_cb_t done_cb)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(duration);
	ARG_UNUSED(done_cb);

	/* ed_scan not supported */

	return -ENOTSUP;
}
590 
/*
 * API implementation: configure - no runtime configuration options are
 * supported by this driver.
 */
static int b91_configure(const struct device *dev,
			 enum ieee802154_config_type type,
			 const struct ieee802154_config *config)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(type);
	ARG_UNUSED(config);

	/* configure not supported */

	return -ENOTSUP;
}
604 
/* driver-allocated attribute memory - constant across all driver instances;
 * advertises support for 2.4 GHz channels 11..26
 */
IEEE802154_DEFINE_PHY_SUPPORTED_CHANNELS(drv_attr, 11, 26);
607 
/*
 * API implementation: attr_get - delegate to the generic helper, which
 * answers channel-page and supported-channel-range queries from drv_attr.
 */
static int b91_attr_get(const struct device *dev, enum ieee802154_attr attr,
			struct ieee802154_attr_value *value)
{
	ARG_UNUSED(dev);

	return ieee802154_attr_get_channel_page_and_range(
		attr, IEEE802154_ATTR_PHY_CHANNEL_PAGE_ZERO_OQPSK_2450_BPSK_868_915,
		&drv_attr.phy_supported_channels, value);
}
618 
/* IEEE802154 radio driver API vtable, registered with the device below */
static struct ieee802154_radio_api b91_radio_api = {
	.iface_api.init = b91_iface_init,
	.get_capabilities = b91_get_capabilities,
	.cca = b91_cca,
	.set_channel = b91_set_channel,
	.filter = b91_filter,
	.set_txpower = b91_set_txpower,
	.start = b91_start,
	.stop = b91_stop,
	.tx = b91_tx,
	.ed_scan = b91_ed_scan,
	.configure = b91_configure,
	.attr_get = b91_attr_get,
};
634 
635 
/* L2 selection: native 802.15.4 (125-byte MTU) or OpenThread (1280-byte
 * MTU for 6LoWPAN/IPv6)
 */
#if defined(CONFIG_NET_L2_IEEE802154)
#define L2 IEEE802154_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(IEEE802154_L2)
#define MTU 125
#elif defined(CONFIG_NET_L2_OPENTHREAD)
#define L2 OPENTHREAD_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(OPENTHREAD_L2)
#define MTU 1280
#endif


/* IEEE802154 driver registration: as a network device when an L2 is
 * configured, otherwise as a bare device (e.g. raw-mode use)
 */
#if defined(CONFIG_NET_L2_IEEE802154) || defined(CONFIG_NET_L2_OPENTHREAD)
NET_DEVICE_DT_INST_DEFINE(0, b91_init, NULL, &data, NULL,
			  CONFIG_IEEE802154_B91_INIT_PRIO,
			  &b91_radio_api, L2, L2_CTX_TYPE, MTU);
#else
DEVICE_DT_INST_DEFINE(0, b91_init, NULL, &data, NULL,
		      POST_KERNEL, CONFIG_IEEE802154_B91_INIT_PRIO,
		      &b91_radio_api);
#endif
657