1 /*
2 * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller
3 *
4 * Copyright (C) 2018 Xue Liu <liuxuenetmail@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/spi/spi.h>
20 #include <linux/workqueue.h>
21 #include <linux/interrupt.h>
22 #include <linux/irq.h>
23 #include <linux/skbuff.h>
24 #include <linux/of_gpio.h>
25 #include <linux/regmap.h>
26 #include <linux/ieee802154.h>
27 #include <linux/debugfs.h>
28
29 #include <net/mac802154.h>
30 #include <net/cfg802154.h>
31
32 #include <linux/device.h>
33
34 #include "mcr20a.h"
35
/* Size of the scratch SPI command buffer (command + 16-bit address). */
#define SPI_COMMAND_BUFFER 3

/* Access-mode bits encoded into the first byte of every SPI transfer. */
#define REGISTER_READ BIT(7)
#define REGISTER_WRITE (0 << 7)
#define REGISTER_ACCESS (0 << 6)
#define PACKET_BUFF_BURST_ACCESS BIT(6)
#define PACKET_BUFF_BYTE_ACCESS BIT(5)

/* Build the SPI header byte for a DAR register or packet-buffer access. */
#define MCR20A_WRITE_REG(x) (x)
#define MCR20A_READ_REG(x) (REGISTER_READ | (x))
#define MCR20A_BURST_READ_PACKET_BUF (0xC0)
#define MCR20A_BURST_WRITE_PACKET_BUF (0x40)

#define MCR20A_CMD_REG 0x80
#define MCR20A_CMD_REG_MASK 0x3f
#define MCR20A_CMD_WRITE 0x40
#define MCR20A_CMD_FB 0x20

/* Number of Interrupt Request Status Register */
#define MCR20A_IRQSTS_NUM 2 /* only IRQ_STS1 and IRQ_STS2 */
57 /* MCR20A CCA Type */
/* MCR20A CCA Type: values programmed into the PHY_CTRL4 CCATYPE field
 * (see mcr20a_set_cca_mode()).
 */
enum {
	MCR20A_CCA_ED,	  // energy detect - CCA bit not active,
			  // not to be used for T and CCCA sequences
	MCR20A_CCA_MODE1, // energy detect - CCA bit ACTIVE
	MCR20A_CCA_MODE2, // 802.15.4 compliant signal detect - CCA bit ACTIVE
	MCR20A_CCA_MODE3
};
65
/* Transceiver sequence selectors written to the PHY_CTRL1 XCVSEQ field. */
enum {
	MCR20A_XCVSEQ_IDLE	= 0x00,
	MCR20A_XCVSEQ_RX	= 0x01,
	MCR20A_XCVSEQ_TX	= 0x02,
	MCR20A_XCVSEQ_CCA	= 0x03,
	MCR20A_XCVSEQ_TR	= 0x04,	/* TX then RX (wait for ACK) */
	MCR20A_XCVSEQ_CCCA	= 0x05,	/* continuous CCA */
};
74
/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
#define MCR20A_MIN_CHANNEL (11)
#define MCR20A_MAX_CHANNEL (26)
#define MCR20A_CHANNEL_SPACING (5)	/* MHz between adjacent channels */

/* MCR20A CCA Threshold constants: the register holds the magnitude of
 * the threshold in -dBm, so the numeric min/max look inverted.
 */
#define MCR20A_MIN_CCA_THRESHOLD (0x6EU)
#define MCR20A_MAX_CCA_THRESHOLD (0x00U)

/* Chip revision the IAR overwrite table below applies to (version 0C). */
#define MCR20A_OVERWRITE_VERSION (0x0C)
/* MCR20A PLL configurations: integer and fractional dividers, one entry
 * per 5 MHz channel from 2405 MHz (channel 11) to 2480 MHz (channel 26).
 * frequency = ((PLL_INT + 64) + (PLL_FRAC / 65536)) * 32 MHz
 */
static const u8 PLL_INT[16] = {
	/* 2405 */ 0x0B,	/* 2410 */ 0x0B,	/* 2415 */ 0x0B,
	/* 2420 */ 0x0B,	/* 2425 */ 0x0B,	/* 2430 */ 0x0B,
	/* 2435 */ 0x0C,	/* 2440 */ 0x0C,	/* 2445 */ 0x0C,
	/* 2450 */ 0x0C,	/* 2455 */ 0x0C,	/* 2460 */ 0x0C,
	/* 2465 */ 0x0D,	/* 2470 */ 0x0D,	/* 2475 */ 0x0D,
	/* 2480 */ 0x0D
};

static const u8 PLL_FRAC[16] = {
	/* 2405 */ 0x28,	/* 2410 */ 0x50,	/* 2415 */ 0x78,
	/* 2420 */ 0xA0,	/* 2425 */ 0xC8,	/* 2430 */ 0xF0,
	/* 2435 */ 0x18,	/* 2440 */ 0x40,	/* 2445 */ 0x68,
	/* 2450 */ 0x90,	/* 2455 */ 0xB8,	/* 2460 */ 0xE0,
	/* 2465 */ 0x08,	/* 2470 */ 0x30,	/* 2475 */ 0x58,
	/* 2480 */ 0x80
};
105
/* Vendor-recommended IAR register overwrites for chip revision
 * MCR20A_OVERWRITE_VERSION (0x0C), applied in one regmap sequence.
 * NOTE(review): "mar20a" looks like a typo for "mcr20a"; the name is
 * kept because other parts of the file reference it.
 */
static const struct reg_sequence mar20a_iar_overwrites[] = {
	{ IAR_MISC_PAD_CTRL,	0x02 },
	{ IAR_VCO_CTRL1,	0xB3 },
	{ IAR_VCO_CTRL2,	0x07 },
	{ IAR_PA_TUNING,	0x71 },
	{ IAR_CHF_IBUF,		0x2F },
	{ IAR_CHF_QBUF,		0x2F },
	{ IAR_CHF_IRIN,		0x24 },
	{ IAR_CHF_QRIN,		0x24 },
	{ IAR_CHF_IL,		0x24 },
	{ IAR_CHF_QL,		0x24 },
	{ IAR_CHF_CC1,		0x32 },
	{ IAR_CHF_CCL,		0x1D },
	{ IAR_CHF_CC2,		0x2D },
	{ IAR_CHF_IROUT,	0x24 },
	{ IAR_CHF_QROUT,	0x24 },
	{ IAR_PA_CAL,		0x28 },
	{ IAR_AGC_THR1,		0x55 },
	{ IAR_AGC_THR2,		0x2D },
	{ IAR_ATT_RSSI1,	0x5F },
	{ IAR_ATT_RSSI2,	0x8F },
	{ IAR_RSSI_OFFSET,	0x61 },
	{ IAR_CHF_PMA_GAIN,	0x03 },
	{ IAR_CCA1_THRESH,	0x50 },
	{ IAR_CORR_NVAL,	0x13 },
	{ IAR_ACKDELAY,		0x3D },
};
133
/* Bitmap of supported 2.4 GHz channels (11..26) for wpan_phy. */
#define MCR20A_VALID_CHANNELS (0x07FFF800)

/* Platform data parsed from the device tree. */
struct mcr20a_platform_data {
	int rst_gpio;	/* reset line from "rst_b-gpio"; presumably active
			 * low (RST_B naming) — TODO confirm polarity
			 */
};

/* Maximum PSDU size, i.e. size of the RX packet buffer. */
#define MCR20A_MAX_BUF		(127)

/* Shorthand for the underlying SPI device, used in dev_* logging. */
#define printdev(X) (&X->spi->dev)
143
/* regmap information for Direct Access Register (DAR) access */
#define MCR20A_DAR_WRITE 0x01
#define MCR20A_DAR_READ 0x00
#define MCR20A_DAR_NUMREGS 0x3F

/* regmap information for Indirect Access Register (IAR) access */
#define MCR20A_IAR_ACCESS 0x80
#define MCR20A_IAR_NUMREGS 0xBEFF

/* Read/Write SPI Commands for DAR and IAR registers. */
#define MCR20A_READSHORT(reg) ((reg) << 1)
#define MCR20A_WRITESHORT(reg) ((reg) << 1 | 1)
#define MCR20A_READLONG(reg) (1 << 15 | (reg) << 5)
#define MCR20A_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)

/* Type definitions for link configuration of instantiable layers */
#define MCR20A_PHY_INDIRECT_QUEUE_SIZE (12)
161
/* regmap callback: whitelist of writeable Direct Access Registers.
 * Anything not listed (e.g. DAR_ACM) is rejected by regmap.
 */
static bool
mcr20a_dar_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case DAR_IRQ_STS1:
	case DAR_IRQ_STS2:
	case DAR_IRQ_STS3:
	case DAR_PHY_CTRL1:
	case DAR_PHY_CTRL2:
	case DAR_PHY_CTRL3:
	case DAR_PHY_CTRL4:
	case DAR_SRC_CTRL:
	case DAR_SRC_ADDRS_SUM_LSB:
	case DAR_SRC_ADDRS_SUM_MSB:
	case DAR_T3CMP_LSB:
	case DAR_T3CMP_MSB:
	case DAR_T3CMP_USB:
	case DAR_T2PRIMECMP_LSB:
	case DAR_T2PRIMECMP_MSB:
	case DAR_T1CMP_LSB:
	case DAR_T1CMP_MSB:
	case DAR_T1CMP_USB:
	case DAR_T2CMP_LSB:
	case DAR_T2CMP_MSB:
	case DAR_T2CMP_USB:
	case DAR_T4CMP_LSB:
	case DAR_T4CMP_MSB:
	case DAR_T4CMP_USB:
	case DAR_PLL_INT0:
	case DAR_PLL_FRAC0_LSB:
	case DAR_PLL_FRAC0_MSB:
	case DAR_PA_PWR:
	/* no DAR_ACM */
	case DAR_OVERWRITE_VER:
	case DAR_CLK_OUT_CTRL:
	case DAR_PWR_MODES:
		return true;
	default:
		return false;
	}
}
203
204 static bool
mcr20a_dar_readable(struct device * dev,unsigned int reg)205 mcr20a_dar_readable(struct device *dev, unsigned int reg)
206 {
207 bool rc;
208
209 /* all writeable are also readable */
210 rc = mcr20a_dar_writeable(dev, reg);
211 if (rc)
212 return rc;
213
214 /* readonly regs */
215 switch (reg) {
216 case DAR_RX_FRM_LEN:
217 case DAR_CCA1_ED_FNL:
218 case DAR_EVENT_TMR_LSB:
219 case DAR_EVENT_TMR_MSB:
220 case DAR_EVENT_TMR_USB:
221 case DAR_TIMESTAMP_LSB:
222 case DAR_TIMESTAMP_MSB:
223 case DAR_TIMESTAMP_USB:
224 case DAR_SEQ_STATE:
225 case DAR_LQI_VALUE:
226 case DAR_RSSI_CCA_CONT:
227 return true;
228 default:
229 return false;
230 }
231 }
232
233 static bool
mcr20a_dar_volatile(struct device * dev,unsigned int reg)234 mcr20a_dar_volatile(struct device *dev, unsigned int reg)
235 {
236 /* can be changed during runtime */
237 switch (reg) {
238 case DAR_IRQ_STS1:
239 case DAR_IRQ_STS2:
240 case DAR_IRQ_STS3:
241 /* use them in spi_async and regmap so it's volatile */
242 return true;
243 default:
244 return false;
245 }
246 }
247
248 static bool
mcr20a_dar_precious(struct device * dev,unsigned int reg)249 mcr20a_dar_precious(struct device *dev, unsigned int reg)
250 {
251 /* don't clear irq line on read */
252 switch (reg) {
253 case DAR_IRQ_STS1:
254 case DAR_IRQ_STS2:
255 case DAR_IRQ_STS3:
256 return true;
257 default:
258 return false;
259 }
260 }
261
/* regmap configuration for the Direct Access Registers: single-byte
 * address with access-mode flags OR-ed into the command byte.
 */
static const struct regmap_config mcr20a_dar_regmap = {
	.name			= "mcr20a_dar",
	.reg_bits		= 8,
	.val_bits		= 8,
	.write_flag_mask	= REGISTER_ACCESS | REGISTER_WRITE,
	.read_flag_mask		= REGISTER_ACCESS | REGISTER_READ,
	.cache_type		= REGCACHE_RBTREE,
	.writeable_reg		= mcr20a_dar_writeable,
	.readable_reg		= mcr20a_dar_readable,
	.volatile_reg		= mcr20a_dar_volatile,
	.precious_reg		= mcr20a_dar_precious,
	.fast_io		= true,
	.can_multi_write	= true,
};
276
/* regmap callback: whitelist of writeable Indirect Access Registers. */
static bool
mcr20a_iar_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case IAR_XTAL_TRIM:
	case IAR_PMC_LP_TRIM:
	case IAR_MACPANID0_LSB:
	case IAR_MACPANID0_MSB:
	case IAR_MACSHORTADDRS0_LSB:
	case IAR_MACSHORTADDRS0_MSB:
	case IAR_MACLONGADDRS0_0:
	case IAR_MACLONGADDRS0_8:
	case IAR_MACLONGADDRS0_16:
	case IAR_MACLONGADDRS0_24:
	case IAR_MACLONGADDRS0_32:
	case IAR_MACLONGADDRS0_40:
	case IAR_MACLONGADDRS0_48:
	case IAR_MACLONGADDRS0_56:
	case IAR_RX_FRAME_FILTER:
	case IAR_PLL_INT1:
	case IAR_PLL_FRAC1_LSB:
	case IAR_PLL_FRAC1_MSB:
	case IAR_MACPANID1_LSB:
	case IAR_MACPANID1_MSB:
	case IAR_MACSHORTADDRS1_LSB:
	case IAR_MACSHORTADDRS1_MSB:
	case IAR_MACLONGADDRS1_0:
	case IAR_MACLONGADDRS1_8:
	case IAR_MACLONGADDRS1_16:
	case IAR_MACLONGADDRS1_24:
	case IAR_MACLONGADDRS1_32:
	case IAR_MACLONGADDRS1_40:
	case IAR_MACLONGADDRS1_48:
	case IAR_MACLONGADDRS1_56:
	case IAR_DUAL_PAN_CTRL:
	case IAR_DUAL_PAN_DWELL:
	case IAR_CCA1_THRESH:
	case IAR_CCA1_ED_OFFSET_COMP:
	case IAR_LQI_OFFSET_COMP:
	case IAR_CCA_CTRL:
	case IAR_CCA2_CORR_PEAKS:
	case IAR_CCA2_CORR_THRESH:
	case IAR_TMR_PRESCALE:
	case IAR_ANT_PAD_CTRL:
	case IAR_MISC_PAD_CTRL:
	case IAR_BSM_CTRL:
	case IAR_RNG:
	case IAR_RX_WTR_MARK:
	case IAR_SOFT_RESET:
	case IAR_TXDELAY:
	case IAR_ACKDELAY:
	case IAR_CORR_NVAL:
	case IAR_ANT_AGC_CTRL:
	case IAR_AGC_THR1:
	case IAR_AGC_THR2:
	case IAR_PA_CAL:
	case IAR_ATT_RSSI1:
	case IAR_ATT_RSSI2:
	case IAR_RSSI_OFFSET:
	case IAR_XTAL_CTRL:
	case IAR_CHF_PMA_GAIN:
	case IAR_CHF_IBUF:
	case IAR_CHF_QBUF:
	case IAR_CHF_IRIN:
	case IAR_CHF_QRIN:
	case IAR_CHF_IL:
	case IAR_CHF_QL:
	case IAR_CHF_CC1:
	case IAR_CHF_CCL:
	case IAR_CHF_CC2:
	case IAR_CHF_IROUT:
	case IAR_CHF_QROUT:
	case IAR_PA_TUNING:
	case IAR_VCO_CTRL1:
	case IAR_VCO_CTRL2:
		return true;
	default:
		return false;
	}
}
357
358 static bool
mcr20a_iar_readable(struct device * dev,unsigned int reg)359 mcr20a_iar_readable(struct device *dev, unsigned int reg)
360 {
361 bool rc;
362
363 /* all writeable are also readable */
364 rc = mcr20a_iar_writeable(dev, reg);
365 if (rc)
366 return rc;
367
368 /* readonly regs */
369 switch (reg) {
370 case IAR_PART_ID:
371 case IAR_DUAL_PAN_STS:
372 case IAR_RX_BYTE_COUNT:
373 case IAR_FILTERFAIL_CODE1:
374 case IAR_FILTERFAIL_CODE2:
375 case IAR_RSSI:
376 return true;
377 default:
378 return false;
379 }
380 }
381
382 static bool
mcr20a_iar_volatile(struct device * dev,unsigned int reg)383 mcr20a_iar_volatile(struct device *dev, unsigned int reg)
384 {
385 /* can be changed during runtime */
386 switch (reg) {
387 case IAR_DUAL_PAN_STS:
388 case IAR_RX_BYTE_COUNT:
389 case IAR_FILTERFAIL_CODE1:
390 case IAR_FILTERFAIL_CODE2:
391 case IAR_RSSI:
392 return true;
393 default:
394 return false;
395 }
396 }
397
/* regmap configuration for the Indirect Access Registers: 16-bit
 * address with the IAR_INDEX flag selecting indirect access.
 */
static const struct regmap_config mcr20a_iar_regmap = {
	.name			= "mcr20a_iar",
	.reg_bits		= 16,
	.val_bits		= 8,
	.write_flag_mask	= REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX,
	.read_flag_mask		= REGISTER_ACCESS | REGISTER_READ  | IAR_INDEX,
	.cache_type		= REGCACHE_RBTREE,
	.writeable_reg		= mcr20a_iar_writeable,
	.readable_reg		= mcr20a_iar_readable,
	.volatile_reg		= mcr20a_iar_volatile,
	.fast_io		= true,
};
410
/* Per-device driver state.
 *
 * The spi_message/spi_transfer pairs below are preallocated so the
 * IRQ-driven TX/RX state machine can issue spi_async() transfers
 * without allocating memory in atomic context.
 */
struct mcr20a_local {
	struct spi_device *spi;

	struct ieee802154_hw *hw;
	struct mcr20a_platform_data *pdata;
	struct regmap *regmap_dar;	/* Direct Access Registers */
	struct regmap *regmap_iar;	/* Indirect Access Registers */

	u8 *buf;

	bool is_tx;	/* a TX sequence is currently in flight */

	/* for writing tx buffer */
	struct spi_message tx_buf_msg;
	u8 tx_header[1];
	/* burst buffer write command */
	struct spi_transfer tx_xfer_header;
	u8 tx_len[1];
	/* len of tx packet */
	struct spi_transfer tx_xfer_len;
	/* data of tx packet */
	struct spi_transfer tx_xfer_buf;
	struct sk_buff *tx_skb;		/* frame handed over by mac802154 */

	/* for read length rxfifo */
	struct spi_message reg_msg;
	u8 reg_cmd[1];
	u8 reg_data[MCR20A_IRQSTS_NUM];
	struct spi_transfer reg_xfer_cmd;
	struct spi_transfer reg_xfer_data;

	/* receive handling */
	struct spi_message rx_buf_msg;
	u8 rx_header[1];
	struct spi_transfer rx_xfer_header;
	u8 rx_lqi[1];			/* LQI byte trailing the payload */
	struct spi_transfer rx_xfer_lqi;
	u8 rx_buf[MCR20A_MAX_BUF];
	struct spi_transfer rx_xfer_buf;

	/* isr handling for reading intstat */
	struct spi_message irq_msg;
	u8 irq_header[1];
	u8 irq_data[MCR20A_IRQSTS_NUM];
	struct spi_transfer irq_xfer_data;
	struct spi_transfer irq_xfer_header;
};
458
/* SPI completion callback: the TX payload has been written into the
 * packet buffer, so trigger transmission by writing XCVSEQ_TX to
 * PHY_CTRL1 (asynchronously, reusing the shared reg_msg).
 */
static void
mcr20a_write_tx_buf_complete(void *context)
{
	struct mcr20a_local *lp = context;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	lp->reg_msg.complete = NULL;
	lp->reg_cmd[0]	= MCR20A_WRITE_REG(DAR_PHY_CTRL1);
	lp->reg_data[0] = MCR20A_XCVSEQ_TX;
	lp->reg_xfer_data.len = 1;

	ret = spi_async(lp->spi, &lp->reg_msg);
	if (ret)
		dev_err(printdev(lp), "failed to set SEQ TX\n");
}
476
/* ieee802154_ops.xmit_async: start an asynchronous transmission.
 *
 * The frame is not written to the chip here.  The sequencer is first
 * forced to IDLE; the resulting SEQ interrupt (with is_tx set) makes
 * mcr20a_irq_clean_complete() call mcr20a_handle_tx(), which writes
 * the payload and finally starts the TX sequence.
 */
static int
mcr20a_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	lp->tx_skb = skb;

	print_hex_dump_debug("mcr20a tx: ", DUMP_PREFIX_OFFSET, 16, 1,
			     skb->data, skb->len, 0);

	lp->is_tx = 1;

	/* force the sequencer to IDLE; TX continues from the IRQ path */
	lp->reg_msg.complete = NULL;
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
	lp->reg_data[0] = MCR20A_XCVSEQ_IDLE;
	lp->reg_xfer_data.len = 1;

	return spi_async(lp->spi, &lp->reg_msg);
}
498
499 static int
mcr20a_ed(struct ieee802154_hw * hw,u8 * level)500 mcr20a_ed(struct ieee802154_hw *hw, u8 *level)
501 {
502 WARN_ON(!level);
503 *level = 0xbe;
504 return 0;
505 }
506
507 static int
mcr20a_set_channel(struct ieee802154_hw * hw,u8 page,u8 channel)508 mcr20a_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
509 {
510 struct mcr20a_local *lp = hw->priv;
511 int ret;
512
513 dev_dbg(printdev(lp), "%s\n", __func__);
514
515 /* freqency = ((PLL_INT+64) + (PLL_FRAC/65536)) * 32 MHz */
516 ret = regmap_write(lp->regmap_dar, DAR_PLL_INT0, PLL_INT[channel - 11]);
517 if (ret)
518 return ret;
519 ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_LSB, 0x00);
520 if (ret)
521 return ret;
522 ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_MSB,
523 PLL_FRAC[channel - 11]);
524 if (ret)
525 return ret;
526
527 return 0;
528 }
529
530 static int
mcr20a_start(struct ieee802154_hw * hw)531 mcr20a_start(struct ieee802154_hw *hw)
532 {
533 struct mcr20a_local *lp = hw->priv;
534 int ret;
535
536 dev_dbg(printdev(lp), "%s\n", __func__);
537
538 /* No slotted operation */
539 dev_dbg(printdev(lp), "no slotted operation\n");
540 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
541 DAR_PHY_CTRL1_SLOTTED, 0x0);
542
543 /* enable irq */
544 enable_irq(lp->spi->irq);
545
546 /* Unmask SEQ interrupt */
547 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
548 DAR_PHY_CTRL2_SEQMSK, 0x0);
549
550 /* Start the RX sequence */
551 dev_dbg(printdev(lp), "start the RX sequence\n");
552 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
553 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
554
555 return 0;
556 }
557
/* ieee802154_ops.stop: idle the sequencer and mask the interrupt line.
 * Return values are deliberately ignored — stop cannot fail upward.
 */
static void
mcr20a_stop(struct ieee802154_hw *hw)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* stop all running sequence */
	regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
			   DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);

	/* disable irq */
	disable_irq(lp->spi->irq);
}
572
/* ieee802154_ops.set_hw_addr_filt: program the hardware address filter
 * (short address, PAN id, extended address, coordinator role) for PAN 0.
 * Multi-byte values are split LSB/MSB across consecutive IAR registers.
 * NOTE(review): regmap errors are ignored here, matching the rest of
 * the driver's best-effort configuration style.
 */
static int
mcr20a_set_hw_addr_filt(struct ieee802154_hw *hw,
			struct ieee802154_hw_addr_filt *filt,
			unsigned long changed)
{
	struct mcr20a_local *lp = hw->priv;

	dev_dbg(printdev(lp), "%s\n", __func__);

	if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
		u16 addr = le16_to_cpu(filt->short_addr);

		regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_LSB, addr);
		regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_MSB, addr >> 8);
	}

	if (changed & IEEE802154_AFILT_PANID_CHANGED) {
		u16 pan = le16_to_cpu(filt->pan_id);

		regmap_write(lp->regmap_iar, IAR_MACPANID0_LSB, pan);
		regmap_write(lp->regmap_iar, IAR_MACPANID0_MSB, pan >> 8);
	}

	if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
		u8 addr[8], i;

		/* ieee_addr is little-endian; write byte 0 to ADDRS0_0 etc. */
		memcpy(addr, &filt->ieee_addr, 8);
		for (i = 0; i < 8; i++)
			regmap_write(lp->regmap_iar,
				     IAR_MACLONGADDRS0_0 + i, addr[i]);
	}

	if (changed & IEEE802154_AFILT_PANC_CHANGED) {
		if (filt->pan_coord) {
			regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					   DAR_PHY_CTRL4_PANCORDNTR0, 0x10);
		} else {
			regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					   DAR_PHY_CTRL4_PANCORDNTR0, 0x00);
		}
	}

	return 0;
}
617
/* Supported TX power levels in mBm, -30 dBm to 10 dBm in 2 dB steps.
 * The table index maps to the PA_PWR register in mcr20a_set_txpower().
 */
#define MCR20A_MAX_TX_POWERS	0x14
static const s32 mcr20a_powers[MCR20A_MAX_TX_POWERS + 1] = {
	-3000, -2800, -2600, -2400, -2200, -2000, -1800, -1600, -1400,
	-1200, -1000, -800, -600, -400, -200, 0, 200, 400, 600, 800, 1000
};
624
625 static int
mcr20a_set_txpower(struct ieee802154_hw * hw,s32 mbm)626 mcr20a_set_txpower(struct ieee802154_hw *hw, s32 mbm)
627 {
628 struct mcr20a_local *lp = hw->priv;
629 u32 i;
630
631 dev_dbg(printdev(lp), "%s(%d)\n", __func__, mbm);
632
633 for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
634 if (lp->hw->phy->supported.tx_powers[i] == mbm)
635 return regmap_write(lp->regmap_dar, DAR_PA_PWR,
636 ((i + 8) & 0x1F));
637 }
638
639 return -EINVAL;
640 }
641
/* CCA ED threshold table, filled at runtime by mcr20a_hw_setup():
 * entry i holds -i dBm expressed in mBm (0 .. -110 dBm).
 */
#define MCR20A_MAX_ED_LEVELS	MCR20A_MIN_CCA_THRESHOLD
static s32 mcr20a_ed_levels[MCR20A_MAX_ED_LEVELS + 1];
644
/* ieee802154_ops.set_cca_mode: map the nl802154 CCA mode/option onto
 * the chip's CCATYPE field, and for mode 3 additionally select the
 * AND/OR combination of energy and carrier detection in IAR_CCA_CTRL.
 */
static int
mcr20a_set_cca_mode(struct ieee802154_hw *hw,
		    const struct wpan_phy_cca *cca)
{
	struct mcr20a_local *lp = hw->priv;
	unsigned int cca_mode = 0xff;
	bool cca_mode_and = false;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* mapping 802.15.4 to driver spec */
	switch (cca->mode) {
	case NL802154_CCA_ENERGY:
		cca_mode = MCR20A_CCA_MODE1;
		break;
	case NL802154_CCA_CARRIER:
		cca_mode = MCR20A_CCA_MODE2;
		break;
	case NL802154_CCA_ENERGY_CARRIER:
		switch (cca->opt) {
		case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
			cca_mode = MCR20A_CCA_MODE3;
			cca_mode_and = true;
			break;
		case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
			cca_mode = MCR20A_CCA_MODE3;
			cca_mode_and = false;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
				 DAR_PHY_CTRL4_CCATYPE_MASK,
				 cca_mode << DAR_PHY_CTRL4_CCATYPE_SHIFT);
	if (ret < 0)
		return ret;

	if (cca_mode == MCR20A_CCA_MODE3) {
		if (cca_mode_and) {
			ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL,
						 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
						 0x08);
		} else {
			ret = regmap_update_bits(lp->regmap_iar,
						 IAR_CCA_CTRL,
						 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
						 0x00);
		}
		if (ret < 0)
			return ret;
	}

	return ret;
}
704
705 static int
mcr20a_set_cca_ed_level(struct ieee802154_hw * hw,s32 mbm)706 mcr20a_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
707 {
708 struct mcr20a_local *lp = hw->priv;
709 u32 i;
710
711 dev_dbg(printdev(lp), "%s\n", __func__);
712
713 for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
714 if (hw->phy->supported.cca_ed_levels[i] == mbm)
715 return regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, i);
716 }
717
718 return 0;
719 }
720
721 static int
mcr20a_set_promiscuous_mode(struct ieee802154_hw * hw,const bool on)722 mcr20a_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
723 {
724 struct mcr20a_local *lp = hw->priv;
725 int ret;
726 u8 rx_frame_filter_reg = 0x0;
727
728 dev_dbg(printdev(lp), "%s(%d)\n", __func__, on);
729
730 if (on) {
731 /* All frame types accepted*/
732 rx_frame_filter_reg &= ~(IAR_RX_FRAME_FLT_FRM_VER);
733 rx_frame_filter_reg |= (IAR_RX_FRAME_FLT_ACK_FT |
734 IAR_RX_FRAME_FLT_NS_FT);
735
736 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
737 DAR_PHY_CTRL4_PROMISCUOUS,
738 DAR_PHY_CTRL4_PROMISCUOUS);
739 if (ret < 0)
740 return ret;
741
742 ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
743 rx_frame_filter_reg);
744 if (ret < 0)
745 return ret;
746 } else {
747 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
748 DAR_PHY_CTRL4_PROMISCUOUS, 0x0);
749 if (ret < 0)
750 return ret;
751
752 ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
753 IAR_RX_FRAME_FLT_FRM_VER |
754 IAR_RX_FRAME_FLT_BEACON_FT |
755 IAR_RX_FRAME_FLT_DATA_FT |
756 IAR_RX_FRAME_FLT_CMD_FT);
757 if (ret < 0)
758 return ret;
759 }
760
761 return 0;
762 }
763
/* mac802154 driver operations implemented by this driver. */
static const struct ieee802154_ops mcr20a_hw_ops = {
	.owner			= THIS_MODULE,
	.xmit_async		= mcr20a_xmit,
	.ed			= mcr20a_ed,
	.set_channel		= mcr20a_set_channel,
	.start			= mcr20a_start,
	.stop			= mcr20a_stop,
	.set_hw_addr_filt	= mcr20a_set_hw_addr_filt,
	.set_txpower		= mcr20a_set_txpower,
	.set_cca_mode		= mcr20a_set_cca_mode,
	.set_cca_ed_level	= mcr20a_set_cca_ed_level,
	.set_promiscuous_mode	= mcr20a_set_promiscuous_mode,
};
777
/* (Re)arm the receiver by asynchronously programming the RX sequence
 * into PHY_CTRL1.  Always returns 0; the write completes asynchronously.
 */
static int
mcr20a_request_rx(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);

	/* Start the RX sequence */
	regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);

	return 0;
}
789
790 static void
mcr20a_handle_rx_read_buf_complete(void * context)791 mcr20a_handle_rx_read_buf_complete(void *context)
792 {
793 struct mcr20a_local *lp = context;
794 u8 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
795 struct sk_buff *skb;
796
797 dev_dbg(printdev(lp), "%s\n", __func__);
798
799 dev_dbg(printdev(lp), "RX is done\n");
800
801 if (!ieee802154_is_valid_psdu_len(len)) {
802 dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
803 len = IEEE802154_MTU;
804 }
805
806 len = len - 2; /* get rid of frame check field */
807
808 skb = dev_alloc_skb(len);
809 if (!skb)
810 return;
811
812 memcpy(skb_put(skb, len), lp->rx_buf, len);
813 ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);
814
815 print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,
816 lp->rx_buf, len, 0);
817 pr_debug("mcr20a rx: lqi: %02hhx\n", lp->rx_lqi[0]);
818
819 /* start RX sequence */
820 mcr20a_request_rx(lp);
821 }
822
823 static void
mcr20a_handle_rx_read_len_complete(void * context)824 mcr20a_handle_rx_read_len_complete(void *context)
825 {
826 struct mcr20a_local *lp = context;
827 u8 len;
828 int ret;
829
830 dev_dbg(printdev(lp), "%s\n", __func__);
831
832 /* get the length of received frame */
833 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
834 dev_dbg(printdev(lp), "frame len : %d\n", len);
835
836 /* prepare to read the rx buf */
837 lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
838 lp->rx_header[0] = MCR20A_BURST_READ_PACKET_BUF;
839 lp->rx_xfer_buf.len = len;
840
841 ret = spi_async(lp->spi, &lp->rx_buf_msg);
842 if (ret)
843 dev_err(printdev(lp), "failed to read rx buffer length\n");
844 }
845
/* First step of frame reception: asynchronously read DAR_RX_FRM_LEN;
 * the chain continues in mcr20a_handle_rx_read_len_complete().
 */
static int
mcr20a_handle_rx(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);
	lp->reg_msg.complete = mcr20a_handle_rx_read_len_complete;
	lp->reg_cmd[0] = MCR20A_READ_REG(DAR_RX_FRM_LEN);
	lp->reg_xfer_data.len = 1;

	return spi_async(lp->spi, &lp->reg_msg);
}
856
/* TX sequence finished: report completion to mac802154 (ifs_handling
 * disabled) and return the transceiver to RX.
 */
static int
mcr20a_handle_tx_complete(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);

	ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);

	return mcr20a_request_rx(lp);
}
866
867 static int
mcr20a_handle_tx(struct mcr20a_local * lp)868 mcr20a_handle_tx(struct mcr20a_local *lp)
869 {
870 int ret;
871
872 dev_dbg(printdev(lp), "%s\n", __func__);
873
874 /* write tx buffer */
875 lp->tx_header[0] = MCR20A_BURST_WRITE_PACKET_BUF;
876 /* add 2 bytes of FCS */
877 lp->tx_len[0] = lp->tx_skb->len + 2;
878 lp->tx_xfer_buf.tx_buf = lp->tx_skb->data;
879 /* add 1 byte psduLength */
880 lp->tx_xfer_buf.len = lp->tx_skb->len + 1;
881
882 ret = spi_async(lp->spi, &lp->tx_buf_msg);
883 if (ret) {
884 dev_err(printdev(lp), "SPI write Failed for TX buf\n");
885 return ret;
886 }
887
888 return 0;
889 }
890
/* SPI completion callback: the IRQ status registers have been cleared.
 * Re-enable the interrupt line and dispatch on which sequence(s)
 * completed.
 *
 * NOTE(review): seq_state masks IRQSTS1 with DAR_PHY_CTRL1_XCVSEQ_MASK
 * and irq_data[] is indexed with the DAR_IRQ_STS1/2 register numbers;
 * this presumably relies on those register offsets being 0 and 1 and on
 * the IRQ bits sharing the mask's bit positions — verify against
 * mcr20a.h.
 */
static void
mcr20a_irq_clean_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 seq_state = lp->irq_data[DAR_IRQ_STS1] & DAR_PHY_CTRL1_XCVSEQ_MASK;

	dev_dbg(printdev(lp), "%s\n", __func__);

	enable_irq(lp->spi->irq);

	dev_dbg(printdev(lp), "IRQ STA1 (%02x) STA2 (%02x)\n",
		lp->irq_data[DAR_IRQ_STS1], lp->irq_data[DAR_IRQ_STS2]);

	switch (seq_state) {
	/* TX IRQ, RX IRQ and SEQ IRQ */
	case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			lp->is_tx = 0;
			dev_dbg(printdev(lp), "TX is done. No ACK\n");
			mcr20a_handle_tx_complete(lp);
		}
		break;
	case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
		/* rx is starting */
		dev_dbg(printdev(lp), "RX is starting\n");
		mcr20a_handle_rx(lp);
		break;
	case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			/* tx is done */
			lp->is_tx = 0;
			dev_dbg(printdev(lp), "TX is done. Get ACK\n");
			mcr20a_handle_tx_complete(lp);
		} else {
			/* rx is starting */
			dev_dbg(printdev(lp), "RX is starting\n");
			mcr20a_handle_rx(lp);
		}
		break;
	case (DAR_IRQSTS1_SEQIRQ):
		if (lp->is_tx) {
			/* SEQ IRQ after forcing IDLE in mcr20a_xmit():
			 * now actually write and start the TX.
			 */
			dev_dbg(printdev(lp), "TX is starting\n");
			mcr20a_handle_tx(lp);
		} else {
			dev_dbg(printdev(lp), "MCR20A is stop\n");
		}
		break;
	}
}
940
/* SPI completion callback: IRQSTS1/2 have been read into irq_data.
 * Force the sequencer back to IDLE, then write the same status bytes
 * back to DAR_IRQ_STS1/2 to acknowledge them (presumably
 * write-1-to-clear — verify against the datasheet).  The chain
 * continues in mcr20a_irq_clean_complete().
 */
static void mcr20a_irq_status_complete(void *context)
{
	int ret;
	struct mcr20a_local *lp = context;

	dev_dbg(printdev(lp), "%s\n", __func__);
	regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);

	lp->reg_msg.complete = mcr20a_irq_clean_complete;
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_IRQ_STS1);
	memcpy(lp->reg_data, lp->irq_data, MCR20A_IRQSTS_NUM);
	lp->reg_xfer_data.len = MCR20A_IRQSTS_NUM;

	ret = spi_async(lp->spi, &lp->reg_msg);

	if (ret)
		dev_err(printdev(lp), "failed to clean irq status\n");
}
960
/* Hard IRQ handler: mask the interrupt line and start an asynchronous
 * read of the IRQ status registers; all further processing happens in
 * the SPI completion chain (mcr20a_irq_status_complete() ...), which
 * re-enables the IRQ when done.
 */
static irqreturn_t mcr20a_irq_isr(int irq, void *data)
{
	struct mcr20a_local *lp = data;
	int ret;

	disable_irq_nosync(irq);

	lp->irq_header[0] = MCR20A_READ_REG(DAR_IRQ_STS1);
	/* read IRQSTSx */
	ret = spi_async(lp->spi, &lp->irq_msg);
	if (ret) {
		enable_irq(irq);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
978
mcr20a_get_platform_data(struct spi_device * spi,struct mcr20a_platform_data * pdata)979 static int mcr20a_get_platform_data(struct spi_device *spi,
980 struct mcr20a_platform_data *pdata)
981 {
982 int ret = 0;
983
984 if (!spi->dev.of_node)
985 return -EINVAL;
986
987 pdata->rst_gpio = of_get_named_gpio(spi->dev.of_node, "rst_b-gpio", 0);
988 dev_dbg(&spi->dev, "rst_b-gpio: %d\n", pdata->rst_gpio);
989
990 return ret;
991 }
992
mcr20a_hw_setup(struct mcr20a_local * lp)993 static void mcr20a_hw_setup(struct mcr20a_local *lp)
994 {
995 u8 i;
996 struct ieee802154_hw *hw = lp->hw;
997 struct wpan_phy *phy = lp->hw->phy;
998
999 dev_dbg(printdev(lp), "%s\n", __func__);
1000
1001 phy->symbol_duration = 16;
1002 phy->lifs_period = 40;
1003 phy->sifs_period = 12;
1004
1005 hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
1006 IEEE802154_HW_AFILT |
1007 IEEE802154_HW_PROMISCUOUS;
1008
1009 phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
1010 WPAN_PHY_FLAG_CCA_MODE;
1011
1012 phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
1013 BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
1014 phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
1015 BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
1016
1017 /* initiating cca_ed_levels */
1018 for (i = MCR20A_MAX_CCA_THRESHOLD; i < MCR20A_MIN_CCA_THRESHOLD + 1;
1019 ++i) {
1020 mcr20a_ed_levels[i] = -i * 100;
1021 }
1022
1023 phy->supported.cca_ed_levels = mcr20a_ed_levels;
1024 phy->supported.cca_ed_levels_size = ARRAY_SIZE(mcr20a_ed_levels);
1025
1026 phy->cca.mode = NL802154_CCA_ENERGY;
1027
1028 phy->supported.channels[0] = MCR20A_VALID_CHANNELS;
1029 phy->current_page = 0;
1030 /* MCR20A default reset value */
1031 phy->current_channel = 20;
1032 phy->symbol_duration = 16;
1033 phy->supported.tx_powers = mcr20a_powers;
1034 phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers);
1035 phy->cca_ed_level = phy->supported.cca_ed_levels[75];
1036 phy->transmit_power = phy->supported.tx_powers[0x0F];
1037 }
1038
/* Pre-build the 3-transfer TX message: burst-write command byte,
 * psduLength byte, then the payload (tx_buf/len patched per frame in
 * mcr20a_handle_tx()).  Completion chains to
 * mcr20a_write_tx_buf_complete().
 */
static void
mcr20a_setup_tx_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->tx_buf_msg);
	lp->tx_buf_msg.context = lp;
	lp->tx_buf_msg.complete = mcr20a_write_tx_buf_complete;

	lp->tx_xfer_header.len = 1;
	lp->tx_xfer_header.tx_buf = lp->tx_header;

	lp->tx_xfer_len.len = 1;
	lp->tx_xfer_len.tx_buf = lp->tx_len;

	spi_message_add_tail(&lp->tx_xfer_header, &lp->tx_buf_msg);
	spi_message_add_tail(&lp->tx_xfer_len, &lp->tx_buf_msg);
	spi_message_add_tail(&lp->tx_xfer_buf, &lp->tx_buf_msg);
}
1056
/* Pre-build the two RX-path messages:
 * - reg_msg: generic command + data transfer used for single register
 *   reads/writes from atomic context (full-duplex, same buffers for
 *   tx and rx);
 * - rx_buf_msg: burst read of the packet buffer followed by the LQI
 *   byte, completing in mcr20a_handle_rx_read_buf_complete().
 */
static void
mcr20a_setup_rx_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->reg_msg);
	lp->reg_msg.context = lp;

	lp->reg_xfer_cmd.len = 1;
	lp->reg_xfer_cmd.tx_buf = lp->reg_cmd;
	lp->reg_xfer_cmd.rx_buf = lp->reg_cmd;

	lp->reg_xfer_data.rx_buf = lp->reg_data;
	lp->reg_xfer_data.tx_buf = lp->reg_data;

	spi_message_add_tail(&lp->reg_xfer_cmd, &lp->reg_msg);
	spi_message_add_tail(&lp->reg_xfer_data, &lp->reg_msg);

	spi_message_init(&lp->rx_buf_msg);
	lp->rx_buf_msg.context = lp;
	lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
	lp->rx_xfer_header.len = 1;
	lp->rx_xfer_header.tx_buf = lp->rx_header;
	lp->rx_xfer_header.rx_buf = lp->rx_header;

	lp->rx_xfer_buf.rx_buf = lp->rx_buf;

	lp->rx_xfer_lqi.len = 1;
	lp->rx_xfer_lqi.rx_buf = lp->rx_lqi;

	spi_message_add_tail(&lp->rx_xfer_header, &lp->rx_buf_msg);
	spi_message_add_tail(&lp->rx_xfer_buf, &lp->rx_buf_msg);
	spi_message_add_tail(&lp->rx_xfer_lqi, &lp->rx_buf_msg);
}
1089
/* Pre-build the ISR message: read command for DAR_IRQ_STS1 followed by
 * MCR20A_IRQSTS_NUM status bytes; completion chains to
 * mcr20a_irq_status_complete().
 */
static void
mcr20a_setup_irq_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->irq_msg);
	lp->irq_msg.context		= lp;
	lp->irq_msg.complete	= mcr20a_irq_status_complete;
	lp->irq_xfer_header.len	= 1;
	lp->irq_xfer_header.tx_buf = lp->irq_header;
	lp->irq_xfer_header.rx_buf = lp->irq_header;

	lp->irq_xfer_data.len	= MCR20A_IRQSTS_NUM;
	lp->irq_xfer_data.rx_buf = lp->irq_data;

	spi_message_add_tail(&lp->irq_xfer_header, &lp->irq_msg);
	spi_message_add_tail(&lp->irq_xfer_data, &lp->irq_msg);
}
1106
1107 static int
mcr20a_phy_init(struct mcr20a_local * lp)1108 mcr20a_phy_init(struct mcr20a_local *lp)
1109 {
1110 u8 index;
1111 unsigned int phy_reg = 0;
1112 int ret;
1113
1114 dev_dbg(printdev(lp), "%s\n", __func__);
1115
1116 /* Disable Tristate on COCO MISO for SPI reads */
1117 ret = regmap_write(lp->regmap_iar, IAR_MISC_PAD_CTRL, 0x02);
1118 if (ret)
1119 goto err_ret;
1120
1121 /* Clear all PP IRQ bits in IRQSTS1 to avoid unexpected interrupts
1122 * immediately after init
1123 */
1124 ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS1, 0xEF);
1125 if (ret)
1126 goto err_ret;
1127
1128 /* Clear all PP IRQ bits in IRQSTS2 */
1129 ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS2,
1130 DAR_IRQSTS2_ASM_IRQ | DAR_IRQSTS2_PB_ERR_IRQ |
1131 DAR_IRQSTS2_WAKE_IRQ);
1132 if (ret)
1133 goto err_ret;
1134
1135 /* Disable all timer interrupts */
1136 ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS3, 0xFF);
1137 if (ret)
1138 goto err_ret;
1139
1140 /* PHY_CTRL1 : default HW settings + AUTOACK enabled */
1141 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
1142 DAR_PHY_CTRL1_AUTOACK, DAR_PHY_CTRL1_AUTOACK);
1143
1144 /* PHY_CTRL2 : disable all interrupts */
1145 ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL2, 0xFF);
1146 if (ret)
1147 goto err_ret;
1148
1149 /* PHY_CTRL3 : disable all timers and remaining interrupts */
1150 ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL3,
1151 DAR_PHY_CTRL3_ASM_MSK | DAR_PHY_CTRL3_PB_ERR_MSK |
1152 DAR_PHY_CTRL3_WAKE_MSK);
1153 if (ret)
1154 goto err_ret;
1155
1156 /* SRC_CTRL : enable Acknowledge Frame Pending and
1157 * Source Address Matching Enable
1158 */
1159 ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL,
1160 DAR_SRC_CTRL_ACK_FRM_PND |
1161 (DAR_SRC_CTRL_INDEX << DAR_SRC_CTRL_INDEX_SHIFT));
1162 if (ret)
1163 goto err_ret;
1164
1165 /* RX_FRAME_FILTER */
1166 /* FRM_VER[1:0] = b11. Accept FrameVersion 0 and 1 packets */
1167 ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
1168 IAR_RX_FRAME_FLT_FRM_VER |
1169 IAR_RX_FRAME_FLT_BEACON_FT |
1170 IAR_RX_FRAME_FLT_DATA_FT |
1171 IAR_RX_FRAME_FLT_CMD_FT);
1172 if (ret)
1173 goto err_ret;
1174
1175 dev_info(printdev(lp), "MCR20A DAR overwrites version: 0x%02x\n",
1176 MCR20A_OVERWRITE_VERSION);
1177
1178 /* Overwrites direct registers */
1179 ret = regmap_write(lp->regmap_dar, DAR_OVERWRITE_VER,
1180 MCR20A_OVERWRITE_VERSION);
1181 if (ret)
1182 goto err_ret;
1183
1184 /* Overwrites indirect registers */
1185 ret = regmap_multi_reg_write(lp->regmap_iar, mar20a_iar_overwrites,
1186 ARRAY_SIZE(mar20a_iar_overwrites));
1187 if (ret)
1188 goto err_ret;
1189
1190 /* Clear HW indirect queue */
1191 dev_dbg(printdev(lp), "clear HW indirect queue\n");
1192 for (index = 0; index < MCR20A_PHY_INDIRECT_QUEUE_SIZE; index++) {
1193 phy_reg = (u8)(((index & DAR_SRC_CTRL_INDEX) <<
1194 DAR_SRC_CTRL_INDEX_SHIFT)
1195 | (DAR_SRC_CTRL_SRCADDR_EN)
1196 | (DAR_SRC_CTRL_INDEX_DISABLE));
1197 ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, phy_reg);
1198 if (ret)
1199 goto err_ret;
1200 phy_reg = 0;
1201 }
1202
1203 /* Assign HW Indirect hash table to PAN0 */
1204 ret = regmap_read(lp->regmap_iar, IAR_DUAL_PAN_CTRL, &phy_reg);
1205 if (ret)
1206 goto err_ret;
1207
1208 /* Clear current lvl */
1209 phy_reg &= ~IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK;
1210
1211 /* Set new lvl */
1212 phy_reg |= MCR20A_PHY_INDIRECT_QUEUE_SIZE <<
1213 IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT;
1214 ret = regmap_write(lp->regmap_iar, IAR_DUAL_PAN_CTRL, phy_reg);
1215 if (ret)
1216 goto err_ret;
1217
1218 /* Set CCA threshold to -75 dBm */
1219 ret = regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, 0x4B);
1220 if (ret)
1221 goto err_ret;
1222
1223 /* Set prescaller to obtain 1 symbol (16us) timebase */
1224 ret = regmap_write(lp->regmap_iar, IAR_TMR_PRESCALE, 0x05);
1225 if (ret)
1226 goto err_ret;
1227
1228 /* Enable autodoze mode. */
1229 ret = regmap_update_bits(lp->regmap_dar, DAR_PWR_MODES,
1230 DAR_PWR_MODES_AUTODOZE,
1231 DAR_PWR_MODES_AUTODOZE);
1232 if (ret)
1233 goto err_ret;
1234
1235 /* Disable clk_out */
1236 ret = regmap_update_bits(lp->regmap_dar, DAR_CLK_OUT_CTRL,
1237 DAR_CLK_OUT_CTRL_EN, 0x0);
1238 if (ret)
1239 goto err_ret;
1240
1241 return 0;
1242
1243 err_ret:
1244 return ret;
1245 }
1246
1247 static int
mcr20a_probe(struct spi_device * spi)1248 mcr20a_probe(struct spi_device *spi)
1249 {
1250 struct ieee802154_hw *hw;
1251 struct mcr20a_local *lp;
1252 struct mcr20a_platform_data *pdata;
1253 int irq_type;
1254 int ret = -ENOMEM;
1255
1256 dev_dbg(&spi->dev, "%s\n", __func__);
1257
1258 if (!spi->irq) {
1259 dev_err(&spi->dev, "no IRQ specified\n");
1260 return -EINVAL;
1261 }
1262
1263 pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
1264 if (!pdata)
1265 return -ENOMEM;
1266
1267 /* set mcr20a platform data */
1268 ret = mcr20a_get_platform_data(spi, pdata);
1269 if (ret < 0) {
1270 dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n");
1271 goto free_pdata;
1272 }
1273
1274 /* init reset gpio */
1275 if (gpio_is_valid(pdata->rst_gpio)) {
1276 ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio,
1277 GPIOF_OUT_INIT_HIGH, "reset");
1278 if (ret)
1279 goto free_pdata;
1280 }
1281
1282 /* reset mcr20a */
1283 if (gpio_is_valid(pdata->rst_gpio)) {
1284 usleep_range(10, 20);
1285 gpio_set_value_cansleep(pdata->rst_gpio, 0);
1286 usleep_range(10, 20);
1287 gpio_set_value_cansleep(pdata->rst_gpio, 1);
1288 usleep_range(120, 240);
1289 }
1290
1291 /* allocate ieee802154_hw and private data */
1292 hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
1293 if (!hw) {
1294 dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
1295 ret = -ENOMEM;
1296 goto free_pdata;
1297 }
1298
1299 /* init mcr20a local data */
1300 lp = hw->priv;
1301 lp->hw = hw;
1302 lp->spi = spi;
1303 lp->spi->dev.platform_data = pdata;
1304 lp->pdata = pdata;
1305
1306 /* init ieee802154_hw */
1307 hw->parent = &spi->dev;
1308 ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
1309
1310 /* init buf */
1311 lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL);
1312
1313 if (!lp->buf) {
1314 ret = -ENOMEM;
1315 goto free_dev;
1316 }
1317
1318 mcr20a_setup_tx_spi_messages(lp);
1319 mcr20a_setup_rx_spi_messages(lp);
1320 mcr20a_setup_irq_spi_messages(lp);
1321
1322 /* setup regmap */
1323 lp->regmap_dar = devm_regmap_init_spi(spi, &mcr20a_dar_regmap);
1324 if (IS_ERR(lp->regmap_dar)) {
1325 ret = PTR_ERR(lp->regmap_dar);
1326 dev_err(&spi->dev, "Failed to allocate dar map: %d\n",
1327 ret);
1328 goto free_dev;
1329 }
1330
1331 lp->regmap_iar = devm_regmap_init_spi(spi, &mcr20a_iar_regmap);
1332 if (IS_ERR(lp->regmap_iar)) {
1333 ret = PTR_ERR(lp->regmap_iar);
1334 dev_err(&spi->dev, "Failed to allocate iar map: %d\n", ret);
1335 goto free_dev;
1336 }
1337
1338 mcr20a_hw_setup(lp);
1339
1340 spi_set_drvdata(spi, lp);
1341
1342 ret = mcr20a_phy_init(lp);
1343 if (ret < 0) {
1344 dev_crit(&spi->dev, "mcr20a_phy_init failed\n");
1345 goto free_dev;
1346 }
1347
1348 irq_type = irq_get_trigger_type(spi->irq);
1349 if (!irq_type)
1350 irq_type = IRQF_TRIGGER_FALLING;
1351
1352 ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr,
1353 irq_type, dev_name(&spi->dev), lp);
1354 if (ret) {
1355 dev_err(&spi->dev, "could not request_irq for mcr20a\n");
1356 ret = -ENODEV;
1357 goto free_dev;
1358 }
1359
1360 /* disable_irq by default and wait for starting hardware */
1361 disable_irq(spi->irq);
1362
1363 ret = ieee802154_register_hw(hw);
1364 if (ret) {
1365 dev_crit(&spi->dev, "ieee802154_register_hw failed\n");
1366 goto free_dev;
1367 }
1368
1369 return ret;
1370
1371 free_dev:
1372 ieee802154_free_hw(lp->hw);
1373 free_pdata:
1374 kfree(pdata);
1375
1376 return ret;
1377 }
1378
mcr20a_remove(struct spi_device * spi)1379 static int mcr20a_remove(struct spi_device *spi)
1380 {
1381 struct mcr20a_local *lp = spi_get_drvdata(spi);
1382
1383 dev_dbg(&spi->dev, "%s\n", __func__);
1384
1385 ieee802154_unregister_hw(lp->hw);
1386 ieee802154_free_hw(lp->hw);
1387
1388 return 0;
1389 }
1390
/* Device-tree match table: binds to nodes with compatible = "nxp,mcr20a". */
static const struct of_device_id mcr20a_of_match[] = {
	{ .compatible = "nxp,mcr20a", },
	{ },
};
MODULE_DEVICE_TABLE(of, mcr20a_of_match);

/* Legacy SPI board-info match table for non-DT platforms. */
static const struct spi_device_id mcr20a_device_id[] = {
	{ .name = "mcr20a", },
	{ },
};
MODULE_DEVICE_TABLE(spi, mcr20a_device_id);
1402
1403 static struct spi_driver mcr20a_driver = {
1404 .id_table = mcr20a_device_id,
1405 .driver = {
1406 .of_match_table = of_match_ptr(mcr20a_of_match),
1407 .name = "mcr20a",
1408 },
1409 .probe = mcr20a_probe,
1410 .remove = mcr20a_remove,
1411 };
1412
1413 module_spi_driver(mcr20a_driver);
1414
1415 MODULE_DESCRIPTION("MCR20A Transceiver Driver");
1416 MODULE_LICENSE("GPL v2");
1417 MODULE_AUTHOR("Xue Liu <liuxuenetmail@gmail>");
1418