// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
 *
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 *         Sujuan Chen <sujuan.chen@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>

#include "mtk_wed.h"
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"

static u32
mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
{
	u32 val;

	if (regmap_read(wo->mmio.regs, reg, &val))
		val = ~0;

	return val;
}

static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
	regmap_write(wo->mmio.regs, reg, val);
}

static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
{
	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);

	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
}

static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}

static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}

static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
	unsigned long flags;

	spin_lock_irqsave(&wo->mmio.lock, flags);
	wo->mmio.irq_mask &= ~mask;
	wo->mmio.irq_mask |= val;
	if (set)
		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
	spin_unlock_irqrestore(&wo->mmio.lock, flags);
}

static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
	tasklet_schedule(&wo->mmio.irq_tasklet);
}

static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}

static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}

static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		      u32 val)
{
	wmb();
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}

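/* Pop one completed buffer from @q. If @flush is set, the descriptor is
 * reclaimed even when the device has not set the DMA_DONE bit yet.
 */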
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
		   bool flush)
{
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
	int index = (q->tail + 1) % q->n_desc;
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	void *buf;

	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
		return NULL;

	q->tail = index;
	q->queued--;

	desc = &q->desc[index];
	entry = &q->entry[index];
	buf = entry->buf;
	if (len)
		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
				 le32_to_cpu(READ_ONCE(desc->ctrl)));
	if (buf)
		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
				 DMA_FROM_DEVICE);
	entry->buf = NULL;

	return buf;
}

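/* Allocate and DMA-map fresh buffers until @q is full. For rx queues the
 * descriptors are rewritten as well so the device can fill them again.
 */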
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			bool rx)
{
	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	int n_buf = 0;

	while (q->queued < q->n_desc) {
		struct mtk_wed_wo_queue_entry *entry;
		dma_addr_t addr;
		void *buf;

		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		q->head = (q->head + 1) % q->n_desc;
		entry = &q->entry[q->head];
		entry->addr = addr;
		entry->len = q->buf_size;
		q->entry[q->head].buf = buf;

		if (rx) {
			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
					      entry->len);

			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
		}
		q->queued++;
		n_buf++;
	}

	return n_buf;
}

static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}

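/* Drain completed rx buffers, dispatch the MCU messages they carry and
 * refill the ring.
 */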
static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	for (;;) {
		struct mtk_wed_mcu_hdr *hdr;
		struct sk_buff *skb;
		void *data;
		u32 len;

		data = mtk_wed_wo_dequeue(wo, q, &len, false);
		if (!data)
			break;

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		__skb_put(skb, len);
		if (mtk_wed_mcu_check_msg(wo, skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
			mtk_wed_mcu_rx_event(wo, skb);
		else
			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
	}

	if (mtk_wed_wo_queue_refill(wo, q, true)) {
		u32 index = (q->head - 1) % q->n_desc;

		mtk_wed_wo_queue_kick(wo, q, index);
	}
}

static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
	struct mtk_wed_wo *wo = data;

	mtk_wed_wo_set_isr(wo, 0);
	tasklet_schedule(&wo->mmio.irq_tasklet);

	return IRQ_HANDLED;
}

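/* Bottom half for the WO CCIF interrupt: rx and exception sources are
 * masked while being serviced; the rx source is re-armed from
 * mtk_wed_wo_rx_complete() once the rx ring has been drained.
 */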
static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
{
	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
	u32 intr, mask;

	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	intr = mtk_wed_wo_get_isr(wo);
	intr &= wo->mmio.irq_mask;
	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
	mtk_wed_wo_irq_disable(wo, mask);

	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
		mtk_wed_wo_rx_complete(wo);
	}
}

/* mtk wed wo hw queues */

static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		       int n_desc, int buf_size, int index,
		       struct mtk_wed_wo_queue_regs *regs)
{
	q->regs = *regs;
	q->n_desc = n_desc;
	q->buf_size = buf_size;

	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	return 0;
}

static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
			  q->desc_dma);
}

static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->n_desc; i++) {
		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
				 DMA_TO_DEVICE);
		skb_free_frag(entry->buf);
		entry->buf = NULL;
	}

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;

	for (;;) {
		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

		if (!buf)
			break;

		skb_free_frag(buf);
	}

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}

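/* Copy @skb into the next free tx buffer, publish the descriptor and
 * notify the WO firmware. The skb is consumed in all cases.
 */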
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			    struct sk_buff *skb)
{
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	int ret = 0, index;
	u32 ctrl;

	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
	index = (q->head + 1) % q->n_desc;
	if (q->tail == index) {
		ret = -ENOMEM;
		goto out;
	}

	entry = &q->entry[index];
	if (skb->len > entry->len) {
		ret = -ENOMEM;
		goto out;
	}

	desc = &q->desc[index];
	q->head = index;

	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
				DMA_TO_DEVICE);
	memcpy(entry->buf, skb->data, skb->len);
	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
				   DMA_TO_DEVICE);

	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

	mtk_wed_wo_queue_kick(wo, q, q->head);
	mtk_wed_wo_kickout(wo);
out:
	dev_kfree_skb(skb);

	return ret;
}

static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
	return 0;
}

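/* Map the CCIF region referenced by the "mediatek,wo-ccif" phandle,
 * request its interrupt and set up the tx/rx message rings.
 */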
static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
{
	struct mtk_wed_wo_queue_regs regs;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
	if (!np)
		return -ENODEV;

	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(wo->mmio.regs)) {
		ret = PTR_ERR(wo->mmio.regs);
		goto error_put;
	}

	wo->mmio.irq = irq_of_parse_and_map(np, 0);
	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
	spin_lock_init(&wo->mmio.lock);
	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);

	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
			       KBUILD_MODNAME, wo);
	if (ret)
		goto error;

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
	mtk_wed_wo_queue_reset(wo, &wo->q_tx);

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
	mtk_wed_wo_queue_reset(wo, &wo->q_rx);

	/* rx queue irqmask */
	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);

	return 0;

error:
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
error_put:
	of_node_put(np);
	return ret;
}

static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	tasklet_disable(&wo->mmio.irq_tasklet);

	disable_irq(wo->mmio.irq);
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);

	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
	mtk_wed_wo_queue_free(wo, &wo->q_tx);
	mtk_wed_wo_queue_free(wo, &wo->q_rx);
}

int mtk_wed_wo_init(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo;
	int ret;

	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
	if (!wo)
		return -ENOMEM;

	hw->wed_wo = wo;
	wo->hw = hw;

	ret = mtk_wed_wo_hardware_init(wo);
	if (ret)
		return ret;

	ret = mtk_wed_mcu_init(wo);
	if (ret)
		return ret;

	return mtk_wed_wo_exception_init(wo);
}

void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo = hw->wed_wo;

	mtk_wed_wo_hw_deinit(wo);
}