// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (C) 2021 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
 * Copyright (C) 2021 Jernej Skrabec <jernej.skrabec@gmail.com>
 *
 * Based on rtw88/pci.c:
 * Copyright(c) 2018-2019 Realtek Corporation
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include "main.h"
#include "debug.h"
#include "fw.h"
#include "ps.h"
#include "reg.h"
#include "rx.h"
#include "sdio.h"
#include "tx.h"

#define RTW_SDIO_INDIRECT_RW_RETRIES 50

static bool rtw_sdio_is_bus_addr(u32 addr)
{
	return !!(addr & RTW_SDIO_BUS_MSK);
}

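/* The MMC core calls the SDIO IRQ handler with the host already
 * claimed, so register accesses coming from the IRQ thread must not
 * claim it again.
 */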
static bool rtw_sdio_bus_claim_needed(struct rtw_sdio *rtwsdio)
{
	return !rtwsdio->irq_thread ||
	       rtwsdio->irq_thread != current;
}

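/* Translate a WLAN IO register or SDIO-local address into the offset
 * used on the SDIO bus by replacing the domain bits with the matching
 * REG_SDIO_CMD_ADDR_* selector.
 */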
static u32 rtw_sdio_to_bus_offset(struct rtw_dev *rtwdev, u32 addr)
{
	switch (addr & RTW_SDIO_BUS_MSK) {
	case WLAN_IOREG_OFFSET:
		addr &= WLAN_IOREG_REG_MSK;
		addr |= FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				   REG_SDIO_CMD_ADDR_MAC_REG);
		break;
	case SDIO_LOCAL_OFFSET:
		addr &= SDIO_LOCAL_REG_MSK;
		addr |= FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				   REG_SDIO_CMD_ADDR_SDIO_REG);
		break;
	default:
		rtw_warn(rtwdev, "Cannot convert addr 0x%08x to bus offset",
			 addr);
	}

	return addr;
}

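/* Multi-byte accesses are only used when the address is naturally
 * aligned and the chip is powered on; otherwise the helpers below fall
 * back to single-byte transfers.
 */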
static bool rtw_sdio_use_memcpy_io(struct rtw_dev *rtwdev, u32 addr,
				   u8 alignment)
{
	return IS_ALIGNED(addr, alignment) &&
	       test_bit(RTW_FLAG_POWERON, rtwdev->flags);
}

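/* 32-bit register write, split into four single-byte writes when a
 * 4-byte access is not possible (unaligned address or chip still
 * powered off).
 */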
static void rtw_sdio_writel(struct rtw_dev *rtwdev, u32 val, u32 addr,
			    int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[4];
	int i;

	if (rtw_sdio_use_memcpy_io(rtwdev, addr, 4)) {
		sdio_writel(rtwsdio->sdio_func, val, addr, err_ret);
		return;
	}

	*(__le32 *)buf = cpu_to_le32(val);

	for (i = 0; i < 4; i++) {
		sdio_writeb(rtwsdio->sdio_func, buf[i], addr + i, err_ret);
		if (*err_ret)
			return;
	}
}

static void rtw_sdio_writew(struct rtw_dev *rtwdev, u16 val, u32 addr,
			    int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[2];
	int i;

	*(__le16 *)buf = cpu_to_le16(val);

	for (i = 0; i < 2; i++) {
		sdio_writeb(rtwsdio->sdio_func, buf[i], addr + i, err_ret);
		if (*err_ret)
			return;
	}
}

static u32 rtw_sdio_readl(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[4];
	int i;

	if (rtw_sdio_use_memcpy_io(rtwdev, addr, 4))
		return sdio_readl(rtwsdio->sdio_func, addr, err_ret);

	for (i = 0; i < 4; i++) {
		buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
		if (*err_ret)
			return 0;
	}

	return le32_to_cpu(*(__le32 *)buf);
}

static u16 rtw_sdio_readw(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[2];
	int i;

	for (i = 0; i < 2; i++) {
		buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
		if (*err_ret)
			return 0;
	}

	return le16_to_cpu(*(__le16 *)buf);
}

static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr,
				  bool direct)
{
	if (!direct)
		return addr;

	if (!rtw_sdio_is_bus_addr(addr))
		addr |= WLAN_IOREG_OFFSET;

	return rtw_sdio_to_bus_offset(rtwdev, addr);
}

static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr)
{
	return !rtw_sdio_is_sdio30_supported(rtwdev) ||
	       rtw_sdio_is_bus_addr(addr);
}

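/* Start an indirect register access: write the target address and
 * access flags to REG_SDIO_INDIRECT_REG_CFG, then poll bit 4 of the
 * third config byte, which the hardware appears to use as a
 * "transfer done" flag.
 */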
static int rtw_sdio_indirect_reg_cfg(struct rtw_dev *rtwdev, u32 addr, u32 cfg)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	unsigned int retry;
	u32 reg_cfg;
	int ret;
	u8 tmp;

	reg_cfg = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_CFG);

	rtw_sdio_writel(rtwdev, addr | cfg | BIT_SDIO_INDIRECT_REG_CFG_UNK20,
			reg_cfg, &ret);
	if (ret)
		return ret;

	for (retry = 0; retry < RTW_SDIO_INDIRECT_RW_RETRIES; retry++) {
		tmp = sdio_readb(rtwsdio->sdio_func, reg_cfg + 2, &ret);
		if (!ret && (tmp & BIT(4)))
			return 0;
	}

	return -ETIMEDOUT;
}

static u8 rtw_sdio_indirect_read8(struct rtw_dev *rtwdev, u32 addr,
				  int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u32 reg_data;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return sdio_readb(rtwsdio->sdio_func, reg_data, err_ret);
}

static int rtw_sdio_indirect_read_bytes(struct rtw_dev *rtwdev, u32 addr,
					u8 *buf, int count)
{
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		buf[i] = rtw_sdio_indirect_read8(rtwdev, addr + i, &ret);
		if (ret)
			break;
	}

	return ret;
}

static u16 rtw_sdio_indirect_read16(struct rtw_dev *rtwdev, u32 addr,
				    int *err_ret)
{
	u32 reg_data;
	u8 buf[2];

	if (!IS_ALIGNED(addr, 2)) {
		*err_ret = rtw_sdio_indirect_read_bytes(rtwdev, addr, buf, 2);
		if (*err_ret)
			return 0;

		return le16_to_cpu(*(__le16 *)buf);
	}

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return rtw_sdio_readw(rtwdev, reg_data, err_ret);
}

static u32 rtw_sdio_indirect_read32(struct rtw_dev *rtwdev, u32 addr,
				    int *err_ret)
{
	u32 reg_data;
	u8 buf[4];

	if (!IS_ALIGNED(addr, 4)) {
		*err_ret = rtw_sdio_indirect_read_bytes(rtwdev, addr, buf, 4);
		if (*err_ret)
			return 0;

		return le32_to_cpu(*(__le32 *)buf);
	}

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return rtw_sdio_readl(rtwdev, reg_data, err_ret);
}

static u8 rtw_sdio_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;
	u8 val;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = sdio_readb(rtwsdio->sdio_func, addr, &ret);
	else
		val = rtw_sdio_indirect_read8(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read8 failed (0x%x): %d", addr, ret);

	return val;
}

static u16 rtw_sdio_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;
	u16 val;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = rtw_sdio_readw(rtwdev, addr, &ret);
	else
		val = rtw_sdio_indirect_read16(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read16 failed (0x%x): %d", addr, ret);

	return val;
}

static u32 rtw_sdio_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	u32 val;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = rtw_sdio_readl(rtwdev, addr, &ret);
	else
		val = rtw_sdio_indirect_read32(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read32 failed (0x%x): %d", addr, ret);

	return val;
}

static void rtw_sdio_indirect_write8(struct rtw_dev *rtwdev, u8 val, u32 addr,
				     int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u32 reg_data;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	sdio_writeb(rtwsdio->sdio_func, val, reg_data, err_ret);
	if (*err_ret)
		return;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE);
}

static void rtw_sdio_indirect_write16(struct rtw_dev *rtwdev, u16 val, u32 addr,
				      int *err_ret)
{
	u32 reg_data;

	if (!IS_ALIGNED(addr, 2)) {
		addr = rtw_sdio_to_io_address(rtwdev, addr, true);
		rtw_sdio_writew(rtwdev, val, addr, err_ret);
		return;
	}

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	rtw_sdio_writew(rtwdev, val, reg_data, err_ret);
	if (*err_ret)
		return;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE |
					     BIT_SDIO_INDIRECT_REG_CFG_WORD);
}

static void rtw_sdio_indirect_write32(struct rtw_dev *rtwdev, u32 val,
				      u32 addr, int *err_ret)
{
	u32 reg_data;

	if (!IS_ALIGNED(addr, 4)) {
		addr = rtw_sdio_to_io_address(rtwdev, addr, true);
		rtw_sdio_writel(rtwdev, val, addr, err_ret);
		return;
	}

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	rtw_sdio_writel(rtwdev, val, reg_data, err_ret);

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE |
					     BIT_SDIO_INDIRECT_REG_CFG_DWORD);
}

static void rtw_sdio_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		sdio_writeb(rtwsdio->sdio_func, val, addr, &ret);
	else
		rtw_sdio_indirect_write8(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write8 failed (0x%x): %d", addr, ret);
}

static void rtw_sdio_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		rtw_sdio_writew(rtwdev, val, addr, &ret);
	else
		rtw_sdio_indirect_write16(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write16 failed (0x%x): %d", addr, ret);
}

static void rtw_sdio_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		rtw_sdio_writel(rtwdev, val, addr, &ret);
	else
		rtw_sdio_indirect_write32(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write32 failed (0x%x): %d", addr, ret);
}

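/* Build the bus address for a TX transfer: the upper bits select the
 * TX FIFO (high/normal/low/extra) for the given queue, the lower bits
 * carry the transfer size in 4-byte units.
 */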
static u32 rtw_sdio_get_tx_addr(struct rtw_dev *rtwdev, size_t size,
				enum rtw_tx_queue_type queue)
{
	u32 txaddr;

	switch (queue) {
	case RTW_TX_QUEUE_BCN:
	case RTW_TX_QUEUE_H2C:
	case RTW_TX_QUEUE_HI0:
		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				    REG_SDIO_CMD_ADDR_TXFF_HIGH);
		break;
	case RTW_TX_QUEUE_VI:
	case RTW_TX_QUEUE_VO:
		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				    REG_SDIO_CMD_ADDR_TXFF_NORMAL);
		break;
	case RTW_TX_QUEUE_BE:
	case RTW_TX_QUEUE_BK:
		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				    REG_SDIO_CMD_ADDR_TXFF_LOW);
		break;
	case RTW_TX_QUEUE_MGMT:
		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				    REG_SDIO_CMD_ADDR_TXFF_EXTRA);
		break;
	default:
		rtw_warn(rtwdev, "Unsupported queue for TX addr: 0x%02x\n",
			 queue);
		return 0;
	}

	txaddr += DIV_ROUND_UP(size, 4);

	return txaddr;
}

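/* Read an RX transfer from the RX FIFO. rx_addr is a sequence number
 * that is incremented on every read and mixed into the bus address,
 * presumably so consecutive reads are never issued to the exact same
 * address.
 */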
static int rtw_sdio_read_port(struct rtw_dev *rtwdev, u8 *buf, size_t count)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);
	u32 rxaddr = rtwsdio->rx_addr++;
	int ret;

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	ret = sdio_memcpy_fromio(rtwsdio->sdio_func, buf,
				 RTW_SDIO_ADDR_RX_RX0FF_GEN(rxaddr), count);
	if (ret)
		rtw_warn(rtwdev,
			 "Failed to read %zu byte(s) from SDIO port 0x%08x",
			 count, rxaddr);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	return ret;
}

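/* Check whether the given queue has enough free TX FIFO pages for a
 * transfer of "count" bytes. 11n chips report 8-bit per-queue page
 * counts in a single register, 11ac chips report 12-bit counts spread
 * over three registers; in both layouts the shared "public" pages are
 * added on top of the per-queue count.
 */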
static int rtw_sdio_check_free_txpg(struct rtw_dev *rtwdev, u8 queue,
				    size_t count)
{
	unsigned int pages_free, pages_needed;

	if (rtw_chip_wcpu_11n(rtwdev)) {
		u32 free_txpg;

		free_txpg = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG);

		switch (queue) {
		case RTW_TX_QUEUE_BCN:
		case RTW_TX_QUEUE_H2C:
		case RTW_TX_QUEUE_HI0:
		case RTW_TX_QUEUE_MGMT:
			/* high */
			pages_free = free_txpg & 0xff;
			break;
		case RTW_TX_QUEUE_VI:
		case RTW_TX_QUEUE_VO:
			/* normal */
			pages_free = (free_txpg >> 8) & 0xff;
			break;
		case RTW_TX_QUEUE_BE:
		case RTW_TX_QUEUE_BK:
			/* low */
			pages_free = (free_txpg >> 16) & 0xff;
			break;
		default:
			rtw_warn(rtwdev, "Unknown mapping for queue %u\n", queue);
			return -EINVAL;
		}

		/* add the pages from the public queue */
		pages_free += (free_txpg >> 24) & 0xff;
	} else {
		u32 free_txpg[3];

		free_txpg[0] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG);
		free_txpg[1] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG + 4);
		free_txpg[2] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG + 8);

		switch (queue) {
		case RTW_TX_QUEUE_BCN:
		case RTW_TX_QUEUE_H2C:
		case RTW_TX_QUEUE_HI0:
			/* high */
			pages_free = free_txpg[0] & 0xfff;
			break;
		case RTW_TX_QUEUE_VI:
		case RTW_TX_QUEUE_VO:
			/* normal */
			pages_free = (free_txpg[0] >> 16) & 0xfff;
			break;
		case RTW_TX_QUEUE_BE:
		case RTW_TX_QUEUE_BK:
			/* low */
			pages_free = free_txpg[1] & 0xfff;
			break;
		case RTW_TX_QUEUE_MGMT:
			/* extra */
			pages_free = free_txpg[2] & 0xfff;
			break;
		default:
			rtw_warn(rtwdev, "Unknown mapping for queue %u\n", queue);
			return -EINVAL;
		}

		/* add the pages from the public queue */
		pages_free += (free_txpg[1] >> 16) & 0xfff;
	}

	pages_needed = DIV_ROUND_UP(count, rtwdev->chip->page_size);

	if (pages_needed > pages_free) {
		rtw_dbg(rtwdev, RTW_DBG_SDIO,
			"Not enough free pages (%u needed, %u free) in queue %u for %zu bytes\n",
			pages_needed, pages_free, queue, count);
		return -EBUSY;
	}

	return 0;
}

static int rtw_sdio_write_port(struct rtw_dev *rtwdev, struct sk_buff *skb,
			       enum rtw_tx_queue_type queue)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool bus_claim;
	size_t txsize;
	u32 txaddr;
	int ret;

	txaddr = rtw_sdio_get_tx_addr(rtwdev, skb->len, queue);
	if (!txaddr)
		return -EINVAL;

	txsize = sdio_align_size(rtwsdio->sdio_func, skb->len);

	ret = rtw_sdio_check_free_txpg(rtwdev, queue, txsize);
	if (ret)
		return ret;

	if (!IS_ALIGNED((unsigned long)skb->data, RTW_SDIO_DATA_PTR_ALIGN))
		rtw_warn(rtwdev, "Got unaligned SKB in %s() for queue %u\n",
			 __func__, queue);

	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	ret = sdio_memcpy_toio(rtwsdio->sdio_func, txaddr, skb->data, txsize);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev,
			 "Failed to write %zu byte(s) to SDIO port 0x%08x",
			 txsize, txaddr);

	return ret;
}

static void rtw_sdio_init(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	rtwsdio->irq_mask = REG_SDIO_HIMR_RX_REQUEST | REG_SDIO_HIMR_CPWM1;
}

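/* Configure RX DMA aggregation: the hardware batches multiple received
 * frames into a single SDIO transfer, bounded by a page threshold and
 * a timeout. The threshold values differ between the 11n and 11ac
 * chip generations.
 */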
static void rtw_sdio_enable_rx_aggregation(struct rtw_dev *rtwdev)
{
	u8 size, timeout;

	if (rtw_chip_wcpu_11n(rtwdev)) {
		size = 0x6;
		timeout = 0x6;
	} else {
		size = 0xff;
		timeout = 0x1;
	}

	/* Make the firmware honor the size limit configured below */
	rtw_write32_set(rtwdev, REG_RXDMA_AGG_PG_TH, BIT_EN_PRE_CALC);

	rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);

	rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH,
		    FIELD_PREP(BIT_RXDMA_AGG_PG_TH, size) |
		    FIELD_PREP(BIT_DMA_AGG_TO_V1, timeout));

	rtw_write8_set(rtwdev, REG_RXDMA_MODE, BIT_DMA_MODE);
}

static void rtw_sdio_enable_interrupt(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	rtw_write32(rtwdev, REG_SDIO_HIMR, rtwsdio->irq_mask);
}

static void rtw_sdio_disable_interrupt(struct rtw_dev *rtwdev)
{
	rtw_write32(rtwdev, REG_SDIO_HIMR, 0x0);
}

static u8 rtw_sdio_get_tx_qsel(struct rtw_dev *rtwdev, struct sk_buff *skb,
			       u8 queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		if (rtw_chip_wcpu_11n(rtwdev))
			return TX_DESC_QSEL_HIGH;
		else
			return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}

static int rtw_sdio_setup(struct rtw_dev *rtwdev)
{
	/* nothing to do */
	return 0;
}

static int rtw_sdio_start(struct rtw_dev *rtwdev)
{
	rtw_sdio_enable_rx_aggregation(rtwdev);
	rtw_sdio_enable_interrupt(rtwdev);

	return 0;
}

static void rtw_sdio_stop(struct rtw_dev *rtwdev)
{
	rtw_sdio_disable_interrupt(rtwdev);
}

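/* Enter deep power save. Without the TX wake firmware feature the
 * device cannot perform TX DMA while in deep PS, so only enter it when
 * all TX queues that go through TX DMA are empty.
 */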
static void rtw_sdio_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool tx_empty = true;
	u8 queue;

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) {
		/* TX DMA is not allowed while in deep PS state */
		for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
			/* BCN queue is rsvd page, does not have DMA interrupt
			 * H2C queue is managed by firmware
			 */
			if (queue == RTW_TX_QUEUE_BCN ||
			    queue == RTW_TX_QUEUE_H2C)
				continue;

			/* check if there is any skb DMAing */
			if (skb_queue_len(&rtwsdio->tx_queue[queue])) {
				tx_empty = false;
				break;
			}
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}

	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_sdio_deep_ps_leave(struct rtw_dev *rtwdev)
{
	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_sdio_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_sdio_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_sdio_deep_ps_leave(rtwdev);
}

static void rtw_sdio_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	queue_work(rtwsdio->txwq, &rtwsdio->tx_handler_data->work);
}

static void rtw_sdio_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	/* nothing to do */
}

static void rtw_sdio_interface_cfg(struct rtw_dev *rtwdev)
{
	u32 val;

	rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);

	val = rtw_read32(rtwdev, REG_SDIO_TX_CTRL);
	val &= 0xfff8;
	rtw_write32(rtwdev, REG_SDIO_TX_CTRL, val);
}

static struct rtw_sdio_tx_data *rtw_sdio_get_tx_data(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	BUILD_BUG_ON(sizeof(struct rtw_sdio_tx_data) >
		     sizeof(info->status.status_driver_data));

	return (struct rtw_sdio_tx_data *)info->status.status_driver_data;
}

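/* Prepend and fill the TX descriptor. If pushing the descriptor leaves
 * skb->data unaligned, additional padding bytes are pushed and the
 * firmware is informed about the shifted payload start via
 * pkt_info->offset.
 */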
static void rtw_sdio_tx_skb_prepare(struct rtw_dev *rtwdev,
				    struct rtw_tx_pkt_info *pkt_info,
				    struct sk_buff *skb,
				    enum rtw_tx_queue_type queue)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	unsigned long data_addr, aligned_addr;
	size_t offset;
	u8 *pkt_desc;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);

	data_addr = (unsigned long)pkt_desc;
	aligned_addr = ALIGN(data_addr, RTW_SDIO_DATA_PTR_ALIGN);

	if (data_addr != aligned_addr) {
		/* Ensure that the start of the pkt_desc is always aligned at
		 * RTW_SDIO_DATA_PTR_ALIGN.
		 */
		offset = RTW_SDIO_DATA_PTR_ALIGN - (aligned_addr - data_addr);

		pkt_desc = skb_push(skb, offset);

		/* By inserting padding to align the start of the pkt_desc we
		 * need to inform the firmware that the actual data starts at
		 * a different offset than normal.
		 */
		pkt_info->offset += offset;
	}

	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);

	pkt_info->qsel = rtw_sdio_get_tx_qsel(rtwdev, skb, queue);

	rtw_tx_fill_tx_desc(pkt_info, skb);
	rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, pkt_desc);
}

static int rtw_sdio_write_data(struct rtw_dev *rtwdev,
			       struct rtw_tx_pkt_info *pkt_info,
			       struct sk_buff *skb,
			       enum rtw_tx_queue_type queue)
{
	int ret;

	rtw_sdio_tx_skb_prepare(rtwdev, pkt_info, skb, queue);

	ret = rtw_sdio_write_port(rtwdev, skb, queue);
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw_sdio_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					 u32 size)
{
	struct rtw_tx_pkt_info pkt_info = {};
	struct sk_buff *skb;

	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	return rtw_sdio_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}

static int rtw_sdio_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct rtw_tx_pkt_info pkt_info = {};
	struct sk_buff *skb;

	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	return rtw_sdio_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}

static int rtw_sdio_tx_write(struct rtw_dev *rtwdev,
			     struct rtw_tx_pkt_info *pkt_info,
			     struct sk_buff *skb)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
	struct rtw_sdio_tx_data *tx_data;

	rtw_sdio_tx_skb_prepare(rtwdev, pkt_info, skb, queue);

	tx_data = rtw_sdio_get_tx_data(skb);
	tx_data->sn = pkt_info->sn;

	skb_queue_tail(&rtwsdio->tx_queue[queue], skb);

	return 0;
}

static void rtw_sdio_tx_err_isr(struct rtw_dev *rtwdev)
{
	u32 val = rtw_read32(rtwdev, REG_TXDMA_STATUS);

	rtw_write32(rtwdev, REG_TXDMA_STATUS, val);
}

static void rtw_sdio_rx_skb(struct rtw_dev *rtwdev, struct sk_buff *skb,
			    u32 pkt_offset, struct rtw_rx_pkt_stat *pkt_stat,
			    struct ieee80211_rx_status *rx_status)
{
	*IEEE80211_SKB_RXCB(skb) = *rx_status;

	if (pkt_stat->is_c2h) {
		skb_put(skb, pkt_stat->pkt_len + pkt_offset);
		rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
		return;
	}

	skb_put(skb, pkt_stat->pkt_len);
	skb_reserve(skb, pkt_offset);

	rtw_rx_stats(rtwdev, pkt_stat->vif, skb);

	ieee80211_rx_irqsafe(rtwdev->hw, skb);
}

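/* Read one (possibly aggregated) transfer from the RX FIFO and split
 * it into individual frames. Each frame except the last is copied into
 * its own skb; the last frame reuses the original buffer.
 */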
static void rtw_sdio_rxfifo_recv(struct rtw_dev *rtwdev, u32 rx_len)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	struct ieee80211_rx_status rx_status;
	struct rtw_rx_pkt_stat pkt_stat;
	struct sk_buff *skb, *split_skb;
	u32 pkt_offset, curr_pkt_len;
	size_t bufsz;
	u8 *rx_desc;
	int ret;

	bufsz = sdio_align_size(rtwsdio->sdio_func, rx_len);

	skb = dev_alloc_skb(bufsz);
	if (!skb)
		return;

	ret = rtw_sdio_read_port(rtwdev, skb->data, bufsz);
	if (ret) {
		dev_kfree_skb_any(skb);
		return;
	}

	while (true) {
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat,
					 &rx_status);
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		curr_pkt_len = ALIGN(pkt_offset + pkt_stat.pkt_len,
				     RTW_SDIO_DATA_PTR_ALIGN);

		if ((curr_pkt_len + pkt_desc_sz) >= rx_len) {
			/* Use the original skb (with its adjusted offset)
			 * when processing the last (or even the only) entry to
			 * have its memory freed automatically.
			 */
			rtw_sdio_rx_skb(rtwdev, skb, pkt_offset, &pkt_stat,
					&rx_status);
			break;
		}

		split_skb = dev_alloc_skb(curr_pkt_len);
		if (!split_skb) {
			rtw_sdio_rx_skb(rtwdev, skb, pkt_offset, &pkt_stat,
					&rx_status);
			break;
		}

		skb_copy_header(split_skb, skb);
		memcpy(split_skb->data, skb->data, curr_pkt_len);

		rtw_sdio_rx_skb(rtwdev, split_skb, pkt_offset, &pkt_stat,
				&rx_status);

		/* Move to the start of the next RX descriptor */
		skb_reserve(skb, curr_pkt_len);
		rx_len -= curr_pkt_len;
	}
}

static void rtw_sdio_rx_isr(struct rtw_dev *rtwdev)
{
	u32 rx_len, hisr, total_rx_bytes = 0;

	do {
		if (rtw_chip_wcpu_11n(rtwdev))
			rx_len = rtw_read16(rtwdev, REG_SDIO_RX0_REQ_LEN);
		else
			rx_len = rtw_read32(rtwdev, REG_SDIO_RX0_REQ_LEN);

		if (!rx_len)
			break;

		rtw_sdio_rxfifo_recv(rtwdev, rx_len);

		total_rx_bytes += rx_len;

		if (rtw_chip_wcpu_11n(rtwdev)) {
			/* Stop if no more RX requests are pending, even if
			 * rx_len could be greater than zero in the next
			 * iteration. This is needed because the RX buffer may
			 * already contain data while either HW or FW are not
			 * done filling that buffer yet. Still reading the
			 * buffer can result in packets where
			 * rtw_rx_pkt_stat.pkt_len is zero or points beyond the
			 * end of the buffer.
			 */
			hisr = rtw_read32(rtwdev, REG_SDIO_HISR);
		} else {
			/* RTW_WCPU_11AC chips have improved hardware or
			 * firmware and can use rx_len unconditionally.
			 */
			hisr = REG_SDIO_HISR_RX_REQUEST;
		}
	} while (total_rx_bytes < SZ_64K && hisr & REG_SDIO_HISR_RX_REQUEST);
}

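/* SDIO IRQ handler. The MMC core invokes it with the host already
 * claimed; irq_thread is recorded so that nested register accesses can
 * skip sdio_claim_host() (see rtw_sdio_bus_claim_needed()).
 */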
static void rtw_sdio_handle_interrupt(struct sdio_func *sdio_func)
{
	struct ieee80211_hw *hw = sdio_get_drvdata(sdio_func);
	struct rtw_sdio *rtwsdio;
	struct rtw_dev *rtwdev;
	u32 hisr;

	rtwdev = hw->priv;
	rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	rtwsdio->irq_thread = current;

	hisr = rtw_read32(rtwdev, REG_SDIO_HISR);

	if (hisr & REG_SDIO_HISR_TXERR)
		rtw_sdio_tx_err_isr(rtwdev);
	if (hisr & REG_SDIO_HISR_RX_REQUEST) {
		hisr &= ~REG_SDIO_HISR_RX_REQUEST;
		rtw_sdio_rx_isr(rtwdev);
	}

	rtw_write32(rtwdev, REG_SDIO_HISR, hisr);

	rtwsdio->irq_thread = NULL;
}

static int __maybe_unused rtw_sdio_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	int ret;

	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret)
		rtw_err(rtwdev, "Failed to set host PM flag MMC_PM_KEEP_POWER");

	return ret;
}

static int __maybe_unused rtw_sdio_resume(struct device *dev)
{
	return 0;
}

SIMPLE_DEV_PM_OPS(rtw_sdio_pm_ops, rtw_sdio_suspend, rtw_sdio_resume);
EXPORT_SYMBOL(rtw_sdio_pm_ops);

static int rtw_sdio_claim(struct rtw_dev *rtwdev, struct sdio_func *sdio_func)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	int ret;

	sdio_claim_host(sdio_func);

	ret = sdio_enable_func(sdio_func);
	if (ret) {
		rtw_err(rtwdev, "Failed to enable SDIO func");
		goto err_release_host;
	}

	ret = sdio_set_block_size(sdio_func, RTW_SDIO_BLOCK_SIZE);
	if (ret) {
		rtw_err(rtwdev, "Failed to set SDIO block size to 512");
		goto err_disable_func;
	}

	rtwsdio->sdio_func = sdio_func;

	rtwsdio->sdio3_bus_mode = mmc_card_uhs(sdio_func->card);

	sdio_set_drvdata(sdio_func, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &sdio_func->dev);

	sdio_release_host(sdio_func);

	return 0;

err_disable_func:
	sdio_disable_func(sdio_func);
err_release_host:
	sdio_release_host(sdio_func);
	return ret;
}

static void rtw_sdio_declaim(struct rtw_dev *rtwdev,
			     struct sdio_func *sdio_func)
{
	sdio_claim_host(sdio_func);
	sdio_disable_func(sdio_func);
	sdio_release_host(sdio_func);
}

static struct rtw_hci_ops rtw_sdio_ops = {
	.tx_write = rtw_sdio_tx_write,
	.tx_kick_off = rtw_sdio_tx_kick_off,
	.setup = rtw_sdio_setup,
	.start = rtw_sdio_start,
	.stop = rtw_sdio_stop,
	.deep_ps = rtw_sdio_deep_ps,
	.link_ps = rtw_sdio_link_ps,
	.interface_cfg = rtw_sdio_interface_cfg,

	.read8 = rtw_sdio_read8,
	.read16 = rtw_sdio_read16,
	.read32 = rtw_sdio_read32,
	.write8 = rtw_sdio_write8,
	.write16 = rtw_sdio_write16,
	.write32 = rtw_sdio_write32,
	.write_data_rsvd_page = rtw_sdio_write_data_rsvd_page,
	.write_data_h2c = rtw_sdio_write_data_h2c,
};

static int rtw_sdio_request_irq(struct rtw_dev *rtwdev,
				struct sdio_func *sdio_func)
{
	int ret;

	sdio_claim_host(sdio_func);
	ret = sdio_claim_irq(sdio_func, &rtw_sdio_handle_interrupt);
	sdio_release_host(sdio_func);

	if (ret) {
		rtw_err(rtwdev, "failed to claim SDIO IRQ");
		return ret;
	}

	return 0;
}

static void rtw_sdio_indicate_tx_status(struct rtw_dev *rtwdev,
					struct sk_buff *skb)
{
	struct rtw_sdio_tx_data *tx_data = rtw_sdio_get_tx_data(skb);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = rtwdev->hw;

	/* enqueue to wait for tx report */
	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
		rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
		return;
	}

	/* always ACK for others, then they won't be marked as drop */
	ieee80211_tx_info_clear_status(info);
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	else
		info->flags |= IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status_irqsafe(hw, skb);
}

static void rtw_sdio_process_tx_queue(struct rtw_dev *rtwdev,
				      enum rtw_tx_queue_type queue)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	struct sk_buff *skb;
	int ret;

	skb = skb_dequeue(&rtwsdio->tx_queue[queue]);
	if (!skb)
		return;

	ret = rtw_sdio_write_port(rtwdev, skb, queue);
	if (ret) {
		skb_queue_head(&rtwsdio->tx_queue[queue], skb);
		return;
	}

	if (queue <= RTW_TX_QUEUE_VO)
		rtw_sdio_indicate_tx_status(rtwdev, skb);
	else
		dev_kfree_skb_any(skb);
}

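/* TX work handler: drain the per-queue skb lists, starting with the
 * highest-priority queue. The iteration limit bounds the time spent in
 * a single work invocation.
 */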
static void rtw_sdio_tx_handler(struct work_struct *work)
{
	struct rtw_sdio_work_data *work_data =
		container_of(work, struct rtw_sdio_work_data, work);
	struct rtw_sdio *rtwsdio;
	struct rtw_dev *rtwdev;
	int limit, queue;

	rtwdev = work_data->rtwdev;
	rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		rtw_sdio_deep_ps_leave(rtwdev);

	for (queue = RTK_MAX_TX_QUEUE_NUM - 1; queue >= 0; queue--) {
		for (limit = 0; limit < 1000; limit++) {
			rtw_sdio_process_tx_queue(rtwdev, queue);

			if (skb_queue_empty(&rtwsdio->tx_queue[queue]))
				break;
		}
	}
}

static void rtw_sdio_free_irq(struct rtw_dev *rtwdev,
			      struct sdio_func *sdio_func)
{
	sdio_claim_host(sdio_func);
	sdio_release_irq(sdio_func);
	sdio_release_host(sdio_func);
}

static int rtw_sdio_init_tx(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	int i;

	rtwsdio->txwq = create_singlethread_workqueue("rtw88_sdio: tx wq");
	if (!rtwsdio->txwq) {
		rtw_err(rtwdev, "failed to create TX work queue\n");
		return -ENOMEM;
	}

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
		skb_queue_head_init(&rtwsdio->tx_queue[i]);
	rtwsdio->tx_handler_data = kmalloc(sizeof(*rtwsdio->tx_handler_data),
					   GFP_KERNEL);
	if (!rtwsdio->tx_handler_data)
		goto err_destroy_wq;

	rtwsdio->tx_handler_data->rtwdev = rtwdev;
	INIT_WORK(&rtwsdio->tx_handler_data->work, rtw_sdio_tx_handler);

	return 0;

err_destroy_wq:
	destroy_workqueue(rtwsdio->txwq);
	return -ENOMEM;
}

static void rtw_sdio_deinit_tx(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
		skb_queue_purge(&rtwsdio->tx_queue[i]);

	flush_workqueue(rtwsdio->txwq);
	destroy_workqueue(rtwsdio->txwq);
	kfree(rtwsdio->tx_handler_data);
}

int rtw_sdio_probe(struct sdio_func *sdio_func,
		   const struct sdio_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_sdio);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&sdio_func->dev, "failed to allocate hw");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &sdio_func->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_sdio_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_SDIO;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_SDIO,
1312 "rtw88 SDIO probe: vendor=0x%04x device=%04x class=%02x",
		id->vendor, id->device, id->class);

	ret = rtw_sdio_claim(rtwdev, sdio_func);
	if (ret) {
		rtw_err(rtwdev, "failed to claim SDIO device");
		goto err_deinit_core;
	}

	rtw_sdio_init(rtwdev);

	ret = rtw_sdio_init_tx(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to init SDIO TX queue\n");
		goto err_sdio_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information");
		goto err_destroy_txwq;
	}

	ret = rtw_sdio_request_irq(rtwdev, sdio_func);
	if (ret)
		goto err_destroy_txwq;

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw");
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	rtw_sdio_free_irq(rtwdev, sdio_func);
err_destroy_txwq:
	rtw_sdio_deinit_tx(rtwdev);
err_sdio_declaim:
	rtw_sdio_declaim(rtwdev, sdio_func);
err_deinit_core:
	rtw_core_deinit(rtwdev);
err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_sdio_probe);

void rtw_sdio_remove(struct sdio_func *sdio_func)
{
	struct ieee80211_hw *hw = sdio_get_drvdata(sdio_func);
	struct rtw_dev *rtwdev;

	if (!hw)
		return;

	rtwdev = hw->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_sdio_disable_interrupt(rtwdev);
	rtw_sdio_free_irq(rtwdev, sdio_func);
	rtw_sdio_declaim(rtwdev, sdio_func);
	rtw_sdio_deinit_tx(rtwdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_sdio_remove);

void rtw_sdio_shutdown(struct device *dev)
{
	struct sdio_func *sdio_func = dev_to_sdio_func(dev);
	const struct rtw_chip_info *chip;
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;

	hw = sdio_get_drvdata(sdio_func);
	if (!hw)
		return;

	rtwdev = hw->priv;
	chip = rtwdev->chip;

	if (chip->ops->shutdown)
		chip->ops->shutdown(rtwdev);
}
EXPORT_SYMBOL(rtw_sdio_shutdown);

MODULE_AUTHOR("Martin Blumenstingl");
MODULE_AUTHOR("Jernej Skrabec");
MODULE_DESCRIPTION("Realtek 802.11ac wireless SDIO driver");
MODULE_LICENSE("Dual BSD/GPL");