1 /******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
6 * Copyright(c) 2018 Intel Corporation
7 *
8 * Portions of this file are derived from the ipw3945 project, as well
9 * as portions of the ieee80211 subsystem header files.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program.
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called LICENSE.
25 *
26 * Contact Information:
27 * Intel Linux Wireless <linuxwifi@intel.com>
28 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
29 *
30 *****************************************************************************/
31 #include <linux/sched.h>
32 #include <linux/wait.h>
33 #include <linux/gfp.h>
34
35 #include "iwl-prph.h"
36 #include "iwl-io.h"
37 #include "internal.h"
38 #include "iwl-op-mode.h"
39 #include "iwl-context-info-gen3.h"
40
41 /******************************************************************************
42 *
43 * RX path functions
44 *
45 ******************************************************************************/
46
47 /*
48 * Rx theory of operation
49 *
50 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
51 * each of which points to a Receive Buffer to be filled by the NIC. These get
52 * used not only for Rx frames, but for any command response or notification
53 * from the NIC. The driver and NIC manage the Rx buffers by means
54 * of indexes into the circular buffer.
55 *
56 * Rx Queue Indexes
57 * The host/firmware share two index registers for managing the Rx buffers.
58 *
59 * The READ index maps to the first position that the firmware may be writing
60 * to -- the driver can read up to (but not including) this position and get
61 * good data.
62 * The READ index is managed by the firmware once the card is enabled.
63 *
64 * The WRITE index maps to the last position the driver has read from -- the
65 * position preceding WRITE is the last slot in which the firmware can place a packet.
66 *
67 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
68 * WRITE = READ.
69 *
70 * During initialization, the host sets up the READ queue position to the first
71 * INDEX position, and WRITE to the last (READ - 1 wrapped)
72 *
73 * When the firmware places a packet in a buffer, it will advance the READ index
74 * and fire the RX interrupt. The driver can then query the READ index and
75 * process as many packets as possible, moving the WRITE index forward as it
76 * resets the Rx queue buffers with new memory.
77 *
78 * The management in the driver is as follows:
79 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
80 * When the interrupt handler is called, the request is processed.
81 * The page is either stolen - transferred to the upper layer
82 * or reused - added immediately to the iwl->rxq->rx_free list.
83 * + When the page is stolen - the driver updates the matching queue's used
84 * count, detaches the RBD and transfers it to the queue used list.
85 * When there are two used RBDs - they are transferred to the allocator empty
86 * list. Work is then scheduled for the allocator to start allocating
87 * eight buffers.
88 * When there are another 6 used RBDs - they are transferred to the allocator
89 * empty list and the driver tries to claim the pre-allocated buffers and
90 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
91 * until ready.
92 * When there are 8+ buffers in the free list - either from allocation or from
93 * 8 reused unstolen pages - restock is called to update the FW and indexes.
94 * + In order to make sure the allocator always has RBDs to use for allocation
95 * the allocator has an initial pool of size num_queues*(8-2) - the
96 * maximum missing RBDs per allocation request (request posted with 2
97 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
98 * The queues supply the recycling of the rest of the RBDs.
99 * + A received packet is processed and handed to the kernel network stack,
100 * detached from the iwl->rxq. The driver 'processed' index is updated.
101 * + If there are no allocated buffers in iwl->rxq->rx_free,
102 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
103 * If there were enough free buffers and RX_STALLED is set it is cleared.
104 *
105 *
106 * Driver sequence:
107 *
108 * iwl_rxq_alloc() Allocates rx_free
109 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
110 * iwl_pcie_rxq_restock.
111 * Used only during initialization.
112 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
113 * queue, updates firmware pointers, and updates
114 * the WRITE index.
115 * iwl_pcie_rx_allocator() Background work for allocating pages.
116 *
117 * -- enable interrupts --
118 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
119 * READ INDEX, detaching the SKB from the pool.
120 * Moves the packet buffer from queue to rx_used.
121 * Posts and claims requests to the allocator.
122 * Calls iwl_pcie_rxq_restock to refill any empty
123 * slots.
124 *
125 * RBD life-cycle:
126 *
127 * Init:
128 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
129 *
130 * Regular Receive interrupt:
131 * Page Stolen:
132 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
133 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
134 * Page not Stolen:
135 * rxq.queue -> rxq.rx_free -> rxq.queue
136 * ...
137 *
138 */
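
/*
 * Illustrative sketch only (not part of the driver): the queue empty/full
 * conditions described in the theory of operation above, assuming the
 * power-of-two queue size that iwl_rxq_space() below relies on. The helper
 * names are hypothetical and merely restate the documented index arithmetic.
 */
static inline bool iwl_rxq_sketch_queue_empty(u32 read, u32 write, u32 size)
{
/* no good data for the driver when WRITE == READ - 1 (mod size) */
return write == ((read - 1) & (size - 1));
}

static inline bool iwl_rxq_sketch_queue_full(u32 read, u32 write, u32 size)
{
/* every slot may be filled by the firmware when WRITE == READ */
return write == read;
}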
139
140 /*
141 * iwl_rxq_space - Return number of free slots available in queue.
142 */
143 static int iwl_rxq_space(const struct iwl_rxq *rxq)
144 {
145 /* Make sure rx queue size is a power of 2 */
146 WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
147
148 /*
149 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
150 * between empty and completely full queues.
151 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
152 * defined for negative dividends.
153 */
154 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
155 }
156
157 /*
158 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
159 */
160 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
161 {
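/*
 * The legacy RBD stores the DMA address shifted right by 8, so the receive
 * buffer must be 256-byte aligned; the page allocations used for RBs
 * satisfy this.
 */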
162 return cpu_to_le32((u32)(dma_addr >> 8));
163 }
164
165 /*
166 * iwl_pcie_rx_stop - stops the Rx DMA
167 */
168 int iwl_pcie_rx_stop(struct iwl_trans *trans)
169 {
170 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
171 /* TODO: remove this for 22560 once fw does it */
172 iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
173 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
174 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
175 } else if (trans->cfg->mq_rx_supported) {
176 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
177 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
178 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
179 } else {
180 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
181 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
182 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
183 1000);
184 }
185 }
186
187 /*
188 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
189 */
190 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
191 struct iwl_rxq *rxq)
192 {
193 u32 reg;
194
195 lockdep_assert_held(&rxq->lock);
196
197 /*
198 * explicitly wake up the NIC if:
199 * 1. shadow registers aren't enabled
200 * 2. there is a chance that the NIC is asleep
201 */
202 if (!trans->cfg->base_params->shadow_reg_enable &&
203 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
204 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
205
206 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
207 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
208 reg);
209 iwl_set_bit(trans, CSR_GP_CNTRL,
210 BIT(trans->cfg->csr->flag_mac_access_req));
211 rxq->need_update = true;
212 return;
213 }
214 }
215
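/*
 * The device consumes the write pointer in multiples of 8 (see also the
 * "multiples of 8" notes in the restock functions below), so only the
 * rounded-down value is published to the hardware.
 */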
216 rxq->write_actual = round_down(rxq->write, 8);
217 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
218 iwl_write32(trans, HBUS_TARG_WRPTR,
219 (rxq->write_actual |
220 ((FIRST_RX_QUEUE + rxq->id) << 16)));
221 else if (trans->cfg->mq_rx_supported)
222 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
223 rxq->write_actual);
224 else
225 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
226 }
227
228 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
229 {
230 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
231 int i;
232
233 for (i = 0; i < trans->num_rx_queues; i++) {
234 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
235
236 if (!rxq->need_update)
237 continue;
238 spin_lock(&rxq->lock);
239 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
240 rxq->need_update = false;
241 spin_unlock(&rxq->lock);
242 }
243 }
244
245 static void iwl_pcie_restock_bd(struct iwl_trans *trans,
246 struct iwl_rxq *rxq,
247 struct iwl_rx_mem_buffer *rxb)
248 {
249 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
250 struct iwl_rx_transfer_desc *bd = rxq->bd;
251
252 bd[rxq->write].type_n_size =
253 cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
254 ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
255 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
256 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
257 } else {
258 __le64 *bd = rxq->bd;
259
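/*
 * The low 12 bits of page_dma are zero (see the WARN_ON in the caller),
 * so the buffer's vid can be packed into them.
 */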
260 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
261 }
262 }
263
264 /*
265 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
266 */
267 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
268 struct iwl_rxq *rxq)
269 {
270 struct iwl_rx_mem_buffer *rxb;
271
272 /*
273 * If the device isn't enabled - no need to try to add buffers...
274 * This can happen when we stop the device and still have an interrupt
275 * pending. We stop the APM before we sync the interrupts because we
276 * have to (see comment there). On the other hand, since the APM is
277 * stopped, we cannot access the HW (in particular not prph).
278 * So don't try to restock if the APM has been already stopped.
279 */
280 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
281 return;
282
283 spin_lock(&rxq->lock);
284 while (rxq->free_count) {
285 /* Get next free Rx buffer, remove from free list */
286 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
287 list);
288 list_del(&rxb->list);
289 rxb->invalid = false;
290 /* first 12 bits are expected to be empty */
291 WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
292 /* Point to Rx buffer via next RBD in circular buffer */
293 iwl_pcie_restock_bd(trans, rxq, rxb);
294 rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
295 rxq->free_count--;
296 }
297 spin_unlock(&rxq->lock);
298
299 /*
300 * If we've added more space for the firmware to place data, tell it.
301 * Increment device's write pointer in multiples of 8.
302 */
303 if (rxq->write_actual != (rxq->write & ~0x7)) {
304 spin_lock(&rxq->lock);
305 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
306 spin_unlock(&rxq->lock);
307 }
308 }
309
310 /*
311 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
312 */
313 static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
314 struct iwl_rxq *rxq)
315 {
316 struct iwl_rx_mem_buffer *rxb;
317
318 /*
319 * If the device isn't enabled - no need to try to add buffers...
320 * This can happen when we stop the device and still have an interrupt
321 * pending. We stop the APM before we sync the interrupts because we
322 * have to (see comment there). On the other hand, since the APM is
323 * stopped, we cannot access the HW (in particular not prph).
324 * So don't try to restock if the APM has been already stopped.
325 */
326 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
327 return;
328
329 spin_lock(&rxq->lock);
330 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
331 __le32 *bd = (__le32 *)rxq->bd;
332 /* The overwritten rxb must be a used one */
333 rxb = rxq->queue[rxq->write];
334 BUG_ON(rxb && rxb->page);
335
336 /* Get next free Rx buffer, remove from free list */
337 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
338 list);
339 list_del(&rxb->list);
340 rxb->invalid = false;
341
342 /* Point to Rx buffer via next RBD in circular buffer */
343 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
344 rxq->queue[rxq->write] = rxb;
345 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
346 rxq->free_count--;
347 }
348 spin_unlock(&rxq->lock);
349
350 /* If we've added more space for the firmware to place data, tell it.
351 * Increment device's write pointer in multiples of 8. */
352 if (rxq->write_actual != (rxq->write & ~0x7)) {
353 spin_lock(&rxq->lock);
354 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
355 spin_unlock(&rxq->lock);
356 }
357 }
358
359 /*
360 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
361 *
362 * If there are slots in the RX queue that need to be restocked,
363 * and we have free pre-allocated buffers, fill the ranks as much
364 * as we can, pulling from rx_free.
365 *
366 * This moves the 'write' index forward to catch up with 'processed', and
367 * also updates the memory address in the firmware to reference the new
368 * target buffer.
369 */
370 static
371 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
372 {
373 if (trans->cfg->mq_rx_supported)
374 iwl_pcie_rxmq_restock(trans, rxq);
375 else
376 iwl_pcie_rxsq_restock(trans, rxq);
377 }
378
379 /*
380 * iwl_pcie_rx_alloc_page - allocates and returns a page.
381 *
382 */
383 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
384 gfp_t priority)
385 {
386 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
387 struct page *page;
388 gfp_t gfp_mask = priority;
389
390 if (trans_pcie->rx_page_order > 0)
391 gfp_mask |= __GFP_COMP;
392
393 /* Alloc a new receive buffer */
394 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
395 if (!page) {
396 if (net_ratelimit())
397 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
398 trans_pcie->rx_page_order);
399 /*
400 * Issue an error if we don't have enough pre-allocated
401 * buffers.
402 */
403 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
404 IWL_CRIT(trans,
405 "Failed to alloc_pages\n");
406 return NULL;
407 }
408 return page;
409 }
410
411 /*
412 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
413 *
414 * A used RBD is an Rx buffer that has been given to the stack. To use it again
415 * a page must be allocated and the RBD must point to the page. This function
416 * doesn't change the HW pointer but handles the list of pages that is used by
417 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
418 * allocated buffers.
419 */
420 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
421 struct iwl_rxq *rxq)
422 {
423 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
424 struct iwl_rx_mem_buffer *rxb;
425 struct page *page;
426
427 while (1) {
428 spin_lock(&rxq->lock);
429 if (list_empty(&rxq->rx_used)) {
430 spin_unlock(&rxq->lock);
431 return;
432 }
433 spin_unlock(&rxq->lock);
434
435 /* Alloc a new receive buffer */
436 page = iwl_pcie_rx_alloc_page(trans, priority);
437 if (!page)
438 return;
439
440 spin_lock(&rxq->lock);
441
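/*
 * The lock was dropped for the page allocation, so re-check that rx_used
 * was not drained meanwhile; if it was, free the page and bail out.
 */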
442 if (list_empty(&rxq->rx_used)) {
443 spin_unlock(&rxq->lock);
444 __free_pages(page, trans_pcie->rx_page_order);
445 return;
446 }
447 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
448 list);
449 list_del(&rxb->list);
450 spin_unlock(&rxq->lock);
451
452 BUG_ON(rxb->page);
453 rxb->page = page;
454 /* Get physical address of the RB */
455 rxb->page_dma =
456 dma_map_page(trans->dev, page, 0,
457 PAGE_SIZE << trans_pcie->rx_page_order,
458 DMA_FROM_DEVICE);
459 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
460 rxb->page = NULL;
461 spin_lock(&rxq->lock);
462 list_add(&rxb->list, &rxq->rx_used);
463 spin_unlock(&rxq->lock);
464 __free_pages(page, trans_pcie->rx_page_order);
465 return;
466 }
467
468 spin_lock(&rxq->lock);
469
470 list_add_tail(&rxb->list, &rxq->rx_free);
471 rxq->free_count++;
472
473 spin_unlock(&rxq->lock);
474 }
475 }
476
477 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
478 {
479 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
480 int i;
481
482 for (i = 0; i < RX_POOL_SIZE; i++) {
483 if (!trans_pcie->rx_pool[i].page)
484 continue;
485 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
486 PAGE_SIZE << trans_pcie->rx_page_order,
487 DMA_FROM_DEVICE);
488 __free_pages(trans_pcie->rx_pool[i].page,
489 trans_pcie->rx_page_order);
490 trans_pcie->rx_pool[i].page = NULL;
491 }
492 }
493
494 /*
495 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
496 *
497 * Allocates 8 pages for each received request.
498 * Called as a scheduled work item.
499 */
500 static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
501 {
502 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
503 struct iwl_rb_allocator *rba = &trans_pcie->rba;
504 struct list_head local_empty;
505 int pending = atomic_xchg(&rba->req_pending, 0);
506
507 IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
508
509 /* If we were scheduled - there is at least one request */
510 spin_lock(&rba->lock);
511 /* swap out the rba->rbd_empty to a local list */
512 list_replace_init(&rba->rbd_empty, &local_empty);
513 spin_unlock(&rba->lock);
514
515 while (pending) {
516 int i;
517 LIST_HEAD(local_allocated);
518 gfp_t gfp_mask = GFP_KERNEL;
519
520 /* Do not post a warning if there are only a few requests */
521 if (pending < RX_PENDING_WATERMARK)
522 gfp_mask |= __GFP_NOWARN;
523
524 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
525 struct iwl_rx_mem_buffer *rxb;
526 struct page *page;
527
528 /* List should never be empty - each reused RBD is
529 * returned to the list, and initial pool covers any
530 * possible gap between the time the page is allocated
531 * and the time the RBD is added.
532 */
533 BUG_ON(list_empty(&local_empty));
534 /* Get the first rxb from the rbd list */
535 rxb = list_first_entry(&local_empty,
536 struct iwl_rx_mem_buffer, list);
537 BUG_ON(rxb->page);
538
539 /* Alloc a new receive buffer */
540 page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
541 if (!page)
542 continue;
543 rxb->page = page;
544
545 /* Get physical address of the RB */
546 rxb->page_dma = dma_map_page(trans->dev, page, 0,
547 PAGE_SIZE << trans_pcie->rx_page_order,
548 DMA_FROM_DEVICE);
549 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
550 rxb->page = NULL;
551 __free_pages(page, trans_pcie->rx_page_order);
552 continue;
553 }
554
555 /* move the allocated entry to the out list */
556 list_move(&rxb->list, &local_allocated);
557 i++;
558 }
559
560 pending--;
561 if (!pending) {
562 pending = atomic_xchg(&rba->req_pending, 0);
563 IWL_DEBUG_RX(trans,
564 "Pending allocation requests = %d\n",
565 pending);
566 }
567
568 spin_lock(&rba->lock);
569 /* add the allocated rbds to the allocator allocated list */
570 list_splice_tail(&local_allocated, &rba->rbd_allocated);
571 /* get more empty RBDs for current pending requests */
572 list_splice_tail_init(&rba->rbd_empty, &local_empty);
573 spin_unlock(&rba->lock);
574
575 atomic_inc(&rba->req_ready);
576 }
577
578 spin_lock(&rba->lock);
579 /* return unused rbds to the allocator empty list */
580 list_splice_tail(&local_empty, &rba->rbd_empty);
581 spin_unlock(&rba->lock);
582 }
583
584 /*
585 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
586 *
587 * Called by the queue when it has posted an allocation request and
588 * freed 8 RBDs in order to restock itself.
589 * This function directly moves the allocated RBs to the queue's ownership
590 * and updates the relevant counters.
591 */
592 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
593 struct iwl_rxq *rxq)
594 {
595 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
596 struct iwl_rb_allocator *rba = &trans_pcie->rba;
597 int i;
598
599 lockdep_assert_held(&rxq->lock);
600
601 /*
602 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
603 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
604 * function will return early, as there are no ready requests.
605 * atomic_dec_if_positive will perform the *actual* decrement only if
606 * req_ready > 0, i.e. - there are ready requests and the function
607 * hands one request to the caller.
608 */
609 if (atomic_dec_if_positive(&rba->req_ready) < 0)
610 return;
611
612 spin_lock(&rba->lock);
613 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
614 /* Get next free Rx buffer, remove it from free list */
615 struct iwl_rx_mem_buffer *rxb =
616 list_first_entry(&rba->rbd_allocated,
617 struct iwl_rx_mem_buffer, list);
618
619 list_move(&rxb->list, &rxq->rx_free);
620 }
621 spin_unlock(&rba->lock);
622
623 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
624 rxq->free_count += RX_CLAIM_REQ_ALLOC;
625 }
626
627 void iwl_pcie_rx_allocator_work(struct work_struct *data)
628 {
629 struct iwl_rb_allocator *rba_p =
630 container_of(data, struct iwl_rb_allocator, rx_alloc);
631 struct iwl_trans_pcie *trans_pcie =
632 container_of(rba_p, struct iwl_trans_pcie, rba);
633
634 iwl_pcie_rx_allocator(trans_pcie->trans);
635 }
636
637 static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
638 {
639 struct iwl_rx_transfer_desc *rx_td;
640
641 if (use_rx_td)
642 return sizeof(*rx_td);
643 else
644 return trans->cfg->mq_rx_supported ? sizeof(__le64) :
645 sizeof(__le32);
646 }
647
648 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
649 struct iwl_rxq *rxq)
650 {
651 struct device *dev = trans->dev;
652 bool use_rx_td = (trans->cfg->device_family >=
653 IWL_DEVICE_FAMILY_22560);
654 int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
655
656 if (rxq->bd)
657 dma_free_coherent(trans->dev,
658 free_size * rxq->queue_size,
659 rxq->bd, rxq->bd_dma);
660 rxq->bd_dma = 0;
661 rxq->bd = NULL;
662
663 if (rxq->rb_stts)
664 dma_free_coherent(trans->dev,
665 use_rx_td ? sizeof(__le16) :
666 sizeof(struct iwl_rb_status),
667 rxq->rb_stts, rxq->rb_stts_dma);
668 rxq->rb_stts_dma = 0;
669 rxq->rb_stts = NULL;
670
671 if (rxq->used_bd)
672 dma_free_coherent(trans->dev,
673 (use_rx_td ? sizeof(*rxq->cd) :
674 sizeof(__le32)) * rxq->queue_size,
675 rxq->used_bd, rxq->used_bd_dma);
676 rxq->used_bd_dma = 0;
677 rxq->used_bd = NULL;
678
679 if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
680 return;
681
682 if (rxq->tr_tail)
683 dma_free_coherent(dev, sizeof(__le16),
684 rxq->tr_tail, rxq->tr_tail_dma);
685 rxq->tr_tail_dma = 0;
686 rxq->tr_tail = NULL;
687
688 if (rxq->cr_tail)
689 dma_free_coherent(dev, sizeof(__le16),
690 rxq->cr_tail, rxq->cr_tail_dma);
691 rxq->cr_tail_dma = 0;
692 rxq->cr_tail = NULL;
693 }
694
695 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
696 struct iwl_rxq *rxq)
697 {
698 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
699 struct device *dev = trans->dev;
700 int i;
701 int free_size;
702 bool use_rx_td = (trans->cfg->device_family >=
703 IWL_DEVICE_FAMILY_22560);
704
705 spin_lock_init(&rxq->lock);
706 if (trans->cfg->mq_rx_supported)
707 rxq->queue_size = MQ_RX_TABLE_SIZE;
708 else
709 rxq->queue_size = RX_QUEUE_SIZE;
710
711 free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
712
713 /*
714 * Allocate the circular buffer of Read Buffer Descriptors
715 * (RBDs)
716 */
717 rxq->bd = dma_zalloc_coherent(dev,
718 free_size * rxq->queue_size,
719 &rxq->bd_dma, GFP_KERNEL);
720 if (!rxq->bd)
721 goto err;
722
723 if (trans->cfg->mq_rx_supported) {
724 rxq->used_bd = dma_zalloc_coherent(dev,
725 (use_rx_td ?
726 sizeof(*rxq->cd) :
727 sizeof(__le32)) *
728 rxq->queue_size,
729 &rxq->used_bd_dma,
730 GFP_KERNEL);
731 if (!rxq->used_bd)
732 goto err;
733 }
734
735 /* Allocate the driver's pointer to receive buffer status */
736 rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
737 sizeof(__le16) :
738 sizeof(struct iwl_rb_status),
739 &rxq->rb_stts_dma,
740 GFP_KERNEL);
741 if (!rxq->rb_stts)
742 goto err;
743
744 if (!use_rx_td)
745 return 0;
746
747 /* Allocate the driver's pointer to TR tail */
748 rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
749 &rxq->tr_tail_dma,
750 GFP_KERNEL);
751 if (!rxq->tr_tail)
752 goto err;
753
754 /* Allocate the driver's pointer to CR tail */
755 rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
756 &rxq->cr_tail_dma,
757 GFP_KERNEL);
758 if (!rxq->cr_tail)
759 goto err;
760 /*
761 * W/A for 22560 device step Z0 bug: the value must be non-zero
762 * TODO: remove this when stop supporting Z0
763 */
764 *rxq->cr_tail = cpu_to_le16(500);
765
766 return 0;
767
768 err:
769 for (i = 0; i < trans->num_rx_queues; i++) {
770 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
771
772 iwl_pcie_free_rxq_dma(trans, rxq);
773 }
774 kfree(trans_pcie->rxq);
775
776 return -ENOMEM;
777 }
778
779 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
780 {
781 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
782 struct iwl_rb_allocator *rba = &trans_pcie->rba;
783 int i, ret;
784
785 if (WARN_ON(trans_pcie->rxq))
786 return -EINVAL;
787
788 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
789 GFP_KERNEL);
790 if (!trans_pcie->rxq)
791 return -EINVAL;
792
793 spin_lock_init(&rba->lock);
794
795 for (i = 0; i < trans->num_rx_queues; i++) {
796 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
797
798 ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
799 if (ret)
800 return ret;
801 }
802 return 0;
803 }
804
805 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
806 {
807 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
808 u32 rb_size;
809 unsigned long flags;
810 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
811
812 switch (trans_pcie->rx_buf_size) {
813 case IWL_AMSDU_4K:
814 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
815 break;
816 case IWL_AMSDU_8K:
817 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
818 break;
819 case IWL_AMSDU_12K:
820 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
821 break;
822 default:
823 WARN_ON(1);
824 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
825 }
826
827 if (!iwl_trans_grab_nic_access(trans, &flags))
828 return;
829
830 /* Stop Rx DMA */
831 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
832 /* reset and flush pointers */
833 iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
834 iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
835 iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
836
837 /* Reset driver's Rx queue write index */
838 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
839
840 /* Tell device where to find RBD circular buffer in DRAM */
841 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
842 (u32)(rxq->bd_dma >> 8));
843
844 /* Tell device where in DRAM to update its Rx status */
845 iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
846 rxq->rb_stts_dma >> 4);
847
848 /* Enable Rx DMA
849 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
850 * the credit mechanism in 5000 HW RX FIFO
851 * Direct rx interrupts to hosts
852 * Rx buffer size 4 or 8k or 12k
853 * RB timeout 0x10
854 * 256 RBDs
855 */
856 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
857 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
858 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
859 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
860 rb_size |
861 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
862 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
863
864 iwl_trans_release_nic_access(trans, &flags);
865
866 /* Set interrupt coalescing timer to default (2048 usecs) */
867 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
868
869 /* W/A for interrupt coalescing bug in 7260 and 3160 */
870 if (trans->cfg->host_interrupt_operation_mode)
871 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
872 }
873
874 void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
875 {
876 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
877 return;
878
879 if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
880 return;
881
882 if (!trans->cfg->integrated)
883 return;
884
885 /*
886 * Turn on the chicken-bits that cause MAC wakeup for RX-related
887 * values.
888 * This costs some power, but needed for W/A 9000 integrated A-step
889 * bug where shadow registers are not in the retention list and their
890 * value is lost when NIC powers down
891 */
892 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
893 CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
894 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
895 CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
896 }
897
898 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
899 {
900 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
901 u32 rb_size, enabled = 0;
902 unsigned long flags;
903 int i;
904
905 switch (trans_pcie->rx_buf_size) {
906 case IWL_AMSDU_2K:
907 rb_size = RFH_RXF_DMA_RB_SIZE_2K;
908 break;
909 case IWL_AMSDU_4K:
910 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
911 break;
912 case IWL_AMSDU_8K:
913 rb_size = RFH_RXF_DMA_RB_SIZE_8K;
914 break;
915 case IWL_AMSDU_12K:
916 rb_size = RFH_RXF_DMA_RB_SIZE_12K;
917 break;
918 default:
919 WARN_ON(1);
920 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
921 }
922
923 if (!iwl_trans_grab_nic_access(trans, &flags))
924 return;
925
926 /* Stop Rx DMA */
927 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
928 /* disable free and used rx queue operation */
929 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
930
931 for (i = 0; i < trans->num_rx_queues; i++) {
932 /* Tell device where to find RBD free table in DRAM */
933 iwl_write_prph64_no_grab(trans,
934 RFH_Q_FRBDCB_BA_LSB(i),
935 trans_pcie->rxq[i].bd_dma);
936 /* Tell device where to find RBD used table in DRAM */
937 iwl_write_prph64_no_grab(trans,
938 RFH_Q_URBDCB_BA_LSB(i),
939 trans_pcie->rxq[i].used_bd_dma);
940 /* Tell device where in DRAM to update its Rx status */
941 iwl_write_prph64_no_grab(trans,
942 RFH_Q_URBD_STTS_WPTR_LSB(i),
943 trans_pcie->rxq[i].rb_stts_dma);
944 /* Reset device index tables */
945 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
946 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
947 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
948
949 enabled |= BIT(i) | BIT(i + 16);
950 }
951
952 /*
953 * Enable Rx DMA
954 * Rx buffer size 4 or 8k or 12k
955 * Min RB size 4 or 8
956 * Drop frames that exceed RB size
957 * 512 RBDs
958 */
959 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
960 RFH_DMA_EN_ENABLE_VAL | rb_size |
961 RFH_RXF_DMA_MIN_RB_4_8 |
962 RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
963 RFH_RXF_DMA_RBDCB_SIZE_512);
964
965 /*
966 * Activate DMA snooping.
967 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
968 * Default queue is 0
969 */
970 iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
971 RFH_GEN_CFG_RFH_DMA_SNOOP |
972 RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
973 RFH_GEN_CFG_SERVICE_DMA_SNOOP |
974 RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
975 trans->cfg->integrated ?
976 RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
977 RFH_GEN_CFG_RB_CHUNK_SIZE_128));
978 /* Enable the relevant rx queues */
979 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
980
981 iwl_trans_release_nic_access(trans, &flags);
982
983 /* Set interrupt coalescing timer to default (2048 usecs) */
984 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
985
986 iwl_pcie_enable_rx_wake(trans, true);
987 }
988
989 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
990 {
991 lockdep_assert_held(&rxq->lock);
992
993 INIT_LIST_HEAD(&rxq->rx_free);
994 INIT_LIST_HEAD(&rxq->rx_used);
995 rxq->free_count = 0;
996 rxq->used_count = 0;
997 }
998
999 int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
1000 {
1001 WARN_ON(1);
1002 return 0;
1003 }
1004
1005 static int _iwl_pcie_rx_init(struct iwl_trans *trans)
1006 {
1007 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1008 struct iwl_rxq *def_rxq;
1009 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1010 int i, err, queue_size, allocator_pool_size, num_alloc;
1011
1012 if (!trans_pcie->rxq) {
1013 err = iwl_pcie_rx_alloc(trans);
1014 if (err)
1015 return err;
1016 }
1017 def_rxq = trans_pcie->rxq;
1018
1019 cancel_work_sync(&rba->rx_alloc);
1020
1021 spin_lock(&rba->lock);
1022 atomic_set(&rba->req_pending, 0);
1023 atomic_set(&rba->req_ready, 0);
1024 INIT_LIST_HEAD(&rba->rbd_allocated);
1025 INIT_LIST_HEAD(&rba->rbd_empty);
1026 spin_unlock(&rba->lock);
1027
1028 /* free all first - we might be reconfigured for a different size */
1029 iwl_pcie_free_rbs_pool(trans);
1030
1031 for (i = 0; i < RX_QUEUE_SIZE; i++)
1032 def_rxq->queue[i] = NULL;
1033
1034 for (i = 0; i < trans->num_rx_queues; i++) {
1035 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1036
1037 rxq->id = i;
1038
1039 spin_lock(&rxq->lock);
1040 /*
1041 * Set read write pointer to reflect that we have processed
1042 * and used all buffers, but have not restocked the Rx queue
1043 * with fresh buffers
1044 */
1045 rxq->read = 0;
1046 rxq->write = 0;
1047 rxq->write_actual = 0;
1048 memset(rxq->rb_stts, 0,
1049 (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
1050 sizeof(__le16) : sizeof(struct iwl_rb_status));
1051
1052 iwl_pcie_rx_init_rxb_lists(rxq);
1053
1054 if (!rxq->napi.poll)
1055 netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
1056 iwl_pcie_dummy_napi_poll, 64);
1057
1058 spin_unlock(&rxq->lock);
1059 }
1060
1061 /* move the pool to the default queue and allocator ownerships */
1062 queue_size = trans->cfg->mq_rx_supported ?
1063 MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
1064 allocator_pool_size = trans->num_rx_queues *
1065 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
1066 num_alloc = queue_size + allocator_pool_size;
1067 BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
1068 ARRAY_SIZE(trans_pcie->rx_pool));
1069 for (i = 0; i < num_alloc; i++) {
1070 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1071
1072 if (i < allocator_pool_size)
1073 list_add(&rxb->list, &rba->rbd_empty);
1074 else
1075 list_add(&rxb->list, &def_rxq->rx_used);
1076 trans_pcie->global_table[i] = rxb;
1077 rxb->vid = (u16)(i + 1);
1078 rxb->invalid = true;
1079 }
1080
1081 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
1082
1083 return 0;
1084 }
1085
1086 int iwl_pcie_rx_init(struct iwl_trans *trans)
1087 {
1088 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1089 int ret = _iwl_pcie_rx_init(trans);
1090
1091 if (ret)
1092 return ret;
1093
1094 if (trans->cfg->mq_rx_supported)
1095 iwl_pcie_rx_mq_hw_init(trans);
1096 else
1097 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1098
1099 iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1100
1101 spin_lock(&trans_pcie->rxq->lock);
1102 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1103 spin_unlock(&trans_pcie->rxq->lock);
1104
1105 return 0;
1106 }
1107
1108 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1109 {
1110 /*
1111 * We don't configure the RFH.
1112 * Restock will be done at alive, after firmware configured the RFH.
1113 */
1114 return _iwl_pcie_rx_init(trans);
1115 }
1116
1117 void iwl_pcie_rx_free(struct iwl_trans *trans)
1118 {
1119 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1120 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1121 int i;
1122
1123 /*
1124 * if rxq is NULL, it means that nothing has been allocated,
1125 * exit now
1126 */
1127 if (!trans_pcie->rxq) {
1128 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1129 return;
1130 }
1131
1132 cancel_work_sync(&rba->rx_alloc);
1133
1134 iwl_pcie_free_rbs_pool(trans);
1135
1136 for (i = 0; i < trans->num_rx_queues; i++) {
1137 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1138
1139 iwl_pcie_free_rxq_dma(trans, rxq);
1140
1141 if (rxq->napi.poll)
1142 netif_napi_del(&rxq->napi);
1143 }
1144 kfree(trans_pcie->rxq);
1145 }
1146
1147 /*
1148 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1149 *
1150 * Called when a RBD can be reused. The RBD is transferred to the allocator.
1151 * When there are 2 empty RBDs - a request for allocation is posted
1152 */
1153 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1154 struct iwl_rx_mem_buffer *rxb,
1155 struct iwl_rxq *rxq, bool emergency)
1156 {
1157 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1158 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1159
1160 /* Move the RBD to the used list, will be moved to allocator in batches
1161 * before claiming or posting a request */
1162 list_add_tail(&rxb->list, &rxq->rx_used);
1163
1164 if (unlikely(emergency))
1165 return;
1166
1167 /* Count the allocator owned RBDs */
1168 rxq->used_count++;
1169
1170 /* If we have RX_POST_REQ_ALLOC new released rx buffers -
1171 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
1172 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC
1173 * buffers earlier but still need to post another request.
1174 */
1175 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1176 /* Move the 2 RBDs to the allocator ownership.
1177 Allocator has another 6 from pool for the request completion*/
1178 spin_lock(&rba->lock);
1179 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1180 spin_unlock(&rba->lock);
1181
1182 atomic_inc(&rba->req_pending);
1183 queue_work(rba->alloc_wq, &rba->rx_alloc);
1184 }
1185 }
1186
1187 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1188 struct iwl_rxq *rxq,
1189 struct iwl_rx_mem_buffer *rxb,
1190 bool emergency)
1191 {
1192 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1193 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1194 bool page_stolen = false;
1195 int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
1196 u32 offset = 0;
1197
1198 if (WARN_ON(!rxb))
1199 return;
1200
1201 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1202
1203 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1204 struct iwl_rx_packet *pkt;
1205 u16 sequence;
1206 bool reclaim;
1207 int index, cmd_index, len;
1208 struct iwl_rx_cmd_buffer rxcb = {
1209 ._offset = offset,
1210 ._rx_page_order = trans_pcie->rx_page_order,
1211 ._page = rxb->page,
1212 ._page_stolen = false,
1213 .truesize = max_len,
1214 };
1215
1216 pkt = rxb_addr(&rxcb);
1217
1218 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1219 IWL_DEBUG_RX(trans,
1220 "Q %d: RB end marker at offset %d\n",
1221 rxq->id, offset);
1222 break;
1223 }
1224
1225 WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1226 FH_RSCSR_RXQ_POS != rxq->id,
1227 "frame on invalid queue - is on %d and indicates %d\n",
1228 rxq->id,
1229 (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1230 FH_RSCSR_RXQ_POS);
1231
1232 IWL_DEBUG_RX(trans,
1233 "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1234 rxq->id, offset,
1235 iwl_get_cmd_string(trans,
1236 iwl_cmd_id(pkt->hdr.cmd,
1237 pkt->hdr.group_id,
1238 0)),
1239 pkt->hdr.group_id, pkt->hdr.cmd,
1240 le16_to_cpu(pkt->hdr.sequence));
1241
1242 len = iwl_rx_packet_len(pkt);
1243 len += sizeof(u32); /* account for status word */
1244 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1245 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1246
1247 /* Reclaim a command buffer only if this packet is a response
1248 * to a (driver-originated) command.
1249 * If the packet (e.g. Rx frame) originated from uCode,
1250 * there is no command buffer to reclaim.
1251 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1252 * but apparently a few don't get set; catch them here. */
1253 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1254 if (reclaim && !pkt->hdr.group_id) {
1255 int i;
1256
1257 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1258 if (trans_pcie->no_reclaim_cmds[i] ==
1259 pkt->hdr.cmd) {
1260 reclaim = false;
1261 break;
1262 }
1263 }
1264 }
1265
1266 sequence = le16_to_cpu(pkt->hdr.sequence);
1267 index = SEQ_TO_INDEX(sequence);
1268 cmd_index = iwl_pcie_get_cmd_index(txq, index);
1269
1270 if (rxq->id == 0)
1271 iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1272 &rxcb);
1273 else
1274 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1275 &rxcb, rxq->id);
1276
1277 if (reclaim) {
1278 kzfree(txq->entries[cmd_index].free_buf);
1279 txq->entries[cmd_index].free_buf = NULL;
1280 }
1281
1282 /*
1283 * After here, we should always check rxcb._page_stolen,
1284 * if it is true then one of the handlers took the page.
1285 */
1286
1287 if (reclaim) {
1288 /* Invoke any callbacks, transfer the buffer to caller,
1289 * and fire off the (possibly) blocking
1290 * iwl_trans_send_cmd()
1291 * as we reclaim the driver command queue */
1292 if (!rxcb._page_stolen)
1293 iwl_pcie_hcmd_complete(trans, &rxcb);
1294 else
1295 IWL_WARN(trans, "Claim null rxb?\n");
1296 }
1297
1298 page_stolen |= rxcb._page_stolen;
1299 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1300 break;
1301 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1302 }
1303
1304 /* page was stolen from us -- free our reference */
1305 if (page_stolen) {
1306 __free_pages(rxb->page, trans_pcie->rx_page_order);
1307 rxb->page = NULL;
1308 }
1309
1310 /* Reuse the page if possible. For notification packets and
1311 * SKBs that fail to Rx correctly, add them back into the
1312 * rx_free list for reuse later. */
1313 if (rxb->page != NULL) {
1314 rxb->page_dma =
1315 dma_map_page(trans->dev, rxb->page, 0,
1316 PAGE_SIZE << trans_pcie->rx_page_order,
1317 DMA_FROM_DEVICE);
1318 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1319 /*
1320 * free the page(s) as well to not break
1321 * the invariant that the items on the used
1322 * list have no page(s)
1323 */
1324 __free_pages(rxb->page, trans_pcie->rx_page_order);
1325 rxb->page = NULL;
1326 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1327 } else {
1328 list_add_tail(&rxb->list, &rxq->rx_free);
1329 rxq->free_count++;
1330 }
1331 } else
1332 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1333 }
1334
1335 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1336 struct iwl_rxq *rxq, int i)
1337 {
1338 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1339 struct iwl_rx_mem_buffer *rxb;
1340 u16 vid;
1341
1342 if (!trans->cfg->mq_rx_supported) {
1343 rxb = rxq->queue[i];
1344 rxq->queue[i] = NULL;
1345 return rxb;
1346 }
1347
1348 /* used_bd entries are 32/16 bit, but only 12 bits are used to retrieve the vid */
1349 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1350 vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
1351 else
1352 vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
1353
1354 if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
1355 goto out_err;
1356
1357 rxb = trans_pcie->global_table[vid - 1];
1358 if (rxb->invalid)
1359 goto out_err;
1360
1361 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1362 rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
1363
1364 rxb->invalid = true;
1365
1366 return rxb;
1367
1368 out_err:
1369 WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1370 iwl_force_nmi(trans);
1371 return NULL;
1372 }
1373
1374 /*
1375 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1376 */
1377 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
1378 {
1379 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1380 struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
1381 u32 r, i, count = 0;
1382 bool emergency = false;
1383
1384 restart:
1385 spin_lock(&rxq->lock);
1386 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1387 * buffer that the driver may process (last buffer filled by ucode). */
1388 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
1389 i = rxq->read;
1390
1391 /* W/A 9000 device step A0 wrap-around bug */
1392 r &= (rxq->queue_size - 1);
1393
1394 /* Rx interrupt, but nothing sent from uCode */
1395 if (i == r)
1396 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1397
1398 while (i != r) {
1399 struct iwl_rx_mem_buffer *rxb;
1400
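/*
 * If half of the queue's RBDs are held back as used, the background
 * allocator is falling behind; switch to emergency mode and allocate
 * pages directly (GFP_ATOMIC) further down in this loop.
 */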
1401 if (unlikely(rxq->used_count == rxq->queue_size / 2))
1402 emergency = true;
1403
1404 rxb = iwl_pcie_get_rxb(trans, rxq, i);
1405 if (!rxb)
1406 goto out;
1407
1408 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1409 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
1410
1411 i = (i + 1) & (rxq->queue_size - 1);
1412
1413 /*
1414 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1415 * try to claim the pre-allocated buffers from the allocator.
1416 * If not ready - will try to reclaim next time.
1417 * There is no need to reschedule work - allocator exits only
1418 * on success
1419 */
1420 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1421 iwl_pcie_rx_allocator_get(trans, rxq);
1422
1423 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1424 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1425
1426 /* Add the remaining empty RBDs for allocator use */
1427 spin_lock(&rba->lock);
1428 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1429 spin_unlock(&rba->lock);
1430 } else if (emergency) {
1431 count++;
1432 if (count == 8) {
1433 count = 0;
1434 if (rxq->used_count < rxq->queue_size / 3)
1435 emergency = false;
1436
1437 rxq->read = i;
1438 spin_unlock(&rxq->lock);
1439 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1440 iwl_pcie_rxq_restock(trans, rxq);
1441 goto restart;
1442 }
1443 }
1444 }
1445 out:
1446 /* Backtrack one entry */
1447 rxq->read = i;
1448 /* update cr tail with the rxq read pointer */
1449 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1450 *rxq->cr_tail = cpu_to_le16(r);
1451 spin_unlock(&rxq->lock);
1452
1453 /*
1454 * handle a case where in emergency there are some unallocated RBDs.
1455 * those RBDs are in the used list, but are not tracked by the queue's
1456 * used_count which counts allocator owned RBDs.
1457 * unallocated emergency RBDs must be allocated on exit, otherwise
1458 * when called again the function may not be in emergency mode and
1459 * they will be handed to the allocator with no tracking in the RBD
1460 * allocator counters, which will lead to them never being claimed back
1461 * by the queue.
1462 * by allocating them here, they are now in the queue free list, and
1463 * will be restocked by the next call of iwl_pcie_rxq_restock.
1464 */
1465 if (unlikely(emergency && count))
1466 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1467
1468 if (rxq->napi.poll)
1469 napi_gro_flush(&rxq->napi, false);
1470
1471 iwl_pcie_rxq_restock(trans, rxq);
1472 }
1473
1474 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1475 {
1476 u8 queue = entry->entry;
1477 struct msix_entry *entries = entry - queue;
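/*
 * entry is &msix_entries[queue], so stepping back by the queue index
 * yields &msix_entries[0], from which the parent iwl_trans_pcie is
 * recovered via container_of() below.
 */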
1478
1479 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1480 }
1481
1482 /*
1483 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1484 * This interrupt handler should be used with RSS queue only.
1485 */
1486 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1487 {
1488 struct msix_entry *entry = dev_id;
1489 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1490 struct iwl_trans *trans = trans_pcie->trans;
1491
1492 trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1493
1494 if (WARN_ON(entry->entry >= trans->num_rx_queues))
1495 return IRQ_NONE;
1496
1497 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1498
1499 local_bh_disable();
1500 iwl_pcie_rx_handle(trans, entry->entry);
1501 local_bh_enable();
1502
1503 iwl_pcie_clear_irq(trans, entry);
1504
1505 lock_map_release(&trans->sync_cmd_lockdep_map);
1506
1507 return IRQ_HANDLED;
1508 }
1509
1510 /*
1511 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1512 */
1513 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1514 {
1515 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1516 int i;
1517
1518 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1519 if (trans->cfg->internal_wimax_coex &&
1520 !trans->cfg->apmg_not_supported &&
1521 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1522 APMS_CLK_VAL_MRB_FUNC_MODE) ||
1523 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1524 APMG_PS_CTRL_VAL_RESET_REQ))) {
1525 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1526 iwl_op_mode_wimax_active(trans->op_mode);
1527 wake_up(&trans_pcie->wait_command_queue);
1528 return;
1529 }
1530
1531 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
1532 if (!trans_pcie->txq[i])
1533 continue;
1534 del_timer(&trans_pcie->txq[i]->stuck_timer);
1535 }
1536
1537 /* The STATUS_FW_ERROR bit is set in this function. This must happen
1538 * before we wake up the command caller, to ensure a proper cleanup. */
1539 iwl_trans_fw_error(trans);
1540
1541 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1542 wake_up(&trans_pcie->wait_command_queue);
1543 }
1544
1545 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1546 {
1547 u32 inta;
1548
1549 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1550
1551 trace_iwlwifi_dev_irq(trans->dev);
1552
1553 /* Discover which interrupts are active/pending */
1554 inta = iwl_read32(trans, CSR_INT);
1555
1556 /* the thread will service interrupts and re-enable them */
1557 return inta;
1558 }
1559
1560 /* a device (PCI-E) page is 4096 bytes long */
1561 #define ICT_SHIFT 12
1562 #define ICT_SIZE (1 << ICT_SHIFT)
1563 #define ICT_COUNT (ICT_SIZE / sizeof(u32))
1564
1565 /* Interrupt handler using the ICT table. With this mechanism the driver
1566 * stops reading the INTA register to discover the device's interrupts,
1567 * since reading that register is expensive. Instead, the device writes its
1568 * interrupt causes into the ICT DRAM table, increments its index and fires
1569 * an interrupt; the driver then ORs all ICT table entries from the current
1570 * index up to the first entry with value 0 to get the causes to service.
1571 * Finally, the driver sets the consumed entries back to 0 and updates the index.
1572 */
1573 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1574 {
1575 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1576 u32 inta;
1577 u32 val = 0;
1578 u32 read;
1579
1580 trace_iwlwifi_dev_irq(trans->dev);
1581
1582 /* Ignore interrupt if there's nothing in NIC to service.
1583 * This may be due to IRQ shared with another device,
1584 * or due to sporadic interrupts thrown from our NIC. */
1585 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1586 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1587 if (!read)
1588 return 0;
1589
1590 /*
1591 * Collect all entries up to the first 0, starting from ict_index;
1592 * note we already read at ict_index.
1593 */
1594 do {
1595 val |= read;
1596 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1597 trans_pcie->ict_index, read);
1598 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1599 trans_pcie->ict_index =
1600 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1601
1602 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1603 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1604 read);
1605 } while (read);
1606
1607 /* We should not get this value, just ignore it. */
1608 if (val == 0xffffffff)
1609 val = 0;
1610
1611 /*
1612 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1613 * (bit 15 before shifting it to 31) to clear when using interrupt
1614 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1615 * so we use them to decide on the real state of the Rx bit.
1616 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1617 */
1618 if (val & 0xC0000)
1619 val |= 0x8000;
1620
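/*
 * Translate the compressed ICT value back to the CSR_INT layout: the low
 * byte maps directly, while the second byte is shifted up to bits 24-31.
 */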
1621 inta = (0xff & val) | ((0xff00 & val) << 16);
1622 return inta;
1623 }
1624
1625 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
1626 {
1627 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1628 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1629 bool hw_rfkill, prev, report;
1630
1631 mutex_lock(&trans_pcie->mutex);
1632 prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1633 hw_rfkill = iwl_is_rfkill_set(trans);
1634 if (hw_rfkill) {
1635 set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1636 set_bit(STATUS_RFKILL_HW, &trans->status);
1637 }
1638 if (trans_pcie->opmode_down)
1639 report = hw_rfkill;
1640 else
1641 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1642
1643 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1644 hw_rfkill ? "disable radio" : "enable radio");
1645
1646 isr_stats->rfkill++;
1647
1648 if (prev != report)
1649 iwl_trans_pcie_rf_kill(trans, report);
1650 mutex_unlock(&trans_pcie->mutex);
1651
1652 if (hw_rfkill) {
1653 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1654 &trans->status))
1655 IWL_DEBUG_RF_KILL(trans,
1656 "Rfkill while SYNC HCMD in flight\n");
1657 wake_up(&trans_pcie->wait_command_queue);
1658 } else {
1659 clear_bit(STATUS_RFKILL_HW, &trans->status);
1660 if (trans_pcie->opmode_down)
1661 clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1662 }
1663 }
1664
1665 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1666 {
1667 struct iwl_trans *trans = dev_id;
1668 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1669 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1670 u32 inta = 0;
1671 u32 handled = 0;
1672
1673 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1674
1675 spin_lock(&trans_pcie->irq_lock);
1676
1677 /* dram interrupt table not set yet,
1678 * use legacy interrupt.
1679 */
1680 if (likely(trans_pcie->use_ict))
1681 inta = iwl_pcie_int_cause_ict(trans);
1682 else
1683 inta = iwl_pcie_int_cause_non_ict(trans);
1684
1685 if (iwl_have_debug_level(IWL_DL_ISR)) {
1686 IWL_DEBUG_ISR(trans,
1687 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1688 inta, trans_pcie->inta_mask,
1689 iwl_read32(trans, CSR_INT_MASK),
1690 iwl_read32(trans, CSR_FH_INT_STATUS));
1691 if (inta & (~trans_pcie->inta_mask))
1692 IWL_DEBUG_ISR(trans,
1693 "We got a masked interrupt (0x%08x)\n",
1694 inta & (~trans_pcie->inta_mask));
1695 }
1696
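/* Only service interrupt causes that we actually enabled */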
1697 inta &= trans_pcie->inta_mask;
1698
1699 /*
1700 * Ignore interrupt if there's nothing in NIC to service.
1701 * This may be due to an IRQ shared with another device,
1702 * or due to sporadic interrupts thrown from our NIC.
1703 */
1704 if (unlikely(!inta)) {
1705 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1706 /*
1707 * Re-enable interrupts here since we don't
1708 * have anything to service
1709 */
1710 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1711 _iwl_enable_interrupts(trans);
1712 spin_unlock(&trans_pcie->irq_lock);
1713 lock_map_release(&trans->sync_cmd_lockdep_map);
1714 return IRQ_NONE;
1715 }
1716
1717 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1718 /*
1719 * Hardware disappeared. It might have
1720 * already raised an interrupt.
1721 */
1722 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1723 spin_unlock(&trans_pcie->irq_lock);
1724 goto out;
1725 }
1726
1727 /* Ack/clear/reset pending uCode interrupts.
1728 * Note: Some bits in CSR_INT are an "OR" of bits in CSR_FH_INT_STATUS; those are acked separately via CSR_FH_INT_STATUS below.
1729 */
1730 /* There is a hardware bug in the interrupt mask function such that some
1731 * interrupts (e.g. CSR_INT_BIT_SCD) can still be generated even if
1732 * they are disabled in the CSR_INT_MASK register. Furthermore, the
1733 * ICT interrupt handling mechanism has another bug that might cause
1734 * these unmasked interrupts to fail to be detected. We work around the
1735 * hardware bugs here by ACKing all the possible interrupts so that
1736 * interrupt coalescing can still be achieved.
1737 */
1738 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1739
1740 if (iwl_have_debug_level(IWL_DL_ISR))
1741 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1742 inta, iwl_read32(trans, CSR_INT_MASK));
1743
1744 spin_unlock(&trans_pcie->irq_lock);
1745
1746 /* Now service all interrupt bits discovered above. */
1747 if (inta & CSR_INT_BIT_HW_ERR) {
1748 IWL_ERR(trans, "Hardware error detected. Restarting.\n");
1749
1750 /* Tell the device to stop sending interrupts */
1751 iwl_disable_interrupts(trans);
1752
1753 isr_stats->hw++;
1754 iwl_pcie_irq_handle_error(trans);
1755
1756 handled |= CSR_INT_BIT_HW_ERR;
1757
1758 goto out;
1759 }
1760
1761 if (iwl_have_debug_level(IWL_DL_ISR)) {
1762 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1763 if (inta & CSR_INT_BIT_SCD) {
1764 IWL_DEBUG_ISR(trans,
1765 "Scheduler finished to transmit the frame/frames.\n");
1766 isr_stats->sch++;
1767 }
1768
1769 /* Alive notification via Rx interrupt will do the real work */
1770 if (inta & CSR_INT_BIT_ALIVE) {
1771 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1772 isr_stats->alive++;
1773 if (trans->cfg->gen2) {
1774 /*
1775 * We can restock, since firmware configured
1776 * the RFH
1777 */
1778 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1779 }
1780 }
1781 }
1782
1783 /* Safely ignore these bits for debug checks below */
1784 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1785
1786 /* HW RF KILL switch toggled */
1787 if (inta & CSR_INT_BIT_RF_KILL) {
1788 iwl_pcie_handle_rfkill_irq(trans);
1789 handled |= CSR_INT_BIT_RF_KILL;
1790 }
1791
1792 /* Chip got too hot and stopped itself */
1793 if (inta & CSR_INT_BIT_CT_KILL) {
1794 IWL_ERR(trans, "Microcode CT kill error detected.\n");
1795 isr_stats->ctkill++;
1796 handled |= CSR_INT_BIT_CT_KILL;
1797 }
1798
1799 /* Error detected by uCode */
1800 if (inta & CSR_INT_BIT_SW_ERR) {
1801 IWL_ERR(trans, "Microcode SW error detected. "
1802 " Restarting 0x%X.\n", inta);
1803 isr_stats->sw++;
1804 iwl_pcie_irq_handle_error(trans);
1805 handled |= CSR_INT_BIT_SW_ERR;
1806 }
1807
1808 /* uCode wakes up after power-down sleep */
1809 if (inta & CSR_INT_BIT_WAKEUP) {
1810 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1811 iwl_pcie_rxq_check_wrptr(trans);
1812 iwl_pcie_txq_check_wrptrs(trans);
1813
1814 isr_stats->wakeup++;
1815
1816 handled |= CSR_INT_BIT_WAKEUP;
1817 }
1818
1819 /* All uCode command responses, including Tx command responses,
1820 * Rx "responses" (frame-received notification), and other
1821 * notifications from uCode come through here. */
1822 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1823 CSR_INT_BIT_RX_PERIODIC)) {
1824 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1825 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1826 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1827 iwl_write32(trans, CSR_FH_INT_STATUS,
1828 CSR_FH_INT_RX_MASK);
1829 }
1830 if (inta & CSR_INT_BIT_RX_PERIODIC) {
1831 handled |= CSR_INT_BIT_RX_PERIODIC;
1832 iwl_write32(trans,
1833 CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1834 }
1835 /* Sending an RX interrupt requires many steps to be done in the
1836 * device:
1837 * 1- write interrupt to current index in ICT table.
1838 * 2- dma RX frame.
1839 * 3- update RX shared data to indicate last write index.
1840 * 4- send interrupt.
1841 * This could lead to an RX race: the driver could receive an RX interrupt
1842 * but the shared data changes do not yet reflect this; the
1843 * periodic interrupt will detect any dangling Rx activity.
1844 */
1845
1846 /* Disable periodic interrupt; we use it as just a one-shot. */
1847 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1848 CSR_INT_PERIODIC_DIS);
1849
1850 /*
1851 * Enable periodic interrupt in 8 msec only if we received
1852 * real RX interrupt (instead of just periodic int), to catch
1853 * any dangling Rx interrupt. If it was just the periodic
1854 * interrupt, there was no dangling Rx activity, and no need
1855 * to extend the periodic interrupt; one-shot is enough.
1856 */
1857 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1858 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1859 CSR_INT_PERIODIC_ENA);
1860
1861 isr_stats->rx++;
1862
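/* The RX path hands packets up to the op_mode (and ultimately the
 * networking stack), which expects to run with BHs disabled.
 */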
1863 local_bh_disable();
1864 iwl_pcie_rx_handle(trans, 0);
1865 local_bh_enable();
1866 }
1867
1868 /* This "Tx" DMA channel is used only for loading uCode */
1869 if (inta & CSR_INT_BIT_FH_TX) {
1870 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1871 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1872 isr_stats->tx++;
1873 handled |= CSR_INT_BIT_FH_TX;
1874 /* Wake up uCode load routine, now that load is complete */
1875 trans_pcie->ucode_write_complete = true;
1876 wake_up(&trans_pcie->ucode_write_waitq);
1877 }
1878
1879 if (inta & ~handled) {
1880 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1881 isr_stats->unhandled++;
1882 }
1883
1884 if (inta & ~(trans_pcie->inta_mask)) {
1885 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1886 inta & ~trans_pcie->inta_mask);
1887 }
1888
1889 spin_lock(&trans_pcie->irq_lock);
1890 /* Re-enable all interrupts only if they were merely disabled by this handler (STATUS_INT_ENABLED still set) */
1891 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1892 _iwl_enable_interrupts(trans);
1893 /* we are loading the firmware, enable FH_TX interrupt only */
1894 else if (handled & CSR_INT_BIT_FH_TX)
1895 iwl_enable_fw_load_int(trans);
1896 /* Re-enable RF_KILL if it occurred */
1897 else if (handled & CSR_INT_BIT_RF_KILL)
1898 iwl_enable_rfkill_int(trans);
1899 spin_unlock(&trans_pcie->irq_lock);
1900
1901 out:
1902 lock_map_release(&trans->sync_cmd_lockdep_map);
1903 return IRQ_HANDLED;
1904 }
1905
1906 /******************************************************************************
1907 *
1908 * ICT functions
1909 *
1910 ******************************************************************************/
1911
1912 /* Free dram table */
1913 void iwl_pcie_free_ict(struct iwl_trans *trans)
1914 {
1915 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1916
1917 if (trans_pcie->ict_tbl) {
1918 dma_free_coherent(trans->dev, ICT_SIZE,
1919 trans_pcie->ict_tbl,
1920 trans_pcie->ict_tbl_dma);
1921 trans_pcie->ict_tbl = NULL;
1922 trans_pcie->ict_tbl_dma = 0;
1923 }
1924 }
1925
1926 /*
1927 * Allocate the DRAM shared table; it is an aligned memory
1928 * block of ICT_SIZE bytes.
1929 * Also reset all data related to the ICT table interrupt.
1930 */
1931 int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1932 {
1933 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1934
1935 trans_pcie->ict_tbl =
1936 dma_zalloc_coherent(trans->dev, ICT_SIZE,
1937 &trans_pcie->ict_tbl_dma,
1938 GFP_KERNEL);
1939 if (!trans_pcie->ict_tbl)
1940 return -ENOMEM;
1941
1942 /* just an API sanity check ... it is guaranteed to be aligned */
1943 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
1944 iwl_pcie_free_ict(trans);
1945 return -EINVAL;
1946 }
1947
1948 return 0;
1949 }
1950
1951 /* The device is going up: inform it that we are using the ICT interrupt table,
1952 * and tell the driver side to start using ICT interrupts as well.
1953 */
1954 void iwl_pcie_reset_ict(struct iwl_trans *trans)
1955 {
1956 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1957 u32 val;
1958
1959 if (!trans_pcie->ict_tbl)
1960 return;
1961
1962 spin_lock(&trans_pcie->irq_lock);
1963 _iwl_disable_interrupts(trans);
1964
1965 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
1966
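/* Point the device at the ICT table: the register takes the table's
 * DMA address shifted right by ICT_SHIFT, plus the enable and
 * wrap-check flags below.
 */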
1967 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
1968
1969 val |= CSR_DRAM_INT_TBL_ENABLE |
1970 CSR_DRAM_INIT_TBL_WRAP_CHECK |
1971 CSR_DRAM_INIT_TBL_WRITE_POINTER;
1972
1973 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
1974
1975 iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
1976 trans_pcie->use_ict = true;
1977 trans_pcie->ict_index = 0;
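/* Ack any interrupts that might still be pending before re-enabling them */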
1978 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
1979 _iwl_enable_interrupts(trans);
1980 spin_unlock(&trans_pcie->irq_lock);
1981 }
1982
1983 /* The device is going down: disable ICT interrupt usage */
1984 void iwl_pcie_disable_ict(struct iwl_trans *trans)
1985 {
1986 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1987
1988 spin_lock(&trans_pcie->irq_lock);
1989 trans_pcie->use_ict = false;
1990 spin_unlock(&trans_pcie->irq_lock);
1991 }
1992
1993 irqreturn_t iwl_pcie_isr(int irq, void *data)
1994 {
1995 struct iwl_trans *trans = data;
1996
1997 if (!trans)
1998 return IRQ_NONE;
1999
2000 /* Disable (but don't clear!) interrupts here to avoid
2001 * back-to-back ISRs and sporadic interrupts from our NIC.
2002 * If we have something to service, the tasklet will re-enable ints.
2003 * If we *don't* have something, we'll re-enable before leaving here.
2004 */
2005 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2006
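/* The threaded handler does the actual servicing and re-enables interrupts */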
2007 return IRQ_WAKE_THREAD;
2008 }
2009
2010 irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
2011 {
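/* Nothing to do in hard-IRQ context; the threaded handler reads and
 * clears the MSI-X cause registers.
 */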
2012 return IRQ_WAKE_THREAD;
2013 }
2014
2015 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
2016 {
2017 struct msix_entry *entry = dev_id;
2018 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2019 struct iwl_trans *trans = trans_pcie->trans;
2020 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2021 u32 inta_fh, inta_hw;
2022
2023 lock_map_acquire(&trans->sync_cmd_lockdep_map);
2024
2025 spin_lock(&trans_pcie->irq_lock);
2026 inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
2027 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
2028 /*
2029 * Clear the cause registers to avoid handling the same cause again.
2030 */
2031 iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
2032 iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
2033 spin_unlock(&trans_pcie->irq_lock);
2034
2035 trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
2036
2037 if (unlikely(!(inta_fh | inta_hw))) {
2038 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
2039 lock_map_release(&trans->sync_cmd_lockdep_map);
2040 return IRQ_NONE;
2041 }
2042
2043 if (iwl_have_debug_level(IWL_DL_ISR))
2044 IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
2045 inta_fh,
2046 iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
2047
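/* RX causes that share this vector are drained inline: Q0 is the
 * default RX queue, Q1 the first RSS queue.
 */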
2048 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2049 inta_fh & MSIX_FH_INT_CAUSES_Q0) {
2050 local_bh_disable();
2051 iwl_pcie_rx_handle(trans, 0);
2052 local_bh_enable();
2053 }
2054
2055 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2056 inta_fh & MSIX_FH_INT_CAUSES_Q1) {
2057 local_bh_disable();
2058 iwl_pcie_rx_handle(trans, 1);
2059 local_bh_enable();
2060 }
2061
2062 /* This "Tx" DMA channel is used only for loading uCode */
2063 if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
2064 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2065 isr_stats->tx++;
2066 /*
2067 * Wake up uCode load routine,
2068 * now that load is complete
2069 */
2070 trans_pcie->ucode_write_complete = true;
2071 wake_up(&trans_pcie->ucode_write_waitq);
2072 }
2073
2074 /* Error detected by uCode */
2075 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
2076 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
2077 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
2078 IWL_ERR(trans,
2079 "Microcode SW error detected. Restarting 0x%X.\n",
2080 inta_fh);
2081 isr_stats->sw++;
2082 iwl_pcie_irq_handle_error(trans);
2083 }
2084
2085 /* After checking the FH register, check the HW register */
2086 if (iwl_have_debug_level(IWL_DL_ISR))
2087 IWL_DEBUG_ISR(trans,
2088 "ISR inta_hw 0x%08x, enabled 0x%08x\n",
2089 inta_hw,
2090 iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
2091
2092 /* Alive notification via Rx interrupt will do the real work */
2093 if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
2094 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2095 isr_stats->alive++;
2096 if (trans->cfg->gen2) {
2097 /* We can restock, since firmware configured the RFH */
2098 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2099 }
2100 }
2101
2102 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
2103 inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
2104 /* Reflect IML transfer status */
2105 int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
2106
2107 IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
2108 if (res == IWL_IMAGE_RESP_FAIL) {
2109 isr_stats->sw++;
2110 iwl_pcie_irq_handle_error(trans);
2111 }
2112 } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
2113 /* uCode wakes up after power-down sleep */
2114 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2115 iwl_pcie_rxq_check_wrptr(trans);
2116 iwl_pcie_txq_check_wrptrs(trans);
2117
2118 isr_stats->wakeup++;
2119 }
2120
2121 /* Chip got too hot and stopped itself */
2122 if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
2123 IWL_ERR(trans, "Microcode CT kill error detected.\n");
2124 isr_stats->ctkill++;
2125 }
2126
2127 /* HW RF KILL switch toggled */
2128 if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
2129 iwl_pcie_handle_rfkill_irq(trans);
2130
2131 if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
2132 IWL_ERR(trans,
2133 "Hardware error detected. Restarting.\n");
2134
2135 isr_stats->hw++;
2136 iwl_pcie_irq_handle_error(trans);
2137 }
2138
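/* Acknowledge this MSI-X vector so the device can raise it again */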
2139 iwl_pcie_clear_irq(trans, entry);
2140
2141 lock_map_release(&trans->sync_cmd_lockdep_map);
2142
2143 return IRQ_HANDLED;
2144 }
2145