/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include "mt76x2.h"
#include "mt76x2_trace.h"

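/*
 * Atomically update the cached interrupt mask and push it to the
 * MT_INT_MASK_CSR register; irq_lock serializes callers from both
 * IRQ and process context.
 */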
void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->irq_lock, flags);
	dev->irqmask &= ~clear;
	dev->irqmask |= set;
	mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask);
	spin_unlock_irqrestore(&dev->irq_lock, flags);
}

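/*
 * Called by the mt76 core once the NAPI poll for queue q has finished;
 * re-enable the RX done interrupt that the IRQ handler masked off.
 */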
void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);

	mt76x2_irq_enable(dev, MT_INT_RX_DONE(q));
}

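/*
 * Top-half interrupt handler: acknowledge all pending interrupt sources,
 * then defer the actual work to the matching tasklet or NAPI context.
 */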
irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance)
{
	struct mt76x2_dev *dev = dev_instance;
	u32 intr;

	/* read pending interrupt sources and ack them by writing them back */
	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return IRQ_NONE;

	trace_dev_irq(dev, intr, dev->irqmask);

	/* only handle sources that are currently unmasked */
	intr &= dev->irqmask;

	if (intr & MT_INT_TX_DONE_ALL) {
		/* keep TX done masked until the tx tasklet has run */
		mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_RX_DONE(0)) {
		/* keep RX done masked until the NAPI poll completes */
		mt76x2_irq_disable(dev, MT_INT_RX_DONE(0));
		napi_schedule(&dev->mt76.napi[0]);
	}

	if (intr & MT_INT_RX_DONE(1)) {
		mt76x2_irq_disable(dev, MT_INT_RX_DONE(1));
		napi_schedule(&dev->mt76.napi[1]);
	}

	/* prepare the next beacon ahead of the upcoming TBTT */
	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT)
		mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);

	if (intr & MT_INT_TX_STAT) {
		mt76x2_mac_poll_tx_status(dev, true);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_GPTIMER) {
		/* the GP timer drives the DFS pattern detector */
		mt76x2_irq_disable(dev, MT_INT_GPTIMER);
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
	}

	return IRQ_HANDLED;
}