// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

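/*
 * Enable MSI-X for the device. With SR-IOV disabled, one vector is
 * requested per ring bank plus one for the AE cluster; with SR-IOV
 * enabled, only the single AE vector (placed after the bank entries)
 * is used, since the ring banks belong to the VFs.
 */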
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;

	/* If SR-IOV is disabled, add entries for each bank */
	if (!accel_dev->pf.vf_info) {
		int i;

		msix_num_entries += hw_data->num_banks;
		for (i = 0; i < msix_num_entries; i++)
			pci_dev_info->msix_entries.entries[i].entry = i;
	} else {
		pci_dev_info->msix_entries.entries[0].entry =
			hw_data->num_banks;
	}

	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
				  pci_dev_info->msix_entries.entries,
				  msix_num_entries)) {
		dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
		return -EFAULT;
	}
	return 0;
}

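/* Disable MSI-X and release the vectors allocated by adf_enable_msix() */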
static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
	pci_disable_msix(pci_dev_info->pci_dev);
}

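/*
 * Top half for ring bank interrupts: silence the bank's flag-and-coalesce
 * interrupt source and defer response processing to the bank's tasklet,
 * which re-enables the source once the responses have been handled.
 */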
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;

	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
	tasklet_hi_schedule(&bank->resp_handler);
	return IRQ_HANDLED;
}

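/*
 * Top half for the AE cluster vector. On the PF this vector also carries
 * VF2PF doorbell interrupts, which are demultiplexed from the ERRSOU
 * registers below.
 */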
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;

#ifdef CONFIG_PCI_IOV
	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
	if (accel_dev->pf.vf_info) {
		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
		struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
		void __iomem *pmisc_bar_addr = pmisc->virt_addr;
		u32 vf_mask;

		/*
		 * Get the interrupt sources triggered by VFs: ERRSOU3
		 * bits 24:9 report VFs 0-15 and ERRSOU5 bits 15:0 report
		 * VFs 16-31, giving one mask bit per VF number.
		 */
		vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
			    0x0000FFFF) << 16) |
			  ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
			    0x01FFFE00) >> 9);

		if (vf_mask) {
			struct adf_accel_vf_info *vf_info;
			bool irq_handled = false;
			int i;

			/* Disable VF2PF interrupts for VFs with pending ints */
			adf_disable_vf2pf_interrupts(accel_dev, vf_mask);

			/*
			 * Schedule tasklets to handle VF2PF interrupt BHs
			 * unless the VF is malicious and is attempting to
			 * flood the host OS with VF2PF interrupts.
			 */
			for_each_set_bit(i, (const unsigned long *)&vf_mask,
					 (sizeof(vf_mask) * BITS_PER_BYTE)) {
				vf_info = accel_dev->pf.vf_info + i;

				if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
					dev_info(&GET_DEV(accel_dev),
						 "Too many ints from VF%d\n",
						 vf_info->vf_nr + 1);
					continue;
				}

				/* Tasklet will re-enable ints from this VF */
				tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
				irq_handled = true;
			}

			if (irq_handled)
				return IRQ_HANDLED;
		}
	}
#endif /* CONFIG_PCI_IOV */

	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
		accel_dev->accel_id);

	return IRQ_NONE;
}

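/*
 * Register an IRQ handler for each enabled MSI-X vector: one per ring
 * bank (only when the banks are PF-owned, i.e. SR-IOV is disabled) and
 * one for the AE cluster.
 */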
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int ret, i = 0;
	char *name;

	/* Request msix irq for all banks unless SR-IOV enabled */
	if (!accel_dev->pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];
			unsigned int cpu, cpus = num_online_cpus();

			name = *(pci_dev_info->msix_entries.names + i);
			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
				 "qat%d-bundle%d", accel_dev->accel_id, i);
			ret = request_irq(msixe[i].vector,
					  adf_msix_isr_bundle, 0, name, bank);
			if (ret) {
				dev_err(&GET_DEV(accel_dev),
					"failed to enable irq %d for %s\n",
					msixe[i].vector, name);
				return ret;
			}

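			/*
			 * Spread bank interrupts across online CPUs,
			 * offset by device id so multiple accelerators
			 * do not pile onto the same cores.
			 */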
			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
			       i) % cpus;
			irq_set_affinity_hint(msixe[i].vector,
					      get_cpu_mask(cpu));
		}
	}

	/* Request msix irq for AE */
	name = *(pci_dev_info->msix_entries.names + i);
	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat%d-ae-cluster", accel_dev->accel_id);
	ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"failed to enable irq %d for %s\n",
			msixe[i].vector, name);
		return ret;
	}
	return ret;
}

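/*
 * Release the affinity hints and free the IRQs requested by
 * adf_request_irqs(); the bank IRQs exist only when more than one
 * MSI-X entry was allocated (SR-IOV disabled).
 */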
static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int i = 0;

	if (pci_dev_info->msix_entries.num_entries > 1) {
		for (i = 0; i < hw_data->num_banks; i++) {
			irq_set_affinity_hint(msixe[i].vector, NULL);
			free_irq(msixe[i].vector, &etr_data->banks[i]);
		}
	}
	irq_set_affinity_hint(msixe[i].vector, NULL);
	free_irq(msixe[i].vector, accel_dev);
}

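/*
 * Allocate the MSI-X entry table and per-vector name buffers. The
 * number of entries mirrors the vector layout used by adf_enable_msix().
 */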
static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	int i;
	char **names;
	struct msix_entry *entries;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;

	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
	if (!accel_dev->pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	entries = kcalloc_node(msix_num_entries, sizeof(*entries),
			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
	if (!entries)
		return -ENOMEM;

	names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
	if (!names) {
		kfree(entries);
		return -ENOMEM;
	}
	for (i = 0; i < msix_num_entries; i++) {
		*(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
		if (!(*(names + i)))
			goto err;
	}
	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.entries = entries;
	accel_dev->accel_pci_dev.msix_entries.names = names;
	return 0;
err:
	for (i = 0; i < msix_num_entries; i++)
		kfree(*(names + i));
	kfree(entries);
	kfree(names);
	return -ENOMEM;
}

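/* Free the MSI-X entry table and name buffers */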
static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	char **names = accel_dev->accel_pci_dev.msix_entries.names;
	int i;

	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
	for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
		kfree(*(names + i));
	kfree(names);
}

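/*
 * Initialize a response-handler tasklet (bottom half) for each ring
 * bank; adf_msix_isr_bundle() schedules these from hard-IRQ context.
 */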
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++)
		tasklet_init(&priv_data->banks[i].resp_handler,
			     adf_response_handler,
			     (unsigned long)&priv_data->banks[i]);
	return 0;
}

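/* Disable and kill the per-bank response tasklets */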
static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++) {
		tasklet_disable(&priv_data->banks[i].resp_handler);
		tasklet_kill(&priv_data->banks[i].resp_handler);
	}
}

/**
 * adf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device.
 */
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_entry_table(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);

/**
 * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_entry_table(accel_dev);
	if (ret)
		return ret;
	if (adf_enable_msix(accel_dev))
		goto err_out;

	if (adf_setup_bh(accel_dev))
		goto err_out;

	if (adf_request_irqs(accel_dev))
		goto err_out;

	return 0;
err_out:
	adf_isr_resource_free(accel_dev);
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);