// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_MAX_NUM_VFS	32
#define ADF_ERRSOU3	(0x3A000 + 0x0C)
#define ADF_ERRSOU5	(0x3A000 + 0xD8)
#define ADF_ERRMSK3	(0x3A000 + 0x1C)
#define ADF_ERRMSK5	(0x3A000 + 0xDC)
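/*
 * ADF_ERR_REG_VF2PF_L() extracts the per-VF source bits 9:24 of
 * ERRSOU3/ERRMSK3 (VFs 0-15); ADF_ERR_REG_VF2PF_U() places bits 0:15 of
 * ERRSOU5/ERRMSK5 (VFs 16-31) above them.  Together they fold the two
 * registers into one 32-bit mask with one bit per VF.
 */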
#define ADF_ERR_REG_VF2PF_L(vf_src)	(((vf_src) & 0x01FFFE00) >> 9)
#define ADF_ERR_REG_VF2PF_U(vf_src)	(((vf_src) & 0x0000FFFF) << 16)

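/*
 * Enable MSI-X for the device: one vector per ring bank plus one for the
 * AE cluster on a plain PF.  With SR-IOV enabled, only the AE vector
 * (at hardware index hw_data->num_banks) is used.
 */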
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;

	if (hw_data->set_msix_rttable)
		hw_data->set_msix_rttable(accel_dev);

	/* If SR-IOV is disabled, add entries for each bank */
	if (!accel_dev->pf.vf_info) {
		int i;

		msix_num_entries += hw_data->num_banks;
		for (i = 0; i < msix_num_entries; i++)
			pci_dev_info->msix_entries.entries[i].entry = i;
	} else {
		pci_dev_info->msix_entries.entries[0].entry =
			hw_data->num_banks;
	}

	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
				  pci_dev_info->msix_entries.entries,
				  msix_num_entries)) {
		dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
		return -EFAULT;
	}
	return 0;
}

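/* Tear down the MSI-X vectors set up by adf_enable_msix() */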
static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
	pci_disable_msix(pci_dev_info->pci_dev);
}

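/*
 * Top half for a ring bank interrupt: mask further flag/coalesced
 * interrupts on the bank, then defer response processing to the bank's
 * tasklet.
 */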
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
					    0);
	tasklet_hi_schedule(&bank->resp_handler);
	return IRQ_HANDLED;
}

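/*
 * Top half for the AE cluster vector.  On a PF with SR-IOV enabled this
 * vector also signals VF2PF interrupts, so scan the error source
 * registers for pending VF requests before treating the interrupt as
 * spurious.
 */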
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;

#ifdef CONFIG_PCI_IOV
	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
	if (accel_dev->pf.vf_info) {
		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
		struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
		void __iomem *pmisc_addr = pmisc->virt_addr;
		u32 errsou3, errsou5, errmsk3, errmsk5;
		unsigned long vf_mask;

		/* Get the interrupt sources triggered by VFs */
		errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
		errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
		vf_mask = ADF_ERR_REG_VF2PF_L(errsou3);
		vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);

		/* To avoid adding duplicate entries to the work queue, clear
		 * the vf_mask bits that are already masked in the ERRMSK
		 * registers.
		 */
		errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
		errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
		vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
		vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);

		if (vf_mask) {
			struct adf_accel_vf_info *vf_info;
			bool irq_handled = false;
			int i;

			/* Disable VF2PF interrupts for VFs with pending ints */
			adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);

			/*
			 * Handle VF2PF interrupt unless the VF is malicious and
			 * is attempting to flood the host OS with VF2PF interrupts.
			 */
			for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
				vf_info = accel_dev->pf.vf_info + i;

				if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
					dev_info(&GET_DEV(accel_dev),
						 "Too many ints from VF%d\n",
						 vf_info->vf_nr + 1);
					continue;
				}

				adf_schedule_vf2pf_handler(vf_info);
				irq_handled = true;
			}

			if (irq_handled)
				return IRQ_HANDLED;
		}
	}
#endif /* CONFIG_PCI_IOV */

	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
		accel_dev->accel_id);

	return IRQ_NONE;
}

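/*
 * Register handlers for all MSI-X vectors: one per ring bank, with
 * affinity hints spread across the online CPUs, unless SR-IOV is
 * enabled; plus the AE cluster vector.
 */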
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int ret, i = 0;
	char *name;

	/* Request an MSI-X IRQ for each bank unless SR-IOV is enabled */
	if (!accel_dev->pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];
			unsigned int cpu, cpus = num_online_cpus();

			name = pci_dev_info->msix_entries.names[i];
			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
				 "qat%d-bundle%d", accel_dev->accel_id, i);
			ret = request_irq(msixe[i].vector,
					  adf_msix_isr_bundle, 0, name, bank);
			if (ret) {
				dev_err(&GET_DEV(accel_dev),
					"failed to enable irq %d for %s\n",
					msixe[i].vector, name);
				return ret;
			}

			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
			       i) % cpus;
			irq_set_affinity_hint(msixe[i].vector,
					      get_cpu_mask(cpu));
		}
	}

	/* Request the MSI-X IRQ for the AE cluster */
	name = pci_dev_info->msix_entries.names[i];
	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat%d-ae-cluster", accel_dev->accel_id);
	ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"failed to enable irq %d for %s\n",
			msixe[i].vector, name);
		return ret;
	}
	return ret;
}

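/* Release the bank and AE vectors and drop their affinity hints */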
static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int i = 0;

	if (pci_dev_info->msix_entries.num_entries > 1) {
		for (i = 0; i < hw_data->num_banks; i++) {
			irq_set_affinity_hint(msixe[i].vector, NULL);
			free_irq(msixe[i].vector, &etr_data->banks[i]);
		}
	}
	irq_set_affinity_hint(msixe[i].vector, NULL);
	free_irq(msixe[i].vector, accel_dev);
}

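/*
 * Allocate the msix_entry array, NUMA-local to the device, and one name
 * buffer per vector.  The entry count mirrors adf_enable_msix(): banks
 * plus AE for a plain PF, AE only under SR-IOV.
 */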
static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	int i;
	char **names;
	struct msix_entry *entries;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;

	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
	if (!accel_dev->pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	entries = kcalloc_node(msix_num_entries, sizeof(*entries),
			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
	if (!entries)
		return -ENOMEM;

	names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
	if (!names) {
		kfree(entries);
		return -ENOMEM;
	}
	for (i = 0; i < msix_num_entries; i++) {
		names[i] = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
		if (!names[i])
			goto err;
	}
	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.entries = entries;
	accel_dev->accel_pci_dev.msix_entries.names = names;
	return 0;
err:
	for (i = 0; i < msix_num_entries; i++)
		kfree(names[i]);
	kfree(entries);
	kfree(names);
	return -ENOMEM;
}

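/* Free the entry array and the per-vector name buffers */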
static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	char **names = accel_dev->accel_pci_dev.msix_entries.names;
	int i;

	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
	for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
		kfree(names[i]);
	kfree(names);
}

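/* Initialize one response-handler tasklet per ring bank */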
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++)
		tasklet_init(&priv_data->banks[i].resp_handler,
			     adf_response_handler,
			     (unsigned long)&priv_data->banks[i]);
	return 0;
}

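/* Disable and kill the per-bank response tasklets */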
static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++) {
		tasklet_disable(&priv_data->banks[i].resp_handler);
		tasklet_kill(&priv_data->banks[i].resp_handler);
	}
}

/**
 * adf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device.
 */
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_entry_table(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);

/**
 * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_entry_table(accel_dev);
	if (ret)
		goto err_out;

	ret = adf_enable_msix(accel_dev);
	if (ret)
		goto err_free_msix_table;

	ret = adf_setup_bh(accel_dev);
	if (ret)
		goto err_disable_msix;

	ret = adf_request_irqs(accel_dev);
	if (ret)
		goto err_cleanup_bh;

	return 0;

err_cleanup_bh:
	adf_cleanup_bh(accel_dev);

err_disable_msix:
	adf_disable_msix(&accel_dev->accel_pci_dev);

err_free_msix_table:
	adf_isr_free_msix_entry_table(accel_dev);

err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);