// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dbgfs.h>
#include <adf_heartbeat.h>

#include "adf_4xxx_hw_data.h"
#include "qat_compression.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

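/* PCI device IDs claimed by this driver; the table is exported for module autoloading. */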
static const struct pci_device_id adf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
	{ PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
	{ PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
	{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);

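/*
 * The order of services_operations must match enum configs:
 * adf_gen4_dev_config() looks up the ADF_SERVICES_ENABLED string with
 * sysfs_match_string() and interprets the returned index as a DEV_CFG_*
 * value.
 */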
enum configs {
	DEV_CFG_CY = 0,
	DEV_CFG_DC,
	DEV_CFG_SYM,
	DEV_CFG_ASYM,
	DEV_CFG_ASYM_SYM,
	DEV_CFG_ASYM_DC,
	DEV_CFG_DC_ASYM,
	DEV_CFG_SYM_DC,
	DEV_CFG_DC_SYM,
};

static const char * const services_operations[] = {
	ADF_CFG_CY,
	ADF_CFG_DC,
	ADF_CFG_SYM,
	ADF_CFG_ASYM,
	ADF_CFG_ASYM_SYM,
	ADF_CFG_ASYM_DC,
	ADF_CFG_DC_ASYM,
	ADF_CFG_SYM_DC,
	ADF_CFG_DC_SYM,
};

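/*
 * Undo the setup done in adf_probe(): release the 4xxx hw_data state, tear
 * down debugfs, remove the configuration table and drop the device from the
 * accelerator table.
 */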
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	if (accel_dev->hw_device) {
		adf_clean_hw_data_4xxx(accel_dev->hw_device);
		accel_dev->hw_device = NULL;
	}
	adf_dbgfs_exit(accel_dev);
	adf_cfg_dev_remove(accel_dev);
	adf_devmgr_rm_dev(accel_dev, NULL);
}

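/*
 * Create the GENERAL section and set the default ADF_SERVICES_ENABLED value:
 * crypto for even-numbered devices, compression for odd-numbered ones.
 * Also save ADF_CFG_HB_TIMER_MIN_MS as the heartbeat timer configuration.
 */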
static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
{
	const char *config;
	int ret;

	config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;

	ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
	if (ret)
		return ret;

	/* Default configuration is crypto only for even devices
	 * and compression for odd devices
	 */
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					  ADF_SERVICES_ENABLED, config,
					  ADF_STR);
	if (ret)
		return ret;

	adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);

	return 0;
}

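/*
 * Populate the KERNEL section with kernel-space crypto instances.  One
 * instance is created per online CPU, capped at half the number of banks,
 * since each instance claims a pair of banks (one asym, one sym).  For every
 * instance the bank numbers, ring sizes, ring numbers, core affinity and
 * interrupt coalescing timer are added, followed by the total instance
 * counts.
 */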
static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	unsigned long bank, val;
	int instances;
	int ret;
	int i;

	if (adf_hw_dev_has_crypto(accel_dev))
		instances = min(cpus, banks / 2);
	else
		instances = 0;

	for (i = 0; i < instances; i++) {
		val = i;
		bank = i * 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &bank, ADF_DEC);
		if (ret)
			goto err;

		bank += 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &bank, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						  key, &val, ADF_DEC);
		if (ret)
			goto err;
	}

	val = i;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	val = 0;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
	return ret;
}

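/*
 * Populate the KERNEL section with kernel-space compression instances.  One
 * instance is created per online CPU, capped at the number of banks, since
 * each instance claims a single bank.  For every instance the bank number,
 * ring size, ring numbers and interrupt coalescing timer are added, followed
 * by the total instance counts.
 */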
static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	unsigned long val;
	int instances;
	int ret;
	int i;

	if (adf_hw_dev_has_compression(accel_dev))
		instances = min(cpus, banks);
	else
		instances = 0;

	for (i = 0; i < instances; i++) {
		val = i;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						  key, &val, ADF_DEC);
		if (ret)
			goto err;
	}

	val = i;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	val = 0;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
	return ret;
}

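/*
 * Used for service configurations that expose no kernel-space instances:
 * set both the compression and crypto instance counts to zero.
 */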
static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
{
	unsigned long val;
	int ret;

	val = 0;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		return ret;

	return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					   &val, ADF_DEC);
}

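/*
 * Build the kernel-space runtime configuration for a GEN4 device based on
 * the ADF_SERVICES_ENABLED string stored in the GENERAL section.  Crypto-only
 * and asym;sym configurations get kernel crypto instances, compression-only
 * gets kernel compression instances, and every other valid service
 * combination gets no kernel instances.  On success the device is marked as
 * configured.
 */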
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
	if (ret)
		goto err;

	ret = adf_cfg_section_add(accel_dev, "Accelerator0");
	if (ret)
		goto err;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);
	if (ret)
		goto err;

	ret = sysfs_match_string(services_operations, services);
	if (ret < 0)
		goto err;

	switch (ret) {
	case DEV_CFG_CY:
	case DEV_CFG_ASYM_SYM:
		ret = adf_crypto_dev_config(accel_dev);
		break;
	case DEV_CFG_DC:
		ret = adf_comp_dev_config(accel_dev);
		break;
	default:
		ret = adf_no_dev_config(accel_dev);
		break;
	}

	if (ret)
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

	return ret;

err:
	dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
	return ret;
}

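/*
 * PCI probe: reject memoryless NUMA nodes, allocate the accelerator device
 * and its hardware data, read the revision ID and fuses, map the BARs,
 * create the default configuration and bring the device up.
 */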
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	unsigned int i, bar_nr;
	unsigned long bar_mask;
	struct adf_bar *bar;
	int ret;

	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
		/*
		 * If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow.
		 */
		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
	if (!accel_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = pdev;

	/*
	 * Add accel device to accel table
	 * This should be called before adf_cleanup_accel is called
	 */
	if (adf_devmgr_add_dev(accel_dev, NULL)) {
		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
		return -EFAULT;
	}

	accel_dev->owner = THIS_MODULE;
	/* Allocate and initialise device hardware meta-data structure */
	hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}

	accel_dev->hw_device = hw_data;
	adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
	pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
	hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
	accel_pci_dev->sku = hw_data->get_sku(hw_data);
	/* If the device has no acceleration engines then ignore it */
	if (!hw_data->accel_mask || !hw_data->ae_mask ||
	    (~hw_data->ae_mask & 0x01)) {
		dev_err(&pdev->dev, "No acceleration units found.\n");
		ret = -EFAULT;
		goto out_err;
	}

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	/* Enable PCI device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Can't enable PCI device.\n");
		goto out_err;
	}

	/* Set DMA identifier */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto out_err;
	}

	ret = adf_cfg_dev_init(accel_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize configuration.\n");
		goto out_err;
	}

	/* Get accelerator capabilities mask */
	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
	if (!hw_data->accel_capabilities_mask) {
		dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* Find and map all the device's BARS */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;

	ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "Failed to map pci regions.\n");
		goto out_err;
	}

	i = 0;
	for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
		bar = &accel_pci_dev->pci_bars[i++];
		bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
	}

	pci_set_master(pdev);

	if (pci_save_state(pdev)) {
		dev_err(&pdev->dev, "Failed to save pci state.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	adf_dbgfs_init(accel_dev);

	ret = adf_dev_up(accel_dev, true);
	if (ret)
		goto out_err_dev_stop;

	ret = adf_sysfs_init(accel_dev);
	if (ret)
		goto out_err_dev_stop;

	return ret;

out_err_dev_stop:
	adf_dev_down(accel_dev, false);
out_err:
	adf_cleanup_accel(accel_dev);
	return ret;
}

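/* PCI remove: bring the device down and release its resources. */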
static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}
	adf_dev_down(accel_dev, false);
	adf_cleanup_accel(accel_dev);
}

static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = ADF_4XXX_DEVICE_NAME,
	.probe = adf_probe,
	.remove = adf_remove,
	.sriov_configure = adf_sriov_configure,
	.err_handler = &adf_err_handler,
};

module_pci_driver(adf_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_4XXX_FW);
MODULE_FIRMWARE(ADF_4XXX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");