// SPDX-License-Identifier: GPL-2.0-only
#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"
#include "nitrox_debugfs.h"

#define CNN55XX_DEV_ID 0x12
#define UCODE_HLEN 48
#define DEFAULT_SE_GROUP 0
#define DEFAULT_AE_GROUP 0

#define DRIVER_VERSION "1.2"
#define CNN55XX_UCD_BLOCK_SIZE 32768
#define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2)
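/* a microcode image spans at most two 32KB UCD blocks (64KB) */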
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW FW_DIR "cnn55xx_se.fw"
/* AE microcode */
#define AE_FW FW_DIR "cnn55xx_ae.fw"

static const char nitrox_driver_name[] = "CNN55XX";

static LIST_HEAD(ndevlist);
static DEFINE_MUTEX(devlist_lock);
static unsigned int num_devices;

/**
 * nitrox_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id nitrox_pci_tbl[] = {
	{PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);

static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");

#ifdef CONFIG_PCI_IOV
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
#else
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	return 0;
}
#endif

/**
 * struct ucode - Firmware Header
 * @id: microcode ID
 * @version: firmware version
 * @code_size: code section size
 * @raz: alignment
 * @code: code section
 */
struct ucode {
	u8 id;
	char version[VERSION_LEN - 1];
	__be32 code_size;
	u8 raz[12];
	u64 code[];
};
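
/*
 * The firmware image on disk is this header followed immediately by the
 * code section; code[] is streamed to the UCD unit as 64-bit words.
 */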

/**
 * write_to_ucd_unit - Write Firmware to NITROX UCD unit
 * @ndev: NITROX device
 * @ucode_size: microcode size in bytes
 * @ucode_data: microcode words to write
 * @block_num: UCD block number to load into
 */
static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
			      u64 *ucode_data, int block_num)
{
	u32 code_size;
	u64 offset, data;
	int i = 0;

	/*
	 * UCD structure
	 *
	 * -------------
	 * |   BLK 7   |
	 * -------------
	 * |   BLK 6   |
	 * -------------
	 * |   ...     |
	 * -------------
	 * |   BLK 0   |
	 * -------------
	 * Total of 8 blocks, each size 32KB
	 */

	/* set the block number */
	offset = UCD_UCODE_LOAD_BLOCK_NUM;
	nitrox_write_csr(ndev, offset, block_num);

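	/*
	 * Round the size up to a 16-byte boundary so the loop below always
	 * writes whole 8-byte words.
	 */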
	code_size = roundup(ucode_size, 16);
	while (code_size) {
		data = ucode_data[i];
		/* write 8 bytes at a time */
		offset = UCD_UCODE_LOAD_IDX_DATAX(i);
		nitrox_write_csr(ndev, offset, data);
		code_size -= 8;
		i++;
	}

	usleep_range(300, 400);
}

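/**
 * nitrox_load_fw - load SE and AE microcode into the UCD unit
 * @ndev: NITROX device
 *
 * Loads the SE image into UCD block 0 and the AE image into UCD block 2,
 * then points every SE and AE core at its block.
 *
 * Return: 0 on success, negative error code otherwise.
 */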
static int nitrox_load_fw(struct nitrox_device *ndev)
{
	const struct firmware *fw;
	const char *fw_name;
	struct ucode *ucode;
	u64 *ucode_data;
	u64 offset;
	union ucd_core_eid_ucode_block_num core_2_eid_val;
	union aqm_grp_execmsk_lo aqm_grp_execmask_lo;
	union aqm_grp_execmsk_hi aqm_grp_execmask_hi;
	u32 ucode_size;
	int ret, i = 0;

	fw_name = SE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the firmware version */
	memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0';

	/* Load SE Firmware on UCD Block 0 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 0);

	release_firmware(fw);

	/* put all SE cores in DEFAULT_SE_GROUP */
	offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP);
	nitrox_write_csr(ndev, offset, (~0ULL));

	/* write block number and firmware length
	 * bit:<2:0> block number
	 * bit:3 is set SE uses 32KB microcode
	 * bit:3 is clear SE uses 64KB microcode
	 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 0;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	for (i = 0; i < ndev->hw.se_cores; i++) {
		offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	fw_name = AE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the firmware version */
	memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0';

	/* Load AE Firmware on UCD Block 2 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 2);

	release_firmware(fw);

	/* put all AE cores in DEFAULT_AE_GROUP */
	offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value);
	offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value);

	/* write block number and firmware length
	 * bit:<2:0> block number
	 * bit:3 is set AE uses 32KB microcode
	 * bit:3 is clear AE uses 64KB microcode
	 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 2;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	for (i = 0; i < ndev->hw.ae_cores; i++) {
		offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	return 0;
}

/**
 * nitrox_add_to_devlist - add NITROX device to global device list
 * @ndev: NITROX device
 */
static int nitrox_add_to_devlist(struct nitrox_device *ndev)
{
	struct nitrox_device *dev;
	int ret = 0;

	INIT_LIST_HEAD(&ndev->list);
	refcount_set(&ndev->refcnt, 1);
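	/* this initial reference is the PF driver's own; nitrox_remove() drops it */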

	mutex_lock(&devlist_lock);
	list_for_each_entry(dev, &ndevlist, list) {
		if (dev == ndev) {
			ret = -EEXIST;
			goto unlock;
		}
	}
	ndev->idx = num_devices++;
	list_add_tail(&ndev->list, &ndevlist);
unlock:
	mutex_unlock(&devlist_lock);
	return ret;
}

/**
 * nitrox_remove_from_devlist - remove NITROX device from
 * global device list
 * @ndev: NITROX device
 */
static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
{
	mutex_lock(&devlist_lock);
	list_del(&ndev->list);
	num_devices--;
	mutex_unlock(&devlist_lock);
}

struct nitrox_device *nitrox_get_first_device(void)
{
	struct nitrox_device *ndev;

	mutex_lock(&devlist_lock);
	list_for_each_entry(ndev, &ndevlist, list) {
		if (nitrox_ready(ndev))
			break;
	}
	mutex_unlock(&devlist_lock);
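	/*
	 * list_for_each_entry() leaves the cursor at the list head when no
	 * ready device was found.
	 */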
	if (&ndev->list == &ndevlist)
		return NULL;

	refcount_inc(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
	return ndev;
}

void nitrox_put_device(struct nitrox_device *ndev)
{
	if (!ndev)
		return;

	refcount_dec(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
}

static int nitrox_device_flr(struct pci_dev *pdev)
{
	int pos = 0;

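	/* FLR resets device config space; save it now and restore it after the reset */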
	pos = pci_save_state(pdev);
	if (pos) {
		dev_err(&pdev->dev, "Failed to save pci state\n");
		return -ENOMEM;
	}

	/* check flr support */
	if (pcie_has_flr(pdev))
		pcie_flr(pdev);

	pci_restore_state(pdev);

	return 0;
}

static int nitrox_pf_sw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_common_sw_init(ndev);
	if (err)
		return err;

	err = nitrox_register_interrupts(ndev);
	if (err)
		nitrox_common_sw_cleanup(ndev);

	return err;
}

static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}

/**
 * nitrox_bist_check - Check NITROX BIST registers status
 * @ndev: NITROX device
 */
static int nitrox_bist_check(struct nitrox_device *ndev)
{
	u64 value = 0;
	int i;

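	/*
	 * Accumulate every BIST status register; a non-zero result means at
	 * least one built-in self-test failed.
	 */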
	for (i = 0; i < NR_CLUSTERS; i++) {
		value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
		value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
	}
	value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
	value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
	value += nitrox_read_csr(ndev, POM_BIST_REG);
	value += nitrox_read_csr(ndev, BMI_BIST_REG);
	value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
	value += nitrox_read_csr(ndev, BMO_BIST_REG);
	value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
	value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
	if (value)
		return -EIO;
	return 0;
}

static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_bist_check(ndev);
	if (err) {
		dev_err(&ndev->pdev->dev, "BIST check failed\n");
		return err;
	}
	/* get cores information */
	nitrox_get_hwinfo(ndev);

	nitrox_config_nps_core_unit(ndev);
	nitrox_config_aqm_unit(ndev);
	nitrox_config_nps_pkt_unit(ndev);
	nitrox_config_pom_unit(ndev);
	nitrox_config_efl_unit(ndev);
	/* configure IO units */
	nitrox_config_bmi_unit(ndev);
	nitrox_config_bmo_unit(ndev);
	/* configure Local Buffer Cache */
	nitrox_config_lbc_unit(ndev);
	nitrox_config_rand_unit(ndev);

	/* load firmware on cores */
	err = nitrox_load_fw(ndev);
	if (err)
		return err;

	nitrox_config_emu_unit(ndev);

	return 0;
}

/**
 * nitrox_probe - NITROX Initialization function.
 * @pdev: PCI device information struct
 * @id: entry in nitrox_pci_tbl
 *
 * Return: 0, if the driver is bound to the device, or
 *         a negative error if there is failure.
 */
static int nitrox_probe(struct pci_dev *pdev,
			const struct pci_device_id *id)
{
	struct nitrox_device *ndev;
	int err;

	dev_info_once(&pdev->dev, "%s driver version %s\n",
		      nitrox_driver_name, DRIVER_VERSION);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* do FLR */
	err = nitrox_device_flr(pdev);
	if (err) {
		dev_err(&pdev->dev, "FLR failed\n");
		pci_disable_device(pdev);
		return err;
	}

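	/* prefer 64-bit DMA; fall back to a 32-bit mask if the platform cannot do it */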
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "DMA configuration failed\n");
			pci_disable_device(pdev);
			return err;
		}
	}

	err = pci_request_mem_regions(pdev, nitrox_driver_name);
	if (err) {
		pci_disable_device(pdev);
		dev_err(&pdev->dev, "Failed to request mem regions!\n");
		return err;
	}
	pci_set_master(pdev);

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev) {
		err = -ENOMEM;
		goto ndev_fail;
	}

	pci_set_drvdata(pdev, ndev);
	ndev->pdev = pdev;

	/* add to device list */
	nitrox_add_to_devlist(ndev);

	ndev->hw.vendor_id = pdev->vendor;
	ndev->hw.device_id = pdev->device;
	ndev->hw.revision_id = pdev->revision;
	/* command timeout in jiffies */
	ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
	ndev->node = dev_to_node(&pdev->dev);
	if (ndev->node == NUMA_NO_NODE)
		ndev->node = 0;

	ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
				 pci_resource_len(pdev, 0));
	if (!ndev->bar_addr) {
		err = -EIO;
		goto ioremap_err;
	}
	/* allocate command queues based on cpus, max queues are 64 */
	ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
	ndev->qlen = qlen;

	err = nitrox_pf_sw_init(ndev);
	if (err)
		goto ioremap_err;

	err = nitrox_pf_hw_init(ndev);
	if (err)
		goto pf_hw_fail;

	nitrox_debugfs_init(ndev);

	/* clear the statistics */
	atomic64_set(&ndev->stats.posted, 0);
	atomic64_set(&ndev->stats.completed, 0);
	atomic64_set(&ndev->stats.dropped, 0);

	atomic_set(&ndev->state, __NDEV_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	err = nitrox_crypto_register();
	if (err)
		goto crypto_fail;

	return 0;

crypto_fail:
	nitrox_debugfs_exit(ndev);
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
pf_hw_fail:
	nitrox_pf_sw_cleanup(ndev);
ioremap_err:
	nitrox_remove_from_devlist(ndev);
	kfree(ndev);
	pci_set_drvdata(pdev, NULL);
ndev_fail:
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	return err;
}

/**
 * nitrox_remove - Unbind the driver from the device.
 * @pdev: PCI device information struct
 */
static void nitrox_remove(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!ndev)
		return;

	if (!refcount_dec_and_test(&ndev->refcnt)) {
		dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
			refcount_read(&ndev->refcnt));
		return;
	}

	dev_info(DEV(ndev), "Removing Device %x:%x\n",
		 ndev->hw.vendor_id, ndev->hw.device_id);

	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	nitrox_remove_from_devlist(ndev);

#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV */
	nitrox_sriov_configure(pdev, 0);
#endif
	nitrox_crypto_unregister();
	nitrox_debugfs_exit(ndev);
	nitrox_pf_sw_cleanup(ndev);

	iounmap(ndev->bar_addr);
	kfree(ndev);

	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void nitrox_shutdown(struct pci_dev *pdev)
{
	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver nitrox_driver = {
	.name = nitrox_driver_name,
	.id_table = nitrox_pci_tbl,
	.probe = nitrox_probe,
	.remove = nitrox_remove,
	.shutdown = nitrox_shutdown,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = nitrox_sriov_configure,
#endif
};

module_pci_driver(nitrox_driver);

MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>");
MODULE_DESCRIPTION("Cavium CNN55XX PF Driver " DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_FIRMWARE(SE_FW);