// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR		"Intel Corporation"
#define IFCVF_DRIVER_NAME	"ifcvf"

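/* MSI-X handler for the device config change interrupt: forward the event
 * to the config callback registered via set_config_cb, if any.
 */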
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;

	if (vf->config_cb.callback)
		return vf->config_cb.callback(vf->config_cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

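/* Handler for the MSI-X vector shared by all virtqueues: the shared vector
 * gives no per-queue indication, so invoke every vring's callback.
 */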
static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	struct vring_info *vring;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vring = &vf->vring[i];
		if (vring->cb.callback)
			vring->cb.callback(vring->cb.private);
	}

	return IRQ_HANDLED;
}

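/* Handler for the single MSI-X vector shared by the config interrupt and
 * all virtqueues: read the ISR status byte to detect a config change, then
 * service the virtqueues.
 */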
static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	u8 isr;

	isr = vp_ioread8(vf->isr);
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		ifcvf_config_changed(irq, arg);

	return ifcvf_vqs_reused_intr_handler(irq, arg);
}

static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		if (vf->vring[i].irq != -EINVAL) {
			devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
			vf->vring[i].irq = -EINVAL;
		}
	}
}

static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;

	if (vf->vqs_reused_irq != -EINVAL) {
		devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
		vf->vqs_reused_irq = -EINVAL;
	}
}

static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = &adapter->vf;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ifcvf_free_per_vq_irq(adapter);
	else
		ifcvf_free_vqs_reused_irq(adapter);
}

static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;

	if (vf->config_irq == -EINVAL)
		return;

	/* If the irq is shared by all vqs and the config interrupt,
	 * it was already freed in ifcvf_free_vq_irq, so only free the
	 * config irq here when msix_vector_status != MSIX_VECTOR_DEV_SHARED.
	 */
	if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
		devm_free_irq(&pdev->dev, vf->config_irq, vf);
		vf->config_irq = -EINVAL;
	}
}

static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ifcvf_free_vq_irq(adapter);
	ifcvf_free_config_irq(adapter);
	ifcvf_free_irq_vectors(pdev);
}

/* ifcvf MSI-X vector allocator: this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors, or a negative
 * value on failure.
 */
static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int max_intr, ret;

	/* all queues and config interrupt */
	max_intr = vf->nr_vring + 1;
	ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
		return ret;
	}

	if (ret < max_intr)
		IFCVF_INFO(pdev,
			   "Requested %u vectors, however only %u allocated, lower performance\n",
			   max_intr, ret);

	return ret;
}

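/* Request one irq per virtqueue: MSI-X vector i services vq i. The config
 * interrupt takes the last vector (nr_vring) and is requested separately
 * in ifcvf_request_config_irq().
 */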
static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i, vector, ret, irq;

	vf->vqs_reused_irq = -EINVAL;
	for (i = 0; i < vf->nr_vring; i++) {
		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
		vector = i;
		irq = pci_irq_vector(pdev, vector);
		ret = devm_request_irq(&pdev->dev, irq,
				       ifcvf_vq_intr_handler, 0,
				       vf->vring[i].msix_name,
				       &vf->vring[i]);
		if (ret) {
			IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
			goto err;
		}

		vf->vring[i].irq = irq;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

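/* Request a single irq that is shared by all virtqueues: every vq is bound
 * to MSI-X vector 0, while the config interrupt keeps its own vector
 * (requested in ifcvf_request_config_irq()).
 */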
static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_vqs_reused_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

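/* Only one MSI-X vector could be allocated: bind the config interrupt and
 * all virtqueues to vector 0 and dispatch everything from
 * ifcvf_dev_intr_handler() via the ISR status register.
 */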
static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_dev_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	vf->config_irq = irq;
	ret = ifcvf_set_config_vector(vf, vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = &adapter->vf;
	int ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ret = ifcvf_request_per_vq_irq(adapter);
	else
		ret = ifcvf_request_vqs_reused_irq(adapter);

	return ret;
}

static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int config_vector, ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		config_vector = vf->nr_vring;
	else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
		/* vector 0 for vqs and 1 for config interrupt */
		config_vector = 1;
	else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
		/* re-use the vqs vector */
		return 0;
	else
		return -EINVAL;

	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
		 pci_name(pdev));
	vf->config_irq = pci_irq_vector(pdev, config_vector);
	ret = devm_request_irq(&pdev->dev, vf->config_irq,
			       ifcvf_config_changed, 0,
			       vf->config_msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request config irq\n");
		goto err;
	}

	ret = ifcvf_set_config_vector(vf, config_vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

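/* Allocate MSI-X vectors and pick an interrupt layout: a dedicated vector
 * per vq plus one for the config interrupt when enough vectors are
 * available, a vector shared by all vqs plus a config vector when fewer,
 * and a single vector shared by everything when only one was allocated.
 */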
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = &adapter->vf;
	int nvectors, ret, max_intr;

	nvectors = ifcvf_alloc_vectors(adapter);
	if (nvectors <= 0)
		return -EFAULT;

	vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
	max_intr = vf->nr_vring + 1;
	if (nvectors < max_intr)
		vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;

	if (nvectors == 1) {
		vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
		ret = ifcvf_request_dev_irq(adapter);

		return ret;
	}

	ret = ifcvf_request_vq_irq(adapter);
	if (ret)
		return ret;

	ret = ifcvf_request_config_irq(adapter);
	if (ret)
		return ret;

	return 0;
}

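/* Start the hardware datapath; if ifcvf_start_hw() fails, report the error
 * by setting VIRTIO_CONFIG_S_FAILED in the device status.
 */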
static int ifcvf_start_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	u8 status;
	int ret;

	ret = ifcvf_start_hw(vf);
	if (ret < 0) {
		status = ifcvf_get_status(vf);
		status |= VIRTIO_CONFIG_S_FAILED;
		ifcvf_set_status(vf, status);
	}

	return ret;
}

static int ifcvf_stop_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	int i;

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].cb.callback = NULL;

	ifcvf_stop_hw(vf);

	return 0;
}

static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].last_avail_idx = 0;
		vf->vring[i].desc = 0;
		vf->vring[i].avail = 0;
		vf->vring[i].used = 0;
		vf->vring[i].ready = 0;
		vf->vring[i].cb.callback = NULL;
		vf->vring[i].cb.private = NULL;
	}

	ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return &adapter->vf;
}

static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	u32 type = vf->dev_type;
	u64 features;

	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
		features = ifcvf_get_features(vf);
	else {
		features = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return features;
}

static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	int ret;

	ret = ifcvf_verify_min_features(vf, features);
	if (ret)
		return ret;

	vf->req_features = features;

	return 0;
}

static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->req_features;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}

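/* On the first transition to DRIVER_OK, request the device interrupts and
 * start the datapath before writing the new status to the hardware; a
 * request_irq failure is reported by setting VIRTIO_CONFIG_S_FAILED.
 */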
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(adapter);
		if (ret) {
			status = ifcvf_get_status(vf);
			status |= VIRTIO_CONFIG_S_FAILED;
			ifcvf_set_status(vf, status);
			return;
		}

		if (ifcvf_start_datapath(adapter) < 0)
			IFCVF_ERR(adapter->pdev,
				  "Failed to set ifcvf vdpa status %u\n",
				  status);
	}

	ifcvf_set_status(vf, status);
}

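/* Reset the device: if it was running (DRIVER_OK), stop the datapath and
 * free the interrupts first, then clear the vring bookkeeping and reset
 * the hardware.
 */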
static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == 0)
		return 0;

	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
		ifcvf_stop_datapath(adapter);
		ifcvf_free_irq(adapter);
	}

	ifcvf_reset_vring(adapter);

	return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->split.avail_index = ifcvf_get_vq_state(vf, qid);
	return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].desc = desc_area;
	vf->vring[qid].avail = driver_area;
	vf->vring[qid].used = device_area;

	return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vp_ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;

	return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->config_size;
}

static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	return 0;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_read_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_write_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}

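/* Report a vq's dedicated irq; return -EINVAL when all vqs share one irq,
 * since there is no per-vq irq to hand out in that case.
 */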
static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
				 u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	if (vf->vqs_reused_irq < 0)
		return vf->vring[qid].irq;
	else
		return -EINVAL;
}

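/* Describe the vq notification (doorbell) area for mapping: the physical
 * address of the vq's notify register, sized by notify_off_multiplier, or
 * PAGE_SIZE when the multiplier is zero.
 */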
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = vf->vring[idx].notify_pa;
	if (!vf->notify_off_multiplier)
		area.size = PAGE_SIZE;
	else
		area.size = vf->notify_off_multiplier;

	return area;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_device_features = ifcvf_vdpa_get_device_features,
	.set_driver_features = ifcvf_vdpa_set_driver_features,
	.get_driver_features = ifcvf_vdpa_get_driver_features,
	.get_status = ifcvf_vdpa_get_status,
	.set_status = ifcvf_vdpa_set_status,
	.reset = ifcvf_vdpa_reset,
	.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
	.get_vq_state = ifcvf_vdpa_get_vq_state,
	.set_vq_state = ifcvf_vdpa_set_vq_state,
	.set_vq_cb = ifcvf_vdpa_set_vq_cb,
	.set_vq_ready = ifcvf_vdpa_set_vq_ready,
	.get_vq_ready = ifcvf_vdpa_get_vq_ready,
	.set_vq_num = ifcvf_vdpa_set_vq_num,
	.set_vq_address = ifcvf_vdpa_set_vq_address,
	.get_vq_irq = ifcvf_vdpa_get_vq_irq,
	.kick_vq = ifcvf_vdpa_kick_vq,
	.get_generation = ifcvf_vdpa_get_generation,
	.get_device_id = ifcvf_vdpa_get_device_id,
	.get_vendor_id = ifcvf_vdpa_get_vendor_id,
	.get_vq_align = ifcvf_vdpa_get_vq_align,
	.get_vq_group = ifcvf_vdpa_get_vq_group,
	.get_config_size = ifcvf_vdpa_get_config_size,
	.get_config = ifcvf_vdpa_get_config,
	.set_config = ifcvf_vdpa_set_config,
	.set_config_cb = ifcvf_vdpa_set_config_cb,
	.get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

static struct virtio_device_id id_table_blk[] = {
	{VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
	{0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
	u32 dev_type;

	/* This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
	 * so legacy devices and transitional devices in legacy
	 * mode will not work for vDPA; this driver will not
	 * drive devices with a legacy interface.
	 */

	if (pdev->device < 0x1040)
		dev_type = pdev->subsystem_device;
	else
		dev_type = pdev->device - 0x1040;

	return dev_type;
}

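/* Management device dev_add: name the adapter that was allocated at probe
 * time and register it on the vDPA bus. The adapter can be added only
 * once; after dev_del has cleared it, adding is no longer supported.
 */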
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct ifcvf_adapter *adapter;
	struct vdpa_device *vdpa_dev;
	struct pci_dev *pdev;
	struct ifcvf_hw *vf;
	int ret;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	if (!ifcvf_mgmt_dev->adapter)
		return -EOPNOTSUPP;

	adapter = ifcvf_mgmt_dev->adapter;
	vf = &adapter->vf;
	pdev = adapter->pdev;
	vdpa_dev = &adapter->vdpa;

	if (name)
		ret = dev_set_name(&vdpa_dev->dev, "%s", name);
	else
		ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

	ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
	if (ret) {
		put_device(&adapter->vdpa.dev);
		IFCVF_ERR(pdev, "Failed to register to vDPA bus");
		return ret;
	}

	return 0;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	_vdpa_unregister_device(dev);
	ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
	.dev_add = ifcvf_vdpa_dev_add,
	.dev_del = ifcvf_vdpa_dev_del
};

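/* PCI probe: enable the device, map the BARs, initialize the IFCVF hardware
 * layer, then create and register the vDPA management device through which
 * the adapter is later added to the vDPA bus.
 */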
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct device *dev = &pdev->dev;
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u32 dev_type;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed for adding devres for freeing irq vectors\n");
		return ret;
	}

	pci_set_master(pdev);

	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    dev, &ifc_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(adapter)) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return PTR_ERR(adapter);
	}

	vf = &adapter->vf;
	vf->dev_type = get_dev_type(pdev);
	vf->base = pcim_iomap_table(pdev);

	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		return ret;
	}

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].irq = -EINVAL;

	vf->hw_features = ifcvf_get_hw_features(vf);
	vf->config_size = ifcvf_get_config_size(vf);

	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
	if (!ifcvf_mgmt_dev) {
		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
		return -ENOMEM;
	}

	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
	ifcvf_mgmt_dev->mdev.device = dev;
	ifcvf_mgmt_dev->adapter = adapter;

	dev_type = get_dev_type(pdev);
	switch (dev_type) {
	case VIRTIO_ID_NET:
		ifcvf_mgmt_dev->mdev.id_table = id_table_net;
		break;
	case VIRTIO_ID_BLOCK:
		ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
		break;
	default:
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
		ret = -EOPNOTSUPP;
		goto err;
	}

	ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
	ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;

	adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;

	ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to initialize the management interfaces\n");
		goto err;
	}

	pci_set_drvdata(pdev, ifcvf_mgmt_dev);

	return 0;

err:
	kfree(ifcvf_mgmt_dev);
	return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = pci_get_drvdata(pdev);
	vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
	kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
	/* N3000 network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 N3000_DEVICE_ID,
			 PCI_VENDOR_ID_INTEL,
			 N3000_SUBSYS_DEVICE_ID) },
	/* C5000X-PL network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_NET) },
	/* C5000X-PL block device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_BLOCK,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_BLOCK) },

	{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
	.name     = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe    = ifcvf_probe,
	.remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");