// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for modern virtio-pci device
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 * Based on virtio_pci_modern.c.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>

#define VP_VDPA_QUEUE_MAX	256
#define VP_VDPA_DRIVER_NAME	"vp_vdpa"
#define VP_VDPA_NAME_SIZE	256

struct vp_vring {
	void __iomem *notify;
	char msix_name[VP_VDPA_NAME_SIZE];
	struct vdpa_callback cb;
	resource_size_t notify_pa;
	int irq;
};

struct vp_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_modern_device mdev;
	struct vp_vring *vring;
	struct vdpa_callback config_cb;
	char msix_name[VP_VDPA_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

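/* Recover the driver-private state from the embedded vdpa_device. */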
static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vp_vdpa, vdpa);
}

static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	return &vp_vdpa->mdev;
}

static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_features(mdev);
}

static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_features(mdev, features);

	return 0;
}

static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_status(mdev);
}

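/*
 * Tear down all MSI-X state: detach each virtqueue and the config space
 * from their vectors, free the requested IRQs and release the vectors.
 */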
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i;

	for (i = 0; i < vp_vdpa->queues; i++) {
		if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
				      &vp_vdpa->vring[i]);
			vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
		vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (vp_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		vp_vdpa->vectors = 0;
	}
}

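/* MSI-X handlers: forward interrupts to the callbacks registered via vDPA. */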
static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
{
	struct vp_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
{
	struct vp_vdpa *vp_vdpa = arg;

	if (vp_vdpa->config_cb.callback)
		return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);

	return IRQ_HANDLED;
}

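/*
 * Allocate one MSI-X vector per virtqueue plus one for config interrupts,
 * request an IRQ for each, and program the device so that vector i serves
 * virtqueue i and the last vector serves the config space.
 */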
static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i, ret, irq;
	int queues = vp_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to allocate irq vectors want %d but %d\n",
			vectors, ret);
		return ret;
	}

	vp_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
			 "vp-vdpa[%s]-%d", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       vp_vdpa_vq_handler,
				       0, vp_vdpa->vring[i].msix_name,
				       &vp_vdpa->vring[i]);
		if (ret) {
			dev_err(&pdev->dev,
				"vp_vdpa: fail to request irq for vq %d\n", i);
			goto err;
		}
		vp_modern_queue_vector(mdev, i, i);
		vp_vdpa->vring[i].irq = irq;
	}

	snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
			       vp_vdpa->msix_name, vp_vdpa);
	if (ret) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to request irq for config interrupt\n");
		goto err;
	}
	vp_modern_config_vector(mdev, queues);
	vp_vdpa->config_irq = irq;

	return 0;
err:
	vp_vdpa_free_irq(vp_vdpa);
	return ret;
}

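/*
 * DRIVER_OK is the first point at which the device may raise interrupts,
 * so set up MSI-X on the transition into DRIVER_OK before propagating the
 * new status to the device.
 */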
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 s = vp_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vp_vdpa_request_irq(vp_vdpa);
	}

	vp_modern_set_status(mdev, status);
}

static int vp_vdpa_reset(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 s = vp_vdpa_get_status(vdpa);

	vp_modern_set_status(mdev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		vp_vdpa_free_irq(vp_vdpa);

	return 0;
}

static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VP_VDPA_QUEUE_MAX;
}

static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				struct vdpa_vq_state *state)
{
	/* Reading back virtqueue state is not supported by the virtio
	 * specification, so we return -EOPNOTSUPP here. This means we
	 * can't support live migration or vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}

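/*
 * The only virtqueue state we can "restore" is the one a freshly reset
 * device already has: accept that and reject everything else.
 */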
static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
				      const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_split *split = &state->split;

	if (split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
				       const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_packed *packed = &state->packed;

	if (packed->last_avail_counter == 1 &&
	    packed->last_avail_idx == 0 &&
	    packed->last_used_counter == 1 &&
	    packed->last_used_idx == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				const struct vdpa_vq_state *state)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	/* Note that this is not supported by the virtio specification.
	 * But if the requested state happens to equal the device's
	 * initial state, we can let it go.
	 */
	if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
	    !vp_modern_get_queue_enable(mdev, qid)) {
		if (vp_modern_get_driver_features(mdev) &
		    BIT_ULL(VIRTIO_F_RING_PACKED))
			return vp_vdpa_set_vq_state_packed(vdpa, state);
		else
			return vp_vdpa_set_vq_state_split(vdpa, state);
	}

	return -EOPNOTSUPP;
}

static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			      struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->vring[qid].cb = *cb;
}

static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
				 u16 qid, bool ready)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_enable(mdev, qid, ready);
}

static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_queue_enable(mdev, qid);
}

static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
			       u32 num)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_size(mdev, qid, num);
}

static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_queue_address(mdev, qid, desc_area,
				driver_area, device_area);

	return 0;
}

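/* Notify the device by writing the queue index to its notification area. */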
static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}

static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_generation(mdev);
}

static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.device;
}

static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.vendor;
}

static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return PAGE_SIZE;
}

static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->device_len;
}

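/*
 * Read the device config space byte by byte, retrying until the generation
 * counter is stable, so we never return a torn (partially updated) snapshot.
 */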
static void vp_vdpa_get_config(struct vdpa_device *vdpa,
			       unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 old, new;
	u8 *p;
	int i;

	do {
		old = vp_ioread8(&mdev->common->config_generation);
		p = buf;
		for (i = 0; i < len; i++)
			*p++ = vp_ioread8(mdev->device + offset + i);

		new = vp_ioread8(&mdev->common->config_generation);
	} while (old != new);
}

static void vp_vdpa_set_config(struct vdpa_device *vdpa,
			       unsigned int offset, const void *buf,
			       unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		vp_iowrite8(*p++, mdev->device + offset + i);
}

static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->config_cb = *cb;
}

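/*
 * Expose the physical address and size of a virtqueue's doorbell so that
 * upper layers (e.g. vhost-vDPA) can map it and kick the device directly.
 */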
static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct vdpa_notification_area notify;

	notify.addr = vp_vdpa->vring[qid].notify_pa;
	notify.size = mdev->notify_offset_multiplier;

	return notify;
}

static const struct vdpa_config_ops vp_vdpa_ops = {
	.get_features	= vp_vdpa_get_features,
	.set_features	= vp_vdpa_set_features,
	.get_status	= vp_vdpa_get_status,
	.set_status	= vp_vdpa_set_status,
	.reset		= vp_vdpa_reset,
	.get_vq_num_max	= vp_vdpa_get_vq_num_max,
	.get_vq_state	= vp_vdpa_get_vq_state,
	.get_vq_notification = vp_vdpa_get_vq_notification,
	.set_vq_state	= vp_vdpa_set_vq_state,
	.set_vq_cb	= vp_vdpa_set_vq_cb,
	.set_vq_ready	= vp_vdpa_set_vq_ready,
	.get_vq_ready	= vp_vdpa_get_vq_ready,
	.set_vq_num	= vp_vdpa_set_vq_num,
	.set_vq_address	= vp_vdpa_set_vq_address,
	.kick_vq	= vp_vdpa_kick_vq,
	.get_generation	= vp_vdpa_get_generation,
	.get_device_id	= vp_vdpa_get_device_id,
	.get_vendor_id	= vp_vdpa_get_vendor_id,
	.get_vq_align	= vp_vdpa_get_vq_align,
	.get_config_size = vp_vdpa_get_config_size,
	.get_config	= vp_vdpa_get_config,
	.set_config	= vp_vdpa_set_config,
	.set_config_cb	= vp_vdpa_set_config_cb,
};

static void vp_vdpa_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

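/*
 * Probe: enable the PCI device, discover the modern virtio capabilities,
 * allocate the vDPA device, map each virtqueue's notification area and
 * finally register with the vDPA bus.
 */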
static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct virtio_pci_modern_device *mdev;
	struct device *dev = &pdev->dev;
	struct vp_vdpa *vp_vdpa;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
				    dev, &vp_vdpa_ops, NULL, false);
	if (IS_ERR(vp_vdpa)) {
		dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
		return PTR_ERR(vp_vdpa);
	}

	mdev = &vp_vdpa->mdev;
	mdev->pci_dev = pdev;

	ret = vp_modern_probe(mdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vp_vdpa);

	vp_vdpa->vdpa.dma_dev = &pdev->dev;
	vp_vdpa->queues = vp_modern_get_num_queues(mdev);

	ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to add devres for freeing irq vectors\n");
		goto err;
	}

	vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
				      sizeof(*vp_vdpa->vring),
				      GFP_KERNEL);
	if (!vp_vdpa->vring) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Failed to allocate virtqueues\n");
		goto err;
	}

	for (i = 0; i < vp_vdpa->queues; i++) {
		vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		vp_vdpa->vring[i].notify =
			vp_modern_map_vq_notify(mdev, i,
						&vp_vdpa->vring[i].notify_pa);
		if (!vp_vdpa->vring[i].notify) {
			ret = -EINVAL;
			dev_warn(&pdev->dev, "Failed to map vq notify %d\n", i);
			goto err;
		}
	}
	vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&vp_vdpa->vdpa.dev);
	return ret;
}

static void vp_vdpa_remove(struct pci_dev *pdev)
{
	struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&vp_vdpa->vdpa);
	vp_modern_remove(&vp_vdpa->mdev);
}

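/*
 * No static ID table: the driver only binds to IDs added at runtime, e.g.
 * (the IDs below are only an example, here a modern virtio-net device):
 *
 *   echo "1af4 1041" > /sys/bus/pci/drivers/vp-vdpa/new_id
 */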
static struct pci_driver vp_vdpa_driver = {
	.name		= "vp-vdpa",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vp_vdpa_probe,
	.remove		= vp_vdpa_remove,
};

module_pci_driver(vp_vdpa_driver);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");