// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions. And thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

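/*
 * Per-device state for a vDPA device exposed through the vhost
 * interface. One instance backs one character device node
 * (/dev/vhost-vdpa-N); "opened" enforces a single opener at a time
 * and "completion" lets the remove path wait for the last release.
 */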
struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

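/*
 * Runs in the vhost worker when userspace signals the kick eventfd:
 * simply forward the kick to the vDPA parent driver, using the queue's
 * offset in the vqs array as the virtqueue index.
 */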
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

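/*
 * Try to let the vq interrupt bypass the host interrupt path: register
 * the call eventfd as an irq bypass producer keyed on the device
 * interrupt for this queue. Failure is logged but not fatal; the
 * interrupt then takes the regular eventfd signalling path instead.
 */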
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

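/*
 * Status writes are validated so that userspace can only add status
 * bits or perform a full reset (writing 0). Crossing DRIVER_OK in
 * either direction also sets up or tears down the vq interrupt bypass
 * producers, since vq interrupts are only expected while the device is
 * driven.
 */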
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless resetting
	 * the status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx)
		eventfd_ctx_put(v->config_ctx);
}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	/*
	 * vhost_vdpa_config_cb() expects the vhost_vdpa instance as its
	 * private data, not the vdpa device.
	 */
	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx))
		return PTR_ERR(v->config_ctx);

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

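/*
 * Per-virtqueue ioctls. The queue index is validated and sanitized
 * with array_index_nospec() first. VHOST_GET_VRING_BASE must pull the
 * current available index out of the device before the generic vhost
 * handler copies it to userspace; the remaining commands are handled
 * by vhost_vring_ioctl() and then mirrored into the vDPA device.
 */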
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

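/*
 * Install one IOVA->PA mapping. The entry is first recorded in the
 * software IOTLB, then pushed down one of three paths: a parent driver
 * that maps incrementally (dma_map), a parent that consumes the whole
 * translation table at once (set_map, deferred while a batch is open),
 * or the platform IOMMU domain as the fallback.
 */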
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
	else
		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

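/*
 * Handle a VHOST_IOTLB_UPDATE message: pin the userspace range with
 * pin_user_pages() one page_list-worth at a time, coalesce physically
 * contiguous runs of pages, and map each run with a single
 * vhost_vdpa_map() call. Pinned pages are accounted against
 * RLIMIT_MEMLOCK via mm->pinned_vm.
 */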
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 iova = msg->iova;
	long pinned;
	int ret = 0;

	if (msg->iova < v->range.first ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk pinned so far */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     msg->perm);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += pinned << PAGE_SHIFT;
		npages -= pinned;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vdpa_map(), hence
			 * the corresponding unpinning will be handled by
			 * vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, msg->iova, msg->size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

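/*
 * Entry point for IOTLB messages written to the char device. Updates
 * and invalidations operate on the pinned mappings above;
 * BATCH_BEGIN/BATCH_END bracket a series of updates so that set_map
 * parents see a single, final translation table instead of one call
 * per message.
 */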
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

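/*
 * Parents that translate DMA themselves (dma_map or set_map) need no
 * host IOMMU domain. For everyone else, allocate a domain on the DMA
 * device's bus and attach it, so that vhost_vdpa_map() can program the
 * platform IOMMU directly.
 */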
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* The device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

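/*
 * Establish the usable IOVA window, preferring what the parent driver
 * reports, then the IOMMU aperture, and finally the whole 64-bit
 * space. Userspace reads this back via VHOST_VDPA_GET_IOVA_RANGE.
 */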
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct iommu_domain_geometry geo;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain &&
		   !iommu_domain_get_attr(v->domain,
					  DOMAIN_ATTR_GEOMETRY, &geo) &&
		   geo.force_aperture) {
		range->first = geo.aperture_start;
		range->last = geo.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

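/*
 * A vhost-vdpa node supports a single opener: the "opened" flag is
 * claimed with a cmpxchg and only dropped on release. Opening resets
 * the device and builds the vhost_dev, the software IOTLB, and the
 * IOMMU domain from scratch.
 */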
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < v->nvqs; i++) {
		vq = &v->vqs[i];
		if (vq->call_ctx.producer.irq)
			irq_bypass_unregister_producer(&vq->call_ctx.producer);
	}
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

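/*
 * Doorbell mmap support: each vq notification area can be mapped into
 * userspace as one write-only page, letting the VMM ring the device
 * doorbell without a syscall. The PFN is inserted lazily from the
 * fault handler rather than at mmap() time.
 */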
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support the doorbell which sits on the page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

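/*
 * Bus probe: allocate the per-device state, reserve a minor, and
 * publish the character device. The embedded struct device owns the
 * final kfree() through vhost_vdpa_release_dev(), which is why the
 * error path is a bare put_device().
 */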
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

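/*
 * Bus remove: take the cdev offline, then spin until the open slot can
 * be claimed; if a file is still open, wait on the completion
 * signalled by the final release before dropping the last reference.
 */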
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
