// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drmP.h>

#if defined(CONFIG_X86)
#include <drm/drm_cache.h>
#endif
#include <linux/errno.h>
#include <linux/mm.h>

#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_shbuf.h"

struct xen_drm_front_shbuf_ops {
	/*
	 * Calculate the number of grefs required to handle this buffer,
	 * e.g. whether grefs are required for the page directory only
	 * or for the buffer pages as well.
	 */
	void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
	/* Fill the page directory according to the para-virtual display protocol. */
	void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
	/* Claim grant references for the pages of the buffer. */
	int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
				     grant_ref_t *priv_gref_head, int gref_idx);
	/* Map grant references of the buffer. */
	int (*map)(struct xen_drm_front_shbuf *buf);
	/* Unmap grant references of the buffer. */
	int (*unmap)(struct xen_drm_front_shbuf *buf);
};

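/*
 * Two implementations of these ops exist below: backend_ops for buffers
 * allocated by the backend (cfg->be_alloc) and local_ops for buffers whose
 * pages are granted by this (guest) domain; xen_drm_front_shbuf_alloc()
 * selects between them.
 */
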
grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
{
	if (!buf->grefs)
		return GRANT_INVALID_REF;

	return buf->grefs[0];
}

int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
{
	if (buf->ops->map)
		return buf->ops->map(buf);

	/* no need to map own grant references */
	return 0;
}

int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
{
	if (buf->ops->unmap)
		return buf->ops->unmap(buf);

	/* no need to unmap own grant references */
	return 0;
}

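/*
 * Flush CPU caches after the buffer contents have been updated, so the
 * backend sees consistent data: only x86 uses an explicit cache flush
 * here, hence the CONFIG_X86 guard around drm_clflush_pages().
 */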
void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
{
#if defined(CONFIG_X86)
	drm_clflush_pages(buf->pages, buf->num_pages);
#endif
}

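/*
 * End foreign access for all granted references and free the bookkeeping
 * structures. Safe to call on a partially initialized buffer, which the
 * error path of xen_drm_front_shbuf_alloc() relies upon.
 */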
void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
{
	if (buf->grefs) {
		int i;

		for (i = 0; i < buf->num_grefs; i++)
			if (buf->grefs[i] != GRANT_INVALID_REF)
				gnttab_end_foreign_access(buf->grefs[i],
							  0, 0UL);
	}
	kfree(buf->grefs);
	kfree(buf->directory);
	kfree(buf);
}

/*
 * Number of grefs a single page can hold, taking the
 * struct xendispl_page_directory header into account.
 */
#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
		offsetof(struct xendispl_page_directory, gref)) / \
		sizeof(grant_ref_t))
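
/*
 * Layout of a single page of the page directory, as defined by
 * struct xendispl_page_directory in xen/interface/io/displif.h:
 * the page starts with gref_dir_next_page (the gref of the next
 * directory page, or GRANT_INVALID_REF for the last one), followed
 * by an array of grefs describing the buffer pages themselves.
 */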

static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
{
	/* number of pages the page directory consumes itself */
	return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
}

static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
{
	/* only for pages the page directory consumes itself */
	buf->num_grefs = get_num_pages_dir(buf);
}

static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
{
	/*
	 * number of pages the page directory consumes itself
	 * plus grefs for the buffer pages
	 */
	buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
}

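/*
 * Worked example, assuming 4 KiB pages and 32-bit grant references:
 * XEN_DRM_NUM_GREFS_PER_PAGE = (4096 - 4) / 4 = 1023, so a 1 MiB
 * buffer (256 pages) needs a single directory page. That makes
 * num_grefs = 1 for a backend-allocated buffer and 1 + 256 = 257
 * for a guest-allocated one.
 */
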
#define xen_page_to_vaddr(page) \
		((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))

static int backend_unmap(struct xen_drm_front_shbuf *buf)
{
	struct gnttab_unmap_grant_ref *unmap_ops;
	int i, ret;

	if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
		return 0;

	unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
			    GFP_KERNEL);
	if (!unmap_ops) {
		DRM_ERROR("Failed to get memory while unmapping\n");
		return -ENOMEM;
	}

	for (i = 0; i < buf->num_pages; i++) {
		phys_addr_t addr;

		addr = xen_page_to_vaddr(buf->pages[i]);
		gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
				    buf->backend_map_handles[i]);
	}

	ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
				buf->num_pages);

	for (i = 0; i < buf->num_pages; i++) {
		if (unlikely(unmap_ops[i].status != GNTST_okay))
			DRM_ERROR("Failed to unmap page %d: %d\n",
				  i, unmap_ops[i].status);
	}

	if (ret)
		DRM_ERROR("Failed to unmap grant references, ret %d\n", ret);

	kfree(unmap_ops);
	kfree(buf->backend_map_handles);
	buf->backend_map_handles = NULL;
	return ret;
}

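/*
 * Map the grant references that the backend has written into the page
 * directory: the directory pages themselves were granted to the backend
 * by this domain, and the backend filled them with the grefs of the
 * buffer pages it allocated.
 */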
static int backend_map(struct xen_drm_front_shbuf *buf)
{
	struct gnttab_map_grant_ref *map_ops = NULL;
	unsigned char *ptr;
	int ret, cur_gref, cur_dir_page, cur_page, grefs_left;

	map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
	if (!map_ops)
		return -ENOMEM;

	buf->backend_map_handles = kcalloc(buf->num_pages,
					   sizeof(*buf->backend_map_handles),
					   GFP_KERNEL);
	if (!buf->backend_map_handles) {
		kfree(map_ops);
		return -ENOMEM;
	}

	/*
	 * Read the page directory to get grefs from the backend: for a
	 * backend-allocated buffer we only allocate buf->grefs for the
	 * page directory, so buf->num_grefs holds the number of pages in
	 * the page directory itself.
	 */
	ptr = buf->directory;
	grefs_left = buf->num_pages;
	cur_page = 0;
	for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
		struct xendispl_page_directory *page_dir =
				(struct xendispl_page_directory *)ptr;
		int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;

		if (to_copy > grefs_left)
			to_copy = grefs_left;

		for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
			phys_addr_t addr;

			addr = xen_page_to_vaddr(buf->pages[cur_page]);
			gnttab_set_map_op(&map_ops[cur_page], addr,
					  GNTMAP_host_map,
					  page_dir->gref[cur_gref],
					  buf->xb_dev->otherend_id);
			cur_page++;
		}

		grefs_left -= to_copy;
		ptr += PAGE_SIZE;
	}
	ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);

	/* save the handles even on error, so we can unmap */
	for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
		buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
		if (unlikely(map_ops[cur_page].status != GNTST_okay))
			DRM_ERROR("Failed to map page %d: %d\n",
				  cur_page, map_ops[cur_page].status);
	}

	if (ret) {
		DRM_ERROR("Failed to map grant references, ret %d\n", ret);
		backend_unmap(buf);
	}

	kfree(map_ops);
	return ret;
}

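/*
 * For a backend-allocated buffer only the page directory pages are granted
 * by this domain, so only the gref_dir_next_page chain is filled in here;
 * the gref[] entries are written by the backend.
 */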
static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
{
	struct xendispl_page_directory *page_dir;
	unsigned char *ptr;
	int i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/* fill only the grefs for the page directory itself */
	for (i = 0; i < num_pages_dir - 1; i++) {
		page_dir = (struct xendispl_page_directory *)ptr;

		page_dir->gref_dir_next_page = buf->grefs[i + 1];
		ptr += PAGE_SIZE;
	}
	/* the last page must say there are no more pages */
	page_dir = (struct xendispl_page_directory *)ptr;
	page_dir->gref_dir_next_page = GRANT_INVALID_REF;
}

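/*
 * For a guest-allocated buffer the directory pages are chained just as
 * above, but the gref[] entries are also filled in, by copying the buffer
 * page grefs that follow the directory grefs in buf->grefs.
 */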
static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
{
	unsigned char *ptr;
	int cur_gref, grefs_left, to_copy, i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/*
	 * While copying, skip the grefs at the start: they are for the
	 * pages granted for the page directory itself.
	 */
	cur_gref = num_pages_dir;
	grefs_left = buf->num_pages;
	for (i = 0; i < num_pages_dir; i++) {
		struct xendispl_page_directory *page_dir =
				(struct xendispl_page_directory *)ptr;

		if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
			to_copy = grefs_left;
			page_dir->gref_dir_next_page = GRANT_INVALID_REF;
		} else {
			to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
			page_dir->gref_dir_next_page = buf->grefs[i + 1];
		}
		memcpy(&page_dir->gref, &buf->grefs[cur_gref],
		       to_copy * sizeof(grant_ref_t));
		ptr += PAGE_SIZE;
		grefs_left -= to_copy;
		cur_gref += to_copy;
	}
}

static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
				       grant_ref_t *priv_gref_head,
				       int gref_idx)
{
	int i, cur_ref, otherend_id;

	otherend_id = buf->xb_dev->otherend_id;
	for (i = 0; i < buf->num_pages; i++) {
		cur_ref = gnttab_claim_grant_reference(priv_gref_head);
		if (cur_ref < 0)
			return cur_ref;

		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
						xen_page_to_gfn(buf->pages[i]),
						0);
		buf->grefs[gref_idx++] = cur_ref;
	}
	return 0;
}

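/*
 * Grant the backend access in two steps: first the pages of the page
 * directory itself, then, for guest-allocated buffers, the buffer pages
 * via the grant_refs_for_buffer op.
 */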
static int grant_references(struct xen_drm_front_shbuf *buf)
{
	grant_ref_t priv_gref_head;
	int ret, i, j, cur_ref;
	int otherend_id, num_pages_dir;

	ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
	if (ret < 0) {
		DRM_ERROR("Cannot allocate grant references\n");
		return ret;
	}

	otherend_id = buf->xb_dev->otherend_id;
	j = 0;
	num_pages_dir = get_num_pages_dir(buf);
	for (i = 0; i < num_pages_dir; i++) {
		unsigned long frame;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			/* return the unclaimed references, so they don't leak */
			gnttab_free_grant_references(priv_gref_head);
			return cur_ref;
		}

		frame = xen_page_to_gfn(virt_to_page(buf->directory +
					PAGE_SIZE * i));
		gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
		buf->grefs[j++] = cur_ref;
	}

	if (buf->ops->grant_refs_for_buffer) {
		ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
		if (ret) {
			/* return the unclaimed references, so they don't leak */
			gnttab_free_grant_references(priv_gref_head);
			return ret;
		}
	}

	gnttab_free_grant_references(priv_gref_head);
	return 0;
}

static int alloc_storage(struct xen_drm_front_shbuf *buf)
{
	buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
	if (!buf->grefs)
		return -ENOMEM;

	buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
	if (!buf->directory)
		return -ENOMEM;

	return 0;
}

/*
 * For backend-allocated buffers we do not need grant_refs_for_buffer,
 * as the grant references for the buffer pages are allocated on the
 * backend side.
 */
static const struct xen_drm_front_shbuf_ops backend_ops = {
	.calc_num_grefs = backend_calc_num_grefs,
	.fill_page_dir = backend_fill_page_dir,
	.map = backend_map,
	.unmap = backend_unmap
};

/* For locally granted references we do not need to map/unmap the references */
static const struct xen_drm_front_shbuf_ops local_ops = {
	.calc_num_grefs = guest_calc_num_grefs,
	.fill_page_dir = guest_fill_page_dir,
	.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};

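/*
 * Allocate and grant a shared buffer described by @cfg. A minimal usage
 * sketch (error handling elided; assumes the caller already has a xenbus
 * device and an array of buffer pages):
 *
 *	struct xen_drm_front_shbuf_cfg cfg = {
 *		.xb_dev = xb_dev,
 *		.size = size,
 *		.pages = pages,
 *		.be_alloc = be_alloc,
 *	};
 *	struct xen_drm_front_shbuf *buf = xen_drm_front_shbuf_alloc(&cfg);
 *
 * The gref returned by xen_drm_front_shbuf_get_dir_start(buf) is then
 * passed to the backend, so it can find the page directory.
 */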
struct xen_drm_front_shbuf *
xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
{
	struct xen_drm_front_shbuf *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (cfg->be_alloc)
		buf->ops = &backend_ops;
	else
		buf->ops = &local_ops;

	buf->xb_dev = cfg->xb_dev;
	buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
	buf->pages = cfg->pages;

	buf->ops->calc_num_grefs(buf);

	ret = alloc_storage(buf);
	if (ret)
		goto fail;

	ret = grant_references(buf);
	if (ret)
		goto fail;

	buf->ops->fill_page_dir(buf);

	return buf;

fail:
	xen_drm_front_shbuf_free(buf);
	return ERR_PTR(ret);
}