// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}


/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}


/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
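
/*
 * Usage sketch (not part of the driver): a typical pin/unpin pair for a
 * kernel-internal buffer that must stay in VRAM while the device uses it.
 * Here "buf" is assumed to be an already initialized vmw_buffer_object.
 *
 *	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *
 *	... program the device with the buffer's VRAM offset ...
 *
 *	ret = vmw_bo_unpin(dev_priv, buf, false);
 */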

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->offset;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
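
/*
 * Example (sketch): vmw_bo_pin_reserved() requires the caller to hold the
 * buffer reservation, so a minimal pin sequence looks like:
 *
 *	ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	vmw_bo_pin_reserved(vbo, true);
 *	ttm_bo_unreserve(&vbo->base);
 *
 * The matching vmw_bo_pin_reserved(vbo, false) call must likewise be made
 * with the buffer reserved.
 */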


/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
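
/*
 * Example (sketch): writing through a cached map of a pinned buffer. No
 * explicit unmap is needed here; the map is torn down on move, swapout or
 * destruction as described above.
 *
 *	u32 *cmd = vmw_bo_map_and_cache(vbo);
 *
 *	if (!cmd)
 *		return -ENOMEM;
 *	cmd[0] = ...;
 */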


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}


/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 * Return: The size to account for this buffer object, in bytes.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
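
/*
 * Worked example (assuming 4 KiB pages and 64-bit pointers): a 1 MiB buffer
 * spans 256 pages, so page_array_size starts at
 * ttm_round_pot(256 * sizeof(void *)) = 2048 bytes, and another
 * ttm_round_pot(256 * sizeof(dma_addr_t)) = 2048 bytes are added when
 * map_mode is vmw_dma_alloc_coherent, on top of the fixed struct and
 * backend sizes computed above.
 */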


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	vmw_bo_unmap(vmw_bo);
	kfree(vmw_bo);
}


/**
 * vmw_user_bo_destroy - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);

	vmw_bo_unmap(&vmw_user_bo->vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}


/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible, acc_size,
			  NULL, NULL, bo_free);
	return ret;
}
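
/*
 * Example (sketch): creating a kernel-internal buffer object. On failure
 * vmw_bo_init() has already freed "vbo" through the supplied destructor.
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret;
 *
 *	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *	if (!vbo)
 *		return -ENOMEM;
 *
 *	ret = vmw_bo_init(dev_priv, vbo, size, &vmw_sys_placement, true,
 *			  &vmw_bo_bo_free);
 *	if (ret)
 *		return ret;
 */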


/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}


/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}


/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * assigned, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}


/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 * Return: Zero if the caller may access the buffer object, -EPERM otherwise.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}


/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		lret = dma_resv_wait_timeout_rcu
			(bo->base.resv, true, true,
			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->vbo.base);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 * Return: Zero on success, negative error code on error.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
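
/*
 * User-space view (sketch, assuming the libdrm drmCommandWrite() wrapper
 * and the DRM_VMW_SYNCCPU command): a grab/release pair bracketing direct
 * CPU access to a mapped buffer.
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op = drm_vmw_synccpu_grab,
 *		.handle = handle,
 *		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	... CPU reads and writes through the mmap'ed buffer ...
 *	arg.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */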


/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}


/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, Negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}
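
/*
 * Example (sketch): resolving a user handle to a refcounted buffer object
 * and dropping the reference when done; "tfile" and "handle" come from the
 * calling ioctl.
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret;
 *
 *	ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
 *	if (ret)
 *		return ret;
 *	... operate on vbo ...
 *	vmw_bo_unreference(&vbo);
 */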

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_bo and returns a pointer to the
 * struct vmw_buffer_object it derives from without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_reference_unless_doomed(). If this function returns successfully it
 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping or
 * scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}
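
/*
 * Example (sketch): the non-refcounting lookup must be paired with
 * vmw_user_bo_noref_release(), with no sleeping or scheduling calls in
 * between, e.g. during command-buffer validation:
 *
 *	struct vmw_buffer_object *vbo;
 *
 *	vbo = vmw_user_bo_noref_lookup(tfile, handle);
 *	if (IS_ERR(vbo))
 *		return PTR_ERR(vbo);
 *	... non-sleeping use, or take a reference for persistent use ...
 *	vmw_user_bo_noref_release();
 */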

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, Negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}
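
/*
 * Example (sketch): fencing a reserved buffer after queuing device commands
 * that access it. Passing a NULL fence makes the function emit a new one.
 *
 *	ret = ttm_bo_reserve(bo, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	... queue commands referencing the buffer ...
 *	vmw_bo_fence_single(bo, NULL);
 *	ttm_bo_unreserve(bo);
 */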


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
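
/*
 * Worked example: for a 1024x768, 32 bpp dumb buffer the computation above
 * gives pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and
 * size = 4096 * 768 = 3145728 bytes, which is what gets allocated.
 */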


/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}


/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}


/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *vbo;

	if (mem == NULL)
		return;

	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}