/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_display.h"

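/**
 * amdgpu_gem_object_free - free the GEM object backing store
 *
 * @gobj: GEM object to free
 *
 * Unregisters any MMU notifier and drops the reference the GEM object
 * holds on the underlying amdgpu buffer object.
 */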
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

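/**
 * amdgpu_gem_object_create - create an amdgpu-backed GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment in bytes, raised to at least PAGE_SIZE
 * @initial_domain: preferred memory domain (AMDGPU_GEM_DOMAIN_*)
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @type: TTM buffer object type
 * @resv: reservation object to share, may be NULL
 * @obj: resulting GEM object
 *
 * On allocation failure the CPU access requirement is dropped first, and
 * VRAM-only requests fall back to GTT, before the error is returned.
 */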
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
retry:
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%lu, %u, %d, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->gem_base;

	return 0;
}

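/**
 * amdgpu_gem_force_release - release all GEM objects of all clients
 *
 * @adev: amdgpu device
 *
 * Used during driver teardown to drop objects that user space still holds
 * handles to; warns because well-behaved clients should already be gone.
 */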
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va)
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	else
		++bo_va->ref_count;
	amdgpu_bo_unreserve(abo);
	return 0;
}

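/**
 * amdgpu_gem_object_close - called when a handle to the object is dropped
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the closing client
 *
 * Removes the bo_va from the client's VM once its reference count reaches
 * zero and clears the freed mappings from the page tables.
 */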
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
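
/**
 * amdgpu_gem_create_ioctl - create a GEM object from user space
 *
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_create in/out arguments
 * @filp: DRM file the handle is created for
 *
 * Validates the requested flags and domains, handles the GDS/GWS/OA size
 * units, and returns a handle to the new object in args->out.
 */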
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct reservation_object *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, ttm_bo_type_device, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}
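
/*
 * Illustrative sketch (not part of the driver): user space typically
 * reaches this ioctl through libdrm. Assuming a valid render-node fd,
 * a 4 KiB GTT buffer could be requested roughly like this; fd and the
 * use_handle() helper are hypothetical:
 *
 *	union drm_amdgpu_gem_create args = {};
 *
 *	args.in.bo_size = 4096;
 *	args.in.alignment = 4096;
 *	args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
 *	if (drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE,
 *				&args, sizeof(args)) == 0)
 *		use_handle(args.out.handle);
 */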
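/**
 * amdgpu_gem_userptr_ioctl - create a GEM object from anonymous user memory
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_userptr arguments
 * @filp: DRM file the handle is created for
 *
 * Wraps a page-aligned range of user memory in a GTT buffer object.
 * Writable mappings require AMDGPU_GEM_USERPTR_REGISTER so an MMU
 * notifier tracks the pages; AMDGPU_GEM_USERPTR_VALIDATE pre-validates
 * the pages in GTT.
 */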
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}
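/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a handle
 *
 * @filp: DRM file the handle belongs to
 * @dev: DRM device
 * @handle: GEM handle to map
 * @offset_p: returned offset to pass to mmap()
 *
 * Userptr objects and objects created without CPU access cannot be
 * mapped and return -EPERM.
 */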
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

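/**
 * amdgpu_gem_mmap_ioctl - thin ioctl wrapper around amdgpu_mode_dumb_mmap
 *
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_mmap in/out arguments
 * @filp: DRM file the handle belongs to
 */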
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}
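
/*
 * Worked example (assuming HZ=250): a caller passing an absolute timeout
 * one second in the future gets roughly nsecs_to_jiffies(NSEC_PER_SEC)
 * == 250 jiffies back, a timeout already in the past yields 0, and a
 * value with the sign bit set is treated as "wait forever".
 */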
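/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 *
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_wait_idle in/out arguments
 * @filp: DRM file the handle belongs to
 *
 * Waits on all fences in the object's reservation object up to the given
 * absolute timeout; args->out.status is 0 when the BO is idle.
 */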
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

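/**
 * amdgpu_gem_metadata_ioctl - get or set buffer metadata and tiling flags
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_metadata arguments
 * @filp: DRM file the handle belongs to
 */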
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_directories(adev, vm);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

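/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the client's virtual address space
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_va arguments
 * @filp: DRM file of the client VM
 *
 * Handles the MAP, UNMAP, CLEAR and REPLACE operations after checking the
 * requested address against the reserved area and the VA hole.
 */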
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_VA_HOLE_START &&
	    args->va_address < AMDGPU_VA_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_VA_HOLE_START,
			AMDGPU_VA_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_VA_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
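
/*
 * Illustrative sketch (not part of the driver): mapping a BO at a fixed
 * GPU address from user space via libdrm, assuming a valid fd and GEM
 * handle; the address and bo_size values are hypothetical examples:
 *
 *	struct drm_amdgpu_gem_va va = {};
 *
 *	va.handle = handle;
 *	va.operation = AMDGPU_VA_OP_MAP;
 *	va.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
 *	va.va_address = 0x400000;	(past the reserved area)
 *	va.offset_in_bo = 0;
 *	va.map_size = bo_size;		(page aligned)
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
 */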
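/**
 * amdgpu_gem_op_ioctl - query creation info or change the preferred placement
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_op arguments
 * @filp: DRM file the handle belongs to
 */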
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

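/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: DRM file the handle is created for
 * @dev: DRM device
 * @args: width, height and bpp in; pitch, size and handle out
 *
 * Allocates a CPU-accessible buffer in a domain suitable for display.
 */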
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u32 domain;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
		seq_printf((m), " " #flag);		\
	}

static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->gem_base.dma_buf);
	attachment = READ_ONCE(bo->gem_base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}