/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

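/*
 * Adopt a freshly acquired sg_table as the object's backing store: flush any
 * stale cachelines, record which page sizes are available for later GTT
 * insertion and, if the object is shrinkable, put it on the appropriate
 * shrinker list. Must be called with obj->mm.lock held.
 */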
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

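/*
 * Fetch the backing pages from the object's backend via obj->ops->get_pages().
 * The caller must hold obj->mm.lock; an object already marked purgeable is
 * refused with -EFAULT.
 */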
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

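/* Drop every entry cached by i915_gem_object_get_sg() in the lookup radixtree. */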
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

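/*
 * Mappings created via page_address() need no teardown; only explicit
 * vmap()/vmap_pfn() ranges have to be released.
 */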
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

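/*
 * Detach the sg_table from the object: take it off the shrinker lists, drop
 * any kernel mapping and invalidate the cached page lookups. The caller
 * assumes ownership of the returned pages, which may be NULL or an ERR_PTR
 * if there is nothing to release.
 */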
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

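/*
 * Release the object's backing storage back to the backend if no one still
 * holds a page pin; returns -EBUSY while the pages remain pinned.
 */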
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);
	return vaddr;
}

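/*
 * Map objects without struct pages (e.g. local memory): translate each DMA
 * address into a PFN within the memory region's iomap and build a
 * write-combined vmap_pfn() mapping. Only I915_MAP_WC is supported here.
 */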
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	if (type != I915_MAP_WC)
		return NULL;

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return NULL;
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);
	return vaddr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		if (GEM_WARN_ON(type == I915_MAP_WC &&
				!static_cpu_has(X86_FEATURE_PAT)))
			ptr = NULL;
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}

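/*
 * Flush CPU writes made through a pinned kernel mapping so they become
 * visible to the GPU: objects that are cache coherent for writes, or mapped
 * write-combined, only need the write barrier; otherwise the dirty range is
 * clflushed.
 */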
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

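/*
 * Return the scatterlist element covering page index n of the object and,
 * via *offset, the page offset of n within that element. The pages must be
 * pinned for the duration of the lookup.
 */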
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

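/* Return the struct page backing page index n of a struct-page object. */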
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

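/*
 * Return the DMA address of page index n and, optionally via *len, the number
 * of bytes remaining in that scatterlist element.
 */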
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

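/* As above, but without reporting the remaining length of the sg element. */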
dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}