/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

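/*
 * Publish a freshly populated sg_table as the object's backing store:
 * flush any stale CPU cachelines, reset the cached page iterator, record
 * the physical page sizes available for later GTT insertion and, for
 * shrinkable objects, register the object with the shrinker. Called by the
 * various ->get_pages() backends with obj->mm.lock held.
 */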
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
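	/*
	 * Worked example (assuming a platform whose supported mask is
	 * 4K | 64K | 2M): if phys contains only the 2M bit, then for each
	 * supported bit i the test phys & (~0u << i) succeeds, so sg ends up
	 * as 4K | 64K | 2M, and any of those sizes may be used for the GTT
	 * entries covering this object.
	 */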
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

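/*
 * Fetch the backing pages from the object's backend via ->get_pages().
 * Objects already marked as purgeable are refused with -EFAULT. The caller
 * must hold obj->mm.lock.
 */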
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

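/*
 * Throw away all cached sg lookup entries; the radix tree is repopulated
 * lazily by i915_gem_object_get_sg() after the pages are next set.
 */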
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

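/*
 * Detach the sg_table from the object: remove it from the shrinker lists,
 * tear down any kernel mapping, reset the cached page iterator and the
 * recorded page sizes, and return the pages to the caller (usually so they
 * can be released through ->put_pages()).
 */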
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		void *ptr;

		ptr = page_mask_bits(obj->mm.mapping);
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

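/*
 * Release the object's backing pages via ->put_pages(), provided nothing
 * still holds a pin on them (in which case -EBUSY is returned). @subclass
 * selects the lockdep nesting level, since the shrinker may call this on
 * one object while another object's mm.lock is already held inside
 * get_pages().
 */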
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				enum i915_mm_subclass subclass)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(atomic_read(&obj->bind_count));

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock_nested(&obj->mm.lock, subclass);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
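/*
 * Build a contiguous kernel virtual mapping of the object's pages: a single
 * WB page is simply kmapped, otherwise the pages are gathered into a (stack
 * or heap allocated) array and vmapped with the requested caching attributes.
 */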
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pgprot_t pgprot;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough - to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		kvfree(pages);

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (unlikely(!i915_gem_object_has_struct_page(obj)))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return ERR_PTR(err);

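	/*
	 * An existing mapping of a different type may only be replaced when
	 * either we have just taken the first pin on the pages ourselves or
	 * the caller asked for it with I915_MAP_OVERRIDE; otherwise the
	 * conflicting request fails with -EBUSY below.
	 */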
	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}

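/*
 * Flush the CPU caches for the given range of a previously pinned kernel
 * mapping so that subsequent GPU reads observe the CPU writes. This is a
 * no-op if the object is already coherent for writes or mapped
 * write-combined.
 */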
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

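/*
 * Return the scatterlist entry covering page index @n of the object, and
 * report via @offset how many pages into that entry page @n lies. The
 * object's pages must be pinned by the caller.
 */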
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

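/*
 * Return the struct page backing page index @n of the object. Only valid
 * for objects backed by struct pages, and only while the pages are pinned.
 */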
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

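/*
 * Return the DMA address of page index @n. If @len is provided, it is set
 * to the number of bytes remaining in the same sg entry starting from that
 * page.
 */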
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}