/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008 Intel Corporation
 */

#include <linux/string.h>
#include <linux/bitops.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"

/**
 * DOC: buffer object tiling
 *
 * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() are the
 * userspace interface to declare fence register requirements.
 *
 * In principle GEM doesn't care at all about the internal data layout of an
 * object, and hence it also doesn't care about tiling or swizzling. There are
 * two exceptions:
 *
 * - For X and Y tiling the hardware provides detilers for CPU access, so
 *   called fences. Since there's only a limited amount of them the kernel
 *   must manage these, and therefore userspace must tell the kernel the
 *   object tiling if it wants to use fences for detiling.
 * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
 *   depends upon the physical page frame number. When swapping such objects
 *   the page frame number might change and the kernel must be able to fix
 *   this up, and hence must know the object's tiling. Note that on a subset
 *   of platforms with asymmetric memory channel population the swizzling
 *   pattern changes in an unknown way, and for those the kernel simply
 *   forbids swapping completely.
 *
 * Since neither of these applies to new tiling layouts on modern platforms,
 * like W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y
 * tiled. Anything else can be handled in userspace entirely without the
 * kernel's involvement.
 */
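
/*
 * Illustrative userspace sketch of the set_tiling interface (not built as
 * part of the driver; assumes an open DRM fd, a GEM handle from GEM_CREATE
 * and libdrm's drmIoctl() wrapper):
 *
 *	struct drm_i915_gem_set_tiling set = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = 4096,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set))
 *		return -errno;
 *
 * On success set.swizzle_mode reports the bit 6 swizzling userspace must
 * apply when accessing the object through a fenced CPU mapping, and
 * set.tiling_mode/set.stride report the values actually applied.
 */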

/**
 * i915_gem_fence_size - required global GTT size for a fence
 * @i915: i915 device
 * @size: object size
 * @tiling: tiling mode
 * @stride: tiling stride
 *
 * Return the required global GTT size for a fence (view of a tiled object),
 * taking into account potential fence register mapping.
 */
u32 i915_gem_fence_size(struct drm_i915_private *i915,
			u32 size, unsigned int tiling, unsigned int stride)
{
	u32 ggtt_size;

	GEM_BUG_ON(!size);

	if (tiling == I915_TILING_NONE)
		return size;

	GEM_BUG_ON(!stride);

	if (INTEL_GEN(i915) >= 4) {
		stride *= i915_gem_tile_height(tiling);
		GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
		return roundup(size, stride);
	}

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN(i915, 3))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
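
/*
 * Worked example (illustrative): for a 600KiB X-tiled object on gen2, the
 * 512KiB minimum fence region is doubled once to the next power of two,
 * so i915_gem_fence_size() returns 1MiB. On gen4+ with a 4096 byte stride
 * the region is instead rounded up to a whole number of tile rows:
 * 4096 * 8 (X-tile height) = 32KiB, and roundup(600KiB, 32KiB) = 608KiB.
 */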

/**
 * i915_gem_fence_alignment - required global GTT alignment for a fence
 * @i915: i915 device
 * @size: object size
 * @tiling: tiling mode
 * @stride: tiling stride
 *
 * Return the required global GTT alignment for a fence (a view of a tiled
 * object), taking into account potential fence register mapping.
 */
u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
			     unsigned int tiling, unsigned int stride)
{
	GEM_BUG_ON(!size);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (tiling == I915_TILING_NONE)
		return I915_GTT_MIN_ALIGNMENT;

	if (INTEL_GEN(i915) >= 4)
		return I965_FENCE_PAGE;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_fence_size(i915, size, tiling, stride);
}
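
/*
 * Illustrative consequence of the above: on gen4+ a fenced view needs only
 * I965_FENCE_PAGE alignment regardless of object size, whereas on gen2/3
 * the 600KiB object from the previous example must start on a 1MiB
 * boundary, matching its power-of-two fence region.
 */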

/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_i915_gem_object *obj,
	       unsigned int tiling, unsigned int stride)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int tile_width;

	/* Linear is always fine */
	if (tiling == I915_TILING_NONE)
		return true;

	if (tiling > I915_TILING_LAST)
		return false;

	/*
	 * Check maximum stride & object size. i965+ stores the end address
	 * of the gtt mapping in the fence reg, so don't bother to check
	 * the size.
	 */
	if (INTEL_GEN(i915) >= 7) {
		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
			return false;
	} else if (INTEL_GEN(i915) >= 4) {
		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
			return false;
	} else {
		if (stride > 8192)
			return false;

		if (!is_power_of_2(stride))
			return false;
	}

	if (IS_GEN(i915, 2) ||
	    (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
		tile_width = 128;
	else
		tile_width = 512;

	if (!stride || !IS_ALIGNED(stride, tile_width))
		return false;

	return true;
}
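
/*
 * Illustrative examples of these checks: on gen3, an X-tiled stride of
 * 4096 passes (<= 8KiB, a power of two, and a multiple of the 512 byte
 * tile width), 640 fails the power-of-two test, and 256 fails the tile
 * width alignment. On gen9, a Y-tiled stride of 7680 passes: it need not
 * be a power of two, only a multiple of the 128 byte tile width and
 * within the gen7+ maximum pitch.
 */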

static bool i915_vma_fence_prepare(struct i915_vma *vma,
				   int tiling_mode, unsigned int stride)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 size, alignment;

	if (!i915_vma_is_map_and_fenceable(vma))
		return true;

	size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
	if (vma->node.size < size)
		return false;

	alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
	if (!IS_ALIGNED(vma->node.start, alignment))
		return false;

	return true;
}

/* Make the current GTT allocation valid for the change in tiling. */
static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
			      int tiling_mode, unsigned int stride)
{
	struct i915_vma *vma;
	int ret;

	if (tiling_mode == I915_TILING_NONE)
		return 0;

	for_each_ggtt_vma(vma, obj) {
		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;
	}

	return 0;
}

int
i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			   unsigned int tiling, unsigned int stride)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	int err;

	/* Make sure we don't cross-contaminate obj->tiling_and_stride */
	BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);

	GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
	GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
	lockdep_assert_held(&i915->drm.struct_mutex);

	if ((tiling | stride) == obj->tiling_and_stride)
		return 0;

	if (i915_gem_object_is_framebuffer(obj))
		return -EBUSY;

	/* We need to rebind the object if its current allocation
	 * no longer meets the alignment restrictions for its new
	 * tiling mode. Otherwise we can just leave it alone, but
	 * need to ensure that any fence register is updated before
	 * the next fenced access (either through the GTT or by the
	 * BLT unit on older GPUs).
	 *
	 * After updating the tiling parameters, we then flag whether
	 * we need to update an associated fence register. Note this
	 * has to also include the unfenced register the GPU uses
	 * whilst executing a fenced command for an untiled object.
	 */

	err = i915_gem_object_fence_prepare(obj, tiling, stride);
	if (err)
		return err;

	i915_gem_object_lock(obj);
	if (i915_gem_object_is_framebuffer(obj)) {
		i915_gem_object_unlock(obj);
		return -EBUSY;
	}

	/* If the memory has unknown (i.e. varying) swizzling, we pin the
	 * pages to prevent them being swapped out and causing corruption
	 * due to the change in swizzling.
	 */
	mutex_lock(&obj->mm.lock);
	if (i915_gem_object_has_pages(obj) &&
	    obj->mm.madv == I915_MADV_WILLNEED &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (tiling == I915_TILING_NONE) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (!i915_gem_object_is_tiled(obj)) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}
	mutex_unlock(&obj->mm.lock);

	for_each_ggtt_vma(vma, obj) {
		vma->fence_size =
			i915_gem_fence_size(i915, vma->size, tiling, stride);
		vma->fence_alignment =
			i915_gem_fence_alignment(i915,
						 vma->size, tiling, stride);

		if (vma->fence)
			vma->fence->dirty = true;
	}

	obj->tiling_and_stride = tiling | stride;
	i915_gem_object_unlock(obj);

	/* Force the fence to be reacquired for GTT access */
	i915_gem_object_release_mmap(obj);

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		if (!obj->bit_17) {
			obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
						    GFP_KERNEL);
		}
	} else {
		bitmap_free(obj->bit_17);
		obj->bit_17 = NULL;
	}

	return 0;
}

/**
 * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode
 * @dev: DRM device
 * @data: data pointer for the ioctl
 * @file: DRM file for the ioctl call
 *
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * The tiling mode of a proxy object is handled by its generator, and
	 * is not allowed to be changed by userspace.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		err = -ENXIO;
		goto err;
	}

	if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
		err = -EINVAL;
		goto err;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user. This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	err = mutex_lock_interruptible(&dev->struct_mutex);
	if (err)
		goto err;

	err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
	mutex_unlock(&dev->struct_mutex);

	/* We have to maintain this existing ABI... */
	args->stride = i915_gem_object_get_stride(obj);
	args->tiling_mode = i915_gem_object_get_tiling(obj);

err:
	i915_gem_object_put(obj);
	return err;
}

/**
 * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode
 * @dev: DRM device
 * @data: data pointer for the ioctl
 * @file: DRM file for the ioctl call
 *
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	int err = -ENOENT;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (obj) {
		args->tiling_mode =
			READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
		err = 0;
	}
	rcu_read_unlock();
	if (unlikely(err))
		return err;

	switch (args->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		break;
	default:
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling_ioctl() */
	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
	else
		args->phys_swizzle_mode = args->swizzle_mode;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	return 0;
}
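
/*
 * Illustrative userspace counterpart for the get_tiling path (not built as
 * part of the driver; same assumptions as the set_tiling sketch above):
 *
 *	struct drm_i915_gem_get_tiling get = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get))
 *		return -errno;
 *
 * If get.phys_swizzle_mode differs from get.swizzle_mode, the physical
 * layout depends on bit 17 of the page frame number and userspace must
 * fall back to pread/pwrite for CPU access to tiled buffers.
 */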