/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"

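/*
 * The "phys" object backend keeps the entire object in one physically
 * contiguous, coherent DMA allocation instead of discrete shmem pages.
 * It exists for objects the hardware must address physically, such as
 * the legacy cursor on the oldest platforms.
 *
 * get_pages populates the contiguous buffer from the object's shmem
 * backing store and describes it with a single-entry sg_table.
 */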
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

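	/*
	 * One scatterlist entry covers the whole allocation. Note that
	 * the kernel virtual address, not a real struct page, is stashed
	 * in the entry's page slot; sg_page() recovers it on teardown.
	 */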
	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

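	/*
	 * Copy the current contents out of shmem page by page, clflushing
	 * each destination page so the data reaches memory and is visible
	 * to the GPU.
	 */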
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_sg;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_sg:
	/* The copy loop failed after the table was populated: release it fully. */
	sg_free_table(st);
err_st:
	kfree(st);
err_pci:
	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

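/*
 * Teardown is the mirror of setup: if the object was written through
 * the contiguous mapping, the contents are first copied back into the
 * shmem backing store before the DMA allocation is released.
 */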
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

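	/*
	 * Write any modifications back to the shmem pages, flushing the
	 * source out of the CPU caches first and dirtying each page so
	 * the contents persist in the backing store.
	 */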
	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

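/*
 * Writes land directly in the contiguous buffer. The clflush and
 * chipset flush afterwards push the data past the CPU caches so the
 * GPU sees it immediately.
 */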
static int
phys_pwrite(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pwrite *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}

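/*
 * Reads likewise come straight from the contiguous buffer; clflushing
 * first ensures the CPU does not return stale cached data from before
 * the GPU's last write.
 */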
static int
phys_pread(struct drm_i915_gem_object *obj,
	   const struct drm_i915_gem_pread *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	drm_clflush_virt_range(vaddr, args->size);
	if (copy_to_user(user_data, vaddr, args->size))
		return -EFAULT;

	return 0;
}

static void phys_release(struct drm_i915_gem_object *obj)
{
	fput(obj->base.filp);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.name = "i915_gem_object_phys",
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,

	.pread = phys_pread,
	.pwrite = phys_pwrite,

	.release = phys_release,
};

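/*
 * i915_gem_object_attach_phys() converts a default shmem object over to
 * the phys backend, migrating any existing backing pages into a single
 * contiguous allocation. This is required for objects that the hardware
 * must reference by physical address, such as the legacy cursor on the
 * oldest platforms. @align gives the required physical alignment and
 * must not exceed the object size.
 */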
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

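	/*
	 * mm.lock is also taken on the normal get-pages path, so annotate
	 * this acquisition with the GET_PAGES subclass to keep lockdep
	 * happy while we swap the backing store underneath the object.
	 */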
	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

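	/*
	 * Detach the current shmem pages, switch over the vtable, and ask
	 * the new backend to repopulate the object; on success the old
	 * pages are returned to shmem below.
	 */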
	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);

	i915_gem_object_release_memory_region(obj);

	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	/* On failure, restore the original shmem ops and backing pages. */
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif