// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

/* Called by the DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

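	/*
	 * Heap BOs are backed lazily, one sg_table per 2MB chunk populated
	 * on demand by the GPU fault handler, so each chunk that was
	 * actually populated has to be unmapped and freed individually.
	 */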
	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free_object(obj);
}

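/*
 * Look up the mapping of @bo in @priv's GPU address space and take a
 * reference on it. Returns NULL if no such mapping exists. A typical
 * caller (e.g. the job submission path) does:
 *
 *	mapping = panfrost_gem_mapping_get(bo, priv);
 *	if (!mapping)
 *		return -EINVAL;
 *	...
 *	panfrost_gem_mapping_put(mapping);
 */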
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == &priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}

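/*
 * Tear down a mapping: unmap it from the GPU MMU if it is live, and
 * return its VA range to the per-file drm_mm allocator.
 */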
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_file_priv *priv;

	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
	spin_lock(&priv->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&priv->mm_lock);
}

static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	kfree(mapping);
}

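/* Drop a mapping reference. Safe to call with a NULL mapping. */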
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

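/*
 * Tear down all mappings of @bo. The caller is expected to hold
 * bo->mappings.lock, hence the _locked suffix.
 */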
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}

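/*
 * Called by the DRM core when a handle to the BO is created in a file's
 * handle table. Allocates a GPU VA range for the BO in this file's
 * address space and, for non-heap BOs, maps it right away.
 */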
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24-bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
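	/*
	 * Non-executable buffers of 2MB or more are aligned to 2MB, which
	 * lets the io-pgtable code use 2MB block mappings when the physical
	 * layout allows it.
	 */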
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	mapping->mmu = &priv->mmu;
	spin_lock(&priv->mm_lock);
	ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&priv->mm_lock);
	if (ret)
		goto err;

	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}

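/*
 * Called by the DRM core when a file drops its last handle to the BO.
 * Removes this file's mapping from the list and drops the reference the
 * handle held; actual teardown happens in panfrost_gem_mapping_release()
 * once any remaining users (e.g. in-flight jobs) are done with it.
 */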
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == &priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}

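/*
 * Heap BOs grow on demand in the fault handler, so there is no complete
 * backing to pin; refuse the pin (and with it dma-buf export).
 */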
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	if (to_panfrost_bo(obj)->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin(obj);
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
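 *
 * Returns: the new &drm_gem_object, or NULL on allocation failure.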
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;

	return &obj->base.base;
}

struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *dev, size_t size,
				u32 flags,
				uint32_t *handle)
{
	int ret;
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the returned handle is the ID userspace uses to refer to the BO.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return bo;
}

struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
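	/* Imported buffers are always mapped non-executable. */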
	bo->noexec = true;

	return obj;
}