// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/slab.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_ttm_buddy_manager.h"

#include "i915_buddy.h"
#include "i915_gem.h"

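/*
 * Glue between a TTM resource manager and the i915 buddy allocator: mm holds
 * the buddy allocator state, reserved tracks blocks carved out via
 * i915_ttm_buddy_man_reserve(), lock protects both, and default_page_size is
 * the minimum allocation granularity used when a BO does not specify its own
 * page_alignment.
 */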
struct i915_ttm_buddy_manager {
	struct ttm_resource_manager manager;
	struct i915_buddy_mm mm;
	struct list_head reserved;
	struct mutex lock;
	u64 default_page_size;
};

static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct i915_ttm_buddy_manager, manager);
}

static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
				    struct ttm_buffer_object *bo,
				    const struct ttm_place *place,
				    struct ttm_resource **res)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_ttm_buddy_resource *bman_res;
	struct i915_buddy_mm *mm = &bman->mm;
	unsigned long n_pages;
	unsigned int min_order;
	u64 min_page_size;
	u64 size;
	int err;

	GEM_BUG_ON(place->fpfn || place->lpfn);

	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
	if (!bman_res)
		return -ENOMEM;

	ttm_resource_init(bo, place, &bman_res->base);
	INIT_LIST_HEAD(&bman_res->blocks);
	bman_res->mm = mm;

	GEM_BUG_ON(!bman_res->base.num_pages);
	size = bman_res->base.num_pages << PAGE_SHIFT;

	min_page_size = bman->default_page_size;
	if (bo->page_alignment)
		min_page_size = bo->page_alignment << PAGE_SHIFT;

	GEM_BUG_ON(min_page_size < mm->chunk_size);
	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
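	/*
	 * Contiguous placements must come from a single buddy block, so
	 * round the request up to the next power of two and force min_order
	 * to match, making the loop below satisfy it in one allocation.
	 */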
	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mm->chunk_size);
	}

	if (size > mm->size) {
		err = -E2BIG;
		goto err_free_res;
	}

	n_pages = size >> ilog2(mm->chunk_size);

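	/*
	 * Greedily grab the largest power-of-two block that still fits the
	 * remaining request, falling back to smaller orders (down to
	 * min_order) when the current order is exhausted, until every chunk
	 * is accounted for.
	 */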
	do {
		struct i915_buddy_block *block;
		unsigned int order;

		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mm->max_order);
		GEM_BUG_ON(order < min_order);

		do {
			mutex_lock(&bman->lock);
			block = i915_buddy_alloc(mm, order);
			mutex_unlock(&bman->lock);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				err = -ENOSPC;
				goto err_free_blocks;
			}
		} while (1);

		n_pages -= BIT(order);

		list_add_tail(&block->link, &bman_res->blocks);

		if (!n_pages)
			break;
	} while (1);

	*res = &bman_res->base;
	return 0;

err_free_blocks:
	mutex_lock(&bman->lock);
	i915_buddy_free_list(mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);
err_free_res:
	kfree(bman_res);
	return err;
}

static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
				    struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	mutex_lock(&bman->lock);
	i915_buddy_free_list(&bman->mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);

	kfree(bman_res);
}

static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
	.alloc = i915_ttm_buddy_man_alloc,
	.free = i915_ttm_buddy_man_free,
};
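
/*
 * These hooks are not called directly by the driver; TTM core invokes them
 * (via ttm_resource_alloc() and ttm_resource_free()) whenever a buffer
 * object is placed in, or removed from, the memory type this manager is
 * registered for.
 */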

/**
 * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 * @use_tt: Set use_tt for the manager
 * @size: The size in bytes to manage
 * @default_page_size: The default minimum page size in bytes for allocations.
 * This must be at least as large as @chunk_size, and can be overridden by
 * setting the BO page_alignment, to be larger or smaller as needed.
 * @chunk_size: The minimum page size in bytes for our allocations, i.e. the
 * order-zero size
 *
 * Note that the starting address is assumed to be zero here, since this
 * simplifies keeping the property that allocated blocks have natural
 * power-of-two alignment. So long as the real starting address is some large
 * power-of-two, or the region naturally starts from zero, this should be
 * fine. Also the &i915_ttm_buddy_man_reserve interface can be used to
 * preserve alignment if, say, there is some unusable range at the start of
 * the region. We can revisit this in the future and make the interface accept
 * an actual starting offset and let it take care of the rest.
 *
 * Note that if @size is not aligned to @chunk_size then we perform the
 * required rounding to get the usable size. The final size in pages can be
 * taken from &ttm_resource_manager.size.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 default_page_size,
			    u64 chunk_size)
{
	struct ttm_resource_manager *man;
	struct i915_ttm_buddy_manager *bman;
	int err;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	err = i915_buddy_init(&bman->mm, size, chunk_size);
	if (err)
		goto err_free_bman;

	mutex_init(&bman->lock);
	INIT_LIST_HEAD(&bman->reserved);
	GEM_BUG_ON(default_page_size < chunk_size);
	bman->default_page_size = default_page_size;

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	ttm_resource_manager_init(man, bman->mm.size >> PAGE_SHIFT);

	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return err;
}
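
/*
 * Example usage (sketch only, not lifted from actual driver code): a region
 * setup path might register and later tear down this manager roughly as
 * follows, where the mem_type value, region_size and the page sizes are
 * placeholders chosen for illustration.
 *
 *	err = i915_ttm_buddy_man_init(bdev, mem_type, false,
 *				      region_size, PAGE_SIZE, PAGE_SIZE);
 *	if (err)
 *		return err;
 *
 *	...
 *
 *	err = i915_ttm_buddy_man_fini(bdev, mem_type);
 */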

/**
 * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 *
 * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this
 * will also be freed for us here.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_buddy_mm *mm = &bman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(bdev, man);
	if (ret)
		return ret;

	ttm_set_driver_manager(bdev, type, NULL);

	mutex_lock(&bman->lock);
	i915_buddy_free_list(mm, &bman->reserved);
	i915_buddy_fini(mm);
	mutex_unlock(&bman->lock);

	ttm_resource_manager_cleanup(man);
	kfree(bman);

	return 0;
}

/**
 * i915_ttm_buddy_man_reserve - Reserve address range
 * @man: The buddy allocator ttm manager
 * @start: The offset in bytes, where the region start is assumed to be zero
 * @size: The size in bytes
 *
 * Note that the starting address for the region is always assumed to be zero.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
			       u64 start, u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_buddy_mm *mm = &bman->mm;
	int ret;

	mutex_lock(&bman->lock);
	ret = i915_buddy_alloc_range(mm, &bman->reserved, start, size);
	mutex_unlock(&bman->lock);

	return ret;
}
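
/*
 * Example usage (sketch, with illustrative values): if the first SZ_1M of
 * the region is unusable, e.g. claimed by firmware, it could be carved out
 * right after i915_ttm_buddy_man_init():
 *
 *	err = i915_ttm_buddy_man_reserve(man, 0, SZ_1M);
 *	if (err)
 *		goto err_fini;
 *
 * Reserved ranges stay on the manager's reserved list and are handed back
 * to the buddy allocator when i915_ttm_buddy_man_fini() runs.
 */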