/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_stolen.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was physically installed. We refer to this memory as
 * stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

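/*
 * Reserve a range-restricted node from the stolen address space. The
 * allocation is performed under the stolen_lock against the drm_mm set
 * up by i915_gem_init_stolen(); note that the first page of stolen is
 * skipped on gen8+ (WaSkipStolenMemoryFirstPage).
 */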
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

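/* As above, but the node may be placed anywhere within stolen. */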
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

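/*
 * Release a node back to the stolen allocator. The node itself remains
 * owned by the caller.
 */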
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

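/*
 * Sanity-check and trim the stolen region we inherited from the BIOS:
 * carve out the GTT if it lives inside stolen on old platforms, and
 * claim the range in the resource tree so any conflicting user shows
 * up as an error.
 */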
static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

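		/*
		 * PGTBL_CTL holds the GTT base address; on gen4 the address
		 * is split across low and high bitfields and has to be
		 * stitched back together.
		 */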
		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN(dev_priv, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash PCI bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN(dev_priv, 3)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);

			return -EBUSY;
		}
	}

	return 0;
}

void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

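/*
 * The *_get_stolen_reserved() helpers below decode the per-platform
 * STOLEN_RESERVED register into the base and size of the portion of
 * stolen set aside by the BIOS/firmware, which must be kept out of our
 * allocator. If the reservation is disabled or looks bogus, base and
 * size are left untouched.
 */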
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
				CTG_STOLEN_RESERVED :
				ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
	     reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

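/*
 * Set up the stolen memory allocator: adopt the region discovered via
 * intel_graphics_stolen_res, trim it with i915_adjust_stolen(), subtract
 * the reserved top-of-stolen portion, and seed a drm_mm range manager
 * with the usable remainder.
 */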
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		dev_notice(dev_priv->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		dev_notice(dev_priv->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	dev_priv->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
		return 0;

	GEM_BUG_ON(dev_priv->dsm.start == 0);
	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

	stolen_top = dev_priv->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(dev_priv)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(dev_priv))
			vlv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(dev_priv,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 11:
	default:
		icl_get_stolen_reserved(dev_priv, &reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	dev_priv->dsm_reserved =
		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &dev_priv->dsm_reserved, &dev_priv->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&dev_priv->dsm) >> 10,
			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

	dev_priv->stolen_usable_size =
		resource_size(&dev_priv->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);

	return 0;
}

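/*
 * Build the fake sg_table used to back a stolen object; see the comment
 * below on why only the dma side of the scatterlist is populated.
 */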
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

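/* ->get_pages backend: wrap the object's stolen node in a fake sg_table. */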
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

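/*
 * Wrap an already-allocated stolen node in a GEM object. The pages are
 * pinned immediately: stolen objects are never shrinkable and their
 * backing store cannot be evicted from underneath us.
 */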
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

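/*
 * Allocate a fresh GEM object backed by stolen memory. Returns NULL
 * rather than an ERR_PTR if stolen is unavailable or exhausted; the
 * underlying node is allocated with 4096 byte alignment.
 */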
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

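/*
 * Create a GEM object for a range of stolen memory that the BIOS/firmware
 * has already populated (e.g. the pre-OS framebuffer taken over for
 * fbcon). Both the stolen node and, if gtt_offset is given, the GGTT
 * node are reserved at fixed caller-supplied offsets instead of being
 * allocated.
 */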
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/*
	 * To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);

	mutex_lock(&ggtt->vm.mutex);
	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
	atomic_inc(&obj->bind_count);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}