/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_gem_batch_pool.h"
#include "i915_drv.h"

/**
 * DOC: batch pool
 *
 * In order to submit batch buffers as 'secure', the software command parser
 * must ensure that a batch buffer cannot be modified after parsing. It does
 * this by copying the user-provided batch buffer contents to a kernel-owned
 * buffer from which the hardware will actually execute, and by carefully
 * managing the address space bindings for such buffers.
 *
 * The batch pool framework provides a mechanism for the driver to manage a
 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
 */

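/*
 * For reference, the pool bookkeeping used throughout this file is assumed
 * to look roughly like the sketch below (illustrative only; the
 * authoritative definition lives in i915_gem_batch_pool.h):
 *
 *	struct i915_gem_batch_pool {
 *		struct intel_engine_cs *engine;
 *		struct list_head cache_list[4];
 *	};
 *
 * Each cache_list[] entry is a size bucket of batch objects belonging to
 * @engine, kept in LRU order by i915_gem_batch_pool_get().
 */
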
/**
 * i915_gem_batch_pool_init() - initialize a batch buffer pool
 * @pool: the batch buffer pool
 * @engine: the associated request submission engine
 */
void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
			      struct intel_engine_cs *engine)
{
	int n;

	pool->engine = engine;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
}
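
/*
 * A minimal usage sketch: an engine embeds a pool and initialises it once
 * during engine setup (assuming the pool is embedded as engine->batch_pool;
 * treat the exact field name as illustrative):
 *
 *	i915_gem_batch_pool_init(&engine->batch_pool, engine);
 */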

/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	int n;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct drm_i915_gem_object *obj, *next;

		list_for_each_entry_safe(obj, next,
					 &pool->cache_list[n],
					 batch_pool_link)
			__i915_gem_object_release_unless_active(obj);

		INIT_LIST_HEAD(&pool->cache_list[n]);
	}
}
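
/*
 * Teardown sketch (illustrative; assumes the pool is embedded as
 * engine->batch_pool and that the caller takes struct_mutex, as the
 * lockdep assertion above requires):
 *
 *	mutex_lock(&engine->i915->drm.struct_mutex);
 *	i915_gem_batch_pool_fini(&engine->batch_pool);
 *	mutex_unlock(&engine->i915->drm.struct_mutex);
 */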

/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex.
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything of 8 pages
	 * or more into the last bucket: i.e. the buckets hold objects of
	 * (1 page, 2-3 pages, 4-7 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];
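
	/*
	 * Worked example (illustrative): a 3 page request gives
	 * fls(3) - 1 = 1, i.e. the 2-3 page bucket; a 12 page request
	 * gives fls(12) - 1 = 3, already the last bucket, so the clamp
	 * above only matters for requests of 16 pages or more.
	 */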

	list_for_each_entry(obj, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(obj)) {
			struct reservation_object *resv = obj->resv;

			if (!reservation_object_test_signaled_rcu(resv, true))
				break;

			i915_retire_requests(pool->engine->i915);
			GEM_BUG_ON(i915_gem_object_is_active(obj));

			/*
			 * The object is now idle, so clear the array of shared
			 * fences before we add a new request. Although we
			 * remain on the same engine, we may be on a different
			 * timeline and so may continually grow the array,
			 * trapping a reference to all the old fences, rather
			 * than replace the existing fence.
			 */
			if (rcu_access_pointer(resv->fence)) {
				reservation_object_lock(resv, NULL);
				reservation_object_add_excl_fence(resv, NULL);
				reservation_object_unlock(resv);
			}
		}

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
								 true));

		if (obj->base.size >= size)
			goto found;
	}

	obj = i915_gem_object_create_internal(pool->engine->i915, size);
	if (IS_ERR(obj))
		return obj;

found:
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
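
/*
 * Typical caller flow, following the kernel-doc above (illustrative sketch
 * only; struct_mutex handling and error paths are abbreviated, and the pool
 * is assumed to be embedded as engine->batch_pool):
 *
 *	obj = i915_gem_batch_pool_get(&engine->batch_pool, size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	... copy the parsed batch into obj, bind it and submit ...
 *
 *	i915_gem_object_unpin_pages(obj);
 *
 * The object stays on its size bucket throughout; it simply becomes
 * eligible for reuse by a later i915_gem_batch_pool_get() once the
 * request using it has been retired.
 */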