/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE	BIT(0)
#define I915_GEM_OBJECT_HAS_IOMEM	BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(2)
#define I915_GEM_OBJECT_IS_PROXY	BIT(3)
#define I915_GEM_OBJECT_NO_MMAP		BIT(4)
#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(5)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to using the associated set of
	 * pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	void (*truncate)(struct drm_i915_gem_object *obj);
	void (*writeback)(struct drm_i915_gem_object *obj);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
	void (*release)(struct drm_i915_gem_object *obj);

	const char *name; /* friendly name for debug, e.g. lockdep classes */
};
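
/*
 * Illustrative sketch (not part of the driver): a backing-store
 * implementation describes itself with one of these ops tables. The
 * names below are hypothetical placeholders; get_pages() builds the
 * object's sg_table, put_pages() releases it, and release() runs at
 * final object teardown:
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.name = "example-object",
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *		.release = example_release,
 *	};
 */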

enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_WC,
	I915_MMAP_TYPE_WB,
	I915_MMAP_TYPE_UC,
};

struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
};

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type: all GGTT vma are
		 * placed at the head and all ppGTT vma are placed at the tail.
		 * The different types of GGTT vma are unordered between
		 * themselves; use @vma.tree (which has a defined order
		 * between all VMA) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMA created for this object are placed in the @vma.tree
		 * for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list for
		 * easy iteration.
		 */
		struct rb_root tree;
	} vma;
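
	/*
	 * Illustrative sketch (not part of the driver): a single binding is
	 * looked up through i915_vma_instance() (backed by @vma.tree), while
	 * walking every binding takes @vma.lock and iterates @vma.list. Here
	 * vm is assumed to be some struct i915_address_space and inspect()
	 * stands in for whatever the caller does with each vma:
	 *
	 *	struct i915_vma *vma = i915_vma_instance(obj, vm, NULL);
	 *
	 *	spin_lock(&obj->vma.lock);
	 *	list_for_each_entry(vma, &obj->vma.list, obj_link)
	 *		inspect(vma);
	 *	spin_unlock(&obj->vma.lock);
	 */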

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMA from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
	spinlock_t lut_lock; /* guards lut_list */

	/**
	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
	 *
	 * When we lock this object through i915_gem_object_lock() with a
	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
	 */
	struct list_head obj_link;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE   BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
#define I915_BO_READONLY         BIT(2)

	/*
	 * Is the object to be mapped as read-only to the GPU (see
	 * I915_BO_READONLY above)? Only honoured if the hardware has the
	 * relevant pte bit.
	 */
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;

	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer __rcu *frontbuffer;

	/** Current tiling mode and stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
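
	/*
	 * Illustrative sketch (not part of the driver): the tiling mode and
	 * the stride share this one field. Because a fenceable stride is a
	 * multiple of FENCE_MINIMUM_STRIDE, the low bits are free to hold
	 * the tiling mode, and the two halves are recovered by masking:
	 *
	 *	unsigned int tiling = obj->tiling_and_stride & TILING_MASK;
	 *	unsigned int stride = obj->tiling_and_stride & STRIDE_MASK;
	 */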

	struct {
		/*
		 * Protects the pages and their use. Do not use directly, but
		 * instead go through the pin/unpin interfaces.
		 */
		struct mutex lock;
		atomic_t pages_pin_count;
		atomic_t shrink_pin;

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;
		/**
		 * List of memory region blocks allocated for this object.
		 */
		struct list_head blocks;
		/**
		 * Element within memory_region->objects or region->purgeable
		 * if the object is marked as DONTNEED. Access is protected by
		 * region->obj_lock.
		 */
		struct list_head region_link;

		struct sg_table *pages;
		void *mapping;

		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple vma associated with this object we need to
			 * prevent any trampling of state, hence a copy of this
			 * struct also lives in each vma, therefore the gtt
			 * value here should only be read/write through the vma.
			 */
			unsigned int gtt;
		} page_sizes;
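
		/*
		 * Illustrative example (an assumption for this sketch, not
		 * taken from the driver): an object backed entirely by 64K
		 * chunks has phys = I915_GTT_PAGE_SIZE_64K; on a platform
		 * whose GTT supports 4K and 64K pages, sg would then contain
		 * both sizes, since the whole object can be mapped with 4K
		 * pages while 64K pages remain usable opportunistically. The
		 * size actually used for a given binding is recorded in the
		 * per-vma copy, per the comment on gtt above.
		 */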

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;
		u64 encode;

		void *gvt_info;
	};
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}
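
/*
 * Illustrative usage (not part of the driver): any code handed the
 * embedded &drm_gem_object, e.g. a struct drm_gem_object_funcs callback,
 * can recover the i915 object with to_intel_bo(). example_callback() and
 * example_handle() are hypothetical placeholders:
 *
 *	static void example_callback(struct drm_gem_object *gem)
 *	{
 *		struct drm_i915_gem_object *obj = to_intel_bo(gem);
 *
 *		example_handle(obj);
 *	}
 */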

#endif