// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include <linux/circ_buf.h>

#include "gem/i915_gem_context.h"

#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
#include "intel_guc_submission.h"

#include "i915_drv.h"
#include "i915_trace.h"

enum {
	GUC_PREEMPT_NONE = 0,
	GUC_PREEMPT_INPROGRESS,
	GUC_PREEMPT_FINISHED,
};
#define GUC_PREEMPT_BREADCRUMB_DWORDS	0x8
#define GUC_PREEMPT_BREADCRUMB_BYTES	\
	(sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)

/**
 * DOC: GuC-based command submission
 *
 * GuC client:
 * An intel_guc_client refers to a submission path through GuC. Currently,
 * there is only one client, which is charged with all submissions to the
 * GuC. This struct is the owner of a doorbell, a process descriptor and a
 * workqueue (all of them inside a single gem object that contains all the
 * required pages for these elements).
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors, and shares them with the GuC.
 * Currently, there exists a 1:1 mapping between an intel_guc_client and a
 * guc_stage_desc (via the client's stage_id), so effectively only one
 * gets used. This stage descriptor lets the GuC know about the doorbell,
 * workqueue and process descriptor. Theoretically, it also lets the GuC
 * know about our HW contexts (context ID, etc...), but we actually
 * employ a kind of submission where the GuC uses the LRCA sent via the work
 * item instead (the single guc_stage_desc associated with the execbuf client
 * contains information about the default kernel context only, but this is
 * essentially unused). This is called a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any
 * data. It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). The firmware writes a success/fail code back to the action
 * register after it processes the request. The kernel driver polls waiting
 * for this update and then proceeds.
 * See intel_guc_send().
 *
 * Doorbells:
 * Doorbells are interrupts to the uKernel. A doorbell is a single cache
 * line (QW) mapped into process space.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail
 * pointer and an ELSP context descriptor dword into a Work Item.
 * See guc_add_request()
 *
 */

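/*
 * As an illustrative sketch of the scratch-register protocol described
 * above (this is just the shape of a host-to-GuC request, not extra driver
 * logic): an action is an opcode followed by its parameters, handed to
 * intel_guc_send(), which writes the dwords to the SOFT_SCRATCH registers,
 * rings the GuC interrupt and polls the action register for the reply.
 * For a hypothetical stage_id:
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
 *		stage_id,
 *	};
 *	int err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 *
 * A negative return value means the firmware reported failure (or the wait
 * for its reply timed out).
 */
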
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static inline bool is_high_priority(struct intel_guc_client *client)
{
	return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
		client->priority == GUC_CLIENT_PRIORITY_HIGH);
}

static int reserve_doorbell(struct intel_guc_client *client)
{
	unsigned long offset;
	unsigned long end;
	u16 id;

	GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);

	/*
	 * The bitmap tracks which doorbell registers are currently in use.
	 * It is split into two halves; the first half is used for normal
	 * priority contexts, the second half for high-priority ones.
	 */
	offset = 0;
	end = GUC_NUM_DOORBELLS / 2;
	if (is_high_priority(client)) {
		offset = end;
		end += offset;
	}

	id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
	if (id == end)
		return -ENOSPC;

	__set_bit(id, client->guc->doorbell_bitmap);
	client->doorbell_id = id;
	DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
			 client->stage_id, yesno(is_high_priority(client)),
			 id);
	return 0;
}
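
/*
 * Worked example for the split above, assuming GUC_NUM_DOORBELLS is 256:
 * normal-priority clients are handed ids from [0..127] and high-priority
 * clients from [128..255]. A full half yields -ENOSPC even if the other
 * half still has free ids.
 */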

static bool has_doorbell(struct intel_guc_client *client)
{
	if (client->doorbell_id == GUC_DOORBELL_INVALID)
		return false;

	return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
}

static void unreserve_doorbell(struct intel_guc_client *client)
{
	GEM_BUG_ON(!has_doorbell(client));

	__clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
	client->doorbell_id = GUC_DOORBELL_INVALID;
}

/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
{
	struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;

	return &base[client->stage_id];
}

/*
 * Initialise, update, or clear doorbell data shared with the GuC
 *
 * These functions modify shared data and so need access to the mapped
 * client object which contains the page being used for the doorbell
 */

static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
{
	struct guc_stage_desc *desc;

	/* Update the GuC's idea of the doorbell ID */
	desc = __get_stage_desc(client);
	desc->db_id = new_id;
}

static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
{
	return client->vaddr + client->doorbell_offset;
}

static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;

	GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
	return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
}

static void __init_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *doorbell;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	doorbell->cookie = 0;
}

static void __fini_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *doorbell;
	u16 db_id = client->doorbell_id;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_DISABLED;

	/*
	 * The doorbell release flow requires that we wait for the
	 * GEN8_DRB_VALID bit to go to zero after updating db_status,
	 * before we call the GuC to release the doorbell.
	 */
	if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
		WARN_ONCE(true, "Doorbell never became invalid after disable\n");
}

static int create_doorbell(struct intel_guc_client *client)
{
	int ret;

	if (WARN_ON(!has_doorbell(client)))
		return -ENODEV; /* internal setup error, should never happen */

	__update_doorbell_desc(client, client->doorbell_id);
	__init_doorbell(client);

	ret = __guc_allocate_doorbell(client->guc, client->stage_id);
	if (ret) {
		__fini_doorbell(client);
		__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
		DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
				 client->stage_id, ret);
		return ret;
	}

	return 0;
}

static int destroy_doorbell(struct intel_guc_client *client)
{
	int ret;

	GEM_BUG_ON(!has_doorbell(client));

	__fini_doorbell(client);
	ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
	if (ret)
		DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
			  client->stage_id, ret);

	__update_doorbell_desc(client, GUC_DOORBELL_INVALID);

	return ret;
}
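
/*
 * Doorbell lifecycle, as implemented above (a summary, not extra logic):
 *
 *	create_doorbell():
 *		__update_doorbell_desc()	tell the GuC the doorbell id
 *		__init_doorbell()		db_status = ENABLED, cookie = 0
 *		__guc_allocate_doorbell()	H2G handshake
 *
 *	destroy_doorbell():
 *		__fini_doorbell()		db_status = DISABLED, then wait
 *						for GEN8_DRB_VALID to drop
 *		__guc_deallocate_doorbell()	H2G handshake
 *		__update_doorbell_desc()	mark GUC_DOORBELL_INVALID
 *
 * The ordering matters: the shared state (stage descriptor, doorbell page)
 * must be updated before the corresponding H2G call, and the hardware must
 * have dropped GEN8_DRB_VALID before the doorbell is released.
 */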

static unsigned long __select_cacheline(struct intel_guc *guc)
{
	unsigned long offset;

	/* The doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Move to the next cache line to reduce contention */
	guc->db_cacheline += cache_line_size();

	DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
			 offset, guc->db_cacheline, cache_line_size());
	return offset;
}
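
/*
 * Illustrative arithmetic: with 64-byte cache lines and 4KiB pages,
 * successive calls hand out offsets 0x00, 0x40, 0x80, ... and wrap back to
 * 0x00 after 64 allocations, since only offset_in_page() of the running
 * counter is used. Nothing here prevents reuse of a line that is still
 * live; that is fine today because there is only a single client.
 */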

static inline struct guc_process_desc *
__get_process_desc(struct intel_guc_client *client)
{
	return client->vaddr + client->proc_desc_offset;
}

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_proc_desc_init(struct intel_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = memset(__get_process_desc(client), 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->stage_id = client->stage_id;
	desc->wq_size_bytes = GUC_WQ_SIZE;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;
}

static void guc_proc_desc_fini(struct intel_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = __get_process_desc(client);
	memset(desc, 0, sizeof(*desc));
}

static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc,
				     PAGE_ALIGN(sizeof(struct guc_stage_desc) *
				     GUC_MAX_STAGE_DESCRIPTORS));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->stage_desc_pool = vma;
	guc->stage_desc_pool_vaddr = vaddr;
	ida_init(&guc->stage_ids);

	return 0;
}

static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
	ida_destroy(&guc->stage_ids);
	i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}

/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * write queue, etc).
 */
static void guc_stage_desc_init(struct intel_guc_client *client)
{
	struct intel_guc *guc = client->guc;
	struct guc_stage_desc *desc;
	u32 gfx_addr;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));

	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
			  GUC_STAGE_DESC_ATTR_KERNEL;
	if (is_high_priority(client))
		desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
	desc->stage_id = client->stage_id;
	desc->priority = client->priority;
	desc->db_id = client->doorbell_id;

	/*
	 * The doorbell, process descriptor, and workqueue are all parts
	 * of the client object, which the GuC will reference via the GGTT
	 */
	gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
	desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
	desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
	desc->process_desc = gfx_addr + client->proc_desc_offset;
	desc->wq_addr = gfx_addr + GUC_DB_SIZE;
	desc->wq_size = GUC_WQ_SIZE;

	desc->desc_private = ptr_to_u64(client);
}
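
/*
 * Resulting layout of the client object the descriptor points at, assuming
 * GUC_DB_SIZE covers the first page (as the allocation in guc_client_alloc()
 * implies):
 *
 *	gfx_addr + 0			doorbell cacheline
 *					(client->doorbell_offset)
 *	gfx_addr + 0 or GUC_DB_SIZE/2	process descriptor, in whichever
 *					half of the page the doorbell does
 *					not occupy
 *	gfx_addr + GUC_DB_SIZE		workqueue, GUC_WQ_SIZE bytes
 */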

static void guc_stage_desc_fini(struct intel_guc_client *client)
{
	struct guc_stage_desc *desc;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));
}

/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct intel_guc_client *client,
			       u32 target_engine, u32 context_desc,
			       u32 ring_tail, u32 fence_id)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
	struct guc_process_desc *desc = __get_process_desc(client);
	struct guc_wq_item *wqi;
	u32 wq_off;

	lockdep_assert_held(&client->wq_lock);

	/*
	 * For now, a workqueue item is 4 DWords and the workqueue buffer is
	 * 2 pages, so a wqi can neither straddle a page boundary nor wrap
	 * around to the beginning. This simplifies the implementation below.
	 *
	 * XXX: if that ever changes, we would need to build the data in a
	 * temporary wqi and copy it into the workqueue buffer dword by dword.
	 */
	BUILD_BUG_ON(wqi_size != 16);

	/* We expect the WQ to be active if we're appending items to it */
	GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);

	/* Free space is guaranteed. */
	wq_off = READ_ONCE(desc->tail);
	GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
			      GUC_WQ_SIZE) < wqi_size);
	GEM_BUG_ON(wq_off & (wqi_size - 1));

	/* The WQ starts in the page after the doorbell / process_desc */
	wqi = client->vaddr + wq_off + GUC_DB_SIZE;

	if (I915_SELFTEST_ONLY(client->use_nop_wqi)) {
		wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
	} else {
		/* Now fill in the 4-word work queue item */
		wqi->header = WQ_TYPE_INORDER |
			      (wqi_len << WQ_LEN_SHIFT) |
			      (target_engine << WQ_TARGET_SHIFT) |
			      WQ_NO_WCFLUSH_WAIT;
		wqi->context_desc = context_desc;
		wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
		GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
		wqi->fence_id = fence_id;
	}

	/* Make the update visible to GuC */
	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
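
/*
 * For example, an INORDER work item for engine 0 carries wqi_len == 3
 * (four dwords minus the one-word header) and packs its header as
 *
 *	WQ_TYPE_INORDER | (3 << WQ_LEN_SHIFT) |
 *	(0 << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT
 *
 * The tail then advances by 16 bytes and wraps via the power-of-two mask
 * (GUC_WQ_SIZE - 1), e.g. from GUC_WQ_SIZE - 16 back to 0.
 */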

static void guc_ring_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *db;
	u32 cookie;

	lockdep_assert_held(&client->wq_lock);

	/* pointer to the current doorbell cacheline */
	db = __get_doorbell(client);

	/*
	 * We're not expecting the doorbell cookie to change behind our back;
	 * we also need to treat 0 as a reserved value, so on wraparound the
	 * cookie skips from U32_MAX straight to 1.
	 */
	cookie = READ_ONCE(db->cookie);
	WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);

	/* XXX: the doorbell was lost and needs to be acquired again */
	GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}

static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
	struct intel_guc_client *client = guc->execbuf_client;
	struct intel_engine_cs *engine = rq->engine;
	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
	/* The work item expresses the ring tail in QWords, not bytes */
	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

	guc_wq_item_append(client, engine->guc_id, ctx_desc,
			   ring_tail, rq->fence.seqno);
	guc_ring_doorbell(client);
}

/*
 * When we're doing submissions using the regular execlists backend, writing
 * to the ELSP from the CPU side is enough to make sure that writes to
 * ringbuffer pages pinned in the mappable aperture portion of the GGTT are
 * visible to the command streamer. Writes done by the GuC on our behalf are
 * not guaranteed to be ordered in this way; therefore, to ensure the flush,
 * we issue a POSTING READ.
 */
static void flush_ggtt_writes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	if (i915_vma_is_map_and_fenceable(vma))
		intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
}

static void guc_submit(struct intel_engine_cs *engine,
		       struct i915_request **out,
		       struct i915_request **end)
{
	struct intel_guc *guc = &engine->gt->uc.guc;
	struct intel_guc_client *client = guc->execbuf_client;

	spin_lock(&client->wq_lock);

	do {
		struct i915_request *rq = *out++;

		flush_ggtt_writes(rq->ring->vma);
		guc_add_request(guc, rq);
	} while (out != end);

	spin_unlock(&client->wq_lock);
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority | __NO_PREEMPTION;
}

static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
	trace_i915_request_in(rq, idx);

	/*
	 * Currently we are not tracking the rq->context being inflight
	 * (ce->inflight = rq->engine). It is only used by the execlists
	 * backend at the moment; a similar counting strategy would be
	 * required if we generalise the inflight tracking.
	 */

	intel_gt_pm_get(rq->engine->gt);
	return i915_request_get(rq);
}

static void schedule_out(struct i915_request *rq)
{
	trace_i915_request_out(rq);

	intel_gt_pm_put(rq->engine->gt);
	i915_request_put(rq);
}

static void __guc_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **first = execlists->inflight;
	struct i915_request ** const last_port = first + execlists->port_mask;
	struct i915_request *last = first[0];
	struct i915_request **port;
	bool submit = false;
	struct rb_node *rb;

	lockdep_assert_held(&engine->active.lock);

	if (last) {
		if (*++first)
			return;

		last = NULL;
	}

	/*
	 * We write directly into the execlists->inflight queue and don't use
	 * the execlists->pending queue, as we don't have a distinct switch
	 * event.
	 */
	port = first;
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			if (last && rq->hw_context != last->hw_context) {
				if (port == last_port)
					goto done;

				*port = schedule_in(last,
						    port - execlists->inflight);
				port++;
			}

			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			submit = true;
			last = rq;
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}
done:
	execlists->queue_priority_hint =
		rb ? to_priolist(rb)->priority : INT_MIN;
	if (submit) {
		*port = schedule_in(last, port - execlists->inflight);
		*++port = NULL;
		guc_submit(engine, first, port);
	}
	execlists->active = execlists->inflight;
}
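
/*
 * Sketch of the coalescing above: if the priority queue holds requests A1
 * and A2 from context A followed by B1 from context B, all three are
 * submitted, but contexts rather than individual requests occupy the
 * inflight ports. Only A2 (the last request of context A) is recorded via
 * schedule_in() once B1 arrives with a different hw_context; B1 then takes
 * the next port, or stays queued if last_port has already been reached.
 */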

static void guc_submission_tasklet(unsigned long data)
{
	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **port, *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	for (port = execlists->inflight; (rq = *port); port++) {
		if (!i915_request_completed(rq))
			break;

		schedule_out(rq);
	}
	if (port != execlists->inflight) {
		int idx = port - execlists->inflight;
		int rem = ARRAY_SIZE(execlists->inflight) - idx;
		memmove(execlists->inflight, port, rem * sizeof(*port));
	}

	__guc_dequeue(engine);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its execlists->tasklet *just* as we are
	 * calling engine->init_hw() and also writing the ELSP.
	 * Turning off the execlists->tasklet until the reset is over
	 * prevents the race.
	 */
	__tasklet_disable_sync_once(&execlists->tasklet);
}

static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
	struct i915_request * const *port, *rq;

	/* Note we are only using the inflight and not the pending queue */

	for (port = execlists->active; (rq = *port); port++)
		schedule_out(rq);
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void guc_reset(struct intel_engine_cs *engine, bool stalled)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	cancel_port_requests(execlists);

	/* Push back any incomplete requests for replay after the reset. */
	rq = execlists_unwind_incomplete_requests(execlists);
	if (!rq)
		goto out_unlock;

	if (!i915_request_started(rq))
		stalled = false;

	__i915_request_reset(rq, stalled);
	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);

out_unlock:
	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_cancel_requests(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->active.lock, flags);

	/* Cancel the requests on the HW and clear the ELSP tracker. */
	cancel_port_requests(execlists);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (!i915_request_signaled(rq))
			dma_fence_set_error(&rq->fence, -EIO);

		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_finish(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	if (__tasklet_enable(&execlists->tasklet))
		/* And kick in case we missed a new request submission. */
		tasklet_hi_schedule(&execlists->tasklet);

	GEM_TRACE("%s: depth->%d\n", engine->name,
		  atomic_read(&execlists->tasklet.count));
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of guc_submit() above.
 */

/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
	bool valid;

	GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);

	valid = __doorbell_valid(guc, db_id);

	if (test_bit(db_id, guc->doorbell_bitmap) == valid)
		return true;

	DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
			 db_id, yesno(valid));

	return false;
}

static bool guc_verify_doorbells(struct intel_guc *guc)
{
	bool doorbells_ok = true;
	u16 db_id;

	for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
		if (!doorbell_ok(guc, db_id))
			doorbells_ok = false;

	return doorbells_ok;
}

/**
 * guc_client_alloc() - Allocate an intel_guc_client
 * @guc:	the intel_guc structure
 * @priority:	one of the four priority levels: _CRITICAL, _HIGH, _NORMAL
 *		and _LOW. The kernel client that replaces execlist submission
 *		is created with NORMAL priority. A client used by the
 *		scheduler can have HIGH priority, while a preemption context
 *		can use CRITICAL.
 *
 * Return:	An intel_guc_client object on success, an ERR_PTR on failure.
 */
static struct intel_guc_client *
guc_client_alloc(struct intel_guc *guc, u32 priority)
{
	struct intel_guc_client *client;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->guc = guc;
	client->priority = priority;
	client->doorbell_id = GUC_DOORBELL_INVALID;
	spin_lock_init(&client->wq_lock);

	ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
			     GFP_KERNEL);
	if (ret < 0)
		goto err_client;

	client->stage_id = ret;

	/*
	 * The first page is the doorbell/proc_desc; the next two pages
	 * are the wq.
	 */
	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_id;
	}

	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
	client->vma = vma;

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_vma;
	}
	client->vaddr = vaddr;

	ret = reserve_doorbell(client);
	if (ret)
		goto err_vaddr;

	client->doorbell_offset = __select_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n",
			 priority, client, client->stage_id);
	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
			 client->doorbell_id, client->doorbell_offset);

	return client;

err_vaddr:
	i915_gem_object_unpin_map(client->vma->obj);
err_vma:
	i915_vma_unpin_and_release(&client->vma, 0);
err_id:
	ida_simple_remove(&guc->stage_ids, client->stage_id);
err_client:
	kfree(client);
	return ERR_PTR(ret);
}

static void guc_client_free(struct intel_guc_client *client)
{
	unreserve_doorbell(client);
	i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
	ida_simple_remove(&client->guc->stage_ids, client->stage_id);
	kfree(client);
}

static inline bool ctx_save_restore_disabled(struct intel_context *ce)
{
	u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];

#define SR_DISABLED \
	_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
			   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)

	return (sr & SR_DISABLED) == SR_DISABLED;

#undef SR_DISABLED
}
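
/*
 * Reminder of the masked-register encoding relied upon above:
 * _MASKED_BIT_ENABLE(bits) expands to roughly (bits << 16) | bits, i.e. the
 * write-enable mask in the upper half and the value in the lower half, so
 * SR_DISABLED matches only when both inhibit bits were written as enabled
 * in the saved CTX_CONTEXT_CONTROL dword.
 */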

static int guc_clients_create(struct intel_guc *guc)
{
	struct intel_guc_client *client;

	GEM_BUG_ON(guc->execbuf_client);

	client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL);
	if (IS_ERR(client)) {
		DRM_ERROR("Failed to create GuC client for submission!\n");
		return PTR_ERR(client);
	}
	guc->execbuf_client = client;

	return 0;
}

static void guc_clients_destroy(struct intel_guc *guc)
{
	struct intel_guc_client *client;

	client = fetch_and_zero(&guc->execbuf_client);
	if (client)
		guc_client_free(client);
}

static int __guc_client_enable(struct intel_guc_client *client)
{
	int ret;

	guc_proc_desc_init(client);
	guc_stage_desc_init(client);

	ret = create_doorbell(client);
	if (ret)
		goto fail;

	return 0;

fail:
	guc_stage_desc_fini(client);
	guc_proc_desc_fini(client);
	return ret;
}

static void __guc_client_disable(struct intel_guc_client *client)
{
	/*
	 * By the time we're here, the GuC may have already been reset. If
	 * that is the case, instead of trying (in vain) to communicate with
	 * it, let's just cleanup the doorbell HW and our internal state.
	 */
	if (intel_guc_is_running(client->guc))
		destroy_doorbell(client);
	else
		__fini_doorbell(client);

	guc_stage_desc_fini(client);
	guc_proc_desc_fini(client);
}

static int guc_clients_enable(struct intel_guc *guc)
{
	return __guc_client_enable(guc->execbuf_client);
}

static void guc_clients_disable(struct intel_guc *guc)
{
	if (guc->execbuf_client)
		__guc_client_disable(guc->execbuf_client);
}

/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{
	int ret;

	if (guc->stage_desc_pool)
		return 0;

	ret = guc_stage_desc_pool_create(guc);
	if (ret)
		return ret;
	/*
	 * Keep static analysers happy, let them know that we allocated the
	 * vma after testing that it didn't exist earlier.
	 */
	GEM_BUG_ON(!guc->stage_desc_pool);

	WARN_ON(!guc_verify_doorbells(guc));
	ret = guc_clients_create(guc);
	if (ret)
		goto err_pool;

	return 0;

err_pool:
	guc_stage_desc_pool_destroy(guc);
	return ret;
}

void intel_guc_submission_fini(struct intel_guc *guc)
{
	guc_clients_destroy(guc);
	WARN_ON(!guc_verify_doorbells(guc));

	if (guc->stage_desc_pool)
		guc_stage_desc_pool_destroy(guc);
}

static void guc_interrupts_capture(struct intel_gt *gt)
{
	struct intel_rps *rps = &gt->i915->gt_pm.rps;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/*
	 * Tell all command streamers to forward interrupts (but not vblank)
	 * to the GuC.
	 */
	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, gt->i915, id)
		ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);

	/* Route USER_INTERRUPT to the host; all others are sent to the GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs);
	intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs);
	intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs);

	/*
	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
	 * (unmasked) PM interrupts to the GuC. All other bits of this
	 * register *disable* generation of a specific interrupt.
	 *
	 * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
	 * writing to the PM interrupt mask register, i.e. interrupts
	 * that must not be disabled.
	 *
	 * If the GuC is handling these interrupts, then we must not let
	 * the PM code disable ANY interrupt that the GuC is expecting.
	 * So for each ENABLED (0) bit in this register, we must SET the
	 * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
	 * The GuC needs the ARAT expired interrupt unmasked, hence it is
	 * set in pm_intrmsk_mbz.
	 *
	 * Here we CLEAR the REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which
	 * will result in the register bit being left SET!
	 */
	rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
	rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}

static void guc_interrupts_release(struct intel_gt *gt)
{
	struct intel_rps *rps = &gt->i915->gt_pm.rps;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/*
	 * Tell all command streamers NOT to forward interrupts or vblank
	 * to the GuC.
	 */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, gt->i915, id)
		ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);

	/* Route all GT interrupts to the host */
	intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0);
	intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0);
	intel_uncore_write(uncore, GUC_WD_VECS_IER, 0);

	rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
	rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}

static void guc_set_default_submission(struct intel_engine_cs *engine)
{
	/*
	 * We inherit a bunch of functions from execlists that we'd like
	 * to keep using:
	 *
	 *    engine->submit_request = execlists_submit_request;
	 *    engine->cancel_requests = execlists_cancel_requests;
	 *    engine->schedule = execlists_schedule;
	 *
	 * But we need to override the actual submission backend in order
	 * to talk to the GuC.
	 */
	intel_execlists_set_default_submission(engine);

	engine->execlists.tasklet.func = guc_submission_tasklet;

	/* do not use execlists park/unpark */
	engine->park = engine->unpark = NULL;

	engine->reset.prepare = guc_reset_prepare;
	engine->reset.reset = guc_reset;
	engine->reset.finish = guc_reset_finish;

	engine->cancel_requests = guc_cancel_requests;

	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
	engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;

	/*
	 * For the breadcrumb irq to work we need the interrupts to stay
	 * enabled. However, on all platforms on which we'll have support for
	 * GuC submission we don't allow disabling the interrupts at runtime,
	 * so we're always safe with the current flow.
	 */
	GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}

int intel_guc_submission_enable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	err = i915_inject_load_error(gt->i915, -ENXIO);
	if (err)
		return err;

	/*
	 * We're using GuC work items for submitting work through the GuC.
	 * Since we're coalescing multiple requests from a single context
	 * into a single work item prior to assigning it to the execlist_port,
	 * we can never have more work items than the total number of ports
	 * (for all engines). The GuC firmware controls the HEAD of the work
	 * queue, and it is guaranteed that it will remove the work item from
	 * the queue before our request is completed.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
		     sizeof(struct guc_wq_item) *
		     I915_NUM_ENGINES > GUC_WQ_SIZE);

	GEM_BUG_ON(!guc->execbuf_client);

	err = guc_clients_enable(guc);
	if (err)
		return err;

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(gt);

	for_each_engine(engine, gt->i915, id) {
		engine->set_default_submission = guc_set_default_submission;
		engine->set_default_submission(engine);
	}

	return 0;
}

void intel_guc_submission_disable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	GEM_BUG_ON(gt->awake); /* GT should be parked first */

	guc_interrupts_release(gt);
	guc_clients_disable(guc);
}

static bool __guc_submission_support(struct intel_guc *guc)
{
	/* XXX: GuC submission is unavailable for now */
	return false;

	if (!intel_guc_is_supported(guc))
		return false;

	return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
}

void intel_guc_submission_init_early(struct intel_guc *guc)
{
	guc->submission_supported = __guc_submission_support(guc);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#endif