// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_managed.h>
#include <drm/intel-gtt.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"

#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pci_config.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_tlb.h"
#include "intel_uncore.h"
#include "shmem_utils.h"

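/*
 * Early, allocation-free initialisation shared by the root GT and any
 * remote GTs: locks, lists, the watchdog worker, and the early setup of
 * the per-GT subsystems (buffer pool, reset, requests, timelines, TLB,
 * PM, WOPCM, uC and RPS). Callers must have set up gt->irq_lock before
 * calling, since it is dereferenced here.
 */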
void intel_gt_common_init_early(struct intel_gt *gt)
{
	spin_lock_init(gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	init_llist_head(&gt->watchdog.list);
	INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);

	intel_gt_init_buffer_pool(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	intel_gt_init_tlb(gt);
	intel_gt_pm_init_early(gt);

	intel_wopcm_init_early(&gt->wopcm);
	intel_uc_init_early(&gt->uc);
	intel_rps_init_early(&gt->rps);
}

/* Preliminary initialization of Tile 0 */
int intel_root_gt_init_early(struct drm_i915_private *i915)
{
	struct intel_gt *gt = to_gt(i915);

	gt->i915 = i915;
	gt->uncore = &i915->uncore;
	gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
	if (!gt->irq_lock)
		return -ENOMEM;

	intel_gt_common_init_early(gt);

	return 0;
}

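/*
 * Probe this GT's slice of device-local memory (LMEM) and publish it as
 * an intel_memory_region. -ENODEV from intel_gt_setup_lmem() just means
 * the platform has no LMEM and is deliberately not treated as an error.
 */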
static int intel_gt_probe_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned int instance = gt->info.id;
	int id = INTEL_REGION_LMEM_0 + instance;
	struct intel_memory_region *mem;
	int err;

	mem = intel_gt_setup_lmem(gt);
	if (IS_ERR(mem)) {
		err = PTR_ERR(mem);
		if (err == -ENODEV)
			return 0;

		gt_err(gt, "Failed to setup region(%d) type=%d\n",
		       err, INTEL_MEMORY_LOCAL);
		return err;
	}

	mem->id = id;
	mem->instance = instance;

	intel_memory_region_set_name(mem, "local%u", mem->instance);

	GEM_BUG_ON(!HAS_REGION(i915, id));
	GEM_BUG_ON(i915->mm.regions[id]);
	i915->mm.regions[id] = mem;

	return 0;
}

int intel_gt_assign_ggtt(struct intel_gt *gt)
{
	/* Media GT shares primary GT's GGTT */
	if (gt->type == GT_MEDIA) {
		gt->ggtt = to_gt(gt->i915)->ggtt;
	} else {
		gt->ggtt = i915_ggtt_create(gt->i915);
		if (IS_ERR(gt->ggtt))
			return PTR_ERR(gt->ggtt);
	}

	list_add_tail(&gt->ggtt_link, &gt->ggtt->gt_list);

	return 0;
}

int intel_gt_init_mmio(struct intel_gt *gt)
{
	intel_gt_init_clock_frequency(gt);

	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);
	intel_gt_mcr_init(gt);

	return intel_engines_init_mmio(gt);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (GRAPHICS_VER(i915) == 2) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (GRAPHICS_VER(i915) == 3) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

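/*
 * GT hardware bring-up, run at init and again on resume: applies and
 * verifies workarounds, quiesces the unused legacy rings, enables PPGTT
 * and loads the uC firmware, all under an all-engine forcewake so the
 * hardware stays awake for the whole sequence.
 */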
int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   HSW_MI_PREDICATE_RESULT_2,
				   IS_HASWELL_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);
	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume, which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		gt_err(gt, "Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		gt_probe_error(gt, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt)
{
	/* GT0_PERF_LIMIT_REASONS is available only for Gen11+ */
	if (GRAPHICS_VER(gt->i915) < 11)
		return INVALID_MMIO_REG;

	return gt->type == GT_MEDIA ?
		MTL_MEDIA_PERF_LIMIT_REASONS : GT0_PERF_LIMIT_REASONS;
}

void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (GRAPHICS_VER(i915) != 2)
		intel_uncore_write(uncore, PGTBL_ER, 0);

	if (GRAPHICS_VER(i915) < 4)
		intel_uncore_write(uncore, IPEIR(RENDER_RING_BASE), 0);
	else
		intel_uncore_write(uncore, IPEIR_I965, 0);

	intel_uncore_write(uncore, EIR, 0);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		gt_dbg(gt, "EIR stuck: 0x%08x, masking\n", eir);
		intel_uncore_rmw(uncore, EMR, 0, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
					   RING_FAULT_VALID, 0);
		intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 12) {
		intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 8) {
		intel_uncore_rmw(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID, 0);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen6_clear_engine_error_register(engine);
	}
}

static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			gt_dbg(gt, "Unexpected fault\n"
			       "\tAddr: 0x%08lx\n"
			       "\tAddress space: %s\n"
			       "\tSource ID: %d\n"
			       "\tType: %d\n",
			       fault & PAGE_MASK,
			       fault & RING_FAULT_GTTSEL_MASK ?
			       "GGTT" : "PPGTT",
			       RING_FAULT_SRCID(fault),
			       RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

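/*
 * The two helpers below reassemble the faulting virtual address, which
 * gen8+ hardware reports split across two registers: FAULT_TLB_DATA0
 * carries the page-aligned low bits (hence the << 12) and FAULT_TLB_DATA1
 * carries the high bits, which land at bit 44 of the rebuilt address.
 */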
static void xehp_check_faults(struct intel_gt *gt)
{
	u32 fault;

	/*
	 * Although the fault register now lives in an MCR register range,
	 * the GAM registers are special and we only truly need to read
	 * the "primary" GAM instance rather than handling each instance
	 * individually. intel_gt_mcr_read_any() will automatically steer
	 * toward the primary instance.
	 */
	fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0);
		fault_data1 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		gt_dbg(gt, "Unexpected fault\n"
		       "\tAddr: 0x%08x_%08x\n"
		       "\tAddress space: %s\n"
		       "\tEngine ID: %d\n"
		       "\tSource ID: %d\n"
		       "\tType: %d\n",
		       upper_32_bits(fault_addr), lower_32_bits(fault_addr),
		       fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
		       GEN8_RING_FAULT_ENGINE_ID(fault),
		       RING_FAULT_SRCID(fault),
		       RING_FAULT_FAULT_TYPE(fault));
	}
}

static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (GRAPHICS_VER(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		gt_dbg(gt, "Unexpected fault\n"
		       "\tAddr: 0x%08x_%08x\n"
		       "\tAddress space: %s\n"
		       "\tEngine ID: %d\n"
		       "\tSource ID: %d\n"
		       "\tType: %d\n",
		       upper_32_bits(fault_addr), lower_32_bits(fault_addr),
		       fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
		       GEN8_RING_FAULT_ENGINE_ID(fault),
		       RING_FAULT_SRCID(fault),
		       RING_FAULT_FAULT_TYPE(fault));
	}
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
		xehp_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 8)
		gen8_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (GRAPHICS_VER(gt->i915) < 6)
		intel_ggtt_gmch_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_gsc_init(&gt->gsc, gt->i915);

	intel_rps_driver_register(&gt->rps);

	intel_gt_debugfs_register(gt);
	intel_gt_sysfs_register(gt);
}

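/*
 * Allocate and pin the per-GT scratch page. Placement falls back in
 * order: device-local memory, then stolen memory (skipped on Meteor Lake,
 * see Wa_22018444074), then regular internal (shmem-backed) pages.
 */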
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_lmem(i915, size,
					  I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj) && !IS_METEORLAKE(i915)) /* Wa_22018444074 */
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		gt_err(gt, "Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		err = intel_renderstate_init(&so, ce);
		if (err)
			goto err;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_fini;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
err_fini:
		intel_renderstate_fini(&so, ce);
err:
		if (err) {
			intel_context_put(ce);
			goto out;
		}
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct file *state;

		rq = requests[id];
		if (!rq)
			continue;

		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}
	return err;
}

static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	/* Flush and restore the kernel context for safety */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
		err = -EIO;

	return err;
}

static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

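/*
 * Flush outstanding requests on the GT and then wait for the uC to go
 * idle. Returns 0 once idle (trivially so if the GT is not awake), -EINTR
 * if a signal arrives while waiting, or another negative error code if
 * retiring or the uC wait fails. Typical usage is a blocking flush, as in
 * __engines_verify_workarounds() above:
 *
 *	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
 *		err = -EIO;
 */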
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	long remaining_timeout;

	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
							   &remaining_timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	if (timeout)
		return timeout;

	if (remaining_timeout < 0)
		remaining_timeout = 0;

	return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
}

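/*
 * Full GT initialisation, called once per GT during driver probe. A rough
 * sketch of the probe-time ordering as seen from this file (the
 * authoritative sequence lives in the i915_driver_probe() / i915_gem_init()
 * callers):
 *
 *	intel_root_gt_init_early(i915);
 *	intel_gt_probe_all(i915);
 *	for_each_gt(gt, i915, id)
 *		intel_gt_init_mmio(gt);
 *	...
 *	for_each_gt(gt, i915, id)
 *		intel_gt_init(gt);
 *
 * On failure the GT is wedged on init and the error labels below unwind
 * in reverse order of setup.
 */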
int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	intel_gt_init_workarounds(gt);

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	err = intel_gt_init_scratch(gt,
				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	intel_set_mocs_index(gt);

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	err = intel_uc_init(&gt->uc);
	if (err)
		goto err_engines;

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = intel_gt_init_hwconfig(gt);
	if (err)
		gt_err(gt, "Failed to retrieve hwconfig table: %pe\n", ERR_PTR(err));

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	err = i915_inject_probe_error(gt->i915, -EIO);
	if (err)
		goto err_gt;

	intel_uc_init_late(&gt->uc);

	intel_migrate_init(&gt->migrate, gt);

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}

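/*
 * Teardown runs roughly in stages: intel_gt_driver_unregister() first
 * fences off new users and wedges the GT, intel_gt_driver_remove() then
 * disables the hardware and releases the engines,
 * intel_gt_driver_release() frees the remaining per-GT state, and
 * intel_gt_driver_late_release_all() finally drops whatever inflight RCU
 * frees may still reference.
 */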
void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_migrate_fini(&gt->migrate);
	intel_uc_driver_remove(&gt->uc);

	intel_engines_release(gt);

	intel_gt_flush_buffer_pool(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	intel_gt_sysfs_unregister(gt);
	intel_rps_driver_unregister(&gt->rps);
	intel_gsc_fini(&gt->gsc);

	/*
	 * If we unload the driver and wedge before the GSC worker is complete,
	 * the worker will hit an error on its submission to the GSC engine and
	 * then exit. This is hard for a user to hit, but it is reproducible
	 * when skipping selftests. The error is handled gracefully by the
	 * worker, so there are no functional issues, but we still end up with
	 * an error message in dmesg, which is something we want to avoid as
	 * this is a supported scenario. We could modify the worker to better
	 * handle a wedging occurring during its execution, but that gets
	 * complicated for a couple of reasons:
	 * - We do want the error on runtime wedging, because there are
	 *   implications for subsystems outside of GT (e.g., PXP, HDCP); it's
	 *   only the error on driver unload that we want to silence.
	 * - The worker is responsible for multiple submissions (GSC FW load,
	 *   HuC auth, SW proxy), so all of those would have to be adapted to
	 *   handle the wedged_on_fini scenario.
	 * Therefore, it's much simpler to just wait for the worker to be done
	 * before wedging on driver removal, also considering that the worker
	 * will likely already be idle in the great majority of non-selftest
	 * scenarios.
	 */
	intel_gsc_uc_flush_work(&gt->uc.gsc);

	/*
	 * Upon unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged_on_fini(gt);

	/* Scrub all HW state upon release */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_reset(gt, ALL_ENGINES);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_wa_list_free(&gt->wa_list);
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
	intel_gt_fini_buffer_pool(gt);
	intel_gt_fini_hwconfig(gt);
}

void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	/* We need to wait for inflight RCU frees to release their grip */
	rcu_barrier();

	for_each_gt(gt, i915, id) {
		intel_uc_driver_late_release(&gt->uc);
		intel_gt_fini_requests(gt);
		intel_gt_fini_reset(gt);
		intel_gt_fini_timelines(gt);
		intel_gt_fini_tlb(gt);
		intel_engines_free(gt);
	}
}

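/*
 * Per-tile MMIO setup. The root GT reuses the uncore and irq_lock set up
 * in intel_root_gt_init_early(); remote tiles allocate their own here
 * before running the common early init.
 */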
static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
{
	int ret;

	if (!gt_is_root(gt)) {
		struct intel_uncore *uncore;
		spinlock_t *irq_lock;

		uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
		if (!uncore)
			return -ENOMEM;

		irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
		if (!irq_lock)
			return -ENOMEM;

		gt->uncore = uncore;
		gt->irq_lock = irq_lock;

		intel_gt_common_init_early(gt);
	}

	intel_uncore_init_early(gt->uncore, gt);

	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
	if (ret)
		return ret;

	gt->phys_addr = phys_addr;

	return 0;
}

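/*
 * Discover every GT on the device: the primary GT always exists and maps
 * at the start of the MMIO BAR, while any extra GTs (remote tiles or a
 * standalone media GT) are described by the platform's extra_gt_list and
 * live at fixed offsets within the same BAR.
 */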
int intel_gt_probe_all(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_gt *gt = to_gt(i915);
	const struct intel_gt_definition *gtdef;
	phys_addr_t phys_addr;
	unsigned int mmio_bar;
	unsigned int i;
	int ret;

	mmio_bar = intel_mmio_bar(GRAPHICS_VER(i915));
	phys_addr = pci_resource_start(pdev, mmio_bar);

	/*
	 * We always have at least one primary GT on any device
	 * and it has already been initialized early during probe
	 * in i915_driver_probe()
	 */
	gt->i915 = i915;
	gt->name = "Primary GT";
	gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;

	gt_dbg(gt, "Setting up %s\n", gt->name);
	ret = intel_gt_tile_setup(gt, phys_addr);
	if (ret)
		return ret;

	i915->gt[0] = gt;

	if (!HAS_EXTRA_GT_LIST(i915))
		return 0;

	for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
	     gtdef->name != NULL;
	     i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
		gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
		if (!gt) {
			ret = -ENOMEM;
			goto err;
		}

		gt->i915 = i915;
		gt->name = gtdef->name;
		gt->type = gtdef->type;
		gt->info.engine_mask = gtdef->engine_mask;
		gt->info.id = i;

		gt_dbg(gt, "Setting up %s\n", gt->name);
		if (GEM_WARN_ON(range_overflows_t(resource_size_t,
						  gtdef->mapping_base,
						  SZ_16M,
						  pci_resource_len(pdev, mmio_bar)))) {
			ret = -ENODEV;
			goto err;
		}

		switch (gtdef->type) {
		case GT_TILE:
			ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
			break;

		case GT_MEDIA:
			ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
						     gtdef->gsi_offset);
			break;

		case GT_PRIMARY:
			/* Primary GT should not appear in extra GT list */
		default:
			MISSING_CASE(gtdef->type);
			ret = -ENODEV;
		}

		if (ret)
			goto err;

		i915->gt[i] = gt;
	}

	return 0;

err:
	i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
	intel_gt_release_all(i915);

	return ret;
}

int intel_gt_tiles_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;
	int ret;

	for_each_gt(gt, i915, id) {
		ret = intel_gt_probe_lmem(gt);
		if (ret)
			return ret;
	}

	return 0;
}

void intel_gt_release_all(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	for_each_gt(gt, i915, id)
		i915->gt[id] = NULL;
}

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}

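/*
 * Pick a CPU mapping type for @obj: lmem objects must always be mapped
 * write-combined, Wa_22016122933 forces WC on the affected media GT, and
 * otherwise write-back is used only when the platform has an LLC or the
 * caller demands coherency.
 */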
enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
					      struct drm_i915_gem_object *obj,
					      bool always_coherent)
{
	/*
	 * Wa_22016122933: always return I915_MAP_WC for Media
	 * version 13.0 when the object is on the Media GT
	 */
	if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
		return I915_MAP_WC;
	if (HAS_LLC(gt->i915) || always_coherent)
		return I915_MAP_WB;
	else
		return I915_MAP_WC;
}