Lines matching refs:guc
Cross-reference results over the i915 GuC sources: each entry gives the original source line number, the matched code, and the enclosing function; definition lines where guc is a function parameter carry the tag "argument".
12 static void gen8_guc_raise_irq(struct intel_guc *guc) in gen8_guc_raise_irq() argument
14 struct intel_gt *gt = guc_to_gt(guc); in gen8_guc_raise_irq()
19 static void gen11_guc_raise_irq(struct intel_guc *guc) in gen11_guc_raise_irq() argument
21 struct intel_gt *gt = guc_to_gt(guc); in gen11_guc_raise_irq()
26 static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) in guc_send_reg() argument
28 GEM_BUG_ON(!guc->send_regs.base); in guc_send_reg()
29 GEM_BUG_ON(!guc->send_regs.count); in guc_send_reg()
30 GEM_BUG_ON(i >= guc->send_regs.count); in guc_send_reg()
32 return _MMIO(guc->send_regs.base + 4 * i); in guc_send_reg()
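guc_send_reg() maps an index to the i-th 32-bit scratch register of the send bank: the bank's base offset plus 4 bytes per register, guarded by GEM_BUG_ONs against an unconfigured bank or an out-of-range index. A minimal standalone model in plain C; the struct name and the example base/count are illustrative, not the driver's values:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct send_regs { uint32_t base, count; };

    /* The i-th 32-bit register of the bank sits 4 * i bytes past the base. */
    static uint32_t send_reg_offset(const struct send_regs *r, uint32_t i)
    {
        /* mirrors the three GEM_BUG_ON checks in the listing */
        assert(r->base && r->count && i < r->count);
        return r->base + 4 * i;
    }

    int main(void)
    {
        struct send_regs r = { .base = 0xc180, .count = 8 }; /* hypothetical bank */
        printf("reg 3 at 0x%x\n", send_reg_offset(&r, 3));
        return 0;
    }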
35 void intel_guc_init_send_regs(struct intel_guc *guc) in intel_guc_init_send_regs() argument
37 struct intel_gt *gt = guc_to_gt(guc); in intel_guc_init_send_regs()
42 guc->send_regs.base = in intel_guc_init_send_regs()
44 guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; in intel_guc_init_send_regs()
46 guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); in intel_guc_init_send_regs()
47 guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; in intel_guc_init_send_regs()
51 for (i = 0; i < guc->send_regs.count; i++) { in intel_guc_init_send_regs()
53 guc_send_reg(guc, i), in intel_guc_init_send_regs()
56 guc->send_regs.fw_domains = fw_domains; in intel_guc_init_send_regs()
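intel_guc_init_send_regs() picks the scratch bank once per platform (the Gen11+ bank versus the older SOFT_SCRATCH range), then ORs together the forcewake domains of every register in it, so a later send needs only one forcewake get/put for the whole bank. A sketch under those assumptions; the offsets, counts and domain lookup are stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    struct send_regs { uint32_t base, count, fw_domains; };

    /* Stand-in for intel_uncore_forcewake_for_reg(): offset -> domain bit. */
    static uint32_t domain_for(uint32_t offset)
    {
        return offset >= 0x100000 ? 2u : 1u;
    }

    static void init_send_regs(struct send_regs *r, int gen)
    {
        if (gen >= 11) {
            r->base = 0x190240;  /* stand-in for GEN11_SOFT_SCRATCH(0) */
            r->count = 4;        /* stand-in for GEN11_SOFT_SCRATCH_COUNT */
        } else {
            r->base = 0xc180;    /* stand-in for SOFT_SCRATCH(0) */
            r->count = 8;        /* stand-in for GUC_MAX_MMIO_MSG_LEN */
        }

        r->fw_domains = 0;       /* union of domains covering the bank */
        for (uint32_t i = 0; i < r->count; i++)
            r->fw_domains |= domain_for(r->base + 4 * i);
    }

    int main(void)
    {
        struct send_regs r;
        init_send_regs(&r, 11);
        printf("base 0x%x count %u domains 0x%x\n", r.base, r.count, r.fw_domains);
        return 0;
    }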
59 void intel_guc_init_early(struct intel_guc *guc) in intel_guc_init_early() argument
61 struct drm_i915_private *i915 = guc_to_gt(guc)->i915; in intel_guc_init_early()
63 intel_guc_fw_init_early(guc); in intel_guc_init_early()
64 intel_guc_ct_init_early(&guc->ct); in intel_guc_init_early()
65 intel_guc_log_init_early(&guc->log); in intel_guc_init_early()
66 intel_guc_submission_init_early(guc); in intel_guc_init_early()
68 mutex_init(&guc->send_mutex); in intel_guc_init_early()
69 spin_lock_init(&guc->irq_lock); in intel_guc_init_early()
70 guc->send = intel_guc_send_nop; in intel_guc_init_early()
71 guc->handler = intel_guc_to_host_event_handler_nop; in intel_guc_init_early()
73 guc->notify = gen11_guc_raise_irq; in intel_guc_init_early()
74 guc->interrupts.reset = gen11_reset_guc_interrupts; in intel_guc_init_early()
75 guc->interrupts.enable = gen11_enable_guc_interrupts; in intel_guc_init_early()
76 guc->interrupts.disable = gen11_disable_guc_interrupts; in intel_guc_init_early()
78 guc->notify = gen8_guc_raise_irq; in intel_guc_init_early()
79 guc->interrupts.reset = gen9_reset_guc_interrupts; in intel_guc_init_early()
80 guc->interrupts.enable = gen9_enable_guc_interrupts; in intel_guc_init_early()
81 guc->interrupts.disable = gen9_disable_guc_interrupts; in intel_guc_init_early()
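intel_guc_init_early() is mostly wiring: after initializing the sub-components it installs generation-specific hooks (the notify doorbell plus the interrupt reset/enable/disable trio) into function pointers, so the rest of the driver never branches on the platform again. A toy model of that vtable setup; the hook bodies are placeholders:

    #include <stdio.h>

    struct guc;
    typedef void (*guc_hook)(struct guc *);

    struct guc {
        guc_hook notify;                                  /* doorbell to firmware */
        struct { guc_hook reset, enable, disable; } irqs; /* host interrupt ops */
    };

    static void gen8_raise_irq(struct guc *g)  { (void)g; puts("gen8 doorbell"); }
    static void gen11_raise_irq(struct guc *g) { (void)g; puts("gen11 doorbell"); }
    static void gen9_irq_op(struct guc *g)  { (void)g; }
    static void gen11_irq_op(struct guc *g) { (void)g; }

    static void init_early(struct guc *g, int gen)
    {
        if (gen >= 11) {
            g->notify = gen11_raise_irq;
            g->irqs.reset = g->irqs.enable = g->irqs.disable = gen11_irq_op;
        } else {
            g->notify = gen8_raise_irq;
            g->irqs.reset = g->irqs.enable = g->irqs.disable = gen9_irq_op;
        }
    }

    int main(void)
    {
        struct guc g;
        init_early(&g, 12);
        g.notify(&g);  /* dispatches without re-checking the generation */
        return 0;
    }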
85 static int guc_shared_data_create(struct intel_guc *guc) in guc_shared_data_create() argument
90 vma = intel_guc_allocate_vma(guc, PAGE_SIZE); in guc_shared_data_create()
100 guc->shared_data = vma; in guc_shared_data_create()
101 guc->shared_data_vaddr = vaddr; in guc_shared_data_create()
106 static void guc_shared_data_destroy(struct intel_guc *guc) in guc_shared_data_destroy() argument
108 i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP); in guc_shared_data_destroy()
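guc_shared_data_create() allocates one page, pins a CPU mapping, and only then publishes both handles; guc_shared_data_destroy() releases mapping and object in a single call (the I915_VMA_RELEASE_MAP flag). A loose userspace model of that paired lifetime, with malloc standing in for the vma allocator:

    #include <stdlib.h>

    struct shared_data {
        void *vma;    /* stand-in for the pinned i915_vma */
        void *vaddr;  /* CPU mapping of the same page */
    };

    static int shared_data_create(struct shared_data *s, size_t page_size)
    {
        void *vma = malloc(page_size);   /* stand-in for intel_guc_allocate_vma() */
        if (!vma)
            return -1;                   /* the driver would return an errno */
        s->vma = vma;
        s->vaddr = vma;                  /* stand-in for i915_gem_object_pin_map() */
        return 0;
    }

    static void shared_data_destroy(struct shared_data *s)
    {
        free(s->vma);                    /* unpin-and-release, mapping included */
        s->vma = s->vaddr = NULL;
    }

    int main(void)
    {
        struct shared_data s;
        if (shared_data_create(&s, 4096) == 0)
            shared_data_destroy(&s);
        return 0;
    }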
111 static u32 guc_ctl_debug_flags(struct intel_guc *guc) in guc_ctl_debug_flags() argument
113 u32 level = intel_guc_log_get_level(&guc->log); in guc_ctl_debug_flags()
125 static u32 guc_ctl_feature_flags(struct intel_guc *guc) in guc_ctl_feature_flags() argument
129 if (!intel_guc_is_submission_supported(guc)) in guc_ctl_feature_flags()
135 static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) in guc_ctl_ctxinfo_flags() argument
139 if (intel_guc_is_submission_supported(guc)) { in guc_ctl_ctxinfo_flags()
142 base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); in guc_ctl_ctxinfo_flags()
152 static u32 guc_ctl_log_params_flags(struct intel_guc *guc) in guc_ctl_log_params_flags() argument
154 u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT; in guc_ctl_log_params_flags()
193 static u32 guc_ctl_ads_flags(struct intel_guc *guc) in guc_ctl_ads_flags() argument
195 u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT; in guc_ctl_ads_flags()
206 static void guc_init_params(struct intel_guc *guc) in guc_init_params() argument
208 u32 *params = guc->params; in guc_init_params()
211 BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32)); in guc_init_params()
213 params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); in guc_init_params()
214 params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); in guc_init_params()
215 params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc); in guc_init_params()
216 params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); in guc_init_params()
217 params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc); in guc_init_params()
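The guc_ctl_*_flags() helpers each pack one 32-bit init parameter (debug level, feature bits, page-shifted GGTT addresses of the log buffer and ADS), and guc_init_params() drops them into fixed GUC_CTL_* slots of a dword array whose size is checked at build time. A sketch of that packing; the slot names and bit layouts here are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    enum { CTL_CTXINFO, CTL_LOG_PARAMS, CTL_FEATURE, CTL_DEBUG, CTL_ADS, CTL_MAX };

    /* Addresses are passed in page units: shift out the low 12 bits. */
    static uint32_t ctl_log_params(uint32_t log_ggtt_offset)
    {
        uint32_t flags = log_ggtt_offset >> 12;  /* page-aligned buffer address */
        flags |= 1u << 31;                       /* invented "log valid" bit */
        return flags;
    }

    static void init_params(uint32_t params[CTL_MAX],
                            uint32_t log_off, uint32_t ads_off)
    {
        params[CTL_LOG_PARAMS] = ctl_log_params(log_off);
        params[CTL_ADS] = ads_off >> 12;         /* ADS, also in page units */
        params[CTL_CTXINFO] = params[CTL_FEATURE] = params[CTL_DEBUG] = 0;
    }

    int main(void)
    {
        uint32_t params[CTL_MAX];
        init_params(params, 0x20000, 0x30000);
        printf("log slot 0x%08x\n", params[CTL_LOG_PARAMS]);
        return 0;
    }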
228 void intel_guc_write_params(struct intel_guc *guc) in intel_guc_write_params() argument
230 struct intel_uncore *uncore = guc_to_gt(guc)->uncore; in intel_guc_write_params()
243 intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]); in intel_guc_write_params()
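intel_guc_write_params() copies the packed array into SOFT_SCRATCH(1 + i). Note the 1 + i: scratch slot 0 is deliberately left untouched (plausibly reserved for the MMIO send/ack exchange, though the listing itself does not show why). A stub model of the copy:

    #include <stdint.h>
    #include <stdio.h>

    #define SOFT_SCRATCH_BASE 0xc180u  /* hypothetical; 4 bytes per slot */

    static void mmio_write32(uint32_t offset, uint32_t val)
    {
        printf("write 0x%08x -> 0x%x\n", val, offset);  /* stand-in for intel_uncore_write() */
    }

    static void write_params(const uint32_t *params, unsigned int n)
    {
        /* Slot 0 is skipped: params land in SOFT_SCRATCH(1)..SOFT_SCRATCH(n). */
        for (unsigned int i = 0; i < n; i++)
            mmio_write32(SOFT_SCRATCH_BASE + 4 * (1 + i), params[i]);
    }

    int main(void)
    {
        uint32_t params[] = { 0x1, 0x2, 0x3 };
        write_params(params, 3);
        return 0;
    }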
248 int intel_guc_init(struct intel_guc *guc) in intel_guc_init() argument
250 struct intel_gt *gt = guc_to_gt(guc); in intel_guc_init()
253 ret = intel_uc_fw_init(&guc->fw); in intel_guc_init()
257 ret = guc_shared_data_create(guc); in intel_guc_init()
260 GEM_BUG_ON(!guc->shared_data); in intel_guc_init()
262 ret = intel_guc_log_create(&guc->log); in intel_guc_init()
266 ret = intel_guc_ads_create(guc); in intel_guc_init()
269 GEM_BUG_ON(!guc->ads_vma); in intel_guc_init()
271 ret = intel_guc_ct_init(&guc->ct); in intel_guc_init()
275 if (intel_guc_is_submission_supported(guc)) { in intel_guc_init()
280 ret = intel_guc_submission_init(guc); in intel_guc_init()
286 guc_init_params(guc); in intel_guc_init()
294 intel_guc_ct_fini(&guc->ct); in intel_guc_init()
296 intel_guc_ads_destroy(guc); in intel_guc_init()
298 intel_guc_log_destroy(&guc->log); in intel_guc_init()
300 guc_shared_data_destroy(guc); in intel_guc_init()
302 intel_uc_fw_fini(&guc->fw); in intel_guc_init()
304 intel_uc_fw_cleanup_fetch(&guc->fw); in intel_guc_init()
309 void intel_guc_fini(struct intel_guc *guc) in intel_guc_fini() argument
311 struct intel_gt *gt = guc_to_gt(guc); in intel_guc_fini()
313 if (!intel_uc_fw_is_available(&guc->fw)) in intel_guc_fini()
318 if (intel_guc_is_submission_supported(guc)) in intel_guc_fini()
319 intel_guc_submission_fini(guc); in intel_guc_fini()
321 intel_guc_ct_fini(&guc->ct); in intel_guc_fini()
323 intel_guc_ads_destroy(guc); in intel_guc_fini()
324 intel_guc_log_destroy(&guc->log); in intel_guc_fini()
325 guc_shared_data_destroy(guc); in intel_guc_fini()
326 intel_uc_fw_fini(&guc->fw); in intel_guc_fini()
327 intel_uc_fw_cleanup_fetch(&guc->fw); in intel_guc_fini()
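intel_guc_init() acquires its resources in a fixed order (firmware, shared data, log, ADS, CT, optionally submission), and the error path at the bottom of the listing unwinds them in exactly the reverse order; intel_guc_fini() runs the same teardown unconditionally. A compact model of that goto-ladder idiom with stub steps:

    #include <stdio.h>

    static int step(const char *what) { (void)what; return 0; }   /* 0 = success */
    static void undo(const char *what) { printf("undo %s\n", what); }

    static int guc_init_model(void)
    {
        int ret;

        ret = step("fw");     if (ret) goto err_fetch;
        ret = step("shared"); if (ret) goto err_fw;
        ret = step("log");    if (ret) goto err_shared;
        ret = step("ads");    if (ret) goto err_log;
        ret = step("ct");     if (ret) goto err_ads;
        return 0;

        /* Labels fall through, so a failure at step N undoes steps N-1..1. */
    err_ads:    undo("ads");
    err_log:    undo("log");
    err_shared: undo("shared");
    err_fw:     undo("fw");
    err_fetch:  undo("fetch");
        return ret;
    }

    int main(void) { return guc_init_model(); }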
330 int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, in intel_guc_send_nop() argument
337 void intel_guc_to_host_event_handler_nop(struct intel_guc *guc) in intel_guc_to_host_event_handler_nop() argument
345 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, in intel_guc_send_mmio() argument
348 struct intel_uncore *uncore = guc_to_gt(guc)->uncore; in intel_guc_send_mmio()
354 GEM_BUG_ON(len > guc->send_regs.count); in intel_guc_send_mmio()
363 mutex_lock(&guc->send_mutex); in intel_guc_send_mmio()
364 intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains); in intel_guc_send_mmio()
367 intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]); in intel_guc_send_mmio()
369 intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1)); in intel_guc_send_mmio()
371 intel_guc_notify(guc); in intel_guc_send_mmio()
378 guc_send_reg(guc, 0), in intel_guc_send_mmio()
394 int count = min(response_buf_size, guc->send_regs.count - 1); in intel_guc_send_mmio()
398 guc_send_reg(guc, i + 1)); in intel_guc_send_mmio()
405 intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains); in intel_guc_send_mmio()
406 mutex_unlock(&guc->send_mutex); in intel_guc_send_mmio()
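intel_guc_send_mmio() is the register-based transport: take send_mutex so only one MMIO exchange is in flight, hold the precomputed forcewake domains, write the action dwords into the scratch bank, do a posting read, ring the doorbell via intel_guc_notify(), then poll scratch register 0 for the firmware's status and copy out at most send_regs.count - 1 response dwords. A single-threaded toy of that handshake; the ack encoding and the fake_firmware() responder are invented:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reg[8];   /* stand-in for the scratch bank */

    /* Pretend firmware: acks in reg[0], payload in the following slots. */
    static void fake_firmware(void)
    {
        reg[0] = 0xA0000000u;  /* invented "success" status */
        reg[1] = 42;
    }

    static int send_mmio(const uint32_t *action, unsigned int len,
                         uint32_t *resp, unsigned int resp_len)
    {
        /* real code: mutex_lock(send_mutex) + forcewake_get(fw_domains) */
        for (unsigned int i = 0; i < len; i++)
            reg[i] = action[i];
        /* real code: posting read of the last reg, then intel_guc_notify() */
        fake_firmware();

        if ((reg[0] >> 28) != 0xA)   /* real code polls with a timeout */
            return -1;
        for (unsigned int i = 0; i < resp_len && i < 7; i++)
            resp[i] = reg[i + 1];    /* response follows the status register */
        /* real code: forcewake_put + mutex_unlock */
        return 0;
    }

    int main(void)
    {
        uint32_t action[] = { 0x123 }, resp[1] = { 0 };
        if (send_mmio(action, 1, resp, 1) == 0)
            printf("resp %u\n", resp[0]);
        return 0;
    }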
411 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, in intel_guc_to_host_process_recv_msg() argument
420 msg = payload[0] & guc->msg_enabled_mask; in intel_guc_to_host_process_recv_msg()
424 intel_guc_log_handle_flush_event(&guc->log); in intel_guc_to_host_process_recv_msg()
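intel_guc_to_host_process_recv_msg() masks the first payload dword against msg_enabled_mask, so notifications the host has not opted into are dropped, then dispatches per bit; the listing shows only the log-buffer-flush bit being handled. A sketch with an invented bit value:

    #include <stdint.h>
    #include <stdio.h>

    #define MSG_FLUSH_LOG_BUFFER (1u << 1)   /* invented bit position */

    static void handle_log_flush(void) { puts("flush log buffer"); }

    static int process_recv_msg(uint32_t payload0, uint32_t enabled_mask)
    {
        uint32_t msg = payload0 & enabled_mask;  /* drop unsubscribed events */

        if (msg & MSG_FLUSH_LOG_BUFFER)
            handle_log_flush();   /* intel_guc_log_handle_flush_event() */

        return 0;
    }

    int main(void)
    {
        process_recv_msg(MSG_FLUSH_LOG_BUFFER, MSG_FLUSH_LOG_BUFFER);
        return 0;
    }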
429 int intel_guc_sample_forcewake(struct intel_guc *guc) in intel_guc_sample_forcewake() argument
431 struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; in intel_guc_sample_forcewake()
442 return intel_guc_send(guc, action, ARRAY_SIZE(action)); in intel_guc_sample_forcewake()
456 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) in intel_guc_auth_huc() argument
463 return intel_guc_send(guc, action, ARRAY_SIZE(action)); in intel_guc_auth_huc()
470 int intel_guc_suspend(struct intel_guc *guc) in intel_guc_suspend() argument
472 struct intel_uncore *uncore = guc_to_gt(guc)->uncore; in intel_guc_suspend()
493 ret = intel_guc_send(guc, action, ARRAY_SIZE(action)); in intel_guc_suspend()
518 int intel_guc_reset_engine(struct intel_guc *guc, in intel_guc_reset_engine() argument
523 GEM_BUG_ON(!guc->execbuf_client); in intel_guc_reset_engine()
530 data[5] = guc->execbuf_client->stage_id; in intel_guc_reset_engine()
531 data[6] = intel_guc_ggtt_offset(guc, guc->shared_data); in intel_guc_reset_engine()
533 return intel_guc_send(guc, data, ARRAY_SIZE(data)); in intel_guc_reset_engine()
540 int intel_guc_resume(struct intel_guc *guc) in intel_guc_resume() argument
547 return intel_guc_send(guc, action, ARRAY_SIZE(action)); in intel_guc_resume()
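intel_guc_sample_forcewake(), intel_guc_auth_huc(), intel_guc_suspend(), intel_guc_reset_engine() and intel_guc_resume() all share one request shape: a small u32 array whose first element is the action opcode and whose remaining elements are arguments, handed to the installed send hook. A sketch of that pattern; the opcode value is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define ACTION_AUTHENTICATE_HUC 0x4000u  /* hypothetical opcode */

    /* Stand-in for intel_guc_send(): prints instead of hitting hardware. */
    static int guc_send(const uint32_t *action, uint32_t len)
    {
        printf("H2G action 0x%x, %u dwords\n", action[0], len);
        return 0;
    }

    static int auth_huc(uint32_t rsa_offset)
    {
        uint32_t action[] = {
            ACTION_AUTHENTICATE_HUC,
            rsa_offset,   /* GGTT offset of the HuC RSA signature */
        };

        return guc_send(action, sizeof(action) / sizeof(action[0]));
    }

    int main(void) { return auth_huc(0x1000); }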
591 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) in intel_guc_allocate_vma() argument
593 struct intel_gt *gt = guc_to_gt(guc); in intel_guc_allocate_vma()
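intel_guc_allocate_vma() is the common allocator the rest of this file leans on (shared data, log, ADS, stage descriptor pool): create a GEM object of the requested size and pin it into the global GTT so the firmware can address it. A very loose userspace model; the bump allocator stands in for GGTT pinning:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct vma { void *obj; uint64_t ggtt_offset; };

    static struct vma *allocate_vma(uint32_t size, uint64_t *next_free)
    {
        struct vma *v = malloc(sizeof(*v));
        if (!v)
            return NULL;
        v->obj = calloc(1, size);       /* stand-in for i915_gem_object_create() */
        if (!v->obj) {
            free(v);
            return NULL;
        }
        v->ggtt_offset = *next_free;    /* stand-in for pinning into the GGTT */
        *next_free += size;
        return v;
    }

    int main(void)
    {
        uint64_t cursor = 0x100000;     /* arbitrary starting offset */
        struct vma *v = allocate_vma(4096, &cursor);
        if (v) {
            printf("pinned at 0x%llx\n", (unsigned long long)v->ggtt_offset);
            free(v->obj);
            free(v);
        }
        return 0;
    }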