/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include "intel_uncore.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_ct.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;

/*
 * Top level structure of GuC. It handles firmware loading and manages the
 * client pool and doorbells. intel_guc owns an intel_guc_client, which
 * replaces the legacy execlist submission.
 */
struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;
	struct intel_guc_ct ct;

	/* intel_guc_recv interrupt related state */
	spinlock_t irq_lock;
	unsigned int msg_enabled_mask;

	struct {
		bool enabled;
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	bool submission_supported;

	struct i915_vma *ads_vma;
	struct __guc_ads_blob *ads_blob;

	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;
	struct ida stage_ids;
	struct i915_vma *shared_data;
	void *shared_data_vaddr;

	struct intel_guc_client *execbuf_client;

	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
	/* Cyclic counter mod pagesize */
	u32 db_cacheline;

	/* Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/* GuC's FW specific registers used in MMIO send */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/* Store msg (e.g. log flush) that we see while CTBs are disabled */
	u32 mmio_msg;

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/* GuC's FW specific send function */
	int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
		    u32 *response_buf, u32 response_buf_size);

	/* GuC's FW specific event handler function */
	void (*handler)(struct intel_guc *guc);

	/* GuC's FW specific notify function */
	void (*notify)(struct intel_guc *guc);
};
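
/*
 * Transport selection sketch (condensed from guc_enable_communication() in
 * intel_uc.c; exact call sites may differ by kernel version): once CTBs are
 * up, the vfuncs above are flipped from the MMIO/nop defaults to the CT
 * variants declared in intel_guc_ct.h.
 *
 *	guc->send = intel_guc_send_ct;
 *	guc->handler = intel_guc_to_host_event_handler_ct;
 *
 * While CTBs are down, intel_guc_send_mmio()/intel_guc_send_nop() and the
 * nop event handler below are installed instead.
 */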

static inline int
intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return guc->send(guc, action, len, NULL, 0);
}
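
/*
 * Usage sketch (mirrors intel_guc_auth_huc() in intel_guc.c): a request is
 * a u32 array whose first dword is an action code from intel_guc_fwif.h,
 * followed by the action's parameters. Error handling is left to the caller.
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
 *		rsa_offset
 *	};
 *
 *	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 */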

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return guc->send(guc, action, len, response_buf, response_buf_size);
}
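
/*
 * Hypothetical sketch (SOME_GUC_ACTION and its parameter are placeholders,
 * not real fwif definitions): actions that return data pass a response
 * buffer; on success the return value is typically taken from the GuC
 * response, on failure it is a negative error code.
 *
 *	u32 request[] = { SOME_GUC_ACTION, param };
 *	u32 response[8];
 *	int ret;
 *
 *	ret = intel_guc_send_and_receive(guc, request, ARRAY_SIZE(request),
 *					 response, ARRAY_SIZE(response));
 */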

static inline void intel_guc_notify(struct intel_guc *guc)
{
	guc->notify(guc);
}

static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	guc->handler(guc);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls in the range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) from the GGTT, all
 * gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS set to ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}
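
/*
 * Allocation-side sketch (condensed from intel_guc_allocate_vma() in
 * intel_guc.c): pinning above the bias is what makes the GEM_BUG_ON()
 * above hold for every object handed to the GuC.
 *
 *	u64 flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
 *	int ret = i915_vma_pin(vma, 0, 0, flags);
 *	if (ret)
 *		goto err;
 *	offset = intel_guc_ggtt_offset(guc, vma);
 */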

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
void intel_guc_to_host_event_handler(struct intel_guc *guc);
void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_enabled(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	guc->mmio_msg = 0;

	return 0;
}

static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
{
	return guc->submission_supported;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
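
/*
 * Example (as used by guc_log_enable_flush_events() in intel_guc_log.c):
 * the mask is built from the INTEL_GUC_RECV_MSG_* bits in intel_guc_fwif.h;
 * only messages with an enabled bit are processed, or latched into mmio_msg
 * while CTBs are down.
 *
 *	intel_guc_enable_msg(guc,
 *			     INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
 *			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
 */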

int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine);

#endif