1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
3
4 #include <linux/clk.h>
5 #include <linux/iopoll.h>
6 #include <linux/pm_opp.h>
7 #include <soc/qcom/cmd-db.h>
8
9 #include "a6xx_gpu.h"
10 #include "a6xx_gmu.xml.h"
11
12 static irqreturn_t a6xx_gmu_irq(int irq, void *data)
13 {
14 struct a6xx_gmu *gmu = data;
15 u32 status;
16
17 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
18 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
19
20 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
21 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
22
23 /* Temporary until we can recover safely */
24 BUG();
25 }
26
27 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
28 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
29
30 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
31 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
32 gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
33
34 return IRQ_HANDLED;
35 }
36
37 static irqreturn_t a6xx_hfi_irq(int irq, void *data)
38 {
39 struct a6xx_gmu *gmu = data;
40 u32 status;
41
42 status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
43 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
44
45 if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
46 tasklet_schedule(&gmu->hfi_tasklet);
47
48 if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
49 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
50
51 /* Temporary until we can recover safely */
52 BUG();
53 }
54
55 return IRQ_HANDLED;
56 }
57
58 /* Check to see if the GX rail is still powered */
59 static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
60 {
61 u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
62
63 return !(val &
64 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
65 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
66 }
67
68 static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
69 {
70 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
71
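/*
 * Request the new performance index. Bit layout assumed here: GPU perf
 * index in the low byte, clock-set option (3) in the top nibble.
 */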
72 gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
73 ((3 & 0xf) << 28) | (index & 0xff));
74
75 /*
76 * Send an invalid index as a vote for the bus bandwidth and let the
77 * firmware decide on the right vote
78 */
79 gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
80
81 /* Set and clear the OOB for DCVS to trigger the GMU */
82 a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
83 a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
84
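/* Read back the GMU's response to the DCVS request; 0 is assumed to mean success */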
85 return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
86 }
87
88 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
89 {
90 u32 val;
91 int local = gmu->idle_level;
92
93 /* SPTP and IFPC both report as IFPC */
94 if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
95 local = GMU_IDLE_STATE_IFPC;
96
97 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
98
99 if (val == local) {
100 if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
101 !a6xx_gmu_gx_is_on(gmu))
102 return true;
103 }
104
105 return false;
106 }
107
108 /* Wait for the GMU to get to its most idle state */
109 int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
110 {
111 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
112
113 return spin_until(a6xx_gmu_check_idle_level(gmu));
114 }
115
116 static int a6xx_gmu_start(struct a6xx_gmu *gmu)
117 {
118 int ret;
119 u32 val;
120
121 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
122 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
123
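/* The GMU firmware writes 0xbabeface to FW_INIT_RESULT once it has booted */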
124 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
125 val == 0xbabeface, 100, 10000);
126
127 if (ret)
128 dev_err(gmu->dev, "GMU firmware initialization timed out\n");
129
130 return ret;
131 }
132
133 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
134 {
135 u32 val;
136 int ret;
137
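/* Unmask the HFI message queue interrupt so the GMU can signal responses */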
138 gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
139 A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
140
141 gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
142
143 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
144 val & 1, 100, 10000);
145 if (ret)
146 dev_err(gmu->dev, "Unable to start the HFI queues\n");
147
148 return ret;
149 }
150
151 /* Trigger an OOB (out of band) request to the GMU */
152 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
153 {
154 int ret;
155 u32 val;
156 int request, ack;
157 const char *name;
158
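/*
 * Map the OOB target to its request bit in HOST2GMU_INTR_SET and the
 * matching ack bit that the GMU raises in GMU2HOST_INTR_INFO.
 */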
159 switch (state) {
160 case GMU_OOB_GPU_SET:
161 request = GMU_OOB_GPU_SET_REQUEST;
162 ack = GMU_OOB_GPU_SET_ACK;
163 name = "GPU_SET";
164 break;
165 case GMU_OOB_BOOT_SLUMBER:
166 request = GMU_OOB_BOOT_SLUMBER_REQUEST;
167 ack = GMU_OOB_BOOT_SLUMBER_ACK;
168 name = "BOOT_SLUMBER";
169 break;
170 case GMU_OOB_DCVS_SET:
171 request = GMU_OOB_DCVS_REQUEST;
172 ack = GMU_OOB_DCVS_ACK;
173 name = "GPU_DCVS";
174 break;
175 default:
176 return -EINVAL;
177 }
178
179 /* Trigger the requested OOB operation */
180 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
181
182 /* Wait for the acknowledge interrupt */
183 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
184 val & (1 << ack), 100, 10000);
185
186 if (ret)
187 dev_err(gmu->dev,
188 "Timeout waiting for GMU OOB set %s: 0x%x\n",
189 name,
190 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
191
192 /* Clear the acknowledge interrupt */
193 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
194
195 return ret;
196 }
197
198 /* Clear a pending OOB state in the GMU */
199 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
200 {
201 switch (state) {
202 case GMU_OOB_GPU_SET:
203 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
204 1 << GMU_OOB_GPU_SET_CLEAR);
205 break;
206 case GMU_OOB_BOOT_SLUMBER:
207 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
208 1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
209 break;
210 case GMU_OOB_DCVS_SET:
211 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
212 1 << GMU_OOB_DCVS_CLEAR);
213 break;
214 }
215 }
216
217 /* Enable CPU control of SPTP power collapse */
218 static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
219 {
220 int ret;
221 u32 val;
222
223 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
224
225 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
226 (val & 0x38) == 0x28, 1, 100);
227
228 if (ret) {
229 dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
230 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
231 }
232
233 return ret;
234 }
235
236 /* Disable CPU control of SPTP power collapse */
237 static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
238 {
239 u32 val;
240 int ret;
241
242 /* Make sure retention is on */
243 gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
244
245 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
246
247 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
248 (val & 0x04), 100, 10000);
249
250 if (ret)
251 dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
252 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
253 }
254
255 /* Let the GMU know we are starting a boot sequence */
256 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
257 {
258 u32 vote;
259
260 /* Let the GMU know we are getting ready for boot */
261 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
262
263 /* Choose the "default" power level as the highest available */
264 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
265
266 gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
267 gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
268
269 /* Let the GMU know the boot sequence has started */
270 return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
271 }
272
273 /* Let the GMU know that we are about to go into slumber */
274 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
275 {
276 int ret;
277
278 /* Disable the power counter so the GMU isn't busy */
279 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
280
281 /* Disable SPTP_PC if the CPU is responsible for it */
282 if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
283 a6xx_sptprac_disable(gmu);
284
285 /* Tell the GMU to get ready to slumber */
286 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
287
288 ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
289 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
290
291 if (!ret) {
292 /* Check to see if the GMU really did slumber */
293 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
294 != 0x0f) {
295 dev_err(gmu->dev, "The GMU did not go into slumber\n");
296 ret = -ETIMEDOUT;
297 }
298 }
299
300 /* Put fence into allow mode */
301 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
302 return ret;
303 }
304
305 static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
306 {
307 int ret;
308 u32 val;
309
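/* Request an RSC power-up handshake; the matching ack bit is polled below */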
310 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
311 /* Wait for the register to finish posting */
312 wmb();
313
314 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
315 val & (1 << 1), 100, 10000);
316 if (ret) {
317 dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
318 return ret;
319 }
320
321 ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
322 !val, 100, 10000);
323
324 if (!ret) {
325 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
326
327 /* Re-enable the power counter */
328 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
329 return 0;
330 }
331
332 dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
333 return ret;
334 }
335
336 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
337 {
338 int ret;
339 u32 val;
340
341 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
342
343 ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
344 val, val & (1 << 16), 100, 10000);
345 if (ret)
346 dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
347
348 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
349 }
350
351 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
352 {
353 /* Disable SDE clock gating */
354 gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
355
356 /* Setup RSC PDC handshake for sleep and wakeup */
357 gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
358 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
359 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
360 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
361 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
362 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
363 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
364 gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
365 gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
366 gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
367 gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
368
369 /* Load RSC sequencer uCode for sleep and wakeup */
370 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
371 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
372 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
373 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
374 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
375
376 /* Load PDC sequencer uCode for power up and power down sequence */
377 pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
378 pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
379 pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
380 pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
381 pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
382
383 /* Set TCS commands used by PDC sequence for low power modes */
384 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
385 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
386 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
387 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
388 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
389 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
390 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
391 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
392 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
393 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
394 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
395 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
396 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
397 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
398 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
399 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
400 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
401 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
402 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
403 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
404 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
405 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
406 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
407 pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
408
409 /* Setup GPU PDC */
410 pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
411 pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
412
413 /* ensure no writes happen before the uCode is fully written */
414 wmb();
415 }
416
417 /*
418 * The lowest 16 bits of this value are the number of XO clock cycles for main
419 * hysteresis, which is set at 0x1680 cycles (300 us). The upper 16 bits are
420 * for the shorter hysteresis that happens after the main one - this is 0xa (0.5 us)
421 */
422
423 #define GMU_PWR_COL_HYST 0x000a1680
424
425 /* Set up the idle state for the GMU */
426 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
427 {
428 /* Disable GMU WB/RB buffer */
429 gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
430
431 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
432
433 switch (gmu->idle_level) {
434 case GMU_IDLE_STATE_IFPC:
435 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
436 GMU_PWR_COL_HYST);
437 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
438 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
439 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
440 /* Fall through */
441 case GMU_IDLE_STATE_SPTP:
442 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
443 GMU_PWR_COL_HYST);
444 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
445 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
446 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
447 }
448
449 /* Enable RPMh GPU client */
450 gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
451 A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
452 A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
453 A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
454 A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
455 A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
456 A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
457 }
458
459 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
460 {
461 static bool rpmh_init;
462 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
463 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
464 int i, ret;
465 u32 chipid;
466 u32 *image;
467
468 if (state == GMU_WARM_BOOT) {
469 ret = a6xx_rpmh_start(gmu);
470 if (ret)
471 return ret;
472 } else {
473 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
474 "GMU firmware is not loaded\n"))
475 return -ENOENT;
476
477 /* Sanity check the size of the firmware that was loaded */
478 if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
479 dev_err(gmu->dev,
480 "GMU firmware is bigger than the available region\n");
481 return -EINVAL;
482 }
483
484 /* Turn on register retention */
485 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
486
487 /* We only need to load the RPMh microcode once */
488 if (!rpmh_init) {
489 a6xx_gmu_rpmh_init(gmu);
490 rpmh_init = true;
491 } else if (state != GMU_RESET) {
492 ret = a6xx_rpmh_start(gmu);
493 if (ret)
494 return ret;
495 }
496
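/* Copy the GMU firmware image word by word into the CM3 instruction TCM */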
497 image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
498
499 for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
500 gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
501 image[i]);
502 }
503
504 gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
505 gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
506
507 /* Write the iova of the HFI table */
508 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
509 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
510
511 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
512 (1 << 31) | (0xa << 18) | (0xa0));
513
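/* Pack the chip id (core, major, minor, patchid) for the GMU firmware */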
514 chipid = adreno_gpu->rev.core << 24;
515 chipid |= adreno_gpu->rev.major << 16;
516 chipid |= adreno_gpu->rev.minor << 12;
517 chipid |= adreno_gpu->rev.patchid << 8;
518
519 gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
520
521 /* Set up the lowest idle level on the GMU */
522 a6xx_gmu_power_config(gmu);
523
524 ret = a6xx_gmu_start(gmu);
525 if (ret)
526 return ret;
527
528 ret = a6xx_gmu_gfx_rail_on(gmu);
529 if (ret)
530 return ret;
531
532 /* Enable SPTP_PC if the CPU is responsible for it */
533 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
534 ret = a6xx_sptprac_enable(gmu);
535 if (ret)
536 return ret;
537 }
538
539 ret = a6xx_gmu_hfi_start(gmu);
540 if (ret)
541 return ret;
542
543 /* FIXME: Do we need this wmb() here? */
544 wmb();
545
546 return 0;
547 }
548
549 #define A6XX_HFI_IRQ_MASK \
550 (A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
551 A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
552
553 #define A6XX_GMU_IRQ_MASK \
554 (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
555 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
556 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
557
558 static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
559 {
560 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
561 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
562
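/* Unmask only the interrupts we handle; everything else stays masked */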
563 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
564 ~A6XX_GMU_IRQ_MASK);
565 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
566 ~A6XX_HFI_IRQ_MASK);
567
568 enable_irq(gmu->gmu_irq);
569 enable_irq(gmu->hfi_irq);
570 }
571
572 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
573 {
574 disable_irq(gmu->gmu_irq);
575 disable_irq(gmu->hfi_irq);
576
577 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
578 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
579 }
580
581 int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
582 {
583 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
584 int ret;
585 u32 val;
586
587 /* Flush all the queues */
588 a6xx_hfi_stop(gmu);
589
590 /* Stop the interrupts */
591 a6xx_gmu_irq_disable(gmu);
592
593 /* Force off SPTP in case the GMU is managing it */
594 a6xx_sptprac_disable(gmu);
595
596 /* Make sure there are no outstanding RPMh votes */
597 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
598 (val & 1), 100, 10000);
599 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
600 (val & 1), 100, 10000);
601 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
602 (val & 1), 100, 10000);
603 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
604 (val & 1), 100, 1000);
605
606 /* Force off the GX GDSC */
607 regulator_force_disable(gmu->gx);
608
609 /* Disable the resources */
610 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
611 pm_runtime_put_sync(gmu->dev);
612
613 /* Re-enable the resources */
614 pm_runtime_get_sync(gmu->dev);
615
616 /* Use a known rate to bring up the GMU */
617 clk_set_rate(gmu->core_clk, 200000000);
618 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
619 if (ret)
620 goto out;
621
622 a6xx_gmu_irq_enable(gmu);
623
624 ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
625 if (!ret)
626 ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
627
628 /* Set the GPU back to the highest power frequency */
629 a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
630
631 out:
632 if (ret)
633 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
634
635 return ret;
636 }
637
638 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
639 {
640 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
641 int status, ret;
642
643 if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
644 return 0;
645
646 /* Turn on the resources */
647 pm_runtime_get_sync(gmu->dev);
648
649 /* Use a known rate to bring up the GMU */
650 clk_set_rate(gmu->core_clk, 200000000);
651 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
652 if (ret)
653 goto out;
654
655 a6xx_gmu_irq_enable(gmu);
656
657 /* Check to see if we are doing a cold or warm boot */
658 status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
659 GMU_WARM_BOOT : GMU_COLD_BOOT;
660
661 ret = a6xx_gmu_fw_start(gmu, status);
662 if (ret)
663 goto out;
664
665 ret = a6xx_hfi_start(gmu, status);
666
667 /* Set the GPU to the highest power frequency */
668 a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
669
670 out:
671 /* Make sure to turn off the boot OOB request on error */
672 if (ret)
673 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
674
675 return ret;
676 }
677
678 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
679 {
680 u32 reg;
681
682 if (!gmu->mmio)
683 return true;
684
685 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
686
687 if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
688 return false;
689
690 return true;
691 }
692
693 int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
694 {
695 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
696 u32 val;
697
698 /*
699 * The GMU may still be in slumber if the GPU was never started, so check
700 * and skip putting it back into slumber in that case
701 */
702 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
703
704 if (val != 0xf) {
705 int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);
706
707 /* Temporary until we can recover safely */
708 BUG_ON(ret);
709
710 /* tell the GMU we want to slumber */
711 a6xx_gmu_notify_slumber(gmu);
712
713 ret = gmu_poll_timeout(gmu,
714 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
715 !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
716 100, 10000);
717
718 /*
719 * Let the user know we failed to slumber but don't worry too
720 * much because we are powering down anyway
721 */
722
723 if (ret)
724 dev_err(gmu->dev,
725 "Unable to slumber GMU: status = 0%x/0%x\n",
726 gmu_read(gmu,
727 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
728 gmu_read(gmu,
729 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
730 }
731
732 /* Turn off HFI */
733 a6xx_hfi_stop(gmu);
734
735 /* Stop the interrupts and mask the hardware */
736 a6xx_gmu_irq_disable(gmu);
737
738 /* Tell RPMh to power off the GPU */
739 a6xx_rpmh_stop(gmu);
740
741 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
742
743 pm_runtime_put_sync(gmu->dev);
744
745 return 0;
746 }
747
748 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
749 {
750 int count, i;
751 u64 iova;
752
753 if (IS_ERR_OR_NULL(bo))
754 return;
755
756 count = bo->size >> PAGE_SHIFT;
757 iova = bo->iova;
758
759 for (i = 0; i < count; i++, iova += PAGE_SIZE) {
760 iommu_unmap(gmu->domain, iova, PAGE_SIZE);
761 __free_pages(bo->pages[i], 0);
762 }
763
764 kfree(bo->pages);
765 kfree(bo);
766 }
767
768 static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
769 size_t size)
770 {
771 struct a6xx_gmu_bo *bo;
772 int ret, count, i;
773
774 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
775 if (!bo)
776 return ERR_PTR(-ENOMEM);
777
778 bo->size = PAGE_ALIGN(size);
779
780 count = bo->size >> PAGE_SHIFT;
781
782 bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
783 if (!bo->pages) {
784 kfree(bo);
785 return ERR_PTR(-ENOMEM);
786 }
787
788 for (i = 0; i < count; i++) {
789 bo->pages[i] = alloc_page(GFP_KERNEL);
790 if (!bo->pages[i])
791 goto err;
792 }
793
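/* Assign the next chunk of uncached IOVA space and map the pages into it */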
794 bo->iova = gmu->uncached_iova_base;
795
796 for (i = 0; i < count; i++) {
797 ret = iommu_map(gmu->domain,
798 bo->iova + (PAGE_SIZE * i),
799 page_to_phys(bo->pages[i]), PAGE_SIZE,
800 IOMMU_READ | IOMMU_WRITE);
801
802 if (ret) {
803 dev_err(gmu->dev, "Unable to map GMU buffer object\n");
804
805 for (i = i - 1 ; i >= 0; i--)
806 iommu_unmap(gmu->domain,
807 bo->iova + (PAGE_SIZE * i),
808 PAGE_SIZE);
809
810 goto err;
811 }
812 }
813
814 bo->virt = vmap(bo->pages, count, VM_IOREMAP,
815 pgprot_writecombine(PAGE_KERNEL));
816 if (!bo->virt)
817 goto err;
818
819 /* Align future IOVA addresses on 1MB boundaries */
820 gmu->uncached_iova_base += ALIGN(size, SZ_1M);
821
822 return bo;
823
824 err:
825 for (i = 0; i < count; i++) {
826 if (bo->pages[i])
827 __free_pages(bo->pages[i], 0);
828 }
829
830 kfree(bo->pages);
831 kfree(bo);
832
833 return ERR_PTR(-ENOMEM);
834 }
835
836 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
837 {
838 int ret;
839
840 /*
841 * The GMU address space is hardcoded to treat the range
842 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
843 * between the GMU and the CPU will live in this space
844 */
845 gmu->uncached_iova_base = 0x60000000;
846
847
848 gmu->domain = iommu_domain_alloc(&platform_bus_type);
849 if (!gmu->domain)
850 return -ENODEV;
851
852 ret = iommu_attach_device(gmu->domain, gmu->dev);
853
854 if (ret) {
855 iommu_domain_free(gmu->domain);
856 gmu->domain = NULL;
857 }
858
859 return ret;
860 }
861
862 /* Get the list of RPMh voltage levels from cmd-db */
863 static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
864 {
865 u32 len = cmd_db_read_aux_data_len(id);
866
867 if (!len)
868 return 0;
869
870 if (WARN_ON(len > size))
871 return -EINVAL;
872
873 cmd_db_read_aux_data(id, vals, len);
874
875 /*
876 * The data comes back as an array of unsigned shorts so adjust the
877 * count accordingly
878 */
879 return len >> 1;
880 }
881
882 /* Return the 'arc-level' for the given frequency */
883 static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
884 {
885 struct dev_pm_opp *opp;
886 struct device_node *np;
887 u32 val = 0;
888
889 if (!freq)
890 return 0;
891
892 opp = dev_pm_opp_find_freq_exact(dev, freq, true);
893 if (IS_ERR(opp))
894 return 0;
895
896 np = dev_pm_opp_get_of_node(opp);
897
898 if (np) {
899 of_property_read_u32(np, "qcom,level", &val);
900 of_node_put(np);
901 }
902
903 dev_pm_opp_put(opp);
904
905 return val;
906 }
907
908 static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
909 unsigned long *freqs, int freqs_count,
910 u16 *pri, int pri_count,
911 u16 *sec, int sec_count)
912 {
913 int i, j;
914
915 /* Construct a vote for each frequency */
916 for (i = 0; i < freqs_count; i++) {
917 u8 pindex = 0, sindex = 0;
918 u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);
919
920 /* Get the primary index that matches the arc level */
921 for (j = 0; j < pri_count; j++) {
922 if (pri[j] >= level) {
923 pindex = j;
924 break;
925 }
926 }
927
928 if (j == pri_count) {
929 dev_err(dev,
930 "Level %u not found in in the RPMh list\n",
931 level);
932 dev_err(dev, "Available levels:\n");
933 for (j = 0; j < pri_count; j++)
934 dev_err(dev, " %u\n", pri[j]);
935
936 return -EINVAL;
937 }
938
939 /*
940 * Look for a level in the secondary list that matches. If
941 * nothing fits, use the maximum non-zero vote
942 */
943
944 for (j = 0; j < sec_count; j++) {
945 if (sec[j] >= level) {
946 sindex = j;
947 break;
948 } else if (sec[j]) {
949 sindex = j;
950 }
951 }
952
953 /* Construct the vote */
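/* arc level in bits 16-31, secondary index in bits 8-15, primary index in bits 0-7 */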
954 votes[i] = ((pri[pindex] & 0xffff) << 16) |
955 (sindex << 8) | pindex;
956 }
957
958 return 0;
959 }
960
961 /*
962 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
963 * to construct the list of votes on the CPU and send it over. Query the RPMh
964 * voltage levels and build the votes
965 */
966
967 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
968 {
969 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
970 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
971 struct msm_gpu *gpu = &adreno_gpu->base;
972
973 u16 gx[16], cx[16], mx[16];
974 u32 gxcount, cxcount, mxcount;
975 int ret;
976
977 /* Get the list of available voltage levels for each component */
978 gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
979 cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
980 mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));
981
982 /* Build the GX votes */
983 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
984 gmu->gpu_freqs, gmu->nr_gpu_freqs,
985 gx, gxcount, mx, mxcount);
986
987 /* Build the CX votes */
988 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
989 gmu->gmu_freqs, gmu->nr_gmu_freqs,
990 cx, cxcount, mx, mxcount);
991
992 return ret;
993 }
994
995 static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
996 u32 size)
997 {
998 int count = dev_pm_opp_get_opp_count(dev);
999 struct dev_pm_opp *opp;
1000 int i, index = 0;
1001 unsigned long freq = 1;
1002
1003 /*
1004 * The OPP table doesn't contain the "off" frequency level so we need to
1005 * add 1 to the table size to account for it
1006 */
1007
1008 if (WARN(count + 1 > size,
1009 "The GMU frequency table is being truncated\n"))
1010 count = size - 1;
1011
1012 /* Set the "off" frequency */
1013 freqs[index++] = 0;
1014
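/* Walk the OPP table in ascending order, bumping freq past each level found */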
1015 for (i = 0; i < count; i++) {
1016 opp = dev_pm_opp_find_freq_ceil(dev, &freq);
1017 if (IS_ERR(opp))
1018 break;
1019
1020 dev_pm_opp_put(opp);
1021 freqs[index++] = freq++;
1022 }
1023
1024 return index;
1025 }
1026
1027 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
1028 {
1029 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1030 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1031 struct msm_gpu *gpu = &adreno_gpu->base;
1032
1033 int ret = 0;
1034
1035 /*
1036 * The GMU handles its own frequency switching so build a list of
1037 * available frequencies to send during initialization
1038 */
1039 ret = dev_pm_opp_of_add_table(gmu->dev);
1040 if (ret) {
1041 dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
1042 return ret;
1043 }
1044
1045 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
1046 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
1047
1048 /*
1049 * The GMU also handles GPU frequency switching so build a list
1050 * from the GPU OPP table
1051 */
1052 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
1053 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
1054
1055 /* Build the list of RPMh votes that we'll send to the GMU */
1056 return a6xx_gmu_rpmh_votes_init(gmu);
1057 }
1058
1059 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
1060 {
1061 int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);
1062
1063 if (ret < 1)
1064 return ret;
1065
1066 gmu->nr_clocks = ret;
1067
1068 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
1069 gmu->nr_clocks, "gmu");
1070
1071 return 0;
1072 }
1073
1074 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
1075 const char *name)
1076 {
1077 void __iomem *ret;
1078 struct resource *res = platform_get_resource_byname(pdev,
1079 IORESOURCE_MEM, name);
1080
1081 if (!res) {
1082 dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
1083 return ERR_PTR(-EINVAL);
1084 }
1085
1086 ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1087 if (!ret) {
1088 dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
1089 return ERR_PTR(-EINVAL);
1090 }
1091
1092 return ret;
1093 }
1094
1095 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
1096 const char *name, irq_handler_t handler)
1097 {
1098 int irq, ret;
1099
1100 irq = platform_get_irq_byname(pdev, name);
1101
1102 ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
1103 name, gmu);
1104 if (ret) {
1105 dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
1106 return ret;
1107 }
1108
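/* Leave the IRQ disabled until a6xx_gmu_irq_enable() turns it on at boot */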
1109 disable_irq(irq);
1110
1111 return irq;
1112 }
1113
1114 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
1115 {
1116 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1117
1118 if (IS_ERR_OR_NULL(gmu->mmio))
1119 return;
1120
1121 pm_runtime_disable(gmu->dev);
1122 a6xx_gmu_stop(a6xx_gpu);
1123
1124 a6xx_gmu_irq_disable(gmu);
1125 a6xx_gmu_memory_free(gmu, gmu->hfi);
1126
1127 iommu_detach_device(gmu->domain, gmu->dev);
1128
1129 iommu_domain_free(gmu->domain);
1130 }
1131
1132 int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
1133 {
1134 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1135 struct platform_device *pdev = of_find_device_by_node(node);
1136 int ret;
1137
1138 if (!pdev)
1139 return -ENODEV;
1140
1141 gmu->dev = &pdev->dev;
1142
1143 of_dma_configure(gmu->dev, node, false);
1144
1145 /* For now, don't do anything fancy until we get our feet under us */
1146 gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
1147
1148 pm_runtime_enable(gmu->dev);
1149 gmu->gx = devm_regulator_get(gmu->dev, "vdd");
1150
1151 /* Get the list of clocks */
1152 ret = a6xx_gmu_clocks_probe(gmu);
1153 if (ret)
1154 return ret;
1155
1156 /* Set up the IOMMU context bank */
1157 ret = a6xx_gmu_memory_probe(gmu);
1158 if (ret)
1159 return ret;
1160
1161 /* Allocate memory for the HFI queues */
1162 gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
1163 if (IS_ERR(gmu->hfi))
1164 goto err;
1165
1166 /* Allocate memory for the GMU debug region */
1167 gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
1168 if (IS_ERR(gmu->debug))
1169 goto err;
1170
1171 /* Map the GMU registers */
1172 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1173
1174 /* Map the GPU power domain controller registers */
1175 gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
1176
1177 if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
1178 goto err;
1179
1180 /* Get the HFI and GMU interrupts */
1181 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
1182 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
1183
1184 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
1185 goto err;
1186
1187 /* Set up a tasklet to handle GMU HFI responses */
1188 tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
1189
1190 /* Get the power levels for the GMU and GPU */
1191 a6xx_gmu_pwrlevels_probe(gmu);
1192
1193 /* Set up the HFI queues */
1194 a6xx_hfi_init(gmu);
1195
1196 return 0;
1197 err:
1198 a6xx_gmu_memory_free(gmu, gmu->hfi);
1199
1200 if (gmu->domain) {
1201 iommu_detach_device(gmu->domain, gmu->dev);
1202
1203 iommu_domain_free(gmu->domain);
1204 }
1205
1206 return -ENODEV;
1207 }
1208