// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 **************************************************************************/

#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>

#include "power.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_irq.h"
#include "psb_reg.h"

/*
 * inline functions
 */

static inline u32 gma_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	if (pipe == 2)
		return PIPECSTAT;
	BUG();
}

static inline u32 gma_pipe_event(int pipe)
{
	if (pipe == 0)
		return _PSB_PIPEA_EVENT_FLAG;
	if (pipe == 1)
		return _MDFLD_PIPEB_EVENT_FLAG;
	if (pipe == 2)
		return _MDFLD_PIPEC_EVENT_FLAG;
	BUG();
}

static inline u32 gma_pipeconf(int pipe)
{
	if (pipe == 0)
		return PIPEACONF;
	if (pipe == 1)
		return PIPEBCONF;
	if (pipe == 2)
		return PIPECCONF;
	BUG();
}

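/*
 * Set the given enable bits in the cached and hardware PIPExSTAT register
 * of a pipe, clearing the matching status bits at the same time. The
 * register is only written while the display power island is up.
 */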
void gma_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = gma_pipestat(pipe);
		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		if (gma_power_begin(&dev_priv->dev, false)) {
			u32 writeVal = PSB_RVDC32(reg);
			writeVal |= (mask | (mask >> 16));
			PSB_WVDC32(writeVal, reg);
			(void) PSB_RVDC32(reg);
			gma_power_end(&dev_priv->dev);
		}
	}
}

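/*
 * Clear the given enable bits in the cached and hardware PIPExSTAT register
 * of a pipe. As above, the register is only written while the display power
 * island is up.
 */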
void gma_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = gma_pipestat(pipe);
		dev_priv->pipestat[pipe] &= ~mask;
		if (gma_power_begin(&dev_priv->dev, false)) {
			u32 writeVal = PSB_RVDC32(reg);
			writeVal &= ~mask;
			PSB_WVDC32(writeVal, reg);
			(void) PSB_RVDC32(reg);
			gma_power_end(&dev_priv->dev);
		}
	}
}

/*
 * Display controller interrupt handler for pipe event.
 */
static void gma_pipe_event_handler(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	uint32_t pipe_stat_val = 0;
	uint32_t pipe_stat_reg = gma_pipestat(pipe);
	uint32_t pipe_enable = dev_priv->pipestat[pipe];
	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
	uint32_t pipe_clear;
	uint32_t i = 0;

	spin_lock(&dev_priv->irqmask_lock);

	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
	pipe_stat_val &= pipe_enable | pipe_status;
	pipe_stat_val &= pipe_stat_val >> 16;

	spin_unlock(&dev_priv->irqmask_lock);

	/*
	 * Clear the 2nd level interrupt status bits.
	 * Sometimes the bits are very sticky so we repeat until they unstick.
	 */
	for (i = 0; i < 0xffff; i++) {
		PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
		pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;

		if (pipe_clear == 0)
			break;
	}

	if (pipe_clear)
		dev_err(dev->dev,
			"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
			__func__, pipe, PSB_RVDC32(pipe_stat_reg));

	if (pipe_stat_val & PIPE_VBLANK_STATUS) {
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
		struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
		unsigned long flags;

		drm_handle_vblank(dev, pipe);

		spin_lock_irqsave(&dev->event_lock, flags);
		if (gma_crtc->page_flip_event) {
			drm_crtc_send_vblank_event(crtc,
						   gma_crtc->page_flip_event);
			gma_crtc->page_flip_event = NULL;
			drm_crtc_vblank_put(crtc);
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}

/*
 * Display controller interrupt handler.
 */
static void gma_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
{
	if (vdc_stat & _PSB_IRQ_ASLE)
		psb_intel_opregion_asle_intr(dev);

	if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
		gma_pipe_event_handler(dev, 0);

	if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
		gma_pipe_event_handler(dev, 1);
}

/*
 * SGX interrupt handler
 */
static void gma_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 val, addr;

	if (stat_1 & _PSB_CE_TWOD_COMPLETE)
		val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);

	if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
		val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
		addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
		if (val) {
			if (val & _PSB_CBI_STAT_PF_N_RW)
				DRM_ERROR("SGX MMU page fault:");
			else
				DRM_ERROR("SGX MMU read / write protection fault:");

			if (val & _PSB_CBI_STAT_FAULT_CACHE)
				DRM_ERROR("\tCache requestor");
			if (val & _PSB_CBI_STAT_FAULT_TA)
				DRM_ERROR("\tTA requestor");
			if (val & _PSB_CBI_STAT_FAULT_VDM)
				DRM_ERROR("\tVDM requestor");
			if (val & _PSB_CBI_STAT_FAULT_2D)
				DRM_ERROR("\t2D requestor");
			if (val & _PSB_CBI_STAT_FAULT_PBE)
				DRM_ERROR("\tPBE requestor");
			if (val & _PSB_CBI_STAT_FAULT_TSP)
				DRM_ERROR("\tTSP requestor");
			if (val & _PSB_CBI_STAT_FAULT_ISP)
				DRM_ERROR("\tISP requestor");
			if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
				DRM_ERROR("\tUSSEPDS requestor");
			if (val & _PSB_CBI_STAT_FAULT_HOST)
				DRM_ERROR("\tHost requestor");

			DRM_ERROR("\tMMU failing address is 0x%08x.\n",
				  (unsigned int)addr);
		}
	}

	/* Clear bits */
	PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
	PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
	PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
}

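/*
 * Main interrupt handler: read PSB_INT_IDENTITY_R, dispatch display
 * (pipe event/ASLE), SGX and hotplug work, then acknowledge the bits
 * that were serviced.
 */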
static irqreturn_t gma_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
	u32 sgx_stat_1, sgx_stat_2;
	int handled = 0;

	spin_lock(&dev_priv->irqmask_lock);

	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);

	if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
		dsp_int = 1;

	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
		sgx_int = 1;
	if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
		hotplug_int = 1;

	vdc_stat &= dev_priv->vdc_irq_mask;
	spin_unlock(&dev_priv->irqmask_lock);

	if (dsp_int) {
		gma_vdc_interrupt(dev, vdc_stat);
		handled = 1;
	}

	if (sgx_int) {
		sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
		sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
		gma_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
		handled = 1;
	}

	/* Note: this bit has other meanings on some devices, so we will
	   need to address that later if it ever matters */
	if (hotplug_int && dev_priv->ops->hotplug) {
		handled = dev_priv->ops->hotplug(dev);
		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
	}

	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
	rmb();

	if (!handled)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

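/*
 * Mask and disable all VDC and SGX interrupt sources and build the initial
 * vdc_irq_mask before the interrupt handler is requested.
 */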
void gma_irq_preinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
	PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
	PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);

	if (dev->vblank[0].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	if (dev->vblank[1].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

	/* Revisit this area - want per device masks ? */
	if (dev_priv->ops->hotplug)
		dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
	dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;

	/* This register is safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

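/*
 * Enable the interrupts selected in vdc_irq_mask, the SGX 2D-complete and
 * BIF fault events, per-pipe vblank interrupts for enabled CRTCs, and
 * hotplug detection where the device supports it.
 */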
void gma_irq_postinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	/* Enable 2D and MMU fault interrupts */
	PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
	PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */

	/* This register is safe even if display island is off */
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
		else
			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, true);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

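/*
 * Full interrupt setup: optionally switch to MSI, then preinstall, request
 * the shared interrupt line and postinstall.
 */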
int gma_irq_install(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int ret;

	if (dev_priv->use_msi && pci_enable_msi(pdev)) {
		dev_warn(dev->dev, "Enabling MSI failed!\n");
		dev_priv->use_msi = false;
	}

	if (pdev->irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	gma_irq_preinstall(dev);

	/* PCI devices require shared interrupts. */
	ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
	if (ret)
		return ret;

	gma_irq_postinstall(dev);

	return 0;
}

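/*
 * Tear down interrupt handling: disable hotplug and vblank sources, keep
 * only the SGX/MSVDX/TOPAZ bits enabled, acknowledge anything still
 * pending, then release the interrupt line and MSI if it was enabled.
 */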
void gma_irq_uninstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, false);

	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
				  _PSB_IRQ_MSVDX_FLAG |
				  _LNC_IRQ_TOPAZ_FLAG;

	/* These two registers are safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);

	wmb();

	/* This register is safe even if display island is off */
	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	free_irq(pdev->irq, dev);
	if (dev_priv->use_msi)
		pci_disable_msi(pdev);
}

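/*
 * Enable vblank interrupts for the pipe behind this CRTC. Returns -EINVAL
 * if the pipe is not currently enabled.
 */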
int gma_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;
	uint32_t reg_val = 0;
	uint32_t pipeconf_reg = gma_pipeconf(pipe);

	if (gma_power_begin(dev, false)) {
		reg_val = REG_READ(pipeconf_reg);
		gma_power_end(dev);
	}

	if (!(reg_val & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (pipe == 0)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	else if (pipe == 1)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	gma_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	return 0;
}

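/*
 * Disable vblank interrupts for the pipe behind this CRTC.
 */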
void gma_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (pipe == 0)
		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
	else if (pipe == 1)
		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;

	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	gma_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 gma_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	uint32_t high_frame = PIPEAFRAMEHIGH;
	uint32_t low_frame = PIPEAFRAMEPIXEL;
	uint32_t pipeconf_reg = PIPEACONF;
	uint32_t reg_val = 0;
	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;

	switch (pipe) {
	case 0:
		break;
	case 1:
		high_frame = PIPEBFRAMEHIGH;
		low_frame = PIPEBFRAMEPIXEL;
		pipeconf_reg = PIPEBCONF;
		break;
	case 2:
		high_frame = PIPECFRAMEHIGH;
		low_frame = PIPECFRAMEPIXEL;
		pipeconf_reg = PIPECCONF;
		break;
	default:
		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
		return 0;
	}

	if (!gma_power_begin(dev, false))
		return 0;

	reg_val = REG_READ(pipeconf_reg);

	if (!(reg_val & PIPEACONF_ENABLE)) {
		dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
			pipe);
		goto err_gma_power_end;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	count = (high1 << 8) | low;

err_gma_power_end:
	gma_power_end(dev);

	return count;
}