/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "head.h"
#include "ior.h"
#include "channv50.h"
#include "rootnv50.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/ramht.h>
#include <subdev/bios.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
#include <subdev/timer.h>

static const struct nvkm_disp_oclass *
nv50_disp_root_(struct nvkm_disp *base)
{
	return nv50_disp(base)->func->root;
}

static void
nv50_disp_intr_(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);
	disp->func->intr(disp);
}

static void
nv50_disp_fini_(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);
	disp->func->fini(disp);
}

static int
nv50_disp_init_(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);
	return disp->func->init(disp);
}

static void *
nv50_disp_dtor_(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);

	nvkm_ramht_del(&disp->ramht);
	nvkm_gpuobj_del(&disp->inst);

	nvkm_event_fini(&disp->uevent);
	if (disp->wq)
		destroy_workqueue(disp->wq);

	return disp;
}

static int
nv50_disp_oneinit_(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);
	const struct nv50_disp_func *func = disp->func;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	if (func->wndw.cnt) {
		disp->wndw.nr = func->wndw.cnt(&disp->base, &disp->wndw.mask);
		nvkm_debug(subdev, "Window(s): %d (%08lx)\n",
			   disp->wndw.nr, disp->wndw.mask);
	}

	disp->head.nr = func->head.cnt(&disp->base, &disp->head.mask);
	nvkm_debug(subdev, "  Head(s): %d (%02lx)\n",
		   disp->head.nr, disp->head.mask);
	for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
		ret = func->head.new(&disp->base, i);
		if (ret)
			return ret;
	}

	if (func->dac.cnt) {
		disp->dac.nr = func->dac.cnt(&disp->base, &disp->dac.mask);
		nvkm_debug(subdev, "   DAC(s): %d (%02lx)\n",
			   disp->dac.nr, disp->dac.mask);
		for_each_set_bit(i, &disp->dac.mask, disp->dac.nr) {
			ret = func->dac.new(&disp->base, i);
			if (ret)
				return ret;
		}
	}

	if (func->pior.cnt) {
		disp->pior.nr = func->pior.cnt(&disp->base, &disp->pior.mask);
		nvkm_debug(subdev, "  PIOR(s): %d (%02lx)\n",
			   disp->pior.nr, disp->pior.mask);
		for_each_set_bit(i, &disp->pior.mask, disp->pior.nr) {
			ret = func->pior.new(&disp->base, i);
			if (ret)
				return ret;
		}
	}

	disp->sor.nr = func->sor.cnt(&disp->base, &disp->sor.mask);
	nvkm_debug(subdev, "   SOR(s): %d (%02lx)\n",
		   disp->sor.nr, disp->sor.mask);
	for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
		ret = func->sor.new(&disp->base, i);
		if (ret)
			return ret;
	}

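	/* Allocate an instance memory block for the display engine, plus a
	 * RAMHT hash table used to bind channel objects into it.  Chipsets
	 * may override the default 0x1000 hash table size via
	 * func->ramht_size.
	 */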
	ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL,
			      &disp->inst);
	if (ret)
		return ret;

	return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
			      0x1000, 0, disp->inst, &disp->ramht);
}

static const struct nvkm_disp_func
nv50_disp_ = {
	.dtor = nv50_disp_dtor_,
	.oneinit = nv50_disp_oneinit_,
	.init = nv50_disp_init_,
	.fini = nv50_disp_fini_,
	.intr = nv50_disp_intr_,
	.root = nv50_disp_root_,
};

int
nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
	       int index, struct nvkm_disp **pdisp)
{
	struct nv50_disp *disp;
	int ret;

	if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
		return -ENOMEM;
	disp->func = func;
	*pdisp = &disp->base;

	ret = nvkm_disp_ctor(&nv50_disp_, device, index, &disp->base);
	if (ret)
		return ret;

	disp->wq = create_singlethread_workqueue("nvkm-disp");
	if (!disp->wq)
		return -ENOMEM;

	INIT_WORK(&disp->supervisor, func->super);

	return nvkm_event_init(func->uevent, 1, ARRAY_SIZE(disp->chan),
			       &disp->uevent);
}

static u32
nv50_disp_super_iedt(struct nvkm_head *head, struct nvkm_outp *outp,
		     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		     struct nvbios_outp *iedt)
{
	struct nvkm_bios *bios = head->disp->engine.subdev.device->bios;
	const u8  l = ffs(outp->info.link);
	const u16 t = outp->info.hasht;
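	/* Match value for the IED table lookup: one bit per head in the
	 * upper byte, the link index in bits 7:6, and the OR number in the
	 * low bits - presumably to match against the hasht/hashm pair
	 * stored in the VBIOS output table.
	 */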
	const u16 m = (0x0100 << head->id) | (l << 6) | outp->info.or;
	u32 data = nvbios_outp_match(bios, t, m, ver, hdr, cnt, len, iedt);
	if (!data)
		OUTP_DBG(outp, "missing IEDT for %04x:%04x", t, m);
	return data;
}

static void
nv50_disp_super_ied_on(struct nvkm_head *head,
		       struct nvkm_ior *ior, int id, u32 khz)
{
	struct nvkm_subdev *subdev = &head->disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_outp *outp = ior->asy.outp;
	struct nvbios_ocfg iedtrs;
	struct nvbios_outp iedt;
	u8  ver, hdr, cnt, len, flags = 0x00;
	u32 data;

	if (!outp) {
		IOR_DBG(ior, "nothing to attach");
		return;
	}

	/* Lookup IED table for the device. */
	data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
	if (!data)
		return;

	/* Lookup IEDT runtime settings for the current configuration. */
	if (ior->type == SOR) {
		if (ior->asy.proto == LVDS) {
			if (head->asy.or.depth == 24)
				flags |= 0x02;
		}
		if (ior->asy.link == 3)
			flags |= 0x01;
	}

	data = nvbios_ocfg_match(bios, data, ior->asy.proto_evo, flags,
				 &ver, &hdr, &cnt, &len, &iedtrs);
	if (!data) {
		OUTP_DBG(outp, "missing IEDT RS for %02x:%02x",
			 ior->asy.proto_evo, flags);
		return;
	}

	/* Execute the OnInt[23] script for the current frequency. */
	data = nvbios_oclk_match(bios, iedtrs.clkcmp[id], khz);
	if (!data) {
		OUTP_DBG(outp, "missing IEDT RSS %d for %02x:%02x %d khz",
			 id, ior->asy.proto_evo, flags, khz);
		return;
	}

	nvbios_init(subdev, data,
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->asy.link;
		init.head = head->id;
	);
}

static void
nv50_disp_super_ied_off(struct nvkm_head *head, struct nvkm_ior *ior, int id)
{
	struct nvkm_outp *outp = ior->arm.outp;
	struct nvbios_outp iedt;
	u8  ver, hdr, cnt, len;
	u32 data;

	if (!outp) {
		IOR_DBG(ior, "nothing attached");
		return;
	}

	data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
	if (!data)
		return;

	nvbios_init(&head->disp->engine.subdev, iedt.script[id],
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->arm.link;
		init.head = head->id;
	);
}

static struct nvkm_ior *
nv50_disp_super_ior_asy(struct nvkm_head *head)
{
	struct nvkm_ior *ior;
	list_for_each_entry(ior, &head->disp->ior, head) {
		if (ior->asy.head & (1 << head->id)) {
			HEAD_DBG(head, "to %s", ior->name);
			return ior;
		}
	}
	HEAD_DBG(head, "nothing to attach");
	return NULL;
}

static struct nvkm_ior *
nv50_disp_super_ior_arm(struct nvkm_head *head)
{
	struct nvkm_ior *ior;
	list_for_each_entry(ior, &head->disp->ior, head) {
		if (ior->arm.head & (1 << head->id)) {
			HEAD_DBG(head, "on %s", ior->name);
			return ior;
		}
	}
	HEAD_DBG(head, "nothing attached");
	return NULL;
}

void
nv50_disp_super_3_0(struct nv50_disp *disp, struct nvkm_head *head)
{
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're attaching to the head. */
	HEAD_DBG(head, "supervisor 3.0");
	ior = nv50_disp_super_ior_asy(head);
	if (!ior)
		return;

	/* Execute OnInt3 IED script. */
	nv50_disp_super_ied_on(head, ior, 1, head->asy.hz / 1000);

	/* OR-specific handling. */
	if (ior->func->war_3)
		ior->func->war_3(ior);
}

static void
nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior)
{
	struct nvkm_subdev *subdev = &head->disp->engine.subdev;
	const u32      khz = head->asy.hz / 1000;
	const u32 linkKBps = ior->dp.bw * 27000;
	const u32   symbol = 100000;
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	int TU, VTUi, VTUf, VTUa;
	u64 link_data_rate, link_ratio, unk;
	u32 best_diff = 64 * symbol;
	u64 h, v;

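	/* Convert the hblank/vblank pixel counts into link-symbol counts by
	 * scaling with the link:pixel clock ratio and subtracting framing/
	 * packet overhead; the results are presumably used by the hardware
	 * to schedule audio/secondary-data packets during blanking.
	 */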
	/* symbols/hblank - algorithm taken from comments in tegra driver */
	h = head->asy.hblanke + head->asy.htotal - head->asy.hblanks - 7;
	h = h * linkKBps;
	do_div(h, khz);
	h = h - (3 * ior->dp.ef) - (12 / ior->dp.nr);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	v = head->asy.vblanks - head->asy.vblanke - 25;
	v = v * linkKBps;
	do_div(v, khz);
	v = v - ((36 / ior->dp.nr) + 3) - 1;

	ior->func->dp.audio_sym(ior, head->id, h, v);

	/* watermark / activesym */
	link_data_rate = (khz * head->asy.or.depth / 8) / ior->dp.nr;

	/* calculate ratio of packed data rate to link symbol rate */
	link_ratio = link_data_rate * symbol;
	do_div(link_ratio, linkKBps);

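	/* Search, from 64 symbols down to 32, for the transfer unit (TU)
	 * size whose valid-data symbol count per TU is most closely
	 * representable by the hardware as an integer part (VTUi) plus a
	 * fractional adjustment (VTUf/VTUa).  link_ratio is a fixed-point
	 * fraction scaled by 'symbol' (1e5), so link_ratio * TU below is
	 * the valid symbol count in the same scale.
	 */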
	for (TU = 64; ior->func->dp.activesym && TU >= 32; TU--) {
		/* calculate average number of valid symbols in each TU */
		u32 tu_valid = link_ratio * TU;
		u32 calc, diff;

		/* find a hw representation for the fraction.. */
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;

				if (VTUf <= 15) {
					VTUa  = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					VTUa  = 0;
					VTUf  = 1;
					calc += symbol;
				}
			} else {
				VTUa  = 0;
				VTUf  = min((int)(symbol / diff), 15);
				calc += symbol / VTUf;
			}

			diff = calc - tu_valid;
		} else {
			/* no remainder, but the hw doesn't like the fractional
			 * part to be zero.  decrement the integer part and
			 * have the fraction add a whole symbol back
			 */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}

	if (ior->func->dp.activesym) {
		if (!bestTU) {
			nvkm_error(subdev, "unable to determine dp config\n");
			return;
		}
		ior->func->dp.activesym(ior, head->id, bestTU,
					bestVTUa, bestVTUf, bestVTUi);
	} else {
		bestTU = 64;
	}

	/* XXX close to vbios numbers, but not right */
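	/* The expression below works out to bestTU * r * (1 - r) + 6, where
	 * r = link_ratio / symbol is the fraction of each TU carrying pixel
	 * data.
	 */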
	unk  = (symbol - link_ratio) * bestTU;
	unk *= link_ratio;
	do_div(unk, symbol);
	do_div(unk, symbol);
	unk += 6;

	ior->func->dp.watermark(ior, head->id, unk);
}

void
nv50_disp_super_2_2(struct nv50_disp *disp, struct nvkm_head *head)
{
	const u32 khz = head->asy.hz / 1000;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're attaching to the head. */
	HEAD_DBG(head, "supervisor 2.2");
	ior = nv50_disp_super_ior_asy(head);
	if (!ior)
		return;

	/* For some reason, NVIDIA decided not to:
	 *
	 * A) Give dual-link LVDS a separate EVO protocol, like for TMDS.
	 *  and
	 * B) Use SetControlOutputResource.PixelDepth on LVDS.
	 *
	 * Override the values we usually read from HW with the same
	 * data we pass through an ioctl instead.
	 */
	if (ior->type == SOR && ior->asy.proto == LVDS) {
		head->asy.or.depth = (disp->sor.lvdsconf & 0x0200) ? 24 : 18;
		ior->asy.link      = (disp->sor.lvdsconf & 0x0100) ? 3  : 1;
	}

	/* Handle any link training, etc. */
	if ((outp = ior->asy.outp) && outp->func->acquire)
		outp->func->acquire(outp);

	/* Execute OnInt2 IED script. */
	nv50_disp_super_ied_on(head, ior, 0, khz);

	/* Program RG clock divider. */
	head->func->rgclk(head, ior->asy.rgdiv);

	/* Mode-specific internal DP configuration. */
	if (ior->type == SOR && ior->asy.proto == DP)
		nv50_disp_super_2_2_dp(head, ior);

	/* OR-specific handling. */
	ior->func->clock(ior);
	if (ior->func->war_2)
		ior->func->war_2(ior);
}

void
nv50_disp_super_2_1(struct nv50_disp *disp, struct nvkm_head *head)
{
	struct nvkm_devinit *devinit = disp->base.engine.subdev.device->devinit;
	const u32 khz = head->asy.hz / 1000;
	HEAD_DBG(head, "supervisor 2.1 - %d khz", khz);
	if (khz)
		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head->id, khz);
}

void
nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
{
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're detaching from the head. */
	HEAD_DBG(head, "supervisor 2.0");
	ior = nv50_disp_super_ior_arm(head);
	if (!ior)
		return;

	/* Execute OffInt2 IED script. */
	nv50_disp_super_ied_off(head, ior, 2);

	/* If we're shutting down the OR's only active head, execute
	 * the output path's disable function.
	 */
	if (ior->arm.head == (1 << head->id)) {
		if ((outp = ior->arm.outp) && outp->func->disable)
			outp->func->disable(outp, ior);
	}
}

void
nv50_disp_super_1_0(struct nv50_disp *disp, struct nvkm_head *head)
{
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're detaching from the head. */
	HEAD_DBG(head, "supervisor 1.0");
	ior = nv50_disp_super_ior_arm(head);
	if (!ior)
		return;

	/* Execute OffInt1 IED script. */
	nv50_disp_super_ied_off(head, ior, 1);
}

void
nv50_disp_super_1(struct nv50_disp *disp)
{
	struct nvkm_head *head;
	struct nvkm_ior *ior;

	list_for_each_entry(head, &disp->base.head, head) {
		head->func->state(head, &head->arm);
		head->func->state(head, &head->asy);
	}

	list_for_each_entry(ior, &disp->base.ior, head) {
		ior->func->state(ior, &ior->arm);
		ior->func->state(ior, &ior->asy);
	}
}

void
nv50_disp_super(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 super = nvkm_rd32(device, 0x610030);

	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);

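	/* The supervisor fires in three phases per modeset: phase 1 (0x10)
	 * snapshots current/pending head and OR state and runs the OffInt1
	 * scripts, phase 2 (0x20) detaches ORs, reroutes outputs, programs
	 * the VPLLs and reattaches/trains, and phase 3 (0x40) runs the
	 * final OnInt3 scripts.  The per-head bits read back from 0x610030
	 * select which heads each phase applies to.
	 */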
	if (disp->super & 0x00000010) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000020 << head->id)))
				continue;
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super & 0x00000020) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		nvkm_outp_route(&disp->base);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000200 << head->id)))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super & 0x00000040) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	nvkm_wr32(device, 0x610030, 0x80000000);
}

static const struct nvkm_enum
nv50_disp_intr_error_type[] = {
	{ 3, "ILLEGAL_MTHD" },
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_STATE" },
	{ 7, "INVALID_HANDLE" },
	{}
};

static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
	{ 0x00, "" },
	{}
};

static void
nv50_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
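	/* Per-channel error info: 0x610080 + chid*8 holds the offending
	 * method offset along with error type/code fields, and
	 * 0x610084 + chid*8 holds the method's data word.
	 */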
	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
	u32 code = (addr & 0x00ff0000) >> 16;
	u32 type = (addr & 0x00007000) >> 12;
	u32 mthd = (addr & 0x00000ffc);
	const struct nvkm_enum *ec, *et;

	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);

	nvkm_error(subdev,
		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
		   type, et ? et->name : "", code, ec ? ec->name : "",
		   chid, mthd, data);

	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}

void
nv50_disp_intr(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x610020);
	u32 intr1 = nvkm_rd32(device, 0x610024);

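	/* 0x610020: bits 16-20 flag per-channel errors, bits 0-4 flag
	 * per-channel notifier/uevent completion.  0x610024: bits 2/3 are
	 * vblank for heads 0/1, and bits 4-6 request the supervisor stages
	 * handled by the workqueue above.
	 */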
	while (intr0 & 0x001f0000) {
		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
		nv50_disp_intr_error(disp, chid);
		intr0 &= ~(0x00010000 << chid);
	}

	while (intr0 & 0x0000001f) {
		u32 chid = __ffs(intr0 & 0x0000001f);
		nv50_disp_chan_uevent_send(disp, chid);
		intr0 &= ~(0x00000001 << chid);
	}

	if (intr1 & 0x00000004) {
		nvkm_disp_vblank(&disp->base, 0);
		nvkm_wr32(device, 0x610024, 0x00000004);
	}

	if (intr1 & 0x00000008) {
		nvkm_disp_vblank(&disp->base, 1);
		nvkm_wr32(device, 0x610024, 0x00000008);
	}

	if (intr1 & 0x00000070) {
		disp->super = (intr1 & 0x00000070);
		queue_work(disp->wq, &disp->supervisor);
		nvkm_wr32(device, 0x610024, disp->super);
	}
}

void
nv50_disp_fini(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	/* disable all interrupts */
	nvkm_wr32(device, 0x610024, 0x00000000);
	nvkm_wr32(device, 0x610020, 0x00000000);
}

int
nv50_disp_init(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_head *head;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.  NFI what the 0x614004 caps are for..
	 */
	tmp = nvkm_rd32(device, 0x614004);
	nvkm_wr32(device, 0x610184, tmp);

	/* ... CRTC caps */
	list_for_each_entry(head, &disp->base.head, head) {
		tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
		nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
		nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800));
		nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800));
		nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
	}

	/* ... PIOR caps */
	for (i = 0; i < disp->pior.nr; i++) {
		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
		nvkm_wr32(device, 0x610024, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x61002c, 0x00000370);
	nvkm_wr32(device, 0x610028, 0x00000000);
	return 0;
}

static const struct nv50_disp_func
nv50_disp = {
	.init = nv50_disp_init,
	.fini = nv50_disp_fini,
	.intr = nv50_disp_intr,
	.uevent = &nv50_disp_chan_uevent,
	.super = nv50_disp_super,
	.root = &nv50_disp_root_oclass,
	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
	.sor = { .cnt = nv50_sor_cnt, .new = nv50_sor_new },
	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
};

int
nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
	return nv50_disp_new_(&nv50_disp, device, index, pdisp);
}