/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"
#include "channv04.h"
#include "regsnv04.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>
#include <engine/sw.h>

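/* layout of per-channel context in RAMFC: each entry gives the field
 * width in bits, its shift within the RAMFC word, the RAMFC offset,
 * the shift within the register, and the PFIFO register it shadows.
 */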
static const struct nv04_fifo_ramfc
nv04_fifo_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};

void
nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently, sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

void
nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const char *
nv_dma_state_err(u32 state)
{
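	/* bits 31:29 of the DMA_STATE register select the error code */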
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}

static bool
nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
{
	struct nvkm_sw *sw = device->sw;
	const int subc = (addr & 0x0000e000) >> 13;
	const int mthd = (addr & 0x00001ffc);
	const u32 mask = 0x0000000f << (subc * 4);
	u32 engine = nvkm_rd32(device, 0x003280);
	bool handled = false;

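	/* route the method to the software engine when the subchannel
	 * isn't bound to a hardware engine; the first two cases fall
	 * through intentionally.
	 */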
	switch (mthd) {
	case 0x0000 ... 0x0000: /* subchannel's engine -> software */
		nvkm_wr32(device, 0x003280, (engine &= ~mask));
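		/* fall through */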
	case 0x0180 ... 0x01fc: /* handle -> instance */
		data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
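		/* fall through */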
	case 0x0100 ... 0x017c:
	case 0x0200 ... 0x1ffc: /* pass method down to sw */
		if (!(engine & mask) && sw)
			handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
		break;
	default:
		break;
	}

	return handled;
}

static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u32 pull0 = nvkm_rd32(device, 0x003250);
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!(pull0 & 0x00000100) ||
	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, chan ? chan->object.client->name : "unknown",
			   (mthd >> 13) & 7, mthd & 0x1ffc, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}

static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	const char *name;

	chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
	name = chan ? chan->object.client->name : "unknown";
	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
				   "state %08x (err: %s) push %08x\n",
			   chid, name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);

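	/* clear DMA_STATE, re-enable the pusher and ack the interrupt */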
	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}

void
nv04_fifo_intr(struct nvkm_fifo *base)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

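	/* block channel switching while the interrupt is serviced; the
	 * previous setting is restored on the way out
	 */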
	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
	get  = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}

nv04_fifo_init(struct nvkm_fifo *base)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_ramht *ramht = imem->ramht;
	struct nvkm_memory *ramro = imem->ramro;
	struct nvkm_memory *ramfc = imem->ramfc;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

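	/* point PFIFO at the hash table, run-out area and per-channel
	 * context RAM in instance memory
	 */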
	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
					    ((ramht->bits - 9) << 16) |
					    (ramht->gpuobj->addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

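	/* start out on the reserved inactive channel (id nr - 1) */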
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}

int
nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       int index, int nr, const struct nv04_fifo_ramfc *ramfc,
	       struct nvkm_fifo **pfifo)
{
	struct nv04_fifo *fifo;
	int ret;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->ramfc = ramfc;
	*pfifo = &fifo->base;

	ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
	if (ret)
		return ret;

	set_bit(nr - 1, fifo->base.mask); /* inactive channel */
	return 0;
}

static const struct nvkm_fifo_func
nv04_fifo = {
	.init = nv04_fifo_init,
	.intr = nv04_fifo_intr,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.chan = {
		&nv04_fifo_dma_oclass,
		NULL
	},
};

int
nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return nv04_fifo_new_(&nv04_fifo, device, index, 16,
			      nv04_fifo_ramfc, pfifo);
}