Lines matching refs: runl
152 gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl) in gk104_fifo_runlist_commit() argument
164 mem = fifo->runlist[runl].mem[fifo->runlist[runl].next]; in gk104_fifo_runlist_commit()
165 fifo->runlist[runl].next = !fifo->runlist[runl].next; in gk104_fifo_runlist_commit()
168 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { in gk104_fifo_runlist_commit()
172 list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) { in gk104_fifo_runlist_commit()
190 nvkm_wr32(device, 0x002274, (runl << 20) | nr); in gk104_fifo_runlist_commit()
193 if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000)) in gk104_fifo_runlist_commit()
196 nvkm_error(subdev, "runlist %d update timeout\n", runl); in gk104_fifo_runlist_commit()
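The fragments above (lines 164-196) show the double-buffered commit in gk104_fifo_runlist_commit(): build the new runlist into the buffer the hardware is not currently reading, flip the .next index, submit the update (the 0x002274 write at line 190), then poll a status bit with a timeout. Below is a minimal standalone sketch of that pattern only; the names (struct runlist, runlist_commit) are invented and a plain variable stands in for the hardware register.

#include <stdbool.h>
#include <stdio.h>

#define RUNLIST_ENTRIES 8

struct runlist {
	unsigned int mem[2][RUNLIST_ENTRIES]; /* A/B runlist buffers */
	int next;                             /* which buffer to fill next */
	volatile bool pending;                /* stands in for the 0x00100000 status bit */
};

static int runlist_commit(struct runlist *rl, const unsigned int *chids, int nr)
{
	unsigned int *mem = rl->mem[rl->next];
	int i, timeout = 1000;

	/* Build the new runlist into the buffer the hardware is not reading. */
	for (i = 0; i < nr && i < RUNLIST_ENTRIES; i++)
		mem[i] = chids[i];

	rl->next = !rl->next;   /* flip for the next commit, as at line 165 */
	rl->pending = true;     /* in the driver this is the 0x002274 submit write */

	/* Pretend the hardware consumed the runlist; the driver instead
	 * re-reads 0x002284 + (runl * 0x08) until the bit drops (line 193). */
	rl->pending = false;

	while (rl->pending && --timeout)
		;
	if (!timeout) {
		fprintf(stderr, "runlist update timeout\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	struct runlist rl = { .next = 0 };
	unsigned int chids[] = { 3, 7, 12 };

	return runlist_commit(&rl, chids, 3) ? 1 : 0;
}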
221 list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp); in gk104_fifo_runlist_insert()
224 list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan); in gk104_fifo_runlist_insert()
251 int engn, runl; in gk104_fifo_recover_work() local
269 for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl)) in gk104_fifo_recover_work()
270 gk104_fifo_runlist_commit(fifo, runl); in gk104_fifo_recover_work()
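Lines 269-270 use the usual kernel idiom for draining a bitmask: gk104_fifo_recover_runl() appears to mark runlists for recovery (line 294), and the recovery work then takes the lowest set bit with __ffs(), commits that runlist, and clears the bit until the mask is empty. A small standalone sketch of the same walk, using __builtin_ctzl() in place of the kernel's __ffs() and printf() in place of the commit:

#include <stdio.h>

int main(void)
{
	unsigned long runm = 0x15;      /* runlists 0, 2 and 4 marked for recovery */
	unsigned long todo, runl = 0;

	for (todo = runm; todo; todo &= ~(1UL << runl)) {
		runl = __builtin_ctzl(todo);    /* lowest set bit, like __ffs() */
		printf("commit runlist %lu\n", runl);
	}
	return 0;
}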
279 gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl) in gk104_fifo_recover_runl() argument
283 const u32 runm = BIT(runl); in gk104_fifo_recover_runl()
294 nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl); in gk104_fifo_recover_runl()
299 gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid) in gk104_fifo_recover_chid() argument
304 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { in gk104_fifo_recover_chid()
311 list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) { in gk104_fifo_recover_chid()
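gk104_fifo_recover_chid() (lines 304-311) walks the per-runlist channel list, then the channel-group list, looking for the channel that owns the faulting chid; those are the same lists populated by gk104_fifo_runlist_insert() at lines 221-224. A minimal sketch of that lookup, with a plain singly linked list standing in for list_head/list_for_each_entry() and an invented recover_chid() helper:

#include <stddef.h>
#include <stdio.h>

struct chan {
	int chid;
	struct chan *next;
};

static struct chan *recover_chid(struct chan *head, int chid)
{
	struct chan *chan;

	for (chan = head; chan; chan = chan->next) {
		if (chan->chid == chid)
			return chan;    /* found the faulting channel */
	}
	return NULL;                    /* the driver would go on to the cgrp list (line 311) */
}

int main(void)
{
	struct chan c2 = { .chid = 12, .next = NULL };
	struct chan c1 = { .chid = 3,  .next = &c2 };
	struct chan *hit = recover_chid(&c1, 12);

	printf("chid 12 %s\n", hit ? "found" : "not found");
	return 0;
}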
331 const u32 runl = (stat & 0x000f0000) >> 16; in gk104_fifo_recover_chan() local
333 unsigned long engn, engm = fifo->runlist[runl].engm; in gk104_fifo_recover_chan()
341 chan = gk104_fifo_recover_chid(fifo, runl, chid); in gk104_fifo_recover_chan()
352 gk104_fifo_recover_runl(fifo, runl); in gk104_fifo_recover_chan()
370 const u32 runl = fifo->engine[engn].runl; in gk104_fifo_recover_engn() local
381 gk104_fifo_recover_runl(fifo, runl); in gk104_fifo_recover_engn()
753 int runl = __ffs(mask); in gk104_fifo_intr_runlist() local
754 wake_up(&fifo->runlist[runl].wait); in gk104_fifo_intr_runlist()
755 nvkm_wr32(device, 0x002a00, 1 << runl); in gk104_fifo_intr_runlist()
756 mask &= ~(1 << runl); in gk104_fifo_intr_runlist()
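Lines 753-756 handle the runlist-update interrupt: for each pending runlist bit, wake any thread sleeping on that runlist's wait queue and acknowledge the bit by writing it back to 0x002a00. A toy version of that per-bit ack loop, with a plain variable playing the interrupt register and printf() in place of wake_up():

#include <stdio.h>

static unsigned int intr_reg = 0x05;    /* pretend runlists 0 and 2 raised the interrupt */

int main(void)
{
	unsigned int mask = intr_reg;

	while (mask) {
		int runl = __builtin_ctz(mask);

		printf("wake waiters on runlist %d\n", runl);
		intr_reg &= ~(1u << runl);  /* models the write-1-to-clear at 0x002a00 */
		mask &= ~(1u << runl);
	}
	return 0;
}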
878 int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn; in gk104_fifo_info() local
879 if (runl < fifo->runlist_nr) { in gk104_fifo_info()
880 unsigned long engm = fifo->runlist[runl].engm; in gk104_fifo_info()
903 int engn, runl, pbid, ret, i, j; in gk104_fifo_oneinit() local
921 while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) { in gk104_fifo_oneinit()
924 if (map[j] & (1 << runl)) { in gk104_fifo_oneinit()
931 engn, runl, pbid, nvkm_subdev_name[engidx]); in gk104_fifo_oneinit()
934 fifo->engine[engn].runl = runl; in gk104_fifo_oneinit()
937 fifo->runlist[runl].engm |= 1 << engn; in gk104_fifo_oneinit()
938 fifo->runlist_nr = max(fifo->runlist_nr, runl + 1); in gk104_fifo_oneinit()
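gk104_fifo_oneinit() (lines 921-938) enumerates the device topology, records which runlist serves each engine, accumulates a per-runlist engine mask, and keeps runlist_nr at one past the highest runlist seen. A standalone sketch of that bookkeeping only; the topo[] table is invented and merely stands in for what nvkm_top_engine() reports:

#include <stdio.h>

#define MAX_RUNLISTS 16

struct topo_entry { int engn; int runl; };

int main(void)
{
	static const struct topo_entry topo[] = {
		{ .engn = 0, .runl = 0 },   /* e.g. graphics */
		{ .engn = 1, .runl = 1 },   /* e.g. a copy engine */
		{ .engn = 2, .runl = 4 },   /* e.g. a video engine */
	};
	unsigned long engm[MAX_RUNLISTS] = { 0 };
	int runlist_nr = 0;
	unsigned int i;

	for (i = 0; i < sizeof(topo) / sizeof(topo[0]); i++) {
		int engn = topo[i].engn, runl = topo[i].runl;

		engm[runl] |= 1UL << engn;          /* per-runlist engine mask, as at line 937 */
		if (runl + 1 > runlist_nr)          /* the max() at line 938 */
			runlist_nr = runl + 1;
	}

	printf("runlist_nr = %d\n", runlist_nr);
	for (i = 0; i < (unsigned)runlist_nr; i++)
		printf("runlist %u engm = 0x%lx\n", i, engm[i]);
	return 0;
}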