Lines Matching full:fifo — drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c (Nouveau NVKM, GF100 FIFO/runlist engine)

37 gf100_fifo_uevent_init(struct nvkm_fifo *fifo)  in gf100_fifo_uevent_init()  argument
39 struct nvkm_device *device = fifo->engine.subdev.device; in gf100_fifo_uevent_init()
44 gf100_fifo_uevent_fini(struct nvkm_fifo *fifo) in gf100_fifo_uevent_fini() argument
46 struct nvkm_device *device = fifo->engine.subdev.device; in gf100_fifo_uevent_fini()
51 gf100_fifo_runlist_commit(struct gf100_fifo *fifo) in gf100_fifo_runlist_commit() argument
54 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_runlist_commit()
60 mutex_lock(&fifo->base.mutex); in gf100_fifo_runlist_commit()
61 cur = fifo->runlist.mem[fifo->runlist.active]; in gf100_fifo_runlist_commit()
62 fifo->runlist.active = !fifo->runlist.active; in gf100_fifo_runlist_commit()
65 list_for_each_entry(chan, &fifo->chan, head) { in gf100_fifo_runlist_commit()
76 mutex_unlock(&fifo->base.mutex); in gf100_fifo_runlist_commit()
85 if (wait_event_timeout(fifo->runlist.wait, in gf100_fifo_runlist_commit()
89 mutex_unlock(&fifo->base.mutex); in gf100_fifo_runlist_commit()
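The commit path above is a double-buffered runlist update: under the FIFO mutex the driver picks the buffer the hardware is not currently reading, rebuilds it from the software channel list, hands it to the hardware, and then sleeps on runlist.wait until the runlist-update interrupt fires or the timeout expires. Below is a minimal sketch of that pattern using only generic kernel primitives; the demo_* types and submit_runlist_to_hw() are hypothetical stand-ins, not the real NVKM objects or register writes.

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/types.h>

struct demo_chan {
        struct list_head head;
        int chid;
};

struct demo_fifo {
        struct mutex mutex;             /* serialises runlist rebuilds      */
        struct list_head chan;          /* channels currently scheduled     */
        void *runlist_mem[2];           /* double-buffered runlist storage  */
        int runlist_active;             /* buffer handed to HW last time    */
        wait_queue_head_t runlist_wait; /* woken by the runlist interrupt   */
        bool runlist_pending;           /* cleared by the interrupt handler */
};

/* Hypothetical: point the hardware at the freshly built buffer. */
static void submit_runlist_to_hw(struct demo_fifo *fifo, void *mem, int nr)
{
}

static void demo_runlist_commit(struct demo_fifo *fifo)
{
        struct demo_chan *chan;
        void *cur;
        int nr = 0;

        mutex_lock(&fifo->mutex);
        /* Indices alternate each commit, so this buffer is not the one the
         * hardware is currently reading; rebuild it from the SW list. */
        cur = fifo->runlist_mem[fifo->runlist_active];
        fifo->runlist_active = !fifo->runlist_active;
        list_for_each_entry(chan, &fifo->chan, head)
                nr++;   /* the real code writes one runlist entry per channel */

        fifo->runlist_pending = true;
        submit_runlist_to_hw(fifo, cur, nr);

        /* Bounded wait for the runlist-update interrupt to clear the flag. */
        if (!wait_event_timeout(fifo->runlist_wait, !fifo->runlist_pending,
                                msecs_to_jiffies(2000)))
                pr_err("runlist update timeout\n");
        mutex_unlock(&fifo->mutex);
}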
93 gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan) in gf100_fifo_runlist_remove() argument
95 mutex_lock(&fifo->base.mutex); in gf100_fifo_runlist_remove()
97 mutex_unlock(&fifo->base.mutex); in gf100_fifo_runlist_remove()
101 gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan) in gf100_fifo_runlist_insert() argument
103 mutex_lock(&fifo->base.mutex); in gf100_fifo_runlist_insert()
104 list_add_tail(&chan->head, &fifo->chan); in gf100_fifo_runlist_insert()
105 mutex_unlock(&fifo->base.mutex); in gf100_fifo_runlist_insert()
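Insertion and removal only edit the software channel list under the same mutex; the hardware does not see the change until the next commit rebuilds a runlist buffer. Continuing the hypothetical demo_* sketch from above:

static void demo_runlist_insert(struct demo_fifo *fifo, struct demo_chan *chan)
{
        mutex_lock(&fifo->mutex);
        list_add_tail(&chan->head, &fifo->chan);  /* picked up on next commit */
        mutex_unlock(&fifo->mutex);
}

static void demo_runlist_remove(struct demo_fifo *fifo, struct demo_chan *chan)
{
        mutex_lock(&fifo->mutex);
        list_del_init(&chan->head);               /* dropped on next commit   */
        mutex_unlock(&fifo->mutex);
}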
109 gf100_fifo_id_engine(struct nvkm_fifo *fifo, int engi) in gf100_fifo_id_engine() argument
127 return nvkm_device_engine(fifo->engine.subdev.device, type, inst); in gf100_fifo_id_engine()
149 struct gf100_fifo *fifo = container_of(w, typeof(*fifo), recover.work); in gf100_fifo_recover_work() local
150 struct nvkm_device *device = fifo->base.engine.subdev.device; in gf100_fifo_recover_work()
155 spin_lock_irqsave(&fifo->base.lock, flags); in gf100_fifo_recover_work()
156 engm = fifo->recover.mask; in gf100_fifo_recover_work()
157 fifo->recover.mask = 0ULL; in gf100_fifo_recover_work()
158 spin_unlock_irqrestore(&fifo->base.lock, flags); in gf100_fifo_recover_work()
163 if ((engine = gf100_fifo_id_engine(&fifo->base, engn))) { in gf100_fifo_recover_work()
169 gf100_fifo_runlist_commit(fifo); in gf100_fifo_recover_work()
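The recovery worker runs the heavyweight part of error handling outside interrupt context: it snapshots and clears the pending-engine mask under the IRQ-safe lock, resets each flagged engine, and recommits the runlist. A sketch of that deferred-recovery pattern; demo_engine_reset() and demo_runlist_recommit() are hypothetical placeholders for the real engine reset and the gf100_fifo_runlist_commit() call.

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/kernel.h>

struct demo_recover {
        spinlock_t lock;         /* protects mask; also taken from IRQ context */
        unsigned long mask;      /* one bit per engine awaiting recovery       */
        struct work_struct work; /* runs the heavyweight reset                 */
};

/* Hypothetical placeholders for the real engine reset and runlist commit. */
static void demo_engine_reset(int engn)
{
}

static void demo_runlist_recommit(void)
{
}

static void demo_recover_work(struct work_struct *w)
{
        struct demo_recover *rec = container_of(w, typeof(*rec), work);
        unsigned long flags, engm;
        int engn;

        /* Snapshot and clear the pending mask atomically w.r.t. the IRQ path,
         * so engines flagged while we run are handled by the next schedule. */
        spin_lock_irqsave(&rec->lock, flags);
        engm = rec->mask;
        rec->mask = 0;
        spin_unlock_irqrestore(&rec->lock, flags);

        /* Reset every flagged engine, then resubmit the runlist. */
        for_each_set_bit(engn, &engm, BITS_PER_LONG)
                demo_engine_reset(engn);
        demo_runlist_recommit();
}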
175 gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine, in gf100_fifo_recover() argument
178 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_recover()
181 int engi = gf100_fifo_engine_id(&fifo->base, engine); in gf100_fifo_recover()
185 assert_spin_locked(&fifo->base.lock); in gf100_fifo_recover()
192 fifo->recover.mask |= BIT(engi); in gf100_fifo_recover()
193 schedule_work(&fifo->recover.work); in gf100_fifo_recover()
194 nvkm_fifo_kevent(&fifo->base, chid); in gf100_fifo_recover()
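The producer side is deliberately small because it runs from the interrupt path with the FIFO spinlock already held: it only marks the engine in the mask and schedules the work item. Continuing the sketch above:

/* Producer side: called with rec->lock already held (mirroring the
 * assert_spin_locked() above); it only flags the engine and defers the
 * actual reset to the work item. */
static void demo_recover(struct demo_recover *rec, int engn)
{
        assert_spin_locked(&rec->lock);
        rec->mask |= BIT(engn);
        schedule_work(&rec->work);
}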
259 struct gf100_fifo *fifo = gf100_fifo(base); in gf100_fifo_fault() local
260 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_fault()
294 chan = nvkm_fifo_chan_inst(&fifo->base, info->inst, &flags); in gf100_fifo_fault()
306 gf100_fifo_recover(fifo, engine, (void *)chan); in gf100_fifo_fault()
307 nvkm_fifo_chan_put(&fifo->base, flags, &chan); in gf100_fifo_fault()
317 gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo) in gf100_fifo_intr_sched_ctxsw() argument
319 struct nvkm_device *device = fifo->base.engine.subdev.device; in gf100_fifo_intr_sched_ctxsw()
325 spin_lock_irqsave(&fifo->base.lock, flags); in gf100_fifo_intr_sched_ctxsw()
336 list_for_each_entry(chan, &fifo->chan, head) { in gf100_fifo_intr_sched_ctxsw()
338 engine = gf100_fifo_id_engine(&fifo->base, engn); in gf100_fifo_intr_sched_ctxsw()
341 gf100_fifo_recover(fifo, engine, chan); in gf100_fifo_intr_sched_ctxsw()
347 spin_unlock_irqrestore(&fifo->base.lock, flags); in gf100_fifo_intr_sched_ctxsw()
351 gf100_fifo_intr_sched(struct gf100_fifo *fifo) in gf100_fifo_intr_sched() argument
353 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_intr_sched()
365 gf100_fifo_intr_sched_ctxsw(fifo); in gf100_fifo_intr_sched()
373 gf100_fifo_intr_fault(struct nvkm_fifo *fifo, int unit) in gf100_fifo_intr_fault() argument
375 struct nvkm_device *device = fifo->engine.subdev.device; in gf100_fifo_intr_fault()
393 nvkm_fifo_fault(fifo, &info); in gf100_fifo_intr_fault()
405 gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit) in gf100_fifo_intr_pbdma() argument
407 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_intr_pbdma()
429 chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); in gf100_fifo_intr_pbdma()
435 nvkm_fifo_chan_put(&fifo->base, flags, &chan); in gf100_fifo_intr_pbdma()
443 gf100_fifo_intr_runlist(struct gf100_fifo *fifo) in gf100_fifo_intr_runlist() argument
445 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_intr_runlist()
450 wake_up(&fifo->runlist.wait); in gf100_fifo_intr_runlist()
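The runlist interrupt is the other half of the handshake started in the commit path: it wakes whoever is sleeping in wait_event_timeout(). In the hypothetical demo_* sketch, with a software pending flag standing in for the hardware status bit the real code polls:

/* Interrupt side of the commit handshake: clear the pending flag and wake
 * the thread sleeping in demo_runlist_commit()'s wait_event_timeout(). */
static void demo_intr_runlist(struct demo_fifo *fifo)
{
        fifo->runlist_pending = false;
        wake_up(&fifo->runlist_wait);
}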
462 gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn) in gf100_fifo_intr_engine_unit() argument
464 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_intr_engine_unit()
475 nvkm_fifo_uevent(&fifo->base); in gf100_fifo_intr_engine_unit()
487 gf100_fifo_intr_engine(struct gf100_fifo *fifo) in gf100_fifo_intr_engine() argument
489 struct nvkm_device *device = fifo->base.engine.subdev.device; in gf100_fifo_intr_engine()
493 gf100_fifo_intr_engine_unit(fifo, unit); in gf100_fifo_intr_engine()
501 struct gf100_fifo *fifo = gf100_fifo(base); in gf100_fifo_intr() local
502 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_intr()
515 gf100_fifo_intr_sched(fifo); in gf100_fifo_intr()
538 gf100_fifo_intr_fault(&fifo->base, unit); in gf100_fifo_intr()
549 gf100_fifo_intr_pbdma(fifo, unit); in gf100_fifo_intr()
557 gf100_fifo_intr_runlist(fifo); in gf100_fifo_intr()
562 gf100_fifo_intr_engine(fifo); in gf100_fifo_intr()
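The top-level handler reads the pending-interrupt word once, masks it with the enabled interrupts, and dispatches each asserted bit to its dedicated handler before acknowledging it. A generic sketch of that dispatch shape; the register offsets and bit assignments here are placeholders loosely modelled on the PFIFO INTR/INTR_EN pair, not a faithful register map.

#include <linux/io.h>

#define DEMO_INTR_SCHED         BIT(8)   /* placeholder bit assignments */
#define DEMO_INTR_RUNLIST       BIT(30)

static void demo_fifo_intr(void __iomem *regs)
{
        u32 mask = readl(regs + 0x140);          /* interrupt enable      */
        u32 stat = readl(regs + 0x100) & mask;   /* pending, enabled only */

        if (stat & DEMO_INTR_SCHED) {
                /* scheduler error: may kick off ctxsw recovery */
                writel(DEMO_INTR_SCHED, regs + 0x100);   /* ack */
                stat &= ~DEMO_INTR_SCHED;
        }

        if (stat & DEMO_INTR_RUNLIST) {
                /* runlist updated: wake the committer */
                writel(DEMO_INTR_RUNLIST, regs + 0x100);
                stat &= ~DEMO_INTR_RUNLIST;
        }

        if (stat)       /* anything left over is unexpected */
                pr_warn("unhandled FIFO intr 0x%08x\n", stat);
}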
576 struct gf100_fifo *fifo = gf100_fifo(base); in gf100_fifo_oneinit() local
577 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_oneinit()
584 fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204)); in gf100_fifo_oneinit()
585 nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr); in gf100_fifo_oneinit()
589 false, &fifo->runlist.mem[0]); in gf100_fifo_oneinit()
594 false, &fifo->runlist.mem[1]); in gf100_fifo_oneinit()
598 init_waitqueue_head(&fifo->runlist.wait); in gf100_fifo_oneinit()
601 0x1000, false, &fifo->user.mem); in gf100_fifo_oneinit()
605 ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem), in gf100_fifo_oneinit()
606 &fifo->user.bar); in gf100_fifo_oneinit()
610 return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0); in gf100_fifo_oneinit()
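One-time setup allocates the two runlist buffers (for the double buffering above), the per-channel user area that later gets mapped through BAR1, and the waitqueue the commit path sleeps on. A rough sketch of that allocation step, with dma_alloc_coherent() standing in for the NVKM instance-memory and BAR mapping calls used by the real driver:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct demo_fifo_hw {
        void *runlist_cpu[2];       /* CPU view of the two runlist buffers */
        dma_addr_t runlist_dma[2];  /* device addresses handed to HW       */
        void *user_cpu;             /* per-channel user-visible area       */
        dma_addr_t user_dma;
};

static int demo_fifo_oneinit(struct device *dev, struct demo_fifo *fifo,
                             struct demo_fifo_hw *hw, int nr_chan)
{
        int i;

        /* Two buffers so a new runlist can be built while HW reads the old one. */
        for (i = 0; i < 2; i++) {
                hw->runlist_cpu[i] = dma_alloc_coherent(dev, 0x1000,
                                                        &hw->runlist_dma[i],
                                                        GFP_KERNEL);
                if (!hw->runlist_cpu[i])
                        return -ENOMEM; /* cleanup of earlier buffers omitted */
        }
        init_waitqueue_head(&fifo->runlist_wait);

        /* One page of user-visible control area per channel. */
        hw->user_cpu = dma_alloc_coherent(dev, nr_chan * 0x1000,
                                          &hw->user_dma, GFP_KERNEL);
        return hw->user_cpu ? 0 : -ENOMEM;
}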
616 struct gf100_fifo *fifo = gf100_fifo(base); in gf100_fifo_fini() local
617 flush_work(&fifo->recover.work); in gf100_fifo_fini()
623 struct gf100_fifo *fifo = gf100_fifo(base); in gf100_fifo_init() local
624 struct nvkm_device *device = fifo->base.engine.subdev.device; in gf100_fifo_init()
628 nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1); in gf100_fifo_init()
629 nvkm_wr32(device, 0x002204, (1 << fifo->pbdma_nr) - 1); in gf100_fifo_init()
632 if (fifo->pbdma_nr >= 3) { in gf100_fifo_init()
642 for (i = 0; i < fifo->pbdma_nr; i++) { in gf100_fifo_init()
649 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12); in gf100_fifo_init()
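The (1 << fifo->pbdma_nr) - 1 writes build a contiguous all-ones mask covering exactly the PBDMA units counted earlier with hweight32() (three units give 0b111 = 0x7). A tiny illustration of that arithmetic:

/* hweight32() counts the set bits in the unit-present register; the
 * (1 << n) - 1 form then rebuilds a contiguous enable mask for those n
 * units, e.g. three units -> 0b111 = 0x7. */
static u32 demo_pbdma_enable_mask(u32 present)
{
        int pbdma_nr = hweight32(present);

        return (1u << pbdma_nr) - 1;
}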
659 struct gf100_fifo *fifo = gf100_fifo(base); in gf100_fifo_dtor() local
660 struct nvkm_device *device = fifo->base.engine.subdev.device; in gf100_fifo_dtor()
661 nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar); in gf100_fifo_dtor()
662 nvkm_memory_unref(&fifo->user.mem); in gf100_fifo_dtor()
663 nvkm_memory_unref(&fifo->runlist.mem[0]); in gf100_fifo_dtor()
664 nvkm_memory_unref(&fifo->runlist.mem[1]); in gf100_fifo_dtor()
665 return fifo; in gf100_fifo_dtor()
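Teardown mirrors construction in reverse: flush_work() in the fini path guarantees the deferred-recovery handler can no longer run before the destructor releases the state it touches. A sketch of that ordering, reusing the hypothetical demo_* objects from the earlier snippets:

#include <linux/slab.h>

static void demo_fifo_teardown(struct device *dev, struct demo_fifo *fifo,
                               struct demo_recover *rec,
                               struct demo_fifo_hw *hw, int nr_chan)
{
        int i;

        /* Quiesce the deferred recovery first; after this it can no longer
         * touch the state released below. */
        flush_work(&rec->work);

        dma_free_coherent(dev, nr_chan * 0x1000, hw->user_cpu, hw->user_dma);
        for (i = 0; i < 2; i++)
                dma_free_coherent(dev, 0x1000, hw->runlist_cpu[i],
                                  hw->runlist_dma[i]);

        kfree(rec);
        kfree(fifo);
}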
690 struct gf100_fifo *fifo; in gf100_fifo_new() local
692 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) in gf100_fifo_new()
694 INIT_LIST_HEAD(&fifo->chan); in gf100_fifo_new()
695 INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work); in gf100_fifo_new()
696 *pfifo = &fifo->base; in gf100_fifo_new()
698 return nvkm_fifo_ctor(&gf100_fifo, device, type, inst, 128, &fifo->base); in gf100_fifo_new()
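The constructor zero-allocates the driver object and initialises the purely software pieces (channel list, recovery work item, locks, waitqueue) before the object is registered with the core and interrupts can reference it. A final sketch of that pattern, again with the hypothetical demo_* types used throughout these examples:

static int demo_fifo_new(struct demo_fifo **pfifo, struct demo_recover **prec)
{
        struct demo_fifo *fifo;
        struct demo_recover *rec;

        fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
        rec = kzalloc(sizeof(*rec), GFP_KERNEL);
        if (!fifo || !rec) {
                kfree(fifo);
                kfree(rec);
                return -ENOMEM;
        }

        /* Software state must be ready before anything can reference it. */
        mutex_init(&fifo->mutex);
        INIT_LIST_HEAD(&fifo->chan);
        init_waitqueue_head(&fifo->runlist_wait);

        spin_lock_init(&rec->lock);
        INIT_WORK(&rec->work, demo_recover_work);

        *pfifo = fifo;
        *prec = rec;
        return 0;
}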