Lines matching full:fifo (drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c)
40 gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn, in gk104_fifo_engine_status() argument
43 struct nvkm_engine *engine = fifo->engine[engn].engine; in gk104_fifo_engine_status()
44 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_engine_status()
90 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_class_new() local
91 if (oclass->engn == &fifo->func->chan) { in gk104_fifo_class_new()
93 return user->ctor(fifo, oclass, argv, argc, pobject); in gk104_fifo_class_new()
95 if (oclass->engn == &fifo->func->user) { in gk104_fifo_class_new()
107 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_class_get() local
110 if (fifo->func->user.ctor && c++ == index) { in gk104_fifo_class_get()
111 oclass->base = fifo->func->user.user; in gk104_fifo_class_get()
112 oclass->engn = &fifo->func->user; in gk104_fifo_class_get()
116 if (fifo->func->chan.ctor && c++ == index) { in gk104_fifo_class_get()
117 oclass->base = fifo->func->chan.user; in gk104_fifo_class_get()
118 oclass->engn = &fifo->func->chan; in gk104_fifo_class_get()
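
The gk104_fifo_class_get()/gk104_fifo_class_new() hits above show nvkm's indexed class enumeration: class_get counts the classes this fifo exposes (the user and chan entries of the per-chip func table, each only when its ctor is set) until the caller's index is reached, and class_new routes construction to whichever entry oclass->engn points at. A minimal standalone sketch of that counter pattern, with all names invented for illustration:

    #include <stdio.h>

    /* Hypothetical stand-ins for nvkm's oclass entries. */
    struct oclass_entry { const char *name; int available; };

    static const struct oclass_entry entries[] = {
        { "user", 1 },   /* exposed only if fifo->func->user.ctor is set */
        { "chan", 1 },   /* exposed only if fifo->func->chan.ctor is set */
    };

    /* Returns 0 and sets *out when index names an exposed class;
     * otherwise returns the number of classes seen. */
    static int class_get(int index, const struct oclass_entry **out)
    {
        int c = 0;
        for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
            if (entries[i].available && c++ == index) {
                *out = &entries[i];
                return 0;
            }
        }
        return c;
    }

    int main(void)
    {
        const struct oclass_entry *cls;
        for (int i = 0; class_get(i, &cls) == 0; i++)
            printf("class %d: %s\n", i, cls->name);
        return 0;
    }
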
126 gk104_fifo_uevent_fini(struct nvkm_fifo *fifo) in gk104_fifo_uevent_fini() argument
128 struct nvkm_device *device = fifo->engine.subdev.device; in gk104_fifo_uevent_fini()
133 gk104_fifo_uevent_init(struct nvkm_fifo *fifo) in gk104_fifo_uevent_init() argument
135 struct nvkm_device *device = fifo->engine.subdev.device; in gk104_fifo_uevent_init()
140 gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl, in gk104_fifo_runlist_commit() argument
143 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_runlist_commit()
167 gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl) in gk104_fifo_runlist_update() argument
169 const struct gk104_fifo_runlist_func *func = fifo->func->runlist; in gk104_fifo_runlist_update()
175 mutex_lock(&fifo->base.mutex); in gk104_fifo_runlist_update()
176 mem = fifo->runlist[runl].mem[fifo->runlist[runl].next]; in gk104_fifo_runlist_update()
177 fifo->runlist[runl].next = !fifo->runlist[runl].next; in gk104_fifo_runlist_update()
180 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { in gk104_fifo_runlist_update()
184 list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) { in gk104_fifo_runlist_update()
192 func->commit(fifo, runl, mem, nr); in gk104_fifo_runlist_update()
193 mutex_unlock(&fifo->base.mutex); in gk104_fifo_runlist_update()
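
gk104_fifo_runlist_update() above rebuilds a runlist into the inactive half of a two-buffer pair, mem[next], flipping next on every call while holding fifo->base.mutex, and filling the buffer from the runlist's chan and cgrp lists before func->commit() hands it to the hardware. A simplified userspace sketch of that double-buffer flip, with invented types (the real buffers are nvkm_memory objects and the entries are TSG/channel descriptors):

    #include <pthread.h>
    #include <stdio.h>

    /* Two buffers: the hardware reads one while software fills the other. */
    struct runlist {
        int mem[2][8];
        int next;                   /* which buffer to fill next */
        pthread_mutex_t mutex;
    };

    static void commit(int *mem, int nr)
    {
        /* The driver writes the buffer address and entry count to the
         * runlist registers here; we just report it. */
        printf("commit %d entries from buffer %p\n", nr, (void *)mem);
    }

    static void runlist_update(struct runlist *rl, const int *chids, int nr)
    {
        pthread_mutex_lock(&rl->mutex);
        int *mem = rl->mem[rl->next];
        rl->next = !rl->next;       /* flip, as runlist_update() does */
        for (int i = 0; i < nr; i++)
            mem[i] = chids[i];
        commit(mem, nr);
        pthread_mutex_unlock(&rl->mutex);
    }

    int main(void)
    {
        struct runlist rl = { .next = 0, .mutex = PTHREAD_MUTEX_INITIALIZER };
        int chids[] = { 3, 5, 7 };
        runlist_update(&rl, chids, 3);
        runlist_update(&rl, chids, 3);   /* fills the other buffer */
        return 0;
    }
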
197 gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) in gk104_fifo_runlist_remove() argument
200 mutex_lock(&fifo->base.mutex); in gk104_fifo_runlist_remove()
206 mutex_unlock(&fifo->base.mutex); in gk104_fifo_runlist_remove()
210 gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) in gk104_fifo_runlist_insert() argument
213 mutex_lock(&fifo->base.mutex); in gk104_fifo_runlist_insert()
216 list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp); in gk104_fifo_runlist_insert()
219 list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan); in gk104_fifo_runlist_insert()
221 mutex_unlock(&fifo->base.mutex); in gk104_fifo_runlist_insert()
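
gk104_fifo_runlist_insert()/remove() above maintain the software lists the update is built from, under the same mutex. Based on the surrounding driver code (not fully visible in these hits), a channel that belongs to a channel group bumps the group's member count, and the group itself joins the runlist's cgrp list only when its first member arrives; a lone channel goes straight on the chan list. A compact sketch of that first-member accounting, locking elided and names invented:

    #include <stdio.h>

    struct cgrp { int chan_nr; int on_runlist; };
    struct chan { struct cgrp *cgrp; int queued; };

    static void runlist_insert(struct chan *ch)
    {
        if (ch->cgrp && ch->cgrp->chan_nr++ == 0)
            ch->cgrp->on_runlist = 1;   /* list_add_tail(&cgrp->head, ...) */
        ch->queued = 1;                 /* list_add_tail(&chan->head, ...) */
    }

    static void runlist_remove(struct chan *ch)
    {
        if (ch->cgrp && --ch->cgrp->chan_nr == 0)
            ch->cgrp->on_runlist = 0;   /* list_del_init(&cgrp->head) */
        ch->queued = 0;                 /* list_del_init(&chan->head) */
    }

    int main(void)
    {
        struct cgrp grp = {0};
        struct chan a = { .cgrp = &grp }, b = { .cgrp = &grp };
        runlist_insert(&a);
        runlist_insert(&b);
        runlist_remove(&a);
        printf("group on runlist: %d (chan_nr=%d)\n", grp.on_runlist, grp.chan_nr);
        runlist_remove(&b);
        printf("group on runlist: %d (chan_nr=%d)\n", grp.on_runlist, grp.chan_nr);
        return 0;
    }
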
240 gk104_fifo_pbdma_init(struct gk104_fifo *fifo) in gk104_fifo_pbdma_init() argument
242 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_pbdma_init()
243 nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1); in gk104_fifo_pbdma_init()
247 gk104_fifo_pbdma_nr(struct gk104_fifo *fifo) in gk104_fifo_pbdma_nr() argument
249 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_pbdma_nr()
273 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_engine_id() local
279 for (engn = 0; engn < fifo->engine_nr && engine; engn++) { in gk104_fifo_engine_id()
280 if (fifo->engine[engn].engine == engine) in gk104_fifo_engine_id()
291 struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work); in gk104_fifo_recover_work() local
292 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_recover_work()
298 spin_lock_irqsave(&fifo->base.lock, flags); in gk104_fifo_recover_work()
299 runm = fifo->recover.runm; in gk104_fifo_recover_work()
300 engm = fifo->recover.engm; in gk104_fifo_recover_work()
301 fifo->recover.engm = 0; in gk104_fifo_recover_work()
302 fifo->recover.runm = 0; in gk104_fifo_recover_work()
303 spin_unlock_irqrestore(&fifo->base.lock, flags); in gk104_fifo_recover_work()
308 if ((engine = fifo->engine[engn].engine)) { in gk104_fifo_recover_work()
315 gk104_fifo_runlist_update(fifo, runl); in gk104_fifo_recover_work()
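
The recover.work hits above are the deferred half of recovery: interrupt-time code only sets bits in fifo->recover.runm/engm under fifo->base.lock and schedules the work item (see recover_runl()/recover_engn() below), and the worker snapshots and clears both masks under the same lock before doing the slow engine resets and runlist resubmission outside it. A standalone sketch of that snapshot-and-clear shape, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long pending_engm, pending_runm;

    static void recover_work(void)
    {
        unsigned long engm, runm;

        /* Private snapshot, shared state cleared, all under the lock. */
        pthread_mutex_lock(&lock);
        engm = pending_engm;
        runm = pending_runm;
        pending_engm = 0;
        pending_runm = 0;
        pthread_mutex_unlock(&lock);

        /* Slow work happens on the snapshot, outside the lock. */
        for (int engn = 0; engn < 8; engn++)
            if (engm & (1ul << engn))
                printf("reset engine %d\n", engn);
        for (int runl = 0; runl < 8; runl++)
            if (runm & (1ul << runl))
                printf("resubmit runlist %d\n", runl);
    }

    int main(void)
    {
        pending_engm = 0x5;   /* engines 0 and 2 flagged, e.g. by an IRQ */
        pending_runm = 0x1;
        recover_work();
        return 0;
    }
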
321 static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
324 gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl) in gk104_fifo_recover_runl() argument
326 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_recover_runl()
330 assert_spin_locked(&fifo->base.lock); in gk104_fifo_recover_runl()
331 if (fifo->recover.runm & runm) in gk104_fifo_recover_runl()
333 fifo->recover.runm |= runm; in gk104_fifo_recover_runl()
340 schedule_work(&fifo->recover.work); in gk104_fifo_recover_runl()
344 gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid) in gk104_fifo_recover_chid() argument
349 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { in gk104_fifo_recover_chid()
356 list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) { in gk104_fifo_recover_chid()
372 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_recover_chan() local
373 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_recover_chan()
378 unsigned long engn, engm = fifo->runlist[runl].engm; in gk104_fifo_recover_chan()
381 assert_spin_locked(&fifo->base.lock); in gk104_fifo_recover_chan()
386 chan = gk104_fifo_recover_chid(fifo, runl, chid); in gk104_fifo_recover_chan()
389 nvkm_fifo_kevent(&fifo->base, chid); in gk104_fifo_recover_chan()
397 gk104_fifo_recover_runl(fifo, runl); in gk104_fifo_recover_chan()
400 for_each_set_bit(engn, &engm, fifo->engine_nr) { in gk104_fifo_recover_chan()
402 gk104_fifo_engine_status(fifo, engn, &status); in gk104_fifo_recover_chan()
405 gk104_fifo_recover_engn(fifo, engn); in gk104_fifo_recover_chan()
410 gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn) in gk104_fifo_recover_engn() argument
412 struct nvkm_engine *engine = fifo->engine[engn].engine; in gk104_fifo_recover_engn()
413 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_recover_engn()
415 const u32 runl = fifo->engine[engn].runl; in gk104_fifo_recover_engn()
420 assert_spin_locked(&fifo->base.lock); in gk104_fifo_recover_engn()
421 if (fifo->recover.engm & engm) in gk104_fifo_recover_engn()
423 fifo->recover.engm |= engm; in gk104_fifo_recover_engn()
426 gk104_fifo_recover_runl(fifo, runl); in gk104_fifo_recover_engn()
429 gk104_fifo_engine_status(fifo, engn, &status); in gk104_fifo_recover_engn()
432 gk104_fifo_recover_chan(&fifo->base, status.chan->id); in gk104_fifo_recover_engn()
441 const struct nvkm_enum *en = fifo->func->fault.engine; in gk104_fifo_recover_engn()
463 gk104_fifo_engine_status(fifo, engn, &status); in gk104_fifo_recover_engn()
476 schedule_work(&fifo->recover.work); in gk104_fifo_recover_engn()
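
recover_chan(), recover_runl(), and recover_engn() above call into one another: a dead channel blocks its runlist and resets every engine it was busy on, and a hung engine in turn recovers its resident channel and runlist. The recursion terminates because each function returns early once its bit is already set in recover.runm/engm, making every step idempotent. A sketch of that idempotent mutual recursion, with an invented one-engine-per-runlist topology:

    #include <stdio.h>

    static unsigned long engm, runm;   /* pending-recovery masks */

    static void recover_engn(int engn);

    static void recover_runl(int runl)
    {
        if (runm & (1ul << runl))
            return;                    /* already scheduled for recovery */
        runm |= 1ul << runl;
        printf("block runlist %d\n", runl);
    }

    static void recover_chan(int chid, int runl)
    {
        printf("kill channel %d\n", chid);
        recover_runl(runl);
        recover_engn(runl);            /* pretend engine index == runlist */
    }

    static void recover_engn(int engn)
    {
        if (engm & (1ul << engn))
            return;
        engm |= 1ul << engn;
        printf("reset engine %d\n", engn);
        recover_runl(engn);
    }

    int main(void)
    {
        recover_chan(42, 0);           /* terminates thanks to the masks */
        return 0;
    }
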
482 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_fault() local
483 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_fault()
492 er = nvkm_enum_find(fifo->func->fault.reason, info->reason); in gk104_fifo_fault()
493 ee = nvkm_enum_find(fifo->func->fault.engine, info->engine); in gk104_fifo_fault()
495 ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client); in gk104_fifo_fault()
497 ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client); in gk104_fifo_fault()
500 ea = nvkm_enum_find(fifo->func->fault.access, info->access); in gk104_fifo_fault()
530 spin_lock_irqsave(&fifo->base.lock, flags); in gk104_fifo_fault()
531 chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst); in gk104_fifo_fault()
544 gk104_fifo_recover_chan(&fifo->base, chan->chid); in gk104_fifo_fault()
551 int engn = fifo->base.func->engine_id(&fifo->base, engine); in gk104_fifo_fault()
553 gk104_fifo_recover_engn(fifo, engn); in gk104_fifo_fault()
556 spin_unlock_irqrestore(&fifo->base.lock, flags); in gk104_fifo_fault()
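
gk104_fifo_fault() above turns the raw reason/engine/client/access codes of an MMU fault into names via nvkm_enum_find() lookups in per-chip tables. The lookup is a linear scan of a sentinel-terminated value/name table; a sketch with a minimal stand-in for struct nvkm_enum and an abbreviated, illustrative reason table (the values shown should not be relied on):

    #include <stdio.h>

    /* Stand-in for struct nvkm_enum / nvkm_enum_find(). */
    struct nvkm_enum { int value; const char *name; };

    static const struct nvkm_enum *
    enum_find(const struct nvkm_enum *en, int value)
    {
        for (; en->name; en++)         /* NULL name terminates the table */
            if (en->value == value)
                return en;
        return NULL;
    }

    static const struct nvkm_enum fault_reason[] = {
        { 0x00, "PDE" },               /* illustrative values only */
        { 0x02, "PTE" },
        { 0, NULL }
    };

    int main(void)
    {
        const struct nvkm_enum *er = enum_find(fault_reason, 0x02);
        printf("fault reason: %s\n", er ? er->name : "unknown");
        return 0;
    }
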
571 gk104_fifo_intr_bind(struct gk104_fifo *fifo) in gk104_fifo_intr_bind() argument
573 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_bind()
590 gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) in gk104_fifo_intr_sched_ctxsw() argument
592 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_intr_sched_ctxsw()
599 spin_lock_irqsave(&fifo->base.lock, flags); in gk104_fifo_intr_sched_ctxsw()
603 for (engn = 0; engn < fifo->engine_nr; engn++) { in gk104_fifo_intr_sched_ctxsw()
606 gk104_fifo_engine_status(fifo, engn, &status); in gk104_fifo_intr_sched_ctxsw()
613 for_each_set_bit(engn, &engm, fifo->engine_nr) in gk104_fifo_intr_sched_ctxsw()
614 gk104_fifo_recover_engn(fifo, engn); in gk104_fifo_intr_sched_ctxsw()
617 spin_unlock_irqrestore(&fifo->base.lock, flags); in gk104_fifo_intr_sched_ctxsw()
621 gk104_fifo_intr_sched(struct gk104_fifo *fifo) in gk104_fifo_intr_sched() argument
623 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_sched()
634 gk104_fifo_intr_sched_ctxsw(fifo); in gk104_fifo_intr_sched()
642 gk104_fifo_intr_chsw(struct gk104_fifo *fifo) in gk104_fifo_intr_chsw() argument
644 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_chsw()
652 gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo) in gk104_fifo_intr_dropped_fault() argument
654 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_dropped_fault()
695 gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) in gk104_fifo_intr_pbdma_0() argument
697 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_pbdma_0()
722 chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); in gk104_fifo_intr_pbdma_0()
728 nvkm_fifo_chan_put(&fifo->base, flags, &chan); in gk104_fifo_intr_pbdma_0()
744 gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit) in gk104_fifo_intr_pbdma_1() argument
746 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_pbdma_1()
765 gk104_fifo_intr_runlist(struct gk104_fifo *fifo) in gk104_fifo_intr_runlist() argument
767 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_intr_runlist()
771 wake_up(&fifo->runlist[runl].wait); in gk104_fifo_intr_runlist()
778 gk104_fifo_intr_engine(struct gk104_fifo *fifo) in gk104_fifo_intr_engine() argument
780 nvkm_fifo_uevent(&fifo->base); in gk104_fifo_intr_engine()
786 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_intr() local
787 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr()
793 gk104_fifo_intr_bind(fifo); in gk104_fifo_intr()
805 gk104_fifo_intr_sched(fifo); in gk104_fifo_intr()
811 gk104_fifo_intr_chsw(fifo); in gk104_fifo_intr()
829 gk104_fifo_intr_dropped_fault(fifo); in gk104_fifo_intr()
838 fifo->func->intr.fault(&fifo->base, unit); in gk104_fifo_intr()
849 gk104_fifo_intr_pbdma_0(fifo, unit); in gk104_fifo_intr()
850 gk104_fifo_intr_pbdma_1(fifo, unit); in gk104_fifo_intr()
858 gk104_fifo_intr_runlist(fifo); in gk104_fifo_intr()
864 gk104_fifo_intr_engine(fifo); in gk104_fifo_intr()
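
gk104_fifo_intr() above is a peel-the-bits dispatcher: it reads the pending-status register masked by the enable register, runs the matching helper for each known source (bind, sched, chsw, dropped fault, per-unit faults, per-unit PBDMA, runlist, engine) while clearing that status bit, and anything still set at the end is logged and masked off so an unknown source cannot storm the CPU. A sketch of the shape, with invented bit assignments:

    #include <stdio.h>
    #include <stdint.h>

    /* Bit values invented for the sketch; the real assignments come from
     * the hardware's interrupt status register layout. */
    #define INTR_BIND    0x00000001u
    #define INTR_SCHED   0x00000100u
    #define INTR_RUNLIST 0x40000000u

    static void fifo_intr(uint32_t stat)
    {
        if (stat & INTR_BIND)    { printf("bind error\n");    stat &= ~INTR_BIND; }
        if (stat & INTR_SCHED)   { printf("sched error\n");   stat &= ~INTR_SCHED; }
        if (stat & INTR_RUNLIST) { printf("runlist event\n"); stat &= ~INTR_RUNLIST; }
        if (stat)   /* leftovers get logged and disabled in the driver */
            printf("unhandled status %08x\n", (unsigned)stat);
    }

    int main(void)
    {
        fifo_intr(INTR_BIND | INTR_RUNLIST | 0x00000002u);
        return 0;
    }
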
878 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_fini() local
879 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_fini()
880 flush_work(&fifo->recover.work); in gk104_fifo_fini()
881 /* allow mmu fault interrupts, even when we're not using fifo */ in gk104_fifo_fini()
888 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_info() local
891 *data = (1ULL << fifo->runlist_nr) - 1; in gk104_fifo_info()
894 if (*data < fifo->runlist_nr) { in gk104_fifo_info()
895 unsigned long engm = fifo->runlist[*data].engm; in gk104_fifo_info()
899 for_each_set_bit(engn, &engm, fifo->engine_nr) { in gk104_fifo_info()
900 if ((engine = fifo->engine[engn].engine)) { in gk104_fifo_info()
938 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_oneinit() local
939 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_oneinit()
946 fifo->pbdma_nr = fifo->func->pbdma->nr(fifo); in gk104_fifo_oneinit()
947 nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr); in gk104_fifo_oneinit()
950 if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL))) in gk104_fifo_oneinit()
953 for (i = 0; i < fifo->pbdma_nr; i++) in gk104_fifo_oneinit()
965 for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) { in gk104_fifo_oneinit()
972 fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst); in gk104_fifo_oneinit()
973 if (!fifo->engine[engn].engine) { in gk104_fifo_oneinit()
978 en = fifo->engine[engn].engine->subdev.name; in gk104_fifo_oneinit()
984 fifo->engine[engn].runl = tdev->runlist; in gk104_fifo_oneinit()
985 fifo->engine[engn].pbid = pbid; in gk104_fifo_oneinit()
986 fifo->engine_nr = max(fifo->engine_nr, engn + 1); in gk104_fifo_oneinit()
987 fifo->runlist[tdev->runlist].engm |= BIT(engn); in gk104_fifo_oneinit()
988 fifo->runlist[tdev->runlist].engm_sw |= BIT(engn); in gk104_fifo_oneinit()
990 fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW); in gk104_fifo_oneinit()
991 fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1); in gk104_fifo_oneinit()
996 for (i = 0; i < fifo->runlist_nr; i++) { in gk104_fifo_oneinit()
997 for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) { in gk104_fifo_oneinit()
999 fifo->base.nr * 2/* TSG+chan */ * in gk104_fifo_oneinit()
1000 fifo->func->runlist->size, in gk104_fifo_oneinit()
1002 &fifo->runlist[i].mem[j]); in gk104_fifo_oneinit()
1007 init_waitqueue_head(&fifo->runlist[i].wait); in gk104_fifo_oneinit()
1008 INIT_LIST_HEAD(&fifo->runlist[i].cgrp); in gk104_fifo_oneinit()
1009 INIT_LIST_HEAD(&fifo->runlist[i].chan); in gk104_fifo_oneinit()
1013 fifo->base.nr * 0x200, 0x1000, true, in gk104_fifo_oneinit()
1014 &fifo->user.mem); in gk104_fifo_oneinit()
1018 ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem), in gk104_fifo_oneinit()
1019 &fifo->user.bar); in gk104_fifo_oneinit()
1023 return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0); in gk104_fifo_oneinit()
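
In gk104_fifo_oneinit() above, each runlist gets two buffers, mem[0] and mem[1], so runlist_update() can fill one while the hardware still reads the other, and each buffer is sized for the worst case of every channel sitting in its own TSG: fifo->base.nr * 2 entries (TSG header plus channel entry, per the "TSG+chan" comment) of func->runlist->size bytes. A trivial sketch of that sizing arithmetic, with hypothetical figures:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical figures; the real ones come from the chip's
         * function table and channel count. */
        const int nr = 4096;        /* channels the fifo supports */
        const int entry_size = 8;   /* bytes per runlist entry */
        const int bufs = 2;         /* mem[0]/mem[1], flipped on update */

        /* Worst case: every channel in its own TSG needs a TSG header
         * plus a channel entry, hence the "nr * 2" above. */
        printf("per-runlist allocation: %d x %d bytes\n",
               bufs, nr * 2 * entry_size);
        return 0;
    }
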
1029 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_init() local
1030 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_init()
1034 fifo->func->pbdma->init(fifo); in gk104_fifo_init()
1037 for (i = 0; i < fifo->pbdma_nr; i++) { in gk104_fifo_init()
1044 for (i = 0; i < fifo->pbdma_nr; i++) { in gk104_fifo_init()
1049 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12); in gk104_fifo_init()
1051 if (fifo->func->pbdma->init_timeout) in gk104_fifo_init()
1052 fifo->func->pbdma->init_timeout(fifo); in gk104_fifo_init()
1061 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_dtor() local
1062 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_dtor()
1065 nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar); in gk104_fifo_dtor()
1066 nvkm_memory_unref(&fifo->user.mem); in gk104_fifo_dtor()
1068 for (i = 0; i < fifo->runlist_nr; i++) { in gk104_fifo_dtor()
1069 nvkm_memory_unref(&fifo->runlist[i].mem[1]); in gk104_fifo_dtor()
1070 nvkm_memory_unref(&fifo->runlist[i].mem[0]); in gk104_fifo_dtor()
1073 return fifo; in gk104_fifo_dtor()
1098 struct gk104_fifo *fifo; in gk104_fifo_new_() local
1100 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) in gk104_fifo_new_()
1102 fifo->func = func; in gk104_fifo_new_()
1103 INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work); in gk104_fifo_new_()
1104 *pfifo = &fifo->base; in gk104_fifo_new_()
1106 return nvkm_fifo_ctor(&gk104_fifo_, device, type, inst, nr, &fifo->base); in gk104_fifo_new_()
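
gk104_fifo_new_() above is the usual nvkm construction shape: allocate the chip-specific object, stash the per-chip function table that everything earlier in this file dispatches through (fifo->func->...), initialise the recovery work item, then construct the embedded base object in place. A standalone sketch of that func-table pattern, all types invented:

    #include <stdio.h>
    #include <stdlib.h>

    struct fifo_func { const char *name; };
    struct fifo { const struct fifo_func *func; /* base would embed here */ };

    static int fifo_new(const struct fifo_func *func, struct fifo **pfifo)
    {
        struct fifo *fifo;
        if (!(fifo = calloc(1, sizeof(*fifo))))
            return -1;              /* -ENOMEM in the kernel */
        fifo->func = func;          /* per-chip dispatch table */
        *pfifo = fifo;
        return 0;                   /* base-object ctor would run here */
    }

    int main(void)
    {
        static const struct fifo_func gk104 = { "gk104" };
        struct fifo *fifo = NULL;
        if (fifo_new(&gk104, &fifo) == 0)
            printf("created %s fifo\n", fifo->func->name);
        free(fifo);
        return 0;
    }
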