Lines Matching +full:data +full:- +full:mirror

70 #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
71 #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
83 list_for_each_entry(ivmm, &svm->inst, head) { in nouveau_ivmm_find()
84 if (ivmm->inst == inst) in nouveau_ivmm_find()
100 struct hmm_mirror mirror; member
104 NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
106 NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
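The four logging macros above rely on the GNU `##a` variadic extension, which swallows the trailing comma when no arguments follow the format string. A standalone sketch of the same pattern (the `dbg()` name and prefix are illustrative, not from the driver):

```c
#include <stdio.h>

/* Same shape as SVMM_DBG: context pointer, printf format, GNU varargs. */
#define dbg(s, f, a...) \
	fprintf(stderr, "svm-%p: " f "\n", (void *)(s), ##a)

int main(void)
{
	int ctx;

	dbg(&ctx, "invalidate %016lx-%016lx", 0x1000UL, 0x2000UL);
	dbg(&ctx, "no varargs here");	/* ##a drops the trailing comma */
	return 0;
}
```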
109 nouveau_svmm_bind(struct drm_device *dev, void *data, in nouveau_svmm_bind() argument
113 struct drm_nouveau_svm_bind *args = data; in nouveau_svmm_bind()
118 args->va_start &= PAGE_MASK; in nouveau_svmm_bind()
119 args->va_end &= PAGE_MASK; in nouveau_svmm_bind()
122 if (args->reserved0 || args->reserved1) in nouveau_svmm_bind()
123 return -EINVAL; in nouveau_svmm_bind()
124 if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK)) in nouveau_svmm_bind()
125 return -EINVAL; in nouveau_svmm_bind()
126 if (args->va_start >= args->va_end) in nouveau_svmm_bind()
127 return -EINVAL; in nouveau_svmm_bind()
128 if (!args->npages) in nouveau_svmm_bind()
129 return -EINVAL; in nouveau_svmm_bind()
131 cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT; in nouveau_svmm_bind()
137 return -EINVAL; in nouveau_svmm_bind()
140 priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT; in nouveau_svmm_bind()
144 target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT; in nouveau_svmm_bind()
150 return -EINVAL; in nouveau_svmm_bind()
158 if (args->stride) in nouveau_svmm_bind()
159 return -EINVAL; in nouveau_svmm_bind()
161 size = ((unsigned long)args->npages) << PAGE_SHIFT; in nouveau_svmm_bind()
162 if ((args->va_start + size) <= args->va_start) in nouveau_svmm_bind()
163 return -EINVAL; in nouveau_svmm_bind()
164 if ((args->va_start + size) > args->va_end) in nouveau_svmm_bind()
165 return -EINVAL; in nouveau_svmm_bind()
174 down_read(&mm->mmap_sem); in nouveau_svmm_bind()
176 for (addr = args->va_start, end = args->va_start + size; addr < end;) { in nouveau_svmm_bind()
184 next = min(vma->vm_end, end); in nouveau_svmm_bind()
186 nouveau_dmem_migrate_vma(cli->drm, vma, addr, next); in nouveau_svmm_bind()
195 args->result = 0; in nouveau_svmm_bind()
197 up_read(&mm->mmap_sem); in nouveau_svmm_bind()
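nouveau_svmm_bind() validates everything before touching the mm: reserved fields must be zero, the header may only carry known bits, and command, priority, and target are each decoded by shifting the packed header down. A hedged sketch of that decode; only the shift-then-mask pattern matches the driver, the constants here are illustrative stand-ins for the ones in the drm_nouveau_svm_bind uapi header:

```c
#include <stdint.h>

/* Illustrative field layout, not the real uapi values. */
#define BIND_COMMAND_SHIFT   0
#define BIND_COMMAND_MASK    0xffu
#define BIND_PRIORITY_SHIFT  8
#define BIND_PRIORITY_MASK   0xfu
#define BIND_TARGET_SHIFT    12
#define BIND_TARGET_MASK     0xffffffffu

static void decode_header(uint64_t header, uint32_t *cmd,
			  uint32_t *priority, uint32_t *target)
{
	*cmd      = (header >> BIND_COMMAND_SHIFT)  & BIND_COMMAND_MASK;
	*priority = (header >> BIND_PRIORITY_SHIFT) & BIND_PRIORITY_MASK;
	*target   = (header >> BIND_TARGET_SHIFT)   & BIND_TARGET_MASK;
}
```

The size check at lines 161-165 guards against the shifted npages wrapping past the top of the address space: since unsigned overflow wraps, a wrapped start + size compares less than or equal to start and the range is rejected. A runnable restatement:

```c
#include <stdint.h>
#include <stdio.h>

static int range_ok(uint64_t start, uint64_t end, uint64_t size)
{
	if (start + size <= start)	/* wrapped past 2^64: reject */
		return 0;
	if (start + size > end)		/* spills past the bound VA window */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", range_ok(0x1000, 0x9000, 0x8000));	 /* 1: fits */
	printf("%d\n", range_ok(~0ULL - 0xfff, ~0ULL, 0x2000));	 /* 0: wraps */
	return 0;
}
```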
209 mutex_lock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_part()
210 ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst); in nouveau_svmm_part()
212 list_del(&ivmm->head); in nouveau_svmm_part()
215 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_part()
226 return -ENOMEM; in nouveau_svmm_join()
227 ivmm->svmm = svmm; in nouveau_svmm_join()
228 ivmm->inst = inst; in nouveau_svmm_join()
230 mutex_lock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_join()
231 list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst); in nouveau_svmm_join()
232 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_join()
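nouveau_svmm_join()/nouveau_svmm_part() maintain a mutex-protected list of {instance, svmm} pairs so the fault handler can translate a channel's instance address back to its mirror via nouveau_ivmm_find(). A userspace sketch of the same lookup pattern, with pared-down stand-ins for <linux/list.h> so it compiles on its own:

```c
#include <pthread.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ivmm {
	void *svmm;			/* owning mirror */
	unsigned long long inst;	/* channel instance address */
	struct list_head head;
};

static struct list_head inst_list = { &inst_list, &inst_list };
static pthread_mutex_t inst_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of nouveau_ivmm_find(): walk the list under the lock and
 * return the svmm registered for this instance, or NULL. */
static void *ivmm_find(unsigned long long inst)
{
	struct list_head *p;
	void *svmm = NULL;

	pthread_mutex_lock(&inst_mutex);
	for (p = inst_list.next; p != &inst_list; p = p->next) {
		struct ivmm *ivmm = container_of(p, struct ivmm, head);

		if (ivmm->inst == inst) {
			svmm = ivmm->svmm;
			break;
		}
	}
	pthread_mutex_unlock(&inst_mutex);
	return svmm;
}
```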
237 /* Invalidate SVMM address-range on GPU. */
242 bool super = svmm->vmm->vmm.object.client->super; in nouveau_svmm_invalidate()
243 svmm->vmm->vmm.object.client->super = true; in nouveau_svmm_invalidate()
244 nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR, in nouveau_svmm_invalidate()
247 .size = limit - start, in nouveau_svmm_invalidate()
249 svmm->vmm->vmm.object.client->super = super; in nouveau_svmm_invalidate()
254 nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror, in nouveau_svmm_sync_cpu_device_pagetables() argument
257 struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror); in nouveau_svmm_sync_cpu_device_pagetables()
258 unsigned long start = update->start; in nouveau_svmm_sync_cpu_device_pagetables()
259 unsigned long limit = update->end; in nouveau_svmm_sync_cpu_device_pagetables()
262 return -EAGAIN; in nouveau_svmm_sync_cpu_device_pagetables()
264 SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit); in nouveau_svmm_sync_cpu_device_pagetables()
266 mutex_lock(&svmm->mutex); in nouveau_svmm_sync_cpu_device_pagetables()
267 if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) { in nouveau_svmm_sync_cpu_device_pagetables()
268 if (start < svmm->unmanaged.start) { in nouveau_svmm_sync_cpu_device_pagetables()
270 svmm->unmanaged.limit); in nouveau_svmm_sync_cpu_device_pagetables()
272 start = svmm->unmanaged.limit; in nouveau_svmm_sync_cpu_device_pagetables()
276 mutex_unlock(&svmm->mutex); in nouveau_svmm_sync_cpu_device_pagetables()
281 nouveau_svmm_release(struct hmm_mirror *mirror) in nouveau_svmm_release() argument
296 hmm_mirror_unregister(&svmm->mirror); in nouveau_svmm_fini()
303 nouveau_svmm_init(struct drm_device *dev, void *data, in nouveau_svmm_init() argument
308 struct drm_nouveau_svm_init *args = data; in nouveau_svmm_init()
311 /* Allocate tracking for SVM-enabled VMM. */ in nouveau_svmm_init()
313 return -ENOMEM; in nouveau_svmm_init()
314 svmm->vmm = &cli->svm; in nouveau_svmm_init()
315 svmm->unmanaged.start = args->unmanaged_addr; in nouveau_svmm_init()
316 svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size; in nouveau_svmm_init()
317 mutex_init(&svmm->mutex); in nouveau_svmm_init()
320 mutex_lock(&cli->mutex); in nouveau_svmm_init()
321 if (cli->svm.cli) { in nouveau_svmm_init()
322 ret = -EBUSY; in nouveau_svmm_init()
332 ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true, in nouveau_svmm_init()
333 args->unmanaged_addr, args->unmanaged_size, in nouveau_svmm_init()
336 }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm); in nouveau_svmm_init()
340 /* Enable HMM mirroring of CPU address-space to VMM. */ in nouveau_svmm_init()
341 svmm->mm = get_task_mm(current); in nouveau_svmm_init()
342 down_write(&svmm->mm->mmap_sem); in nouveau_svmm_init()
343 svmm->mirror.ops = &nouveau_svmm; in nouveau_svmm_init()
344 ret = hmm_mirror_register(&svmm->mirror, svmm->mm); in nouveau_svmm_init()
346 cli->svm.svmm = svmm; in nouveau_svmm_init()
347 cli->svm.cli = cli; in nouveau_svmm_init()
349 up_write(&svmm->mm->mmap_sem); in nouveau_svmm_init()
350 mmput(svmm->mm); in nouveau_svmm_init()
355 mutex_unlock(&cli->mutex); in nouveau_svmm_init()
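From userspace, all of the above is reached through the SVM_INIT ioctl; the window passed in becomes svmm->unmanaged, the VA carve-out the mirror must leave alone. A hedged sketch of the call, assuming the uapi names DRM_IOCTL_NOUVEAU_SVM_INIT and struct drm_nouveau_svm_init and an already-open nouveau fd:

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>	/* uapi header; exact path assumed */

static int enable_svm(int fd, uint64_t unmanaged_addr, uint64_t unmanaged_size)
{
	struct drm_nouveau_svm_init args;

	memset(&args, 0, sizeof(args));
	args.unmanaged_addr = unmanaged_addr;	/* carve-out base */
	args.unmanaged_size = unmanaged_size;	/* carve-out size */

	return ioctl(fd, DRM_IOCTL_NOUVEAU_SVM_INIT, &args);
}
```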
378 WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object, in nouveau_svm_fault_replay()
394 WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object, in nouveau_svm_fault_cancel()
408 nouveau_svm_fault_cancel(svm, fault->inst, in nouveau_svm_fault_cancel_fault()
409 fault->hub, in nouveau_svm_fault_cancel_fault()
410 fault->gpc, in nouveau_svm_fault_cancel_fault()
411 fault->client); in nouveau_svm_fault_cancel_fault()
420 if ((ret = (s64)fa->inst - fb->inst)) in nouveau_svm_fault_cmp()
422 if ((ret = (s64)fa->addr - fb->addr)) in nouveau_svm_fault_cmp()
425 return (fa->access == 0 || fa->access == 3) - in nouveau_svm_fault_cmp()
426 (fb->access == 0 || fb->access == 3); in nouveau_svm_fault_cmp()
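The comparator orders faults by instance, then address, then access class, so duplicates land adjacent and writes sort ahead of reads (access 0) and prefetches (access 3) to the same page. A standalone restatement usable with qsort(); it is a trimmed stand-in that sorts records directly rather than through pointers as the driver does, and it clamps the 64-bit differences to -1/0/1 instead of truncating them to int:

```c
#include <stdint.h>
#include <stdlib.h>

struct fault { uint64_t inst, addr; uint8_t access; };

static int fault_cmp(const void *a, const void *b)
{
	const struct fault *fa = a, *fb = b;

	if (fa->inst != fb->inst)
		return fa->inst < fb->inst ? -1 : 1;
	if (fa->addr != fb->addr)
		return fa->addr < fb->addr ? -1 : 1;
	/* reads (0) and prefetches (3) sort after writes to the same page */
	return (fa->access == 0 || fa->access == 3) -
	       (fb->access == 0 || fb->access == 3);
}
```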
433 struct nvif_object *memory = &buffer->object; in nouveau_svm_fault_cache()
454 if (!buffer->fault[buffer->fault_nr]) { in nouveau_svm_fault_cache()
460 buffer->fault[buffer->fault_nr] = fault; in nouveau_svm_fault_cache()
463 fault = buffer->fault[buffer->fault_nr++]; in nouveau_svm_fault_cache()
464 fault->inst = inst; in nouveau_svm_fault_cache()
465 fault->addr = (u64)addrhi << 32 | addrlo; in nouveau_svm_fault_cache()
466 fault->time = (u64)timehi << 32 | timelo; in nouveau_svm_fault_cache()
467 fault->engine = engine; in nouveau_svm_fault_cache()
468 fault->gpc = gpc; in nouveau_svm_fault_cache()
469 fault->hub = hub; in nouveau_svm_fault_cache()
470 fault->access = (info & 0x000f0000) >> 16; in nouveau_svm_fault_cache()
471 fault->client = client; in nouveau_svm_fault_cache()
472 fault->fault = (info & 0x0000001f); in nouveau_svm_fault_cache()
475 fault->inst, fault->addr, fault->access); in nouveau_svm_fault_cache()
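Each fault record occupies 0x20 bytes in the buffer (hence buffer->get * 0x20 in the drain loop below), and 64-bit values arrive as hi/lo 32-bit pairs. A sketch of the decode; the two info-field extractions are exactly the ones visible above, anything beyond that would come from the hardware documentation:

```c
#include <stdint.h>

/* Stitch a 64-bit value back together from its two 32-bit buffer words. */
static inline uint64_t rd64_pair(uint32_t hi, uint32_t lo)
{
	return (uint64_t)hi << 32 | lo;
}

/* Field extraction from the packed 'info' word, per the listing. */
static inline uint8_t fault_access(uint32_t info)
{
	return (info & 0x000f0000) >> 16;	/* 0 = read, 3 = prefetch */
}

static inline uint8_t fault_code(uint32_t info)
{
	return info & 0x0000001f;
}
```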
492 range->default_flags = 0; in nouveau_range_fault()
493 range->pfn_flags_mask = -1UL; in nouveau_range_fault()
495 ret = hmm_range_register(range, &svmm->mirror); in nouveau_range_fault()
497 up_read(&svmm->mm->mmap_sem); in nouveau_range_fault()
502 up_read(&svmm->mm->mmap_sem); in nouveau_range_fault()
503 return -EBUSY; in nouveau_range_fault()
509 ret = -EBUSY; in nouveau_range_fault()
510 up_read(&svmm->mm->mmap_sem); in nouveau_range_fault()
523 container_of(buffer, typeof(*svm), buffer[buffer->id]); in nouveau_svm_fault()
524 struct nvif_object *device = &svm->drm->client.device.object; in nouveau_svm_fault()
544 if (buffer->get == buffer->put) { in nouveau_svm_fault()
545 buffer->put = nvif_rd32(device, buffer->putaddr); in nouveau_svm_fault()
546 buffer->get = nvif_rd32(device, buffer->getaddr); in nouveau_svm_fault()
547 if (buffer->get == buffer->put) in nouveau_svm_fault()
550 buffer->fault_nr = 0; in nouveau_svm_fault()
552 SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put); in nouveau_svm_fault()
553 while (buffer->get != buffer->put) { in nouveau_svm_fault()
554 nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20); in nouveau_svm_fault()
555 if (++buffer->get == buffer->entries) in nouveau_svm_fault()
556 buffer->get = 0; in nouveau_svm_fault()
558 nvif_wr32(device, buffer->getaddr, buffer->get); in nouveau_svm_fault()
559 SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr); in nouveau_svm_fault()
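The loop above drains a classic get/put ring: the GPU advances put, the driver walks get toward put with wraparound at entries, then writes get back so the hardware can reuse the slots. A runnable userspace model of the same drain:

```c
#include <stdint.h>
#include <stdio.h>

#define ENTRIES 8

static uint32_t ring_get, ring_put;

static void drain(void (*consume)(uint32_t slot))
{
	while (ring_get != ring_put) {
		consume(ring_get);
		if (++ring_get == ENTRIES)
			ring_get = 0;	/* wrap, as the driver does */
	}
	/* here the driver would nvif_wr32() the new get pointer back */
}

static void show(uint32_t slot) { printf("fault record at slot %u\n", slot); }

int main(void)
{
	ring_get = 6;
	ring_put = 2;	/* producer wrapped: slots 6, 7, 0, 1 are pending */
	drain(show);
	return 0;
}
```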
565 sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault), in nouveau_svm_fault()
569 mutex_lock(&svm->mutex); in nouveau_svm_fault()
570 for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) { in nouveau_svm_fault()
571 if (!svmm || buffer->fault[fi]->inst != inst) { in nouveau_svm_fault()
573 nouveau_ivmm_find(svm, buffer->fault[fi]->inst); in nouveau_svm_fault()
574 svmm = ivmm ? ivmm->svmm : NULL; in nouveau_svm_fault()
575 inst = buffer->fault[fi]->inst; in nouveau_svm_fault()
576 SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm); in nouveau_svm_fault()
578 buffer->fault[fi]->svmm = svmm; in nouveau_svm_fault()
580 mutex_unlock(&svm->mutex); in nouveau_svm_fault()
589 for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) { in nouveau_svm_fault()
590 /* Cancel any faults from non-SVM channels. */ in nouveau_svm_fault()
591 if (!(svmm = buffer->fault[fi]->svmm)) { in nouveau_svm_fault()
592 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); in nouveau_svm_fault()
595 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr); in nouveau_svm_fault()
600 start = buffer->fault[fi]->addr; in nouveau_svm_fault()
602 if (start < svmm->unmanaged.limit) in nouveau_svm_fault()
603 limit = min_t(u64, limit, svmm->unmanaged.start); in nouveau_svm_fault()
605 if (limit > svmm->unmanaged.start) in nouveau_svm_fault()
606 start = max_t(u64, start, svmm->unmanaged.limit); in nouveau_svm_fault()
607 SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); in nouveau_svm_fault()
612 down_read(&svmm->mm->mmap_sem); in nouveau_svm_fault()
613 vma = find_vma_intersection(svmm->mm, start, limit); in nouveau_svm_fault()
615 SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit); in nouveau_svm_fault()
616 up_read(&svmm->mm->mmap_sem); in nouveau_svm_fault()
617 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); in nouveau_svm_fault()
620 start = max_t(u64, start, vma->vm_start); in nouveau_svm_fault()
621 limit = min_t(u64, limit, vma->vm_end); in nouveau_svm_fault()
622 SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); in nouveau_svm_fault()
624 if (buffer->fault[fi]->addr != start) { in nouveau_svm_fault()
625 SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr); in nouveau_svm_fault()
626 up_read(&svmm->mm->mmap_sem); in nouveau_svm_fault()
627 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); in nouveau_svm_fault()
629 continue; in nouveau_svm_fault()
631 /* Prepare the GPU-side update of all pages within the in nouveau_svm_fault()
642 if (buffer->fault[fn]->access != 0 /* READ. */ && in nouveau_svm_fault()
643 buffer->fault[fn]->access != 3 /* PREFETCH. */) { in nouveau_svm_fault()
658 while (++fn < buffer->fault_nr && in nouveau_svm_fault()
659 buffer->fault[fn]->svmm == svmm && in nouveau_svm_fault()
660 buffer->fault[fn]->addr == in nouveau_svm_fault()
661 buffer->fault[fn - 1]->addr); in nouveau_svm_fault()
666 if (fn >= buffer->fault_nr || in nouveau_svm_fault()
667 buffer->fault[fn]->svmm != svmm || in nouveau_svm_fault()
668 buffer->fault[fn]->addr >= limit) in nouveau_svm_fault()
672 fill = (buffer->fault[fn]->addr - in nouveau_svm_fault()
673 buffer->fault[fn - 1]->addr) >> PAGE_SHIFT; in nouveau_svm_fault()
674 while (--fill) in nouveau_svm_fault()
678 SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)", in nouveau_svm_fault()
680 args.i.p.addr + args.i.p.size, fn - fi); in nouveau_svm_fault()
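Before issuing one GPU update for the whole window, duplicate fault addresses are collapsed (the while (++fn ...) loop) and the hole between two consecutive faulted pages is padded so the pfn array stays dense. A runnable model of that coalescing; the pfn values are illustrative, not the driver's encoding:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_NONE   0	/* stand-in for the driver's "no map" slot */

int main(void)
{
	uint64_t addrs[] = { 0x1000, 0x1000, 0x3000 };	/* sorted fault addrs */
	int nr = 3, fi = 0, pi = 0;
	uint64_t pfns[8];

	while (fi < nr) {
		int fn = fi;

		/* skip duplicate faults on the same page */
		while (++fn < nr && addrs[fn] == addrs[fn - 1])
			;
		pfns[pi++] = addrs[fi] >> PAGE_SHIFT;	/* page to fault in */
		if (fn < nr) {
			/* pad the gap up to the next faulted page */
			uint64_t fill = (addrs[fn] - addrs[fn - 1]) >> PAGE_SHIFT;

			while (--fill)
				pfns[pi++] = PFN_NONE;
		}
		fi = fn;
	}
	for (int i = 0; i < pi; i++)
		printf("slot %d: %#llx\n", i, (unsigned long long)pfns[i]);
	return 0;
}
```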
692 mutex_lock(&svmm->mutex); in nouveau_svm_fault()
694 mutex_unlock(&svmm->mutex); in nouveau_svm_fault()
698 nouveau_dmem_convert_pfn(svm->drm, &range); in nouveau_svm_fault()
700 svmm->vmm->vmm.object.client->super = true; in nouveau_svm_fault()
701 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, in nouveau_svm_fault()
705 svmm->vmm->vmm.object.client->super = false; in nouveau_svm_fault()
706 mutex_unlock(&svmm->mutex); in nouveau_svm_fault()
707 up_read(&svmm->mm->mmap_sem); in nouveau_svm_fault()
716 struct nouveau_svm_fault *fault = buffer->fault[fi++]; in nouveau_svm_fault()
717 pi = (fault->addr - range.start) >> PAGE_SHIFT; in nouveau_svm_fault()
721 fault->access != 0 && fault->access != 3)) { in nouveau_svm_fault()
738 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id]; in nouveau_svm_fault_buffer_fini()
739 nvif_notify_put(&buffer->notify); in nouveau_svm_fault_buffer_fini()
745 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id]; in nouveau_svm_fault_buffer_init()
746 struct nvif_object *device = &svm->drm->client.device.object; in nouveau_svm_fault_buffer_init()
747 buffer->get = nvif_rd32(device, buffer->getaddr); in nouveau_svm_fault_buffer_init()
748 buffer->put = nvif_rd32(device, buffer->putaddr); in nouveau_svm_fault_buffer_init()
749 SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put); in nouveau_svm_fault_buffer_init()
750 return nvif_notify_get(&buffer->notify); in nouveau_svm_fault_buffer_init()
756 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id]; in nouveau_svm_fault_buffer_dtor()
759 if (buffer->fault) { in nouveau_svm_fault_buffer_dtor()
760 for (i = 0; i < buffer->entries && buffer->fault[i]; i++) in nouveau_svm_fault_buffer_dtor()
761 kfree(buffer->fault[i]); in nouveau_svm_fault_buffer_dtor()
762 kvfree(buffer->fault); in nouveau_svm_fault_buffer_dtor()
767 nvif_notify_fini(&buffer->notify); in nouveau_svm_fault_buffer_dtor()
768 nvif_object_fini(&buffer->object); in nouveau_svm_fault_buffer_dtor()
774 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id]; in nouveau_svm_fault_buffer_ctor()
775 struct nouveau_drm *drm = svm->drm; in nouveau_svm_fault_buffer_ctor()
776 struct nvif_object *device = &drm->client.device.object; in nouveau_svm_fault_buffer_ctor()
780 buffer->id = id; in nouveau_svm_fault_buffer_ctor()
783 &buffer->object); in nouveau_svm_fault_buffer_ctor()
789 nvif_object_map(&buffer->object, NULL, 0); in nouveau_svm_fault_buffer_ctor()
790 buffer->entries = args.entries; in nouveau_svm_fault_buffer_ctor()
791 buffer->getaddr = args.get; in nouveau_svm_fault_buffer_ctor()
792 buffer->putaddr = args.put; in nouveau_svm_fault_buffer_ctor()
794 ret = nvif_notify_init(&buffer->object, nouveau_svm_fault, true, in nouveau_svm_fault_buffer_ctor()
796 &buffer->notify); in nouveau_svm_fault_buffer_ctor()
800 buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL); in nouveau_svm_fault_buffer_ctor()
801 if (!buffer->fault) in nouveau_svm_fault_buffer_ctor()
802 return -ENOMEM; in nouveau_svm_fault_buffer_ctor()
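One hedged hardening note on the allocation above: the multiplication in the kvzalloc() size is open-coded, and on kernels that have it, kvcalloc() performs the same zeroed allocation with an overflow-checked product:

```c
	buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault),
				 GFP_KERNEL);
```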
810 struct nouveau_svm *svm = drm->svm; in nouveau_svm_resume()
818 struct nouveau_svm *svm = drm->svm; in nouveau_svm_suspend()
826 struct nouveau_svm *svm = drm->svm; in nouveau_svm_fini()
829 kfree(drm->svm); in nouveau_svm_fini()
830 drm->svm = NULL; in nouveau_svm_fini()
849 if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL) in nouveau_svm_init()
852 if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL))) in nouveau_svm_init()
855 drm->svm->drm = drm; in nouveau_svm_init()
856 mutex_init(&drm->svm->mutex); in nouveau_svm_init()
857 INIT_LIST_HEAD(&drm->svm->inst); in nouveau_svm_init()
859 ret = nvif_mclass(&drm->client.device.object, buffers); in nouveau_svm_init()
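nvif_mclass() probes a zero-terminated class list against the device and returns the index of the first class the hardware supports. For this call the table would look roughly like the sketch below; the two fault-buffer class names are the ones nouveau exposes in this era, but treat the exact list as an assumption:

```c
	static const struct nvif_mclass buffers[] = {
		{   VOLTA_FAULT_BUFFER_A, 0 },
		{ MAXWELL_FAULT_BUFFER_A, 0 },
		{}
	};
```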