Lines Matching refs:vmm

75 	struct nvkm_vmm *vmm;  member
113 VMM_TRACE(_it->vmm, "%s "f, _buf, ##a); \
129 if (it->vmm->func->flush) { in nvkm_vmm_flush()
131 it->vmm->func->flush(it->vmm, it->flush); in nvkm_vmm_flush()
145 struct nvkm_vmm *vmm = it->vmm; in nvkm_vmm_unref_pdes() local
159 func->sparse(vmm, pgd->pt[0], pdei, 1); in nvkm_vmm_unref_pdes()
162 func->unmap(vmm, pgd->pt[0], pdei, 1); in nvkm_vmm_unref_pdes()
170 func->pde(vmm, pgd, pdei); in nvkm_vmm_unref_pdes()
177 func->pde(vmm, pgd, pdei); in nvkm_vmm_unref_pdes()
190 nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt); in nvkm_vmm_unref_pdes()
203 struct nvkm_vmm *vmm = it->vmm; in nvkm_vmm_unref_sptes() local
244 pair->func->sparse(vmm, pgt->pt[0], pteb, ptes); in nvkm_vmm_unref_sptes()
252 pair->func->invalid(vmm, pgt->pt[0], pteb, ptes); in nvkm_vmm_unref_sptes()
290 struct nvkm_vmm *vmm = it->vmm; in nvkm_vmm_ref_sptes() local
336 desc->func->sparse(vmm, pgt->pt[1], spti, sptc); in nvkm_vmm_ref_sptes()
339 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes); in nvkm_vmm_ref_sptes()
346 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes); in nvkm_vmm_ref_sptes()
407 struct nvkm_vmm *vmm = it->vmm; in nvkm_vmm_ref_hwpt() local
408 struct nvkm_mmu *mmu = vmm->mmu; in nvkm_vmm_ref_hwpt()
445 desc->func->sparse(vmm, pt, pteb, ptes); in nvkm_vmm_ref_hwpt()
447 desc->func->invalid(vmm, pt, pteb, ptes); in nvkm_vmm_ref_hwpt()
450 desc->func->unmap(vmm, pt, pteb, ptes); in nvkm_vmm_ref_hwpt()
458 desc->func->sparse(vmm, pt, 0, pten); in nvkm_vmm_ref_hwpt()
460 desc->func->invalid(vmm, pt, 0, pten); in nvkm_vmm_ref_hwpt()
466 it->desc[it->lvl].func->pde(it->vmm, pgd, pdei); in nvkm_vmm_ref_hwpt()
489 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_iter() argument
501 it.vmm = vmm; in nvkm_vmm_iter()
511 it.pt[it.max] = vmm->pd; in nvkm_vmm_iter()
555 MAP_PTES(vmm, pt, ptei, ptes, map); in nvkm_vmm_iter()
557 CLR_PTES(vmm, pt, ptei, ptes); in nvkm_vmm_iter()
590 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_sparse_put() argument
593 nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, in nvkm_vmm_ptes_sparse_put()
600 nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_sparse_get() argument
604 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref", in nvkm_vmm_ptes_sparse_get()
609 nvkm_vmm_ptes_sparse_put(vmm, page, addr, size); in nvkm_vmm_ptes_sparse_get()
618 nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref) in nvkm_vmm_ptes_sparse() argument
620 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_ptes_sparse()
650 int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block); in nvkm_vmm_ptes_sparse()
653 nvkm_vmm_ptes_sparse(vmm, start, size, false); in nvkm_vmm_ptes_sparse()
657 nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block); in nvkm_vmm_ptes_sparse()
668 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_unmap_put() argument
672 nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref", in nvkm_vmm_ptes_unmap_put()
679 nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_get_map() argument
683 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true, in nvkm_vmm_ptes_get_map()
687 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false); in nvkm_vmm_ptes_get_map()
694 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_unmap() argument
698 nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, NULL, NULL, NULL, in nvkm_vmm_ptes_unmap()
704 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_map() argument
708 nvkm_vmm_iter(vmm, page, addr, size, "map", false, in nvkm_vmm_ptes_map()
713 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_put() argument
716 nvkm_vmm_iter(vmm, page, addr, size, "unref", false, in nvkm_vmm_ptes_put()
721 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_get() argument
724 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, in nvkm_vmm_ptes_get()
728 nvkm_vmm_ptes_put(vmm, page, addr, fail - addr); in nvkm_vmm_ptes_get()
771 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_insert() argument
773 struct rb_node **ptr = &vmm->free.rb_node; in nvkm_vmm_free_insert()
795 rb_insert_color(&vma->tree, &vmm->free); in nvkm_vmm_free_insert()
799 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_insert() argument
801 struct rb_node **ptr = &vmm->root.rb_node; in nvkm_vmm_node_insert()
817 rb_insert_color(&vma->tree, &vmm->root); in nvkm_vmm_node_insert()
821 nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr) in nvkm_vmm_node_search() argument
823 struct rb_node *node = vmm->root.rb_node; in nvkm_vmm_node_search()
838 nvkm_vmm_dtor(struct nvkm_vmm *vmm) in nvkm_vmm_dtor() argument
843 while ((node = rb_first(&vmm->root))) { in nvkm_vmm_dtor()
845 nvkm_vmm_put(vmm, &vma); in nvkm_vmm_dtor()
848 if (vmm->bootstrapped) { in nvkm_vmm_dtor()
849 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_dtor()
850 const u64 limit = vmm->limit - vmm->start; in nvkm_vmm_dtor()
855 nvkm_mmu_ptc_dump(vmm->mmu); in nvkm_vmm_dtor()
856 nvkm_vmm_ptes_put(vmm, page, vmm->start, limit); in nvkm_vmm_dtor()
859 vma = list_first_entry(&vmm->list, typeof(*vma), head); in nvkm_vmm_dtor()
862 WARN_ON(!list_empty(&vmm->list)); in nvkm_vmm_dtor()
864 if (vmm->nullp) { in nvkm_vmm_dtor()
865 dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024, in nvkm_vmm_dtor()
866 vmm->nullp, vmm->null); in nvkm_vmm_dtor()
869 if (vmm->pd) { in nvkm_vmm_dtor()
870 nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]); in nvkm_vmm_dtor()
871 nvkm_vmm_pt_del(&vmm->pd); in nvkm_vmm_dtor()
878 const char *name, struct nvkm_vmm *vmm) in nvkm_vmm_ctor() argument
886 vmm->func = func; in nvkm_vmm_ctor()
887 vmm->mmu = mmu; in nvkm_vmm_ctor()
888 vmm->name = name; in nvkm_vmm_ctor()
889 vmm->debug = mmu->subdev.debug; in nvkm_vmm_ctor()
890 kref_init(&vmm->kref); in nvkm_vmm_ctor()
892 __mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key); in nvkm_vmm_ctor()
912 vmm->start = addr; in nvkm_vmm_ctor()
913 vmm->limit = size ? (addr + size) : (1ULL << bits); in nvkm_vmm_ctor()
914 if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits)) in nvkm_vmm_ctor()
918 vmm->pd = nvkm_vmm_pt_new(desc, false, NULL); in nvkm_vmm_ctor()
919 if (!vmm->pd) in nvkm_vmm_ctor()
921 vmm->pd->refs[0] = 1; in nvkm_vmm_ctor()
922 INIT_LIST_HEAD(&vmm->join); in nvkm_vmm_ctor()
929 vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true); in nvkm_vmm_ctor()
930 if (!vmm->pd->pt[0]) in nvkm_vmm_ctor()
935 INIT_LIST_HEAD(&vmm->list); in nvkm_vmm_ctor()
936 vmm->free = RB_ROOT; in nvkm_vmm_ctor()
937 vmm->root = RB_ROOT; in nvkm_vmm_ctor()
939 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start))) in nvkm_vmm_ctor()
942 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_ctor()
943 list_add(&vma->head, &vmm->list); in nvkm_vmm_ctor()
957 #define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL : \
961 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap_region() argument
965 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags); in nvkm_vmm_unmap_region()
972 rb_erase(&vma->tree, &vmm->root); in nvkm_vmm_unmap_region()
983 rb_erase(&next->tree, &vmm->root); in nvkm_vmm_unmap_region()
991 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap_locked() argument
993 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd]; in nvkm_vmm_unmap_locked()
996 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse); in nvkm_vmm_unmap_locked()
999 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse); in nvkm_vmm_unmap_locked()
1002 nvkm_vmm_unmap_region(vmm, vma); in nvkm_vmm_unmap_locked()
1006 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap() argument
1009 mutex_lock(&vmm->mutex); in nvkm_vmm_unmap()
1010 nvkm_vmm_unmap_locked(vmm, vma); in nvkm_vmm_unmap()
1011 mutex_unlock(&vmm->mutex); in nvkm_vmm_unmap()
1016 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_valid() argument
1022 VMM_DEBUG(vmm, "%d !VRAM", map->page->shift); in nvkm_vmm_map_valid()
1029 VMM_DEBUG(vmm, "%d !HOST", map->page->shift); in nvkm_vmm_map_valid()
1042 VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d", in nvkm_vmm_map_valid()
1048 return vmm->func->valid(vmm, argv, argc, map); in nvkm_vmm_map_valid()
1052 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_choose() argument
1055 for (map->page = vmm->func->page; map->page->shift; map->page++) { in nvkm_vmm_map_choose()
1056 VMM_DEBUG(vmm, "trying %d", map->page->shift); in nvkm_vmm_map_choose()
1057 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map)) in nvkm_vmm_map_choose()
1064 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_locked() argument
1072 VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx", in nvkm_vmm_map_locked()
1082 const u32 debug = vmm->debug; in nvkm_vmm_map_locked()
1083 vmm->debug = 0; in nvkm_vmm_map_locked()
1084 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1085 vmm->debug = debug; in nvkm_vmm_map_locked()
1087 VMM_DEBUG(vmm, "invalid at any page size"); in nvkm_vmm_map_locked()
1088 nvkm_vmm_map_choose(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1094 map->page = &vmm->func->page[vma->refd]; in nvkm_vmm_map_locked()
1096 map->page = &vmm->func->page[vma->page]; in nvkm_vmm_map_locked()
1098 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1100 VMM_DEBUG(vmm, "invalid %d\n", ret); in nvkm_vmm_map_locked()
1132 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1136 vma->refd = map->page - vmm->func->page; in nvkm_vmm_map_locked()
1138 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1141 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags); in nvkm_vmm_map_locked()
1149 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc, in nvkm_vmm_map() argument
1153 mutex_lock(&vmm->mutex); in nvkm_vmm_map()
1154 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map); in nvkm_vmm_map()
1156 mutex_unlock(&vmm->mutex); in nvkm_vmm_map()
1161 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_put_region() argument
1166 rb_erase(&prev->tree, &vmm->free); in nvkm_vmm_put_region()
1174 rb_erase(&next->tree, &vmm->free); in nvkm_vmm_put_region()
1180 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_put_region()
1184 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_put_locked() argument
1186 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_put_locked()
1209 nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr, in nvkm_vmm_put_locked()
1214 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size); in nvkm_vmm_put_locked()
1226 nvkm_vmm_unmap_region(vmm, next); in nvkm_vmm_put_locked()
1238 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size); in nvkm_vmm_put_locked()
1249 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false); in nvkm_vmm_put_locked()
1253 rb_erase(&vma->tree, &vmm->root); in nvkm_vmm_put_locked()
1260 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_put_locked()
1264 nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma) in nvkm_vmm_put() argument
1268 mutex_lock(&vmm->mutex); in nvkm_vmm_put()
1269 nvkm_vmm_put_locked(vmm, vma); in nvkm_vmm_put()
1270 mutex_unlock(&vmm->mutex); in nvkm_vmm_put()
1276 nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse, in nvkm_vmm_get_locked() argument
1279 const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE]; in nvkm_vmm_get_locked()
1285 VMM_TRACE(vmm, "getref %d mapref %d sparse %d " in nvkm_vmm_get_locked()
1291 VMM_DEBUG(vmm, "args %016llx %d %d %d", in nvkm_vmm_get_locked()
1302 if (unlikely((getref || vmm->func->page_block) && !shift)) { in nvkm_vmm_get_locked()
1303 VMM_DEBUG(vmm, "page size required: %d %016llx", in nvkm_vmm_get_locked()
1304 getref, vmm->func->page_block); in nvkm_vmm_get_locked()
1312 for (page = vmm->func->page; page->shift; page++) { in nvkm_vmm_get_locked()
1318 VMM_DEBUG(vmm, "page %d %016llx", shift, size); in nvkm_vmm_get_locked()
1327 temp = vmm->free.rb_node; in nvkm_vmm_get_locked()
1348 const int p = page - vmm->func->page; in nvkm_vmm_get_locked()
1351 if (vmm->func->page_block && prev && prev->page != p) in nvkm_vmm_get_locked()
1352 addr = ALIGN(addr, vmm->func->page_block); in nvkm_vmm_get_locked()
1356 if (vmm->func->page_block && next && next->page != p) in nvkm_vmm_get_locked()
1357 tail = ALIGN_DOWN(tail, vmm->func->page_block); in nvkm_vmm_get_locked()
1360 rb_erase(&this->tree, &vmm->free); in nvkm_vmm_get_locked()
1374 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1377 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_get_locked()
1383 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1386 nvkm_vmm_free_insert(vmm, tmp); in nvkm_vmm_get_locked()
1391 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1393 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true); in nvkm_vmm_get_locked()
1395 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1399 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1405 vma->page = page - vmm->func->page; in nvkm_vmm_get_locked()
1408 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_get_locked()
1414 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma) in nvkm_vmm_get() argument
1417 mutex_lock(&vmm->mutex); in nvkm_vmm_get()
1418 ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma); in nvkm_vmm_get()
1419 mutex_unlock(&vmm->mutex); in nvkm_vmm_get()
1424 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) in nvkm_vmm_part() argument
1426 if (inst && vmm->func->part) { in nvkm_vmm_part()
1427 mutex_lock(&vmm->mutex); in nvkm_vmm_part()
1428 vmm->func->part(vmm, inst); in nvkm_vmm_part()
1429 mutex_unlock(&vmm->mutex); in nvkm_vmm_part()
1434 nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst) in nvkm_vmm_join() argument
1437 if (vmm->func->join) { in nvkm_vmm_join()
1438 mutex_lock(&vmm->mutex); in nvkm_vmm_join()
1439 ret = vmm->func->join(vmm, inst); in nvkm_vmm_join()
1440 mutex_unlock(&vmm->mutex); in nvkm_vmm_join()
1450 nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm); in nvkm_vmm_boot_ptes()
1455 nvkm_vmm_boot(struct nvkm_vmm *vmm) in nvkm_vmm_boot() argument
1457 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_boot()
1458 const u64 limit = vmm->limit - vmm->start; in nvkm_vmm_boot()
1464 ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit); in nvkm_vmm_boot()
1468 nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, in nvkm_vmm_boot()
1470 vmm->bootstrapped = true; in nvkm_vmm_boot()
1477 struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref); in nvkm_vmm_del() local
1478 nvkm_vmm_dtor(vmm); in nvkm_vmm_del()
1479 kfree(vmm); in nvkm_vmm_del()
1485 struct nvkm_vmm *vmm = *pvmm; in nvkm_vmm_unref() local
1486 if (vmm) { in nvkm_vmm_unref()
1487 kref_put(&vmm->kref, nvkm_vmm_del); in nvkm_vmm_unref()
1493 nvkm_vmm_ref(struct nvkm_vmm *vmm) in nvkm_vmm_ref() argument
1495 if (vmm) in nvkm_vmm_ref()
1496 kref_get(&vmm->kref); in nvkm_vmm_ref()
1497 return vmm; in nvkm_vmm_ref()
1506 struct nvkm_vmm *vmm = NULL; in nvkm_vmm_new() local
1508 ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc, key, name, &vmm); in nvkm_vmm_new()
1510 nvkm_vmm_unref(&vmm); in nvkm_vmm_new()
1511 *pvmm = vmm; in nvkm_vmm_new()
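
Taken together, the matches above outline the caller-facing lifecycle: nvkm_vmm_get()/nvkm_vmm_put() reserve and release VA regions, nvkm_vmm_map()/nvkm_vmm_unmap() attach and detach backing memory, and nvkm_vmm_ref()/nvkm_vmm_unref() manage the kref, with each wrapper taking vmm->mutex around its *_locked body (lines 1009-1011, 1153-1156, 1268-1270, 1417-1419 above), so callers need no extra locking. The sketch below is only an illustration assembled from those signatures; example_vmm_use(), the 4KiB page shift (12), the NULL/0 map arguments, and the struct nvkm_vmm_map initializer are assumptions for illustration, not code from the file.

/* Hedged caller-side sketch; assumes the in-tree nvkm MMU headers that
 * declare these entry points, and that struct nvkm_vmm_map carries the
 * backing memory as shown in nvkm_vmm_map_locked()'s references above. */
static int
example_vmm_use(struct nvkm_vmm *vmm, struct nvkm_memory *mem, u64 size)
{
	struct nvkm_vmm_map map = { .memory = mem, .offset = 0 };
	struct nvkm_vma *vma = NULL;
	int ret;

	nvkm_vmm_ref(vmm);			/* kref_get(&vmm->kref) */

	/* Reserve a VA region; the page shift (12 = 4KiB, illustrative)
	 * is validated against vmm->func->page in nvkm_vmm_get_locked(). */
	ret = nvkm_vmm_get(vmm, 12, size, &vma);
	if (ret)
		goto out_unref;

	/* Map the backing memory; argv/argc are backend-specific and
	 * checked by vmm->func->valid() via nvkm_vmm_map_valid(). */
	ret = nvkm_vmm_map(vmm, vma, NULL, 0, &map);
	if (ret)
		goto out_put;

	/* ... use the mapping at vma->addr ... */

	nvkm_vmm_unmap(vmm, vma);		/* unmap (+unref) the PTEs */
out_put:
	nvkm_vmm_put(vmm, &vma);		/* release the VA region */
out_unref:
	nvkm_vmm_unref(&vmm);			/* kref_put(); dtor on last ref */
	return ret;
}

In-tree callers pass backend-specific map arguments rather than NULL/0; those are checked by vmm->func->valid() in nvkm_vmm_map_valid() (line 1048 above) before the PTEs are written.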