30 #include <linux/dma-mapping.h>
51 * NV10-NV40 tiling helpers
59 int i = reg - drm->tile.reg; in nv10_bo_update_tile_region()
60 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); in nv10_bo_update_tile_region()
61 struct nvkm_fb_tile *tile = &fb->tile.region[i]; in nv10_bo_update_tile_region()
63 nouveau_fence_unref(&reg->fence); in nv10_bo_update_tile_region()
65 if (tile->pitch) in nv10_bo_update_tile_region()
78 struct nouveau_drm_tile *tile = &drm->tile.reg[i]; in nv10_bo_get_tile_region()
80 spin_lock(&drm->tile.lock); in nv10_bo_get_tile_region()
82 if (!tile->used && in nv10_bo_get_tile_region()
83 (!tile->fence || nouveau_fence_done(tile->fence))) in nv10_bo_get_tile_region()
84 tile->used = true; in nv10_bo_get_tile_region()
88 spin_unlock(&drm->tile.lock); in nv10_bo_get_tile_region()
99 spin_lock(&drm->tile.lock); in nv10_bo_put_tile_region()
100 tile->fence = (struct nouveau_fence *)dma_fence_get(fence); in nv10_bo_put_tile_region()
101 tile->used = false; in nv10_bo_put_tile_region()
102 spin_unlock(&drm->tile.lock); in nv10_bo_put_tile_region()
111 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); in nv10_bo_set_tiling()
115 for (i = 0; i < fb->tile.regions; i++) { in nv10_bo_set_tiling()
122 } else if (tile && fb->tile.region[i].pitch) { in nv10_bo_set_tiling()
138 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_ttm()
139 struct drm_device *dev = drm->dev; in nouveau_bo_del_ttm()
142 WARN_ON(nvbo->bo.pin_count > 0); in nouveau_bo_del_ttm()
144 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); in nouveau_bo_del_ttm()
150 if (bo->base.dev) in nouveau_bo_del_ttm()
151 drm_gem_object_release(&bo->base); in nouveau_bo_del_ttm()
153 dma_resv_fini(&bo->base._resv); in nouveau_bo_del_ttm()
161 x += y - 1; in roundup_64()
169 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_fixup_align()
170 struct nvif_device *device = &drm->client.device; in nouveau_bo_fixup_align()
172 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_fixup_align()
173 if (nvbo->mode) { in nouveau_bo_fixup_align()
174 if (device->info.chipset >= 0x40) { in nouveau_bo_fixup_align()
176 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
178 } else if (device->info.chipset >= 0x30) { in nouveau_bo_fixup_align()
180 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
182 } else if (device->info.chipset >= 0x20) { in nouveau_bo_fixup_align()
184 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
186 } else if (device->info.chipset >= 0x10) { in nouveau_bo_fixup_align()
188 *size = roundup_64(*size, 32 * nvbo->mode); in nouveau_bo_fixup_align()
192 *size = roundup_64(*size, (1 << nvbo->page)); in nouveau_bo_fixup_align()
193 *align = max((1 << nvbo->page), *align); in nouveau_bo_fixup_align()
203 struct nouveau_drm *drm = cli->drm; in nouveau_bo_alloc()
205 struct nvif_mmu *mmu = &cli->mmu; in nouveau_bo_alloc()
206 struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm; in nouveau_bo_alloc()
207 int i, pi = -1; in nouveau_bo_alloc()
211 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
216 return ERR_PTR(-ENOMEM); in nouveau_bo_alloc()
217 INIT_LIST_HEAD(&nvbo->head); in nouveau_bo_alloc()
218 INIT_LIST_HEAD(&nvbo->entry); in nouveau_bo_alloc()
219 INIT_LIST_HEAD(&nvbo->vma_list); in nouveau_bo_alloc()
220 nvbo->bo.bdev = &drm->ttm.bdev; in nouveau_bo_alloc()
227 /* Determine if we can get a cache-coherent map, forcing in nouveau_bo_alloc()
231 nvbo->force_coherent = true; in nouveau_bo_alloc()
234 if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) { in nouveau_bo_alloc()
235 nvbo->kind = (tile_flags & 0x0000ff00) >> 8; in nouveau_bo_alloc()
236 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { in nouveau_bo_alloc()
238 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
241 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; in nouveau_bo_alloc()
243 if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_alloc()
244 nvbo->kind = (tile_flags & 0x00007f00) >> 8; in nouveau_bo_alloc()
245 nvbo->comp = (tile_flags & 0x00030000) >> 16; in nouveau_bo_alloc()
246 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { in nouveau_bo_alloc()
248 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
251 nvbo->zeta = (tile_flags & 0x00000007); in nouveau_bo_alloc()
253 nvbo->mode = tile_mode; in nouveau_bo_alloc()
254 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG); in nouveau_bo_alloc()
257 for (i = 0; i < vmm->page_nr; i++) { in nouveau_bo_alloc()
260 * size for the buffer up-front, and pre-allocate its in nouveau_bo_alloc()
265 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && in nouveau_bo_alloc()
266 (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) in nouveau_bo_alloc()
269 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) in nouveau_bo_alloc()
276 if (pi < 0 || !nvbo->comp || vmm->page[i].comp) in nouveau_bo_alloc()
280 if (*size >= 1ULL << vmm->page[i].shift) in nouveau_bo_alloc()
285 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
288 if (nvbo->comp && !vmm->page[pi].comp) { in nouveau_bo_alloc()
289 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100) in nouveau_bo_alloc()
290 nvbo->kind = mmu->kind[nvbo->kind]; in nouveau_bo_alloc()
291 nvbo->comp = 0; in nouveau_bo_alloc()
293 nvbo->page = vmm->page[pi].shift; in nouveau_bo_alloc()
308 INIT_LIST_HEAD(&nvbo->io_reserve_lru); in nouveau_bo_init()
310 ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type, in nouveau_bo_init()
311 &nvbo->placement, align >> PAGE_SHIFT, false, sg, in nouveau_bo_init()
335 nvbo->bo.base.size = size; in nouveau_bo_new()
336 dma_resv_init(&nvbo->bo.base._resv); in nouveau_bo_new()
337 drm_vma_node_reset(&nvbo->bo.base.vma_node); in nouveau_bo_new()
371 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in set_placement_range()
372 u64 vram_size = drm->client.device.info.ram_size; in set_placement_range()
375 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && in set_placement_range()
376 nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) && in set_placement_range()
377 nvbo->bo.base.size < vram_size / 4) { in set_placement_range()
381 * speed up when alpha-blending and depth-test are enabled in set_placement_range()
384 if (nvbo->zeta) { in set_placement_range()
391 for (i = 0; i < nvbo->placement.num_placement; ++i) { in set_placement_range()
392 nvbo->placements[i].fpfn = fpfn; in set_placement_range()
393 nvbo->placements[i].lpfn = lpfn; in set_placement_range()
395 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { in set_placement_range()
396 nvbo->busy_placements[i].fpfn = fpfn; in set_placement_range()
397 nvbo->busy_placements[i].lpfn = lpfn; in set_placement_range()
406 struct ttm_placement *pl = &nvbo->placement; in nouveau_bo_placement_set()
408 pl->placement = nvbo->placements; in nouveau_bo_placement_set()
409 set_placement_list(nvbo->placements, &pl->num_placement, domain); in nouveau_bo_placement_set()
411 pl->busy_placement = nvbo->busy_placements; in nouveau_bo_placement_set()
412 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, in nouveau_bo_placement_set()
421 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_pin()
422 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_pin()
430 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_bo_pin()
432 if (!nvbo->contig) { in nouveau_bo_pin()
433 nvbo->contig = true; in nouveau_bo_pin()
439 if (nvbo->bo.pin_count) { in nouveau_bo_pin()
442 switch (bo->resource->mem_type) { in nouveau_bo_pin()
456 bo->resource->mem_type, domain); in nouveau_bo_pin()
457 ret = -EBUSY; in nouveau_bo_pin()
459 ttm_bo_pin(&nvbo->bo); in nouveau_bo_pin()
475 ttm_bo_pin(&nvbo->bo); in nouveau_bo_pin()
477 switch (bo->resource->mem_type) { in nouveau_bo_pin()
479 drm->gem.vram_available -= bo->base.size; in nouveau_bo_pin()
482 drm->gem.gart_available -= bo->base.size; in nouveau_bo_pin()
490 nvbo->contig = false; in nouveau_bo_pin()
498 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_unpin()
499 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_unpin()
506 ttm_bo_unpin(&nvbo->bo); in nouveau_bo_unpin()
507 if (!nvbo->bo.pin_count) { in nouveau_bo_unpin()
508 switch (bo->resource->mem_type) { in nouveau_bo_unpin()
510 drm->gem.vram_available += bo->base.size; in nouveau_bo_unpin()
513 drm->gem.gart_available += bo->base.size; in nouveau_bo_unpin()
529 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); in nouveau_bo_map()
533 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap); in nouveau_bo_map()
535 ttm_bo_unreserve(&nvbo->bo); in nouveau_bo_map()
545 ttm_bo_kunmap(&nvbo->kmap); in nouveau_bo_unmap()
551 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_device()
552 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_device()
555 if (!ttm_dma || !ttm_dma->dma_address) in nouveau_bo_sync_for_device()
557 if (!ttm_dma->pages) { in nouveau_bo_sync_for_device()
563 if (nvbo->force_coherent) in nouveau_bo_sync_for_device()
567 while (i < ttm_dma->num_pages) { in nouveau_bo_sync_for_device()
568 struct page *p = ttm_dma->pages[i]; in nouveau_bo_sync_for_device()
571 for (j = i + 1; j < ttm_dma->num_pages; ++j) { in nouveau_bo_sync_for_device()
572 if (++p != ttm_dma->pages[j]) in nouveau_bo_sync_for_device()
577 dma_sync_single_for_device(drm->dev->dev, in nouveau_bo_sync_for_device()
578 ttm_dma->dma_address[i], in nouveau_bo_sync_for_device()
587 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_cpu()
588 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_cpu()
591 if (!ttm_dma || !ttm_dma->dma_address) in nouveau_bo_sync_for_cpu()
593 if (!ttm_dma->pages) { in nouveau_bo_sync_for_cpu()
599 if (nvbo->force_coherent) in nouveau_bo_sync_for_cpu()
603 while (i < ttm_dma->num_pages) { in nouveau_bo_sync_for_cpu()
604 struct page *p = ttm_dma->pages[i]; in nouveau_bo_sync_for_cpu()
607 for (j = i + 1; j < ttm_dma->num_pages; ++j) { in nouveau_bo_sync_for_cpu()
608 if (++p != ttm_dma->pages[j]) in nouveau_bo_sync_for_cpu()
614 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], in nouveau_bo_sync_for_cpu()
622 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_add_io_reserve_lru()
625 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
626 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); in nouveau_bo_add_io_reserve_lru()
627 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
632 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_io_reserve_lru()
635 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
636 list_del_init(&nvbo->io_reserve_lru); in nouveau_bo_del_io_reserve_lru()
637 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
647 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx); in nouveau_bo_validate()
660 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr16()
674 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_rd32()
688 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr32()
702 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_tt_create()
704 if (drm->agp.bridge) { in nouveau_ttm_tt_create()
705 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags); in nouveau_ttm_tt_create()
720 return -EINVAL; in nouveau_ttm_tt_bind()
722 if (drm->agp.bridge) in nouveau_ttm_tt_bind()
734 if (drm->agp.bridge) { in nouveau_ttm_tt_unbind()
747 switch (bo->resource->mem_type) { in nouveau_bo_evict_flags()
757 *pl = nvbo->placement; in nouveau_bo_evict_flags()
764 struct nouveau_mem *old_mem = nouveau_mem(bo->resource); in nouveau_bo_move_prep()
766 struct nvif_vmm *vmm = &drm->client.vmm.vmm; in nouveau_bo_move_prep()
769 ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0, in nouveau_bo_move_prep()
770 old_mem->mem.size, &old_mem->vma[0]); in nouveau_bo_move_prep()
774 ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0, in nouveau_bo_move_prep()
775 new_mem->mem.size, &old_mem->vma[1]); in nouveau_bo_move_prep()
779 ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]); in nouveau_bo_move_prep()
783 ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]); in nouveau_bo_move_prep()
786 nvif_vmm_put(vmm, &old_mem->vma[1]); in nouveau_bo_move_prep()
787 nvif_vmm_put(vmm, &old_mem->vma[0]); in nouveau_bo_move_prep()
797 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move_m2mf()
798 struct nouveau_channel *chan = drm->ttm.chan; in nouveau_bo_move_m2mf()
799 struct nouveau_cli *cli = (void *)chan->user.client; in nouveau_bo_move_m2mf()
807 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move_m2mf()
813 if (drm_drv_uses_atomic_modeset(drm->dev)) in nouveau_bo_move_m2mf()
814 mutex_lock(&cli->mutex); in nouveau_bo_move_m2mf()
816 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); in nouveau_bo_move_m2mf()
817 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible); in nouveau_bo_move_m2mf()
819 ret = drm->ttm.move(chan, bo, bo->resource, new_reg); in nouveau_bo_move_m2mf()
824 &fence->base, in nouveau_bo_move_m2mf()
831 mutex_unlock(&cli->mutex); in nouveau_bo_move_m2mf()
876 if (mthd->engine) in nouveau_bo_move_init()
877 chan = drm->cechan; in nouveau_bo_move_init()
879 chan = drm->channel; in nouveau_bo_move_init()
883 ret = nvif_object_ctor(&chan->user, "ttmBoMove", in nouveau_bo_move_init()
884 mthd->oclass | (mthd->engine << 16), in nouveau_bo_move_init()
885 mthd->oclass, NULL, 0, in nouveau_bo_move_init()
886 &drm->ttm.copy); in nouveau_bo_move_init()
888 ret = mthd->init(chan, drm->ttm.copy.handle); in nouveau_bo_move_init()
890 nvif_object_dtor(&drm->ttm.copy); in nouveau_bo_move_init()
894 drm->ttm.move = mthd->exec; in nouveau_bo_move_init()
895 drm->ttm.chan = chan; in nouveau_bo_move_init()
896 name = mthd->name; in nouveau_bo_move_init()
899 } while ((++mthd)->exec); in nouveau_bo_move_init()
912 if (bo->destroy != nouveau_bo_del_ttm) in nouveau_bo_move_ntfy()
917 if (mem && new_reg->mem_type != TTM_PL_SYSTEM && in nouveau_bo_move_ntfy()
918 mem->mem.page == nvbo->page) { in nouveau_bo_move_ntfy()
919 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_move_ntfy()
923 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_move_ntfy()
930 nvbo->offset = (new_reg->start << PAGE_SHIFT); in nouveau_bo_move_ntfy()
938 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_bind()
939 struct drm_device *dev = drm->dev; in nouveau_bo_vm_bind()
941 u64 offset = new_reg->start << PAGE_SHIFT; in nouveau_bo_vm_bind()
944 if (new_reg->mem_type != TTM_PL_VRAM) in nouveau_bo_vm_bind()
947 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { in nouveau_bo_vm_bind()
948 *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size, in nouveau_bo_vm_bind()
949 nvbo->mode, nvbo->zeta); in nouveau_bo_vm_bind()
960 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_cleanup()
961 struct drm_device *dev = drm->dev; in nouveau_bo_vm_cleanup()
962 struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv); in nouveau_bo_vm_cleanup()
974 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move()
976 struct ttm_resource *old_reg = bo->resource; in nouveau_bo_move()
981 if (new_reg->mem_type == TTM_PL_TT) { in nouveau_bo_move()
982 ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); in nouveau_bo_move()
992 if (nvbo->bo.pin_count) in nouveau_bo_move()
995 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1002 if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) { in nouveau_bo_move()
1007 if (old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1008 new_reg->mem_type == TTM_PL_TT) { in nouveau_bo_move()
1013 if (old_reg->mem_type == TTM_PL_TT && in nouveau_bo_move()
1014 new_reg->mem_type == TTM_PL_SYSTEM) { in nouveau_bo_move()
1015 nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); in nouveau_bo_move()
1016 ttm_resource_free(bo, &bo->resource); in nouveau_bo_move()
1022 if (drm->ttm.move) { in nouveau_bo_move()
1023 if ((old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1024 new_reg->mem_type == TTM_PL_VRAM) || in nouveau_bo_move()
1025 (old_reg->mem_type == TTM_PL_VRAM && in nouveau_bo_move()
1026 new_reg->mem_type == TTM_PL_SYSTEM)) { in nouveau_bo_move()
1027 hop->fpfn = 0; in nouveau_bo_move()
1028 hop->lpfn = 0; in nouveau_bo_move()
1029 hop->mem_type = TTM_PL_TT; in nouveau_bo_move()
1030 hop->flags = 0; in nouveau_bo_move()
1031 return -EMULTIHOP; in nouveau_bo_move()
1036 ret = -ENODEV; in nouveau_bo_move()
1044 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1048 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); in nouveau_bo_move()
1052 nouveau_bo_move_ntfy(bo, bo->resource); in nouveau_bo_move()
1063 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_free_locked()
1064 switch (reg->mem_type) { in nouveau_ttm_io_mem_free_locked()
1066 if (mem->kind) in nouveau_ttm_io_mem_free_locked()
1067 nvif_object_unmap_handle(&mem->mem.object); in nouveau_ttm_io_mem_free_locked()
1070 nvif_object_unmap_handle(&mem->mem.object); in nouveau_ttm_io_mem_free_locked()
1082 struct nvkm_device *device = nvxx_device(&drm->client.device); in nouveau_ttm_io_mem_reserve()
1084 struct nvif_mmu *mmu = &drm->client.mmu; in nouveau_ttm_io_mem_reserve()
1087 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
1089 switch (reg->mem_type) { in nouveau_ttm_io_mem_reserve()
1096 if (drm->agp.bridge) { in nouveau_ttm_io_mem_reserve()
1097 reg->bus.offset = (reg->start << PAGE_SHIFT) + in nouveau_ttm_io_mem_reserve()
1098 drm->agp.base; in nouveau_ttm_io_mem_reserve()
1099 reg->bus.is_iomem = !drm->agp.cma; in nouveau_ttm_io_mem_reserve()
1100 reg->bus.caching = ttm_write_combined; in nouveau_ttm_io_mem_reserve()
1103 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || in nouveau_ttm_io_mem_reserve()
1104 !mem->kind) { in nouveau_ttm_io_mem_reserve()
1111 reg->bus.offset = (reg->start << PAGE_SHIFT) + in nouveau_ttm_io_mem_reserve()
1112 device->func->resource_addr(device, 1); in nouveau_ttm_io_mem_reserve()
1113 reg->bus.is_iomem = true; in nouveau_ttm_io_mem_reserve()
1116 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_ttm_io_mem_reserve()
1117 mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED) in nouveau_ttm_io_mem_reserve()
1118 reg->bus.caching = ttm_uncached; in nouveau_ttm_io_mem_reserve()
1120 reg->bus.caching = ttm_write_combined; in nouveau_ttm_io_mem_reserve()
1122 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_reserve()
1130 switch (mem->mem.object.oclass) { in nouveau_ttm_io_mem_reserve()
1134 args.nv50.kind = mem->kind; in nouveau_ttm_io_mem_reserve()
1135 args.nv50.comp = mem->comp; in nouveau_ttm_io_mem_reserve()
1141 args.gf100.kind = mem->kind; in nouveau_ttm_io_mem_reserve()
1149 ret = nvif_object_map_handle(&mem->mem.object, in nouveau_ttm_io_mem_reserve()
1154 ret = -EINVAL; in nouveau_ttm_io_mem_reserve()
1158 reg->bus.offset = handle; in nouveau_ttm_io_mem_reserve()
1163 ret = -EINVAL; in nouveau_ttm_io_mem_reserve()
1167 if (ret == -ENOSPC) { in nouveau_ttm_io_mem_reserve()
1170 nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, in nouveau_ttm_io_mem_reserve()
1174 list_del_init(&nvbo->io_reserve_lru); in nouveau_ttm_io_mem_reserve()
1175 drm_vma_node_unmap(&nvbo->bo.base.vma_node, in nouveau_ttm_io_mem_reserve()
1176 bdev->dev_mapping); in nouveau_ttm_io_mem_reserve()
1177 nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource); in nouveau_ttm_io_mem_reserve()
1182 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
1191 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
1193 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
1198 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_fault_reserve_notify()
1200 struct nvkm_device *device = nvxx_device(&drm->client.device); in nouveau_ttm_fault_reserve_notify()
1201 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT; in nouveau_ttm_fault_reserve_notify()
1207 if (bo->resource->mem_type != TTM_PL_VRAM) { in nouveau_ttm_fault_reserve_notify()
1208 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1209 !nvbo->kind) in nouveau_ttm_fault_reserve_notify()
1212 if (bo->resource->mem_type != TTM_PL_SYSTEM) in nouveau_ttm_fault_reserve_notify()
1219 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1220 bo->resource->start + bo->resource->num_pages < mappable) in nouveau_ttm_fault_reserve_notify()
1223 for (i = 0; i < nvbo->placement.num_placement; ++i) { in nouveau_ttm_fault_reserve_notify()
1224 nvbo->placements[i].fpfn = 0; in nouveau_ttm_fault_reserve_notify()
1225 nvbo->placements[i].lpfn = mappable; in nouveau_ttm_fault_reserve_notify()
1228 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { in nouveau_ttm_fault_reserve_notify()
1229 nvbo->busy_placements[i].fpfn = 0; in nouveau_ttm_fault_reserve_notify()
1230 nvbo->busy_placements[i].lpfn = mappable; in nouveau_ttm_fault_reserve_notify()
1237 if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS)) in nouveau_ttm_fault_reserve_notify()
1253 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); in nouveau_ttm_tt_populate()
1258 if (slave && ttm->sg) { in nouveau_ttm_tt_populate()
1259 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address, in nouveau_ttm_tt_populate()
1260 ttm->num_pages); in nouveau_ttm_tt_populate()
1265 dev = drm->dev->dev; in nouveau_ttm_tt_populate()
1267 return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx); in nouveau_ttm_tt_populate()
1276 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); in nouveau_ttm_tt_unpopulate()
1282 dev = drm->dev->dev; in nouveau_ttm_tt_unpopulate()
1284 return ttm_pool_free(&drm->ttm.bdev.pool, ttm); in nouveau_ttm_tt_unpopulate()
1293 if (drm->agp.bridge) { in nouveau_ttm_tt_destroy()
1306 struct dma_resv *resv = nvbo->bo.base.resv; in nouveau_bo_fence()
1309 dma_resv_add_excl_fence(resv, &fence->base); in nouveau_bo_fence()
1311 dma_resv_add_shared_fence(resv, &fence->base); in nouveau_bo_fence()
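The fragment below is not part of the file listed above. It is a minimal, illustrative sketch of how the helpers shown here are typically chained together (allocate, pin, kernel-map, access, then tear down), assuming the signatures the driver uses at this point in its history: nouveau_bo_new() taking a domain plus tile_mode/tile_flags, nouveau_bo_pin() taking a contiguous-allocation flag, and nouveau_bo_rd32()/nouveau_bo_wr32() indexing in 32-bit words. example_scratch_bo() is a made-up name, and exact parameters differ between kernel versions; check the matching nouveau_bo.h before relying on any of this.

/* Illustrative sketch only -- not taken from the listed file.
 * Assumes the driver's usual headers (nouveau_drv.h, nouveau_bo.h).
 */
static int example_scratch_bo(struct nouveau_cli *cli)
{
	struct nouveau_bo *nvbo = NULL;
	int ret;

	/* Allocate and initialise a 64 KiB, page-aligned, linear VRAM buffer. */
	ret = nouveau_bo_new(cli, 64 * 1024, PAGE_SIZE,
			     NOUVEAU_GEM_DOMAIN_VRAM, 0, 0,
			     NULL, NULL, &nvbo);
	if (ret)
		return ret;

	/* Pin it in VRAM so it cannot be evicted while in use. */
	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
	if (ret)
		goto out_ref;

	/* Map it into kernel space and access it through the kmap. */
	ret = nouveau_bo_map(nvbo);
	if (ret)
		goto out_unpin;

	nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);	/* word index 0 == byte offset 0 */
	WARN_ON(nouveau_bo_rd32(nvbo, 0) != 0xdeadbeef);

	nouveau_bo_unmap(nvbo);
out_unpin:
	nouveau_bo_unpin(nvbo);
out_ref:
	nouveau_bo_ref(NULL, &nvbo);	/* drop the allocation reference */
	return ret;
}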