Lines matching full:drm

61 typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
64 typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
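The search output truncates both typedefs at their first line. For readability, here is a hedged reconstruction of the full hook signatures; the argument names and the enum nouveau_aper type name are assumptions inferred from the copy_func()/clear_func() call sites further down (lines 153 and 643), not confirmed by the listing itself.

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
				      enum nouveau_aper dst_aper, u64 dst_addr,
				      enum nouveau_aper src_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
				    enum nouveau_aper dst_aper, u64 dst_addr);

These are the hooks that nouveau_dmem_migrate_init() (line 573 onward) fills in with the nvc0b5_* implementations.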
70 struct nouveau_drm *drm; member
82 struct nouveau_drm *drm; member
99 return chunk->drm; in page_to_drm()
114 struct nouveau_dmem *dmem = chunk->drm->dmem; in nouveau_dmem_page_free()
142 static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage, in nouveau_dmem_copy_one() argument
145 struct device *dev = drm->dev->dev; in nouveau_dmem_copy_one()
153 if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr, in nouveau_dmem_copy_one()
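Lines 142-153 cover most of nouveau_dmem_copy_one(). A hedged reconstruction of how those fragments fit together is below: the DMA mapping of the destination page and NOUVEAU_APER_HOST come from the listing, while NOUVEAU_APER_VRAM, nouveau_dmem_page_addr() and the exact error codes are assumptions.

static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
				 struct page *dpage, dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;

	/* Map the destination system page so the copy engine can write it. */
	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr))
		return -EIO;

	/* One page, VRAM -> host; the VRAM source-address helper is assumed. */
	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
					 NOUVEAU_APER_VRAM,
					 nouveau_dmem_page_addr(spage))) {
		dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		return -EIO;
	}
	return 0;
}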
164 struct nouveau_drm *drm = page_to_drm(vmf->page); in nouveau_dmem_migrate_to_ram() local
165 struct nouveau_dmem *dmem = drm->dmem; in nouveau_dmem_migrate_to_ram()
178 .pgmap_owner = drm->dev, in nouveau_dmem_migrate_to_ram()
206 ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr); in nouveau_dmem_migrate_to_ram()
216 dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); in nouveau_dmem_migrate_to_ram()
228 nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage) in nouveau_dmem_chunk_alloc() argument
251 chunk->drm = drm; in nouveau_dmem_chunk_alloc()
257 chunk->pagemap.owner = drm->dev; in nouveau_dmem_chunk_alloc()
259 ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0, in nouveau_dmem_chunk_alloc()
275 mutex_lock(&drm->dmem->mutex); in nouveau_dmem_chunk_alloc()
276 list_add(&chunk->list, &drm->dmem->chunks); in nouveau_dmem_chunk_alloc()
277 mutex_unlock(&drm->dmem->mutex); in nouveau_dmem_chunk_alloc()
281 spin_lock(&drm->dmem->lock); in nouveau_dmem_chunk_alloc()
283 page->zone_device_data = drm->dmem->free_pages; in nouveau_dmem_chunk_alloc()
284 drm->dmem->free_pages = page; in nouveau_dmem_chunk_alloc()
288 spin_unlock(&drm->dmem->lock); in nouveau_dmem_chunk_alloc()
290 NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", in nouveau_dmem_chunk_alloc()
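Lines 281-288 build a LIFO free list by threading each new page through page->zone_device_data, and nouveau_dmem_page_alloc_locked() below pops from the same list. A minimal standalone userspace model of that linking (a stand-in struct page, no spinlock) can be compiled and run on its own:

#include <assert.h>
#include <stddef.h>

struct page { void *zone_device_data; };	/* stand-in for the kernel's struct page */

static struct page *free_pages;			/* models drm->dmem->free_pages */

static void dmem_free_page(struct page *page)	/* push side, as in nouveau_dmem_chunk_alloc() */
{
	page->zone_device_data = free_pages;
	free_pages = page;
}

static struct page *dmem_alloc_page(void)	/* pop side, as in nouveau_dmem_page_alloc_locked() */
{
	struct page *page = free_pages;

	if (page)
		free_pages = page->zone_device_data;
	return page;
}

int main(void)
{
	struct page pages[3];

	for (size_t i = 0; i < 3; i++)
		dmem_free_page(&pages[i]);
	assert(dmem_alloc_page() == &pages[2]);	/* LIFO: last freed page comes back first */
	assert(dmem_alloc_page() == &pages[1]);
	return 0;
}

nouveau_dmem_page_free() (line 114) appears to feed the same list when device pages are released, which is what makes freed pages immediately reusable.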
308 nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm) in nouveau_dmem_page_alloc_locked() argument
314 spin_lock(&drm->dmem->lock); in nouveau_dmem_page_alloc_locked()
315 if (drm->dmem->free_pages) { in nouveau_dmem_page_alloc_locked()
316 page = drm->dmem->free_pages; in nouveau_dmem_page_alloc_locked()
317 drm->dmem->free_pages = page->zone_device_data; in nouveau_dmem_page_alloc_locked()
320 spin_unlock(&drm->dmem->lock); in nouveau_dmem_page_alloc_locked()
322 spin_unlock(&drm->dmem->lock); in nouveau_dmem_page_alloc_locked()
323 ret = nouveau_dmem_chunk_alloc(drm, &page); in nouveau_dmem_page_alloc_locked()
333 nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page) in nouveau_dmem_page_free_locked() argument
340 nouveau_dmem_resume(struct nouveau_drm *drm) in nouveau_dmem_resume() argument
345 if (drm->dmem == NULL) in nouveau_dmem_resume()
348 mutex_lock(&drm->dmem->mutex); in nouveau_dmem_resume()
349 list_for_each_entry(chunk, &drm->dmem->chunks, list) { in nouveau_dmem_resume()
354 mutex_unlock(&drm->dmem->mutex); in nouveau_dmem_resume()
358 nouveau_dmem_suspend(struct nouveau_drm *drm) in nouveau_dmem_suspend() argument
362 if (drm->dmem == NULL) in nouveau_dmem_suspend()
365 mutex_lock(&drm->dmem->mutex); in nouveau_dmem_suspend()
366 list_for_each_entry(chunk, &drm->dmem->chunks, list) in nouveau_dmem_suspend()
368 mutex_unlock(&drm->dmem->mutex); in nouveau_dmem_suspend()
400 nouveau_dmem_copy_one(chunk->drm, in nouveau_dmem_evict_chunk()
406 nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence); in nouveau_dmem_evict_chunk()
413 dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL); in nouveau_dmem_evict_chunk()
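The nouveau_dmem_evict_chunk() fragments show the teardown pattern: copy every resident device page back to a system page, cover the whole batch with a single fence, then release the DMA mappings. The helper below is a hedged sketch of that shape only; the _sketch name, the spages/dpages parameters and the fence-wait signature are hypothetical stand-ins, and the real function also has to allocate the destination pages and handle errors.

static void nouveau_dmem_evict_chunk_sketch(struct nouveau_dmem_chunk *chunk,
					    struct page **spages, struct page **dpages,
					    dma_addr_t *dma_addrs, unsigned long npages)
{
	struct nouveau_fence *fence = NULL;
	unsigned long i;

	/* copy each resident device page into its pre-allocated system page */
	for (i = 0; i < npages; i++)
		nouveau_dmem_copy_one(chunk->drm, spages[i], dpages[i], &dma_addrs[i]);

	/* one fence covers the whole batch of copy-engine work */
	nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
	nouveau_fence_wait(fence, true, false);		/* assumed (lazy, intr) signature */
	nouveau_fence_unref(&fence);

	/* only now is it safe to tear down the batched DMA mappings */
	for (i = 0; i < npages; i++)
		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
}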
418 nouveau_dmem_fini(struct nouveau_drm *drm) in nouveau_dmem_fini() argument
422 if (drm->dmem == NULL) in nouveau_dmem_fini()
425 mutex_lock(&drm->dmem->mutex); in nouveau_dmem_fini()
427 list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) { in nouveau_dmem_fini()
439 mutex_unlock(&drm->dmem->mutex); in nouveau_dmem_fini()
443 nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages, in nvc0b5_migrate_copy() argument
447 struct nvif_push *push = drm->dmem->migrate.chan->chan.push; in nvc0b5_migrate_copy()
517 nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length, in nvc0b5_migrate_clear() argument
520 struct nvif_push *push = drm->dmem->migrate.chan->chan.push; in nvc0b5_migrate_clear()
573 nouveau_dmem_migrate_init(struct nouveau_drm *drm) in nouveau_dmem_migrate_init() argument
575 switch (drm->ttm.copy.oclass) { in nouveau_dmem_migrate_init()
580 drm->dmem->migrate.copy_func = nvc0b5_migrate_copy; in nouveau_dmem_migrate_init()
581 drm->dmem->migrate.clear_func = nvc0b5_migrate_clear; in nouveau_dmem_migrate_init()
582 drm->dmem->migrate.chan = drm->ttm.chan; in nouveau_dmem_migrate_init()
591 nouveau_dmem_init(struct nouveau_drm *drm) in nouveau_dmem_init() argument
596 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL) in nouveau_dmem_init()
599 if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL))) in nouveau_dmem_init()
602 drm->dmem->drm = drm; in nouveau_dmem_init()
603 mutex_init(&drm->dmem->mutex); in nouveau_dmem_init()
604 INIT_LIST_HEAD(&drm->dmem->chunks); in nouveau_dmem_init()
605 mutex_init(&drm->dmem->mutex); in nouveau_dmem_init()
606 spin_lock_init(&drm->dmem->lock); in nouveau_dmem_init()
609 ret = nouveau_dmem_migrate_init(drm); in nouveau_dmem_init()
611 kfree(drm->dmem); in nouveau_dmem_init()
612 drm->dmem = NULL; in nouveau_dmem_init()
616 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, in nouveau_dmem_migrate_copy_one() argument
620 struct device *dev = drm->dev->dev; in nouveau_dmem_migrate_copy_one()
628 dpage = nouveau_dmem_page_alloc_locked(drm); in nouveau_dmem_migrate_copy_one()
638 if (drm->dmem->migrate.copy_func(drm, 1, in nouveau_dmem_migrate_copy_one()
643 if (drm->dmem->migrate.clear_func(drm, page_size(dpage), in nouveau_dmem_migrate_copy_one()
658 nouveau_dmem_page_free_locked(drm, dpage); in nouveau_dmem_migrate_copy_one()
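nouveau_dmem_migrate_copy_one() is the mirror of the fault path: allocate a device page, then either blit the DMA-mapped source system page into VRAM or, when there is no source page, clear the fresh VRAM page. A hedged sketch of that branch follows; the _sketch name, nouveau_dmem_page_addr(), the aperture constants and the migrate_pfn() return value are assumptions, and the svmm bookkeeping is elided.

static unsigned long
nouveau_dmem_migrate_copy_one_sketch(struct nouveau_drm *drm, struct page *spage,
				     dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;
	struct page *dpage;

	dpage = nouveau_dmem_page_alloc_locked(drm);
	if (!dpage)
		return 0;

	if (spage) {
		/* populated source page: DMA-map it and blit host -> VRAM */
		*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *dma_addr))
			goto out_free_page;
		if (drm->dmem->migrate.copy_func(drm, 1,
				NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(dpage),
				NOUVEAU_APER_HOST, *dma_addr))
			goto out_dma_unmap;
	} else {
		/* no source page (e.g. unpopulated anon memory): clear the VRAM page */
		if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
				NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(dpage)))
			goto out_free_page;
	}

	return migrate_pfn(page_to_pfn(dpage));

out_dma_unmap:
	dma_unmap_page(dev, *dma_addr, page_size(spage), DMA_BIDIRECTIONAL);
out_free_page:
	nouveau_dmem_page_free_locked(drm, dpage);
	return 0;
}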
664 static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm, in nouveau_dmem_migrate_chunk() argument
672 args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm, in nouveau_dmem_migrate_chunk()
674 if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma])) in nouveau_dmem_migrate_chunk()
679 nouveau_fence_new(drm->dmem->migrate.chan, false, &fence); in nouveau_dmem_migrate_chunk()
685 dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE, in nouveau_dmem_migrate_chunk()
692 nouveau_dmem_migrate_vma(struct nouveau_drm *drm, in nouveau_dmem_migrate_vma() argument
704 .pgmap_owner = drm->dev, in nouveau_dmem_migrate_vma()
711 if (drm->dmem == NULL) in nouveau_dmem_migrate_vma()
740 nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs, in nouveau_dmem_migrate_vma()
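Taken together, the nouveau_dmem_migrate_chunk() and nouveau_dmem_migrate_vma() lines follow the standard migrate_vma pattern for pulling system pages into VRAM. Below is a hedged outline of one chunk's worth of that flow, assuming the core migrate_vma_setup()/migrate_vma_pages()/migrate_vma_finalize() API; the src_pfns/dst_pfns/pfns names are placeholders, and the chunked iteration over [start, end) plus all error handling are elided.

	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.src		= src_pfns,
		.dst		= dst_pfns,
		.pgmap_owner	= drm->dev,	/* matches line 704 above */
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};

	if (migrate_vma_setup(&args))
		return -EFAULT;

	/* allocate/copy device pages into args.dst and fence the copies,
	 * as the nouveau_dmem_migrate_chunk() lines above do */
	nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs, pfns);

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);

The same three-step core sequence, with device-private selection and .pgmap_owner used to pick out the driver's own pages, underlies the opposite direction in nouveau_dmem_migrate_to_ram() (lines 164 onward).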