Lines Matching +full:pd +full:- +full:node
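The matched lines below come from the i915 PPGTT page-table management code (apparently drivers/gpu/drm/i915/gt/intel_ppgtt.c) and are grouped by the function each match occurs in.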
// SPDX-License-Identifier: MIT
/* in alloc_pt() */
                return ERR_PTR(-ENOMEM);

        pt->base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
        if (IS_ERR(pt->base)) {
                return ERR_PTR(-ENOMEM);

        atomic_set(&pt->used, 0);
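alloc_pt() allocates the backing 4 KiB DMA page for a page table through the address space's alloc_pt_dma() hook and starts the table's use count at zero; failure of either the structure allocation or the DMA page is reported as ERR_PTR(-ENOMEM).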
/* in __alloc_pd() */
        struct i915_page_directory *pd;

        pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pd))

        pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pd->entry)) {
                kfree(pd);

        spin_lock_init(&pd->lock);
        return pd;
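__alloc_pd() is the common directory constructor: the directory itself is zero-allocated, a caller-sized entry[] array is kcalloc'd separately, and the directory is freed again if the array allocation fails; the spinlock guarding the entries is initialised last. Both allocations use I915_GFP_ALLOW_FAIL, so callers must cope with a NULL return.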
/* in alloc_pd() */
        struct i915_page_directory *pd;

        pd = __alloc_pd(I915_PDES);
        if (unlikely(!pd))
                return ERR_PTR(-ENOMEM);

        pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
        if (IS_ERR(pd->pt.base)) {
                kfree(pd->entry);
                kfree(pd);
                return ERR_PTR(-ENOMEM);

        return pd;
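alloc_pd() builds on __alloc_pd(): it requests a full I915_PDES-entry directory, attaches a 4 KiB DMA page as pd->pt.base (the directory embeds a page table as its first member), and unwinds both the entry array and the directory if the DMA allocation fails.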
/* in free_px() */
                struct i915_page_directory *pd =
                        container_of(pt, typeof(*pd), pt);
                kfree(pd->entry);

        if (pt->base)
                i915_gem_object_put(pt->base);
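free_px() is the shared destructor: when the object being freed is a directory, container_of() recovers the i915_page_directory so its entry[] array can be freed, and in either case the reference on the backing GEM object is dropped if one was attached.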
/* in __set_pd_entry() */
__set_pd_entry(struct i915_page_directory * const pd,

        /* Each thread pre-pins the pd, and we may have a thread per pde. */
        GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);

        atomic_inc(px_used(pd));
        pd->entry[idx] = to;
        write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
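__set_pd_entry() installs a child table at slot idx: the directory's use count is bumped, the CPU-side pointer is recorded in entry[idx], and the hardware descriptor (the child's DMA address run through the caller's encode() with LLC caching) is written into the directory's DMA page. The GEM_BUG_ON bounds the use count because, per the in-code comment, each thread pre-pins the pd and there may be one thread per pde.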
/* in clear_pd_entry() */
clear_pd_entry(struct i915_page_directory * const pd,

        GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

        write_dma_entry(px_base(pd), idx, scratch->encode);
        pd->entry[idx] = NULL;
        atomic_dec(px_used(pd));
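clear_pd_entry() is the inverse: the slot's hardware descriptor is redirected to the scratch page, the CPU-side pointer is cleared, and the use count is dropped; the GEM_BUG_ON catches clearing an entry of an already-empty directory.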
/* in release_pd_entry() */
release_pd_entry(struct i915_page_directory * const pd,

        if (atomic_add_unless(&pt->used, -1, 1))

        spin_lock(&pd->lock);
        if (atomic_dec_and_test(&pt->used)) {
                clear_pd_entry(pd, idx, scratch);
        spin_unlock(&pd->lock);
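release_pd_entry() drops one use of a child table. The atomic_add_unless(&pt->used, -1, 1) fast path decrements without taking the lock whenever this is not the last use; only when the count would reach zero is pd->lock taken, and if atomic_dec_and_test() confirms the last use, the directory slot is cleared under the lock. A hedged, userspace-only sketch of the same pattern with C11 atomics and a pthread mutex (struct obj, dec_unless_last() and obj_put() are illustrative names, not driver code):

/*
 * Illustrative userspace analogue only; struct obj, dec_unless_last() and
 * obj_put() are made-up names, not part of the i915 driver.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
        atomic_int used;
        pthread_mutex_t lock;
};

/* Analogue of atomic_add_unless(&used, -1, 1): decrement unless the value is 1. */
static bool dec_unless_last(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 1) {
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        return true;    /* not the last use; no lock needed */
        }
        return false;                   /* last use; caller handles teardown */
}

/* Returns true when the caller dropped the last use and did the teardown. */
static bool obj_put(struct obj *o)
{
        bool last = false;

        if (dec_unless_last(&o->used))
                return false;

        pthread_mutex_lock(&o->lock);
        if (atomic_fetch_sub(&o->used, 1) == 1)
                last = true;            /* the driver clears the pd entry here */
        pthread_mutex_unlock(&o->lock);

        return last;
}

int main(void)
{
        struct obj o = { .used = 3, .lock = PTHREAD_MUTEX_INITIALIZER };

        obj_put(&o);                    /* 3 -> 2, lockless */
        obj_put(&o);                    /* 2 -> 1, lockless */
        return obj_put(&o) ? 0 : 1;     /* 1 -> 0, under the lock */
}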
/* in i915_ppgtt_init_hw() */
        struct drm_i915_private *i915 = gt->i915;

/* in __ppgtt_create() */
        if (GRAPHICS_VER(gt->i915) < 8)

/* in i915_ppgtt_create() */
        trace_i915_ppgtt_create(&ppgtt->vm);
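These single-line matches come from the creation path: i915_ppgtt_init_hw() starts by fetching the drm_i915_private from the GT, __ppgtt_create() selects the pre-gen8 or gen8+ backend based on the graphics version, and i915_ppgtt_create() emits a tracepoint for the newly created address space.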
/* in ppgtt_bind_vma() */
                vm->allocate_va_range(vm, stash, vma->node.start, vma->size);

        if (i915_gem_object_is_readonly(vma->obj))
        if (i915_gem_object_is_lmem(vma->obj))

        vm->insert_entries(vm, vma, cache_level, pte_flags);

/* in ppgtt_unbind_vma() */
                vm->clear_range(vm, vma->node.start, vma->size);
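ppgtt_bind_vma() first has the address space allocate page tables covering the VMA's node (start and size), derives PTE flags from the backing object (read-only and local-memory objects each contribute a flag), and then writes the PTEs via insert_entries() with the requested cache level. ppgtt_unbind_vma() undoes the binding by clearing the same start/size range.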
/* in pd_count() */
        return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
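pd_count() computes a worst-case number of page-table objects needed at one level: padding size with 2 * (2^shift - 1) before the shift allows for a range that is not aligned to the level's granule and therefore spills into one extra entry at each end. A small, self-contained check of the arithmetic (plain C, not driver code; the shift of 21 assumes one bottom-level table maps 512 * 4 KiB = 2 MiB):

#include <stdint.h>
#include <stdio.h>

/* Same formula as pd_count(): worst-case object count for a possibly unaligned range. */
static uint64_t pd_count(uint64_t size, int shift)
{
        return (size + 2 * ((UINT64_C(1) << shift) - 1)) >> shift;
}

int main(void)
{
        /* a 2 MiB range may straddle one 2 MiB boundary: 2 tables */
        printf("%llu\n", (unsigned long long)pd_count(UINT64_C(2) << 20, 21));
        /* even a 4 KiB range can straddle a boundary in the worst case: 2 */
        printf("%llu\n", (unsigned long long)pd_count(4096, 21));
        /* a 4 MiB range may touch 3 tables when misaligned: 3 */
        printf("%llu\n", (unsigned long long)pd_count(UINT64_C(4) << 20, 21));
        return 0;
}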
/* in i915_vm_alloc_pt_stash() */
        shift = vm->pd_shift;

        while (count--) {
                pt->stash = stash->pt[0];
                stash->pt[0] = pt;

        for (n = 1; n < vm->top; n++) {
                shift += ilog2(I915_PDES); /* Each PD holds 512 entries */

                while (count--) {
                        struct i915_page_directory *pd;

                        pd = alloc_pd(vm);
                        if (IS_ERR(pd)) {
                                return PTR_ERR(pd);

                        pd->pt.stash = stash->pt[1];
                        stash->pt[1] = &pd->pt;
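i915_vm_alloc_pt_stash() pre-allocates the tables and directories a later allocate_va_range() may need, so that the range allocation itself does not have to allocate memory. The first loop pushes bottom-level page tables onto the stash->pt[0] singly-linked list (pt->stash acts as the next pointer); the outer loop then walks the upper levels, widening shift by ilog2(I915_PDES) per level since each directory covers 512 children, and pushes freshly allocated directories onto stash->pt[1]. Any allocation failure is propagated via PTR_ERR().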
/* in i915_vm_map_pt_stash() */
        for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
                for (pt = stash->pt[n]; pt; pt = pt->stash) {
                        err = map_pt_dma_locked(vm, pt->base);

/* in i915_vm_free_pt_stash() */
        for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
                while ((pt = stash->pt[n])) {
                        stash->pt[n] = pt->stash;
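Both stash walkers iterate the same two lists: i915_vm_map_pt_stash() maps each stashed object's backing page with map_pt_dma_locked() so the CPU can write entries into it, while i915_vm_free_pt_stash() pops the lists entry by entry so that whatever allocate_va_range() did not consume can be released.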
/* in ppgtt_set_pages() */
        GEM_BUG_ON(vma->pages);

        vma->pages = vma->obj->mm.pages;
        vma->page_sizes = vma->obj->mm.page_sizes;
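ppgtt_set_pages() aliases the VMA's page list and page-size mask to those of the backing object, asserting that the VMA had no pages yet; a PPGTT binding can use the object's own scatterlist directly rather than building a remapped one.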
/* in ppgtt_init() */
        struct drm_i915_private *i915 = gt->i915;

        ppgtt->vm.gt = gt;
        ppgtt->vm.i915 = i915;
        ppgtt->vm.dma = i915->drm.dev;
        ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);

        dma_resv_init(&ppgtt->vm._resv);
        i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

        ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
        ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
        ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
        ppgtt->vm.vma_ops.clear_pages = clear_pages;
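ppgtt_init() is the common setup shared by the generation-specific backends: it ties the address space to its GT, device and DMA device, sizes the VA space from the platform's ppgtt_size (an address-bit count, hence BIT_ULL), initialises the reservation object and the generic address-space state, and wires in the PPGTT vma_ops, including the bind/unbind and set_pages helpers matched above.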