/Linux-v5.4/mm/ |
D | mmu_gather.c |
    18  struct mmu_gather_batch *batch;  in tlb_next_batch() local
    20  batch = tlb->active;  in tlb_next_batch()
    21  if (batch->next) {  in tlb_next_batch()
    22  tlb->active = batch->next;  in tlb_next_batch()
    29  batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);  in tlb_next_batch()
    30  if (!batch)  in tlb_next_batch()
    34  batch->next = NULL;  in tlb_next_batch()
    35  batch->nr = 0;  in tlb_next_batch()
    36  batch->max = MAX_GATHER_BATCH;  in tlb_next_batch()
    38  tlb->active->next = batch;  in tlb_next_batch()
    [all …]
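Note: the mmu_gather.c hits above show how a full gather batch is grown by chaining another page-sized batch, falling back to an early flush if allocation fails. Below is a standalone userspace sketch of that pattern only; the struct layout and names are illustrative, and malloc() stands in for __get_free_pages(). It is not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

#define MAX_GATHER_BATCH 16

struct gather_batch {
	struct gather_batch *next;
	unsigned int nr, max;
	void *pages[MAX_GATHER_BATCH];
};

struct gather {
	struct gather_batch *active;
	struct gather_batch local;      /* first batch is embedded */
};

static int next_batch(struct gather *tlb)
{
	struct gather_batch *batch = tlb->active;

	if (batch->next) {              /* a spare batch is already chained */
		tlb->active = batch->next;
		return 1;
	}

	batch = malloc(sizeof(*batch));
	if (!batch)
		return 0;               /* caller must flush what it has */

	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	tlb->active->next = batch;      /* link and switch to the new batch */
	tlb->active = batch;
	return 1;
}

int main(void)
{
	struct gather tlb = { .active = &tlb.local };

	printf("chained new batch: %d\n", next_batch(&tlb));
	printf("active moved off embedded batch: %d\n", tlb.active != &tlb.local);
	return 0;
}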
|
/Linux-v5.4/arch/powerpc/mm/book3s64/ |
D | hash_tlb.c |
    45  struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);  in hpte_need_flush() local
    52  i = batch->index;  in hpte_need_flush()
    104  if (!batch->active) {  in hpte_need_flush()
    120  if (i != 0 && (mm != batch->mm || batch->psize != psize ||  in hpte_need_flush()
    121  batch->ssize != ssize)) {  in hpte_need_flush()
    122  __flush_tlb_pending(batch);  in hpte_need_flush()
    126  batch->mm = mm;  in hpte_need_flush()
    127  batch->psize = psize;  in hpte_need_flush()
    128  batch->ssize = ssize;  in hpte_need_flush()
    130  batch->pte[i] = rpte;  in hpte_need_flush()
    [all …]
|
/Linux-v5.4/drivers/gpu/drm/i915/gem/ |
D | i915_gem_object_blt.c |
    21  struct i915_vma *batch;  in intel_emit_vma_fill_blt() local
    84  batch = i915_vma_instance(pool->obj, ce->vm, NULL);  in intel_emit_vma_fill_blt()
    85  if (IS_ERR(batch)) {  in intel_emit_vma_fill_blt()
    86  err = PTR_ERR(batch);  in intel_emit_vma_fill_blt()
    90  err = i915_vma_pin(batch, 0, 0, PIN_USER);  in intel_emit_vma_fill_blt()
    94  batch->private = pool;  in intel_emit_vma_fill_blt()
    95  return batch;  in intel_emit_vma_fill_blt()
    131  struct i915_vma *batch;  in i915_gem_object_fill_blt() local
    149  batch = intel_emit_vma_fill_blt(ce, vma, value);  in i915_gem_object_fill_blt()
    150  if (IS_ERR(batch)) {  in i915_gem_object_fill_blt()
    [all …]
|
D | i915_gem_execbuffer.c |
    229  struct i915_vma *batch; /** identity of the batch obj/vma */  member
    541  eb->batch = vma;  in eb_add_vma()
    744  unsigned int i, batch;  in eb_lookup_vmas() local
    753  batch = eb_batch_index(eb);  in eb_lookup_vmas()
    805  err = eb_add_vma(eb, i, batch, vma);  in eb_lookup_vmas()
    933  GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));  in reloc_gpu_flush()
    936  __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);  in reloc_gpu_flush()
    937  i915_gem_object_unpin_map(cache->rq->batch->obj);  in reloc_gpu_flush()
    1146  struct i915_vma *batch;  in __reloc_gpu_alloc() local
    1163  batch = i915_vma_instance(pool->obj, vma->vm, NULL);  in __reloc_gpu_alloc()
    [all …]
|
D | i915_gem_client_blt.c |
    162  struct i915_vma *batch;  in clear_pages_worker() local
    182  batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);  in clear_pages_worker()
    183  if (IS_ERR(batch)) {  in clear_pages_worker()
    184  err = PTR_ERR(batch);  in clear_pages_worker()
    199  err = intel_emit_vma_mark_active(batch, rq);  in clear_pages_worker()
    219  batch->node.start, batch->node.size,  in clear_pages_worker()
    229  intel_emit_vma_release(w->ce, batch);  in clear_pages_worker()
|
/Linux-v5.4/drivers/gpu/drm/i915/gem/selftests/ |
D | igt_gem_utils.c |
    113  struct i915_vma *batch;  in igt_gpu_fill_dw() local
    121  batch = igt_emit_store_dw(vma, offset, count, val);  in igt_gpu_fill_dw()
    122  if (IS_ERR(batch))  in igt_gpu_fill_dw()
    123  return PTR_ERR(batch);  in igt_gpu_fill_dw()
    136  batch->node.start, batch->node.size,  in igt_gpu_fill_dw()
    141  i915_vma_lock(batch);  in igt_gpu_fill_dw()
    142  err = i915_request_await_object(rq, batch->obj, false);  in igt_gpu_fill_dw()
    144  err = i915_vma_move_to_active(batch, rq, 0);  in igt_gpu_fill_dw()
    145  i915_vma_unlock(batch);  in igt_gpu_fill_dw()
    159  i915_vma_unpin(batch);  in igt_gpu_fill_dw()
    [all …]
|
/Linux-v5.4/arch/powerpc/include/asm/book3s/64/ |
D | tlbflush-hash.h |
    25  extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
    31  struct ppc64_tlb_batch *batch;  in arch_enter_lazy_mmu_mode() local
    35  batch = this_cpu_ptr(&ppc64_tlb_batch);  in arch_enter_lazy_mmu_mode()
    36  batch->active = 1;  in arch_enter_lazy_mmu_mode()
    41  struct ppc64_tlb_batch *batch;  in arch_leave_lazy_mmu_mode() local
    45  batch = this_cpu_ptr(&ppc64_tlb_batch);  in arch_leave_lazy_mmu_mode()
    47  if (batch->index)  in arch_leave_lazy_mmu_mode()
    48  __flush_tlb_pending(batch);  in arch_leave_lazy_mmu_mode()
    49  batch->active = 0;  in arch_leave_lazy_mmu_mode()
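Note: the tlbflush-hash.h and hash_tlb.c hits describe the lazy-MMU pairing: entering lazy mode marks the per-CPU batch active, hpte_need_flush() queues entries while it is active, and leaving the mode flushes anything still pending. The following is a single-threaded userspace model of that contract only (illustrative names; the real code uses a per-CPU ppc64_tlb_batch and preemption control):

#include <stdio.h>

#define BATCH_SLOTS 192

static struct {
	int active;
	unsigned int index;
	unsigned long vaddrs[BATCH_SLOTS];
} tlb_batch;

static void flush_pending(void)
{
	printf("flush %u queued entries\n", tlb_batch.index);
	tlb_batch.index = 0;
}

static void enter_lazy_mmu_mode(void) { tlb_batch.active = 1; }

static void leave_lazy_mmu_mode(void)
{
	if (tlb_batch.index)
		flush_pending();
	tlb_batch.active = 0;
}

static void need_flush(unsigned long vaddr)
{
	if (!tlb_batch.active) {            /* not batching: flush at once */
		printf("flush %#lx immediately\n", vaddr);
		return;
	}
	tlb_batch.vaddrs[tlb_batch.index++] = vaddr;
	if (tlb_batch.index == BATCH_SLOTS) /* batch full: flush early */
		flush_pending();
}

int main(void)
{
	enter_lazy_mmu_mode();
	for (unsigned long v = 0x1000; v <= 0x8000; v += 0x1000)
		need_flush(v);
	leave_lazy_mmu_mode();
	return 0;
}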
|
/Linux-v5.4/drivers/gpu/drm/i915/selftests/ |
D | igt_spinner.c |
    49  spin->batch = vaddr;  in igt_spinner_init()
    98  u32 *batch;  in igt_spinner_create_request() local
    133  batch = spin->batch;  in igt_spinner_create_request()
    135  *batch++ = MI_STORE_DWORD_IMM_GEN4;  in igt_spinner_create_request()
    136  *batch++ = lower_32_bits(hws_address(hws, rq));  in igt_spinner_create_request()
    137  *batch++ = upper_32_bits(hws_address(hws, rq));  in igt_spinner_create_request()
    138  *batch++ = rq->fence.seqno;  in igt_spinner_create_request()
    140  *batch++ = arbitration_command;  in igt_spinner_create_request()
    142  *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;  in igt_spinner_create_request()
    143  *batch++ = lower_32_bits(vma->node.start);  in igt_spinner_create_request()
    [all …]
|
D | i915_request.c |
    659  struct i915_vma *batch)  in empty_request() argument
    669  batch->node.start,  in empty_request()
    670  batch->node.size,  in empty_request()
    686  struct i915_vma *batch;  in live_empty_request() local
    698  batch = empty_batch(i915);  in live_empty_request()
    699  if (IS_ERR(batch)) {  in live_empty_request()
    700  err = PTR_ERR(batch);  in live_empty_request()
    715  request = empty_request(engine, batch);  in live_empty_request()
    726  request = empty_request(engine, batch);  in live_empty_request()
    753  i915_vma_unpin(batch);  in live_empty_request()
    [all …]
|
/Linux-v5.4/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_mob.c |
    236  struct vmw_otable_batch *batch)  in vmw_otable_batch_setup() argument
    240  struct vmw_otable *otables = batch->otables;  in vmw_otable_batch_setup()
    249  for (i = 0; i < batch->num_otables; ++i) {  in vmw_otable_batch_setup()
    261  0, false, &batch->otable_bo);  in vmw_otable_batch_setup()
    266  ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);  in vmw_otable_batch_setup()
    268  ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);  in vmw_otable_batch_setup()
    271  ret = vmw_bo_map_dma(batch->otable_bo);  in vmw_otable_batch_setup()
    275  ttm_bo_unreserve(batch->otable_bo);  in vmw_otable_batch_setup()
    278  for (i = 0; i < batch->num_otables; ++i) {  in vmw_otable_batch_setup()
    279  if (!batch->otables[i].enabled)  in vmw_otable_batch_setup()
    [all …]
|
/Linux-v5.4/drivers/gpu/drm/i915/gt/ |
D | selftest_hangcheck.c |
    50  u32 *batch;  member
    93  h->batch = vaddr;  in hang_init()
    140  u32 *batch;  in hang_create_request() local
    157  h->batch = vaddr;  in hang_create_request()
    189  batch = h->batch;  in hang_create_request()
    191  *batch++ = MI_STORE_DWORD_IMM_GEN4;  in hang_create_request()
    192  *batch++ = lower_32_bits(hws_address(hws, rq));  in hang_create_request()
    193  *batch++ = upper_32_bits(hws_address(hws, rq));  in hang_create_request()
    194  *batch++ = rq->fence.seqno;  in hang_create_request()
    195  *batch++ = MI_ARB_CHECK;  in hang_create_request()
    [all …]
|
D | intel_lrc.c |
    2004  gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)  in gen8_emit_flush_coherentl3_wa() argument
    2007  *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;  in gen8_emit_flush_coherentl3_wa()
    2008  *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);  in gen8_emit_flush_coherentl3_wa()
    2009  *batch++ = intel_gt_scratch_offset(engine->gt,  in gen8_emit_flush_coherentl3_wa()
    2011  *batch++ = 0;  in gen8_emit_flush_coherentl3_wa()
    2013  *batch++ = MI_LOAD_REGISTER_IMM(1);  in gen8_emit_flush_coherentl3_wa()
    2014  *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);  in gen8_emit_flush_coherentl3_wa()
    2015  *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;  in gen8_emit_flush_coherentl3_wa()
    2017  batch = gen8_emit_pipe_control(batch,  in gen8_emit_flush_coherentl3_wa()
    2022  *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;  in gen8_emit_flush_coherentl3_wa()
    [all …]
|
D | selftest_workarounds.c |
    467  struct i915_vma *batch;  in check_dirty_whitelist() local
    475  batch = create_batch(ctx);  in check_dirty_whitelist()
    476  if (IS_ERR(batch)) {  in check_dirty_whitelist()
    477  err = PTR_ERR(batch);  in check_dirty_whitelist()
    503  cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);  in check_dirty_whitelist()
    552  i915_gem_object_flush_map(batch->obj);  in check_dirty_whitelist()
    553  i915_gem_object_unpin_map(batch->obj);  in check_dirty_whitelist()
    569  batch->node.start, PAGE_SIZE,  in check_dirty_whitelist()
    674  i915_vma_unpin_and_release(&batch, 0);  in check_dirty_whitelist()
    812  struct i915_vma *batch;  in scrub_whitelisted_registers() local
    [all …]
|
D | intel_renderstate.h |
    33  const u32 *batch;  member
    40  .batch = gen ## _g ## _null_state_batch, \
|
D | intel_renderstate.c |
    70  #define OUT_BATCH(batch, i, val) \  argument
    74  (batch)[(i)++] = (val); \
    93  u32 s = rodata->batch[i];  in render_state_setup()
    100  rodata->batch[i + 1] != 0)  in render_state_setup()
|
D | intel_engine.h |
    369  static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)  in gen8_emit_pipe_control() argument
    371  memset(batch, 0, 6 * sizeof(u32));  in gen8_emit_pipe_control()
    373  batch[0] = GFX_OP_PIPE_CONTROL(6);  in gen8_emit_pipe_control()
    374  batch[1] = flags;  in gen8_emit_pipe_control()
    375  batch[2] = offset;  in gen8_emit_pipe_control()
    377  return batch + 6;  in gen8_emit_pipe_control()
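Note: gen8_emit_pipe_control() above illustrates the emit-and-advance convention used throughout the i915 batch-buffer code: a helper writes a fixed number of u32 slots and returns the cursor just past them so emits can be chained (as in gen8_emit_flush_coherentl3_wa() above). A standalone sketch of the convention follows; the opcode value is a placeholder, not a real GPU command:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FAKE_PIPE_CONTROL(len) (0x7a000000u | ((len) - 2))  /* placeholder opcode */

static uint32_t *emit_pipe_control(uint32_t *batch, uint32_t flags,
				   uint32_t offset)
{
	memset(batch, 0, 6 * sizeof(uint32_t));   /* command is always 6 dwords */

	batch[0] = FAKE_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;   /* caller keeps emitting from the returned cursor */
}

int main(void)
{
	uint32_t buf[32];
	uint32_t *cs = buf;

	cs = emit_pipe_control(cs, 0x1, 0x100);
	cs = emit_pipe_control(cs, 0x2, 0x200);
	printf("emitted %zu dwords\n", (size_t)(cs - buf));
	return 0;
}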
|
/Linux-v5.4/drivers/xen/ |
D | gntdev.c |
    836  static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,  in gntdev_get_page() argument
    848  batch->pages[batch->nr_pages++] = page;  in gntdev_get_page()
    856  static void gntdev_put_pages(struct gntdev_copy_batch *batch)  in gntdev_put_pages() argument
    860  for (i = 0; i < batch->nr_pages; i++)  in gntdev_put_pages()
    861  put_page(batch->pages[i]);  in gntdev_put_pages()
    862  batch->nr_pages = 0;  in gntdev_put_pages()
    865  static int gntdev_copy(struct gntdev_copy_batch *batch)  in gntdev_copy() argument
    869  gnttab_batch_copy(batch->ops, batch->nr_ops);  in gntdev_copy()
    870  gntdev_put_pages(batch);  in gntdev_copy()
    876  for (i = 0; i < batch->nr_ops; i++) {  in gntdev_copy()
    [all …]
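Note: the gntdev.c hits show the collect-then-flush shape of grant copies: operations accumulate in a fixed-size batch and are submitted in one call, after which the batch is drained. The sketch below models only that shape; the types and the flush hook are illustrative stand-ins, not the gntdev or grant-table API:

#include <stdio.h>

#define BATCH_MAX 8

struct copy_op { int src, dst, len; };

struct copy_batch {
	struct copy_op ops[BATCH_MAX];
	unsigned int nr_ops;
};

static void batch_flush(struct copy_batch *b)
{
	/* Stand-in for gnttab_batch_copy(): submit all queued ops at once. */
	printf("flushing %u ops\n", b->nr_ops);
	b->nr_ops = 0;
}

static void batch_add(struct copy_batch *b, struct copy_op op)
{
	if (b->nr_ops == BATCH_MAX)   /* full: submit before queueing more */
		batch_flush(b);
	b->ops[b->nr_ops++] = op;
}

int main(void)
{
	struct copy_batch b = { .nr_ops = 0 };

	for (int i = 0; i < 20; i++)
		batch_add(&b, (struct copy_op){ i, i + 1, 4096 });
	if (b.nr_ops)
		batch_flush(&b);      /* submit the final partial batch */
	return 0;
}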
|
/Linux-v5.4/drivers/net/ethernet/netronome/nfp/flower/ |
D | lag_conf.c |
    204  unsigned int member_cnt, enum nfp_fl_lag_batch *batch)  in nfp_fl_lag_config_group() argument
    224  if (*batch == NFP_FL_LAG_BATCH_FIRST) {  in nfp_fl_lag_config_group()
    227  *batch = NFP_FL_LAG_BATCH_MEMBER;  in nfp_fl_lag_config_group()
    233  *batch = NFP_FL_LAG_BATCH_FINISHED;  in nfp_fl_lag_config_group()
    239  if (*batch == NFP_FL_LAG_BATCH_FINISHED) {  in nfp_fl_lag_config_group()
    266  enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;  in nfp_fl_lag_do_work() local
    288  &batch);  in nfp_fl_lag_do_work()
    357  active_count, &batch);  in nfp_fl_lag_do_work()
    371  if (batch == NFP_FL_LAG_BATCH_MEMBER) {  in nfp_fl_lag_do_work()
    372  batch = NFP_FL_LAG_BATCH_FINISHED;  in nfp_fl_lag_do_work()
    [all …]
|
/Linux-v5.4/tools/virtio/ringtest/ |
D | main.c |
    22  int batch = 1;  variable
    116  int tokick = batch;  in run_guest()
    129  tokick = batch;  in run_guest()
    348  batch = c;  in main()
    372  if (batch > max_outstanding)  in main()
    373  batch = max_outstanding;  in main()
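Note: in the ringtest harness, `batch` controls how many buffers are submitted per notification, clamped to the number that can be outstanding at once. A minimal userspace model of that kick batching follows (the notify hook and counts are illustrative, not the harness code):

#include <stdio.h>

static int batch = 4;            /* buffers per notification */
static int max_outstanding = 16;

static void notify_host(void) { printf("kick\n"); }

int main(void)
{
	int tokick;

	if (batch > max_outstanding)  /* never batch more than can be queued */
		batch = max_outstanding;

	tokick = batch;
	for (int i = 0; i < 10; i++) {
		/* ...submit one buffer here... */
		if (--tokick == 0) {  /* batch complete: notify and restart */
			notify_host();
			tokick = batch;
		}
	}
	if (tokick != batch)          /* flush a trailing partial batch */
		notify_host();
	return 0;
}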
|
/Linux-v5.4/lib/ |
D | percpu_counter.c |
    82  void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)  in percpu_counter_add_batch() argument
    88  if (count >= batch || count <= -batch) {  in percpu_counter_add_batch()
    202  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)  in __percpu_counter_compare() argument
    208  if (abs(count - rhs) > (batch * num_online_cpus())) {  in __percpu_counter_compare()
|
/Linux-v5.4/include/linux/ |
D | percpu_counter.h |
    44  s32 batch);
    46  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
    127  __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)  in __percpu_counter_compare() argument
    141  percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)  in percpu_counter_add_batch() argument
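Note: the percpu_counter hits above show the batching rule: each CPU keeps a local delta and folds it into the shared count only once it reaches +/- batch, so the shared value can drift by up to batch * num_online_cpus(), which is exactly the slack __percpu_counter_compare() tolerates. Below is a simplified single-threaded userspace model of that rule (illustrative names, not the kernel implementation):

#include <stdio.h>

#define NR_CPUS 4

struct model_percpu_counter {
	long long count;            /* shared, approximate total */
	long counters[NR_CPUS];     /* per-CPU deltas */
};

static void model_add_batch(struct model_percpu_counter *fbc,
			    int cpu, long amount, long batch)
{
	long count = fbc->counters[cpu] + amount;

	if (count >= batch || count <= -batch) {
		/* Fold the local delta into the shared count. */
		fbc->count += count;
		fbc->counters[cpu] = 0;
	} else {
		fbc->counters[cpu] = count;
	}
}

int main(void)
{
	struct model_percpu_counter fbc = { 0 };
	long long exact;

	for (int i = 0; i < 100; i++)
		model_add_batch(&fbc, i % NR_CPUS, 1, 32);

	exact = fbc.count;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		exact += fbc.counters[cpu];
	printf("shared (approximate) = %lld, exact = %lld\n", fbc.count, exact);
	return 0;
}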
|
D | ptr_ring.h |
    38  int batch; /* number of entries to consume in a batch */  member
    271  if (unlikely(consumer_head - r->consumer_tail >= r->batch ||  in __ptr_ring_discard_one()
    476  r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));  in __ptr_ring_set_size()
    482  if (r->batch > r->size / 2 || !r->batch)  in __ptr_ring_set_size()
    483  r->batch = 1;  in __ptr_ring_set_size()
    510  static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,  in ptr_ring_unconsume() argument
    543  r->queue[head] = batch[--n];  in ptr_ring_unconsume()
    552  destroy(batch[--n]);  in ptr_ring_unconsume()
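Note: lines 476-483 above are the ptr_ring batch-size heuristic: consume roughly two cache lines' worth of slots before zeroing them out, but fall back to 1 for very small rings (and never allow 0). A standalone sketch of just that calculation follows; the cache-line constant is an assumption for the example:

#include <stdio.h>

#define SMP_CACHE_BYTES 64   /* assumed cache-line size */

static int ring_batch(int ring_size, size_t slot_size)
{
	int batch = SMP_CACHE_BYTES * 2 / slot_size;

	/* Batching hurts very small rings, and a zero batch is invalid. */
	if (batch > ring_size / 2 || !batch)
		batch = 1;
	return batch;
}

int main(void)
{
	printf("size 1024: batch %d\n", ring_batch(1024, sizeof(void *)));
	printf("size 8:    batch %d\n", ring_batch(8, sizeof(void *)));
	return 0;
}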
|
/Linux-v5.4/tools/testing/selftests/drivers/net/mlxsw/ |
D | fib_offload.sh |
    284  >> $batch_dir/add.batch
    286  >> $batch_dir/del.batch
    291  ip -batch $batch_dir/add.batch
    307  ip -batch $batch_dir/del.batch
|
/Linux-v5.4/fs/xfs/ |
D | xfs_icache.c |
    804  struct xfs_inode *batch[XFS_LOOKUP_BATCH];  in xfs_inode_ag_walk() local
    812  (void **)batch, first_index,  in xfs_inode_ag_walk()
    817  (void **) batch, first_index,  in xfs_inode_ag_walk()
    830  struct xfs_inode *ip = batch[i];  in xfs_inode_ag_walk()
    833  batch[i] = NULL;  in xfs_inode_ag_walk()
    858  if (!batch[i])  in xfs_inode_ag_walk()
    861  xfs_iflags_test(batch[i], XFS_INEW))  in xfs_inode_ag_walk()
    862  xfs_inew_wait(batch[i]);  in xfs_inode_ag_walk()
    863  error = execute(batch[i], flags, args);  in xfs_inode_ag_walk()
    864  xfs_irele(batch[i]);  in xfs_inode_ag_walk()
    [all …]
|
/Linux-v5.4/drivers/md/ |
D | dm-clone-target.c |
    957  static void __batch_hydration(struct batch_info *batch,  in __batch_hydration() argument
    963  if (batch->head) {  in __batch_hydration()
    965  if (batch->nr_batched_regions < max_batch_size &&  in __batch_hydration()
    966  (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) {  in __batch_hydration()
    967  list_add_tail(&hd->list, &batch->head->list);  in __batch_hydration()
    968  batch->nr_batched_regions++;  in __batch_hydration()
    973  if (batch->nr_batched_regions >= max_batch_size || hd) {  in __batch_hydration()
    974  hydration_copy(batch->head, batch->nr_batched_regions);  in __batch_hydration()
    975  batch->head = NULL;  in __batch_hydration()
    976  batch->nr_batched_regions = 0;  in __batch_hydration()
    [all …]
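Note: __batch_hydration() above coalesces hydrations of consecutive region numbers into one copy, issuing the batch when it fills up or the next region is not contiguous. The sketch below models that coalescing decision in isolation; the helpers, field names, and batch size are illustrative, not the dm-clone code:

#include <stdio.h>

#define MAX_BATCH 4

struct batch_info {
	long head_region;   /* first region of the current run */
	int nr;             /* regions accumulated so far, 0 if empty */
};

static void issue_copy(long head, int nr)
{
	printf("copy %d region(s) starting at %ld\n", nr, head);
}

static void batch_region(struct batch_info *b, long region)
{
	/* Extend the run only if the region is contiguous and there is room. */
	if (b->nr && region == b->head_region + b->nr && b->nr < MAX_BATCH) {
		b->nr++;
		return;
	}
	if (b->nr)
		issue_copy(b->head_region, b->nr);  /* flush the old run */
	b->head_region = region;
	b->nr = 1;
}

int main(void)
{
	struct batch_info b = { .head_region = -1, .nr = 0 };
	long regions[] = { 10, 11, 12, 20, 21, 30 };

	for (unsigned i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		batch_region(&b, regions[i]);
	if (b.nr)
		issue_copy(b.head_region, b.nr);    /* final flush */
	return 0;
}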
|