/Linux-v4.19/arch/powerpc/mm/ |
D | tlb_hash64.c |
     49  struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);  in hpte_need_flush() local
     56  i = batch->index;  in hpte_need_flush()
    105  if (!batch->active) {  in hpte_need_flush()
    121  if (i != 0 && (mm != batch->mm || batch->psize != psize ||  in hpte_need_flush()
    122      batch->ssize != ssize)) {  in hpte_need_flush()
    123  __flush_tlb_pending(batch);  in hpte_need_flush()
    127  batch->mm = mm;  in hpte_need_flush()
    128  batch->psize = psize;  in hpte_need_flush()
    129  batch->ssize = ssize;  in hpte_need_flush()
    131  batch->pte[i] = rpte;  in hpte_need_flush()
    [all …]
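The hpte_need_flush() hits above show the core of powerpc's hash-TLB batching: invalidations queue up in a per-CPU batch, and the queue is drained early whenever the owning mm or page size changes, since one flush operation can only cover entries with identical context. A minimal userspace sketch of that control flow, with hypothetical names and capacity (the real struct also tracks ssize and vpn):

    #include <stdio.h>

    #define TLB_BATCH_MAX 8   /* hypothetical capacity; the kernel's differs */

    struct tlb_batch {
        int active;                       /* batching enabled (lazy MMU mode)? */
        int mm;                           /* context owning the queued entries */
        int psize;                        /* page size of the queued entries */
        unsigned long pte[TLB_BATCH_MAX]; /* queued invalidations */
        int index;                        /* number of queued entries */
    };

    static void flush_pending(struct tlb_batch *b)
    {
        printf("flush %d entries (mm=%d psize=%d)\n", b->index, b->mm, b->psize);
        b->index = 0;
    }

    static void need_flush(struct tlb_batch *b, int mm, int psize, unsigned long pte)
    {
        int i = b->index;

        if (!b->active) {                 /* not batching: flush right away */
            printf("immediate flush of one entry\n");
            return;
        }
        /* All queued entries must share one context; on a mismatch, drain
         * the batch before queueing for the new context. */
        if (i != 0 && (mm != b->mm || psize != b->psize)) {
            flush_pending(b);
            i = 0;
        }
        if (i == 0) {
            b->mm = mm;
            b->psize = psize;
        }
        b->pte[i++] = pte;
        b->index = i;
        if (i == TLB_BATCH_MAX)           /* full: drain eagerly */
            flush_pending(b);
    }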
D | hash_native_64.c |
    792  struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);  in native_flush_hash_range() local
    793  unsigned long psize = batch->psize;  in native_flush_hash_range()
    794  int ssize = batch->ssize;  in native_flush_hash_range()
    804  vpn = batch->vpn[i];  in native_flush_hash_range()
    805  pte = batch->pte[i];  in native_flush_hash_range()
    835  vpn = batch->vpn[i];  in native_flush_hash_range()
    836  pte = batch->pte[i];  in native_flush_hash_range()
    852  vpn = batch->vpn[i];  in native_flush_hash_range()
    853  pte = batch->pte[i];  in native_flush_hash_range()
/Linux-v4.19/drivers/gpu/drm/i915/selftests/ |
D | i915_request.c |
    454  struct i915_vma *batch)  in empty_request() argument
    464  batch->node.start,  in empty_request()
    465  batch->node.size,  in empty_request()
    480  struct i915_vma *batch;  in live_empty_request() local
    491  batch = empty_batch(i915);  in live_empty_request()
    492  if (IS_ERR(batch)) {  in live_empty_request()
    493  err = PTR_ERR(batch);  in live_empty_request()
    508  request = empty_request(engine, batch);  in live_empty_request()
    521  request = empty_request(engine, batch);  in live_empty_request()
    550  i915_vma_unpin(batch);  in live_empty_request()
    [all …]
D | intel_hangcheck.c |
     43  u32 *batch;  member
     84  h->batch = vaddr;  in hang_init()
    115  u32 *batch;  in emit_recurse_batch() local
    152  batch = h->batch;  in emit_recurse_batch()
    154  *batch++ = MI_STORE_DWORD_IMM_GEN4;  in emit_recurse_batch()
    155  *batch++ = lower_32_bits(hws_address(hws, rq));  in emit_recurse_batch()
    156  *batch++ = upper_32_bits(hws_address(hws, rq));  in emit_recurse_batch()
    157  *batch++ = rq->fence.seqno;  in emit_recurse_batch()
    158  *batch++ = MI_ARB_CHECK;  in emit_recurse_batch()
    160  memset(batch, 0, 1024);  in emit_recurse_batch()
    [all …]
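The emit_recurse_batch() hits illustrate the i915 command-emission idiom: commands and their operands are written into a CPU-mapped batch buffer through a u32 pointer that is post-incremented one dword at a time. A sketch of that style under invented opcode values (real MI_* commands are hardware-defined and not reproduced here):

    #include <stdint.h>

    /* Hypothetical opcode values, stand-ins for MI_STORE_DWORD_IMM_GEN4
     * and MI_ARB_CHECK. */
    #define OP_STORE_DWORD 0x20100000u
    #define OP_ARB_CHECK   0x05000000u

    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    /* Emit "store `value` at 64-bit address `addr`" and return the advanced
     * write cursor, mirroring the *batch++ style seen above. */
    static uint32_t *emit_store_dword(uint32_t *batch, uint64_t addr, uint32_t value)
    {
        *batch++ = OP_STORE_DWORD;
        *batch++ = lower_32_bits(addr);
        *batch++ = upper_32_bits(addr);
        *batch++ = value;
        *batch++ = OP_ARB_CHECK;   /* arbitration (preemption) point */
        return batch;              /* caller keeps emitting from here */
    }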
D | intel_lrc.c |
     16  u32 *batch;  member
     57  spin->batch = vaddr;  in spinner_init()
     88  u32 *batch;  in emit_recurse_batch() local
    125  batch = spin->batch;  in emit_recurse_batch()
    127  *batch++ = MI_STORE_DWORD_IMM_GEN4;  in emit_recurse_batch()
    128  *batch++ = lower_32_bits(hws_address(hws, rq));  in emit_recurse_batch()
    129  *batch++ = upper_32_bits(hws_address(hws, rq));  in emit_recurse_batch()
    130  *batch++ = rq->fence.seqno;  in emit_recurse_batch()
    132  *batch++ = arbitration_command;  in emit_recurse_batch()
    134  *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;  in emit_recurse_batch()
    [all …]
D | i915_gem_context.c |
    122  struct i915_vma *batch;  in gpu_fill() local
    148  batch = gpu_fill_dw(vma,  in gpu_fill()
    153  if (IS_ERR(batch)) {  in gpu_fill()
    154  err = PTR_ERR(batch);  in gpu_fill()
    169  batch->node.start, batch->node.size,  in gpu_fill()
    174  err = i915_vma_move_to_active(batch, rq, 0);  in gpu_fill()
    182  i915_gem_object_set_active_reference(batch->obj);  in gpu_fill()
    183  i915_vma_unpin(batch);  in gpu_fill()
    184  i915_vma_close(batch);  in gpu_fill()
    197  i915_vma_unpin(batch);  in gpu_fill()
/Linux-v4.19/arch/powerpc/include/asm/book3s/64/ |
D | tlbflush-hash.h |
     25  extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
     31  struct ppc64_tlb_batch *batch;  in arch_enter_lazy_mmu_mode() local
     35  batch = this_cpu_ptr(&ppc64_tlb_batch);  in arch_enter_lazy_mmu_mode()
     36  batch->active = 1;  in arch_enter_lazy_mmu_mode()
     41  struct ppc64_tlb_batch *batch;  in arch_leave_lazy_mmu_mode() local
     45  batch = this_cpu_ptr(&ppc64_tlb_batch);  in arch_leave_lazy_mmu_mode()
     47  if (batch->index)  in arch_leave_lazy_mmu_mode()
     48  __flush_tlb_pending(batch);  in arch_leave_lazy_mmu_mode()
     49  batch->active = 0;  in arch_leave_lazy_mmu_mode()
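These hits show the lazy-MMU bracket around the batch: entering marks the per-CPU batch active so updates may be deferred, and leaving flushes anything still pending before deactivating it. A self-contained sketch of that enter/leave pairing, with a simplified batch struct (the per-CPU indirection is elided):

    #include <stdbool.h>

    struct lazy_batch {
        bool active;
        int index;                 /* entries still queued */
    };

    static void flush_pending(struct lazy_batch *b)
    {
        /* ... issue the queued invalidations ... */
        b->index = 0;
    }

    static void enter_lazy_mmu(struct lazy_batch *b)
    {
        b->active = true;          /* updates may be deferred from here on */
    }

    static void leave_lazy_mmu(struct lazy_batch *b)
    {
        if (b->index)              /* drain whatever is still queued */
            flush_pending(b);
        b->active = false;
    }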
/Linux-v4.19/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_mob.c |
    238  struct vmw_otable_batch *batch)  in vmw_otable_batch_setup() argument
    242  struct vmw_otable *otables = batch->otables;  in vmw_otable_batch_setup()
    251  for (i = 0; i < batch->num_otables; ++i) {  in vmw_otable_batch_setup()
    263  0, false, &batch->otable_bo);  in vmw_otable_batch_setup()
    268  ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);  in vmw_otable_batch_setup()
    270  ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);  in vmw_otable_batch_setup()
    273  ret = vmw_bo_map_dma(batch->otable_bo);  in vmw_otable_batch_setup()
    277  ttm_bo_unreserve(batch->otable_bo);  in vmw_otable_batch_setup()
    280  for (i = 0; i < batch->num_otables; ++i) {  in vmw_otable_batch_setup()
    281  if (!batch->otables[i].enabled)  in vmw_otable_batch_setup()
    [all …]
/Linux-v4.19/drivers/gpu/drm/i915/ |
D | intel_lrc.c |
   1426  gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)  in gen8_emit_flush_coherentl3_wa() argument
   1428  *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;  in gen8_emit_flush_coherentl3_wa()
   1429  *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);  in gen8_emit_flush_coherentl3_wa()
   1430  *batch++ = i915_ggtt_offset(engine->scratch) + 256;  in gen8_emit_flush_coherentl3_wa()
   1431  *batch++ = 0;  in gen8_emit_flush_coherentl3_wa()
   1433  *batch++ = MI_LOAD_REGISTER_IMM(1);  in gen8_emit_flush_coherentl3_wa()
   1434  *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);  in gen8_emit_flush_coherentl3_wa()
   1435  *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;  in gen8_emit_flush_coherentl3_wa()
   1437  batch = gen8_emit_pipe_control(batch,  in gen8_emit_flush_coherentl3_wa()
   1442  *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;  in gen8_emit_flush_coherentl3_wa()
    [all …]
D | i915_gem_execbuffer.c |
    242  struct i915_vma *batch; /** identity of the batch obj/vma */  member
    552  eb->batch = vma;  in eb_add_vma()
    749  unsigned int i, batch;  in eb_lookup_vmas() local
    761  batch = eb_batch_index(eb);  in eb_lookup_vmas()
    805  err = eb_add_vma(eb, i, batch, vma);  in eb_lookup_vmas()
    929  GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));  in reloc_gpu_flush()
    931  i915_gem_object_unpin_map(cache->rq->batch->obj);  in reloc_gpu_flush()
   1119  struct i915_vma *batch;  in __reloc_gpu_alloc() local
   1141  batch = i915_vma_instance(obj, vma->vm, NULL);  in __reloc_gpu_alloc()
   1142  if (IS_ERR(batch)) {  in __reloc_gpu_alloc()
    [all …]
D | intel_renderstate.h |
     31  const u32 *batch;  member
     38  .batch = gen ## _g ## _null_state_batch, \
D | i915_gem_render_state.c |
     71  #define OUT_BATCH(batch, i, val) \  argument
     75  (batch)[(i)++] = (val); \
     94  u32 s = rodata->batch[i];  in render_state_setup()
    101  rodata->batch[i + 1] != 0)  in render_state_setup()
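The OUT_BATCH() hits show the classic append-to-batch macro: write one dword into the array and advance the caller's index in the same expression. A sketch of that idiom; the do/while(0) wrapper and the bounds assert are additions so the macro expands safely inside if/else and cannot overrun the buffer:

    #include <assert.h>
    #include <stdint.h>

    #define BATCH_WORDS 64            /* hypothetical buffer size */

    /* Append one dword and advance the caller's index. */
    #define OUT_BATCH(batch, i, val)          \
        do {                                  \
            assert((i) < BATCH_WORDS);        \
            (batch)[(i)++] = (uint32_t)(val); \
        } while (0)

    /* Usage: the index travels with the buffer across calls. */
    static int fill(uint32_t *buf)
    {
        int i = 0;

        OUT_BATCH(buf, i, 0xdeadbeef);
        OUT_BATCH(buf, i, 0);
        return i;                     /* number of dwords emitted */
    }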
/Linux-v4.19/arch/s390/mm/ |
D | pgalloc.c |
    337  struct mmu_table_batch *batch;  in tlb_remove_table_rcu() local
    340  batch = container_of(head, struct mmu_table_batch, rcu);  in tlb_remove_table_rcu()
    342  for (i = 0; i < batch->nr; i++)  in tlb_remove_table_rcu()
    343  __tlb_remove_table(batch->tables[i]);  in tlb_remove_table_rcu()
    345  free_page((unsigned long)batch);  in tlb_remove_table_rcu()
    350  struct mmu_table_batch **batch = &tlb->batch;  in tlb_table_flush() local
    352  if (*batch) {  in tlb_table_flush()
    353  call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);  in tlb_table_flush()
    354  *batch = NULL;  in tlb_table_flush()
    360  struct mmu_table_batch **batch = &tlb->batch;  in tlb_remove_table() local
    [all …]
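tlb_remove_table_rcu() demonstrates the embedded-callback-head pattern: the deferred callback receives only a pointer to the rcu member, and container_of() walks back to the enclosing batch so all queued tables can be freed at once. A userspace sketch of that step, with hypothetical stand-ins for rcu_head and the free helpers:

    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cb_head {                   /* stand-in for struct rcu_head */
        void (*func)(struct cb_head *);
    };

    struct table_batch {
        struct cb_head rcu;            /* callback linkage, embedded */
        unsigned int nr;
        void *tables[16];
    };

    /* The callback sees only the embedded member; container_of() recovers
     * the whole batch so its entries can be released together. */
    static void remove_table_cb(struct cb_head *head)
    {
        struct table_batch *batch = container_of(head, struct table_batch, rcu);
        unsigned int i;

        for (i = 0; i < batch->nr; i++)
            free(batch->tables[i]);    /* __tlb_remove_table() stand-in */
        free(batch);                   /* free_page() stand-in */
    }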
/Linux-v4.19/drivers/xen/ |
D | gntdev.c |
    847  static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,  in gntdev_get_page() argument
    859  batch->pages[batch->nr_pages++] = page;  in gntdev_get_page()
    867  static void gntdev_put_pages(struct gntdev_copy_batch *batch)  in gntdev_put_pages() argument
    871  for (i = 0; i < batch->nr_pages; i++)  in gntdev_put_pages()
    872  put_page(batch->pages[i]);  in gntdev_put_pages()
    873  batch->nr_pages = 0;  in gntdev_put_pages()
    876  static int gntdev_copy(struct gntdev_copy_batch *batch)  in gntdev_copy() argument
    880  gnttab_batch_copy(batch->ops, batch->nr_ops);  in gntdev_copy()
    881  gntdev_put_pages(batch);  in gntdev_copy()
    887  for (i = 0; i < batch->nr_ops; i++) {  in gntdev_copy()
    [all …]
/Linux-v4.19/drivers/net/ethernet/netronome/nfp/flower/ |
D | lag_conf.c |
    232  unsigned int member_cnt, enum nfp_fl_lag_batch *batch)  in nfp_fl_lag_config_group() argument
    252  if (*batch == NFP_FL_LAG_BATCH_FIRST) {  in nfp_fl_lag_config_group()
    255  *batch = NFP_FL_LAG_BATCH_MEMBER;  in nfp_fl_lag_config_group()
    261  *batch = NFP_FL_LAG_BATCH_FINISHED;  in nfp_fl_lag_config_group()
    267  if (*batch == NFP_FL_LAG_BATCH_FINISHED) {  in nfp_fl_lag_config_group()
    294  enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;  in nfp_fl_lag_do_work() local
    316  &batch);  in nfp_fl_lag_do_work()
    385  active_count, &batch);  in nfp_fl_lag_do_work()
    399  if (batch == NFP_FL_LAG_BATCH_MEMBER) {  in nfp_fl_lag_do_work()
    400  batch = NFP_FL_LAG_BATCH_FINISHED;  in nfp_fl_lag_do_work()
    [all …]
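Here `batch` is not a buffer but a three-state protocol marker threaded through a work pass: the first group configured in a pass opens the batch, later groups join it as members, and the pass closes the batch so the device can apply the whole set together. A speculative sketch of just that state machine (message building and firmware details omitted):

    /* Hypothetical mirror of enum nfp_fl_lag_batch. */
    enum lag_batch {
        LAG_BATCH_FIRST,
        LAG_BATCH_MEMBER,
        LAG_BATCH_FINISHED,
    };

    /* Called once per group in a work pass; `last` marks the final group. */
    static void config_group(int last, enum lag_batch *batch)
    {
        if (*batch == LAG_BATCH_FIRST)
            *batch = LAG_BATCH_MEMBER;     /* batch is now open */
        if (last)
            *batch = LAG_BATCH_FINISHED;   /* signal: commit the batch */
    }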
/Linux-v4.19/tools/virtio/ringtest/ |
D | main.c |
     22  int batch = 1;  variable
    116  int tokick = batch;  in run_guest()
    129  tokick = batch;  in run_guest()
    348  batch = c;  in main()
    372  if (batch > max_outstanding)  in main()
    373  batch = max_outstanding;  in main()
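The ringtest hits show kick coalescing: instead of notifying the other side after every submission, run_guest() counts down `tokick` and kicks once per `batch` buffers. A sketch of that countdown, assuming a hypothetical kick() notification:

    static int kicks;              /* count notifications, for illustration */

    static void kick(void) { kicks++; }

    /* Submit `total` buffers, notifying only once per `batch` of them. */
    static void run_guest_sketch(int batch, int total)
    {
        int tokick = batch;

        for (int i = 0; i < total; i++) {
            /* ... post one buffer to the ring ... */
            if (--tokick == 0) {
                kick();
                tokick = batch;    /* re-arm the countdown */
            }
        }
        if (tokick != batch)       /* partial batch left unkicked */
            kick();
    }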
/Linux-v4.19/arch/x86/xen/ |
D | mmu.c |
    126  int batch = min(REMAP_BATCH_SIZE, nr);  in do_remap_pfn() local
    127  int batch_left = batch;  in do_remap_pfn()
    128  range = (unsigned long)batch << PAGE_SHIFT;  in do_remap_pfn()
    164  nr -= batch;  in do_remap_pfn()
    167  err_ptr += batch;  in do_remap_pfn()
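do_remap_pfn() is plain range chunking: a request for `nr` pages is processed at most REMAP_BATCH_SIZE pages per iteration, with the cursors advanced by one chunk each time. A compact sketch of the loop shape (the actual hypercall work is elided):

    #define REMAP_BATCH_SIZE 16
    #define PAGE_SHIFT 12

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* Process `nr` pages in REMAP_BATCH_SIZE-page chunks. */
    static void remap_sketch(unsigned long addr, int nr)
    {
        while (nr > 0) {
            int batch = min_int(REMAP_BATCH_SIZE, nr);
            unsigned long range = (unsigned long)batch << PAGE_SHIFT;

            /* ... remap `batch` pages starting at addr ... */
            nr -= batch;
            addr += range;
        }
    }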
/Linux-v4.19/lib/ |
D | percpu_counter.c |
     82  void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)  in percpu_counter_add_batch() argument
     88  if (count >= batch || count <= -batch) {  in percpu_counter_add_batch()
    202  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)  in __percpu_counter_compare() argument
    208  if (abs(count - rhs) > (batch * num_online_cpus())) {  in __percpu_counter_compare()
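percpu_counter_add_batch() trades accuracy for scalability: each CPU absorbs updates into a cheap local delta and folds it into the shared 64-bit total only once its magnitude reaches `batch` (which is also why the compare path must allow batch * num_online_cpus() of slack). A single-threaded sketch of the threshold logic, with locking and the per-CPU indirection elided:

    #include <stdint.h>

    struct counter_sketch {
        int64_t total;             /* shared, slow, authoritative */
        int32_t local;             /* per-CPU, fast, unflushed delta */
    };

    /* Fold into the shared total only when |local delta| reaches batch. */
    static void counter_add_batch(struct counter_sketch *c, int64_t amount,
                                  int32_t batch)
    {
        int64_t count = c->local + amount;

        if (count >= batch || count <= -batch) {
            c->total += count;
            c->local = 0;
        } else {
            c->local = (int32_t)count;
        }
    }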
/Linux-v4.19/include/linux/ |
D | percpu_counter.h |
     44  s32 batch);
     46  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
    127  __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)  in __percpu_counter_compare() argument
    141  percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)  in percpu_counter_add_batch() argument
D | ptr_ring.h |
     43  int batch; /* number of entries to consume in a batch */  member
    276  if (unlikely(consumer_head - r->consumer_tail >= r->batch ||  in __ptr_ring_discard_one()
    481  r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));  in __ptr_ring_set_size()
    487  if (r->batch > r->size / 2 || !r->batch)  in __ptr_ring_set_size()
    488  r->batch = 1;  in __ptr_ring_set_size()
    515  static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,  in ptr_ring_unconsume() argument
    548  r->queue[head] = batch[--n];  in ptr_ring_unconsume()
    557  destroy(batch[--n]);  in ptr_ring_unconsume()
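The __ptr_ring_set_size() hits encode a cache-aware sizing heuristic: invalidate consumed slots roughly two cache lines of pointers at a time, but never more than half the ring (so producer and consumer don't contend on the same line), and always at least one. A sketch of just that computation, assuming a 64-byte cache line:

    #define SMP_CACHE_BYTES 64     /* typical x86 value; arch-dependent */

    /* About two cache lines of pointers per batch, clamped to a sane
     * range for small rings. */
    static int ring_batch_size(int ring_size)
    {
        int batch = SMP_CACHE_BYTES * 2 / (int)sizeof(void *);

        if (batch > ring_size / 2 || batch == 0)
            batch = 1;
        return batch;
    }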
/Linux-v4.19/mm/ |
D | memory.c |
    193  struct mmu_gather_batch *batch;  in tlb_next_batch() local
    195  batch = tlb->active;  in tlb_next_batch()
    196  if (batch->next) {  in tlb_next_batch()
    197  tlb->active = batch->next;  in tlb_next_batch()
    204  batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);  in tlb_next_batch()
    205  if (!batch)  in tlb_next_batch()
    209  batch->next = NULL;  in tlb_next_batch()
    210  batch->nr = 0;  in tlb_next_batch()
    211  batch->max = MAX_GATHER_BATCH;  in tlb_next_batch()
    213  tlb->active->next = batch;  in tlb_next_batch()
    [all …]
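tlb_next_batch() shows how the mmu_gather grows: batches form a singly linked list, and when the active one fills up the walker reuses an already-chained spare or opportunistically allocates a fresh one, falling back to an immediate flush if allocation fails. A userspace sketch of that shape, with calloc() standing in for __get_free_pages():

    #include <stdlib.h>

    #define GATHER_MAX 32          /* hypothetical; the kernel sizes by page */

    struct gather_batch {
        struct gather_batch *next;
        unsigned int nr, max;
        void *pages[GATHER_MAX];
    };

    struct gather {
        struct gather_batch *active;
    };

    /* Returns 1 if a usable batch is active, 0 to tell the caller to
     * flush now instead of gathering further. */
    static int next_batch(struct gather *tlb)
    {
        struct gather_batch *batch = tlb->active;

        if (batch->next) {         /* a spare batch is already chained */
            tlb->active = batch->next;
            return 1;
        }
        batch = calloc(1, sizeof(*batch));
        if (!batch)
            return 0;              /* allocation failed: flush eagerly */
        batch->max = GATHER_MAX;
        tlb->active->next = batch;
        tlb->active = batch;
        return 1;
    }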
/Linux-v4.19/fs/xfs/ |
D | xfs_icache.c |
    799  struct xfs_inode *batch[XFS_LOOKUP_BATCH];  in xfs_inode_ag_walk() local
    807  (void **)batch, first_index,  in xfs_inode_ag_walk()
    812  (void **) batch, first_index,  in xfs_inode_ag_walk()
    825  struct xfs_inode *ip = batch[i];  in xfs_inode_ag_walk()
    828  batch[i] = NULL;  in xfs_inode_ag_walk()
    853  if (!batch[i])  in xfs_inode_ag_walk()
    856  xfs_iflags_test(batch[i], XFS_INEW))  in xfs_inode_ag_walk()
    857  xfs_inew_wait(batch[i]);  in xfs_inode_ag_walk()
    858  error = execute(batch[i], flags, args);  in xfs_inode_ag_walk()
    859  xfs_irele(batch[i]);  in xfs_inode_ag_walk()
    [all …]
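xfs_inode_ag_walk() batches tree traversal: each gang lookup grabs up to XFS_LOOKUP_BATCH inode pointers, a validation pass may NULL out slots whose inodes went away, and the execute loop skips those holes. A self-contained sketch of that shape; gang_lookup() and the string pool are stand-ins for radix_tree_gang_lookup() and the inode tree:

    #include <stddef.h>

    #define LOOKUP_BATCH 4         /* the kernel uses XFS_LOOKUP_BATCH (32) */

    static const char *pool[] = { "ip0", "ip1", NULL, "ip3", "ip4" };
    #define POOL_SIZE 5

    /* Copy up to `max` entries starting at `first`; return how many. */
    static int gang_lookup(size_t first, const char **out, int max)
    {
        int n = 0;

        while (first + n < POOL_SIZE && n < max) {
            out[n] = pool[first + n];
            n++;
        }
        return n;
    }

    /* Walk everything batch-by-batch, skipping NULLed-out slots. */
    static void walk_all(void (*execute)(const char *))
    {
        const char *batch[LOOKUP_BATCH];
        size_t first_index = 0;
        int nr, i;

        while ((nr = gang_lookup(first_index, batch, LOOKUP_BATCH)) > 0) {
            for (i = 0; i < nr; i++) {
                if (!batch[i])     /* hole: object vanished under us */
                    continue;
                execute(batch[i]);
            }
            first_index += nr;     /* simplified cursor advance */
        }
    }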
/Linux-v4.19/arch/x86/include/asm/ |
D | tlbflush.h |
    583  static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,  in arch_tlbbatch_add_mm() argument
    587  cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));  in arch_tlbbatch_add_mm()
    590  extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
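On x86, deferring an unmap flush is just mask accumulation: each arch_tlbbatch_add_mm() ORs the mm's CPU mask into the batch, and the eventual arch_tlbbatch_flush() targets every CPU set in the result. A sketch reduced to a 64-bit mask (the kernel's cpumask scales to any CPU count):

    #include <stdint.h>

    struct flush_batch {
        uint64_t cpumask;          /* one bit per CPU; 64 CPUs max here */
    };

    /* cpumask_or() analogue: remember every CPU the mm has run on. */
    static void tlbbatch_add_mm(struct flush_batch *batch, uint64_t mm_cpumask)
    {
        batch->cpumask |= mm_cpumask;
    }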
/Linux-v4.19/drivers/infiniband/core/ |
D | cq.c |
     30  int batch)  in __ib_process_cq() argument
     39  while ((n = ib_poll_cq(cq, min_t(u32, batch,  in __ib_process_cq()
     52  if (n != batch || (budget != -1 && completed >= budget))  in __ib_process_cq()
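__ib_process_cq() polls the completion queue in batch-sized chunks under an overall budget: a short batch means the queue has drained, while a met budget means the caller must yield and reschedule. A self-contained sketch of the loop, with poll_queue() as a stand-in for ib_poll_cq() over a pretend backlog:

    static int min_int(int a, int b) { return a < b ? a : b; }

    static int remaining = 100;    /* pretend the queue holds this many */

    static int poll_queue(int max)
    {
        int n = min_int(remaining, max);

        remaining -= n;
        return n;
    }

    /* budget == -1 means "no budget", mirroring the kernel convention. */
    static int process_cq(int budget, int batch)
    {
        int completed = 0, n;

        while ((n = poll_queue(min_int(batch,
                        budget == -1 ? batch : budget - completed))) > 0) {
            completed += n;
            /* Short batch: queue drained. Met budget: stop and yield. */
            if (n != batch || (budget != -1 && completed >= budget))
                break;
        }
        return completed;
    }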
/Linux-v4.19/tools/kvm/kvm_stat/ |
D | kvm_stat.txt |
     23  Use batch and logging modes for scripting purposes.
     62  --batch::
     63  run in batch mode for one second