/Linux-v6.1/arch/x86/um/tls_32.c

     99  * Actually, now if it wasn't flushed it gets cleared and  in load_TLS()
    100  * flushed to the host, which will clear it.  in load_TLS()
    103  if (!curr->flushed) {  in load_TLS()
    112  if (!(flags & O_FORCE) && curr->flushed)  in load_TLS()
    119  curr->flushed = 1;  in load_TLS()
    127  * present desc's, only if they haven't been flushed.
    142  if (curr->flushed)  in needs_TLS_update()
    151  * On a newly forked process, the TLS descriptors haven't yet been flushed. So
    164  * will remain as flushed as it was.  in clear_flushed_tls()
    169  curr->flushed = 0;  in clear_flushed_tls()
    [all …]

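Taken together, these hits show the dirty-tracking scheme arch/x86/um uses for guest TLS descriptors: an entry is synced to the host only if it is present and not yet flushed, and a fork clears the flushed bits so the child resyncs everything. A minimal standalone sketch of that pattern (names such as tls_entry, sync_to_host and FORCE_SYNC are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define NR_TLS_ENTRIES 3
#define FORCE_SYNC     1

struct tls_entry {
	bool present;	/* a descriptor is installed in this slot */
	bool flushed;	/* the host already has this version */
};

static void sync_to_host(struct tls_entry *e, int idx)
{
	printf("syncing TLS slot %d to host\n", idx);
	e->flushed = true;	/* host copy is now up to date */
}

/* Push unflushed descriptors out: already-flushed entries are
 * skipped unless the caller forces a resync. */
static void load_tls(struct tls_entry *tls, int flags)
{
	for (int i = 0; i < NR_TLS_ENTRIES; i++) {
		if (!tls[i].present)
			continue;
		if (!(flags & FORCE_SYNC) && tls[i].flushed)
			continue;
		sync_to_host(&tls[i], i);
	}
}

/* On fork, the child's host context holds none of these descriptors,
 * so mark everything unflushed and let the next load resync it. */
static void clear_flushed_tls(struct tls_entry *tls)
{
	for (int i = 0; i < NR_TLS_ENTRIES; i++)
		tls[i].flushed = false;
}

int main(void)
{
	struct tls_entry tls[NR_TLS_ENTRIES] = {
		{ true, false }, { true, true }, { false, false }
	};

	load_tls(tls, 0);	/* syncs slot 0 only; slot 1 is already flushed */
	clear_flushed_tls(tls);	/* e.g. after fork */
	load_tls(tls, 0);	/* now syncs slots 0 and 1 */
	return 0;
}
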
/Linux-v6.1/arch/powerpc/include/asm/security_features.h

     50  // The L1-D cache can be flushed with ori r30,r30,0
     53  // The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
     76  // The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
     79  // The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
     94  // The L1-D cache should be flushed when entering the kernel
     97  // The L1-D cache should be flushed after user accesses from the kernel

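The comments above enumerate the situations in which powerpc requires an L1-D flush. A sketch of the underlying pattern, one feature bit per trigger plus a query helper; the bit values and the helper below are written for illustration, not copied from the header:

#include <stdint.h>
#include <stdbool.h>

#define SEC_FTR_L1D_FLUSH_HV      (1ULL << 0) /* on MSR[HV] 1->0 (hypervisor to guest) */
#define SEC_FTR_L1D_FLUSH_PR      (1ULL << 1) /* on MSR[PR] 0->1 (kernel to userspace) */
#define SEC_FTR_L1D_FLUSH_ENTRY   (1ULL << 2) /* when entering the kernel */
#define SEC_FTR_L1D_FLUSH_UACCESS (1ULL << 3) /* after user accesses from the kernel */

static uint64_t enabled_flush_features;	/* set from firmware/device tree at boot */

static bool security_ftr_enabled(uint64_t feature)
{
	return (enabled_flush_features & feature) != 0;
}
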
/Linux-v6.1/mm/percpu-vm.c

    117  * @chunk: chunk the regions to be flushed belongs to
    118  * @page_start: page index of the first page to be flushed
    119  * @page_end: page index of the last page to be flushed + 1
    174  * @chunk: pcpu_chunk the regions to be flushed belong to
    175  * @page_start: page index of the first page to be flushed
    176  * @page_end: page index of the last page to be flushed + 1
    245  * @chunk: pcpu_chunk the regions to be flushed belong to
    246  * @page_start: page index of the first page to be flushed
    247  * @page_end: page index of the last page to be flushed + 1

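Note the half-open convention in this kerneldoc: @page_end is "the last page to be flushed + 1", so a flush loop excludes it. A trivial sketch, with flush_one_page() as a hypothetical stand-in for the real per-page work:

#include <stdio.h>

static void flush_one_page(int page)	/* hypothetical per-page flush */
{
	printf("flushing page %d\n", page);
}

/* page_end is exclusive: "last page to be flushed + 1". */
static void flush_region(int page_start, int page_end)
{
	for (int page = page_start; page < page_end; page++)
		flush_one_page(page);
}
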
/Linux-v6.1/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/ifu.json

     57  "PublicDescription": "Thread flushed due to TLB miss",
     60  "BriefDescription": "Thread flushed due to TLB miss"
     63  "PublicDescription": "Thread flushed due to reasons other than TLB miss",
     66  "BriefDescription": "Thread flushed due to reasons other than TLB miss"

/Linux-v6.1/arch/x86/lib/usercopy_64.c

     47  unsigned long flushed, dest = (unsigned long) dst;  in __copy_user_flushcache() local
     67  flushed = dest - (unsigned long) dst;  in __copy_user_flushcache()
     68  if (size > flushed && !IS_ALIGNED(size - flushed, 8))  in __copy_user_flushcache()

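These three hits are the tail-handling logic of __copy_user_flushcache(): after the cache-bypassing fast path, any leftover bytes that do not form whole 8-byte words still need an explicit cache flush. A hedged sketch of that check, with clean_cache_range() reduced to a stub:

#include <stddef.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static void clean_cache_range(void *addr, size_t size)	/* stand-in flush primitive */
{
	printf("flushing %zu bytes at %p\n", size, addr);
}

/* 'dest' has advanced past the bytes already written with
 * cache-bypassing stores; a misaligned remainder must be flushed. */
static void flush_unaligned_tail(void *dst, unsigned long dest, size_t size)
{
	unsigned long flushed = dest - (unsigned long)dst;	/* bytes done so far */

	if (size > flushed && !IS_ALIGNED(size - flushed, 8))
		clean_cache_range((char *)dst + flushed, size - flushed);
}
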
/Linux-v6.1/drivers/gpu/drm/i915/gt/gen2_engine_cs.c

     52  * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is  in gen4_emit_flush_rcs()
     53  * also flushed at 2d versus 3d pipeline switches.  in gen4_emit_flush_rcs()
     57  * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if  in gen4_emit_flush_rcs()
     58  * MI_READ_FLUSH is set, and is always flushed on 965.  in gen4_emit_flush_rcs()
     73  * are flushed at any MI_FLUSH.  in gen4_emit_flush_rcs()

/Linux-v6.1/arch/x86/um/asm/processor_32.h

     17  unsigned flushed:1;  member
     30  { .present = 0, .flushed = 0 } }, \

/Linux-v6.1/drivers/gpu/drm/savage/savage_bci.c

    319  dev_priv->dma_pages[i].flushed = 0;  in savage_dma_init()
    338  dev_priv->dma_pages[i].flushed = 0;  in savage_dma_reset()
    400  dev_priv->dma_pages[i].flushed = 0;  in savage_dma_alloc()
    438  dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)  in savage_dma_flush()
    446  DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "  in savage_dma_flush()
    448  first, cur, dev_priv->dma_pages[first].flushed,  in savage_dma_flush()
    467  dev_priv->dma_pages[first].flushed) * 4;  in savage_dma_flush()
    469  dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;  in savage_dma_flush()
    488  dev_priv->dma_pages[i].flushed = 0;  in savage_dma_flush()
    494  dev_priv->dma_pages[cur].flushed = 0;  in savage_dma_flush()
    [all …]

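The savage driver keeps two watermarks per DMA page, used and flushed, and savage_dma_flush() submits only the [flushed, used) delta before advancing the watermark. A sketch of that bookkeeping with illustrative names and page capacity:

#include <stdio.h>

#define ENTRIES_PER_PAGE 256	/* illustrative page capacity */

struct dma_page {
	unsigned used;		/* entries written so far */
	unsigned flushed;	/* entries already handed to hardware */
};

static void emit_to_hw(unsigned first, unsigned count)	/* stand-in submit */
{
	printf("submitting %u entries starting at %u\n", count, first);
}

/* Submit only the delta since the last flush, then move the watermark. */
static void dma_flush_page(struct dma_page *p, unsigned page_index)
{
	if (p->used == p->flushed)
		return;	/* nothing new to submit */

	emit_to_hw(page_index * ENTRIES_PER_PAGE + p->flushed,
		   p->used - p->flushed);
	p->flushed = p->used;
}
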
/Linux-v6.1/drivers/infiniband/hw/cxgb4/cq.c

    205  int flushed = 0;  in c4iw_flush_rq() local
    212  flushed++;  in c4iw_flush_rq()
    214  return flushed;  in c4iw_flush_rq()
    240  int flushed = 0;  in c4iw_flush_sq() local
    252  swsqe->flushed = 1;  in c4iw_flush_sq()
    257  flushed++;  in c4iw_flush_sq()
    261  wq->sq.flush_cidx += flushed;  in c4iw_flush_sq()
    264  return flushed;  in c4iw_flush_sq()
    291  swsqe->flushed = 1;  in flush_completed_wrs()
    362  if (qhp->wq.flushed == 1)  in c4iw_flush_hw_cq()
    [all …]

/Linux-v6.1/drivers/infiniband/hw/cxgb4/restrack.c

     44  if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))  in fill_sq()
    109  if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))  in fill_swsqe()

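The c4iw_flush_rq()/c4iw_flush_sq() hits share one shape: walk the pending work-queue entries, mark each one flushed, and return the count so the caller can advance its flush index (wq->sq.flush_cidx above). A condensed sketch with illustrative types:

struct swsqe {
	int flushed;	/* entry has been drained with a flush status */
};

/* Mark every pending entry flushed and report how many were drained,
 * so the caller can advance its flush index by the same amount. */
static int flush_sq(struct swsqe *sqe, int pending)
{
	int flushed = 0;

	for (int i = 0; i < pending; i++) {
		sqe[i].flushed = 1;
		flushed++;
	}
	return flushed;
}
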
/Linux-v6.1/include/linux/mm_types_task.h

     70  * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
     80  * flushed before IO is initiated or a stale TLB entry potentially

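The comments above state the batching contract: every PFN recorded in a TLB batch must be flushed on all CPUs before dependent IO starts, or a stale entry could be used. A sketch of the accumulate-then-flush shape (illustrative names, not the arch_tlbbatch_* API):

#include <stdbool.h>
#include <stdio.h>

struct tlb_batch {
	unsigned long cpumask;	/* CPUs that may still cache stale PTEs */
	bool pending;
};

static void ipi_flush_tlb(unsigned long cpumask)	/* stand-in IPI flush */
{
	printf("flushing TLBs on cpumask %#lx\n", cpumask);
}

/* Record that 'cpu' may hold a stale translation for an unmapped page. */
static void batch_add_cpu(struct tlb_batch *b, int cpu)
{
	b->cpumask |= 1UL << cpu;
	b->pending = true;
}

/* Must complete before IO against the unmapped pages is initiated. */
static void batch_flush(struct tlb_batch *b)
{
	if (!b->pending)
		return;
	ipi_flush_tlb(b->cpumask);
	b->cpumask = 0;
	b->pending = false;
}
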
/Linux-v6.1/drivers/net/ppp/ppp_mppe.c

    297  * set the FLUSHED bit. This is contrary to RFC 3078, sec. 3.1.  in mppe_init()
    439  int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;  in mppe_decompress() local
    476  if (!state->stateful && !flushed) {  in mppe_decompress()
    477  printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "  in mppe_decompress()
    482  if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {  in mppe_decompress()
    483  printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "  in mppe_decompress()
    522  if (!flushed) {  in mppe_decompress()
    547  if (flushed)  in mppe_decompress()

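mppe_decompress() enforces RFC 3078's FLUSHED bit in two places: stateless peers must set it on every packet, and stateful peers must set it on flag packets (coherency count with low byte 0xff). A condensed sketch of just that validation; the bit value is assumed here:

#include <stdbool.h>

#define MPPE_BIT_FLUSHED 0x80	/* RFC 3078 FLUSHED flag; value assumed */

/* Returns false when the packet must be dropped because the FLUSHED
 * bit is missing where the RFC requires it. */
static bool mppe_flushed_bit_ok(bool stateful, unsigned ccount, unsigned bits)
{
	bool flushed = (bits & MPPE_BIT_FLUSHED) != 0;

	if (!stateful && !flushed)
		return false;	/* stateless mode: every packet must be FLUSHED */
	if (stateful && (ccount & 0xff) == 0xff && !flushed)
		return false;	/* flag packets (low byte 0xff) must be FLUSHED */
	return true;
}
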
/Linux-v6.1/arch/csky/abiv2/cacheflush.c

     71  /* Flush this hart's I$ now, and mark it as flushed. */  in flush_icache_mm_range()
     78  * flushed.  in flush_icache_mm_range()

/Linux-v6.1/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h

     51  * CTL registers need to be flushed after calling this function
     63  * These masks are used to specify which block(s) need to be flushed

/Linux-v6.1/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c

     44  /* when do CTL registers need to be flushed? (mask of trigger bits)
     52  struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
    247  * CTL registers need to be flushed after calling this function
    529  * CTL registers need to be flushed in some circumstances; if that is the
    533  * Return H/W flushed bit mask.

/Linux-v6.1/arch/x86/kernel/acpi/cstate.c

     40  * And caches should not be flushed by software while  in acpi_processor_power_init_bm_check()
     71  * And caches should not be flushed by software while  in acpi_processor_power_init_bm_check()
     85  * should not be flushed by software while entering C3  in acpi_processor_power_init_bm_check()

/Linux-v6.1/arch/riscv/mm/cacheflush.c

     49  /* Flush this hart's I$ now, and mark it as flushed. */  in flush_icache_mm()
     56  * flushed.  in flush_icache_mm()

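This is the same lazy invalidation scheme as in the csky hits above: flush the local hart's I$ immediately, record that it is clean, and let the other harts flush at their next context switch instead of broadcasting at once. A sketch with illustrative names:

static void local_icache_flush(void)	/* stand-in for the fence.i/cache op */
{
}

struct mm_icache {
	unsigned long stale_mask;	/* harts whose I$ may still be stale */
};

/* Flush this hart's I$ now and clear its bit; the remaining harts keep
 * their bits set and flush lazily at their next context switch. */
static void flush_icache_mm(struct mm_icache *mm, int hart)
{
	local_icache_flush();
	mm->stale_mask &= ~(1UL << hart);
}
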
/Linux-v6.1/drivers/gpu/drm/drm_cache.c

     74  mb(); /*Also used after CLFLUSH so that all cache is flushed*/  in drm_cache_flush_clflush()
     80  * @pages: List of pages to be flushed.
    137  mb(); /*Make sure that all cache line entry is flushed*/  in drm_clflush_sg()
    171  mb(); /*Ensure that every data cache line entry is flushed*/  in drm_clflush_virt_range()

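Each of these drm_cache.c hits pairs a CLFLUSH loop with a memory barrier so that every line has genuinely left the cache before the function returns. A userspace-flavoured sketch of that shape using the SSE2 intrinsics; the 64-byte line size is an assumption:

#include <emmintrin.h>	/* _mm_clflush(), _mm_mfence() */

/* Flush every cache line backing [addr, addr + length), then fence so
 * all flushes have completed before we return. */
static void clflush_range(const void *addr, unsigned long length)
{
	const unsigned long cls = 64;	/* assumed cache-line size */
	const char *p = (const char *)((unsigned long)addr & ~(cls - 1));
	const char *end = (const char *)addr + length;

	for (; p < end; p += cls)
		_mm_clflush(p);
	_mm_mfence();
}
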
/Linux-v6.1/include/uapi/drm/v3d_drm.h

    130  * The L1T, slice, L2C, L2T, and GCA caches will be flushed before
    131  * each CL executes. The VCD cache should be flushed (if necessary)
    133  * flushed by the time the render done IRQ happens, which is the
    135  * possible using TMU writes) must be flushed by the caller using the

/Linux-v6.1/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt

     45  be flushed in s/w, before powering down the core.
     50  be flushed, system bus, clocks - lowered, and SoC main XO clock gated and

/Linux-v6.1/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h

     35  * @split_flush_en: Allows both the paths to be flushed when master path is
     36  * flushed

/Linux-v6.1/arch/x86/kernel/amd_nb.c

    436  int flushed, i;  in amd_flush_garts() local
    450  flushed = 0;  in amd_flush_garts()
    454  flushed++;  in amd_flush_garts()
    468  if (!flushed)  in amd_flush_garts()

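amd_flush_garts() requests a GART TLB flush on every northbridge, then counts how many completed (the flush bit reads back clear); zero completions is reported as a failed flush. A rough sketch with a stubbed register read:

static unsigned int read_gart_flush_word(int nb)	/* stand-in PCI config read */
{
	return 0;	/* pretend the flush bit has cleared */
}

/* Count the northbridges whose flush bit has cleared; a result of zero
 * means the flush request never took effect. */
static int count_flushed_garts(int nb_count)
{
	int flushed = 0;

	for (int i = 0; i < nb_count; i++)
		if (!(read_gart_flush_word(i) & 1))
			flushed++;
	return flushed;
}
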
/Linux-v6.1/arch/x86/include/asm/tlbbatch.h

     10  * the PFNs being flushed..

/Linux-v6.1/arch/ia64/include/asm/mmu_context.h

     43  unsigned long *flushmap;/* pending rid to be flushed */
     53  * When the context counter wraps around all TLBs need to be flushed because

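The second comment is the classic context-ID recycling rule: once the context counter wraps, previously issued IDs are about to be reused, so all TLBs must be flushed first. A generic sketch of that allocator (limits illustrative):

static void flush_all_tlbs(void)	/* stand-in for a global TLB flush */
{
}

static unsigned long next_ctx = 1;
static const unsigned long max_ctx = 1UL << 18;	/* illustrative ID space */

/* Hand out context IDs; when the counter wraps, old IDs are about to be
 * recycled, so every TLB must be flushed first to drop stale entries. */
static unsigned long get_new_context(void)
{
	if (next_ctx > max_ctx) {
		flush_all_tlbs();
		next_ctx = 1;
	}
	return next_ctx++;
}
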
/Linux-v6.1/Documentation/driver-api/md/raid5-cache.rst

     35  is safe on the cache disk, the data will be flushed onto RAID disks. The
     62  filesystems) right after the data hits cache disk. The data is flushed to raid