
Searched full:flushed (Results 1 – 25 of 701) sorted by relevance


/Linux-v5.10/arch/x86/um/
tls_32.c
102 * Actually, now if it wasn't flushed it gets cleared and in load_TLS()
103 * flushed to the host, which will clear it. in load_TLS()
106 if (!curr->flushed) { in load_TLS()
115 if (!(flags & O_FORCE) && curr->flushed) in load_TLS()
122 curr->flushed = 1; in load_TLS()
130 * present desc's, only if they haven't been flushed.
145 if (curr->flushed) in needs_TLS_update()
154 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
167 * will remain as flushed as it was. in clear_flushed_tls()
172 curr->flushed = 0; in clear_flushed_tls()
[all …]
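
The comments above describe a per-descriptor "flushed" flag: load_TLS() only pushes descriptors that have not yet been synced to the host, and clear_flushed_tls() marks everything dirty again after fork. Below is a minimal userspace sketch of that bookkeeping; the struct layout, helper names, and values are illustrative, not the kernel's own code.

/* Sketch of the flushed-flag pattern described above; names are illustrative. */
#include <stdio.h>

struct tls_desc {
	int present;          /* descriptor holds a valid entry            */
	int flushed;          /* entry has already been synced to the host */
	unsigned long base;   /* stands in for the real descriptor payload */
};

#define NR_TLS 3

/* Sync only the descriptors that have not been pushed to the host yet. */
static void load_tls(struct tls_desc *tls)
{
	for (int i = 0; i < NR_TLS; i++) {
		if (!tls[i].present || tls[i].flushed)
			continue;                 /* nothing to do */
		printf("flushing desc %d (base=%lu)\n", i, tls[i].base);
		tls[i].flushed = 1;               /* remember we synced it */
	}
}

/* After fork, nothing has been flushed to the child's host process yet,
 * so every descriptor must be marked dirty again. */
static void clear_flushed_tls(struct tls_desc *tls)
{
	for (int i = 0; i < NR_TLS; i++)
		tls[i].flushed = 0;
}

int main(void)
{
	struct tls_desc tls[NR_TLS] = { { 1, 0, 0x100 }, { 0, 0, 0 }, { 1, 0, 0x200 } };

	load_tls(tls);             /* flushes descriptors 0 and 2 */
	load_tls(tls);             /* no-op: everything already flushed */
	clear_flushed_tls(tls);    /* e.g. after fork() */
	load_tls(tls);             /* flushes 0 and 2 again */
	return 0;
}
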
/Linux-v5.10/arch/powerpc/include/asm/
security_features.h
45 // The L1-D cache can be flushed with ori r30,r30,0
48 // The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
71 // The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
74 // The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
89 // The L1-D cache should be flushed when entering the kernel
92 // The L1-D cache should be flushed after user accesses from the kernel
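
Each of the conditions quoted above corresponds to one bit in a security-feature mask that code tests before deciding whether an L1-D flush is required. The standalone sketch below shows that kind of bitmask test; the bit values and the hard-coded enable mask are made up for the example and do not reflect the kernel's implementation.

/* Illustrative feature-bit test; constants and values are assumptions. */
#include <stdbool.h>
#include <stdio.h>

#define FTR_L1D_FLUSH_HV      (1ULL << 0)  /* flush on HV 1->0 (hypervisor to guest) */
#define FTR_L1D_FLUSH_PR      (1ULL << 1)  /* flush on PR 0->1 (kernel to userspace) */
#define FTR_L1D_FLUSH_ENTRY   (1ULL << 2)  /* flush when entering the kernel         */
#define FTR_L1D_FLUSH_UACCESS (1ULL << 3)  /* flush after user accesses              */

static unsigned long long enabled_features;

static bool feature_enabled(unsigned long long ftr)
{
	return (enabled_features & ftr) != 0;
}

int main(void)
{
	/* Boot/firmware code would populate this; hard-coded here. */
	enabled_features = FTR_L1D_FLUSH_PR | FTR_L1D_FLUSH_ENTRY;

	if (feature_enabled(FTR_L1D_FLUSH_PR))
		printf("would flush L1-D before returning to userspace\n");
	if (feature_enabled(FTR_L1D_FLUSH_HV))
		printf("would flush L1-D before entering the guest\n");
	return 0;
}
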
/Linux-v5.10/mm/
percpu-vm.c
116 * @chunk: chunk the regions to be flushed belongs to
117 * @page_start: page index of the first page to be flushed
118 * @page_end: page index of the last page to be flushed + 1
173 * @chunk: pcpu_chunk the regions to be flushed belong to
174 * @page_start: page index of the first page to be flushed
175 * @page_end: page index of the last page to be flushed + 1
244 * @chunk: pcpu_chunk the regions to be flushed belong to
245 * @page_start: page index of the first page to be flushed
246 * @page_end: page index of the last page to be flushed + 1
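
The kerneldoc above repeatedly uses a half-open interval: @page_end is "the last page to be flushed + 1". A tiny sketch of that convention, with flush_one_page() as a stand-in for the real per-CPU cache/TLB flush work:

/* Half-open [page_start, page_end) range; flush_one_page() is a stand-in. */
#include <stdio.h>

static void flush_one_page(int page)
{
	printf("flushing page %d\n", page);
}

/* page_end is exclusive, so start == end means nothing to flush. */
static void flush_region(int page_start, int page_end)
{
	for (int page = page_start; page < page_end; page++)
		flush_one_page(page);
}

int main(void)
{
	flush_region(4, 8);   /* flushes pages 4, 5, 6, 7 */
	flush_region(2, 2);   /* empty range: no work */
	return 0;
}
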
/Linux-v5.10/kernel/printk/
printk_safe.c
22 * is later flushed into the main ring buffer via IRQ work.
52 /* Get flushed in a more safe context. */
64 * The messages are flushed from irq work (or from panic()), possibly,
100 * Do it once again if the buffer has been flushed in the meantime. in printk_safe_log_store()
218 goto out; /* Someone else has already flushed the buffer. */ in __printk_safe_flush()
241 * The buffers are flushed automatically via IRQ work. This function
243 * been flushed at some point.
288 * one writer running. But the buffer might get flushed from another
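
The comments above describe the deferred-flush idea: messages are stored into a side buffer from a context where printing is unsafe, and a later, safer context (irq work or panic) flushes them into the main log. A simplified single-threaded userspace sketch of that pattern; buffer size, names, and the drop-on-overflow policy are assumptions, not the kernel's code:

/* Deferred-flush sketch; names and sizes are illustrative. */
#include <stdio.h>
#include <string.h>

static char safe_buf[256];
static size_t safe_len;

/* Called from the "unsafe" context: only append into the side buffer. */
static void safe_log_store(const char *msg)
{
	size_t n = strlen(msg);

	if (safe_len + n + 1 > sizeof(safe_buf))
		return;                            /* drop on overflow, like any bounded buffer */
	memcpy(safe_buf + safe_len, msg, n);
	safe_len += n;
	safe_buf[safe_len++] = '\n';
}

/* Called later from a safe context (irq work / panic in the real code). */
static void safe_flush(void)
{
	if (!safe_len)
		return;                            /* someone else already flushed the buffer */
	fwrite(safe_buf, 1, safe_len, stdout);     /* push into the "main" log */
	safe_len = 0;
}

int main(void)
{
	safe_log_store("deferred message 1");
	safe_log_store("deferred message 2");
	safe_flush();                              /* prints both messages */
	safe_flush();                              /* nothing left to do */
	return 0;
}
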
/Linux-v5.10/arch/x86/lib/
usercopy_64.c
89 unsigned long flushed, dest = (unsigned long) dst; in __copy_user_flushcache() local
109 flushed = dest - (unsigned long) dst; in __copy_user_flushcache()
110 if (size > flushed && !IS_ALIGNED(size - flushed, 8)) in __copy_user_flushcache()
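
In the snippet above, `flushed` is the number of leading bytes consumed while aligning the destination, and extra cache-flush work is needed only if the remaining tail is not a multiple of 8 bytes. A standalone sketch of that arithmetic with made-up addresses:

/* Sketch of the tail-alignment check; values are illustrative. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static bool tail_needs_flush(uintptr_t dst, uintptr_t dest, size_t size)
{
	size_t flushed = dest - dst;   /* bytes handled while aligning dst */

	return size > flushed && !IS_ALIGNED(size - flushed, 8);
}

int main(void)
{
	/* dst started at ...3 and was rounded up to ...8, so 5 bytes were "flushed". */
	printf("%d\n", tail_needs_flush(0x1003, 0x1008, 29)); /* 24 left, aligned -> 0 */
	printf("%d\n", tail_needs_flush(0x1003, 0x1008, 30)); /* 25 left, needs flush -> 1 */
	return 0;
}
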
/Linux-v5.10/drivers/gpu/drm/
drm_cache.c
68 mb(); /*Also used after CLFLUSH so that all cache is flushed*/ in drm_cache_flush_clflush()
74 * @pages: List of pages to be flushed.
132 mb(); /*Make sure that all cache line entry is flushed*/ in drm_clflush_sg()
167 mb(); /*Ensure that every data cache line entry is flushed*/ in drm_clflush_virt_range()
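
The pattern shown above is a clflush loop over every cache line covering a byte range, with memory barriers around the loop so the flushes are ordered against surrounding accesses. Below is an x86-only userspace sketch of the same idea using the SSE2 intrinsics rather than the kernel's helpers; the 64-byte line size is an assumption for the example.

/* x86 clflush-range sketch using SSE2 intrinsics, not the kernel's API. */
#include <emmintrin.h>   /* _mm_clflush, _mm_mfence */
#include <stdint.h>
#include <stdio.h>

#define CACHELINE 64     /* assumed line size for the example */

static void clflush_virt_range(void *addr, size_t length)
{
	uintptr_t start = (uintptr_t)addr & ~(uintptr_t)(CACHELINE - 1);
	uintptr_t end = (uintptr_t)addr + length;

	_mm_mfence();                               /* order against earlier writes */
	for (uintptr_t p = start; p < end; p += CACHELINE)
		_mm_clflush((const void *)p);
	_mm_mfence();                               /* ensure every line is flushed */
}

int main(void)
{
	static char buf[4096];

	buf[0] = 1;
	clflush_virt_range(buf, sizeof(buf));
	printf("flushed %zu bytes\n", sizeof(buf));
	return 0;
}
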
/Linux-v5.10/drivers/gpu/drm/i915/gt/
gen2_engine_cs.c
50 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is in gen4_emit_flush_rcs()
51 * also flushed at 2d versus 3d pipeline switches. in gen4_emit_flush_rcs()
55 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if in gen4_emit_flush_rcs()
56 * MI_READ_FLUSH is set, and is always flushed on 965. in gen4_emit_flush_rcs()
71 * are flushed at any MI_FLUSH. in gen4_emit_flush_rcs()
/Linux-v5.10/arch/x86/um/asm/
processor_32.h
17 unsigned flushed:1; member
30 { .present = 0, .flushed = 0 } }, \
/Linux-v5.10/include/uapi/drm/
v3d_drm.h
60 * The L1T, slice, L2C, L2T, and GCA caches will be flushed before
61 * each CL executes. The VCD cache should be flushed (if necessary)
63 * flushed by the time the render done IRQ happens, which is the
65 * possible using TMU writes) must be flushed by the caller using the
/Linux-v5.10/drivers/gpu/drm/savage/
savage_bci.c
319 dev_priv->dma_pages[i].flushed = 0; in savage_dma_init()
338 dev_priv->dma_pages[i].flushed = 0; in savage_dma_reset()
400 dev_priv->dma_pages[i].flushed = 0; in savage_dma_alloc()
438 dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed) in savage_dma_flush()
446 DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, " in savage_dma_flush()
448 first, cur, dev_priv->dma_pages[first].flushed, in savage_dma_flush()
467 dev_priv->dma_pages[first].flushed) * 4; in savage_dma_flush()
469 dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed; in savage_dma_flush()
488 dev_priv->dma_pages[i].flushed = 0; in savage_dma_flush()
494 dev_priv->dma_pages[cur].flushed = 0; in savage_dma_flush()
[all …]
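
The savage driver lines above show per-page used/flushed counters: `used` records how many entries have been written, `flushed` how many have already been handed to the hardware, and a flush submits only the delta before catching `flushed` up to `used`. A standalone sketch of that bookkeeping with illustrative names:

/* used/flushed delta bookkeeping; struct and names are illustrative. */
#include <stdio.h>

struct dma_page {
	unsigned used;     /* entries written by the driver           */
	unsigned flushed;  /* entries already submitted to the engine */
};

static void dma_flush(struct dma_page *page, int idx)
{
	if (page->used == page->flushed)
		return;                               /* nothing new to submit */
	printf("page %d: submitting entries %u..%u\n",
	       idx, page->flushed, page->used - 1);
	page->flushed = page->used;                   /* remember what went out */
}

int main(void)
{
	struct dma_page page = { 0, 0 };

	page.used = 5;
	dma_flush(&page, 0);   /* submits entries 0..4 */
	page.used = 8;
	dma_flush(&page, 0);   /* submits entries 5..7 */
	dma_flush(&page, 0);   /* no-op */
	return 0;
}
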
/Linux-v5.10/drivers/infiniband/hw/cxgb4/
cq.c
205 int flushed = 0; in c4iw_flush_rq() local
212 flushed++; in c4iw_flush_rq()
214 return flushed; in c4iw_flush_rq()
240 int flushed = 0; in c4iw_flush_sq() local
252 swsqe->flushed = 1; in c4iw_flush_sq()
257 flushed++; in c4iw_flush_sq()
261 wq->sq.flush_cidx += flushed; in c4iw_flush_sq()
264 return flushed; in c4iw_flush_sq()
291 swsqe->flushed = 1; in flush_completed_wrs()
362 if (qhp->wq.flushed == 1) in c4iw_flush_hw_cq()
[all …]
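
The cxgb4 snippets above walk the pending entries of a software queue, mark each one flushed, count them, and advance the flush consumer index by that count. A reduced sketch of the same flush-and-count pattern; the queue layout and names are assumptions, not the driver's types:

/* Flush-and-count sketch; queue layout and names are illustrative. */
#include <stdio.h>

#define SQ_DEPTH 8

struct swsqe {
	int flushed;
};

struct sw_queue {
	struct swsqe entries[SQ_DEPTH];
	unsigned flush_cidx;   /* index of the next entry to flush                */
	unsigned pidx;         /* producer index: one past the last posted entry  */
};

static int flush_sq(struct sw_queue *sq)
{
	int flushed = 0;

	while (sq->flush_cidx + flushed < sq->pidx) {
		struct swsqe *e = &sq->entries[(sq->flush_cidx + flushed) % SQ_DEPTH];

		e->flushed = 1;        /* completion will be reported as flushed */
		flushed++;
	}
	sq->flush_cidx += flushed;
	return flushed;
}

int main(void)
{
	struct sw_queue sq = { .flush_cidx = 0, .pidx = 3 };

	printf("flushed %d work requests\n", flush_sq(&sq)); /* 3 */
	printf("flushed %d work requests\n", flush_sq(&sq)); /* 0 */
	return 0;
}
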
restrack.c
44 if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed)) in fill_sq()
109 if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed)) in fill_swsqe()
/Linux-v5.10/arch/csky/abiv2/
cacheflush.c
68 /* Flush this hart's I$ now, and mark it as flushed. */ in flush_icache_mm_range()
75 * flushed. in flush_icache_mm_range()
/Linux-v5.10/drivers/net/ppp/
Dppp_mppe.c297 * set the FLUSHED bit. This is contrary to RFC 3078, sec. 3.1. in mppe_init()
439 int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; in mppe_decompress() local
476 if (!state->stateful && !flushed) { in mppe_decompress()
477 printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in " in mppe_decompress()
482 if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { in mppe_decompress()
483 printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on " in mppe_decompress()
522 if (!flushed) { in mppe_decompress()
547 if (flushed) in mppe_decompress()
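
The mppe_decompress() checks above enforce the FLUSHED bit: in stateless mode every packet must carry it, and in stateful mode it must at least be present when the low byte of the coherency count wraps; when it is set, the receiver also rekeys. A standalone sketch of just the validation logic; the bit value and function name are assumptions chosen for the example:

/* FLUSHED-bit validation sketch; constants and names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define BIT_FLUSHED 0x80   /* assumed bit position for the example */

static bool mppe_flushed_ok(bool stateful, unsigned ccount, unsigned bits)
{
	bool flushed = bits & BIT_FLUSHED;

	if (!stateful && !flushed) {
		fprintf(stderr, "FLUSHED bit not set in stateless mode\n");
		return false;          /* sender is violating the negotiated mode */
	}
	if (stateful && (ccount & 0xff) == 0xff && !flushed) {
		fprintf(stderr, "FLUSHED bit not set on flag packet\n");
		return false;          /* keys would drift out of sync */
	}
	return true;                   /* if flushed, the caller would also rekey */
}

int main(void)
{
	printf("%d\n", mppe_flushed_ok(false, 0x001, BIT_FLUSHED)); /* 1 */
	printf("%d\n", mppe_flushed_ok(false, 0x001, 0));           /* 0 */
	printf("%d\n", mppe_flushed_ok(true, 0x0ff, 0));            /* 0 */
	return 0;
}
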
/Linux-v5.10/drivers/gpu/drm/msm/disp/mdp5/
mdp5_ctl.h
51 * CTL registers need to be flushed after calling this function
63 * These masks are used to specify which block(s) need to be flushed
mdp5_ctl.c
44 /* when do CTL registers need to be flushed? (mask of trigger bits) */
52 struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
245 * CTL registers need to be flushed after calling this function
520 * CTL registers need to be flushed in some circumstances; if that is the
524 * Return H/W flushed bit mask.
/Linux-v5.10/include/linux/
mm_types_task.h
82 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
92 * flushed before IO is initiated or a stale TLB entry potentially
/Linux-v5.10/arch/riscv/mm/
cacheflush.c
46 /* Flush this hart's I$ now, and mark it as flushed. */ in flush_icache_mm()
53 * flushed. in flush_icache_mm()
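
Both the csky and riscv snippets above follow the same idea: flush the local hart's instruction cache immediately, mark that hart as no longer stale, and deal with the remaining harts separately. A sketch reduced to plain bitmask bookkeeping; the stale mask and the flush/IPI helpers are stand-ins for the kernel's cpumask machinery, not its actual logic:

/* Local-flush-then-mark sketch; mask handling and names are illustrative. */
#include <stdio.h>

static unsigned long stale_mask;   /* bit N set: hart N still has a stale I$ */

static void local_icache_flush(int hart)
{
	printf("hart %d: local instruction-cache flush\n", hart);
}

static void flush_icache_mm(int this_hart, unsigned long all_harts)
{
	/* Flush this hart's I$ now, and mark it as no longer stale. */
	local_icache_flush(this_hart);
	stale_mask &= ~(1UL << this_hart);

	/* Other harts still marked stale would be told to flush remotely. */
	unsigned long remote = stale_mask & all_harts;
	if (remote)
		printf("would notify harts with mask 0x%lx\n", remote);
}

int main(void)
{
	stale_mask = 0x7;              /* harts 0, 1, 2 all stale */
	flush_icache_mm(0, 0x7);       /* flush locally, notify harts 1 and 2 */
	return 0;
}
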
/Linux-v5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_ids.c
349 struct dma_fence *flushed; in amdgpu_vmid_grab_used() local
363 flushed = (*id)->flushed_updates; in amdgpu_vmid_grab_used()
364 if (updates && (!flushed || dma_fence_is_later(updates, flushed))) in amdgpu_vmid_grab_used()
382 if (updates && (!flushed || dma_fence_is_later(updates, flushed))) { in amdgpu_vmid_grab_used()
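
The test repeated in the snippet above decides whether a VMID needs flushing: there are pending page-table updates and either nothing has been flushed yet or the updates are newer than the last flushed ones. A sketch of that predicate with plain sequence numbers standing in for dma_fence objects:

/* needs-flush predicate sketch; sequence numbers stand in for fences. */
#include <stdbool.h>
#include <stdio.h>

#define NO_FENCE 0   /* stands in for a NULL dma_fence pointer */

static bool needs_flush(unsigned long updates, unsigned long flushed)
{
	return updates && (flushed == NO_FENCE || updates > flushed);
}

int main(void)
{
	printf("%d\n", needs_flush(42, NO_FENCE)); /* 1: never flushed       */
	printf("%d\n", needs_flush(42, 40));       /* 1: updates are newer   */
	printf("%d\n", needs_flush(42, 42));       /* 0: already flushed     */
	printf("%d\n", needs_flush(0, 40));        /* 0: no pending updates  */
	return 0;
}
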
/Linux-v5.10/Documentation/devicetree/bindings/arm/msm/
qcom,idle-state.txt
45 be flushed in s/w, before powering down the core.
50 be flushed, system bus, clocks - lowered, and SoC main XO clock gated and
/Linux-v5.10/drivers/gpu/drm/msm/disp/dpu1/
dpu_hw_top.h
36 * @split_flush_en: Allows both the paths to be flushed when master path is
37 * flushed
/Linux-v5.10/arch/x86/kernel/
amd_nb.c
458 int flushed, i; in amd_flush_garts() local
472 flushed = 0; in amd_flush_garts()
476 flushed++; in amd_flush_garts()
490 if (!flushed) in amd_flush_garts()
/Linux-v5.10/arch/x86/include/asm/
tlbbatch.h
10 * the PFNs being flushed..
/Linux-v5.10/arch/x86/boot/compressed/
sev-es.c
151 * GHCB Page must be flushed from the cache and mapped encrypted again. in sev_es_shutdown_ghcb()
159 * GHCB page is mapped encrypted again and flushed from the cache. in sev_es_shutdown_ghcb()
/Linux-v5.10/kernel/
irq_work.c
94 /* All work should have been flushed before going offline */ in irq_work_queue_on()
127 /* All work should have been flushed before going offline */ in irq_work_needs_cpu()
