/Linux-v6.1/arch/x86/hyperv/

mmu.c
     60  struct hv_tlb_flush *flush;                            in hyperv_flush_tlb_multi() local
     74  flush = *flush_pcpu;                                   in hyperv_flush_tlb_multi()
     76  if (unlikely(!flush)) {                                in hyperv_flush_tlb_multi()
     86  flush->address_space = virt_to_phys(info->mm->pgd);    in hyperv_flush_tlb_multi()
     87  flush->address_space &= CR3_ADDR_MASK;                 in hyperv_flush_tlb_multi()
     88  flush->flags = 0;                                      in hyperv_flush_tlb_multi()
     90  flush->address_space = 0;                              in hyperv_flush_tlb_multi()
     91  flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;    in hyperv_flush_tlb_multi()
     94  flush->processor_mask = 0;                             in hyperv_flush_tlb_multi()
     96  flush->flags |= HV_FLUSH_ALL_PROCESSORS;               in hyperv_flush_tlb_multi()
    [all …]

nested.c
     23  struct hv_guest_mapping_flush *flush;                               in hyperv_flush_guest_mapping() local
     36  flush = *flush_pcpu;                                                in hyperv_flush_guest_mapping()
     38  if (unlikely(!flush)) {                                             in hyperv_flush_guest_mapping()
     43  flush->address_space = as;                                          in hyperv_flush_guest_mapping()
     44  flush->flags = 0;                                                   in hyperv_flush_guest_mapping()
     47  flush, NULL);                                                       in hyperv_flush_guest_mapping()
     60  struct hv_guest_mapping_flush_list *flush,                          in hyperv_fill_flush_guest_mapping_list() argument
     77  flush->gpa_list[gpa_n].page.additional_pages = additional_pages;    in hyperv_fill_flush_guest_mapping_list()
     78  flush->gpa_list[gpa_n].page.largepage = false;                      in hyperv_fill_flush_guest_mapping_list()
     79  flush->gpa_list[gpa_n].page.basepfn = cur;                          in hyperv_fill_flush_guest_mapping_list()
    [all …]
/Linux-v6.1/net/ipv4/

tcp_offload.c
    193  int flush = 1;                                                   in tcp_gro_receive() local
    236  flush = NAPI_GRO_CB(p)->flush;                                   in tcp_gro_receive()
    237  flush |= (__force int)(flags & TCP_FLAG_CWR);                    in tcp_gro_receive()
    238  flush |= (__force int)((flags ^ tcp_flag_word(th2)) &            in tcp_gro_receive()
    240  flush |= (__force int)(th->ack_seq ^ th2->ack_seq);              in tcp_gro_receive()
    242  flush |= *(u32 *)((u8 *)th + i) ^                                in tcp_gro_receive()
    252  flush |= NAPI_GRO_CB(p)->flush_id;                               in tcp_gro_receive()
    263  flush |= (mss != skb_shinfo(skb)->gso_size);                     in tcp_gro_receive()
    265  flush |= (len - 1) >= mss;                                       in tcp_gro_receive()
    267  flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);    in tcp_gro_receive()
    [all …]
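These hits show the accumulator pattern tcp_gro_receive() uses to decide whether a held packet can keep absorbing new segments: fields that must match between the held and incoming headers are XORed into flush, conditions that always force delivery are ORed in, and any nonzero result stops coalescing. A minimal standalone sketch of that pattern follows; the struct and the flag value are simplified stand-ins, not the kernel's tcphdr or GRO control block.

#include <stdint.h>

/* Simplified stand-in for the TCP header fields compared above;
 * not the kernel's struct tcphdr. */
struct toy_tcphdr {
	uint32_t seq;
	uint32_t ack_seq;
	uint16_t window;
	uint16_t flags;		/* CWR assumed to live at bit 0x0080 here */
};

/* Nonzero return means "flush the held packet instead of merging". */
static int gro_needs_flush(const struct toy_tcphdr *held,
			   const struct toy_tcphdr *incoming,
			   uint32_t held_payload_len)
{
	uint32_t flush = 0;

	flush |= incoming->flags & 0x0080;			  /* CWR always forces a flush */
	flush |= incoming->ack_seq ^ held->ack_seq;		  /* ACK number must match     */
	flush |= (uint32_t)(incoming->window ^ held->window);	  /* window must match         */
	flush |= (held->seq + held_payload_len) ^ incoming->seq; /* must be the next segment  */

	return flush != 0;
}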
/Linux-v6.1/fs/btrfs/

space-info.c
    345  enum btrfs_reserve_flush_enum flush)                              in calc_available_free_space() argument
    372  if (flush == BTRFS_RESERVE_FLUSH_ALL)                             in calc_available_free_space()
    397  enum btrfs_reserve_flush_enum flush)                              in btrfs_can_overcommit() argument
    410  avail = calc_available_free_space(fs_info, space_info, flush);    in btrfs_can_overcommit()
    435  enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;     in btrfs_try_granting_tickets() local
    450  flush)) {                                                         in btrfs_try_granting_tickets()
    465  flush = BTRFS_RESERVE_FLUSH_ALL;                                  in btrfs_try_granting_tickets()
   1074  space_info->flush = 0;                                            in btrfs_async_reclaim_metadata_space()
   1086  space_info->flush = 0;                                            in btrfs_async_reclaim_metadata_space()
   1129  space_info->flush = 0;                                            in btrfs_async_reclaim_metadata_space()
    [all …]

delalloc-space.c
    118  enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_DATA;    in btrfs_alloc_data_chunk_ondemand() local
    124  flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE;                      in btrfs_alloc_data_chunk_ondemand()
    126  return btrfs_reserve_data_bytes(fs_info, bytes, flush);            in btrfs_alloc_data_chunk_ondemand()
    134  enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_DATA;    in btrfs_check_data_free_space() local
    143  flush = BTRFS_RESERVE_NO_FLUSH;                                    in btrfs_check_data_free_space()
    145  flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE;                      in btrfs_check_data_free_space()
    147  ret = btrfs_reserve_data_bytes(fs_info, len, flush);               in btrfs_check_data_free_space()
    306  enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;     in btrfs_delalloc_reserve_metadata() local
    319  flush = BTRFS_RESERVE_NO_FLUSH;                                    in btrfs_delalloc_reserve_metadata()
    322  flush = BTRFS_RESERVE_FLUSH_LIMIT;                                 in btrfs_delalloc_reserve_metadata()
    [all …]

space-info.h
     47  unsigned int flush:1;	/* set if we are trying to make space */    member
    141  enum btrfs_reserve_flush_enum flush);
    146  enum btrfs_reserve_flush_enum flush);
    159  enum btrfs_reserve_flush_enum flush);
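Across these three btrfs files the recurring theme is enum btrfs_reserve_flush_enum: each caller declares how much work the reservation path may do on its behalf (no flushing for the free-space inode or nodatacow writes, a limited flush for some metadata reservations, a full flush otherwise), and the overcommit check becomes more permissive when a full flush is allowed. The sketch below only illustrates that idea; the enum, the helper, and the halve-versus-eighth factors are made-up stand-ins, not btrfs's API.

#include <stdbool.h>
#include <stdint.h>

/* Toy stand-in for btrfs_reserve_flush_enum: higher levels allow the
 * reservation code to do more work before giving up. */
enum toy_flush_level {
	TOY_NO_FLUSH,		/* never block to make space           */
	TOY_FLUSH_LIMIT,	/* bounded reclaim only                 */
	TOY_FLUSH_ALL,		/* full delalloc flush / commit allowed */
};

/*
 * Toy analogue of an overcommit check: when an aggressive flush is
 * permitted, a larger share of the not-yet-allocated space may be
 * promised, because the filesystem can reclaim it on demand.
 */
static bool toy_can_overcommit(uint64_t used, uint64_t total,
			       uint64_t unallocated, uint64_t bytes,
			       enum toy_flush_level flush)
{
	uint64_t avail = unallocated;

	if (flush == TOY_FLUSH_ALL)
		avail >>= 1;	/* willing to overcommit more          */
	else
		avail >>= 3;	/* stay conservative without a flush   */

	return used + bytes <= total + avail;
}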
/Linux-v6.1/lib/

decompress_inflate.c
     44  long (*flush)(void*, unsigned long),                               in __gunzip()
     53  if (flush) {                                                       in __gunzip()
     82  strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :    in __gunzip()
    138  if (!flush) {                                                      in __gunzip()
    159  if (flush && strm->next_out > out_buf) {                           in __gunzip()
    161  if (l != flush(out_buf, l)) {                                      in __gunzip()
    193  if (flush)                                                         in __gunzip()
    202  long (*flush)(void*, unsigned long),                               in gunzip()
    207  return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error);    in gunzip()
    212  long (*flush)(void*, unsigned long),                               in __decompress()
    [all …]
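All of the lib/decompress_*.c helpers listed here share one calling convention: an optional fill() callback supplies compressed input and an optional flush() callback consumes decompressed output, and flush() must return exactly the byte count it was handed (the "l != flush(out_buf, l)" check above treats anything else as a failure). Below is a minimal sketch of a conforming flush callback; the destination buffer and names are invented for illustration, not kernel code.

#include <string.h>

/* Hypothetical destination for the decompressed stream. */
static unsigned char out_image[1 << 20];
static unsigned long out_used;

/*
 * flush() callback in the style expected by __gunzip()/unxz()/unlzo():
 * accept @size bytes from @buf and return @size; returning anything
 * smaller makes the decompressor bail out with an error.
 */
static long my_flush(void *buf, unsigned long size)
{
	if (out_used + size > sizeof(out_image))
		return 0;	/* short return == error to the caller */

	memcpy(out_image + out_used, buf, size);
	out_used += size;
	return (long)size;
}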
decompress_unxz.c
    253  long (*flush)(void *src, unsigned long size),             in unxz()
    269  if (fill == NULL && flush == NULL)                        in unxz()
    277  if (flush == NULL) {                                      in unxz()
    299  if (fill == NULL && flush == NULL) {                      in unxz()
    325  if (flush != NULL && (b.out_pos == b.out_size             in unxz()
    332  if (flush(b.out, b.out_pos) != (long)b.out_pos)           in unxz()
    342  if (flush != NULL)                                        in unxz()
    382  if (flush != NULL)                                        in unxz()
    400  long (*flush)(void*, unsigned long),                      in __decompress()
    405  return unxz(buf, len, fill, flush, out_buf, pos, error);  in __decompress()

decompress_unzstd.c
    165  long (*flush)(void*, unsigned long),                                      in __unzstd()
    189  if (fill == NULL && flush == NULL)                                        in __unzstd()
    225  if (flush != NULL) {                                                      in __unzstd()
    306  if (flush != NULL && out.pos > 0) {                                       in __unzstd()
    307  if (out.pos != flush(out.dst, out.pos)) {                                 in __unzstd()
    333  long (*flush)(void*, unsigned long),                                      in unzstd()
    338  return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error);           in unzstd()
    343  long (*flush)(void*, unsigned long),                                      in __decompress()
    348  return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error);     in __decompress()

decompress_unlz4.c
     33  long (*flush)(void *, unsigned long),                            in unlz4()
     52  } else if (!flush) {                                             in unlz4()
    174  if (flush && flush(outp, dest_len) != dest_len)                  in unlz4()
    209  long (*flush)(void*, unsigned long),                             in __decompress()
    215  return unlz4(buf, in_len - 4, fill, flush, output, posp, error);  in __decompress()

decompress_unlzo.c
     99  long (*flush)(void *, unsigned long),                        in unlzo()
    112  } else if (!flush) {                                         in unlzo()
    243  if (flush && flush(out_buf, dst_len) != dst_len)             in unlzo()
    279  long (*flush)(void*, unsigned long),                         in __decompress()
    284  return unlzo(buf, len, fill, flush, out_buf, pos, error);    in __decompress()

/Linux-v6.1/include/net/

gro.h
     24  u16 flush;    member
     99  NAPI_GRO_CB(skb)->flush |= 1;
    114  NAPI_GRO_CB(skb)->flush |= 1;
    348  static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)    in skb_gro_flush_final() argument
    351  NAPI_GRO_CB(skb)->flush |= flush;    in skb_gro_flush_final()
    355  int flush,    in skb_gro_flush_final_remcsum() argument
    359  NAPI_GRO_CB(skb)->flush |= flush;    in skb_gro_flush_final_remcsum()
    365  static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)    in skb_gro_flush_final() argument
    367  NAPI_GRO_CB(skb)->flush |= flush;    in skb_gro_flush_final()
    371  int flush,    in skb_gro_flush_final_remcsum() argument
    [all …]
/Linux-v6.1/lib/zlib_deflate/

deflate.c
     60  #define DEFLATE_HOOK(strm, flush, bstate) 0    argument
     69  typedef block_state (*compress_func) (deflate_state *s, int flush);
     73  static block_state deflate_stored (deflate_state *s, int flush);
     74  static block_state deflate_fast (deflate_state *s, int flush);
     75  static block_state deflate_slow (deflate_state *s, int flush);
    331  int flush                                                  in zlib_deflate() argument
    338  flush > Z_FINISH || flush < 0) {                           in zlib_deflate()
    344  (s->status == FINISH_STATE && flush != Z_FINISH)) {        in zlib_deflate()
    351  s->last_flush = flush;                                     in zlib_deflate()
    393  } else if (strm->avail_in == 0 && flush <= old_flush &&    in zlib_deflate()
    [all …]
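The flush argument validated at lines 338 and 344 follows the usual zlib semantics: Z_NO_FLUSH while more input will arrive, Z_SYNC_FLUSH or Z_FULL_FLUSH to force out a byte-aligned block, and Z_FINISH on the last call. The userspace zlib example below exercises the same parameter; the in-kernel copy manages its workspace differently, so treat this as an illustration rather than a drop-in for lib/zlib_deflate.

#include <string.h>
#include <zlib.h>

/* Compress a whole buffer in one call, finishing the stream. */
static int compress_once(const unsigned char *in, size_t in_len,
			 unsigned char *out, size_t out_cap,
			 size_t *out_len)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
		return -1;

	strm.next_in = (unsigned char *)in;
	strm.avail_in = (uInt)in_len;
	strm.next_out = out;
	strm.avail_out = (uInt)out_cap;

	/* All input is already present, so ask for the final block now;
	 * a streaming caller would pass Z_NO_FLUSH until the last chunk. */
	ret = deflate(&strm, Z_FINISH);

	*out_len = out_cap - strm.avail_out;
	deflateEnd(&strm);
	return ret == Z_STREAM_END ? 0 : -1;
}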
/Linux-v6.1/tools/testing/selftests/drivers/net/netdevsim/

nexthop.sh
    147  $IP nexthop flush &> /dev/null
    182  $IP nexthop flush &> /dev/null
    202  $IP nexthop flush &> /dev/null
    247  $IP nexthop flush &> /dev/null
    267  $IP nexthop flush &> /dev/null
    289  $IP nexthop flush &> /dev/null
    314  $IP nexthop flush &> /dev/null
    343  $IP nexthop flush &> /dev/null
    373  $IP nexthop flush &> /dev/null
    422  $IP nexthop flush &> /dev/null
    [all …]

/Linux-v6.1/block/

blk-flush.c
    124  return 1 << ffz(rq->flush.seq);                                 in blk_flush_cur_seq()
    138  rq->end_io = rq->flush.saved_end_io;                            in blk_flush_restore_request()
    178  BUG_ON(rq->flush.seq & seq);                                    in blk_flush_complete_seq()
    179  rq->flush.seq |= seq;                                           in blk_flush_complete_seq()
    193  list_move_tail(&rq->flush.list, pending);                       in blk_flush_complete_seq()
    197  list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);     in blk_flush_complete_seq()
    208  list_del_init(&rq->flush.list);                                 in blk_flush_complete_seq()
    264  list_for_each_entry_safe(rq, n, running, flush.list) {          in flush_end_io()
    298  list_first_entry(pending, struct request, flush.list);          in blk_kick_flush()
    443  memset(&rq->flush, 0, sizeof(rq->flush));                       in blk_insert_flush()
    [all …]
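The expression "1 << ffz(rq->flush.seq)" at line 124 is the core of the flush state machine: each request records the steps it has completed (pre-flush, data write, post-flush) as bits, and the current step is simply the lowest bit that is still clear. A small standalone illustration of that bookkeeping follows, using stand-in names rather than the kernel's REQ_FSEQ_* constants and ignoring the cases where steps are skipped.

#include <stdio.h>

/* Stand-ins for the per-request flush sequence steps. */
#define SEQ_PREFLUSH	(1u << 0)
#define SEQ_DATA	(1u << 1)
#define SEQ_POSTFLUSH	(1u << 2)
#define SEQ_DONE	(1u << 3)

/* ffz(): index of the lowest zero bit, as used by blk_flush_cur_seq(). */
static unsigned int ffz32(unsigned int x)
{
	return (unsigned int)__builtin_ctz(~x);
}

static unsigned int cur_seq(unsigned int completed)
{
	return 1u << ffz32(completed);
}

int main(void)
{
	unsigned int completed = 0;

	/* Walk the sequence the way blk_flush_complete_seq() does:
	 * mark each step done and look up the next one. */
	while (!(completed & SEQ_DONE)) {
		unsigned int step = cur_seq(completed);

		printf("next step: %#x\n", step);
		completed |= step;	/* rq->flush.seq |= seq; */
	}
	return 0;
}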
/Linux-v6.1/Documentation/x86/

tlb.rst
     12  from areas other than the one we are trying to flush will be
     21  1. The size of the flush being performed. A flush of the entire
     25  be no collateral damage caused by doing the global flush, and
     26  all of the individual flush will have ended up being wasted
     29  damage we do with a full flush. So, the larger the TLB, the
     30  more attractive an individual flush looks. Data and
     37  especially the contents of the TLB during a given flush. The
     38  sizes of the flush will vary greatly depending on the workload as
     48  This will cause us to do the global flush for more cases.
     53  Despite the fact that a single individual flush on x86 is
    [all …]
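This document is weighing a full TLB flush against flushing pages one at a time, and on x86 the decision comes down to a page-count ceiling: ranges larger than the ceiling get a global flush, smaller ones are flushed page by page. The sketch below only restates that heuristic; the names and the default value are placeholders, not the kernel's flush_tlb_mm_range().

/*
 * Illustration of the heuristic described above: below some page-count
 * ceiling, flushing each page individually is cheaper than wiping the
 * whole TLB; above it, a full flush wins.  Names and the value here
 * are stand-ins, not the kernel's implementation.
 */
#define FLUSH_CEILING_PAGES	33UL

static void flush_one_page(unsigned long addr)	{ /* invlpg-style single-page flush */ }
static void flush_everything(void)		{ /* full TLB flush */ }

static void flush_range(unsigned long start, unsigned long end,
			unsigned long page_size)
{
	unsigned long npages = (end - start) / page_size;

	if (npages > FLUSH_CEILING_PAGES) {
		flush_everything();
		return;
	}

	for (unsigned long addr = start; addr < end; addr += page_size)
		flush_one_page(addr);
}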
/Linux-v6.1/drivers/md/

dm-delay.c
     37  struct delay_class flush;    member
    131  if (dc->flush.dev)                                                 in delay_dtr()
    132  dm_put_device(ti, dc->flush.dev);                                  in delay_dtr()
    205  ret = delay_class_ctr(ti, &dc->flush, argv);                       in delay_ctr()
    215  ret = delay_class_ctr(ti, &dc->flush, argv + 3);                   in delay_ctr()
    221  ret = delay_class_ctr(ti, &dc->flush, argv + 6);                   in delay_ctr()
    291  c = &dc->flush;                                                    in delay_map()
    315  DMEMIT("%u %u %u", dc->read.ops, dc->write.ops, dc->flush.ops);    in delay_status()
    326  DMEMIT_DELAY_CLASS(&dc->flush);                                    in delay_status()
    348  ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data);       in delay_iterate_devices()

/Linux-v6.1/lib/zlib_dfltcc/

dfltcc_deflate.c
    102  int flush,                                                           in dfltcc_deflate() argument
    121  no_flush = flush == Z_NO_FLUSH;                                      in dfltcc_deflate()
    127  if (flush == Z_FINISH && strm->avail_in == 0 && !param->cf) {        in dfltcc_deflate()
    186  need_empty_block = flush == Z_FINISH && param->bcf && !param->bhf;   in dfltcc_deflate()
    194  if (flush == Z_FINISH && !param->bcf)                                in dfltcc_deflate()
    261  if (flush == Z_FINISH) {                                             in dfltcc_deflate()
    270  if (flush == Z_FULL_FLUSH)                                           in dfltcc_deflate()
    272  *result = flush == Z_NO_FLUSH ? need_more : block_done;              in dfltcc_deflate()

dfltcc.h
    109  int flush,
    119  int flush, int *ret);
    138  #define INFLATE_TYPEDO_HOOK(strm, flush) \    argument
    143  action = dfltcc_inflate((strm), (flush), &ret); \
/Linux-v6.1/drivers/gpu/drm/etnaviv/

etnaviv_buffer.c
     92  u32 flush = 0;                                                     in etnaviv_cmd_select_pipe() local
    103  flush = VIVS_GL_FLUSH_CACHE_PE2D;                                  in etnaviv_cmd_select_pipe()
    105  flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;     in etnaviv_cmd_select_pipe()
    107  CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);                in etnaviv_cmd_select_pipe()
    236  u32 link_target, flush = 0;                                        in etnaviv_buffer_end() local
    243  flush = VIVS_GL_FLUSH_CACHE_PE2D;                                  in etnaviv_buffer_end()
    245  flush = VIVS_GL_FLUSH_CACHE_DEPTH |                                in etnaviv_buffer_end()
    251  if (flush) {                                                       in etnaviv_buffer_end()
    267  CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);                in etnaviv_buffer_end()
    414  u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |                   in etnaviv_buffer_queue() local
    [all …]

/Linux-v6.1/net/ipv6/

ip6_offload.c
     35  NAPI_GRO_CB(skb)->flush |= 1, NULL : \
    217  u16 flush = 1;                                                     in ipv6_gro_receive() local
    230  flush += ntohs(iph->payload_len) != skb_gro_len(skb);              in ipv6_gro_receive()
    251  flush--;                                                           in ipv6_gro_receive()
    284  NAPI_GRO_CB(p)->flush |= !!((first_word & htonl(0x0FF00000)) |     in ipv6_gro_receive()
    286  NAPI_GRO_CB(p)->flush |= flush;                                    in ipv6_gro_receive()
    296  NAPI_GRO_CB(skb)->flush |= flush;                                  in ipv6_gro_receive()
    304  skb_gro_flush_final(skb, pp, flush);                               in ipv6_gro_receive()
    315  NAPI_GRO_CB(skb)->flush = 1;                                       in sit_ip6ip6_gro_receive()
    330  NAPI_GRO_CB(skb)->flush = 1;                                       in ip4ip6_gro_receive()

/Linux-v6.1/Documentation/block/

stat.rst
     44  flush I/Os      requests      number of flush I/Os processed
     45  flush ticks     milliseconds  total wait time for flush requests
     53  flush I/Os
     56  These values increment when an flush I/O request completes.
     58  Block layer combines flush requests and executes at most one at a time.
     59  This counts flush requests executed by disk. Not tracked for partitions.
     75  read ticks, write ticks, discard ticks, flush ticks
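Per the field list above, the flush counters sit at the end of /sys/block/<device>/stat, after the discard fields, giving 17 space-separated values in total. Below is a small userspace reader under that assumption; the field positions and the device name are taken as an example of the layout this file documents, so adjust if the running kernel exposes fewer fields.

#include <stdio.h>

int main(void)
{
	unsigned long long f[17] = { 0 };
	FILE *fp = fopen("/sys/block/sda/stat", "r");

	if (!fp) {
		perror("fopen");
		return 1;
	}

	for (int i = 0; i < 17; i++) {
		if (fscanf(fp, "%llu", &f[i]) != 1)
			break;
	}
	fclose(fp);

	/* Assumed layout: "flush I/Os" and "flush ticks" are the last two fields. */
	printf("flush I/Os:  %llu\n", f[15]);
	printf("flush ticks: %llu ms\n", f[16]);
	return 0;
}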
/Linux-v6.1/drivers/gpu/drm/vmwgfx/

vmwgfx_overlay.c
     69  SVGAEscapeVideoFlush flush;    member
     83  fill_escape(&cmd->escape, sizeof(cmd->flush));                                in fill_flush()
     84  cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;                          in fill_flush()
     85  cmd->flush.streamId = stream_id;                                              in fill_flush()
     99  struct vmw_escape_video_flush *flush;                                         in vmw_overlay_send_put() local
    123  fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;      in vmw_overlay_send_put()
    131  flush = (struct vmw_escape_video_flush *)&items[num_items];                   in vmw_overlay_send_put()
    170  fill_flush(flush, arg->stream_id);                                            in vmw_overlay_send_put()
    190  struct vmw_escape_video_flush flush;                                          in vmw_overlay_send_stop() member
    212  fill_flush(&cmds->flush, stream_id);                                          in vmw_overlay_send_stop()

/Linux-v6.1/arch/arm/mm/

cache-v4.S
     40  mcr p15, 0, r0, c7, c7, 0    @ flush ID cache
     59  mcr p15, 0, ip, c7, c7, 0    @ flush ID cache
    115  mcr p15, 0, r0, c7, c7, 0    @ flush ID cache

/Linux-v6.1/arch/riscv/mm/

dma-noncoherent.c
     28  ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);         in arch_sync_dma_for_device()
     45  ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);         in arch_sync_dma_for_cpu()
     56  ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);    in arch_dma_prep_coherent()
|