| /Linux-v5.4/arch/powerpc/mm/ |
| D | dma-noncoherent.c |
      62   size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);   in __dma_sync_page_highmem()  local
      63   size_t cur_size = seg_size;   in __dma_sync_page_highmem()
      65   int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;   in __dma_sync_page_highmem()
      74   __dma_sync((void *)start, seg_size, direction);   in __dma_sync_page_highmem()
      79   seg_size = min((size_t)PAGE_SIZE, size - cur_size);   in __dma_sync_page_highmem()
      82   cur_size += seg_size;   in __dma_sync_page_highmem()
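The __dma_sync_page_highmem() hits above walk a buffer that may start mid-page one page-bounded piece at a time: the first piece is trimmed at the page boundary and the remaining byte count is rounded up to whole pages. Below is a minimal user-space sketch of that arithmetic only; PAGE_SIZE, walk_page_segments() and process_segment() are illustrative stand-ins, not the kernel interfaces.

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    /* Stand-in for the per-segment work (__dma_sync() in the kernel hit). */
    static void process_segment(size_t start, size_t len)
    {
            printf("segment at offset %zu, %zu bytes\n", start, len);
    }

    /* offset is the start position inside the first page (assumed < PAGE_SIZE). */
    static void walk_page_segments(size_t offset, size_t size)
    {
            size_t seg_size = (PAGE_SIZE - offset < size) ? PAGE_SIZE - offset : size;
            size_t cur_size = seg_size;
            size_t start = offset;
            /* one (possibly partial) first segment, then the rest rounded up to whole pages */
            int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1) / PAGE_SIZE;

            while (nr_segs--) {
                    process_segment(start, seg_size);
                    start += seg_size;            /* every later segment starts page aligned */
                    seg_size = (size - cur_size < PAGE_SIZE) ? size - cur_size : PAGE_SIZE;
                    cur_size += seg_size;
            }
    }

    int main(void)
    {
            walk_page_segments(1000, 10000);      /* 3 segments: 3096, 4096 and 2808 bytes */
            return 0;
    }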
|
| /Linux-v5.4/drivers/mmc/core/ |
| D | sdio_ops.c |
      123   unsigned int seg_size = card->host->max_seg_size;   in mmc_io_rw_extended()  local
      151   nents = DIV_ROUND_UP(left_size, seg_size);   in mmc_io_rw_extended()
      160   sg_set_buf(sg_ptr, buf + i * seg_size,   in mmc_io_rw_extended()
      161   min(seg_size, left_size));   in mmc_io_rw_extended()
      162   left_size -= seg_size;   in mmc_io_rw_extended()
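mmc_io_rw_extended() splits one SDIO data transfer into scatterlist entries of at most max_seg_size bytes, sizing the table with DIV_ROUND_UP() and letting the last entry come up short. A user-space sketch of the same chunking, assuming add_chunk() as a stand-in for sg_set_buf() and a local copy of the rounding macro:

    #include <stdio.h>

    /* Same rounding the kernel's DIV_ROUND_UP() macro performs. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Stand-in for sg_set_buf(): just report one chunk of the buffer. */
    static void add_chunk(const unsigned char *buf, unsigned int len)
    {
            printf("chunk at %p, %u bytes\n", (const void *)buf, len);
    }

    static void split_buffer(unsigned char *buf, unsigned int size, unsigned int seg_size)
    {
            unsigned int left_size = size;
            unsigned int nents = DIV_ROUND_UP(left_size, seg_size);
            unsigned int i;

            for (i = 0; i < nents; i++) {
                    /* the final chunk may be shorter than seg_size */
                    unsigned int len = (left_size < seg_size) ? left_size : seg_size;

                    add_chunk(buf + i * seg_size, len);
                    left_size -= len;
            }
    }

    int main(void)
    {
            unsigned char buf[1000];

            split_buffer(buf, sizeof(buf), 384);   /* 1000 bytes in 384-byte segments -> 3 chunks */
            return 0;
    }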
|
| /Linux-v5.4/tools/testing/selftests/kvm/lib/ |
| D | elf.c |
      165   size_t seg_size = seg_vend - seg_vstart + 1;   in kvm_vm_elf_load()  local
      167   vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart,   in kvm_vm_elf_load()
      175   memset(addr_gva2hva(vm, vaddr), 0, seg_size);   in kvm_vm_elf_load()
|
| /Linux-v5.4/drivers/net/wireless/ath/ath10k/ |
| D | swap.c |
      76   u32 seg_size;   in ath10k_swap_code_seg_free()  local
      84   seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);   in ath10k_swap_code_seg_free()
      85   dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],   in ath10k_swap_code_seg_free()
|
| /Linux-v5.4/drivers/bluetooth/ |
| D | btqca.c |
      225   static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,   in qca_tlv_send_segment()  argument
      235   cmd[1] = seg_size;   in qca_tlv_send_segment()
      236   memcpy(cmd + 2, data, seg_size);   in qca_tlv_send_segment()
      239   return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2,   in qca_tlv_send_segment()
      242   skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,   in qca_tlv_send_segment()
|
| /Linux-v5.4/drivers/lightnvm/ |
| D | pblk-rb.c |
      73    unsigned int seg_size)   in pblk_rb_init()  argument
      89    power_seg_sz = get_count_order(seg_size);   in pblk_rb_init()
      93    rb->seg_size = (1 << power_seg_sz);   in pblk_rb_init()
      147   entry->data = kaddr + (i * rb->seg_size);   in pblk_rb_init()
      331   memcpy(entry->data, data, rb->seg_size);   in __pblk_rb_write_entry()
      599   if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=   in pblk_rb_read_to_bio()
      600   rb->seg_size) {   in pblk_rb_read_to_bio()
      677   memcpy(data, entry->data, rb->seg_size);   in pblk_rb_copy_to_bio()
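pblk_rb_init() rounds the caller's seg_size up to a power of two with get_count_order() before using it as the stride between ring-buffer entries. A stand-alone approximation of that rounding; count_order() here mimics get_count_order() for n >= 1 and is not the kernel helper:

    #include <stdio.h>

    /* Mimics get_count_order() for n >= 1: smallest p with (1u << p) >= n. */
    static int count_order(unsigned int n)
    {
            int p = 0;

            while ((1u << p) < n)
                    p++;
            return p;
    }

    int main(void)
    {
            unsigned int seg_size = 4160;                        /* not a power of two */
            unsigned int rounded = 1u << count_order(seg_size);

            /* pblk_rb_init() keeps the rounded value, so entry i sits at
             * kaddr + i * rounded with a power-of-two stride. */
            printf("%u rounds up to %u\n", seg_size, rounded);   /* 4160 -> 8192 */
            return 0;
    }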
|
| /Linux-v5.4/drivers/staging/wusbcore/ |
| D | wa-xfer.c |
      146   size_t seg_size;   member
      348   && seg->result < xfer->seg_size   in __wa_xfer_is_done()
      546   <= xfer->seg_size)) {   in __wa_seg_calculate_isoc_frame_count()
      610   xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)   in __wa_xfer_setup_sizes()
      615   if (xfer->seg_size < maxpktsize) {   in __wa_xfer_setup_sizes()
      618   xfer->seg_size, maxpktsize);   in __wa_xfer_setup_sizes()
      622   xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;   in __wa_xfer_setup_sizes()
      632   int seg_size; /* don't care. */   in __wa_xfer_setup_sizes()  local
      634   index, &seg_size);   in __wa_xfer_setup_sizes()
      639   xfer->seg_size);   in __wa_xfer_setup_sizes()
      [all …]
|
| /Linux-v5.4/block/ |
| D | blk-integrity.c |
      31   unsigned int seg_size = 0;   in blk_rq_count_integrity_sg()  local
      40   if (seg_size + iv.bv_len > queue_max_segment_size(q))   in blk_rq_count_integrity_sg()
      43   seg_size += iv.bv_len;   in blk_rq_count_integrity_sg()
      47   seg_size = iv.bv_len;   in blk_rq_count_integrity_sg()
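blk_rq_count_integrity_sg() grows the current segment while the next integrity vector still fits under queue_max_segment_size(), and otherwise starts a new segment and bumps the count. The sketch below keeps only that size check (the real code also requires the vectors to be physically mergeable); struct vec and count_segments() are invented for illustration:

    #include <stdio.h>

    /* Made-up, flattened view of a bio_vec: only the length matters here. */
    struct vec {
            unsigned int len;
    };

    /* Accumulate-or-restart segment counting, as in blk_rq_count_integrity_sg(). */
    static unsigned int count_segments(const struct vec *v, int nr, unsigned int max_seg)
    {
            unsigned int segments = 0, seg_size = 0;
            int i;

            for (i = 0; i < nr; i++) {
                    if (segments && seg_size + v[i].len <= max_seg) {
                            seg_size += v[i].len;   /* still fits: extend the current segment */
                            continue;
                    }
                    segments++;                     /* would exceed max_seg: start a new one */
                    seg_size = v[i].len;
            }
            return segments;
    }

    int main(void)
    {
            struct vec v[] = { { 512 }, { 512 }, { 4096 }, { 512 } };

            printf("%u segments\n", count_segments(v, 4, 4096));   /* prints 3 */
            return 0;
    }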
|
| D | blk-merge.c |
      201   unsigned seg_size = 0;   in bvec_split_segs()  local
      204   seg_size = get_max_segment_size(q, bv->bv_offset + total_len);   in bvec_split_segs()
      205   seg_size = min(seg_size, len);   in bvec_split_segs()
      208   total_len += seg_size;   in bvec_split_segs()
      209   len -= seg_size;   in bvec_split_segs()
|
| /Linux-v5.4/arch/x86/kernel/ |
| D | amd_gart_64.c |
      386   unsigned int seg_size;   in gart_map_sg()  local
      396   seg_size = 0;   in gart_map_sg()
      416   (s->length + seg_size > max_seg_size) ||   in gart_map_sg()
      423   seg_size = 0;   in gart_map_sg()
      431   seg_size += s->length;   in gart_map_sg()
|
| /Linux-v5.4/drivers/infiniband/core/ |
| D | user_mad.c |
      297   size_t seg_size;   in copy_recv_mad()  local
      300   seg_size = packet->recv_wc->mad_seg_size;   in copy_recv_mad()
      303   if ((packet->length <= seg_size &&   in copy_recv_mad()
      305   (packet->length > seg_size &&   in copy_recv_mad()
      306   count < hdr_size(file) + seg_size))   in copy_recv_mad()
      313   seg_payload = min_t(int, packet->length, seg_size);   in copy_recv_mad()
      330   max_seg_payload = seg_size - offset;   in copy_recv_mad()
      430   seg++, left -= msg->seg_size, buf += msg->seg_size) {   in copy_rmpp_mad()
      432   min(left, msg->seg_size)))   in copy_rmpp_mad()
|
| D | mad.c |
      984    int seg_size, pad;   in get_pad_size()  local
      986    seg_size = mad_size - hdr_len;   in get_pad_size()
      987    if (data_len && seg_size) {   in get_pad_size()
      988    pad = seg_size - data_len % seg_size;   in get_pad_size()
      989    return pad == seg_size ? 0 : pad;   in get_pad_size()
      991    return seg_size;   in get_pad_size()
      1010   int left, seg_size, pad;   in alloc_send_rmpp_list()  local
      1012   send_buf->seg_size = mad_size - send_buf->hdr_len;   in alloc_send_rmpp_list()
      1014   seg_size = send_buf->seg_size;   in alloc_send_rmpp_list()
      1018   for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {   in alloc_send_rmpp_list()
      [all …]
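get_pad_size() computes how many padding bytes bring data_len up to a whole number of seg_size-byte RMPP segments, taking care that an exact multiple needs no pad. A small stand-alone version of just that calculation; pad_to_segment() is a simplified stand-in, not the kernel function, which derives seg_size from mad_size and hdr_len:

    #include <stdio.h>

    /* Bytes needed to round data_len up to a whole number of seg_size-byte
     * segments; an exact multiple needs no pad. */
    static int pad_to_segment(int seg_size, int data_len)
    {
            int pad;

            if (data_len && seg_size) {
                    pad = seg_size - data_len % seg_size;
                    return pad == seg_size ? 0 : pad;
            }
            return seg_size;
    }

    int main(void)
    {
            printf("%d\n", pad_to_segment(192, 500));   /* 500 + 76 = 576 = 3 * 192 */
            printf("%d\n", pad_to_segment(192, 384));   /* exact multiple: pad is 0 */
            return 0;
    }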
|
| /Linux-v5.4/arch/powerpc/kvm/ |
| D | book3s_64_mmu.c |
      467   u64 seg_size;   in kvmppc_mmu_book3s_64_slbie()  local
      482   seg_size = 1ull << kvmppc_slb_sid_shift(slbe);   in kvmppc_mmu_book3s_64_slbie()
      483   kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);   in kvmppc_mmu_book3s_64_slbie()
|
| D | book3s_64_mmu_host.c |
      356   void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)   in kvmppc_mmu_flush_segment()  argument
      359   ulong seg_mask = -seg_size;   in kvmppc_mmu_flush_segment()
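The two PowerPC KVM hits compute a segment base in two equivalent ways: kvmppc_mmu_book3s_64_slbie() masks the effective address with ~(seg_size - 1), while kvmppc_mmu_flush_segment() builds seg_mask = -seg_size. For a power-of-two segment size the two masks are identical, as this small stand-alone check illustrates (the size and address here are arbitrary example values):

    #include <stdio.h>

    int main(void)
    {
            unsigned long seg_size = 1UL << 20;          /* an example power-of-two size */
            unsigned long ea = 0x3456789UL;

            /* For a power of two, -seg_size == ~(seg_size - 1), so both expressions
             * clear the low bits and yield the segment base address. */
            unsigned long base1 = ea & ~(seg_size - 1);  /* form used in book3s_64_mmu.c */
            unsigned long base2 = ea & -seg_size;        /* form implied by seg_mask = -seg_size */

            printf("%#lx %#lx\n", base1, base2);         /* both print 0x3400000 */
            return 0;
    }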
|
| /Linux-v5.4/fs/afs/ |
| D | xdr_fs.h |
      25   __be32 seg_size;   member
|
| /Linux-v5.4/drivers/usb/early/ |
| D | xhci-dbc.h |
      61   __le32 seg_size;   member
|
| /Linux-v5.4/drivers/video/fbdev/via/ |
| D | via-core.c |
      149   u32 seg_size; /* Size, 16-byte units */   member
      259   descr->seg_size = sg_dma_len(sgentry) >> 4;   in viafb_dma_copy_out_sg()
|
| /Linux-v5.4/drivers/net/ethernet/intel/ice/ |
| D | ice_flex_type.h |
      31   __le32 seg_size;   member
|
| D | ice_flex_pipe.c |
      411   le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);   in ice_download_pkg()
      573   if (len < off + le32_to_cpu(seg->seg_size))   in ice_verify_pkg()
|
| /Linux-v5.4/include/rdma/ |
| D | ib_mad.h |
      498   int seg_size;   member
|
| /Linux-v5.4/drivers/usb/mtu3/ |
| D | mtu3_core.c |
      23   static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size)   in ep_fifo_alloc()  argument
      26   u32 num_bits = DIV_ROUND_UP(seg_size, MTU3_EP_FIFO_UNIT);   in ep_fifo_alloc()
|
| /Linux-v5.4/arch/powerpc/include/asm/ |
| D | kvm_book3s.h |
      156   extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
|
| /Linux-v5.4/net/ipv4/ |
| D | tcp_output.c |
      3761   unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;   in tcp_write_wakeup()  local
      3770   if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||   in tcp_write_wakeup()
      3772   seg_size = min(seg_size, mss);   in tcp_write_wakeup()
      3775   skb, seg_size, mss, GFP_ATOMIC))   in tcp_write_wakeup()
|
| /Linux-v5.4/drivers/staging/rtl8192u/ |
| D | r8192U.h |
      368   u16 seg_size;   member
|
| /Linux-v5.4/drivers/infiniband/hw/hns/ |
| D | hns_roce_hem.c |
      828   u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */   in hns_roce_table_find()  local
      842   dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %   in hns_roce_table_find()
|