| /Linux-v5.10/kernel/ |
| D | kexec.c |
|   24 struct kexec_segment __user *segments) in copy_user_segment_list() argument
|   31 segment_bytes = nr_segments * sizeof(*segments); in copy_user_segment_list()
|   32 ret = copy_from_user(image->segment, segments, segment_bytes); in copy_user_segment_list()
|   41 struct kexec_segment __user *segments, in kimage_alloc_init() argument
|   62 ret = copy_user_segment_list(image, nr_segments, segments); in kimage_alloc_init()
|   107 struct kexec_segment __user *segments, unsigned long flags) in do_kexec_load() argument
|   135 ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags); in do_kexec_load()
|   237 struct kexec_segment __user *, segments, unsigned long, flags) in SYSCALL_DEFINE4()
|   261 result = do_kexec_load(entry, nr_segments, segments, flags); in SYSCALL_DEFINE4()
|   271 struct compat_kexec_segment __user *, segments, in COMPAT_SYSCALL_DEFINE4() argument
|   [all …]
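The kexec.c hits show the common pattern of taking a user-supplied array of segment descriptors: size it as `nr_segments * sizeof(*segments)`, then bulk-copy it with `copy_from_user()`. Below is a minimal user-space sketch of the same size-then-copy shape, with `memcpy()` standing in for `copy_from_user()` and an invented `struct my_segment` in place of `struct kexec_segment`; it is illustrative only, not the kernel code.

```c
/* Sketch of the "size the array, then bulk-copy it" pattern from
 * copy_user_segment_list().  memcpy() stands in for copy_from_user(),
 * and struct my_segment is a made-up stand-in for struct kexec_segment. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct my_segment {
	void *buf;          /* source buffer */
	size_t bufsz;       /* bytes in buf */
	unsigned long mem;  /* destination address */
	size_t memsz;       /* bytes reserved at mem */
};

static int copy_segment_list(struct my_segment **dst,
			     unsigned long nr_segments,
			     const struct my_segment *src)
{
	size_t segment_bytes;

	/* Guard the multiplication before trusting a caller-supplied count. */
	if (nr_segments == 0 || nr_segments > SIZE_MAX / sizeof(**dst))
		return -1;

	segment_bytes = nr_segments * sizeof(**dst);
	*dst = malloc(segment_bytes);
	if (!*dst)
		return -1;
	memcpy(*dst, src, segment_bytes);	/* kernel: copy_from_user() */
	return 0;
}

int main(void)
{
	struct my_segment in[2] = { { NULL, 0, 0x100000, 4096 },
				    { NULL, 0, 0x200000, 8192 } };
	struct my_segment *out;

	if (copy_segment_list(&out, 2, in) == 0) {
		printf("copied %zu segment descriptors\n",
		       sizeof(in) / sizeof(in[0]));
		free(out);
	}
	return 0;
}
```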
|
| /Linux-v5.10/drivers/net/ethernet/sfc/ |
| D | ef100_tx.c |
|   342 unsigned int segments; in ef100_enqueue_skb() local
|   351 segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; in ef100_enqueue_skb()
|   352 if (segments == 1) in ef100_enqueue_skb()
|   353 segments = 0; /* Don't use TSO/GSO for a single segment. */ in ef100_enqueue_skb()
|   354 if (segments && !ef100_tx_can_tso(tx_queue, skb)) { in ef100_enqueue_skb()
|   364 rc = efx_tx_map_data(tx_queue, skb, segments); in ef100_enqueue_skb()
|   367 ef100_tx_make_descriptors(tx_queue, skb, segments); in ef100_enqueue_skb()
|   396 if (segments) { in ef100_enqueue_skb()
|   398 tx_queue->tso_packets += segments; in ef100_enqueue_skb()
|   399 tx_queue->tx_packets += segments; in ef100_enqueue_skb()
|
| D | tx.c |
|   327 unsigned int segments; in __efx_enqueue_skb() local
|   332 segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; in __efx_enqueue_skb()
|   333 if (segments == 1) in __efx_enqueue_skb()
|   334 segments = 0; /* Don't use TSO for a single segment. */ in __efx_enqueue_skb()
|   340 if (segments) { in __efx_enqueue_skb()
|   379 if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments))) in __efx_enqueue_skb()
|   390 if (segments) { in __efx_enqueue_skb()
|   392 tx_queue->tso_packets += segments; in __efx_enqueue_skb()
|   393 tx_queue->tx_packets += segments; in __efx_enqueue_skb()
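Both ef100_tx.c and tx.c use the same idiom before queueing a packet: read `gso_segs` only when the skb is GSO, and fold a single-segment GSO packet back onto the plain path by zeroing `segments`, so every later `if (segments)` branch and the packet counters key off one value. A small stand-alone sketch of that decision, with invented types rather than the sfc driver's API:

```c
/* Stand-alone sketch of the sfc "segments" idiom: 0 means "plain packet",
 * >1 means "offload into that many segments".  struct fake_skb and
 * struct fake_queue are invented for the example. */
#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	bool is_gso;
	unsigned int gso_segs;
};

struct fake_queue {
	unsigned long tso_packets;
	unsigned long tx_packets;
};

static unsigned int tx_segments(const struct fake_skb *skb)
{
	unsigned int segments = skb->is_gso ? skb->gso_segs : 0;

	if (segments == 1)
		segments = 0;	/* Don't use TSO for a single segment. */
	return segments;
}

static void enqueue(struct fake_queue *q, const struct fake_skb *skb)
{
	unsigned int segments = tx_segments(skb);

	if (segments) {
		/* Offload path: account for every resulting wire packet. */
		q->tso_packets += segments;
		q->tx_packets += segments;
	} else {
		q->tx_packets++;
	}
}

int main(void)
{
	struct fake_queue q = { 0, 0 };
	struct fake_skb plain = { false, 0 };
	struct fake_skb single = { true, 1 };	/* GSO but only one segment */
	struct fake_skb gso = { true, 5 };

	enqueue(&q, &plain);
	enqueue(&q, &single);
	enqueue(&q, &gso);
	printf("tx=%lu tso=%lu\n", q.tx_packets, q.tso_packets);
	return 0;
}
```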
|
| D | tx_common.c |
|   433 struct sk_buff *segments, *next; in efx_tx_tso_fallback() local
|   435 segments = skb_gso_segment(skb, 0); in efx_tx_tso_fallback()
|   436 if (IS_ERR(segments)) in efx_tx_tso_fallback()
|   437 return PTR_ERR(segments); in efx_tx_tso_fallback()
|   441 skb_list_walk_safe(segments, skb, next) { in efx_tx_tso_fallback()
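efx_tx_tso_fallback() is the software fallback when hardware TSO cannot be used: `skb_gso_segment()` splits the skb into a chained list of already-segmented skbs, and `skb_list_walk_safe()` walks that list while each element is handed off and may be freed. A user-space sketch of the same "segment, then safe-walk a list whose nodes are consumed as you go" shape, using an invented node type rather than sk_buff:

```c
/* Sketch of the "segment then walk-safe" pattern from efx_tx_tso_fallback().
 * struct pkt is an invented stand-in for sk_buff; consume() frees the node,
 * which is why the walk must grab ->next before handing the node off. */
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	struct pkt *next;
	size_t len;
};

/* Split a big "packet" into mss-sized pieces, chained via ->next. */
static struct pkt *segment(size_t total, size_t mss)
{
	struct pkt *head = NULL, **tail = &head;

	while (total) {
		struct pkt *p = calloc(1, sizeof(*p));

		if (!p)
			break;
		p->len = total < mss ? total : mss;
		total -= p->len;
		*tail = p;
		tail = &p->next;
	}
	return head;
}

static void consume(struct pkt *p)
{
	printf("sending %zu bytes\n", p->len);
	free(p);
}

int main(void)
{
	struct pkt *segments = segment(4000, 1448);
	struct pkt *p, *next;

	/* Equivalent of skb_list_walk_safe(): remember next before the
	 * current node is consumed. */
	for (p = segments; p; p = next) {
		next = p->next;
		consume(p);
	}
	return 0;
}
```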
|
| /Linux-v5.10/include/uapi/linux/ |
| D | rpl.h |
|   42 } segments; member
|   45 #define rpl_segaddr segments.addr
|   46 #define rpl_segdata segments.data
|
| /Linux-v5.10/arch/arm/mm/ |
| D | proc-arm940.S |
|   110 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
|   116 bcs 1b @ segments 3 to 0
|   160 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
|   166 bcs 1b @ segments 7 to 0
|   182 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
|   188 bcs 1b @ segments 7 to 0
|   205 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
|   211 bcs 1b @ segments 7 to 0
|   227 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
|   238 bcs 1b @ segments 7 to 0
|
| D | proc-arm1020e.S |
|   142 mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
|   148 bcs 1b @ segments 15 to 0
|   372 mov r1, #0xF @ 16 segments
|   383 bge 1b @ segments 15 to 0
|
| D | proc-arm1022.S |
|   141 mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
|   147 bcs 1b @ segments 15 to 0
|   371 mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
|   377 bcs 1b @ segments 15 to 0
|
| D | proc-arm922.S |
|   132 mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
|   138 bcs 1b @ segments 7 to 0
|   347 mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 4 segments
|   353 bcs 1b @ segments 7 to 0
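All of these proc-arm*.S hits share one shape: load r1 with the top segment index shifted into its bit-field position (`(CACHE_DSEGMENTS - 1) << 4` or `<< 5`), then count down through segments and, in the inner loop, cache entries, issuing one cache-maintenance MCR per index until the borrow clears. A C rendering of just the loop and indexing arithmetic; the constants and shifts below are illustrative, and the MCR itself has no user-space equivalent.

```c
/* C rendering of the segment/entry countdown used by the ARM9/ARM10
 * cache-clean loops.  CACHE_DSEGMENTS, CACHE_DENTRIES and the shifts
 * are example values; the real ones come from each CPU's proc-*.S. */
#include <stdio.h>

#define CACHE_DSEGMENTS  8	/* e.g. "@ 8 segments" */
#define CACHE_DENTRIES  64	/* entries per segment */
#define SEGMENT_SHIFT    5	/* segment field position in the MCR operand */
#define ENTRY_SHIFT     26	/* entry field position in the MCR operand */

int main(void)
{
	unsigned int ops = 0;

	/* Outer loop: segments (CACHE_DSEGMENTS - 1) down to 0. */
	for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--) {
		/* Inner loop: entries (CACHE_DENTRIES - 1) down to 0. */
		for (int entry = CACHE_DENTRIES - 1; entry >= 0; entry--) {
			unsigned int index =
				(unsigned int)seg << SEGMENT_SHIFT |
				(unsigned int)entry << ENTRY_SHIFT;
			/* In the .S files this is an MCR "clean/invalidate
			 * D-cache entry by index" with 'index' in a register. */
			(void)index;
			ops++;
		}
	}
	printf("%u cache index operations\n", ops);
	return 0;
}
```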
|
| /Linux-v5.10/Documentation/powerpc/ |
| D | pci_iov_resource_on_powernv.rst |
|   95 * It is divided into 256 segments of equal size. A table in the chip
|   108 more segments.
|   120 has 256 segments; however, there is no table for mapping a segment
|   135 trick, to match to those giant segments.
|   144 - We cannot "group" segments in HW, so if a device ends up using more
|   153 PEs" that are used for the remaining M64 segments.
|   189 equally-sized segments. The finest granularity possible is a 256MB
|   190 window with 1MB segments. VF BARs that are 1MB or larger could be
|   196 BARs span several segments.
|   202 like the M32 window, but the segments can't be individually mapped to
|   [all …]
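The arithmetic behind this document is worth seeing concretely: an M64 window is always split into 256 equal segments, so the segment size is simply the window size divided by 256, which is how a 256MB window yields the 1MB "finest granularity" segments mentioned at lines 189-190. A few lines of C doing that division with example window sizes, as an illustration rather than firmware-accurate code:

```c
/* Illustrative arithmetic for the M64 window description above: a window
 * is split into 256 equal segments, so segment size is window / 256.
 * The window sizes are example values only. */
#include <stdio.h>

#define M64_SEGMENTS 256ULL
#define MB (1ULL << 20)
#define GB (1ULL << 30)

int main(void)
{
	unsigned long long windows[] = { 256 * MB, 64 * GB };

	for (unsigned int i = 0; i < sizeof(windows) / sizeof(windows[0]); i++) {
		unsigned long long seg = windows[i] / M64_SEGMENTS;

		printf("window %llu MB -> segment size %llu MB\n",
		       windows[i] / MB, seg / MB);
	}
	return 0;
}
```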
|
| /Linux-v5.10/Documentation/ABI/testing/ |
| D | sysfs-fs-nilfs2 |
|   183 What: /sys/fs/nilfs2/<device>/segments/segments_number
|   187 Show number of segments on a volume.
|   189 What: /sys/fs/nilfs2/<device>/segments/blocks_per_segment
|   195 What: /sys/fs/nilfs2/<device>/segments/clean_segments
|   199 Show count of clean segments.
|   201 What: /sys/fs/nilfs2/<device>/segments/dirty_segments
|   205 Show count of dirty segments.
|   207 What: /sys/fs/nilfs2/<device>/segments/README
|   211 Describe attributes of /sys/fs/nilfs2/<device>/segments
|
| D | sysfs-driver-jz4780-efuse |
|   6 split into segments. The driver supports read only.
|   7 The segments are:
|
| D | sysfs-fs-f2fs |
|   34 Description: This parameter controls the number of prefree segments to be
|   35 reclaimed. If the number of prefree segments is larger than
|   36 the number of segments in the proportion to the percentage
|   38 reclaim the prefree segments to free segments.
|   39 By default, 5% over total # of segments.
|   221 Description: Shows the number of dirty segments.
|   291 Description: Number of free segments in disk.
|   303 free segments. Available when CONFIG_F2FS_STAT_FS=y.
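The attribute described at lines 34-39 is a percentage threshold: reclaim of prefree segments kicks in once their count exceeds that fraction of all segments, 5% by default. A tiny worked example of that comparison, with made-up segment counts:

```c
/* Worked example of the f2fs prefree-segment threshold described above.
 * The segment counts are made up; only the comparison is the point. */
#include <stdbool.h>
#include <stdio.h>

static bool should_reclaim(unsigned long prefree_segments,
			   unsigned long total_segments,
			   unsigned int percent)	/* default 5 */
{
	return prefree_segments > total_segments * percent / 100;
}

int main(void)
{
	unsigned long total = 100000;	/* example total # of segments */

	printf("4500 prefree: %s\n",
	       should_reclaim(4500, total, 5) ? "reclaim" : "wait");
	printf("6000 prefree: %s\n",
	       should_reclaim(6000, total, 5) ? "reclaim" : "wait");
	return 0;
}
```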
|
| /Linux-v5.10/block/ |
| D | blk-integrity.c |
|   30 unsigned int segments = 0; in blk_rq_count_integrity_sg() local
|   46 segments++; in blk_rq_count_integrity_sg()
|   54 return segments; in blk_rq_count_integrity_sg()
|   73 unsigned int segments = 0; in blk_rq_map_integrity_sg() local
|   96 segments++; in blk_rq_map_integrity_sg()
|   106 return segments; in blk_rq_map_integrity_sg()
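blk_rq_count_integrity_sg() walks the integrity bio vecs and, roughly, only bumps `segments` when the next chunk cannot be merged with the previous one (not physically contiguous, or the merged size would exceed the queue's segment limit); blk_rq_map_integrity_sg() repeats the walk while filling the scatterlist. A user-space sketch of the counting half over a plain array of (address, length) chunks instead of bio vecs:

```c
/* Sketch of the merge-or-count logic behind blk_rq_count_integrity_sg(),
 * applied to a plain array of (addr, len) chunks.  MAX_SEG_SIZE is a
 * made-up per-segment limit standing in for the queue limit. */
#include <stdio.h>

#define MAX_SEG_SIZE (64 * 1024UL)

struct chunk {
	unsigned long addr;
	unsigned long len;
};

static unsigned int count_segments(const struct chunk *c, unsigned int n)
{
	unsigned int segments = 0;
	unsigned long seg_end = 0, seg_len = 0;

	for (unsigned int i = 0; i < n; i++) {
		if (segments &&
		    c[i].addr == seg_end &&			/* contiguous */
		    seg_len + c[i].len <= MAX_SEG_SIZE) {	/* fits limit */
			seg_len += c[i].len;			/* merge */
		} else {
			segments++;				/* new segment */
			seg_len = c[i].len;
		}
		seg_end = c[i].addr + c[i].len;
	}
	return segments;
}

int main(void)
{
	struct chunk c[] = {
		{ 0x10000, 0x1000 },
		{ 0x11000, 0x1000 },	/* merges with the first */
		{ 0x20000, 0x1000 },	/* gap: starts a new segment */
	};

	printf("segments = %u\n", count_segments(c, 3));	/* prints 2 */
	return 0;
}
```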
|
| /Linux-v5.10/drivers/dma/xilinx/ |
| D | xilinx_dma.c |
|   368 struct list_head segments; member
|   807 INIT_LIST_HEAD(&desc->segments); in xilinx_dma_alloc_tx_descriptor()
|   830 list_for_each_entry_safe(segment, next, &desc->segments, node) { in xilinx_dma_free_tx_descriptor()
|   836 &desc->segments, node) { in xilinx_dma_free_tx_descriptor()
|   842 &desc->segments, node) { in xilinx_dma_free_tx_descriptor()
|   848 &desc->segments, node) { in xilinx_dma_free_tx_descriptor()
|   958 list_for_each(entry, &desc->segments) { in xilinx_dma_get_residue()
|   1389 list_for_each_entry(segment, &desc->segments, node) { in xilinx_vdma_start_transfer()
|   1445 tail_segment = list_last_entry(&tail_desc->segments, in xilinx_cdma_start_transfer()
|   1473 segment = list_first_entry(&head_desc->segments, in xilinx_cdma_start_transfer()
|   [all …]
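In xilinx_dma.c each TX descriptor owns a `struct list_head segments`: the allocator initialises it, the free path walks it with list_for_each_entry_safe() so nodes can be removed while iterating, and the start-transfer paths peek at the first and last segments to program the hardware. A compact user-space sketch of that descriptor-owns-segments lifecycle, using a plain singly linked list rather than the kernel list API; all the types here are invented for the example.

```c
/* Sketch of the descriptor-owns-a-list-of-segments lifecycle seen in
 * xilinx_dma.c, with a plain singly linked list instead of list_head. */
#include <stdio.h>
#include <stdlib.h>

struct segment {
	struct segment *next;
	unsigned long hw_addr;	/* what the DMA engine would be programmed with */
};

struct descriptor {
	struct segment *segments;	/* head of the segment chain */
};

static void desc_add_segment(struct descriptor *d, unsigned long hw_addr)
{
	struct segment *s = calloc(1, sizeof(*s));
	struct segment **tail = &d->segments;

	if (!s)
		return;
	s->hw_addr = hw_addr;
	while (*tail)
		tail = &(*tail)->next;
	*tail = s;
}

/* Teardown in the spirit of list_for_each_entry_safe(): remember the next
 * node before freeing the current one so the walk survives the free. */
static void desc_free(struct descriptor *d)
{
	struct segment *s = d->segments, *next;

	for (; s; s = next) {
		next = s->next;
		free(s);
	}
	d->segments = NULL;
}

int main(void)
{
	struct descriptor d = { NULL };

	desc_add_segment(&d, 0x1000);
	desc_add_segment(&d, 0x2000);
	for (struct segment *s = d.segments; s; s = s->next)
		printf("segment @ 0x%lx\n", s->hw_addr);
	desc_free(&d);
	return 0;
}
```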
|
| /Linux-v5.10/drivers/media/i2c/ |
| D | ad9389b.c |
|   61 u32 segments; member
|   376 edid->segments ? "found" : "no", edid->blocks); in ad9389b_log_status()
|   672 if (!state->edid.segments) { in ad9389b_get_edid()
|   676 if (edid->start_block >= state->edid.segments * 2) in ad9389b_get_edid()
|   678 if (edid->blocks + edid->start_block >= state->edid.segments * 2) in ad9389b_get_edid()
|   679 edid->blocks = state->edid.segments * 2 - edid->start_block; in ad9389b_get_edid()
|   934 v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0); in ad9389b_update_monitor_present_status()
|   1045 state->edid.segments = segment + 1; in ad9389b_check_edid_status()
|   1046 if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) { in ad9389b_check_edid_status()
|   1049 __func__, state->edid.segments); in ad9389b_check_edid_status()
|   [all …]
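The ad9389b hits are EDID bookkeeping: each EDID segment carries two 128-byte blocks, so a read whose start_block is at or past `segments * 2` is rejected and the requested block count is clamped so it never runs off the end. The clamping arithmetic in isolation, with example values:

```c
/* Isolated sketch of the EDID block clamping done in ad9389b_get_edid():
 * each segment carries two 128-byte blocks, so valid block indices are
 * 0 .. segments * 2 - 1.  The values used in main() are examples. */
#include <stdio.h>

static int clamp_edid_request(unsigned int segments,
			      unsigned int start_block, unsigned int *blocks)
{
	if (!segments)
		return -1;				/* no EDID read yet */
	if (start_block >= segments * 2)
		return -1;				/* starts past the end */
	if (*blocks + start_block >= segments * 2)
		*blocks = segments * 2 - start_block;	/* clamp the count */
	return 0;
}

int main(void)
{
	unsigned int blocks = 4;

	/* 1 segment = 2 blocks: a request for blocks 1..4 is clamped to 1..1. */
	if (clamp_edid_request(1, 1, &blocks) == 0)
		printf("will read %u block(s)\n", blocks);
	return 0;
}
```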
|
| /Linux-v5.10/drivers/block/xen-blkback/ |
| D | blkback.c |
|   707 struct grant_page **pages = req->segments; in xen_blkbk_unmap_and_respond()
|   902 rc = xen_blkbk_map(pending_req->ring, pending_req->segments, in xen_blkbk_map_seg()
|   917 struct blkif_request_segment *segments = NULL; in xen_blkbk_parse_indirect() local
|   935 if (segments) in xen_blkbk_parse_indirect()
|   936 kunmap_atomic(segments); in xen_blkbk_parse_indirect()
|   937 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); in xen_blkbk_parse_indirect()
|   941 pending_req->segments[n]->gref = segments[i].gref; in xen_blkbk_parse_indirect()
|   943 first_sect = READ_ONCE(segments[i].first_sect); in xen_blkbk_parse_indirect()
|   944 last_sect = READ_ONCE(segments[i].last_sect); in xen_blkbk_parse_indirect()
|   956 if (segments) in xen_blkbk_parse_indirect()
|   [all …]
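xen_blkbk_parse_indirect() pulls segment descriptors out of indirect grant pages: descriptor n lives in page `n / SEGS_PER_INDIRECT_FRAME`, the page is (re)mapped as the walk moves through the frames, and first_sect/last_sect are read once because they live in memory shared with the frontend. A sketch of just the page/slot indexing over ordinary arrays; the constant and types below are invented for the example.

```c
/* Sketch of the indirect-segment indexing in xen_blkbk_parse_indirect():
 * descriptor n sits in page n / SEGS_PER_FRAME at slot n % SEGS_PER_FRAME.
 * Plain arrays replace granted pages and kmap_atomic() here, and
 * SEGS_PER_FRAME is an example value, not Xen's real constant. */
#include <stdio.h>

#define SEGS_PER_FRAME 4

struct seg_desc {
	unsigned int gref;
	unsigned char first_sect, last_sect;
};

int main(void)
{
	/* Two "indirect pages" worth of descriptors; only the first is filled. */
	struct seg_desc pages[2][SEGS_PER_FRAME] = { { { 10, 0, 7 } } };
	unsigned int nseg = 6;
	const struct seg_desc *mapped = NULL;
	int mapped_page = -1;

	for (unsigned int n = 0; n < nseg; n++) {
		unsigned int page = n / SEGS_PER_FRAME;
		unsigned int slot = n % SEGS_PER_FRAME;

		if ((int)page != mapped_page) {
			/* kernel: kunmap_atomic(old); kmap_atomic(new page) */
			mapped = pages[page];
			mapped_page = (int)page;
		}
		printf("segment %u -> page %u slot %u gref %u sectors %u..%u\n",
		       n, page, slot, mapped[slot].gref,
		       (unsigned int)mapped[slot].first_sect,
		       (unsigned int)mapped[slot].last_sect);
	}
	return 0;
}
```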
|
| /Linux-v5.10/drivers/char/agp/ |
| D | frontend.c |
|   110 seg = *(client->segments); in agp_find_seg_in_client()
|   127 if (client->segments != NULL) { in agp_remove_seg_from_client()
|   128 if (*(client->segments) != NULL) { in agp_remove_seg_from_client()
|   129 DBG("Freeing %p from client %p", *(client->segments), client); in agp_remove_seg_from_client()
|   130 kfree(*(client->segments)); in agp_remove_seg_from_client()
|   132 DBG("Freeing %p from client %p", client->segments, client); in agp_remove_seg_from_client()
|   133 kfree(client->segments); in agp_remove_seg_from_client()
|   134 client->segments = NULL; in agp_remove_seg_from_client()
|   143 prev_seg = client->segments; in agp_add_seg_to_client()
|   150 client->segments = seg; in agp_add_seg_to_client()
|
| /Linux-v5.10/drivers/bus/mhi/core/ |
| D | boot.c |
|   312 int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; in mhi_alloc_bhie_table() local
|   322 img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), in mhi_alloc_bhie_table()
|   329 for (i = 0; i < segments; i++, mhi_buf++) { in mhi_alloc_bhie_table()
|   333 if (i == segments - 1) in mhi_alloc_bhie_table()
|   344 img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; in mhi_alloc_bhie_table()
|   345 img_info->entries = segments; in mhi_alloc_bhie_table()
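mhi_alloc_bhie_table() sizes its table as `DIV_ROUND_UP(alloc_size, seg_size) + 1` segments: enough equal chunks to cover the firmware image, plus one extra segment whose buffer ends up holding the vector table (`bhi_vec` points at the last entry). The segment-count arithmetic stands alone nicely; sizes below are example numbers.

```c
/* The segment-count arithmetic from mhi_alloc_bhie_table(), stand-alone.
 * Sizes are example numbers; DIV_ROUND_UP is written out explicitly. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long alloc_size = 5UL * 1024 * 1024 + 123;	/* firmware bytes */
	unsigned long seg_size = 1UL * 1024 * 1024;		/* per-segment bytes */

	/* +1: the final segment is reserved for the vector table. */
	unsigned long segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
	unsigned long last = segments - 1;	/* index used for bhi_vec */

	printf("%lu data+vector segments, vector table in segment %lu\n",
	       segments, last);
	return 0;
}
```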
|
| /Linux-v5.10/Documentation/i2c/muxes/ |
| D | i2c-mux-gpio.rst |
|   10 i2c-mux-gpio is an i2c mux driver providing access to I2C bus segments
|   34 bus, the number of bus segments to create and the GPIO pins used
|   37 E.G. something like this for a MUX providing 4 bus segments
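The i2c-mux-gpio text says the platform tells the driver how many bus segments to create and which GPIOs select them; with binary encoding, two GPIO lines are enough for four segments because the mux value is just the segment index driven onto the pins bit by bit. A small sketch of that index-to-GPIO-levels mapping; the pin numbers are examples and gpio_set() is a placeholder, not the driver's actual platform-data or GPIO API.

```c
/* Sketch of selecting one of four I2C bus segments with two GPIO lines:
 * the mux value is the segment index, and each GPIO gets one bit of it.
 * gpio_set() just prints; it is not a real GPIO API. */
#include <stdio.h>

static void gpio_set(unsigned int gpio, int value)
{
	printf("  gpio %u -> %d\n", gpio, value);
}

static void select_segment(const unsigned int *gpios, unsigned int ngpios,
			   unsigned int segment)
{
	printf("selecting bus segment %u\n", segment);
	for (unsigned int i = 0; i < ngpios; i++)
		gpio_set(gpios[i], (segment >> i) & 1);
}

int main(void)
{
	unsigned int gpios[] = { 17, 18 };	/* example pin numbers */

	for (unsigned int seg = 0; seg < 4; seg++)
		select_segment(gpios, 2, seg);
	return 0;
}
```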
|
| /Linux-v5.10/drivers/media/dvb-frontends/ |
| D | mb86a20s.c |
|   1344 static u32 interpolate_value(u32 value, const struct linear_segments *segments, in interpolate_value() argument
|   1351 if (value >= segments[0].x) in interpolate_value()
|   1352 return segments[0].y; in interpolate_value()
|   1353 if (value < segments[len-1].x) in interpolate_value()
|   1354 return segments[len-1].y; in interpolate_value()
|   1358 if (value == segments[i].x) in interpolate_value()
|   1359 return segments[i].y; in interpolate_value()
|   1360 if (value > segments[i].x) in interpolate_value()
|   1365 dy = segments[i].y - segments[i - 1].y; in interpolate_value()
|   1366 dx = segments[i - 1].x - segments[i].x; in interpolate_value()
|   [all …]
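interpolate_value() is piecewise-linear interpolation over a table of (x, y) segments sorted by descending x: clamp outside the table, return an exact hit, otherwise interpolate between the two bracketing entries. A self-contained version of the same idea, written as a straight lerp between the bracketing points rather than the driver's exact fixed-point sequence, over a made-up table:

```c
/* Self-contained version of the piecewise-linear lookup pattern used by
 * interpolate_value(): the table is sorted by descending x, values outside
 * the table are clamped, and anything in between is linearly interpolated
 * between the two bracketing entries.  The table in main() is made up. */
#include <stdio.h>

struct linear_segment {
	unsigned int x, y;
};

static unsigned int interpolate(unsigned int value,
				const struct linear_segment *seg,
				unsigned int len)
{
	unsigned int i;
	long long dx, dy, off;

	if (value >= seg[0].x)		/* above the table: clamp */
		return seg[0].y;
	if (value <= seg[len - 1].x)	/* below the table: clamp */
		return seg[len - 1].y;

	/* Find the first entry at or below the requested value. */
	for (i = 1; i < len; i++)
		if (value >= seg[i].x)
			break;
	if (value == seg[i].x)
		return seg[i].y;

	/* Linear interpolation between seg[i] and seg[i - 1]. */
	dy = (long long)seg[i - 1].y - seg[i].y;
	dx = (long long)seg[i - 1].x - seg[i].x;
	off = (long long)(value - seg[i].x) * dy / dx;
	return (unsigned int)(seg[i].y + off);
}

int main(void)
{
	/* Made-up table, sorted by descending x like the driver's tables. */
	static const struct linear_segment table[] = {
		{ 1000, 500 }, { 800, 400 }, { 600, 250 }, { 400, 100 },
	};

	printf("f(700) = %u\n", interpolate(700, table, 4));	/* 325 */
	printf("f(50)  = %u\n", interpolate(50, table, 4));	/* clamped to 100 */
	return 0;
}
```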
|
| /Linux-v5.10/drivers/gpu/drm/arm/ |
| D | malidp_crtc.c |
|   96 } segments[MALIDP_COEFFTAB_NUM_COEFFS] = { variable
|   133 delta_in = segments[i].end - segments[i].start; in malidp_generate_gamma_table()
|   135 out_start = drm_color_lut_extract(lut[segments[i].start].green, in malidp_generate_gamma_table()
|   137 out_end = drm_color_lut_extract(lut[segments[i].end].green, 12); in malidp_generate_gamma_table()
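malidp_generate_gamma_table() works per segment of the LUT: the segment table gives the LUT indices where each piece starts and ends, `drm_color_lut_extract(..., 12)` narrows the 16-bit LUT entries to the 12 bits the hardware keeps, and the output delta over `delta_in` input steps becomes the coefficient. A sketch of that per-segment computation over a synthetic LUT; the 16-to-12-bit reduction is approximated with a rounding shift, and the segment boundaries are examples.

```c
/* Sketch of the per-segment endpoint/slope computation behind
 * malidp_generate_gamma_table().  The LUT and segment table are synthetic. */
#include <stdio.h>

struct segment {
	unsigned int start, end;	/* LUT indices bounding the segment */
};

/* Rounding 16 -> 12 bit reduction, roughly what drm_color_lut_extract()
 * does for a bit precision of 12. */
static unsigned int extract12(unsigned int v16)
{
	unsigned int v = (v16 + (1u << 3)) >> 4;

	return v > 0xfff ? 0xfff : v;
}

int main(void)
{
	unsigned int lut[256];				/* synthetic 16-bit LUT */
	struct segment segments[] = { { 0, 15 }, { 16, 63 }, { 64, 255 } };

	for (unsigned int i = 0; i < 256; i++)
		lut[i] = i * 257;			/* linear ramp 0 .. 0xffff */

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int delta_in = segments[i].end - segments[i].start;
		unsigned int out_start = extract12(lut[segments[i].start]);
		unsigned int out_end = extract12(lut[segments[i].end]);

		printf("segment %u: %u LUT steps, 12-bit output %u -> %u\n",
		       i, delta_in, out_start, out_end);
	}
	return 0;
}
```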
|
| /Linux-v5.10/arch/powerpc/platforms/cell/ |
| D | iommu.c |
|   301 unsigned long segments, stab_size; in cell_iommu_setup_stab() local
|   303 segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; in cell_iommu_setup_stab()
|   306 __func__, iommu->nid, segments); in cell_iommu_setup_stab()
|   309 stab_size = segments * sizeof(unsigned long); in cell_iommu_setup_stab()
|   322 unsigned long reg, segments, pages_per_segment, ptab_size, in cell_iommu_alloc_ptab() local
|   326 segments = size >> IO_SEGMENT_SHIFT; in cell_iommu_alloc_ptab()
|   332 ptab_size = segments * pages_per_segment * sizeof(unsigned long); in cell_iommu_alloc_ptab()
|   363 for (i = start_seg; i < (start_seg + segments); i++) { in cell_iommu_alloc_ptab()
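The Cell IOMMU code sizes its tables from the segment count: the covered address range shifted down by IO_SEGMENT_SHIFT gives one segment per 2^IO_SEGMENT_SHIFT bytes, the segment table needs one unsigned long entry per segment, and the page table needs segments * pages_per_segment entries. That arithmetic as a stand-alone calculation; the shift and page-size values below are examples, not necessarily Cell's real definitions.

```c
/* Stand-alone version of the table sizing in cell_iommu_setup_stab() and
 * cell_iommu_alloc_ptab().  The shift and page-size values are examples. */
#include <stdio.h>

#define IO_SEGMENT_SHIFT 28		/* example: 256 MB segments */
#define IO_PAGE_SHIFT    12		/* example: 4 KB IOMMU pages */

int main(void)
{
	unsigned long long base = 0, size = 2ULL << 30;	/* a 2 GB window */

	unsigned long long segments = (base + size) >> IO_SEGMENT_SHIFT;
	unsigned long long pages_per_segment =
		1ULL << (IO_SEGMENT_SHIFT - IO_PAGE_SHIFT);

	/* One unsigned long per segment in the segment table ... */
	unsigned long long stab_size = segments * sizeof(unsigned long);
	/* ... and one entry per IOMMU page in the page table. */
	unsigned long long ptab_size =
		segments * pages_per_segment * sizeof(unsigned long);

	printf("%llu segments, stab %llu bytes, ptab %llu bytes\n",
	       segments, stab_size, ptab_size);
	return 0;
}
```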
|
| /Linux-v5.10/Documentation/filesystems/ |
| D | nilfs2.rst |
|   116 segments. This ioctl is used in lssu,
|   121 segments. This ioctl is used by
|   123 cleaning operation of segments and reduce
|   149 NILFS_IOCTL_SET_ALLOC_RANGE Define lower limit of segments in bytes and
|   150 upper limit of segments in bytes. This ioctl
|   191 A nilfs2 volume is equally divided into a number of segments except
|   240 3) Segment usage file (sufile) -- Stores allocation state of segments
|
| /Linux-v5.10/Documentation/input/devices/ |
| D | yealink.rst |
|   73 '8' : Generic 7 segment digit with individual addressable segments
|   75 Reduced capability 7 segment digit, when segments are hard wired together.
|   76 '1' : 2 segments digit only able to produce a 1.
|   101 map_seg7 Read/Write, the 7 segments char set, common for all
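The yealink LCD description distinguishes fully addressable 7-segment digits from reduced ones and points at the shared map_seg7 character map. The usual representation is one bit per segment (a through g); a tiny table-driven example of rendering decimal digits that way, where the bit assignment is a common convention chosen for the example and not necessarily the one map_seg7 uses:

```c
/* Tiny 7-segment demo: one bit per segment a..g, table-driven per digit.
 * The bit layout is a common convention picked for the example; the
 * kernel's map_seg7 charset has its own, configurable mapping. */
#include <stdio.h>

/*   aaa        bit 0 = a, 1 = b, 2 = c, 3 = d, 4 = e, 5 = f, 6 = g   */
/*  f   b  */
/*   ggg   */
/*  e   c  */
/*   ddd   */
static const unsigned char seg7_digits[10] = {
	0x3f,	/* 0: a b c d e f   */
	0x06,	/* 1:   b c         */
	0x5b,	/* 2: a b   d e   g */
	0x4f,	/* 3: a b c d     g */
	0x66,	/* 4:   b c     f g */
	0x6d,	/* 5: a   c d   f g */
	0x7d,	/* 6: a   c d e f g */
	0x07,	/* 7: a b c         */
	0x7f,	/* 8: all segments  */
	0x6f,	/* 9: a b c d   f g */
};

int main(void)
{
	for (int d = 0; d <= 9; d++)
		printf("digit %d -> segments 0x%02x\n", d, seg7_digits[d]);
	return 0;
}
```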
|