/Linux-v4.19/drivers/md/
dm-stripe.c:
     21  struct stripe {  (struct)
     44  struct stripe stripe[0];  (argument)
     62  if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),  in alloc_context()
     66  len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);  in alloc_context()
     75  unsigned int stripe, char **argv)  in get_stripe() (argument)
     85  &sc->stripe[stripe].dev);  in get_stripe()
     89  sc->stripe[stripe].physical_start = start;  in get_stripe()
    192  dm_put_device(ti, sc->stripe[i].dev);  in stripe_ctr()
    196  atomic_set(&(sc->stripe[i].error_count), 0);  in stripe_ctr()
    210  dm_put_device(ti, sc->stripe[i].dev);  in stripe_dtr()
    [all …]
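The alloc_context() hits show the classic flexible-array sizing pattern: one allocation holds the stripe_c header plus one struct stripe per configured stripe (struct stripe stripe[0] is the pre-C99 spelling of a flexible array member). A minimal userspace sketch of the same pattern, with simplified and partly hypothetical fields:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct stripe {                         /* hypothetical, trimmed-down fields */
            uint64_t physical_start;
            int error_count;
    };

    struct stripe_c {
            uint32_t stripes;
            struct stripe stripe[];         /* flexible array member */
    };

    static struct stripe_c *alloc_context(uint32_t stripes)
    {
            /* Same sizing as the line-66 hit: header + one entry per stripe. */
            size_t len = sizeof(struct stripe_c) + sizeof(struct stripe) * stripes;
            struct stripe_c *sc = calloc(1, len);

            if (sc)
                    sc->stripes = stripes;
            return sc;
    }

    int main(void)
    {
            struct stripe_c *sc = alloc_context(4);

            if (!sc)
                    return 1;
            sc->stripe[3].physical_start = 2048;    /* trailing entries index directly */
            printf("%u stripes allocated in one block\n", sc->stripes);
            free(sc);
            return 0;
    }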
raid0.c:
    426  int stripe = mddev->raid_disks *  in raid0_run() (local)
    428  if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)  in raid0_run()
    429  mddev->queue->backing_dev_info->ra_pages = 2* stripe;  in raid0_run()
raid5.c:
    494  static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
   2710  sector_t stripe, stripe2;  in raid5_compute_sector() (local)
   2735  stripe = chunk_number;  in raid5_compute_sector()
   2736  *dd_idx = sector_div(stripe, data_disks);  in raid5_compute_sector()
   2737  stripe2 = stripe;  in raid5_compute_sector()
   2904  new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;  in raid5_compute_sector()
   2918  sector_t stripe;  in raid5_compute_blocknr() (local)
   2926  stripe = new_sector;  in raid5_compute_blocknr()
   3014  chunk_number = stripe * data_disks + i;  in raid5_compute_blocknr()
   3325  static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,  in stripe_set_idx() (argument)
    [all …]
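raid5_compute_sector() splits a logical sector into a chunk offset, a data-disk index and a stripe number; sector_div() divides its first argument in place and returns the remainder. A hedged userspace rendering of just the quoted arithmetic — parity rotation and layout handling are omitted, so dd_idx here is only the pre-rotation index:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * sector_div(x, d) in the kernel divides x in place and returns x % d;
     * plain / and % stand in for it here.
     */
    static uint64_t compute_sector(uint64_t logical, unsigned sectors_per_chunk,
                                   unsigned data_disks, unsigned *dd_idx)
    {
            uint64_t chunk_number = logical / sectors_per_chunk;
            unsigned chunk_offset = logical % sectors_per_chunk;
            uint64_t stripe = chunk_number;

            *dd_idx = stripe % data_disks;          /* line 2736 */
            stripe /= data_disks;

            /* line 2904: sector of the chunk on the chosen member device */
            return stripe * sectors_per_chunk + chunk_offset;
    }

    int main(void)
    {
            unsigned dd_idx;
            /* 64 KiB chunks = 128 sectors, 4 data disks, logical sector 1000 */
            uint64_t new_sector = compute_sector(1000, 128, 4, &dd_idx);

            printf("disk %u, device sector %llu\n", dd_idx,
                   (unsigned long long)new_sector);
            return 0;
    }

raid5_compute_blocknr() (lines 2918-3014) is simply the inverse map, rebuilding the logical chunk number as stripe * data_disks + i.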
Makefile:
      6  dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
raid10.c:
    602  sector_t stripe;  in __raid10_find_phys() (local)
    618  stripe = chunk;  in __raid10_find_phys()
    619  dev = sector_div(stripe, geo->raid_disks);  in __raid10_find_phys()
    621  stripe *= geo->far_copies;  in __raid10_find_phys()
    623  sector += stripe << geo->chunk_shift;  in __raid10_find_phys()
   3921  int stripe = conf->geo.raid_disks *  in raid10_run() (local)
   3928  stripe /= conf->geo.near_copies;  in raid10_run()
   3929  if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)  in raid10_run()
   3930  mddev->queue->backing_dev_info->ra_pages = 2 * stripe;  in raid10_run()
   4713  int stripe = conf->geo.raid_disks *  in end_reshape() (local)
    [all …]
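The __raid10_find_phys() hits show the first-copy placement: the chunk number splits into a device index and a stripe number, the stripe is scaled by far_copies before being converted back to sectors. A simplified userspace sketch of only the quoted lines — near/offset layouts and the placement of the additional copies are ignored:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t find_phys(uint64_t chunk, unsigned raid_disks,
                              unsigned far_copies, unsigned chunk_shift,
                              unsigned *dev)
    {
            uint64_t stripe = chunk;

            *dev = stripe % raid_disks;     /* line 619: sector_div() remainder */
            stripe /= raid_disks;
            stripe *= far_copies;           /* line 621 */
            return stripe << chunk_shift;   /* line 623: sectors */
    }

    int main(void)
    {
            unsigned dev;
            uint64_t sector = find_phys(10, 4, 2, 7, &dev); /* 2^7 = 128-sector chunks */

            printf("chunk 10 -> disk %u, device sector %llu\n",
                   dev, (unsigned long long)sector);
            return 0;
    }

The raid10_run() hits mirror raid0_run() above: both size the backing device's readahead to twice the full stripe width.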
/Linux-v4.19/fs/btrfs/
raid56.c:
    599  static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,  in rbio_stripe_page_index() (argument)
    602  return stripe * rbio->stripe_npages + index;  in rbio_stripe_page_index()
    609  static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,  in rbio_stripe_page() (argument)
    612  return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];  in rbio_stripe_page()
   1077  struct btrfs_bio_stripe *stripe;  in rbio_add_io_page() (local)
   1080  stripe = &rbio->bbio->stripes[stripe_nr];  in rbio_add_io_page()
   1081  disk_start = stripe->physical + (page_index << PAGE_SHIFT);  in rbio_add_io_page()
   1084  if (!stripe->dev->bdev)  in rbio_add_io_page()
   1096  if (last_end == disk_start && stripe->dev->bdev &&  in rbio_add_io_page()
   1098  last->bi_disk == stripe->dev->bdev->bd_disk &&  in rbio_add_io_page()
    [all …]
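rbio_stripe_page_index() flattens a (stripe, page index) pair into the rbio's single page array, and the line-1081 hit turns a page index into a byte address on the member device. A small standalone rendering of both calculations; PAGE_SHIFT and the sample numbers are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumes 4 KiB pages */

    /* line 602: flatten (stripe, index) into one flat page array */
    static int stripe_page_index(int stripe, int stripe_npages, int index)
    {
            return stripe * stripe_npages + index;
    }

    int main(void)
    {
            int stripe_npages = 16;         /* e.g. 64 KiB stripe / 4 KiB pages */
            uint64_t physical = 1048576;    /* hypothetical stripe start (bytes) */
            int page_index = 3;

            /* line 1081: byte address of this page on the member device */
            uint64_t disk_start = physical + ((uint64_t)page_index << PAGE_SHIFT);

            printf("flat index %d, disk_start %llu\n",
                   stripe_page_index(2, stripe_npages, page_index),
                   (unsigned long long)disk_start);
            return 0;
    }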
/Linux-v4.19/drivers/md/bcache/
writeback.c:
    509  unsigned int stripe_offset, stripe, sectors_dirty;  in bcache_dev_sectors_dirty_add() (local)
    517  stripe = offset_to_stripe(d, offset);  in bcache_dev_sectors_dirty_add()
    527  if (stripe >= d->nr_stripes)  in bcache_dev_sectors_dirty_add()
    531  d->stripe_sectors_dirty + stripe);  in bcache_dev_sectors_dirty_add()
    533  set_bit(stripe, d->full_dirty_stripes);  in bcache_dev_sectors_dirty_add()
    535  clear_bit(stripe, d->full_dirty_stripes);  in bcache_dev_sectors_dirty_add()
    539  stripe++;  in bcache_dev_sectors_dirty_add()
    557  unsigned int start_stripe, stripe, next_stripe;  in refill_full_stripes() (local)
    560  stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));  in refill_full_stripes()
    562  if (stripe >= dc->disk.nr_stripes)  in refill_full_stripes()
    [all …]
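bcache_dev_sectors_dirty_add() walks a dirty range stripe by stripe, bumping a per-stripe dirty-sector count and maintaining a full_dirty_stripes bitmap that refill_full_stripes() later scans. A hedged userspace rendering of that walk — plain counters and a bool array stand in for the kernel's atomics and bitmap, and STRIPE_SIZE/NR_STRIPES are made-up values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define STRIPE_SIZE  1024u      /* sectors per stripe, made up */
    #define NR_STRIPES   64u        /* made up */

    static unsigned sectors_dirty[NR_STRIPES];
    static bool full_dirty[NR_STRIPES];

    static void sectors_dirty_add(uint64_t offset, unsigned nr_sectors)
    {
            unsigned stripe = offset / STRIPE_SIZE;         /* offset_to_stripe() */
            unsigned stripe_offset = offset % STRIPE_SIZE;

            while (nr_sectors) {
                    unsigned s = STRIPE_SIZE - stripe_offset;

                    if (s > nr_sectors)
                            s = nr_sectors;
                    if (stripe >= NR_STRIPES)               /* line 527 */
                            return;

                    sectors_dirty[stripe] += s;
                    /* lines 533/535: set or clear the "fully dirty" flag */
                    full_dirty[stripe] = (sectors_dirty[stripe] == STRIPE_SIZE);

                    nr_sectors -= s;
                    stripe_offset = 0;
                    stripe++;                               /* line 539 */
            }
    }

    int main(void)
    {
            sectors_dirty_add(512, 2048);   /* spans stripes 0, 1 and 2 */
            for (unsigned i = 0; i < 3; i++)
                    printf("stripe %u: %u dirty%s\n", i, sectors_dirty[i],
                           full_dirty[i] ? " (full)" : "");
            return 0;
    }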
writeback.h:
     42  unsigned int stripe = offset_to_stripe(&dc->disk, offset);  in bcache_dev_stripe_dirty() (local)
     45  if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))  in bcache_dev_stripe_dirty()
     52  stripe++;  in bcache_dev_stripe_dirty()
/Linux-v4.19/Documentation/md/
raid5-ppl.txt:
      4  addressed by PPL is that after a dirty shutdown, parity of a particular stripe
     13  Partial parity for a write operation is the XOR of stripe data chunks not
     16  the stripe, consistent with its state before the write operation, regardless of
     18  this stripe is missing, this updated parity can be used to recover its
     27  stripe. It does not require a dedicated journaling drive. Write performance is
     34  silent data corruption. If a dirty disk of a stripe is lost, no PPL recovery is
     35  performed for this stripe (parity is not updated). So it is possible to have
     36  arbitrary data in the written part of a stripe if that disk is lost. In such
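Line 13 defines partial parity as the XOR of the stripe's data chunks that the write does not modify. A minimal sketch of why that is cheap to maintain, using a hypothetical 4-chunk stripe with one byte per chunk: the same value can be produced from the existing parity without reading the unmodified disks.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical 4-chunk stripe, one byte per chunk for brevity */
            uint8_t d[4] = { 0x11, 0x22, 0x33, 0x44 };
            uint8_t parity = d[0] ^ d[1] ^ d[2] ^ d[3];

            /* the write will modify chunks 0 and 1; partial parity is the
             * XOR of the chunks NOT being modified (line 13) ... */
            uint8_t pp = d[2] ^ d[3];

            /* ... which equals the old parity with the to-be-modified
             * chunks stripped back out, so no extra reads are needed */
            uint8_t pp2 = parity ^ d[0] ^ d[1];

            printf("partial parity %#x == %#x\n", pp, pp2);
            return 0;
    }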
raid5-cache.txt:
     25  and parity don't match. The reason is that a stripe write involves several RAID
     50  write. If a write crosses all RAID disks of a stripe, we call it full-stripe
     51  write. For non-full-stripe writes, MD must read old data before the new parity
     55  RAID disks only after the data becomes a full stripe write. This will
     99  release the memory cache. The flush conditions could be stripe becomes a full
    100  stripe write, free cache disk space is low or free in-kernel memory cache space
    105  data and data. If MD finds a stripe with data and valid parities (1 parity for
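Lines 50-51 distinguish full-stripe writes from read-modify-write parity updates. A byte-sized sketch of the difference, on a hypothetical 3+1 stripe: the RMW path folds the change into the old parity, which is why it must read old data first, while a full-stripe write computes parity purely from the new buffers.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical 3+1 stripe, one byte per chunk */
            uint8_t d[3] = { 0xAA, 0xBB, 0xCC };
            uint8_t parity = d[0] ^ d[1] ^ d[2];
            uint8_t new_d1 = 0x5A;

            /* non-full-stripe write of chunk 1: old data and old parity
             * must be read first (line 51), then the change folded in */
            uint8_t rmw_parity = parity ^ d[1] ^ new_d1;

            /* full-stripe write: every chunk is in hand, parity is
             * computed fresh and nothing is read back */
            d[1] = new_d1;
            uint8_t full_parity = d[0] ^ d[1] ^ d[2];

            printf("rmw %#x == full %#x\n", rmw_parity, full_parity);
            return 0;
    }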
/Linux-v4.19/fs/nfs/blocklayout/
dev.c:
    127  p = xdr_decode_hyper(p, &b->stripe.chunk_size);  in nfs4_block_decode_volume()
    128  b->stripe.volumes_count = be32_to_cpup(p++);  in nfs4_block_decode_volume()
    129  if (b->stripe.volumes_count > PNFS_BLOCK_MAX_DEVICES) {  in nfs4_block_decode_volume()
    130  dprintk("Too many volumes: %d\n", b->stripe.volumes_count);  in nfs4_block_decode_volume()
    134  p = xdr_inline_decode(xdr, b->stripe.volumes_count * 4);  in nfs4_block_decode_volume()
    137  for (i = 0; i < b->stripe.volumes_count; i++)  in nfs4_block_decode_volume()
    138  b->stripe.volumes[i] = be32_to_cpup(p++);  in nfs4_block_decode_volume()
    453  d->children = kcalloc(v->stripe.volumes_count,  in bl_parse_stripe()
    458  for (i = 0; i < v->stripe.volumes_count; i++) {  in bl_parse_stripe()
    460  volumes, v->stripe.volumes[i], gfp_mask);  in bl_parse_stripe()
    [all …]
blocklayout.h:
     83  } stripe;  (member)
/Linux-v4.19/Documentation/device-mapper/
unstriped.txt:
     10  <number of stripes> <chunk size> <stripe #> <dev_path> <offset>
     21  <stripe #>
     22  The stripe number within the device that corresponds to physical
     29  An example of undoing an existing dm-stripe
     84  in a 256k stripe across the two cores:
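Line 10 is the target's table format. The work such a target has to do is ordinary inverse striping; the sketch below is an illustrative reconstruction of that mapping, not the target's actual code, and it ignores the <offset> parameter:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Logical sector s of stripe member k, with chunk size c (sectors) and
     * n members, mapped back into the original striped device.
     */
    static uint64_t unstripe_map(uint64_t s, uint64_t c, unsigned n, unsigned k)
    {
            return (s / c) * c * n          /* full rows below this one */
                 + (uint64_t)k * c          /* this member's column */
                 + s % c;                   /* offset within the chunk */
    }

    int main(void)
    {
            /* 256k chunks = 512 sectors, 2 members, member #1 */
            printf("logical 0   -> striped sector %llu\n",
                   (unsigned long long)unstripe_map(0, 512, 2, 1));
            printf("logical 512 -> striped sector %llu\n",
                   (unsigned long long)unstripe_map(512, 512, 2, 1));
            return 0;
    }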
striped.txt:
      1  dm-stripe
dm-raid.txt:
     63  "stripe size". It is the only mandatory parameter and
    157  starting at data_offset to fill up a new stripe with the larger
    159  and write that new stripe to offset 0. Same will be applied to all
    294  of a RAID 4/5/6 stripe and if subsequent read results are not
    351  1.14.0 Fix reshape race on small devices. Fix stripe adding reshape
dm-log.txt:
      6  inconsistent because a RAID stripe is currently being operated on or
/Linux-v4.19/fs/exofs/
ore_raid.c:
    142  struct __1_page_stripe *stripe = &sp2d->_1p_stripes[i];  in _sp2d_alloc() (local)
    157  stripe->alloc = true;  in _sp2d_alloc()
    165  stripe->pages = (void *)__a1pa;  in _sp2d_alloc()
    166  stripe->scribble = stripe->pages + group_width;  in _sp2d_alloc()
    167  stripe->page_is_read = (char *)stripe->scribble + group_width;  in _sp2d_alloc()
/Linux-v4.19/Documentation/filesystems/
ntfs.txt:
     13  - Using NTFS volume and stripe sets
     33  For fault tolerance and raid support (i.e. volume and stripe sets), you can
    228  Using NTFS volume and stripe sets
    231  For support of volume and stripe sets, you can either use the kernel's
    240  You will need to create a table of the components of the volume/stripe set and
    245  though untested, there is no reason why stripe sets, i.e. raid level 0, and
    384  Linear volume sets, i.e. linear raid, as well as stripe sets, i.e. raid level
    391  NTFS volume/stripe you are configuring in /etc/raidtab as the persistent
    394  Windows by default uses a stripe chunk size of 64k, so you probably want the
    397  For example, if you have a stripe set consisting of two partitions /dev/hda5
    [all …]
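Lines 384-397 walk through describing a Windows-created stripe set in /etc/raidtab for the old md raidtools. A hypothetical two-partition entry in that format — the first device and the 64k chunk size follow the doc's own hints, the second device is invented, and persistent-superblock must be 0 because Windows never wrote an MD superblock:

    # hypothetical layout; substitute your actual partitions
    raiddev /dev/md0
            raid-level              0
            nr-raid-disks           2
            nr-spare-disks          0
            persistent-superblock   0
            chunk-size              64k
            device                  /dev/hda5
            raid-disk               0
            device                  /dev/hdb1
            raid-disk               1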
xfs.txt:
    141  stripe unit configured at mkfs time.
    154  Data allocations will not be aligned at stripe unit
    190  Used to specify the stripe unit and width for a RAID device
    191  or a stripe volume. "value" must be specified in 512-byte
    207  Data allocations will be rounded up to stripe width boundaries
    209  size is larger than the stripe width size.
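Lines 190-191 describe the sunit/swidth mount options, given in 512-byte units. As a hypothetical worked example: an array with a 64 KiB chunk and four data disks gives sunit=128 (64 KiB / 512 bytes) and swidth=512 (4 x sunit), i.e. mount -o sunit=128,swidth=512 <device> <mountpoint>.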
/Linux-v4.19/Documentation/scsi/
arcmsr_spec.txt:
    260  ** byte 8 : stripe size
    485  ** 10:new stripe size
    508  ** byte 29 : stripe size
    528  ** byte 29 : new stripe size
ChangeLog.ips:
    121  8K stripe size.
/Linux-v4.19/Documentation/ABI/testing/
sysfs-block:
    128  block size. For RAID arrays it is often the stripe
    141  usually the stripe width or the internal track size. A
    269  stripe segment. For a zoned block device, either
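The first two fragments appear to come from the descriptions of queue/minimum_io_size and queue/optimal_io_size. As a hypothetical illustration: a RAID5 array of four data disks with 64 KiB chunks would typically report minimum_io_size = 65536 (one chunk) and optimal_io_size = 262144 (one full stripe width).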
sysfs-fs-ext4:
     16  stripe size is not set in the ext4 superblock
/Linux-v4.19/include/uapi/linux/
btrfs_tree.h:
    430  struct btrfs_stripe stripe;  (member)
/Linux-v4.19/Documentation/admin-guide/
md.rst:
    742  number of entries in the stripe cache. This is writable, but
    746  number of active entries in the stripe cache
    749  number of times a stripe requiring preread will be bypassed by
    750  a stripe that does not require preread. For fairness defaults
    752  requires preread stripes to wait until all full-width stripe-
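These hits describe the raid5/6 sysfs attributes stripe_cache_size (line 742, writable), stripe_cache_active (746, read-only) and preread_bypass_threshold (749-752) under /sys/block/<mdX>/md/. For example, echo 8192 > /sys/block/md0/md/stripe_cache_size enlarges the cache; memory use grows roughly as stripe_cache_size * PAGE_SIZE * raid_disks.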