Definition of struct r5conf (drivers/md/raid5.h):

struct r5conf {
	struct hlist_head      *stripe_hashtbl;
	/* only protect corresponding hash list and inactive_list */
	spinlock_t              hash_locks[NR_STRIPE_HASH_LOCKS];
	struct mddev           *mddev;
	int                     chunk_sectors;
	int                     level, algorithm, rmw_level;
	int                     max_degraded;
	int                     raid_disks;
	int                     max_nr_stripes;
	int                     min_nr_stripes;

	/* leading edge of a reshape; MaxSector when no reshape is active */
	sector_t                reshape_progress;
	/* trailing edge of a reshape: everything before (or after) this
	 * address has already been reshaped */
	sector_t                reshape_safe;
	int                     previous_raid_disks;
	int                     prev_chunk_sectors;
	int                     prev_algo;
	short                   generation;          /* increments with every reshape */
	seqcount_t              gen_lock;            /* lock against generation changes */
	unsigned long           reshape_checkpoint;  /* time we last updated metadata */
	/* minimum difference between data_offset and new_data_offset
	 * across all devices; may be negative, but is closest to zero */
	long long               min_offset_diff;

	struct list_head        handle_list;         /* stripes needing handling */
	struct list_head        loprio_list;         /* low priority stripes */
	struct list_head        hold_list;           /* preread ready stripes */
	struct list_head        delayed_list;        /* stripes that have plugged requests */
	struct list_head        bitmap_list;         /* stripes delaying awaiting bitmap update */
	struct bio             *retry_read_aligned;  /* currently retrying aligned bios */
	unsigned int            retry_read_offset;   /* sector offset into retry_read_aligned */
	struct bio             *retry_read_aligned_list; /* aligned bios retry list */
	atomic_t                preread_active_stripes; /* stripes with scheduled io */
	atomic_t                active_aligned_reads;
	atomic_t                pending_full_writes; /* full write backlog */
	int                     bypass_count;        /* bypassed prereads */
	int                     bypass_threshold;    /* preread nice */
	int                     skip_copy;           /* don't copy data from bio to stripe cache */
	struct list_head       *last_hold;           /* detect hold_list promotions */

	atomic_t                reshape_stripes;     /* stripes with pending writes for reshape */
	/* we need two cache names as we temporarily have two caches */
	int                     active_name;
	char                    cache_name[2][32];
	struct kmem_cache      *slab_cache;          /* for allocating stripes */
	struct mutex            cache_size_mutex;    /* protect changes to cache size */

	int                     seq_flush, seq_write;
	int                     quiesce;

	/* set to 1 if a full sync is needed (fresh device added);
	 * cleared when a sync completes */
	int                     fullsync;
	int                     recovery_disabled;
	/* per-CPU variables */
	struct raid5_percpu {
		/* ... per-CPU scratch fields elided in this listing ... */
	} __percpu *percpu;
	int                     scribble_disks;
	int                     scribble_sectors;
	struct hlist_node       node;

	/* free stripes pool */
	atomic_t                active_stripes;
	struct list_head        inactive_list[NR_STRIPE_HASH_LOCKS];

	atomic_t                r5c_cached_full_stripes;
	struct list_head        r5c_full_stripe_list;
	atomic_t                r5c_cached_partial_stripes;
	struct list_head        r5c_partial_stripe_list;
	atomic_t                r5c_flushing_full_stripes;
	atomic_t                r5c_flushing_partial_stripes;

	atomic_t                empty_inactive_list_nr;
	struct llist_head       released_stripes;
	wait_queue_head_t       wait_for_quiescent;
	wait_queue_head_t       wait_for_stripe;
	wait_queue_head_t       wait_for_overlap;
	unsigned long           cache_state;
	struct shrinker         shrinker;
	int                     pool_size;           /* number of disks in stripeheads in pool */
	spinlock_t              device_lock;
	struct disk_info       *disks;
	struct bio_set          bio_split;

	/* when taking over an array from a different personality, the new
	 * thread is stored here until the array is fully activated */
	struct md_thread       *thread;
	struct list_head        temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group  *worker_groups;
	int                     group_cnt;
	int                     worker_cnt_per_group;
	struct r5l_log         *log;
	void                   *log_private;

	spinlock_t              pending_bios_lock;
	bool                    batch_bio_dispatch;
	struct r5pending_data  *pending_data;
	struct list_head        free_list;
	struct list_head        pending_list;
	int                     pending_data_cnt;
	struct r5pending_data  *next_pending_data;
};
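
The first two fields implement sharded locking for the stripe cache: instead of one global lock, raid5 keeps NR_STRIPE_HASH_LOCKS (8 in raid5.h) spinlocks, each protecting one slice of stripe_hashtbl plus the matching inactive_list[] entry, so lookups on different buckets do not contend. Below is a minimal userspace sketch of that pattern, assuming pthreads; the hash, the names (bucket_of, insert_stripe, struct stripe), and the singly linked chains are illustrative, not the kernel's.

/* Minimal userspace sketch of the sharded hash-lock pattern behind
 * stripe_hashtbl/hash_locks[]. Only NR_STRIPE_HASH_LOCKS = 8 is taken
 * from raid5.h; everything else is illustrative. Build with -lpthread. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_BUCKET_LOCKS 8              /* kernel: NR_STRIPE_HASH_LOCKS */

struct stripe {
	uint64_t sector;               /* stripe's start sector */
	struct stripe *next;           /* hash-chain link */
};

static struct stripe *buckets[NR_BUCKET_LOCKS];       /* cf. stripe_hashtbl */
static pthread_mutex_t bucket_locks[NR_BUCKET_LOCKS]; /* cf. hash_locks[] */

/* Map a sector to a lock/bucket index. One lock then covers both the
 * hash chain and (in the kernel) the matching inactive_list entry. */
static unsigned bucket_of(uint64_t sector)
{
	return (unsigned)(sector & (NR_BUCKET_LOCKS - 1));
}

static void insert_stripe(struct stripe *sh)
{
	unsigned b = bucket_of(sh->sector);

	pthread_mutex_lock(&bucket_locks[b]);  /* only this shard stalls */
	sh->next = buckets[b];
	buckets[b] = sh;
	pthread_mutex_unlock(&bucket_locks[b]);
}

int main(void)
{
	struct stripe a = { .sector = 8 }, b = { .sector = 13 };

	for (unsigned i = 0; i < NR_BUCKET_LOCKS; i++)
		pthread_mutex_init(&bucket_locks[i], NULL);

	insert_stripe(&a);
	insert_stripe(&b);
	printf("sector %llu -> bucket %u\n",
	       (unsigned long long)b.sector, bucket_of(b.sector));
	return 0;
}

In the kernel the same bucket index also selects temp_inactive_list[], which batches stripe releases per bucket so one release path does not have to take all eight locks.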
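
Several of the list heads above form a small scheduler: handle_list carries normal work, hold_list carries stripes that are ready for preread, and bypass_count/bypass_threshold keep prereads from starving while still giving normal stripes priority. The sketch below models that heuristic only loosely, after __get_priority_stripe() in drivers/md/raid5.c; the real function also consults loprio_list, last_hold, STRIPE_IO_STARTED, active_aligned_reads, and the worker groups, and every name here other than the field names is hypothetical.

/* Loose model of the handle_list/hold_list bypass heuristic, after
 * __get_priority_stripe() in drivers/md/raid5.c. Simplified: the real
 * code counts a bypass only while the same hold_list head keeps
 * waiting (tracked via last_hold) and handles several other queues. */
#include <stddef.h>
#include <stdio.h>

struct stripe_ref { struct stripe_ref *next; };
struct queue { struct stripe_ref *head; };

struct sched_state {
	struct queue handle_list;  /* stripes needing handling */
	struct queue hold_list;    /* preread-ready stripes */
	int bypass_count;          /* how often hold_list was bypassed */
	int bypass_threshold;      /* tunable "preread nice" */
};

static struct stripe_ref *pop(struct queue *q)
{
	struct stripe_ref *s = q->head;

	if (s)
		q->head = s->next;
	return s;
}

/* Normal work wins, but every bypass of a waiting preread stripe is
 * counted; once the count exceeds bypass_threshold, a stripe is
 * promoted out of hold_list so prereads cannot starve forever. */
static struct stripe_ref *next_stripe(struct sched_state *st)
{
	if (st->handle_list.head) {
		if (st->hold_list.head)
			st->bypass_count++;   /* a preread stripe waited */
		else
			st->bypass_count = 0;
		return pop(&st->handle_list);
	}
	if (st->hold_list.head && st->bypass_count > st->bypass_threshold) {
		st->bypass_count -= st->bypass_threshold;
		return pop(&st->hold_list);
	}
	return NULL;
}

int main(void)
{
	struct stripe_ref a1, a2 = { NULL }, b = { NULL };
	struct sched_state st = {
		.handle_list = { &a1 },
		.hold_list = { &b },
		.bypass_threshold = 1,
	};

	a1.next = &a2;
	next_stripe(&st);  /* serves a1; bypass_count = 1 */
	next_stripe(&st);  /* serves a2; bypass_count = 2 */
	/* handle_list is drained and the threshold is exceeded, so the
	 * preread stripe is finally promoted. */
	printf("promoted from hold_list: %s\n",
	       next_stripe(&st) == &b ? "yes" : "no");
	return 0;
}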