
Searched refs:mddev (Results 1 – 21 of 21) sorted by relevance

/Linux-v4.19/drivers/md/
md.c
98 static int remove_and_add_spares(struct mddev *mddev,
100 static void mddev_detach(struct mddev *mddev);
123 static inline int speed_min(struct mddev *mddev) in speed_min() argument
125 return mddev->sync_speed_min ? in speed_min()
126 mddev->sync_speed_min : sysctl_speed_limit_min; in speed_min()
129 static inline int speed_max(struct mddev *mddev) in speed_max() argument
131 return mddev->sync_speed_max ? in speed_max()
132 mddev->sync_speed_max : sysctl_speed_limit_max; in speed_max()
208 struct mddev *mddev) in bio_alloc_mddev() argument
212 if (!mddev || !bioset_initialized(&mddev->bio_set)) in bio_alloc_mddev()
[all …]
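
The speed_min()/speed_max() helpers above show md's fallback idiom: a per-array value of 0 means "use the global sysctl default". Below is a minimal standalone sketch of the same idiom; the struct layout and values here are illustrative, not the kernel's.

    #include <stdio.h>

    static int sysctl_speed_limit_min = 1000;   /* global default, in KB/s */

    struct mddev {
        int sync_speed_min;                     /* 0 = no per-array override */
    };

    static int speed_min(struct mddev *mddev)
    {
        return mddev->sync_speed_min ?
            mddev->sync_speed_min : sysctl_speed_limit_min;
    }

    int main(void)
    {
        struct mddev a = { .sync_speed_min = 0 };      /* falls back */
        struct mddev b = { .sync_speed_min = 5000 };   /* overrides */

        printf("a: %d KB/s, b: %d KB/s\n", speed_min(&a), speed_min(&b));
        return 0;
    }
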
md.h
49 struct mddev *mddev; /* RAID array if running */ member
259 struct mddev *mddev; member
268 struct mddev { struct
476 void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); argument
502 static inline int __must_check mddev_lock(struct mddev *mddev) in mddev_lock() argument
504 return mutex_lock_interruptible(&mddev->reconfig_mutex); in mddev_lock()
510 static inline void mddev_lock_nointr(struct mddev *mddev) in mddev_lock_nointr() argument
512 mutex_lock(&mddev->reconfig_mutex); in mddev_lock_nointr()
515 static inline int mddev_trylock(struct mddev *mddev) in mddev_trylock() argument
517 return mutex_trylock(&mddev->reconfig_mutex); in mddev_trylock()
[all …]
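
The md.h helpers above wrap the per-array reconfig_mutex in interruptible, non-interruptible, and try-lock flavors. Below is a userspace analogue using pthreads, offered as a sketch only: a plain pthread mutex cannot fail with -EINTR the way mutex_lock_interruptible() can, and the trylock return value is inverted to keep the kernel's convention (1 on success).

    #include <pthread.h>
    #include <stdio.h>

    struct mddev {
        pthread_mutex_t reconfig_mutex;
    };

    static int mddev_lock(struct mddev *mddev)
    {
        /* Kernel version uses mutex_lock_interruptible() and may return
         * -EINTR; a plain pthread mutex cannot, so this always returns 0. */
        return pthread_mutex_lock(&mddev->reconfig_mutex);
    }

    static void mddev_lock_nointr(struct mddev *mddev)
    {
        pthread_mutex_lock(&mddev->reconfig_mutex);
    }

    static int mddev_trylock(struct mddev *mddev)
    {
        /* mutex_trylock() returns 1 on success; pthread_mutex_trylock()
         * returns 0 on success, so invert to match. */
        return pthread_mutex_trylock(&mddev->reconfig_mutex) == 0;
    }

    int main(void)
    {
        struct mddev md = { .reconfig_mutex = PTHREAD_MUTEX_INITIALIZER };

        if (mddev_lock(&md) == 0) {
            printf("locked for reconfiguration\n");
            pthread_mutex_unlock(&md.reconfig_mutex);
        }

        mddev_lock_nointr(&md);
        printf("trylock while held: %d\n", mddev_trylock(&md));   /* 0 */
        pthread_mutex_unlock(&md.reconfig_mutex);
        return 0;
    }
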
raid0.c
36 static int raid0_congested(struct mddev *mddev, int bits) in raid0_congested() argument
38 struct r0conf *conf = mddev->private; in raid0_congested()
54 static void dump_zones(struct mddev *mddev) in dump_zones() argument
60 struct r0conf *conf = mddev->private; in dump_zones()
63 mdname(mddev), in dump_zones()
84 static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) in create_strip_zones() argument
99 rdev_for_each(rdev1, mddev) { in create_strip_zones()
101 mdname(mddev), in create_strip_zones()
107 sector_div(sectors, mddev->chunk_sectors); in create_strip_zones()
108 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
[all …]
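
create_strip_zones() above trims each member device to a whole number of chunks, so every stripe spans full chunks. A standalone sketch of that rounding follows; the kernel's sector_div() is a macro that divides in place and returns the remainder, restated here as a function taking a pointer, and the sizes are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Analogue of the kernel's sector_div(): divide in place, return rem. */
    static uint32_t sector_div(sector_t *n, uint32_t base)
    {
        uint32_t rem = (uint32_t)(*n % base);
        *n /= base;
        return rem;
    }

    int main(void)
    {
        sector_t dev_sectors = 1000005;    /* raw size of one member device */
        uint32_t chunk_sectors = 128;      /* 64 KiB chunks, 512-byte sectors */

        sector_t sectors = dev_sectors;
        sector_div(&sectors, chunk_sectors);            /* whole chunks */
        sector_t usable = sectors * chunk_sectors;      /* rounded down */

        printf("usable %llu of %llu sectors\n",
               (unsigned long long)usable, (unsigned long long)dev_sectors);
        return 0;
    }
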
md-cluster.h
9 struct mddev;
13 int (*join)(struct mddev *mddev, int nodes);
14 int (*leave)(struct mddev *mddev);
15 int (*slot_number)(struct mddev *mddev);
16 int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
17 int (*metadata_update_start)(struct mddev *mddev);
18 int (*metadata_update_finish)(struct mddev *mddev);
19 void (*metadata_update_cancel)(struct mddev *mddev);
20 int (*resync_start)(struct mddev *mddev);
21 int (*resync_finish)(struct mddev *mddev);
[all …]
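
md_cluster_operations above is a table of function pointers that a clustering backend fills in and md core calls through. A minimal sketch of the same dispatch pattern, with a stub backend and a two-entry table standing in for the real registration machinery:

    #include <stdio.h>

    struct mddev;   /* opaque here, as in the header above */

    struct md_cluster_operations {
        int (*join)(struct mddev *mddev, int nodes);
        int (*leave)(struct mddev *mddev);
    };

    static int stub_join(struct mddev *mddev, int nodes)
    {
        (void)mddev;
        printf("joining cluster with %d nodes\n", nodes);
        return 0;
    }

    static int stub_leave(struct mddev *mddev)
    {
        (void)mddev;
        printf("leaving cluster\n");
        return 0;
    }

    static const struct md_cluster_operations stub_ops = {
        .join  = stub_join,
        .leave = stub_leave,
    };

    int main(void)
    {
        const struct md_cluster_operations *ops = &stub_ops;

        ops->join(NULL, 3);    /* caller dispatches without knowing the backend */
        ops->leave(NULL);
        return 0;
    }

In the kernel the table is registered by the separate md-cluster module via register_md_cluster_operations(), so md core carries no hard dependency on the DLM.
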
md-linear.c
31 static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector) in which_dev() argument
37 hi = mddev->raid_disks - 1; in which_dev()
38 conf = mddev->private; in which_dev()
62 static int linear_congested(struct mddev *mddev, int bits) in linear_congested() argument
68 conf = rcu_dereference(mddev->private); in linear_congested()
79 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) in linear_size() argument
84 conf = mddev->private; in linear_size()
92 static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) in linear_conf() argument
107 rdev_for_each(rdev, mddev) { in linear_conf()
114 mdname(mddev)); in linear_conf()
[all …]
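
which_dev() above maps a sector to a member device: linear (concatenation) mode keeps a cumulative end_sector per device, so the lookup is a binary search for the first device whose end_sector exceeds the target. A userspace sketch of that search, with illustrative struct layout and sizes:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    struct dev_info {
        sector_t end_sector;   /* cumulative end of this member device */
    };

    /* First device whose end_sector is strictly greater than `sector`. */
    static struct dev_info *which_dev(struct dev_info *disks, int raid_disks,
                                      sector_t sector)
    {
        int lo = 0, hi = raid_disks - 1;

        while (hi > lo) {
            int mid = (lo + hi) / 2;

            if (sector < disks[mid].end_sector)
                hi = mid;
            else
                lo = mid + 1;
        }
        return &disks[lo];
    }

    int main(void)
    {
        struct dev_info disks[] = { { 1000 }, { 1800 }, { 4000 } };

        printf("sector 1700 -> disk %ld\n",
               (long)(which_dev(disks, 3, 1700) - disks));   /* prints 1 */
        return 0;
    }
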
md-cluster.c
32 struct mddev *mddev; /* pointing back to mddev. */ member
73 struct mddev *mddev; /* the md device which md_cluster_info belongs to */ member
155 struct mddev *mddev) in dlm_lock_sync_interruptible() argument
167 || test_bit(MD_CLOSING, &mddev->flags)); in dlm_lock_sync_interruptible()
188 static struct dlm_lock_resource *lockres_init(struct mddev *mddev, in lockres_init() argument
193 struct md_cluster_info *cinfo = mddev->cluster_info; in lockres_init()
201 res->mddev = mddev; in lockres_init()
273 static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres) in read_resync_info() argument
296 struct mddev *mddev = thread->mddev; in recover_bitmaps() local
297 struct md_cluster_info *cinfo = mddev->cluster_info; in recover_bitmaps()
[all …]
md-multipath.c
62 struct mddev *mddev = mp_bh->mddev; in multipath_reschedule_retry() local
63 struct mpconf *conf = mddev->private; in multipath_reschedule_retry()
68 md_wakeup_thread(mddev->thread); in multipath_reschedule_retry()
79 struct mpconf *conf = mp_bh->mddev->private; in multipath_end_bh_io()
89 struct mpconf *conf = mp_bh->mddev->private; in multipath_end_request()
99 md_error (mp_bh->mddev, rdev); in multipath_end_request()
106 rdev_dec_pending(rdev, conf->mddev); in multipath_end_request()
109 static bool multipath_make_request(struct mddev *mddev, struct bio * bio) in multipath_make_request() argument
111 struct mpconf *conf = mddev->private; in multipath_make_request()
116 md_flush_request(mddev, bio); in multipath_make_request()
[all …]
md-bitmap.c
36 return bitmap->mddev ? mdname(bitmap->mddev) : "mdX"; in bmname()
148 static int read_sb_page(struct mddev *mddev, loff_t offset, in read_sb_page() argument
157 rdev_for_each(rdev, mddev) { in read_sb_page()
175 static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev) in next_active_rdev() argument
193 rdev = list_entry(&mddev->disks, struct md_rdev, same_set); in next_active_rdev()
196 rdev_dec_pending(rdev, mddev); in next_active_rdev()
198 list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) { in next_active_rdev()
215 struct mddev *mddev = bitmap->mddev; in write_sb_page() local
220 while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { in write_sb_page()
222 loff_t offset = mddev->bitmap_info.offset; in write_sb_page()
[all …]
raid1.c
149 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) in r1buf_pool_alloc()
221 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio()
229 struct r1conf *conf = r1_bio->mddev->private; in put_buf()
236 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
247 struct mddev *mddev = r1_bio->mddev; in reschedule_retry() local
248 struct r1conf *conf = mddev->private; in reschedule_retry()
258 md_wakeup_thread(mddev->thread); in reschedule_retry()
269 struct r1conf *conf = r1_bio->mddev->private; in call_bio_endio()
303 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos()
315 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk()
[all …]
raid10.c
104 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
106 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
167 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
168 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
212 &conf->mddev->recovery)) { in r10buf_pool_alloc()
293 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
301 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
311 struct mddev *mddev = r10_bio->mddev; in reschedule_retry() local
312 struct r10conf *conf = mddev->private; in reschedule_retry()
322 md_wakeup_thread(mddev->thread); in reschedule_retry()
[all …]
dm-raid.c
240 struct mddev md;
256 struct mddev *mddev = &rs->md; in rs_config_backup() local
258 l->new_level = mddev->new_level; in rs_config_backup()
259 l->new_layout = mddev->new_layout; in rs_config_backup()
260 l->new_chunk_sectors = mddev->new_chunk_sectors; in rs_config_backup()
265 struct mddev *mddev = &rs->md; in rs_config_restore() local
267 mddev->new_level = l->new_level; in rs_config_restore()
268 mddev->new_layout = l->new_layout; in rs_config_restore()
269 mddev->new_chunk_sectors = l->new_chunk_sectors; in rs_config_restore()
680 struct mddev *mddev = &rs->md; in rs_set_rdev_sectors() local
[all …]
raid5.c
199 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
271 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
277 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
358 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
404 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
409 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
625 if (conf->mddev->reshape_position == MaxSector) in has_failed()
626 return conf->mddev->degraded > conf->max_degraded; in has_failed()
837 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
1072 if (!conf->mddev->external && in ops_run_io()
[all …]
md-bitmap.h
195 struct mddev *mddev; /* the md device that the bitmap is for */ member
239 struct bitmap *md_bitmap_create(struct mddev *mddev, int slot);
240 int md_bitmap_load(struct mddev *mddev);
241 void md_bitmap_flush(struct mddev *mddev);
242 void md_bitmap_destroy(struct mddev *mddev);
262 void md_bitmap_sync_with_cluster(struct mddev *mddev,
267 void md_bitmap_daemon_work(struct mddev *mddev);
271 struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot);
272 int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
275 void md_bitmap_wait_behind_writes(struct mddev *mddev);
md-faulty.c
173 static bool faulty_make_request(struct mddev *mddev, struct bio *bio) in faulty_make_request() argument
175 struct faulty_conf *conf = mddev->private; in faulty_make_request()
217 struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in faulty_make_request()
230 static void faulty_status(struct seq_file *seq, struct mddev *mddev) in faulty_status() argument
232 struct faulty_conf *conf = mddev->private; in faulty_status()
263 static int faulty_reshape(struct mddev *mddev) in faulty_reshape() argument
265 int mode = mddev->new_layout & ModeMask; in faulty_reshape()
266 int count = mddev->new_layout >> ModeShift; in faulty_reshape()
267 struct faulty_conf *conf = mddev->private; in faulty_reshape()
269 if (mddev->new_layout < 0) in faulty_reshape()
[all …]
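
faulty_reshape() above unpacks its configuration from new_layout: the fault-injection mode sits in the low bits and the trigger count in the bits above ModeShift. A sketch of that packing; the ModeMask/ModeShift values are assumed here to match md-faulty.c's 0x1f/5 convention.

    #include <stdio.h>

    #define ModeMask  0x1f   /* assumed to match md-faulty.c */
    #define ModeShift 5

    static int pack_layout(int mode, int count)
    {
        return (count << ModeShift) | (mode & ModeMask);
    }

    int main(void)
    {
        int layout = pack_layout(3, 100);   /* mode 3, trigger every 100th I/O */

        printf("mode=%d count=%d\n",
               layout & ModeMask, layout >> ModeShift);   /* mode=3 count=100 */
        return 0;
    }
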
raid5-ppl.c
96 struct mddev *mddev; member
414 md_error(ppl_conf->mddev, log->rdev); in ppl_log_endio()
562 struct r5conf *conf = ppl_conf->mddev->private; in ppl_io_unit_finished()
597 struct r5conf *conf = ppl_conf->mddev->private; in ppl_flush_endio()
606 rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio)); in ppl_flush_endio()
608 md_error(rdev->mddev, rdev); in ppl_flush_endio()
616 md_wakeup_thread(conf->mddev->thread); in ppl_flush_endio()
624 struct r5conf *conf = ppl_conf->mddev->private; in ppl_do_flush()
804 struct mddev *mddev = ppl_conf->mddev; in ppl_recover_entry() local
805 struct r5conf *conf = mddev->private; in ppl_recover_entry()
[all …]
raid5-cache.c
312 md_write_end(conf->mddev); in r5c_return_dev_pending_writes()
327 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in r5c_handle_cached_data_endio()
429 struct r5conf *conf = log->rdev->mddev->private; in r5c_update_log_state()
579 md_error(log->rdev->mddev, log->rdev); in r5l_log_endio()
614 md_wakeup_thread(log->rdev->mddev->thread); in r5l_log_endio()
695 struct mddev *mddev = log->rdev->mddev; in r5c_disable_writeback_async() local
696 struct r5conf *conf = mddev->private; in r5c_disable_writeback_async()
702 mdname(mddev)); in r5c_disable_writeback_async()
705 wait_event(mddev->sb_wait, in r5c_disable_writeback_async()
707 (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && in r5c_disable_writeback_async()
[all …]
raid1.h
64 struct mddev *mddev; member
69 struct mddev *mddev; member
161 struct mddev *mddev; member
md-multipath.h
10 struct mddev *mddev; member
26 struct mddev *mddev; member
raid10.h
29 struct mddev *mddev; member
127 struct mddev *mddev; member
raid5.h
573 struct mddev *mddev; member
758 extern int raid5_set_cache_size(struct mddev *mddev, int size);
768 extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
raid5-log.h
32 extern void r5c_update_on_rdev_error(struct mddev *mddev,
51 return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); in raid5_has_log()
56 return test_bit(MD_HAS_PPL, &conf->mddev->flags); in raid5_has_ppl()
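
raid5_has_log()/raid5_has_ppl() above probe single feature bits in the array's flags word. A userspace sketch of that test_bit()-style check; the bit positions are illustrative, not the kernel's MD_HAS_* values.

    #include <stdio.h>

    #define MD_HAS_JOURNAL 0   /* illustrative bit positions */
    #define MD_HAS_PPL     1

    static int test_bit(int nr, const unsigned long *addr)
    {
        return (int)((*addr >> nr) & 1UL);
    }

    int main(void)
    {
        unsigned long flags = 1UL << MD_HAS_PPL;

        printf("journal: %d, ppl: %d\n",
               test_bit(MD_HAS_JOURNAL, &flags), test_bit(MD_HAS_PPL, &flags));
        return 0;
    }
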