Lines matching refs:pgpath (cross-references for the pgpath identifier in the device-mapper multipath target, drivers/md/dm-mpath.c). Each entry gives the source line number, the matching line, and the enclosing function or declaration kind.

38 struct pgpath {  struct
50 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) argument
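
The path_to_pgpath() macro above is the standard container_of() idiom: a struct path is embedded inside struct pgpath, and the macro recovers the enclosing pgpath from a pointer to that member. A runnable userspace sketch of the same idiom follows; the struct fields are illustrative stubs, not the kernel's real definitions, and this container_of lacks the kernel version's type checking.

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel macro: step back from the
     * member to the start of the enclosing structure. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct path { int dev_id; };            /* illustrative stub */
    struct pgpath {
            int is_active;
            struct path path;               /* embedded member */
    };

    #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

    int main(void)
    {
            struct pgpath pgp = { .is_active = 1 };
            struct path *p = &pgp.path;

            /* Recover the enclosing pgpath from the embedded path. */
            struct pgpath *back = path_to_pgpath(p);

            printf("is_active via recovered pointer: %d\n", back->is_active);
            return 0;
    }
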
76 struct pgpath *current_pgpath;
106 struct pgpath *pgpath; member
111 typedef int (*action_fn) (struct pgpath *pgpath);
115 static void activate_or_offline_path(struct pgpath *pgpath);
150 static struct pgpath *alloc_pgpath(void) in alloc_pgpath()
152 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL); in alloc_pgpath() local
154 if (!pgpath) in alloc_pgpath()
157 pgpath->is_active = true; in alloc_pgpath()
159 return pgpath; in alloc_pgpath()
162 static void free_pgpath(struct pgpath *pgpath) in free_pgpath() argument
164 kfree(pgpath); in free_pgpath()
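
The fragments above contain nearly the whole constructor/destructor pair. Pieced together they read as below; the bare return NULL after the failed-allocation check is inferred, not shown in the listing.

    static struct pgpath *alloc_pgpath(void)
    {
            struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

            if (!pgpath)
                    return NULL;            /* inferred failure path */

            pgpath->is_active = true;       /* new paths start usable */

            return pgpath;
    }

    static void free_pgpath(struct pgpath *pgpath)
    {
            kfree(pgpath);
    }
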
181 struct pgpath *pgpath, *tmp; in free_pgpaths() local
183 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { in free_pgpaths()
184 list_del(&pgpath->list); in free_pgpaths()
185 dm_put_device(ti, pgpath->path.dev); in free_pgpaths()
186 free_pgpath(pgpath); in free_pgpaths()
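
free_pgpaths() uses list_for_each_entry_safe() because it deletes and frees entries while walking the list: the plain iterator would dereference pgpath->list.next after free_pgpath() has already freed it, so the _safe variant caches the next element in tmp first. A runnable userspace model of the same idiom, with a hand-rolled singly linked list standing in for the kernel's <linux/list.h>:

    #include <stdlib.h>

    struct node { int id; struct node *next; };

    /* Same idea as list_for_each_entry_safe(): remember the next
     * element before the current one is freed. */
    static void free_all(struct node *head)
    {
            struct node *n, *tmp;

            for (n = head; n; n = tmp) {
                    tmp = n->next;          /* cached before free() */
                    free(n);
            }
    }

    int main(void)
    {
            struct node *head = NULL;

            for (int i = 0; i < 3; i++) {
                    struct node *n = malloc(sizeof(*n));

                    if (!n)
                            abort();
                    n->id = i;
                    n->next = head;
                    head = n;
            }
            free_all(head);
            return 0;
    }
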
298 mpio->pgpath = NULL; in multipath_init_per_bio_data()
311 struct pgpath *pgpath; in __pg_init_all_paths() local
329 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) { in __pg_init_all_paths()
331 if (!pgpath->is_active) in __pg_init_all_paths()
333 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path, in __pg_init_all_paths()
370 static struct pgpath *choose_path_in_pg(struct multipath *m, in choose_path_in_pg()
376 struct pgpath *pgpath; in choose_path_in_pg() local
382 pgpath = path_to_pgpath(path); in choose_path_in_pg()
387 m->current_pgpath = pgpath; in choose_path_in_pg()
392 return pgpath; in choose_path_in_pg()
395 static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) in choose_pgpath()
399 struct pgpath *pgpath; in choose_pgpath() local
419 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
420 if (!IS_ERR_OR_NULL(pgpath)) in choose_pgpath()
421 return pgpath; in choose_pgpath()
428 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
429 if (!IS_ERR_OR_NULL(pgpath)) in choose_pgpath()
430 return pgpath; in choose_pgpath()
443 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
444 if (!IS_ERR_OR_NULL(pgpath)) { in choose_pgpath()
450 return pgpath; in choose_pgpath()
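
The three IS_ERR_OR_NULL() checks above are three stages of a fallback search. A condensed, kernel-style sketch of the control flow, under the assumption (from the full source) that an explicitly requested next_pg is tried first, then the current group, then everything else; locking, the valid-path count check, and the pg_init retry flags are omitted:

    static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
    {
            struct priority_group *pg;
            struct pgpath *pgpath;
            unsigned int bypassed = 1;

            /* 1. A switch to another group was requested explicitly. */
            if (m->next_pg) {
                    pgpath = choose_path_in_pg(m, m->next_pg, nr_bytes);
                    if (!IS_ERR_OR_NULL(pgpath))
                            return pgpath;
            }

            /* 2. Stay on the current group while it still has paths. */
            pg = m->current_pg;
            if (pg) {
                    pgpath = choose_path_in_pg(m, pg, nr_bytes);
                    if (!IS_ERR_OR_NULL(pgpath))
                            return pgpath;
            }

            /*
             * 3. Walk all groups: the first pass skips bypassed ones,
             *    the second retries only those, as a last resort.
             */
            do {
                    list_for_each_entry(pg, &m->priority_groups, list) {
                            if (pg->bypassed == !!bypassed)
                                    continue;
                            pgpath = choose_path_in_pg(m, pg, nr_bytes);
                            if (!IS_ERR_OR_NULL(pgpath))
                                    return pgpath;
                    }
            } while (bypassed--);

            return NULL;
    }
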
508 struct pgpath *pgpath; in multipath_clone_and_map() local
515 pgpath = READ_ONCE(m->current_pgpath); in multipath_clone_and_map()
516 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in multipath_clone_and_map()
517 pgpath = choose_pgpath(m, nr_bytes); in multipath_clone_and_map()
519 if (!pgpath) { in multipath_clone_and_map()
530 mpio->pgpath = pgpath; in multipath_clone_and_map()
533 bdev = pgpath->path.dev->bdev; in multipath_clone_and_map()
541 activate_or_offline_path(pgpath); in multipath_clone_and_map()
558 if (pgpath->pg->ps.type->start_io) in multipath_clone_and_map()
559 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, in multipath_clone_and_map()
560 &pgpath->path, in multipath_clone_and_map()
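
The two lines that open multipath_clone_and_map() recur almost verbatim in __map_bio() and multipath_prepare_ioctl() below. Note the negation: path selection reruns for every request, which is what lets round-robin and service-time selectors rebalance per I/O; the cached current_pgpath is reused unmodified only while MPATHF_QUEUE_IO is set, i.e. while hardware-handler initialization is still in flight. The helper itself is not shown in the listing; a hedged sketch of what it presumably does (a lockless test first, re-tested under m->lock only if the bit looked set):

    static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
    {
            bool r = test_bit(MPATHF_bit, &m->flags);

            if (r) {
                    unsigned long flags;

                    spin_lock_irqsave(&m->lock, flags);
                    r = test_bit(MPATHF_bit, &m->flags);    /* re-check */
                    spin_unlock_irqrestore(&m->lock, flags);
            }

            return r;
    }
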
574 struct pgpath *pgpath = mpio->pgpath; in multipath_release_clone() local
576 if (pgpath && pgpath->pg->ps.type->end_io) in multipath_release_clone()
577 pgpath->pg->ps.type->end_io(&pgpath->pg->ps, in multipath_release_clone()
578 &pgpath->path, in multipath_release_clone()
607 static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) in __map_bio()
609 struct pgpath *pgpath; in __map_bio() local
613 pgpath = READ_ONCE(m->current_pgpath); in __map_bio()
614 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in __map_bio()
615 pgpath = choose_pgpath(m, bio->bi_iter.bi_size); in __map_bio()
617 if (!pgpath) { in __map_bio()
621 pgpath = ERR_PTR(-EAGAIN); in __map_bio()
632 return pgpath; in __map_bio()
638 struct pgpath *pgpath = __map_bio(m, bio); in __multipath_map_bio() local
640 if (IS_ERR(pgpath)) in __multipath_map_bio()
643 if (!pgpath) { in __multipath_map_bio()
650 mpio->pgpath = pgpath; in __multipath_map_bio()
652 if (dm_ps_use_hr_timer(pgpath->pg->ps.type)) in __multipath_map_bio()
656 bio_set_dev(bio, pgpath->path.dev->bdev); in __multipath_map_bio()
659 if (pgpath->pg->ps.type->start_io) in __multipath_map_bio()
660 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, in __multipath_map_bio()
661 &pgpath->path, in __multipath_map_bio()
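
__map_bio() has a tri-state return, visible in the fragments: a usable pgpath, NULL when there is no path, or ERR_PTR(-EAGAIN) when the bio has been queued for later resubmission. That is why __multipath_map_bio() checks IS_ERR() and NULL separately. A runnable userspace model of the kernel's ERR_PTR encoding, which folds small negative errno values into the top of the pointer range so one return value can carry all three cases; map_bio_model() is a hypothetical stand-in, not kernel code:

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    /* Userspace model of include/linux/err.h. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct pgpath { const char *name; };

    static struct pgpath good = { "sda" };

    static struct pgpath *map_bio_model(int state)
    {
            switch (state) {
            case 0:  return &good;              /* usable path          */
            case 1:  return NULL;               /* no path, fail the IO */
            default: return ERR_PTR(-EAGAIN);   /* queued for retry     */
            }
    }

    int main(void)
    {
            for (int s = 0; s < 3; s++) {
                    struct pgpath *p = map_bio_model(s);

                    if (IS_ERR(p))
                            printf("state %d: error %ld\n", s, PTR_ERR(p));
                    else if (!p)
                            printf("state %d: no path\n", s);
                    else
                            printf("state %d: path %s\n", s, p->name);
            }
            return 0;
    }
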
927 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps, in parse_path()
931 struct pgpath *p; in parse_path()
1020 struct pgpath *pgpath; in parse_priority_group() local
1032 pgpath = parse_path(&path_args, &pg->ps, ti); in parse_priority_group()
1033 if (IS_ERR(pgpath)) { in parse_priority_group()
1034 r = PTR_ERR(pgpath); in parse_priority_group()
1038 pgpath->pg = pg; in parse_priority_group()
1039 list_add_tail(&pgpath->list, &pg->pgpaths); in parse_priority_group()
1324 static int fail_path(struct pgpath *pgpath) in fail_path() argument
1327 struct multipath *m = pgpath->pg->m; in fail_path()
1331 if (!pgpath->is_active) in fail_path()
1336 pgpath->path.dev->name); in fail_path()
1338 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); in fail_path()
1339 pgpath->is_active = false; in fail_path()
1340 pgpath->fail_count++; in fail_path()
1344 if (pgpath == m->current_pgpath) in fail_path()
1348 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths)); in fail_path()
1363 static int reinstate_path(struct pgpath *pgpath) in reinstate_path() argument
1367 struct multipath *m = pgpath->pg->m; in reinstate_path()
1372 if (pgpath->is_active) in reinstate_path()
1377 pgpath->path.dev->name); in reinstate_path()
1379 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); in reinstate_path()
1383 pgpath->is_active = true; in reinstate_path()
1389 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { in reinstate_path()
1390 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) in reinstate_path()
1395 pgpath->path.dev->name, nr_valid_paths); in reinstate_path()
1406 if (pgpath->is_active) in reinstate_path()
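
fail_path() and reinstate_path() are mirror images: both tell the path selector first, then flip is_active, adjust the valid-path count, and either invalidate current_pgpath or re-kick pg_init on the current group. A condensed sketch of fail_path() pieced together from the fragments; locking, logging, and event notification are elided, and the atomic_dec() is inferred from the atomic_read() in the log line rather than shown:

    static int fail_path(struct pgpath *pgpath)
    {
            struct multipath *m = pgpath->pg->m;

            if (!pgpath->is_active)
                    return 0;                       /* already failed */

            /* Tell the path selector before flipping state. */
            pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
            pgpath->is_active = false;
            pgpath->fail_count++;

            atomic_dec(&m->nr_valid_paths);         /* inferred */

            if (pgpath == m->current_pgpath)
                    m->current_pgpath = NULL;       /* force reselection */

            return 0;
    }
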
1419 struct pgpath *pgpath; in action_dev() local
1423 list_for_each_entry(pgpath, &pg->pgpaths, list) { in action_dev()
1424 if (pgpath->path.dev == dev) in action_dev()
1425 r = action(pgpath); in action_dev()
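
The action_fn typedef near the top of the listing is what makes action_dev() generic: fail_path and reinstate_path share a signature, so the same device-matching walk can apply either one. A runnable userspace model of the dispatch; the array stands in for the kernel's pg->pgpaths list walk, and the -ENXIO default for "no matching device" is illustrative:

    #include <stdio.h>
    #include <errno.h>

    struct pgpath { int dev; int is_active; };

    /* Same shape as the typedef in the listing. */
    typedef int (*action_fn)(struct pgpath *pgpath);

    static int fail_path(struct pgpath *p)      { p->is_active = 0; return 0; }
    static int reinstate_path(struct pgpath *p) { p->is_active = 1; return 0; }

    /* Apply one action to every path whose device matches. */
    static int action_dev(struct pgpath *paths, int n, int dev,
                          action_fn action)
    {
            int r = -ENXIO;                 /* illustrative default */

            for (int i = 0; i < n; i++)
                    if (paths[i].dev == dev)
                            r = action(&paths[i]);

            return r;
    }

    int main(void)
    {
            struct pgpath paths[] = { { 8, 1 }, { 9, 1 } };

            action_dev(paths, 2, 9, fail_path);
            printf("dev 9 active: %d\n", paths[1].is_active);

            action_dev(paths, 2, 9, reinstate_path);
            printf("dev 9 active: %d\n", paths[1].is_active);
            return 0;
    }
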
1511 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) in pg_init_limit_reached() argument
1531 struct pgpath *pgpath = data; in pg_init_done() local
1532 struct priority_group *pg = pgpath->pg; in pg_init_done()
1551 fail_path(pgpath); in pg_init_done()
1566 if (pg_init_limit_reached(m, pgpath)) in pg_init_done()
1567 fail_path(pgpath); in pg_init_done()
1577 fail_path(pgpath); in pg_init_done()
1582 if (pgpath == m->current_pgpath) { in pg_init_done()
1616 static void activate_or_offline_path(struct pgpath *pgpath) in activate_or_offline_path() argument
1618 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in activate_or_offline_path()
1620 if (pgpath->is_active && !blk_queue_dying(q)) in activate_or_offline_path()
1621 scsi_dh_activate(q, pg_init_done, pgpath); in activate_or_offline_path()
1623 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); in activate_or_offline_path()
1628 struct pgpath *pgpath = in activate_path_work() local
1629 container_of(work, struct pgpath, activate_path.work); in activate_path_work()
1631 activate_or_offline_path(pgpath); in activate_path_work()
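
These fragments show the asynchronous pg_init plumbing end to end: the delayed-work item embedded in struct pgpath is converted back to its pgpath with container_of() (the same idiom as path_to_pgpath above), and scsi_dh_activate() is handed pg_init_done as the completion callback with the pgpath as its context argument. Condensed from the fragments, with the else keyword inferred from the offline fallback on the following line:

    static void activate_or_offline_path(struct pgpath *pgpath)
    {
            struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

            if (pgpath->is_active && !blk_queue_dying(q))
                    scsi_dh_activate(q, pg_init_done, pgpath);
            else
                    pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
    }

    static void activate_path_work(struct work_struct *work)
    {
            struct pgpath *pgpath =
                    container_of(work, struct pgpath, activate_path.work);

            activate_or_offline_path(pgpath);
    }
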
1638 struct pgpath *pgpath = mpio->pgpath; in multipath_end_io() local
1660 if (pgpath) in multipath_end_io()
1661 fail_path(pgpath); in multipath_end_io()
1672 if (pgpath) { in multipath_end_io()
1673 struct path_selector *ps = &pgpath->pg->ps; in multipath_end_io()
1676 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes, in multipath_end_io()
1688 struct pgpath *pgpath = mpio->pgpath; in multipath_end_io_bio() local
1695 if (pgpath) in multipath_end_io_bio()
1696 fail_path(pgpath); in multipath_end_io_bio()
1716 if (pgpath) { in multipath_end_io_bio()
1717 struct path_selector *ps = &pgpath->pg->ps; in multipath_end_io_bio()
1720 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes, in multipath_end_io_bio()
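
Both completion paths do the same two things: on a path error, fail_path() takes the path out of service, and if a path was used at all, the selector's optional end_io hook is invoked so selectors that track per-path load or latency can account the finished I/O. The NULL check before the indirect call is the usual optional-hook pattern; a runnable userspace model follows, with names of my own invention standing in for the dm path-selector ops:

    #include <stdio.h>
    #include <stddef.h>

    struct path { const char *name; };

    /* Optional hooks, mirroring ps.type->start_io / ->end_io:
     * a selector implements them only if it needs accounting. */
    struct selector_ops {
            void (*start_io)(struct path *p, size_t nr_bytes);
            void (*end_io)(struct path *p, size_t nr_bytes);
    };

    static void noisy_end_io(struct path *p, size_t n)
    {
            printf("%s: completed %zu bytes\n", p->name, n);
    }

    static const struct selector_ops no_hooks = { 0 };
    static const struct selector_ops accounting = { .end_io = noisy_end_io };

    static void complete_io(const struct selector_ops *ops,
                            struct path *p, size_t nr_bytes)
    {
            if (ops->end_io)                /* NULL check before dispatch */
                    ops->end_io(p, nr_bytes);
    }

    int main(void)
    {
            struct path sda = { "sda" };

            complete_io(&no_hooks, &sda, 4096);     /* silently skipped */
            complete_io(&accounting, &sda, 4096);
            return 0;
    }
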
1798 struct pgpath *p; in multipath_status()
2024 struct pgpath *pgpath; in multipath_prepare_ioctl() local
2028 pgpath = READ_ONCE(m->current_pgpath); in multipath_prepare_ioctl()
2029 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in multipath_prepare_ioctl()
2030 pgpath = choose_pgpath(m, 0); in multipath_prepare_ioctl()
2032 if (pgpath) { in multipath_prepare_ioctl()
2034 *bdev = pgpath->path.dev->bdev; in multipath_prepare_ioctl()
2075 struct pgpath *p; in multipath_iterate_devices()
2090 static int pgpath_busy(struct pgpath *pgpath) in pgpath_busy() argument
2092 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in pgpath_busy()
2110 struct pgpath *pgpath; in multipath_busy() local
2149 list_for_each_entry(pgpath, &pg->pgpaths, list) { in multipath_busy()
2150 if (pgpath->is_active) { in multipath_busy()
2152 if (!pgpath_busy(pgpath)) { in multipath_busy()
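
The closing fragments give the busy heuristic: a priority group counts as busy only if every one of its active paths is busy, so a single idle active path is enough to report the whole device as not busy. A condensed sketch of that inner loop; the busy and has_active flags are assumed names, everything else is from the fragments:

    busy = true;
    list_for_each_entry(pgpath, &pg->pgpaths, list) {
            if (pgpath->is_active) {
                    has_active = true;

                    if (!pgpath_busy(pgpath)) {
                            busy = false;   /* one idle path suffices */
                            break;
                    }
            }
    }
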