Lines matching refs:pgpath (cross-references to struct pgpath in the Linux device-mapper multipath target, drivers/md/dm-mpath.c). Each entry gives the file line number, the matching source line, and the enclosing function; trailing tags such as "struct", "argument", "member", and "local" mark the kind of symbol at that reference.
38 struct pgpath { struct
50 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) argument
76 struct pgpath *current_pgpath;
106 struct pgpath *pgpath; member
110 typedef int (*action_fn) (struct pgpath *pgpath);
114 static void activate_or_offline_path(struct pgpath *pgpath);
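The entries above cover the type itself: the definition (line 38), the container_of() wrapper (line 50), the cached-selection pointer (line 76), the per-I/O context member (line 106), and the action_fn callback type (line 110). As a reading aid, here is a minimal sketch of struct pgpath reconstructed only from the fields this listing exercises; field order and any fields not referenced here are assumptions.

	/* Sketch of struct pgpath; layout details beyond the
	 * referenced fields are assumptions.
	 */
	struct pgpath {
		struct list_head list;		/* linked into pg->pgpaths (lines 182, 1423) */

		struct priority_group *pg;	/* owning priority group (line 1037) */
		unsigned int fail_count;	/* cumulative failure count (line 1340) */

		struct dm_path path;		/* embedded path; see path_to_pgpath() (line 50) */
		struct delayed_work activate_path; /* pg_init work item (lines 331, 1390, 1629) */

		bool is_active;			/* path usable? (lines 156, 1339, 1383) */
	};
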
149 static struct pgpath *alloc_pgpath(void) in alloc_pgpath()
151 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL); in alloc_pgpath() local
153 if (!pgpath) in alloc_pgpath()
156 pgpath->is_active = true; in alloc_pgpath()
158 return pgpath; in alloc_pgpath()
161 static void free_pgpath(struct pgpath *pgpath) in free_pgpath() argument
163 kfree(pgpath); in free_pgpath()
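Lines 149-163 reassemble into the allocation/teardown pair almost verbatim; a sketch assuming only what the listed lines show:

	static struct pgpath *alloc_pgpath(void)
	{
		struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

		if (!pgpath)
			return NULL;	/* assumed: the listing omits the failure return */

		pgpath->is_active = true;	/* new paths start usable */

		return pgpath;
	}

	static void free_pgpath(struct pgpath *pgpath)
	{
		kfree(pgpath);
	}
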
180 struct pgpath *pgpath, *tmp; in free_pgpaths() local
182 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { in free_pgpaths()
183 list_del(&pgpath->list); in free_pgpaths()
184 dm_put_device(ti, pgpath->path.dev); in free_pgpaths()
185 free_pgpath(pgpath); in free_pgpaths()
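The teardown loop at lines 180-185 drops each path's device reference before freeing the wrapper; the function signature below is an assumption inferred from the dm_put_device(ti, ...) call:

	static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
	{
		struct pgpath *pgpath, *tmp;

		/* _safe variant: entries are deleted while walking the list */
		list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
			list_del(&pgpath->list);
			dm_put_device(ti, pgpath->path.dev);	/* drop the dm device ref */
			free_pgpath(pgpath);
		}
	}
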
297 mpio->pgpath = NULL; in multipath_init_per_bio_data()
309 struct pgpath *pgpath; in __pg_init_all_paths() local
327 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) { in __pg_init_all_paths()
329 if (!pgpath->is_active) in __pg_init_all_paths()
331 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path, in __pg_init_all_paths()
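Lines 309-331 show the pg_init fan-out: every active path in the current priority group gets its activate_path delayed work queued on the kmpath_handlerd workqueue. A sketch of that loop; the delay value and the in-progress accounting around it are assumptions:

	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;	/* assumed: may come from m->pg_init_delay_msecs */

	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);	/* assumed accounting */
	}
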
368 static struct pgpath *choose_path_in_pg(struct multipath *m, in choose_path_in_pg()
374 struct pgpath *pgpath; in choose_path_in_pg() local
380 pgpath = path_to_pgpath(path); in choose_path_in_pg()
385 m->current_pgpath = pgpath; in choose_path_in_pg()
390 return pgpath; in choose_path_in_pg()
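choose_path_in_pg (lines 368-390) delegates the actual choice to the group's path selector, then converts the returned dm_path back with the path_to_pgpath() container_of wrapper from line 50. A sketch; the error value and the exact conditions guarding the current_pgpath update are assumptions:

	static struct pgpath *choose_path_in_pg(struct multipath *m,
						struct priority_group *pg,
						size_t nr_bytes)
	{
		struct dm_path *path;
		struct pgpath *pgpath;

		/* the selector plugin picks among this group's paths */
		path = pg->ps.type->select_path(&pg->ps, nr_bytes);
		if (!path)
			return ERR_PTR(-ENXIO);	/* assumed error value */

		pgpath = path_to_pgpath(path);	/* container_of back to the pgpath */

		m->current_pgpath = pgpath;	/* cache for the lockless fast path (line 385) */

		return pgpath;
	}
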
393 static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) in choose_pgpath()
397 struct pgpath *pgpath; in choose_pgpath() local
417 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
418 if (!IS_ERR_OR_NULL(pgpath)) in choose_pgpath()
419 return pgpath; in choose_pgpath()
426 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
427 if (!IS_ERR_OR_NULL(pgpath)) in choose_pgpath()
428 return pgpath; in choose_pgpath()
441 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
442 if (!IS_ERR_OR_NULL(pgpath)) { in choose_pgpath()
448 return pgpath; in choose_pgpath()
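choose_pgpath (lines 393-448) calls choose_path_in_pg() at three sites (lines 417, 426, 441), which suggests a staged fallback: a preferred group first, then the group already in use, then a walk over all priority groups. The IS_ERR_OR_NULL checks let a selector that picked nothing differ from a hard error. A compressed sketch of that shape; the bypass handling between attempts is assumed:

	struct priority_group *pg;
	struct pgpath *pgpath;

	/* 1. an explicitly requested group, if any (assumed)       */
	/* 2. the group we are already using                        */
	if (m->current_pg) {
		pgpath = choose_path_in_pg(m, m->current_pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* 3. every group, in priority order */
	list_for_each_entry(pg, &m->priority_groups, list) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}
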
506 struct pgpath *pgpath; in multipath_clone_and_map() local
513 pgpath = READ_ONCE(m->current_pgpath); in multipath_clone_and_map()
514 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in multipath_clone_and_map()
515 pgpath = choose_pgpath(m, nr_bytes); in multipath_clone_and_map()
517 if (!pgpath) { in multipath_clone_and_map()
528 mpio->pgpath = pgpath; in multipath_clone_and_map()
531 bdev = pgpath->path.dev->bdev; in multipath_clone_and_map()
539 activate_or_offline_path(pgpath); in multipath_clone_and_map()
557 if (pgpath->pg->ps.type->start_io) in multipath_clone_and_map()
558 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, in multipath_clone_and_map()
559 &pgpath->path, in multipath_clone_and_map()
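The request-based map path (lines 506-559) shows the recurring lockless fast path: READ_ONCE(m->current_pgpath) first, and choose_pgpath() only if the cache is empty or MPATHF_QUEUE_IO forces re-selection. The same pattern reappears in __map_bio (line 612) and multipath_prepare_ioctl (line 1989). A sketch of the skeleton; the mpio setup details and return codes are assumptions:

	struct pgpath *pgpath;
	struct block_device *bdev;

	/* lockless fast path: reuse the cached selection when valid */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath)
		return DM_MAPIO_REQUEUE;	/* assumed: no usable path right now */

	mpio->pgpath = pgpath;			/* remembered for end_io (line 106) */
	bdev = pgpath->path.dev->bdev;

	/* let the path selector account for the outgoing I/O */
	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path, nr_bytes);
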
573 struct pgpath *pgpath = mpio->pgpath; in multipath_release_clone() local
575 if (pgpath && pgpath->pg->ps.type->end_io) in multipath_release_clone()
576 pgpath->pg->ps.type->end_io(&pgpath->pg->ps, in multipath_release_clone()
577 &pgpath->path, in multipath_release_clone()
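multipath_release_clone (lines 573-577) gives the path selector its end_io callback when a clone is torn down outside the normal completion path. A sketch; the trailing arguments are assumptions:

	struct pgpath *pgpath = mpio->pgpath;

	if (pgpath && pgpath->pg->ps.type->end_io)
		pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
					    &pgpath->path,
					    mpio->nr_bytes,		/* assumed argument */
					    clone->io_start_time_ns);	/* assumed argument */
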
606 static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) in __map_bio()
608 struct pgpath *pgpath; in __map_bio() local
612 pgpath = READ_ONCE(m->current_pgpath); in __map_bio()
613 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in __map_bio()
614 pgpath = choose_pgpath(m, bio->bi_iter.bi_size); in __map_bio()
616 if (!pgpath) { in __map_bio()
620 pgpath = ERR_PTR(-EAGAIN); in __map_bio()
631 return pgpath; in __map_bio()
637 struct pgpath *pgpath = __map_bio(m, bio); in __multipath_map_bio() local
639 if (IS_ERR(pgpath)) in __multipath_map_bio()
642 if (!pgpath) { in __multipath_map_bio()
649 mpio->pgpath = pgpath; in __multipath_map_bio()
652 bio_set_dev(bio, pgpath->path.dev->bdev); in __multipath_map_bio()
655 if (pgpath->pg->ps.type->start_io) in __multipath_map_bio()
656 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, in __multipath_map_bio()
657 &pgpath->path, in __multipath_map_bio()
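The bio-based path (lines 606-657) mirrors the request path: __map_bio() resolves a pgpath, returning ERR_PTR(-EAGAIN) when the I/O must be queued for later (line 620), and __multipath_map_bio() dispatches on the three possible outcomes before retargeting the bio. A sketch of the caller; the return values are assumptions:

	struct pgpath *pgpath = __map_bio(m, bio);

	if (IS_ERR(pgpath))
		return DM_MAPIO_SUBMITTED;	/* assumed: bio was queued for retry */
	if (!pgpath)
		return DM_MAPIO_KILL;		/* assumed: no path and not queueing */

	mpio->pgpath = pgpath;
	mpio->nr_bytes = bio->bi_iter.bi_size;

	bio_set_dev(bio, pgpath->path.dev->bdev);	/* retarget the bio */

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      mpio->nr_bytes);
	return DM_MAPIO_REMAPPED;	/* assumed */
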
926 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps, in parse_path()
930 struct pgpath *p; in parse_path()
1019 struct pgpath *pgpath; in parse_priority_group() local
1031 pgpath = parse_path(&path_args, &pg->ps, ti); in parse_priority_group()
1032 if (IS_ERR(pgpath)) { in parse_priority_group()
1033 r = PTR_ERR(pgpath); in parse_priority_group()
1037 pgpath->pg = pg; in parse_priority_group()
1038 list_add_tail(&pgpath->list, &pg->pgpaths); in parse_priority_group()
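During table construction (lines 1019-1038), each parsed path is linked into its group and given a back-pointer to it; note that parse_path() reports failure via ERR_PTR, not NULL (lines 1032-1033). A sketch of that loop body, with the argument-set plumbing and error label assumed:

	struct pgpath *pgpath;

	pgpath = parse_path(&path_args, &pg->ps, ti);
	if (IS_ERR(pgpath)) {
		r = PTR_ERR(pgpath);	/* propagate the constructor error */
		goto bad;		/* assumed label */
	}

	pgpath->pg = pg;				/* back-pointer to the owner */
	list_add_tail(&pgpath->list, &pg->pgpaths);	/* preserve table order */
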
1324 static int fail_path(struct pgpath *pgpath) in fail_path() argument
1327 struct multipath *m = pgpath->pg->m; in fail_path()
1331 if (!pgpath->is_active) in fail_path()
1336 pgpath->path.dev->name); in fail_path()
1338 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); in fail_path()
1339 pgpath->is_active = false; in fail_path()
1340 pgpath->fail_count++; in fail_path()
1344 if (pgpath == m->current_pgpath) in fail_path()
1348 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths)); in fail_path()
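fail_path (lines 1324-1348) is one of the two action_fn implementations (line 110). A sketch assembled from the listed lines; the lock, the valid-path counter, and the event plumbing are assumptions consistent with the atomic_read(&m->nr_valid_paths) at line 1348:

	static int fail_path(struct pgpath *pgpath)
	{
		unsigned long flags;
		struct multipath *m = pgpath->pg->m;

		spin_lock_irqsave(&m->lock, flags);	/* assumed lock */

		if (!pgpath->is_active)
			goto out;			/* already failed */

		/* tell the selector first, then mark the path down */
		pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
		pgpath->is_active = false;
		pgpath->fail_count++;

		atomic_dec(&m->nr_valid_paths);		/* see line 1348 */

		if (pgpath == m->current_pgpath)
			m->current_pgpath = NULL;	/* force re-selection */

	out:
		spin_unlock_irqrestore(&m->lock, flags);
		return 0;
	}
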
1363 static int reinstate_path(struct pgpath *pgpath) in reinstate_path() argument
1367 struct multipath *m = pgpath->pg->m; in reinstate_path()
1372 if (pgpath->is_active) in reinstate_path()
1377 pgpath->path.dev->name); in reinstate_path()
1379 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); in reinstate_path()
1383 pgpath->is_active = true; in reinstate_path()
1389 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { in reinstate_path()
1390 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) in reinstate_path()
1395 pgpath->path.dev->name, nr_valid_paths); in reinstate_path()
1406 if (pgpath->is_active) in reinstate_path()
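reinstate_path (lines 1363-1406) is the mirror image: it asks the selector to take the path back and, if a hardware handler is configured and the path belongs to the current group, re-queues its activation work (line 1390). A sketch; the failure handling and counters are partly assumed:

	static int reinstate_path(struct pgpath *pgpath)
	{
		int r = 0;
		struct multipath *m = pgpath->pg->m;

		if (pgpath->is_active)
			goto out;			/* nothing to do */

		r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
		if (r)
			goto out;			/* selector refused */

		pgpath->is_active = true;

		if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
			/* re-run pg_init on the revived path */
			if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
				atomic_inc(&m->pg_init_in_progress);	/* assumed */
		}

	out:
		return r;
	}
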
1419 struct pgpath *pgpath; in action_dev() local
1423 list_for_each_entry(pgpath, &pg->pgpaths, list) { in action_dev()
1424 if (pgpath->path.dev == dev) in action_dev()
1425 r = action(pgpath); in action_dev()
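action_dev (lines 1419-1425) applies an action_fn, in practice fail_path or reinstate_path, to whichever pgpath wraps a given device. A sketch; the outer loop over priority groups and the dev parameter type are inferred from the pgpath->path.dev == dev comparison at line 1424:

	int r = -EINVAL;
	struct priority_group *pg;
	struct pgpath *pgpath;

	list_for_each_entry(pg, &m->priority_groups, list) {	/* assumed outer loop */
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)	/* dev assumed to be struct dm_dev * */
				r = action(pgpath);
		}
	}
	return r;
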
1511 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) in pg_init_limit_reached() argument
1531 struct pgpath *pgpath = data; in pg_init_done() local
1532 struct priority_group *pg = pgpath->pg; in pg_init_done()
1551 fail_path(pgpath); in pg_init_done()
1566 if (pg_init_limit_reached(m, pgpath)) in pg_init_done()
1567 fail_path(pgpath); in pg_init_done()
1577 fail_path(pgpath); in pg_init_done()
1582 if (pgpath == m->current_pgpath) { in pg_init_done()
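pg_init_done (lines 1531-1582) is the completion callback handed to scsi_dh_activate (line 1621), so its signature matches the (void *data, int errors) callback type, as line 1531 confirms. The listed lines show its three fail_path() sites: hard handler errors (line 1551), retryable errors once the retry limit is reached (lines 1566-1567), and offlined devices (line 1577); an error on the current path also clears the cached selection (line 1582). A trimmed sketch of that shape; the full SCSI_DH status switch is assumed:

	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;		/* assumed back-pointer */

	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_RETRY:			/* retryable: give up only at the limit */
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		break;
	default:				/* hard error or offlined device */
		fail_path(pgpath);
	}

	if (errors && pgpath == m->current_pgpath) {
		m->current_pgpath = NULL;	/* force a fresh choice */
		m->current_pg = NULL;		/* assumed */
	}
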
1616 static void activate_or_offline_path(struct pgpath *pgpath) in activate_or_offline_path() argument
1618 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in activate_or_offline_path()
1620 if (pgpath->is_active && !blk_queue_dying(q)) in activate_or_offline_path()
1621 scsi_dh_activate(q, pg_init_done, pgpath); in activate_or_offline_path()
1623 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); in activate_or_offline_path()
1628 struct pgpath *pgpath = in activate_path_work() local
1629 container_of(work, struct pgpath, activate_path.work); in activate_path_work()
1631 activate_or_offline_path(pgpath); in activate_path_work()
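Lines 1616-1631 reassemble nearly verbatim: the delayed work queued at lines 331 and 1390 lands in activate_path_work(), which either kicks off asynchronous scsi_dh activation or synthesizes an offline result, so pg_init_done() runs in every case:

	static void activate_or_offline_path(struct pgpath *pgpath)
	{
		struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

		if (pgpath->is_active && !blk_queue_dying(q))
			scsi_dh_activate(q, pg_init_done, pgpath);	/* async, calls back */
		else
			pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);	/* complete inline */
	}

	static void activate_path_work(struct work_struct *work)
	{
		struct pgpath *pgpath =
			container_of(work, struct pgpath, activate_path.work);

		activate_or_offline_path(pgpath);
	}
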
1638 struct pgpath *pgpath = mpio->pgpath; in multipath_end_io() local
1660 if (pgpath) in multipath_end_io()
1661 fail_path(pgpath); in multipath_end_io()
1672 if (pgpath) { in multipath_end_io()
1673 struct path_selector *ps = &pgpath->pg->ps; in multipath_end_io()
1676 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes, in multipath_end_io()
1688 struct pgpath *pgpath = mpio->pgpath; in multipath_end_io_bio() local
1695 if (pgpath) in multipath_end_io_bio()
1696 fail_path(pgpath); in multipath_end_io_bio()
1716 if (pgpath) { in multipath_end_io_bio()
1717 struct path_selector *ps = &pgpath->pg->ps; in multipath_end_io_bio()
1720 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes, in multipath_end_io_bio()
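Both completion paths (multipath_end_io, lines 1638-1676, and multipath_end_io_bio, lines 1688-1720) share one pattern: on a path-related error, fail the path that served the I/O, then unconditionally hand the byte count back to the path selector's end_io hook. A sketch of that shared tail; the error predicate and the start-time argument are hypothetical placeholders:

	struct pgpath *pgpath = mpio->pgpath;

	if (error_is_path_related && pgpath)	/* hypothetical predicate */
		fail_path(pgpath);

	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 io_start_time);	/* assumed argument */
	}
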
1797 struct pgpath *p; in multipath_status()
1985 struct pgpath *pgpath; in multipath_prepare_ioctl() local
1989 pgpath = READ_ONCE(m->current_pgpath); in multipath_prepare_ioctl()
1990 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in multipath_prepare_ioctl()
1991 pgpath = choose_pgpath(m, 0); in multipath_prepare_ioctl()
1993 if (pgpath) { in multipath_prepare_ioctl()
1995 *bdev = pgpath->path.dev->bdev; in multipath_prepare_ioctl()
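multipath_prepare_ioctl (lines 1985-1995) reuses the fast path from line 513, but with nr_bytes == 0 since an ioctl carries no data to bill to the selector; on success it hands back the chosen path's block device. A sketch; the success return value is an assumption:

	struct pgpath *pgpath;

	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, 0);	/* 0 bytes: nothing to account */

	if (pgpath) {
		*bdev = pgpath->path.dev->bdev;	/* ioctl goes straight to this device */
		r = 0;				/* assumed */
	}
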
2036 struct pgpath *p; in multipath_iterate_devices()
2051 static int pgpath_busy(struct pgpath *pgpath) in pgpath_busy() argument
2053 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in pgpath_busy()
2071 struct pgpath *pgpath; in multipath_busy() local
2110 list_for_each_entry(pgpath, &pg->pgpaths, list) { in multipath_busy()
2111 if (pgpath->is_active) { in multipath_busy()
2113 if (!pgpath_busy(pgpath)) { in multipath_busy()
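Finally, the congestion probes (lines 2051-2113): pgpath_busy() defers to the low-level driver for one path's queue, and the multipath_busy() loop reports the group busy only when every active path is. The first sketch matches the listed lines; the counting logic in the second is assumed:

	static int pgpath_busy(struct pgpath *pgpath)
	{
		struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

		return blk_lld_busy(q);		/* ask the low-level driver */
	}

	/* in multipath_busy(): busy only if no active path can take I/O */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;	/* assumed bookkeeping */
			if (!pgpath_busy(pgpath)) {
				busy = false;	/* one free path is enough */
				break;
			}
		}
	}
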