
/*
 * Copyright (C) 2010-2012 by Dell Inc.  All rights reserved.
 * Copyright (C) 2011-2013 Red Hat, Inc.
 *
 * dm-switch is a device-mapper target that maps IO to underlying block
 * devices efficiently when there are a large number of fixed-sized
 * address regions but there is no simple pattern that allows for a compact
 * mapping representation such as dm-stripe.
 */
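/*
 * Illustrative usage (an assumption, not taken from this file): a switch
 * target over two paths with 128-sector (64 KiB) regions might be created
 * from user space as
 *
 *	dmsetup create sw --table "0 419430400 switch 2 128 0 /dev/sdb 0 /dev/sdc 0"
 *
 * i.e. "<num_paths> <region_size> <num_optional_args>" followed by one
 * "<device> <offset>" pair per path.
 */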
#include <linux/device-mapper.h>
	unsigned region_size;				/* Region size in 512-byte sectors */
	unsigned long nr_regions;			/* Number of regions making up the device */
	signed char region_size_bits;			/* log2 of region_size or -1 */

	unsigned char region_table_entry_bits;		/* Number of bits in one region table entry */
	unsigned char region_entries_per_slot;		/* Number of entries in one region table slot */
	signed char region_entries_per_slot_bits;	/* log2 of region_entries_per_slot or -1 */
	sctx->ti = ti;
	sctx->region_size = region_size;

	ti->private = sctx;
	struct switch_ctx *sctx = ti->private;
	sector_t nr_regions = ti->len;
	if (!(sctx->region_size & (sctx->region_size - 1)))
		sctx->region_size_bits = __ffs(sctx->region_size);
	else
		sctx->region_size_bits = -1;
	sctx->region_table_entry_bits = 1;
	while (sctx->region_table_entry_bits < sizeof(region_table_slot_t) * 8 &&
	       (region_table_slot_t)1 << sctx->region_table_entry_bits < nr_paths)
		sctx->region_table_entry_bits++;
	sctx->region_entries_per_slot = (sizeof(region_table_slot_t) * 8) / sctx->region_table_entry_bits;
	if (!(sctx->region_entries_per_slot & (sctx->region_entries_per_slot - 1)))
		sctx->region_entries_per_slot_bits = __ffs(sctx->region_entries_per_slot);
	else
		sctx->region_entries_per_slot_bits = -1;
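	/*
	 * Worked example (illustrative): with a 64-bit region_table_slot_t
	 * and 5 paths, region_table_entry_bits becomes 3 (2^3 = 8 >= 5), so
	 * region_entries_per_slot = 64 / 3 = 21.  21 is not a power of two,
	 * so region_entries_per_slot_bits is set to -1 and the div/mod
	 * fallback in switch_get_position() is used.
	 */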
	if (sector_div(nr_regions, sctx->region_size))
		nr_regions++;

	if (nr_regions >= ULONG_MAX) {
		ti->error = "Region table too large";
		return -EINVAL;
	}
	sctx->nr_regions = nr_regions;
	nr_slots = nr_regions;
	if (sector_div(nr_slots, sctx->region_entries_per_slot))
		nr_slots++;

	if (nr_slots > SIZE_MAX / sizeof(region_table_slot_t)) {
		ti->error = "Region table too large";
		return -EINVAL;
	}
	sctx->region_table = vmalloc(array_size(nr_slots,
						sizeof(region_table_slot_t)));
	if (!sctx->region_table) {
		ti->error = "Cannot allocate region table";
		return -ENOMEM;
	}
	if (sctx->region_entries_per_slot_bits >= 0) {
		*region_index = region_nr >> sctx->region_entries_per_slot_bits;
		*bit = region_nr & (sctx->region_entries_per_slot - 1);
	} else {
		*region_index = region_nr / sctx->region_entries_per_slot;
		*bit = region_nr % sctx->region_entries_per_slot;
	}

	*bit *= sctx->region_table_entry_bits;
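	/*
	 * Worked example (illustrative): with region_entries_per_slot = 32
	 * (slot_bits = 5) and 2-bit table entries, region_nr = 100 yields
	 * *region_index = 100 >> 5 = 3 and *bit = (100 & 31) * 2 = 8.
	 */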
	return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
		((1 << sctx->region_table_entry_bits) - 1);
	if (sctx->region_size_bits >= 0)
		p >>= sctx->region_size_bits;
	else
		sector_div(p, sctx->region_size);
	/* This can only happen if the processor uses non-atomic stores. */
	if (unlikely(path_nr >= sctx->nr_paths))
		path_nr = 0;
	pte = sctx->region_table[region_index];
	pte &= ~((((region_table_slot_t)1 << sctx->region_table_entry_bits) - 1) << bit);
	pte |= (region_table_slot_t)value << bit;
	sctx->region_table[region_index] = pte;
	for (region_nr = 0; region_nr < sctx->nr_regions; region_nr++) {
		switch_region_table_write(sctx, region_nr, path_nr);
		if (++path_nr >= sctx->nr_paths)
			path_nr = 0;
	}
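	/*
	 * Illustrative: with three paths the freshly initialised table is the
	 * round-robin sequence 0, 1, 2, 0, 1, 2, ... across all regions.
	 */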
	struct switch_ctx *sctx = ti->private;
	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &sctx->path_list[sctx->nr_paths].dmdev);
	if (r) {
		ti->error = "Device lookup failed";
		return r;
	}
		ti->error = "Invalid device starting offset";
		dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev);
		return -EINVAL;
	}
	sctx->path_list[sctx->nr_paths].start = start;

	sctx->nr_paths++;
/*
 * Destructor: Don't free the dm_target, just the ti->private data (if any).
 */
	struct switch_ctx *sctx = ti->private;

	while (sctx->nr_paths--)
		dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev);

	vfree(sctx->region_table);
	{1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"},
	{1, UINT_MAX, "Invalid region size"},
	r = dm_read_arg(_args, &as, &nr_paths, &ti->error);
	if (r)
		return -EINVAL;

	r = dm_read_arg(_args + 1, &as, &region_size, &ti->error);

	r = dm_read_arg_group(_args + 2, &as, &nr_optional_args, &ti->error);
		ti->error = "Incorrect number of path arguments";
		return -EINVAL;
		ti->error = "Cannot allocate redirection context";
		return -ENOMEM;
	ti->num_discard_bios = 1;
	struct switch_ctx *sctx = ti->private;
	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
	unsigned path_nr = switch_get_path_nr(sctx, offset);

	bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
	bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
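	/*
	 * Worked example (illustrative): with region_size = 128 sectors, a bio
	 * at target offset 1000 lies in region 1000 >> 7 = 7; if the table
	 * maps region 7 to path 2, the bio is reissued to path 2's bdev at
	 * sector path_list[2].start + 1000.
	 */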
/*
 * This table-based hex parser improves performance.
 * It reduces the time to load 1000000 entries compared to a
 * condition-based parser:
 *
 *		table-based parser	condition-based parser
 * PA-RISC	0.29s			0.31s
 */
static const unsigned char hex_table[256] = {
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255
};
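/*
 * Sketch of how such a lookup table is typically consumed (an assumption,
 * not part of this fragment): every non-hex byte maps to 255, so a single
 * compare per character both validates and converts it.
 */
static unsigned long parse_hex_sketch(const char **string)
{
	unsigned char d;
	unsigned long r = 0;

	/* Accumulate nibbles until the first non-hex character. */
	while ((d = hex_table[(unsigned char)**string]) < 16) {
		r = (r << 4) | d;
		(*string)++;
	}

	return r;
}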
	for (i = 1; i < argc; i++) {

			return -EINVAL;

			return -EINVAL;

			return -EINVAL;

			return -EINVAL;
			if (unlikely(!cycle_length) || unlikely(cycle_length - 1 > region_index)) {
				DMWARN("invalid set_region_mappings cycle length: %lu > %lu",
				       cycle_length - 1, region_index);
				return -EINVAL;
			}
			if (unlikely(region_index + num_write < region_index) ||
			    unlikely(region_index + num_write >= sctx->nr_regions)) {
				DMWARN("invalid set_region_mappings region number: %lu + %lu >= %lu",
				       region_index, num_write, sctx->nr_regions);
				return -EINVAL;
			}
			while (num_write--) {
				region_index++;
				path_nr = switch_region_table_read(sctx, region_index - cycle_length);
				switch_region_table_write(sctx, region_index, path_nr);
			}
			return -EINVAL;

			return -EINVAL;

			return -EINVAL;
		if (unlikely(region_index >= sctx->nr_regions)) {
			DMWARN("invalid set_region_mappings region number: %lu >= %lu", region_index, sctx->nr_regions);
			return -EINVAL;
		}
		if (unlikely(path_nr >= sctx->nr_paths)) {
			DMWARN("invalid set_region_mappings device: %lu >= %u", path_nr, sctx->nr_paths);
			return -EINVAL;
		}
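		/*
		 * Illustrative message (format inferred from the parsing
		 * above, not shown in full in this fragment):
		 * "<index>:<path>" maps one region, a bare ":<path>" advances
		 * to the next region, and "R<n>,<m>" copies the preceding
		 * <n>-entry pattern into the next <m> regions, e.g.:
		 *
		 *	dmsetup message sw 0 set_region_mappings 0:0 :1 :2 R3,10
		 */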
/*
 * Messages are processed one-at-a-time.
 */
	struct switch_ctx *sctx = ti->private;
	int r = -EINVAL;
	struct switch_ctx *sctx = ti->private;
		DMEMIT("%u %u 0", sctx->nr_paths, sctx->region_size);
		for (path_nr = 0; path_nr < sctx->nr_paths; path_nr++)
			DMEMIT(" %s %llu", sctx->path_list[path_nr].dmdev->name,
			       (unsigned long long)sctx->path_list[path_nr].start);
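		/*
		 * Illustrative status arguments for two paths and
		 * region_size 128 (device names and starts assumed):
		 *
		 *	2 128 0 8:16 0 8:32 0
		 */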
	struct switch_ctx *sctx = ti->private;

	*bdev = sctx->path_list[path_nr].dmdev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (ti->len + sctx->path_list[path_nr].start !=
	    i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	struct switch_ctx *sctx = ti->private;

	for (path_nr = 0; path_nr < sctx->nr_paths; path_nr++) {
		r = fn(ti, sctx->path_list[path_nr].dmdev,
		       sctx->path_list[path_nr].start, ti->len, data);
		if (r)
			return r;
	}
	.version = {1, 1, 0},