Lines Matching +full:page +full:- +full:offset (drivers/md/md-bitmap.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
5 * bitmap_create - sets up the bitmap structure
6 * bitmap_destroy - destroys the bitmap structure
8 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
9 * - added disk storage for bitmap
10 * - changes to allow various bitmap chunk sizes
33 #include "md-bitmap.h"
37 return bitmap->mddev ? mdname(bitmap->mddev) : "mdX"; in bmname()
41 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
43 * 1) check to see if this page is allocated, if it's not then try to alloc
44 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
45 * page pointer directly as a counter
47 * if we find our page, we increment the page's refcount so that it stays
51 unsigned long page, int create, int no_hijack) in md_bitmap_checkpage() argument
52 __releases(bitmap->lock) in md_bitmap_checkpage()
53 __acquires(bitmap->lock) in md_bitmap_checkpage()
57 if (page >= bitmap->pages) { in md_bitmap_checkpage()
59 * End-of-device while looking for a whole page. in md_bitmap_checkpage()
62 return -EINVAL; in md_bitmap_checkpage()
65 if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */ in md_bitmap_checkpage()
68 if (bitmap->bp[page].map) /* page is already allocated, just return */ in md_bitmap_checkpage()
72 return -ENOENT; in md_bitmap_checkpage()
74 /* this page has not been allocated yet */ in md_bitmap_checkpage()
76 spin_unlock_irq(&bitmap->lock); in md_bitmap_checkpage()
83 * When this function completes, either bp[page].map or in md_bitmap_checkpage()
84 * bp[page].hijacked. In either case, this function will in md_bitmap_checkpage()
86 * no risk of a free-spin, and so it is safe to assert in md_bitmap_checkpage()
91 spin_lock_irq(&bitmap->lock); in md_bitmap_checkpage()
94 pr_debug("md/bitmap: map page allocation failed, hijacking\n"); in md_bitmap_checkpage()
97 return -ENOMEM; in md_bitmap_checkpage()
98 /* failed - set the hijacked flag so that we can use the in md_bitmap_checkpage()
100 if (!bitmap->bp[page].map) in md_bitmap_checkpage()
101 bitmap->bp[page].hijacked = 1; in md_bitmap_checkpage()
102 } else if (bitmap->bp[page].map || in md_bitmap_checkpage()
103 bitmap->bp[page].hijacked) { in md_bitmap_checkpage()
104 /* somebody beat us to getting the page */ in md_bitmap_checkpage()
108 /* no page was in place and we have one, so install it */ in md_bitmap_checkpage()
110 bitmap->bp[page].map = mappage; in md_bitmap_checkpage()
111 bitmap->missing_pages--; in md_bitmap_checkpage()
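
A minimal user-space sketch of the hijack fallback used by md_bitmap_checkpage() above (and read back by md_bitmap_get_counter() further down): when a page of 16-bit counters cannot be allocated, the pointer field itself is pressed into service as two coarse counters, one per half of the range the page would have covered. The names and the 2048-counters-per-page figure are illustrative, not the kernel's.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct counter_page {
	uint16_t *map;		/* normally points to an array of counters */
	int hijacked;		/* set when 'map' itself holds two counters */
};

static uint16_t *get_counter(struct counter_page *bp, unsigned pageoff,
			     unsigned counters_per_page)
{
	if (bp->hijacked) {
		/* pick the low or high 16 bits of the pointer slot */
		int hi = pageoff >= counters_per_page / 2;
		return &((uint16_t *)&bp->map)[hi];
	}
	return &bp->map[pageoff];
}

int main(void)
{
	unsigned counters_per_page = 2048;	/* e.g. 4K page / sizeof(u16) */
	struct counter_page bp = { .hijacked = 0 };

	bp.map = calloc(counters_per_page, sizeof(uint16_t));
	if (!bp.map)
		bp.hijacked = 1;	/* alloc failed: fall back to the pointer slot */

	(*get_counter(&bp, 10, counters_per_page))++;
	printf("counter[10] = %u (hijacked=%d)\n",
	       *get_counter(&bp, 10, counters_per_page), bp.hijacked);
	if (!bp.hijacked)
		free(bp.map);
	return 0;
}
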
116 /* if page is completely empty, put it back on the free list, or dealloc it */
117 /* if page was hijacked, unmark the flag so it might get alloced next time */
119 static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page) in md_bitmap_checkfree() argument
123 if (bitmap->bp[page].count) /* page is still busy */ in md_bitmap_checkfree()
126 /* page is no longer in use, it can be released */ in md_bitmap_checkfree()
128 if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */ in md_bitmap_checkfree()
129 bitmap->bp[page].hijacked = 0; in md_bitmap_checkfree()
130 bitmap->bp[page].map = NULL; in md_bitmap_checkfree()
132 /* normal case, free the page */ in md_bitmap_checkfree()
133 ptr = bitmap->bp[page].map; in md_bitmap_checkfree()
134 bitmap->bp[page].map = NULL; in md_bitmap_checkfree()
135 bitmap->missing_pages++; in md_bitmap_checkfree()
141 * bitmap file handling - read and write the bitmap file and its superblock
145 * basic page I/O operations
149 static int read_sb_page(struct mddev *mddev, loff_t offset, in read_sb_page() argument
150 struct page *page, in read_sb_page() argument
153 /* choose a good rdev and read the page from there */ in read_sb_page()
159 if (! test_bit(In_sync, &rdev->flags) in read_sb_page()
160 || test_bit(Faulty, &rdev->flags) in read_sb_page()
161 || test_bit(Bitmap_sync, &rdev->flags)) in read_sb_page()
164 target = offset + index * (PAGE_SIZE/512); in read_sb_page()
167 roundup(size, bdev_logical_block_size(rdev->bdev)), in read_sb_page()
168 page, REQ_OP_READ, true)) { in read_sb_page()
169 page->index = index; in read_sb_page()
173 return -EIO; in read_sb_page()
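
A worked example of the sector arithmetic in read_sb_page() above: the bitmap sits 'offset' sectors past the superblock, page 'index' of it begins PAGE_SIZE/512 sectors further in, and the read length is rounded up to the device's logical block size. The page size and block size below are example assumptions only.

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	long long offset = 8;			/* bitmap offset, in 512B sectors */
	unsigned long index = 3;		/* which bitmap page to read */
	unsigned long size = 1000;		/* bytes actually wanted */
	unsigned long lbs = 4096;		/* assumed logical block size */

	long long target = offset + index * (page_size / 512);
	unsigned long len = ((size + lbs - 1) / lbs) * lbs;	/* roundup() */

	printf("read %lu bytes starting at sector %lld\n", len, target);
	return 0;
}
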
183 * still be in the same position on the list when we re-enter in next_active_rdev()
194 rdev = list_entry(&mddev->disks, struct md_rdev, same_set); in next_active_rdev()
199 list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) { in next_active_rdev()
200 if (rdev->raid_disk >= 0 && in next_active_rdev()
201 !test_bit(Faulty, &rdev->flags)) { in next_active_rdev()
203 atomic_inc(&rdev->nr_pending); in next_active_rdev()
212 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) in write_sb_page() argument
216 struct mddev *mddev = bitmap->mddev; in write_sb_page()
217 struct bitmap_storage *store = &bitmap->storage; in write_sb_page()
223 loff_t offset = mddev->bitmap_info.offset; in write_sb_page() local
225 bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev; in write_sb_page()
227 if (page->index == store->file_pages-1) { in write_sb_page()
228 int last_page_size = store->bytes & (PAGE_SIZE-1); in write_sb_page()
237 if (mddev->external) { in write_sb_page()
239 if (rdev->sb_start + offset + (page->index in write_sb_page()
241 > rdev->data_offset in write_sb_page()
243 rdev->sb_start + offset in write_sb_page()
244 < (rdev->data_offset + mddev->dev_sectors in write_sb_page()
247 } else if (offset < 0) { in write_sb_page()
249 if (offset in write_sb_page()
250 + (long)(page->index * (PAGE_SIZE/512)) in write_sb_page()
254 if (rdev->data_offset + mddev->dev_sectors in write_sb_page()
255 > rdev->sb_start + offset) in write_sb_page()
258 } else if (rdev->sb_start < rdev->data_offset) { in write_sb_page()
260 if (rdev->sb_start in write_sb_page()
261 + offset in write_sb_page()
262 + page->index*(PAGE_SIZE/512) + size/512 in write_sb_page()
263 > rdev->data_offset) in write_sb_page()
267 /* DATA METADATA BITMAP - no problems */ in write_sb_page()
270 rdev->sb_start + offset in write_sb_page()
271 + page->index * (PAGE_SIZE/512), in write_sb_page()
273 page); in write_sb_page()
281 return -EINVAL; in write_sb_page()
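
A sketch of one of the layout checks in write_sb_page() above, for the "METADATA BITMAP DATA" arrangement: a bitmap page may only be written if it ends at or before data_offset. All sector values here are made-up example numbers, not taken from a real array.

#include <stdio.h>

int main(void)
{
	long long sb_start = 8;		/* superblock start, in sectors */
	long long offset = 16;		/* bitmap offset from the superblock */
	long long data_offset = 2048;	/* where the array data begins */
	unsigned long page_index = 10;
	unsigned long size = 4096;	/* bytes in this bitmap page */

	long long end = sb_start + offset
			+ page_index * (4096 / 512) + size / 512;

	if (end > data_offset)
		printf("page %lu would overlap the data area\n", page_index);
	else
		printf("page %lu ends at sector %lld, before data at %lld\n",
		       page_index, end, data_offset);
	return 0;
}
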
286 * write out a page to a file
288 static void write_page(struct bitmap *bitmap, struct page *page, int wait) in write_page() argument
292 if (bitmap->storage.file == NULL) { in write_page()
293 switch (write_sb_page(bitmap, page, wait)) { in write_page()
294 case -EINVAL: in write_page()
295 set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); in write_page()
299 bh = page_buffers(page); in write_page()
301 while (bh && bh->b_blocknr) { in write_page()
302 atomic_inc(&bitmap->pending_writes); in write_page()
306 bh = bh->b_this_page; in write_page()
310 wait_event(bitmap->write_wait, in write_page()
311 atomic_read(&bitmap->pending_writes)==0); in write_page()
313 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) in write_page()
319 struct bitmap *bitmap = bh->b_private; in end_bitmap_write()
322 set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); in end_bitmap_write()
323 if (atomic_dec_and_test(&bitmap->pending_writes)) in end_bitmap_write()
324 wake_up(&bitmap->write_wait); in end_bitmap_write()
327 static void free_buffers(struct page *page) in free_buffers() argument
331 if (!PagePrivate(page)) in free_buffers()
334 bh = page_buffers(page); in free_buffers()
336 struct buffer_head *next = bh->b_this_page; in free_buffers()
340 detach_page_private(page); in free_buffers()
341 put_page(page); in free_buffers()
344 /* read a page from a file.
345 * We both read the page, and attach buffers to the page to record the
354 struct page *page) in read_page() argument
365 bh = alloc_page_buffers(page, blocksize, false); in read_page()
367 ret = -ENOMEM; in read_page()
370 attach_page_private(page, bh); in read_page()
371 blk_cur = index << (PAGE_SHIFT - inode->i_blkbits); in read_page()
376 bh->b_blocknr = 0; in read_page()
380 ret = -EINVAL; in read_page()
381 bh->b_blocknr = 0; in read_page()
385 bh->b_blocknr = block; in read_page()
386 bh->b_bdev = inode->i_sb->s_bdev; in read_page()
390 count -= blocksize; in read_page()
392 bh->b_end_io = end_bitmap_write; in read_page()
393 bh->b_private = bitmap; in read_page()
394 atomic_inc(&bitmap->pending_writes); in read_page()
400 bh = bh->b_this_page; in read_page()
402 page->index = index; in read_page()
404 wait_event(bitmap->write_wait, in read_page()
405 atomic_read(&bitmap->pending_writes)==0); in read_page()
406 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) in read_page()
407 ret = -EIO; in read_page()
428 if (bitmap->storage.file) in md_bitmap_wait_writes()
429 wait_event(bitmap->write_wait, in md_bitmap_wait_writes()
430 atomic_read(&bitmap->pending_writes)==0); in md_bitmap_wait_writes()
439 md_super_wait(bitmap->mddev); in md_bitmap_wait_writes()
448 if (!bitmap || !bitmap->mddev) /* no bitmap for this array */ in md_bitmap_update_sb()
450 if (bitmap->mddev->bitmap_info.external) in md_bitmap_update_sb()
452 if (!bitmap->storage.sb_page) /* no superblock */ in md_bitmap_update_sb()
454 sb = kmap_atomic(bitmap->storage.sb_page); in md_bitmap_update_sb()
455 sb->events = cpu_to_le64(bitmap->mddev->events); in md_bitmap_update_sb()
456 if (bitmap->mddev->events < bitmap->events_cleared) in md_bitmap_update_sb()
457 /* rocking back to read-only */ in md_bitmap_update_sb()
458 bitmap->events_cleared = bitmap->mddev->events; in md_bitmap_update_sb()
459 sb->events_cleared = cpu_to_le64(bitmap->events_cleared); in md_bitmap_update_sb()
464 sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR)); in md_bitmap_update_sb()
466 sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ); in md_bitmap_update_sb()
467 sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind); in md_bitmap_update_sb()
469 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); in md_bitmap_update_sb()
470 sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize); in md_bitmap_update_sb()
471 sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes); in md_bitmap_update_sb()
472 sb->sectors_reserved = cpu_to_le32(bitmap->mddev-> in md_bitmap_update_sb()
475 write_page(bitmap, bitmap->storage.sb_page, 1); in md_bitmap_update_sb()
484 if (!bitmap || !bitmap->storage.sb_page) in md_bitmap_print_sb()
486 sb = kmap_atomic(bitmap->storage.sb_page); in md_bitmap_print_sb()
488 pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); in md_bitmap_print_sb()
489 pr_debug(" version: %d\n", le32_to_cpu(sb->version)); in md_bitmap_print_sb()
491 le32_to_cpu(*(__le32 *)(sb->uuid+0)), in md_bitmap_print_sb()
492 le32_to_cpu(*(__le32 *)(sb->uuid+4)), in md_bitmap_print_sb()
493 le32_to_cpu(*(__le32 *)(sb->uuid+8)), in md_bitmap_print_sb()
494 le32_to_cpu(*(__le32 *)(sb->uuid+12))); in md_bitmap_print_sb()
496 (unsigned long long) le64_to_cpu(sb->events)); in md_bitmap_print_sb()
498 (unsigned long long) le64_to_cpu(sb->events_cleared)); in md_bitmap_print_sb()
499 pr_debug(" state: %08x\n", le32_to_cpu(sb->state)); in md_bitmap_print_sb()
500 pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize)); in md_bitmap_print_sb()
501 pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep)); in md_bitmap_print_sb()
503 (unsigned long long)le64_to_cpu(sb->sync_size)/2); in md_bitmap_print_sb()
504 pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind)); in md_bitmap_print_sb()
513 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
514 * This function verifies 'bitmap_info' and populates the on-disk bitmap
517 * Returns: 0 on success, -Exxx on error
524 bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO); in md_bitmap_new_disk_sb()
525 if (bitmap->storage.sb_page == NULL) in md_bitmap_new_disk_sb()
526 return -ENOMEM; in md_bitmap_new_disk_sb()
527 bitmap->storage.sb_page->index = 0; in md_bitmap_new_disk_sb()
529 sb = kmap_atomic(bitmap->storage.sb_page); in md_bitmap_new_disk_sb()
531 sb->magic = cpu_to_le32(BITMAP_MAGIC); in md_bitmap_new_disk_sb()
532 sb->version = cpu_to_le32(BITMAP_MAJOR_HI); in md_bitmap_new_disk_sb()
534 chunksize = bitmap->mddev->bitmap_info.chunksize; in md_bitmap_new_disk_sb()
539 return -EINVAL; in md_bitmap_new_disk_sb()
541 sb->chunksize = cpu_to_le32(chunksize); in md_bitmap_new_disk_sb()
543 daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep; in md_bitmap_new_disk_sb()
548 sb->daemon_sleep = cpu_to_le32(daemon_sleep); in md_bitmap_new_disk_sb()
549 bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; in md_bitmap_new_disk_sb()
555 write_behind = bitmap->mddev->bitmap_info.max_write_behind; in md_bitmap_new_disk_sb()
558 sb->write_behind = cpu_to_le32(write_behind); in md_bitmap_new_disk_sb()
559 bitmap->mddev->bitmap_info.max_write_behind = write_behind; in md_bitmap_new_disk_sb()
562 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); in md_bitmap_new_disk_sb()
564 memcpy(sb->uuid, bitmap->mddev->uuid, 16); in md_bitmap_new_disk_sb()
566 set_bit(BITMAP_STALE, &bitmap->flags); in md_bitmap_new_disk_sb()
567 sb->state = cpu_to_le32(bitmap->flags); in md_bitmap_new_disk_sb()
568 bitmap->events_cleared = bitmap->mddev->events; in md_bitmap_new_disk_sb()
569 sb->events_cleared = cpu_to_le64(bitmap->mddev->events); in md_bitmap_new_disk_sb()
570 bitmap->mddev->bitmap_info.nodes = 0; in md_bitmap_new_disk_sb()
586 int err = -EINVAL; in md_bitmap_read_sb()
587 struct page *sb_page; in md_bitmap_read_sb()
588 loff_t offset = bitmap->mddev->bitmap_info.offset; in md_bitmap_read_sb() local
590 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { in md_bitmap_read_sb()
594 set_bit(BITMAP_STALE, &bitmap->flags); in md_bitmap_read_sb()
598 /* page 0 is the superblock, read it... */ in md_bitmap_read_sb()
601 return -ENOMEM; in md_bitmap_read_sb()
602 bitmap->storage.sb_page = sb_page; in md_bitmap_read_sb()
606 if (bitmap->cluster_slot >= 0) { in md_bitmap_read_sb()
607 sector_t bm_blocks = bitmap->mddev->resync_max_sectors; in md_bitmap_read_sb()
610 (bitmap->mddev->bitmap_info.chunksize >> 9)); in md_bitmap_read_sb()
615 offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3)); in md_bitmap_read_sb()
616 pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, in md_bitmap_read_sb()
617 bitmap->cluster_slot, offset); in md_bitmap_read_sb()
620 if (bitmap->storage.file) { in md_bitmap_read_sb()
621 loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host); in md_bitmap_read_sb()
624 err = read_page(bitmap->storage.file, 0, in md_bitmap_read_sb()
627 err = read_sb_page(bitmap->mddev, in md_bitmap_read_sb()
628 offset, in md_bitmap_read_sb()
635 err = -EINVAL; in md_bitmap_read_sb()
638 chunksize = le32_to_cpu(sb->chunksize); in md_bitmap_read_sb()
639 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; in md_bitmap_read_sb()
640 write_behind = le32_to_cpu(sb->write_behind); in md_bitmap_read_sb()
641 sectors_reserved = le32_to_cpu(sb->sectors_reserved); in md_bitmap_read_sb()
643 /* verify that the bitmap-specific fields are valid */ in md_bitmap_read_sb()
644 if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) in md_bitmap_read_sb()
646 else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || in md_bitmap_read_sb()
647 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED) in md_bitmap_read_sb()
656 reason = "write-behind limit out of range (0 - 16383)"; in md_bitmap_read_sb()
665 * cluster-compatible in md_bitmap_read_sb()
667 if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) { in md_bitmap_read_sb()
668 nodes = le32_to_cpu(sb->nodes); in md_bitmap_read_sb()
669 strscpy(bitmap->mddev->bitmap_info.cluster_name, in md_bitmap_read_sb()
670 sb->cluster_name, 64); in md_bitmap_read_sb()
674 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); in md_bitmap_read_sb()
676 if (bitmap->mddev->persistent) { in md_bitmap_read_sb()
681 if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) { in md_bitmap_read_sb()
686 events = le64_to_cpu(sb->events); in md_bitmap_read_sb()
687 if (!nodes && (events < bitmap->mddev->events)) { in md_bitmap_read_sb()
688 pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n", in md_bitmap_read_sb()
690 (unsigned long long) bitmap->mddev->events); in md_bitmap_read_sb()
691 set_bit(BITMAP_STALE, &bitmap->flags); in md_bitmap_read_sb()
696 bitmap->flags |= le32_to_cpu(sb->state); in md_bitmap_read_sb()
697 if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) in md_bitmap_read_sb()
698 set_bit(BITMAP_HOSTENDIAN, &bitmap->flags); in md_bitmap_read_sb()
699 bitmap->events_cleared = le64_to_cpu(sb->events_cleared); in md_bitmap_read_sb()
704 if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { in md_bitmap_read_sb()
706 bitmap->mddev->bitmap_info.chunksize = chunksize; in md_bitmap_read_sb()
707 err = md_setup_cluster(bitmap->mddev, nodes); in md_bitmap_read_sb()
713 bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev); in md_bitmap_read_sb()
719 if (test_bit(BITMAP_STALE, &bitmap->flags)) in md_bitmap_read_sb()
720 bitmap->events_cleared = bitmap->mddev->events; in md_bitmap_read_sb()
721 bitmap->mddev->bitmap_info.chunksize = chunksize; in md_bitmap_read_sb()
722 bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; in md_bitmap_read_sb()
723 bitmap->mddev->bitmap_info.max_write_behind = write_behind; in md_bitmap_read_sb()
724 bitmap->mddev->bitmap_info.nodes = nodes; in md_bitmap_read_sb()
725 if (bitmap->mddev->bitmap_info.space == 0 || in md_bitmap_read_sb()
726 bitmap->mddev->bitmap_info.space > sectors_reserved) in md_bitmap_read_sb()
727 bitmap->mddev->bitmap_info.space = sectors_reserved; in md_bitmap_read_sb()
730 if (bitmap->cluster_slot < 0) in md_bitmap_read_sb()
731 md_cluster_stop(bitmap->mddev); in md_bitmap_read_sb()
741 * on-disk bitmap:
744 * file a page at a time. There's a superblock at the start of the file.
746 /* calculate the index of the page that contains this bit */
750 if (store->sb_page) in file_page_index()
755 /* calculate the (bit) offset of this bit within a page */
759 if (store->sb_page) in file_page_offset()
761 return chunk & (PAGE_BITS - 1); in file_page_offset()
765 * return a pointer to the page in the filemap that contains the given bit
768 static inline struct page *filemap_get_page(struct bitmap_storage *store, in filemap_get_page()
771 if (file_page_index(store, chunk) >= store->file_pages) in filemap_get_page()
773 return store->filemap[file_page_index(store, chunk)]; in filemap_get_page()
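
A worked example of the chunk-to-location mapping done by file_page_index() and file_page_offset() above: each chunk is one bit in the file, and when the superblock shares page 0 the bits are pushed past it. The 256-byte superblock and 4K page are assumptions chosen for the example.

#include <stdio.h>

int main(void)
{
	const unsigned long page_bits = 4096 * 8;	/* bits per page */
	const unsigned long sb_bits = 256 * 8;		/* assumed sizeof(superblock) << 3 */
	int have_sb_page = 1;

	unsigned long chunk = 40000;			/* chunk number */
	unsigned long bit = chunk + (have_sb_page ? sb_bits : 0);

	printf("chunk %lu -> file page %lu, bit %lu within that page\n",
	       chunk, bit / page_bits, bit & (page_bits - 1));
	return 0;
}
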
780 int pnum, offset = 0; in md_bitmap_storage_alloc() local
789 offset = slot_number * num_pages; in md_bitmap_storage_alloc()
791 store->filemap = kmalloc_array(num_pages, sizeof(struct page *), in md_bitmap_storage_alloc()
793 if (!store->filemap) in md_bitmap_storage_alloc()
794 return -ENOMEM; in md_bitmap_storage_alloc()
796 if (with_super && !store->sb_page) { in md_bitmap_storage_alloc()
797 store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO); in md_bitmap_storage_alloc()
798 if (store->sb_page == NULL) in md_bitmap_storage_alloc()
799 return -ENOMEM; in md_bitmap_storage_alloc()
803 if (store->sb_page) { in md_bitmap_storage_alloc()
804 store->filemap[0] = store->sb_page; in md_bitmap_storage_alloc()
806 store->sb_page->index = offset; in md_bitmap_storage_alloc()
810 store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO); in md_bitmap_storage_alloc()
811 if (!store->filemap[pnum]) { in md_bitmap_storage_alloc()
812 store->file_pages = pnum; in md_bitmap_storage_alloc()
813 return -ENOMEM; in md_bitmap_storage_alloc()
815 store->filemap[pnum]->index = pnum + offset; in md_bitmap_storage_alloc()
817 store->file_pages = pnum; in md_bitmap_storage_alloc()
819 /* We need 4 bits per page, rounded up to a multiple in md_bitmap_storage_alloc()
821 store->filemap_attr = kzalloc( in md_bitmap_storage_alloc()
824 if (!store->filemap_attr) in md_bitmap_storage_alloc()
825 return -ENOMEM; in md_bitmap_storage_alloc()
827 store->bytes = bytes; in md_bitmap_storage_alloc()
834 struct page **map, *sb_page; in md_bitmap_file_unmap()
838 file = store->file; in md_bitmap_file_unmap()
839 map = store->filemap; in md_bitmap_file_unmap()
840 pages = store->file_pages; in md_bitmap_file_unmap()
841 sb_page = store->sb_page; in md_bitmap_file_unmap()
843 while (pages--) in md_bitmap_file_unmap()
847 kfree(store->filemap_attr); in md_bitmap_file_unmap()
854 invalidate_mapping_pages(inode->i_mapping, 0, -1); in md_bitmap_file_unmap()
860 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
868 if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) { in md_bitmap_file_kick()
871 if (bitmap->storage.file) { in md_bitmap_file_kick()
874 ptr = file_path(bitmap->storage.file, in md_bitmap_file_kick()
897 set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); in set_page_attr()
903 clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); in clear_page_attr()
909 return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); in test_page_attr()
916 bitmap->storage.filemap_attr); in test_and_clear_page_attr()
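
The attribute helpers above keep four flag bits per file page, so flag 'attr' of page 'pnum' lives at bit (pnum << 2) + attr of filemap_attr ("4 bits per page, rounded up to a multiple" of a word, as the allocation comment says). A small stand-alone sketch of that layout; the enum values are illustrative stand-ins for the kernel's page attributes.

#include <stdio.h>

enum { PAGE_DIRTY = 0, PAGE_PENDING = 1, PAGE_NEEDWRITE = 2 };

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void set_attr(unsigned long *attrs, int pnum, int attr)
{
	unsigned long bit = ((unsigned long)pnum << 2) + attr;

	attrs[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int test_attr(unsigned long *attrs, int pnum, int attr)
{
	unsigned long bit = ((unsigned long)pnum << 2) + attr;

	return !!(attrs[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)));
}

int main(void)
{
	unsigned long attrs[4] = { 0 };	/* 4 bits per page -> 64 pages on LP64 */

	set_attr(attrs, 5, PAGE_DIRTY);
	printf("page 5: dirty=%d pending=%d\n",
	       test_attr(attrs, 5, PAGE_DIRTY),
	       test_attr(attrs, 5, PAGE_PENDING));
	return 0;
}
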
919 * bitmap_file_set_bit -- called before performing a write to the md device
922 * we set the bit immediately, then we record the page number so that
928 struct page *page; in md_bitmap_file_set_bit() local
930 unsigned long chunk = block >> bitmap->counts.chunkshift; in md_bitmap_file_set_bit()
931 struct bitmap_storage *store = &bitmap->storage; in md_bitmap_file_set_bit()
934 if (mddev_is_clustered(bitmap->mddev)) in md_bitmap_file_set_bit()
935 node_offset = bitmap->cluster_slot * store->file_pages; in md_bitmap_file_set_bit()
937 page = filemap_get_page(&bitmap->storage, chunk); in md_bitmap_file_set_bit()
938 if (!page) in md_bitmap_file_set_bit()
940 bit = file_page_offset(&bitmap->storage, chunk); in md_bitmap_file_set_bit()
943 kaddr = kmap_atomic(page); in md_bitmap_file_set_bit()
944 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) in md_bitmap_file_set_bit()
949 pr_debug("set file bit %lu page %lu\n", bit, page->index); in md_bitmap_file_set_bit()
950 /* record page number so it gets flushed to disk when unplug occurs */ in md_bitmap_file_set_bit()
951 set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY); in md_bitmap_file_set_bit()
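
md_bitmap_file_set_bit() above picks between host-endian and little-endian bit numbering for the on-disk page. A little-endian bit n always lands in byte n/8 at position n%8, so the file stays portable between architectures; the host-endian variant is only readable on machines of the same endianness. The helper below is an illustration of the little-endian layout, not the kernel's set_bit_le().

#include <stdio.h>
#include <string.h>

static void set_bit_le_sketch(unsigned long n, unsigned char *addr)
{
	addr[n / 8] |= 1u << (n % 8);
}

int main(void)
{
	unsigned char page[16];

	memset(page, 0, sizeof(page));
	set_bit_le_sketch(10, page);		/* chunk bit 10 */
	printf("byte 1 = 0x%02x\n", page[1]);	/* 0x04: bit 2 of byte 1 */
	return 0;
}
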
957 struct page *page; in md_bitmap_file_clear_bit() local
959 unsigned long chunk = block >> bitmap->counts.chunkshift; in md_bitmap_file_clear_bit()
960 struct bitmap_storage *store = &bitmap->storage; in md_bitmap_file_clear_bit()
963 if (mddev_is_clustered(bitmap->mddev)) in md_bitmap_file_clear_bit()
964 node_offset = bitmap->cluster_slot * store->file_pages; in md_bitmap_file_clear_bit()
966 page = filemap_get_page(&bitmap->storage, chunk); in md_bitmap_file_clear_bit()
967 if (!page) in md_bitmap_file_clear_bit()
969 bit = file_page_offset(&bitmap->storage, chunk); in md_bitmap_file_clear_bit()
970 paddr = kmap_atomic(page); in md_bitmap_file_clear_bit()
971 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) in md_bitmap_file_clear_bit()
976 if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) { in md_bitmap_file_clear_bit()
977 set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING); in md_bitmap_file_clear_bit()
978 bitmap->allclean = 0; in md_bitmap_file_clear_bit()
985 struct page *page; in md_bitmap_file_test_bit() local
987 unsigned long chunk = block >> bitmap->counts.chunkshift; in md_bitmap_file_test_bit()
990 page = filemap_get_page(&bitmap->storage, chunk); in md_bitmap_file_test_bit()
991 if (!page) in md_bitmap_file_test_bit()
992 return -EINVAL; in md_bitmap_file_test_bit()
993 bit = file_page_offset(&bitmap->storage, chunk); in md_bitmap_file_test_bit()
994 paddr = kmap_atomic(page); in md_bitmap_file_test_bit()
995 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) in md_bitmap_file_test_bit()
1005 * (slave) device queues -- before we let any writes go down, we need to
1013 if (!bitmap || !bitmap->storage.filemap || in md_bitmap_unplug()
1014 test_bit(BITMAP_STALE, &bitmap->flags)) in md_bitmap_unplug()
1017 /* look at each page to see if there are any set bits that need to be in md_bitmap_unplug()
1019 for (i = 0; i < bitmap->storage.file_pages; i++) { in md_bitmap_unplug()
1026 if (bitmap->mddev->queue) in md_bitmap_unplug()
1027 blk_add_trace_msg(bitmap->mddev->queue, in md_bitmap_unplug()
1031 write_page(bitmap, bitmap->storage.filemap[i], 0); in md_bitmap_unplug()
1038 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) in md_bitmap_unplug()
1043 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
1044 /* * bitmap_init_from_disk -- called at bitmap_create time to initialize
1045 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
1053 * This is used when reading an out-of-date bitmap...
1058 struct page *page = NULL; in md_bitmap_init_from_disk() local
1061 unsigned long offset; in md_bitmap_init_from_disk() local
1063 int ret = -ENOSPC; in md_bitmap_init_from_disk()
1065 struct bitmap_storage *store = &bitmap->storage; in md_bitmap_init_from_disk()
1067 chunks = bitmap->counts.chunks; in md_bitmap_init_from_disk()
1068 file = store->file; in md_bitmap_init_from_disk()
1070 if (!file && !bitmap->mddev->bitmap_info.offset) { in md_bitmap_init_from_disk()
1071 /* No permanent bitmap - fill with '1s'. */ in md_bitmap_init_from_disk()
1072 store->filemap = NULL; in md_bitmap_init_from_disk()
1073 store->file_pages = 0; in md_bitmap_init_from_disk()
1076 int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift) in md_bitmap_init_from_disk()
1079 (sector_t)i << bitmap->counts.chunkshift, in md_bitmap_init_from_disk()
1085 outofdate = test_bit(BITMAP_STALE, &bitmap->flags); in md_bitmap_init_from_disk()
1089 if (file && i_size_read(file->f_mapping->host) < store->bytes) { in md_bitmap_init_from_disk()
1092 (unsigned long) i_size_read(file->f_mapping->host), in md_bitmap_init_from_disk()
1093 store->bytes); in md_bitmap_init_from_disk()
1098 offset = 0; in md_bitmap_init_from_disk()
1099 if (!bitmap->mddev->bitmap_info.external) in md_bitmap_init_from_disk()
1100 offset = sizeof(bitmap_super_t); in md_bitmap_init_from_disk()
1102 if (mddev_is_clustered(bitmap->mddev)) in md_bitmap_init_from_disk()
1103 node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE)); in md_bitmap_init_from_disk()
1107 index = file_page_index(&bitmap->storage, i); in md_bitmap_init_from_disk()
1108 bit = file_page_offset(&bitmap->storage, i); in md_bitmap_init_from_disk()
1109 if (index != oldindex) { /* this is a new page, read it in */ in md_bitmap_init_from_disk()
1111 /* unmap the old page, we're done with it */ in md_bitmap_init_from_disk()
1112 if (index == store->file_pages-1) in md_bitmap_init_from_disk()
1113 count = store->bytes - index * PAGE_SIZE; in md_bitmap_init_from_disk()
1116 page = store->filemap[index]; in md_bitmap_init_from_disk()
1119 count, page); in md_bitmap_init_from_disk()
1122 bitmap->mddev, in md_bitmap_init_from_disk()
1123 bitmap->mddev->bitmap_info.offset, in md_bitmap_init_from_disk()
1124 page, in md_bitmap_init_from_disk()
1135 * whole page and write it out in md_bitmap_init_from_disk()
1137 paddr = kmap_atomic(page); in md_bitmap_init_from_disk()
1138 memset(paddr + offset, 0xff, in md_bitmap_init_from_disk()
1139 PAGE_SIZE - offset); in md_bitmap_init_from_disk()
1141 write_page(bitmap, page, 1); in md_bitmap_init_from_disk()
1143 ret = -EIO; in md_bitmap_init_from_disk()
1145 &bitmap->flags)) in md_bitmap_init_from_disk()
1149 paddr = kmap_atomic(page); in md_bitmap_init_from_disk()
1150 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) in md_bitmap_init_from_disk()
1157 int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift in md_bitmap_init_from_disk()
1160 (sector_t)i << bitmap->counts.chunkshift, in md_bitmap_init_from_disk()
1164 offset = 0; in md_bitmap_init_from_disk()
1168 bmname(bitmap), store->file_pages, in md_bitmap_init_from_disk()
1186 if (!bitmap || !bitmap->storage.filemap) in md_bitmap_write_all()
1188 if (bitmap->storage.file) in md_bitmap_write_all()
1192 for (i = 0; i < bitmap->storage.file_pages; i++) in md_bitmap_write_all()
1195 bitmap->allclean = 0; in md_bitmap_write_all()
1199 sector_t offset, int inc) in md_bitmap_count_page() argument
1201 sector_t chunk = offset >> bitmap->chunkshift; in md_bitmap_count_page()
1202 unsigned long page = chunk >> PAGE_COUNTER_SHIFT; in md_bitmap_count_page() local
1203 bitmap->bp[page].count += inc; in md_bitmap_count_page()
1204 md_bitmap_checkfree(bitmap, page); in md_bitmap_count_page()
1207 static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset) in md_bitmap_set_pending() argument
1209 sector_t chunk = offset >> bitmap->chunkshift; in md_bitmap_set_pending()
1210 unsigned long page = chunk >> PAGE_COUNTER_SHIFT; in md_bitmap_set_pending() local
1211 struct bitmap_page *bp = &bitmap->bp[page]; in md_bitmap_set_pending()
1213 if (!bp->pending) in md_bitmap_set_pending()
1214 bp->pending = 1; in md_bitmap_set_pending()
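
md_bitmap_count_page() and md_bitmap_set_pending() above both reduce a sector offset to a chunk number and then to a counter-page index. A quick worked example; the shift values (64KiB chunks over 512-byte sectors, 2048 16-bit counters per 4K page) are typical assumptions, not read from a live array.

#include <stdio.h>

int main(void)
{
	const int chunkshift = 7;		/* 64KiB chunk = 128 sectors */
	const int page_counter_shift = 11;	/* 2048 counters per page */

	unsigned long long offset = 5000000;	/* sector being written */
	unsigned long long chunk = offset >> chunkshift;
	unsigned long page = chunk >> page_counter_shift;

	printf("sector %llu -> chunk %llu -> counter page %lu\n",
	       offset, chunk, page);
	return 0;
}
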
1218 sector_t offset, sector_t *blocks,
1222 * bitmap daemon -- periodically wakes up to clean bits and flush pages
1237 mutex_lock(&mddev->bitmap_info.mutex); in md_bitmap_daemon_work()
1238 bitmap = mddev->bitmap; in md_bitmap_daemon_work()
1240 mutex_unlock(&mddev->bitmap_info.mutex); in md_bitmap_daemon_work()
1243 if (time_before(jiffies, bitmap->daemon_lastrun in md_bitmap_daemon_work()
1244 + mddev->bitmap_info.daemon_sleep)) in md_bitmap_daemon_work()
1247 bitmap->daemon_lastrun = jiffies; in md_bitmap_daemon_work()
1248 if (bitmap->allclean) { in md_bitmap_daemon_work()
1249 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; in md_bitmap_daemon_work()
1252 bitmap->allclean = 1; in md_bitmap_daemon_work()
1254 if (bitmap->mddev->queue) in md_bitmap_daemon_work()
1255 blk_add_trace_msg(bitmap->mddev->queue, in md_bitmap_daemon_work()
1258 /* Any file-page which is PENDING now needs to be written. in md_bitmap_daemon_work()
1259 * So set NEEDWRITE now, then after we make any last-minute changes in md_bitmap_daemon_work()
1262 for (j = 0; j < bitmap->storage.file_pages; j++) in md_bitmap_daemon_work()
1268 if (bitmap->need_sync && in md_bitmap_daemon_work()
1269 mddev->bitmap_info.external == 0) { in md_bitmap_daemon_work()
1273 bitmap->need_sync = 0; in md_bitmap_daemon_work()
1274 if (bitmap->storage.filemap) { in md_bitmap_daemon_work()
1275 sb = kmap_atomic(bitmap->storage.sb_page); in md_bitmap_daemon_work()
1276 sb->events_cleared = in md_bitmap_daemon_work()
1277 cpu_to_le64(bitmap->events_cleared); in md_bitmap_daemon_work()
1286 counts = &bitmap->counts; in md_bitmap_daemon_work()
1287 spin_lock_irq(&counts->lock); in md_bitmap_daemon_work()
1289 for (j = 0; j < counts->chunks; j++) { in md_bitmap_daemon_work()
1291 sector_t block = (sector_t)j << counts->chunkshift; in md_bitmap_daemon_work()
1295 if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) { in md_bitmap_daemon_work()
1299 counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0; in md_bitmap_daemon_work()
1307 if (*bmc == 1 && !bitmap->need_sync) { in md_bitmap_daemon_work()
1310 md_bitmap_count_page(counts, block, -1); in md_bitmap_daemon_work()
1315 bitmap->allclean = 0; in md_bitmap_daemon_work()
1318 spin_unlock_irq(&counts->lock); in md_bitmap_daemon_work()
1321 /* Now start writeout on any page in NEEDWRITE that isn't DIRTY. in md_bitmap_daemon_work()
1324 * If we find any DIRTY page we stop there and let bitmap_unplug in md_bitmap_daemon_work()
1330 j < bitmap->storage.file_pages in md_bitmap_daemon_work()
1331 && !test_bit(BITMAP_STALE, &bitmap->flags); in md_bitmap_daemon_work()
1337 if (bitmap->storage.filemap && in md_bitmap_daemon_work()
1340 write_page(bitmap, bitmap->storage.filemap[j], 0); in md_bitmap_daemon_work()
1345 if (bitmap->allclean == 0) in md_bitmap_daemon_work()
1346 mddev->thread->timeout = in md_bitmap_daemon_work()
1347 mddev->bitmap_info.daemon_sleep; in md_bitmap_daemon_work()
1348 mutex_unlock(&mddev->bitmap_info.mutex); in md_bitmap_daemon_work()
1352 sector_t offset, sector_t *blocks, in md_bitmap_get_counter() argument
1354 __releases(bitmap->lock) in md_bitmap_get_counter()
1355 __acquires(bitmap->lock) in md_bitmap_get_counter()
1361 sector_t chunk = offset >> bitmap->chunkshift; in md_bitmap_get_counter()
1362 unsigned long page = chunk >> PAGE_COUNTER_SHIFT; in md_bitmap_get_counter() local
1367 err = md_bitmap_checkpage(bitmap, page, create, 0); in md_bitmap_get_counter()
1369 if (bitmap->bp[page].hijacked || in md_bitmap_get_counter()
1370 bitmap->bp[page].map == NULL) in md_bitmap_get_counter()
1371 csize = ((sector_t)1) << (bitmap->chunkshift + in md_bitmap_get_counter()
1374 csize = ((sector_t)1) << bitmap->chunkshift; in md_bitmap_get_counter()
1375 *blocks = csize - (offset & (csize - 1)); in md_bitmap_get_counter()
1382 if (bitmap->bp[page].hijacked) { /* hijacked pointer */ in md_bitmap_get_counter()
1387 &bitmap->bp[page].map)[hi]; in md_bitmap_get_counter()
1388 } else /* page is allocated */ in md_bitmap_get_counter()
1390 &(bitmap->bp[page].map[pageoff]); in md_bitmap_get_counter()
1393 int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) in md_bitmap_startwrite() argument
1400 atomic_inc(&bitmap->behind_writes); in md_bitmap_startwrite()
1401 bw = atomic_read(&bitmap->behind_writes); in md_bitmap_startwrite()
1402 if (bw > bitmap->behind_writes_used) in md_bitmap_startwrite()
1403 bitmap->behind_writes_used = bw; in md_bitmap_startwrite()
1405 pr_debug("inc write-behind count %d/%lu\n", in md_bitmap_startwrite()
1406 bw, bitmap->mddev->bitmap_info.max_write_behind); in md_bitmap_startwrite()
1413 spin_lock_irq(&bitmap->counts.lock); in md_bitmap_startwrite()
1414 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1); in md_bitmap_startwrite()
1416 spin_unlock_irq(&bitmap->counts.lock); in md_bitmap_startwrite()
1426 prepare_to_wait(&bitmap->overflow_wait, &__wait, in md_bitmap_startwrite()
1428 spin_unlock_irq(&bitmap->counts.lock); in md_bitmap_startwrite()
1430 finish_wait(&bitmap->overflow_wait, &__wait); in md_bitmap_startwrite()
1436 md_bitmap_file_set_bit(bitmap, offset); in md_bitmap_startwrite()
1437 md_bitmap_count_page(&bitmap->counts, offset, 1); in md_bitmap_startwrite()
1445 spin_unlock_irq(&bitmap->counts.lock); in md_bitmap_startwrite()
1447 offset += blocks; in md_bitmap_startwrite()
1449 sectors -= blocks; in md_bitmap_startwrite()
1457 void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset, in md_bitmap_endwrite() argument
1463 if (atomic_dec_and_test(&bitmap->behind_writes)) in md_bitmap_endwrite()
1464 wake_up(&bitmap->behind_wait); in md_bitmap_endwrite()
1465 pr_debug("dec write-behind count %d/%lu\n", in md_bitmap_endwrite()
1466 atomic_read(&bitmap->behind_writes), in md_bitmap_endwrite()
1467 bitmap->mddev->bitmap_info.max_write_behind); in md_bitmap_endwrite()
1475 spin_lock_irqsave(&bitmap->counts.lock, flags); in md_bitmap_endwrite()
1476 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0); in md_bitmap_endwrite()
1478 spin_unlock_irqrestore(&bitmap->counts.lock, flags); in md_bitmap_endwrite()
1482 if (success && !bitmap->mddev->degraded && in md_bitmap_endwrite()
1483 bitmap->events_cleared < bitmap->mddev->events) { in md_bitmap_endwrite()
1484 bitmap->events_cleared = bitmap->mddev->events; in md_bitmap_endwrite()
1485 bitmap->need_sync = 1; in md_bitmap_endwrite()
1486 sysfs_notify_dirent_safe(bitmap->sysfs_can_clear); in md_bitmap_endwrite()
1493 wake_up(&bitmap->overflow_wait); in md_bitmap_endwrite()
1495 (*bmc)--; in md_bitmap_endwrite()
1497 md_bitmap_set_pending(&bitmap->counts, offset); in md_bitmap_endwrite()
1498 bitmap->allclean = 0; in md_bitmap_endwrite()
1500 spin_unlock_irqrestore(&bitmap->counts.lock, flags); in md_bitmap_endwrite()
1501 offset += blocks; in md_bitmap_endwrite()
1503 sectors -= blocks; in md_bitmap_endwrite()
1510 static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, in __bitmap_start_sync() argument
1519 spin_lock_irq(&bitmap->counts.lock); in __bitmap_start_sync()
1520 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0); in __bitmap_start_sync()
1534 spin_unlock_irq(&bitmap->counts.lock); in __bitmap_start_sync()
1538 int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, in md_bitmap_start_sync() argument
1553 rv |= __bitmap_start_sync(bitmap, offset, in md_bitmap_start_sync()
1555 offset += blocks1; in md_bitmap_start_sync()
1562 void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted) in md_bitmap_end_sync() argument
1571 spin_lock_irqsave(&bitmap->counts.lock, flags); in md_bitmap_end_sync()
1572 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0); in md_bitmap_end_sync()
1583 md_bitmap_set_pending(&bitmap->counts, offset); in md_bitmap_end_sync()
1584 bitmap->allclean = 0; in md_bitmap_end_sync()
1589 spin_unlock_irqrestore(&bitmap->counts.lock, flags); in md_bitmap_end_sync()
1603 while (sector < bitmap->mddev->resync_max_sectors) { in md_bitmap_close_sync()
1618 bitmap->last_end_sync = jiffies; in md_bitmap_cond_end_sync()
1621 if (!force && time_before(jiffies, (bitmap->last_end_sync in md_bitmap_cond_end_sync()
1622 + bitmap->mddev->bitmap_info.daemon_sleep))) in md_bitmap_cond_end_sync()
1624 wait_event(bitmap->mddev->recovery_wait, in md_bitmap_cond_end_sync()
1625 atomic_read(&bitmap->mddev->recovery_active) == 0); in md_bitmap_cond_end_sync()
1627 bitmap->mddev->curr_resync_completed = sector; in md_bitmap_cond_end_sync()
1628 set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags); in md_bitmap_cond_end_sync()
1629 sector &= ~((1ULL << bitmap->counts.chunkshift) - 1); in md_bitmap_cond_end_sync()
1631 while (s < sector && s < bitmap->mddev->resync_max_sectors) { in md_bitmap_cond_end_sync()
1635 bitmap->last_end_sync = jiffies; in md_bitmap_cond_end_sync()
1636 sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed); in md_bitmap_cond_end_sync()
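
The mask in md_bitmap_cond_end_sync() above rounds the resync position down to a chunk boundary before chunks are marked in-sync. A quick check of the arithmetic, assuming a chunkshift of 7 (64KiB chunks, positions in sectors):

#include <stdio.h>

int main(void)
{
	const int chunkshift = 7;
	unsigned long long sector = 12345;
	unsigned long long aligned = sector & ~((1ULL << chunkshift) - 1);

	printf("%llu rounds down to %llu\n", sector, aligned);	/* 12288 */
	return 0;
}
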
1644 struct bitmap *bitmap = mddev->bitmap; in md_bitmap_sync_with_cluster()
1661 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) in md_bitmap_set_memory_bits() argument
1670 spin_lock_irq(&bitmap->counts.lock); in md_bitmap_set_memory_bits()
1671 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1); in md_bitmap_set_memory_bits()
1673 spin_unlock_irq(&bitmap->counts.lock); in md_bitmap_set_memory_bits()
1678 md_bitmap_count_page(&bitmap->counts, offset, 1); in md_bitmap_set_memory_bits()
1679 md_bitmap_set_pending(&bitmap->counts, offset); in md_bitmap_set_memory_bits()
1680 bitmap->allclean = 0; in md_bitmap_set_memory_bits()
1684 spin_unlock_irq(&bitmap->counts.lock); in md_bitmap_set_memory_bits()
1693 sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift; in md_bitmap_dirty_bits()
1696 if (sec < bitmap->mddev->recovery_cp) in md_bitmap_dirty_bits()
1701 bitmap->mddev->recovery_cp = sec; in md_bitmap_dirty_bits()
1710 struct bitmap *bitmap = mddev->bitmap; in md_bitmap_flush()
1719 sleep = mddev->bitmap_info.daemon_sleep * 2; in md_bitmap_flush()
1720 bitmap->daemon_lastrun -= sleep; in md_bitmap_flush()
1722 bitmap->daemon_lastrun -= sleep; in md_bitmap_flush()
1724 bitmap->daemon_lastrun -= sleep; in md_bitmap_flush()
1726 if (mddev->bitmap_info.external) in md_bitmap_flush()
1742 if (bitmap->sysfs_can_clear) in md_bitmap_free()
1743 sysfs_put(bitmap->sysfs_can_clear); in md_bitmap_free()
1745 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info && in md_bitmap_free()
1746 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev)) in md_bitmap_free()
1747 md_cluster_stop(bitmap->mddev); in md_bitmap_free()
1749 /* Shouldn't be needed - but just in case.... */ in md_bitmap_free()
1750 wait_event(bitmap->write_wait, in md_bitmap_free()
1751 atomic_read(&bitmap->pending_writes) == 0); in md_bitmap_free()
1754 md_bitmap_file_unmap(&bitmap->storage); in md_bitmap_free()
1756 bp = bitmap->counts.bp; in md_bitmap_free()
1757 pages = bitmap->counts.pages; in md_bitmap_free()
1761 if (bp) /* deallocate the page memory */ in md_bitmap_free()
1772 struct bitmap *bitmap = mddev->bitmap; in md_bitmap_wait_behind_writes()
1775 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) { in md_bitmap_wait_behind_writes()
1776 pr_debug("md:%s: behind writes in progress - waiting to stop.\n", in md_bitmap_wait_behind_writes()
1779 wait_event(bitmap->behind_wait, in md_bitmap_wait_behind_writes()
1780 atomic_read(&bitmap->behind_writes) == 0); in md_bitmap_wait_behind_writes()
1786 struct bitmap *bitmap = mddev->bitmap; in md_bitmap_destroy()
1792 if (!mddev->serialize_policy) in md_bitmap_destroy()
1795 mutex_lock(&mddev->bitmap_info.mutex); in md_bitmap_destroy()
1796 spin_lock(&mddev->lock); in md_bitmap_destroy()
1797 mddev->bitmap = NULL; /* disconnect from the md device */ in md_bitmap_destroy()
1798 spin_unlock(&mddev->lock); in md_bitmap_destroy()
1799 mutex_unlock(&mddev->bitmap_info.mutex); in md_bitmap_destroy()
1800 if (mddev->thread) in md_bitmap_destroy()
1801 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; in md_bitmap_destroy()
1809 * once mddev->bitmap is set
1814 sector_t blocks = mddev->resync_max_sectors; in md_bitmap_create()
1815 struct file *file = mddev->bitmap_info.file; in md_bitmap_create()
1821 BUG_ON(file && mddev->bitmap_info.offset); in md_bitmap_create()
1823 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in md_bitmap_create()
1826 return ERR_PTR(-EBUSY); in md_bitmap_create()
1831 return ERR_PTR(-ENOMEM); in md_bitmap_create()
1833 spin_lock_init(&bitmap->counts.lock); in md_bitmap_create()
1834 atomic_set(&bitmap->pending_writes, 0); in md_bitmap_create()
1835 init_waitqueue_head(&bitmap->write_wait); in md_bitmap_create()
1836 init_waitqueue_head(&bitmap->overflow_wait); in md_bitmap_create()
1837 init_waitqueue_head(&bitmap->behind_wait); in md_bitmap_create()
1839 bitmap->mddev = mddev; in md_bitmap_create()
1840 bitmap->cluster_slot = slot; in md_bitmap_create()
1842 if (mddev->kobj.sd) in md_bitmap_create()
1843 bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap"); in md_bitmap_create()
1845 bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear"); in md_bitmap_create()
1848 bitmap->sysfs_can_clear = NULL; in md_bitmap_create()
1850 bitmap->storage.file = file; in md_bitmap_create()
1854 * and bypass the page cache, we must sync the file in md_bitmap_create()
1859 /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */ in md_bitmap_create()
1860 if (!mddev->bitmap_info.external) { in md_bitmap_create()
1862 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is in md_bitmap_create()
1863 * instructing us to create a new on-disk bitmap instance. in md_bitmap_create()
1865 if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags)) in md_bitmap_create()
1871 if (mddev->bitmap_info.chunksize == 0 || in md_bitmap_create()
1872 mddev->bitmap_info.daemon_sleep == 0) in md_bitmap_create()
1875 err = -EINVAL; in md_bitmap_create()
1880 bitmap->daemon_lastrun = jiffies; in md_bitmap_create()
1881 err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1); in md_bitmap_create()
1886 bitmap->counts.pages, bmname(bitmap)); in md_bitmap_create()
1888 err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0; in md_bitmap_create()
1903 struct bitmap *bitmap = mddev->bitmap; in md_bitmap_load()
1913 md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes); in md_bitmap_load()
1920 while (sector < mddev->resync_max_sectors) { in md_bitmap_load()
1927 if (mddev->degraded == 0 in md_bitmap_load()
1928 || bitmap->events_cleared == mddev->events) in md_bitmap_load()
1930 * re-add of a missing device */ in md_bitmap_load()
1931 start = mddev->recovery_cp; in md_bitmap_load()
1933 mutex_lock(&mddev->bitmap_info.mutex); in md_bitmap_load()
1935 mutex_unlock(&mddev->bitmap_info.mutex); in md_bitmap_load()
1939 clear_bit(BITMAP_STALE, &bitmap->flags); in md_bitmap_load()
1942 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); in md_bitmap_load()
1944 mddev->thread->timeout = mddev->bitmap_info.daemon_sleep; in md_bitmap_load()
1945 md_wakeup_thread(mddev->thread); in md_bitmap_load()
1949 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) in md_bitmap_load()
1950 err = -EIO; in md_bitmap_load()
1992 return -1; in md_bitmap_copy_from_slot()
1995 counts = &bitmap->counts; in md_bitmap_copy_from_slot()
1996 for (j = 0; j < counts->chunks; j++) { in md_bitmap_copy_from_slot()
1997 block = (sector_t)j << counts->chunkshift; in md_bitmap_copy_from_slot()
2003 md_bitmap_set_memory_bits(mddev->bitmap, block, 1); in md_bitmap_copy_from_slot()
2004 md_bitmap_file_set_bit(mddev->bitmap, block); in md_bitmap_copy_from_slot()
2012 for (i = 0; i < bitmap->storage.file_pages; i++) in md_bitmap_copy_from_slot()
2017 md_bitmap_unplug(mddev->bitmap); in md_bitmap_copy_from_slot()
2035 counts = &bitmap->counts; in md_bitmap_status()
2037 chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10; in md_bitmap_status()
2040 counts->pages - counts->missing_pages, in md_bitmap_status()
2041 counts->pages, in md_bitmap_status()
2042 (counts->pages - counts->missing_pages) in md_bitmap_status()
2043 << (PAGE_SHIFT - 10), in md_bitmap_status()
2044 chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize, in md_bitmap_status()
2046 if (bitmap->storage.file) { in md_bitmap_status()
2048 seq_file_path(seq, bitmap->storage.file, " \t\n"); in md_bitmap_status()
2059 * Then quiesce, copy bits, replace bitmap, and re-start in md_bitmap_resize()
2077 if (bitmap->storage.file && !init) { in md_bitmap_resize()
2078 pr_info("md: cannot resize file-based bitmap\n"); in md_bitmap_resize()
2079 return -EINVAL; in md_bitmap_resize()
2087 long space = bitmap->mddev->bitmap_info.space; in md_bitmap_resize()
2091 * to current size - in sectors. in md_bitmap_resize()
2093 bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8); in md_bitmap_resize()
2094 if (!bitmap->mddev->bitmap_info.external) in md_bitmap_resize()
2097 bitmap->mddev->bitmap_info.space = space; in md_bitmap_resize()
2099 chunkshift = bitmap->counts.chunkshift; in md_bitmap_resize()
2100 chunkshift--; in md_bitmap_resize()
2106 if (!bitmap->mddev->bitmap_info.external) in md_bitmap_resize()
2110 chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT; in md_bitmap_resize()
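
The line above derives chunkshift from a power-of-two chunksize: ffz(~x) is the index of the lowest set bit of x, and subtracting the 512-byte block shift expresses the chunk size in sectors. A stand-alone check of that arithmetic, using a compiler builtin in place of the kernel's ffz():

#include <stdio.h>

int main(void)
{
	const int bitmap_block_shift = 9;	/* 512-byte blocks */
	unsigned long chunksize = 64 * 1024;	/* 64KiB bitmap chunk */
	int chunkshift = __builtin_ctzl(chunksize) - bitmap_block_shift;

	printf("chunksize %lu -> chunkshift %d (%lu sectors per chunk)\n",
	       chunksize, chunkshift, 1UL << chunkshift);
	return 0;
}
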
2114 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) in md_bitmap_resize()
2116 !bitmap->mddev->bitmap_info.external, in md_bitmap_resize()
2117 mddev_is_clustered(bitmap->mddev) in md_bitmap_resize()
2118 ? bitmap->cluster_slot : 0); in md_bitmap_resize()
2127 ret = -ENOMEM; in md_bitmap_resize()
2134 bitmap->mddev->pers->quiesce(bitmap->mddev, 1); in md_bitmap_resize()
2136 store.file = bitmap->storage.file; in md_bitmap_resize()
2137 bitmap->storage.file = NULL; in md_bitmap_resize()
2139 if (store.sb_page && bitmap->storage.sb_page) in md_bitmap_resize()
2141 page_address(bitmap->storage.sb_page), in md_bitmap_resize()
2143 spin_lock_irq(&bitmap->counts.lock); in md_bitmap_resize()
2144 md_bitmap_file_unmap(&bitmap->storage); in md_bitmap_resize()
2145 bitmap->storage = store; in md_bitmap_resize()
2147 old_counts = bitmap->counts; in md_bitmap_resize()
2148 bitmap->counts.bp = new_bp; in md_bitmap_resize()
2149 bitmap->counts.pages = pages; in md_bitmap_resize()
2150 bitmap->counts.missing_pages = pages; in md_bitmap_resize()
2151 bitmap->counts.chunkshift = chunkshift; in md_bitmap_resize()
2152 bitmap->counts.chunks = chunks; in md_bitmap_resize()
2153 bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift + in md_bitmap_resize()
2159 /* For cluster raid, need to pre-allocate bitmap */ in md_bitmap_resize()
2160 if (mddev_is_clustered(bitmap->mddev)) { in md_bitmap_resize()
2161 unsigned long page; in md_bitmap_resize() local
2162 for (page = 0; page < pages; page++) { in md_bitmap_resize()
2163 ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1); in md_bitmap_resize()
2167 /* deallocate the page memory */ in md_bitmap_resize()
2168 for (k = 0; k < page; k++) { in md_bitmap_resize()
2174 bitmap->counts.bp = old_counts.bp; in md_bitmap_resize()
2175 bitmap->counts.pages = old_counts.pages; in md_bitmap_resize()
2176 bitmap->counts.missing_pages = old_counts.pages; in md_bitmap_resize()
2177 bitmap->counts.chunkshift = old_counts.chunkshift; in md_bitmap_resize()
2178 bitmap->counts.chunks = old_counts.chunks; in md_bitmap_resize()
2179 bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift + in md_bitmap_resize()
2182 pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n"); in md_bitmap_resize()
2185 bitmap->counts.bp[page].count += 1; in md_bitmap_resize()
2197 bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); in md_bitmap_resize()
2199 /* need to set on-disk bits too. */ in md_bitmap_resize()
2208 md_bitmap_count_page(&bitmap->counts, block, 1); in md_bitmap_resize()
2209 md_bitmap_set_pending(&bitmap->counts, block); in md_bitmap_resize()
2218 if (bitmap->counts.bp != old_counts.bp) { in md_bitmap_resize()
2230 bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); in md_bitmap_resize()
2237 md_bitmap_count_page(&bitmap->counts, block, 1); in md_bitmap_resize()
2238 md_bitmap_set_pending(&bitmap->counts, block); in md_bitmap_resize()
2243 for (i = 0; i < bitmap->storage.file_pages; i++) in md_bitmap_resize()
2246 spin_unlock_irq(&bitmap->counts.lock); in md_bitmap_resize()
2250 bitmap->mddev->pers->quiesce(bitmap->mddev, 0); in md_bitmap_resize()
2259 location_show(struct mddev *mddev, char *page) in location_show() argument
2262 if (mddev->bitmap_info.file) in location_show()
2263 len = sprintf(page, "file"); in location_show()
2264 else if (mddev->bitmap_info.offset) in location_show()
2265 len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset); in location_show()
2267 len = sprintf(page, "none"); in location_show()
2268 len += sprintf(page+len, "\n"); in location_show()
2280 if (mddev->pers) { in location_store()
2281 if (!mddev->pers->quiesce) { in location_store()
2282 rv = -EBUSY; in location_store()
2285 if (mddev->recovery || mddev->sync_thread) { in location_store()
2286 rv = -EBUSY; in location_store()
2291 if (mddev->bitmap || mddev->bitmap_info.file || in location_store()
2292 mddev->bitmap_info.offset) { in location_store()
2295 rv = -EBUSY; in location_store()
2298 if (mddev->pers) { in location_store()
2303 mddev->bitmap_info.offset = 0; in location_store()
2304 if (mddev->bitmap_info.file) { in location_store()
2305 struct file *f = mddev->bitmap_info.file; in location_store()
2306 mddev->bitmap_info.file = NULL; in location_store()
2311 long long offset; in location_store() local
2316 rv = -EINVAL; in location_store()
2320 rv = kstrtoll(buf+1, 10, &offset); in location_store()
2322 rv = kstrtoll(buf, 10, &offset); in location_store()
2325 if (offset == 0) { in location_store()
2326 rv = -EINVAL; in location_store()
2329 if (mddev->bitmap_info.external == 0 && in location_store()
2330 mddev->major_version == 0 && in location_store()
2331 offset != mddev->bitmap_info.default_offset) { in location_store()
2332 rv = -EINVAL; in location_store()
2335 mddev->bitmap_info.offset = offset; in location_store()
2336 if (mddev->pers) { in location_store()
2338 bitmap = md_bitmap_create(mddev, -1); in location_store()
2343 mddev->bitmap = bitmap; in location_store()
2346 mddev->bitmap_info.offset = 0; in location_store()
2357 if (!mddev->external) { in location_store()
2361 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in location_store()
2362 md_wakeup_thread(mddev->thread); in location_store()
2380 space_show(struct mddev *mddev, char *page) in space_show() argument
2382 return sprintf(page, "%lu\n", mddev->bitmap_info.space); in space_show()
2396 return -EINVAL; in space_store()
2398 if (mddev->bitmap && in space_store()
2399 sectors < (mddev->bitmap->storage.bytes + 511) >> 9) in space_store()
2400 return -EFBIG; /* Bitmap is too big for this small space */ in space_store()
2403 * needed - user-space should be careful. in space_store()
2405 mddev->bitmap_info.space = sectors; in space_store()
2413 timeout_show(struct mddev *mddev, char *page) in timeout_show() argument
2416 unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ; in timeout_show()
2417 unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ; in timeout_show()
2419 len = sprintf(page, "%lu", secs); in timeout_show()
2421 len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs)); in timeout_show()
2422 len += sprintf(page+len, "\n"); in timeout_show()
2437 return -EINVAL; in timeout_store()
2442 timeout = MAX_SCHEDULE_TIMEOUT-1; in timeout_store()
2445 mddev->bitmap_info.daemon_sleep = timeout; in timeout_store()
2446 if (mddev->thread) { in timeout_store()
2447 /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then in timeout_store()
2451 if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) { in timeout_store()
2452 mddev->thread->timeout = timeout; in timeout_store()
2453 md_wakeup_thread(mddev->thread); in timeout_store()
2463 backlog_show(struct mddev *mddev, char *page) in backlog_show() argument
2465 return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind); in backlog_show()
2472 unsigned long old_mwb = mddev->bitmap_info.max_write_behind; in backlog_store()
2479 return -EINVAL; in backlog_store()
2486 if (test_bit(WriteMostly, &rdev->flags)) { in backlog_store()
2494 return -EINVAL; in backlog_store()
2497 mddev->bitmap_info.max_write_behind = backlog; in backlog_store()
2498 if (!backlog && mddev->serial_info_pool) { in backlog_store()
2500 if (!mddev->serialize_policy) in backlog_store()
2502 } else if (backlog && !mddev->serial_info_pool) { in backlog_store()
2510 md_bitmap_update_sb(mddev->bitmap); in backlog_store()
2518 chunksize_show(struct mddev *mddev, char *page) in chunksize_show() argument
2520 return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize); in chunksize_show()
2529 if (mddev->bitmap) in chunksize_store()
2530 return -EBUSY; in chunksize_store()
2536 return -EINVAL; in chunksize_store()
2537 mddev->bitmap_info.chunksize = csize; in chunksize_store()
2544 static ssize_t metadata_show(struct mddev *mddev, char *page) in metadata_show() argument
2547 return sprintf(page, "clustered\n"); in metadata_show()
2548 return sprintf(page, "%s\n", (mddev->bitmap_info.external in metadata_show()
2554 if (mddev->bitmap || in metadata_store()
2555 mddev->bitmap_info.file || in metadata_store()
2556 mddev->bitmap_info.offset) in metadata_store()
2557 return -EBUSY; in metadata_store()
2559 mddev->bitmap_info.external = 1; in metadata_store()
2562 mddev->bitmap_info.external = 0; in metadata_store()
2564 return -EINVAL; in metadata_store()
2571 static ssize_t can_clear_show(struct mddev *mddev, char *page) in can_clear_show() argument
2574 spin_lock(&mddev->lock); in can_clear_show()
2575 if (mddev->bitmap) in can_clear_show()
2576 len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ? in can_clear_show()
2579 len = sprintf(page, "\n"); in can_clear_show()
2580 spin_unlock(&mddev->lock); in can_clear_show()
2586 if (mddev->bitmap == NULL) in can_clear_store()
2587 return -ENOENT; in can_clear_store()
2589 mddev->bitmap->need_sync = 1; in can_clear_store()
2591 if (mddev->degraded) in can_clear_store()
2592 return -EBUSY; in can_clear_store()
2593 mddev->bitmap->need_sync = 0; in can_clear_store()
2595 return -EINVAL; in can_clear_store()
2603 behind_writes_used_show(struct mddev *mddev, char *page) in behind_writes_used_show() argument
2606 spin_lock(&mddev->lock); in behind_writes_used_show()
2607 if (mddev->bitmap == NULL) in behind_writes_used_show()
2608 ret = sprintf(page, "0\n"); in behind_writes_used_show()
2610 ret = sprintf(page, "%lu\n", in behind_writes_used_show()
2611 mddev->bitmap->behind_writes_used); in behind_writes_used_show()
2612 spin_unlock(&mddev->lock); in behind_writes_used_show()
2619 if (mddev->bitmap) in behind_writes_used_reset()
2620 mddev->bitmap->behind_writes_used = 0; in behind_writes_used_reset()