Lines matching refs: bh (struct buffer_head references in fs/buffer.c)

55 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
60 inline void touch_buffer(struct buffer_head *bh) in touch_buffer() argument
62 trace_block_touch_buffer(bh); in touch_buffer()
63 mark_page_accessed(bh->b_page); in touch_buffer()
67 void __lock_buffer(struct buffer_head *bh) in __lock_buffer() argument
69 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __lock_buffer()
73 void unlock_buffer(struct buffer_head *bh) in unlock_buffer() argument
75 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
77 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
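
The lock/unlock pair above implements the BH_Lock bit protocol: __lock_buffer() sleeps uninterruptibly on the bit, and unlock_buffer() clears it with release semantics before waking waiters. A minimal caller sketch (the update_block() helper is hypothetical, and it assumes a lowmem page so bh->b_data is directly addressable):

	static void update_block(struct buffer_head *bh)
	{
		lock_buffer(bh);		/* may sleep in __lock_buffer() */
		memset(bh->b_data, 0, bh->b_size);	/* exclusive access */
		set_buffer_uptodate(bh);
		unlock_buffer(bh);		/* clear_bit_unlock + wake_up_bit */
	}
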
89 struct buffer_head *head, *bh; in buffer_check_dirty_writeback() local
102 bh = head; in buffer_check_dirty_writeback()
104 if (buffer_locked(bh)) in buffer_check_dirty_writeback()
107 if (buffer_dirty(bh)) in buffer_check_dirty_writeback()
110 bh = bh->b_this_page; in buffer_check_dirty_writeback()
111 } while (bh != head); in buffer_check_dirty_writeback()
120 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer() argument
122 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
126 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error() argument
128 if (!test_bit(BH_Quiet, &bh->b_state)) in buffer_io_error()
131 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); in buffer_io_error()
142 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch() argument
145 set_buffer_uptodate(bh); in __end_buffer_read_notouch()
148 clear_buffer_uptodate(bh); in __end_buffer_read_notouch()
150 unlock_buffer(bh); in __end_buffer_read_notouch()
157 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync() argument
159 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_sync()
160 put_bh(bh); in end_buffer_read_sync()
164 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync() argument
167 set_buffer_uptodate(bh); in end_buffer_write_sync()
169 buffer_io_error(bh, ", lost sync page write"); in end_buffer_write_sync()
170 mark_buffer_write_io_error(bh); in end_buffer_write_sync()
171 clear_buffer_uptodate(bh); in end_buffer_write_sync()
173 unlock_buffer(bh); in end_buffer_write_sync()
174 put_bh(bh); in end_buffer_write_sync()
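
end_buffer_write_sync() is the stock completion for one-shot synchronous writes: it records I/O errors and drops the reference taken by the submitter. A hedged sketch of the call sequence it pairs with (essentially what __sync_dirty_buffer() further down does):

	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);			/* released by put_bh() in the endio */
		bh->b_end_io = end_buffer_write_sync;
		submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
		wait_on_buffer(bh);
	} else {
		unlock_buffer(bh);
	}
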
195 struct buffer_head *bh; in __find_get_block_slow() local
210 bh = head; in __find_get_block_slow()
212 if (!buffer_mapped(bh)) in __find_get_block_slow()
214 else if (bh->b_blocknr == block) { in __find_get_block_slow()
215 ret = bh; in __find_get_block_slow()
216 get_bh(bh); in __find_get_block_slow()
219 bh = bh->b_this_page; in __find_get_block_slow()
220 } while (bh != head); in __find_get_block_slow()
233 (unsigned long long)bh->b_blocknr, in __find_get_block_slow()
234 bh->b_state, bh->b_size, bdev, in __find_get_block_slow()
244 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read() argument
252 BUG_ON(!buffer_async_read(bh)); in end_buffer_async_read()
254 page = bh->b_page; in end_buffer_async_read()
256 set_buffer_uptodate(bh); in end_buffer_async_read()
258 clear_buffer_uptodate(bh); in end_buffer_async_read()
259 buffer_io_error(bh, ", async page read"); in end_buffer_async_read()
270 clear_buffer_async_read(bh); in end_buffer_async_read()
271 unlock_buffer(bh); in end_buffer_async_read()
272 tmp = bh; in end_buffer_async_read()
281 } while (tmp != bh); in end_buffer_async_read()
300 struct buffer_head *bh; member
307 struct buffer_head *bh = ctx->bh; in decrypt_bh() local
310 err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size, in decrypt_bh()
311 bh_offset(bh)); in decrypt_bh()
312 end_buffer_async_read(bh, err == 0); in decrypt_bh()
320 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) in end_buffer_async_read_io() argument
324 fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) { in end_buffer_async_read_io()
329 ctx->bh = bh; in end_buffer_async_read_io()
335 end_buffer_async_read(bh, uptodate); in end_buffer_async_read_io()
342 void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write() argument
349 BUG_ON(!buffer_async_write(bh)); in end_buffer_async_write()
351 page = bh->b_page; in end_buffer_async_write()
353 set_buffer_uptodate(bh); in end_buffer_async_write()
355 buffer_io_error(bh, ", lost async page write"); in end_buffer_async_write()
356 mark_buffer_write_io_error(bh); in end_buffer_async_write()
357 clear_buffer_uptodate(bh); in end_buffer_async_write()
364 clear_buffer_async_write(bh); in end_buffer_async_write()
365 unlock_buffer(bh); in end_buffer_async_write()
366 tmp = bh->b_this_page; in end_buffer_async_write()
367 while (tmp != bh) { in end_buffer_async_write()
405 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read() argument
407 bh->b_end_io = end_buffer_async_read_io; in mark_buffer_async_read()
408 set_buffer_async_read(bh); in mark_buffer_async_read()
411 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio() argument
414 bh->b_end_io = handler; in mark_buffer_async_write_endio()
415 set_buffer_async_write(bh); in mark_buffer_async_write_endio()
418 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write() argument
420 mark_buffer_async_write_endio(bh, end_buffer_async_write); in mark_buffer_async_write()
477 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue() argument
479 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
480 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
481 bh->b_assoc_map = NULL; in __remove_assoc_queue()
501 struct buffer_head *bh; in osync_buffers_list() local
508 bh = BH_ENTRY(p); in osync_buffers_list()
509 if (buffer_locked(bh)) { in osync_buffers_list()
510 get_bh(bh); in osync_buffers_list()
512 wait_on_buffer(bh); in osync_buffers_list()
513 if (!buffer_uptodate(bh)) in osync_buffers_list()
515 brelse(bh); in osync_buffers_list()
562 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block() local
563 if (bh) { in write_boundary_block()
564 if (buffer_dirty(bh)) in write_boundary_block()
565 ll_rw_block(REQ_OP_WRITE, 0, 1, &bh); in write_boundary_block()
566 put_bh(bh); in write_boundary_block()
570 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode() argument
573 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
575 mark_buffer_dirty(bh); in mark_buffer_dirty_inode()
581 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
583 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
585 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
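
mark_buffer_dirty_inode() dirties the buffer and, if needed, queues it on the owning inode's associated-buffers list so fsync_buffers_list() can later write and wait on it. Sketch of a filesystem dirtying a metadata block on behalf of an inode (meta_blocknr is a hypothetical block number):

	struct buffer_head *bh = sb_bread(inode->i_sb, meta_blocknr);
	if (bh) {
		/* ... modify bh->b_data ... */
		mark_buffer_dirty_inode(bh, inode);	/* dirty + associate */
		brelse(bh);
	}
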
652 struct buffer_head *bh = head; in __set_page_dirty_buffers() local
655 set_buffer_dirty(bh); in __set_page_dirty_buffers()
656 bh = bh->b_this_page; in __set_page_dirty_buffers()
657 } while (bh != head); in __set_page_dirty_buffers()
700 struct buffer_head *bh; in fsync_buffers_list() local
711 bh = BH_ENTRY(list->next); in fsync_buffers_list()
712 mapping = bh->b_assoc_map; in fsync_buffers_list()
713 __remove_assoc_queue(bh); in fsync_buffers_list()
717 if (buffer_dirty(bh) || buffer_locked(bh)) { in fsync_buffers_list()
718 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
719 bh->b_assoc_map = mapping; in fsync_buffers_list()
720 if (buffer_dirty(bh)) { in fsync_buffers_list()
721 get_bh(bh); in fsync_buffers_list()
730 write_dirty_buffer(bh, REQ_SYNC); in fsync_buffers_list()
738 brelse(bh); in fsync_buffers_list()
749 bh = BH_ENTRY(tmp.prev); in fsync_buffers_list()
750 get_bh(bh); in fsync_buffers_list()
751 mapping = bh->b_assoc_map; in fsync_buffers_list()
752 __remove_assoc_queue(bh); in fsync_buffers_list()
756 if (buffer_dirty(bh)) { in fsync_buffers_list()
757 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
759 bh->b_assoc_map = mapping; in fsync_buffers_list()
762 wait_on_buffer(bh); in fsync_buffers_list()
763 if (!buffer_uptodate(bh)) in fsync_buffers_list()
765 brelse(bh); in fsync_buffers_list()
818 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers() local
819 if (buffer_dirty(bh)) { in remove_inode_buffers()
823 __remove_assoc_queue(bh); in remove_inode_buffers()
842 struct buffer_head *bh, *head; in alloc_page_buffers() local
856 bh = alloc_buffer_head(gfp); in alloc_page_buffers()
857 if (!bh) in alloc_page_buffers()
860 bh->b_this_page = head; in alloc_page_buffers()
861 bh->b_blocknr = -1; in alloc_page_buffers()
862 head = bh; in alloc_page_buffers()
864 bh->b_size = size; in alloc_page_buffers()
867 set_bh_page(bh, page, offset); in alloc_page_buffers()
879 bh = head; in alloc_page_buffers()
881 free_buffer_head(bh); in alloc_page_buffers()
892 struct buffer_head *bh, *tail; in link_dev_buffers() local
894 bh = head; in link_dev_buffers()
896 tail = bh; in link_dev_buffers()
897 bh = bh->b_this_page; in link_dev_buffers()
898 } while (bh); in link_dev_buffers()
923 struct buffer_head *bh = head; in init_page_buffers() local
928 if (!buffer_mapped(bh)) { in init_page_buffers()
929 bh->b_end_io = NULL; in init_page_buffers()
930 bh->b_private = NULL; in init_page_buffers()
931 bh->b_bdev = bdev; in init_page_buffers()
932 bh->b_blocknr = block; in init_page_buffers()
934 set_buffer_uptodate(bh); in init_page_buffers()
936 set_buffer_mapped(bh); in init_page_buffers()
939 bh = bh->b_this_page; in init_page_buffers()
940 } while (bh != head); in init_page_buffers()
959 struct buffer_head *bh; in grow_dev_page() local
979 bh = page_buffers(page); in grow_dev_page()
980 if (bh->b_size == size) { in grow_dev_page()
993 bh = alloc_page_buffers(page, size, true); in grow_dev_page()
1001 link_dev_buffers(page, bh); in grow_dev_page()
1063 struct buffer_head *bh; in __getblk_slow() local
1066 bh = __find_get_block(bdev, block, size); in __getblk_slow()
1067 if (bh) in __getblk_slow()
1068 return bh; in __getblk_slow()
1111 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty() argument
1113 WARN_ON_ONCE(!buffer_uptodate(bh)); in mark_buffer_dirty()
1115 trace_block_dirty_buffer(bh); in mark_buffer_dirty()
1123 if (buffer_dirty(bh)) { in mark_buffer_dirty()
1125 if (buffer_dirty(bh)) in mark_buffer_dirty()
1129 if (!test_set_buffer_dirty(bh)) { in mark_buffer_dirty()
1130 struct page *page = bh->b_page; in mark_buffer_dirty()
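
mark_buffer_dirty() only flips per-buffer and per-page dirty state; the contents must already be uptodate, hence the WARN_ON_ONCE above. The usual read-modify-write pattern, sketched (off, src, and len are hypothetical):

	lock_buffer(bh);
	memcpy(bh->b_data + off, src, len);	/* buffer already uptodate */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);			/* 0->1 also dirties the page */
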
1146 void mark_buffer_write_io_error(struct buffer_head *bh) in mark_buffer_write_io_error() argument
1150 set_buffer_write_io_error(bh); in mark_buffer_write_io_error()
1152 if (bh->b_page && bh->b_page->mapping) in mark_buffer_write_io_error()
1153 mapping_set_error(bh->b_page->mapping, -EIO); in mark_buffer_write_io_error()
1154 if (bh->b_assoc_map) in mark_buffer_write_io_error()
1155 mapping_set_error(bh->b_assoc_map, -EIO); in mark_buffer_write_io_error()
1157 sb = READ_ONCE(bh->b_bdev->bd_super); in mark_buffer_write_io_error()
1185 void __bforget(struct buffer_head *bh) in __bforget() argument
1187 clear_buffer_dirty(bh); in __bforget()
1188 if (bh->b_assoc_map) { in __bforget()
1189 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1192 list_del_init(&bh->b_assoc_buffers); in __bforget()
1193 bh->b_assoc_map = NULL; in __bforget()
1196 __brelse(bh); in __bforget()
1200 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow() argument
1202 lock_buffer(bh); in __bread_slow()
1203 if (buffer_uptodate(bh)) { in __bread_slow()
1204 unlock_buffer(bh); in __bread_slow()
1205 return bh; in __bread_slow()
1207 get_bh(bh); in __bread_slow()
1208 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1209 submit_bh(REQ_OP_READ, 0, bh); in __bread_slow()
1210 wait_on_buffer(bh); in __bread_slow()
1211 if (buffer_uptodate(bh)) in __bread_slow()
1212 return bh; in __bread_slow()
1214 brelse(bh); in __bread_slow()
1260 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install() argument
1262 struct buffer_head *evictee = bh; in bh_lru_install()
1272 if (evictee == bh) { in bh_lru_install()
1278 get_bh(bh); in bh_lru_install()
1295 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru() local
1297 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && in lookup_bh_lru()
1298 bh->b_size == size) { in lookup_bh_lru()
1305 __this_cpu_write(bh_lrus.bhs[0], bh); in lookup_bh_lru()
1307 get_bh(bh); in lookup_bh_lru()
1308 ret = bh; in lookup_bh_lru()
1324 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block() local
1326 if (bh == NULL) { in __find_get_block()
1328 bh = __find_get_block_slow(bdev, block); in __find_get_block()
1329 if (bh) in __find_get_block()
1330 bh_lru_install(bh); in __find_get_block()
1332 touch_buffer(bh); in __find_get_block()
1334 return bh; in __find_get_block()
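
__find_get_block() checks the per-CPU bh_lrus cache first (lookup_bh_lru) and falls back to the page-cache walk in __find_get_block_slow(), installing any hit into the LRU. The returned buffer, if any, carries a reference. Lookup sketch:

	/* Look up a cached buffer without creating it; NULL = not cached. */
	struct buffer_head *bh = __find_get_block(bdev, block, size);
	if (bh) {
		/* ... inspect buffer_dirty(bh), bh->b_data, ... */
		put_bh(bh);		/* drop the lookup reference */
	}
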
1350 struct buffer_head *bh = __find_get_block(bdev, block, size); in __getblk_gfp() local
1353 if (bh == NULL) in __getblk_gfp()
1354 bh = __getblk_slow(bdev, block, size, gfp); in __getblk_gfp()
1355 return bh; in __getblk_gfp()
1364 struct buffer_head *bh = __getblk(bdev, block, size); in __breadahead() local
1365 if (likely(bh)) { in __breadahead()
1366 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); in __breadahead()
1367 brelse(bh); in __breadahead()
1375 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __breadahead_gfp() local
1376 if (likely(bh)) { in __breadahead_gfp()
1377 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); in __breadahead_gfp()
1378 brelse(bh); in __breadahead_gfp()
1399 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __bread_gfp() local
1401 if (likely(bh) && !buffer_uptodate(bh)) in __bread_gfp()
1402 bh = __bread_slow(bh); in __bread_gfp()
1403 return bh; in __bread_gfp()
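
__bread_gfp() returns an uptodate buffer or NULL; __bread_slow() above covers the miss path (lock, submit REQ_OP_READ with end_buffer_read_sync, wait). Conventional usage via the __bread() wrapper, sketched:

	struct buffer_head *bh = __bread(bdev, block, size);
	if (!bh)
		return -EIO;		/* I/O error or allocation failure */
	/* bh->b_data is valid and uptodate here */
	brelse(bh);
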
1443 void set_bh_page(struct buffer_head *bh, in set_bh_page() argument
1446 bh->b_page = page; in set_bh_page()
1452 bh->b_data = (char *)(0 + offset); in set_bh_page()
1454 bh->b_data = page_address(page) + offset; in set_bh_page()
1467 static void discard_buffer(struct buffer_head * bh) in discard_buffer() argument
1471 lock_buffer(bh); in discard_buffer()
1472 clear_buffer_dirty(bh); in discard_buffer()
1473 bh->b_bdev = NULL; in discard_buffer()
1474 b_state = bh->b_state; in discard_buffer()
1476 b_state_old = cmpxchg(&bh->b_state, b_state, in discard_buffer()
1482 unlock_buffer(bh); in discard_buffer()
1504 struct buffer_head *head, *bh, *next; in block_invalidatepage() local
1518 bh = head; in block_invalidatepage()
1520 unsigned int next_off = curr_off + bh->b_size; in block_invalidatepage()
1521 next = bh->b_this_page; in block_invalidatepage()
1533 discard_buffer(bh); in block_invalidatepage()
1535 bh = next; in block_invalidatepage()
1536 } while (bh != head); in block_invalidatepage()
1559 struct buffer_head *bh, *head, *tail; in create_empty_buffers() local
1562 bh = head; in create_empty_buffers()
1564 bh->b_state |= b_state; in create_empty_buffers()
1565 tail = bh; in create_empty_buffers()
1566 bh = bh->b_this_page; in create_empty_buffers()
1567 } while (bh); in create_empty_buffers()
1572 bh = head; in create_empty_buffers()
1575 set_buffer_dirty(bh); in create_empty_buffers()
1577 set_buffer_uptodate(bh); in create_empty_buffers()
1578 bh = bh->b_this_page; in create_empty_buffers()
1579 } while (bh != head); in create_empty_buffers()
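
create_empty_buffers() attaches a fresh circular ring of buffers to a page and propagates the page's dirty/uptodate bits onto each one. The usual guard in an address_space callback, sketched for a locked page:

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	bh = head = page_buffers(page);
	do {
		/* ... per-block work ... */
		bh = bh->b_this_page;	/* b_this_page links form a ring */
	} while (bh != head);
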
1614 struct buffer_head *bh; in clean_bdev_aliases() local
1636 bh = head; in clean_bdev_aliases()
1638 if (!buffer_mapped(bh) || (bh->b_blocknr < block)) in clean_bdev_aliases()
1640 if (bh->b_blocknr >= block + len) in clean_bdev_aliases()
1642 clear_buffer_dirty(bh); in clean_bdev_aliases()
1643 wait_on_buffer(bh); in clean_bdev_aliases()
1644 clear_buffer_req(bh); in clean_bdev_aliases()
1646 bh = bh->b_this_page; in clean_bdev_aliases()
1647 } while (bh != head); in clean_bdev_aliases()
1719 struct buffer_head *bh, *head; in __block_write_full_page() local
1737 bh = head; in __block_write_full_page()
1738 blocksize = bh->b_size; in __block_write_full_page()
1758 clear_buffer_dirty(bh); in __block_write_full_page()
1759 set_buffer_uptodate(bh); in __block_write_full_page()
1760 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && in __block_write_full_page()
1761 buffer_dirty(bh)) { in __block_write_full_page()
1762 WARN_ON(bh->b_size != blocksize); in __block_write_full_page()
1763 err = get_block(inode, block, bh, 1); in __block_write_full_page()
1766 clear_buffer_delay(bh); in __block_write_full_page()
1767 if (buffer_new(bh)) { in __block_write_full_page()
1769 clear_buffer_new(bh); in __block_write_full_page()
1770 clean_bdev_bh_alias(bh); in __block_write_full_page()
1773 bh = bh->b_this_page; in __block_write_full_page()
1775 } while (bh != head); in __block_write_full_page()
1778 if (!buffer_mapped(bh)) in __block_write_full_page()
1788 lock_buffer(bh); in __block_write_full_page()
1789 } else if (!trylock_buffer(bh)) { in __block_write_full_page()
1793 if (test_clear_buffer_dirty(bh)) { in __block_write_full_page()
1794 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1796 unlock_buffer(bh); in __block_write_full_page()
1798 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1808 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1809 if (buffer_async_write(bh)) { in __block_write_full_page()
1810 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, in __block_write_full_page()
1814 bh = next; in __block_write_full_page()
1815 } while (bh != head); in __block_write_full_page()
1842 bh = head; in __block_write_full_page()
1845 if (buffer_mapped(bh) && buffer_dirty(bh) && in __block_write_full_page()
1846 !buffer_delay(bh)) { in __block_write_full_page()
1847 lock_buffer(bh); in __block_write_full_page()
1848 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1854 clear_buffer_dirty(bh); in __block_write_full_page()
1856 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1862 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1863 if (buffer_async_write(bh)) { in __block_write_full_page()
1864 clear_buffer_dirty(bh); in __block_write_full_page()
1865 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, in __block_write_full_page()
1869 bh = next; in __block_write_full_page()
1870 } while (bh != head); in __block_write_full_page()
1884 struct buffer_head *head, *bh; in page_zero_new_buffers() local
1890 bh = head = page_buffers(page); in page_zero_new_buffers()
1893 block_end = block_start + bh->b_size; in page_zero_new_buffers()
1895 if (buffer_new(bh)) { in page_zero_new_buffers()
1904 set_buffer_uptodate(bh); in page_zero_new_buffers()
1907 clear_buffer_new(bh); in page_zero_new_buffers()
1908 mark_buffer_dirty(bh); in page_zero_new_buffers()
1913 bh = bh->b_this_page; in page_zero_new_buffers()
1914 } while (bh != head); in page_zero_new_buffers()
1919 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, in iomap_to_bh() argument
1924 bh->b_bdev = iomap->bdev; in iomap_to_bh()
1941 if (!buffer_uptodate(bh) || in iomap_to_bh()
1943 set_buffer_new(bh); in iomap_to_bh()
1946 if (!buffer_uptodate(bh) || in iomap_to_bh()
1948 set_buffer_new(bh); in iomap_to_bh()
1949 set_buffer_uptodate(bh); in iomap_to_bh()
1950 set_buffer_mapped(bh); in iomap_to_bh()
1951 set_buffer_delay(bh); in iomap_to_bh()
1959 set_buffer_new(bh); in iomap_to_bh()
1960 set_buffer_unwritten(bh); in iomap_to_bh()
1965 set_buffer_new(bh); in iomap_to_bh()
1966 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> in iomap_to_bh()
1968 set_buffer_mapped(bh); in iomap_to_bh()
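
For IOMAP_MAPPED extents, iomap_to_bh() converts byte-addressed extent geometry into a block number; a worked example of the arithmetic on line 1966 (all values hypothetical):

	/* iomap->addr   = 0x100000  disk byte offset of the extent
	 * iomap->offset = 0x40000   file byte offset where it starts
	 * offset        = 0x42000   file byte offset of this block
	 * i_blkbits     = 12        4 KiB blocks
	 * b_blocknr = (0x100000 + 0x42000 - 0x40000) >> 12 = 0x102 (258)
	 */
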
1983 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin_int() local
1996 for(bh = head, block_start = 0; bh != head || !block_start; in __block_write_begin_int()
1997 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin_int()
2001 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2002 set_buffer_uptodate(bh); in __block_write_begin_int()
2006 if (buffer_new(bh)) in __block_write_begin_int()
2007 clear_buffer_new(bh); in __block_write_begin_int()
2008 if (!buffer_mapped(bh)) { in __block_write_begin_int()
2009 WARN_ON(bh->b_size != blocksize); in __block_write_begin_int()
2011 err = get_block(inode, block, bh, 1); in __block_write_begin_int()
2015 iomap_to_bh(inode, block, bh, iomap); in __block_write_begin_int()
2018 if (buffer_new(bh)) { in __block_write_begin_int()
2019 clean_bdev_bh_alias(bh); in __block_write_begin_int()
2021 clear_buffer_new(bh); in __block_write_begin_int()
2022 set_buffer_uptodate(bh); in __block_write_begin_int()
2023 mark_buffer_dirty(bh); in __block_write_begin_int()
2034 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2035 set_buffer_uptodate(bh); in __block_write_begin_int()
2038 if (!buffer_uptodate(bh) && !buffer_delay(bh) && in __block_write_begin_int()
2039 !buffer_unwritten(bh) && in __block_write_begin_int()
2041 ll_rw_block(REQ_OP_READ, 0, 1, &bh); in __block_write_begin_int()
2042 *wait_bh++=bh; in __block_write_begin_int()
2071 struct buffer_head *bh, *head; in __block_commit_write() local
2073 bh = head = page_buffers(page); in __block_commit_write()
2074 blocksize = bh->b_size; in __block_commit_write()
2080 if (!buffer_uptodate(bh)) in __block_commit_write()
2083 set_buffer_uptodate(bh); in __block_commit_write()
2084 mark_buffer_dirty(bh); in __block_commit_write()
2086 clear_buffer_new(bh); in __block_commit_write()
2089 bh = bh->b_this_page; in __block_commit_write()
2090 } while (bh != head); in __block_commit_write()
2219 struct buffer_head *bh, *head; in block_is_partially_uptodate() local
2232 bh = head; in block_is_partially_uptodate()
2237 if (!buffer_uptodate(bh)) { in block_is_partially_uptodate()
2245 bh = bh->b_this_page; in block_is_partially_uptodate()
2246 } while (bh != head); in block_is_partially_uptodate()
2263 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_page() local
2274 bh = head; in block_read_full_page()
2279 if (buffer_uptodate(bh)) in block_read_full_page()
2282 if (!buffer_mapped(bh)) { in block_read_full_page()
2287 WARN_ON(bh->b_size != blocksize); in block_read_full_page()
2288 err = get_block(inode, iblock, bh, 0); in block_read_full_page()
2292 if (!buffer_mapped(bh)) { in block_read_full_page()
2295 set_buffer_uptodate(bh); in block_read_full_page()
2302 if (buffer_uptodate(bh)) in block_read_full_page()
2305 arr[nr++] = bh; in block_read_full_page()
2306 } while (i++, iblock++, (bh = bh->b_this_page) != head); in block_read_full_page()
2324 bh = arr[i]; in block_read_full_page()
2325 lock_buffer(bh); in block_read_full_page()
2326 mark_buffer_async_read(bh); in block_read_full_page()
2335 bh = arr[i]; in block_read_full_page()
2336 if (buffer_uptodate(bh)) in block_read_full_page()
2337 end_buffer_async_read(bh, 1); in block_read_full_page()
2339 submit_bh(REQ_OP_READ, 0, bh); in block_read_full_page()
2543 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) in end_buffer_read_nobh() argument
2545 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_nobh()
2555 struct buffer_head *bh; in attach_nobh_buffers() local
2560 bh = head; in attach_nobh_buffers()
2563 set_buffer_dirty(bh); in attach_nobh_buffers()
2564 if (!bh->b_this_page) in attach_nobh_buffers()
2565 bh->b_this_page = head; in attach_nobh_buffers()
2566 bh = bh->b_this_page; in attach_nobh_buffers()
2567 } while (bh != head); in attach_nobh_buffers()
2585 struct buffer_head *head, *bh; in nobh_write_begin() local
2638 for (block_start = 0, block_in_page = 0, bh = head; in nobh_write_begin()
2640 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { in nobh_write_begin()
2644 bh->b_state = 0; in nobh_write_begin()
2649 bh, create); in nobh_write_begin()
2652 if (!buffer_mapped(bh)) in nobh_write_begin()
2654 if (buffer_new(bh)) in nobh_write_begin()
2655 clean_bdev_bh_alias(bh); in nobh_write_begin()
2657 set_buffer_uptodate(bh); in nobh_write_begin()
2660 if (buffer_new(bh) || !buffer_mapped(bh)) { in nobh_write_begin()
2665 if (buffer_uptodate(bh)) in nobh_write_begin()
2668 lock_buffer(bh); in nobh_write_begin()
2669 bh->b_end_io = end_buffer_read_nobh; in nobh_write_begin()
2670 submit_bh(REQ_OP_READ, 0, bh); in nobh_write_begin()
2681 for (bh = head; bh; bh = bh->b_this_page) { in nobh_write_begin()
2682 wait_on_buffer(bh); in nobh_write_begin()
2683 if (!buffer_uptodate(bh)) in nobh_write_begin()
2724 struct buffer_head *bh; in nobh_write_end() local
2744 bh = head; in nobh_write_end()
2746 free_buffer_head(bh); in nobh_write_end()
2883 struct buffer_head *bh; in block_truncate_page() local
2905 bh = page_buffers(page); in block_truncate_page()
2908 bh = bh->b_this_page; in block_truncate_page()
2914 if (!buffer_mapped(bh)) { in block_truncate_page()
2915 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2916 err = get_block(inode, iblock, bh, 0); in block_truncate_page()
2920 if (!buffer_mapped(bh)) in block_truncate_page()
2926 set_buffer_uptodate(bh); in block_truncate_page()
2928 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { in block_truncate_page()
2930 ll_rw_block(REQ_OP_READ, 0, 1, &bh); in block_truncate_page()
2931 wait_on_buffer(bh); in block_truncate_page()
2933 if (!buffer_uptodate(bh)) in block_truncate_page()
2938 mark_buffer_dirty(bh); in block_truncate_page()
3000 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync() local
3003 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
3005 bh->b_end_io(bh, !bio->bi_status); in end_bio_bh_io_sync()
3009 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, in submit_bh_wbc() argument
3014 BUG_ON(!buffer_locked(bh)); in submit_bh_wbc()
3015 BUG_ON(!buffer_mapped(bh)); in submit_bh_wbc()
3016 BUG_ON(!bh->b_end_io); in submit_bh_wbc()
3017 BUG_ON(buffer_delay(bh)); in submit_bh_wbc()
3018 BUG_ON(buffer_unwritten(bh)); in submit_bh_wbc()
3023 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) in submit_bh_wbc()
3024 clear_buffer_write_io_error(bh); in submit_bh_wbc()
3028 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); in submit_bh_wbc()
3030 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh_wbc()
3031 bio_set_dev(bio, bh->b_bdev); in submit_bh_wbc()
3034 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in submit_bh_wbc()
3035 BUG_ON(bio->bi_iter.bi_size != bh->b_size); in submit_bh_wbc()
3038 bio->bi_private = bh; in submit_bh_wbc()
3040 if (buffer_meta(bh)) in submit_bh_wbc()
3042 if (buffer_prio(bh)) in submit_bh_wbc()
3051 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); in submit_bh_wbc()
3058 int submit_bh(int op, int op_flags, struct buffer_head *bh) in submit_bh() argument
3060 return submit_bh_wbc(op, op_flags, bh, 0, NULL); in submit_bh()
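
submit_bh_wbc() wraps the buffer in a single-segment bio: bi_sector comes from b_blocknr * (b_size >> 9) (so a 4 KiB block 100 lands at sector 800), and end_bio_bh_io_sync() relays completion to b_end_io. Sketch of an asynchronous single-buffer read through submit_bh():

	lock_buffer(bh);
	if (!buffer_uptodate(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;	/* unlocks + put_bh */
		submit_bh(REQ_OP_READ, 0, bh);
	} else {
		unlock_buffer(bh);
	}
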
3095 struct buffer_head *bh = bhs[i]; in ll_rw_block() local
3097 if (!trylock_buffer(bh)) in ll_rw_block()
3100 if (test_clear_buffer_dirty(bh)) { in ll_rw_block()
3101 bh->b_end_io = end_buffer_write_sync; in ll_rw_block()
3102 get_bh(bh); in ll_rw_block()
3103 submit_bh(op, op_flags, bh); in ll_rw_block()
3107 if (!buffer_uptodate(bh)) { in ll_rw_block()
3108 bh->b_end_io = end_buffer_read_sync; in ll_rw_block()
3109 get_bh(bh); in ll_rw_block()
3110 submit_bh(op, op_flags, bh); in ll_rw_block()
3114 unlock_buffer(bh); in ll_rw_block()
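
ll_rw_block() is best-effort: it silently skips buffers it cannot trylock and buffers that need no I/O, so callers must re-check buffer state themselves. Batched-read sketch over a caller-built array (nr and bhs are hypothetical):

	ll_rw_block(REQ_OP_READ, 0, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))	/* skipped or failed read */
			err = -EIO;
	}
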
3119 void write_dirty_buffer(struct buffer_head *bh, int op_flags) in write_dirty_buffer() argument
3121 lock_buffer(bh); in write_dirty_buffer()
3122 if (!test_clear_buffer_dirty(bh)) { in write_dirty_buffer()
3123 unlock_buffer(bh); in write_dirty_buffer()
3126 bh->b_end_io = end_buffer_write_sync; in write_dirty_buffer()
3127 get_bh(bh); in write_dirty_buffer()
3128 submit_bh(REQ_OP_WRITE, op_flags, bh); in write_dirty_buffer()
3137 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) in __sync_dirty_buffer() argument
3141 WARN_ON(atomic_read(&bh->b_count) < 1); in __sync_dirty_buffer()
3142 lock_buffer(bh); in __sync_dirty_buffer()
3143 if (test_clear_buffer_dirty(bh)) { in __sync_dirty_buffer()
3148 if (!buffer_mapped(bh)) { in __sync_dirty_buffer()
3149 unlock_buffer(bh); in __sync_dirty_buffer()
3153 get_bh(bh); in __sync_dirty_buffer()
3154 bh->b_end_io = end_buffer_write_sync; in __sync_dirty_buffer()
3155 ret = submit_bh(REQ_OP_WRITE, op_flags, bh); in __sync_dirty_buffer()
3156 wait_on_buffer(bh); in __sync_dirty_buffer()
3157 if (!ret && !buffer_uptodate(bh)) in __sync_dirty_buffer()
3160 unlock_buffer(bh); in __sync_dirty_buffer()
3166 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer() argument
3168 return __sync_dirty_buffer(bh, REQ_SYNC); in sync_dirty_buffer()
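
sync_dirty_buffer() is __sync_dirty_buffer() with REQ_SYNC: write the buffer if dirty, wait, and return -EIO if it does not come back uptodate. Typical metadata-commit sketch:

	mark_buffer_dirty(bh);
	if (sync_dirty_buffer(bh))	/* writes with REQ_SYNC and waits */
		return -EIO;		/* lost sync page write */
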
3192 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy() argument
3194 return atomic_read(&bh->b_count) | in buffer_busy()
3195 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
3202 struct buffer_head *bh; in drop_buffers() local
3204 bh = head; in drop_buffers()
3206 if (buffer_busy(bh)) in drop_buffers()
3208 bh = bh->b_this_page; in drop_buffers()
3209 } while (bh != head); in drop_buffers()
3212 struct buffer_head *next = bh->b_this_page; in drop_buffers()
3214 if (bh->b_assoc_map) in drop_buffers()
3215 __remove_assoc_queue(bh); in drop_buffers()
3216 bh = next; in drop_buffers()
3217 } while (bh != head); in drop_buffers()
3262 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers() local
3265 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
3266 free_buffer_head(bh); in try_to_free_buffers()
3267 bh = next; in try_to_free_buffers()
3268 } while (bh != buffers_to_free); in try_to_free_buffers()
3349 void free_buffer_head(struct buffer_head *bh) in free_buffer_head() argument
3351 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3352 kmem_cache_free(bh_cachep, bh); in free_buffer_head()
3381 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock() argument
3383 if (!buffer_uptodate(bh)) { in bh_uptodate_or_lock()
3384 lock_buffer(bh); in bh_uptodate_or_lock()
3385 if (!buffer_uptodate(bh)) in bh_uptodate_or_lock()
3387 unlock_buffer(bh); in bh_uptodate_or_lock()
3399 int bh_submit_read(struct buffer_head *bh) in bh_submit_read() argument
3401 BUG_ON(!buffer_locked(bh)); in bh_submit_read()
3403 if (buffer_uptodate(bh)) { in bh_submit_read()
3404 unlock_buffer(bh); in bh_submit_read()
3408 get_bh(bh); in bh_submit_read()
3409 bh->b_end_io = end_buffer_read_sync; in bh_submit_read()
3410 submit_bh(REQ_OP_READ, 0, bh); in bh_submit_read()
3411 wait_on_buffer(bh); in bh_submit_read()
3412 if (buffer_uptodate(bh)) in bh_submit_read()
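
bh_uptodate_or_lock() and bh_submit_read() pair into the standard read-once idiom: the first returns 1 if the buffer is already uptodate, otherwise 0 with the buffer locked, which is exactly the state the second expects. Sketch:

	if (!bh_uptodate_or_lock(bh)) {
		if (bh_submit_read(bh) < 0)	/* reads, waits, unlocks */
			return -EIO;
	}
	/* bh is uptodate and unlocked here */
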