Lines matching refs:bh in fs/buffer.c (Linux kernel buffer-head layer)
52 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
57 inline void touch_buffer(struct buffer_head *bh) in touch_buffer() argument
59 trace_block_touch_buffer(bh); in touch_buffer()
60 mark_page_accessed(bh->b_page); in touch_buffer()
64 void __lock_buffer(struct buffer_head *bh) in __lock_buffer() argument
66 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __lock_buffer()
70 void unlock_buffer(struct buffer_head *bh) in unlock_buffer() argument
72 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
74 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
86 struct buffer_head *head, *bh; in buffer_check_dirty_writeback() local
99 bh = head; in buffer_check_dirty_writeback()
101 if (buffer_locked(bh)) in buffer_check_dirty_writeback()
104 if (buffer_dirty(bh)) in buffer_check_dirty_writeback()
107 bh = bh->b_this_page; in buffer_check_dirty_writeback()
108 } while (bh != head); in buffer_check_dirty_writeback()
117 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer() argument
119 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
131 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error() argument
133 if (!test_bit(BH_Quiet, &bh->b_state)) in buffer_io_error()
136 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); in buffer_io_error()
147 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch() argument
150 set_buffer_uptodate(bh); in __end_buffer_read_notouch()
153 clear_buffer_uptodate(bh); in __end_buffer_read_notouch()
155 unlock_buffer(bh); in __end_buffer_read_notouch()
162 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync() argument
164 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_sync()
165 put_bh(bh); in end_buffer_read_sync()
169 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync() argument
172 set_buffer_uptodate(bh); in end_buffer_write_sync()
174 buffer_io_error(bh, ", lost sync page write"); in end_buffer_write_sync()
175 mark_buffer_write_io_error(bh); in end_buffer_write_sync()
176 clear_buffer_uptodate(bh); in end_buffer_write_sync()
178 unlock_buffer(bh); in end_buffer_write_sync()
179 put_bh(bh); in end_buffer_write_sync()
200 struct buffer_head *bh; in __find_get_block_slow() local
215 bh = head; in __find_get_block_slow()
217 if (!buffer_mapped(bh)) in __find_get_block_slow()
219 else if (bh->b_blocknr == block) { in __find_get_block_slow()
220 ret = bh; in __find_get_block_slow()
221 get_bh(bh); in __find_get_block_slow()
224 bh = bh->b_this_page; in __find_get_block_slow()
225 } while (bh != head); in __find_get_block_slow()
238 (unsigned long long)bh->b_blocknr, in __find_get_block_slow()
239 bh->b_state, bh->b_size, bdev, in __find_get_block_slow()
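__find_get_block_slow() walks the page's circular b_this_page ring and takes a reference with get_bh() on a match; the same do/while idiom recurs in most of the loops below. The traversal in isolation (demo_count_dirty() is hypothetical):

	#include <linux/buffer_head.h>

	/* Sketch: walk a page's circular buffer ring, as the lookup above does. */
	static int demo_count_dirty(struct page *page)
	{
		struct buffer_head *head = page_buffers(page);	/* page must have buffers */
		struct buffer_head *bh = head;
		int nr = 0;

		do {
			if (buffer_dirty(bh))
				nr++;
			bh = bh->b_this_page;	/* last buffer links back to head */
		} while (bh != head);
		return nr;
	}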
253 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read() argument
261 BUG_ON(!buffer_async_read(bh)); in end_buffer_async_read()
263 page = bh->b_page; in end_buffer_async_read()
265 set_buffer_uptodate(bh); in end_buffer_async_read()
267 clear_buffer_uptodate(bh); in end_buffer_async_read()
268 buffer_io_error(bh, ", async page read"); in end_buffer_async_read()
280 clear_buffer_async_read(bh); in end_buffer_async_read()
281 unlock_buffer(bh); in end_buffer_async_read()
282 tmp = bh; in end_buffer_async_read()
291 } while (tmp != bh); in end_buffer_async_read()
314 void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write() argument
321 BUG_ON(!buffer_async_write(bh)); in end_buffer_async_write()
323 page = bh->b_page; in end_buffer_async_write()
325 set_buffer_uptodate(bh); in end_buffer_async_write()
327 buffer_io_error(bh, ", lost async page write"); in end_buffer_async_write()
328 mark_buffer_write_io_error(bh); in end_buffer_async_write()
329 clear_buffer_uptodate(bh); in end_buffer_async_write()
337 clear_buffer_async_write(bh); in end_buffer_async_write()
338 unlock_buffer(bh); in end_buffer_async_write()
339 tmp = bh->b_this_page; in end_buffer_async_write()
340 while (tmp != bh) { in end_buffer_async_write()
380 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read() argument
382 bh->b_end_io = end_buffer_async_read; in mark_buffer_async_read()
383 set_buffer_async_read(bh); in mark_buffer_async_read()
386 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio() argument
389 bh->b_end_io = handler; in mark_buffer_async_write_endio()
390 set_buffer_async_write(bh); in mark_buffer_async_write_endio()
393 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write() argument
395 mark_buffer_async_write_endio(bh, end_buffer_async_write); in mark_buffer_async_write()
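mark_buffer_async_write() only records end_buffer_async_write() as the completion handler and sets the async-write bit; __block_write_full_page() below pairs it with submit_bh_wbc(). Sketched in isolation from the page-lock and writeback accounting a real writepage must also do (demo name hypothetical):

	/* Sketch: the prepare-then-submit split used by __block_write_full_page().
	 * Real callers run this under the page lock with writeback bookkeeping. */
	static void demo_submit_async_write(struct buffer_head *bh)
	{
		lock_buffer(bh);
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);	/* b_end_io = end_buffer_async_write */
			submit_bh(REQ_OP_WRITE, 0, bh);	/* completion unlocks the buffer */
		} else {
			unlock_buffer(bh);
		}
	}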
452 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue() argument
454 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
455 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
456 bh->b_assoc_map = NULL; in __remove_assoc_queue()
476 struct buffer_head *bh; in osync_buffers_list() local
483 bh = BH_ENTRY(p); in osync_buffers_list()
484 if (buffer_locked(bh)) { in osync_buffers_list()
485 get_bh(bh); in osync_buffers_list()
487 wait_on_buffer(bh); in osync_buffers_list()
488 if (!buffer_uptodate(bh)) in osync_buffers_list()
490 brelse(bh); in osync_buffers_list()
537 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block() local
538 if (bh) { in write_boundary_block()
539 if (buffer_dirty(bh)) in write_boundary_block()
540 ll_rw_block(REQ_OP_WRITE, 0, 1, &bh); in write_boundary_block()
541 put_bh(bh); in write_boundary_block()
545 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode() argument
548 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
550 mark_buffer_dirty(bh); in mark_buffer_dirty_inode()
556 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
558 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
560 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
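mark_buffer_dirty_inode() dirties the buffer and, if it is not already associated, moves it onto the owning inode's list so fsync_buffers_list() below can flush it on fsync(). A caller-side sketch (names and block layout hypothetical):

	#include <linux/buffer_head.h>

	/* Sketch: update an on-disk pointer block and queue it for fsync(). */
	static void demo_update_indirect(struct inode *inode, struct buffer_head *bh,
					 unsigned int off, __le32 blocknr)
	{
		lock_buffer(bh);
		*(__le32 *)(bh->b_data + off) = blocknr;
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);	/* onto the assoc list */
	}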
627 struct buffer_head *bh = head; in __set_page_dirty_buffers() local
630 set_buffer_dirty(bh); in __set_page_dirty_buffers()
631 bh = bh->b_this_page; in __set_page_dirty_buffers()
632 } while (bh != head); in __set_page_dirty_buffers()
675 struct buffer_head *bh; in fsync_buffers_list() local
686 bh = BH_ENTRY(list->next); in fsync_buffers_list()
687 mapping = bh->b_assoc_map; in fsync_buffers_list()
688 __remove_assoc_queue(bh); in fsync_buffers_list()
692 if (buffer_dirty(bh) || buffer_locked(bh)) { in fsync_buffers_list()
693 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
694 bh->b_assoc_map = mapping; in fsync_buffers_list()
695 if (buffer_dirty(bh)) { in fsync_buffers_list()
696 get_bh(bh); in fsync_buffers_list()
705 write_dirty_buffer(bh, REQ_SYNC); in fsync_buffers_list()
713 brelse(bh); in fsync_buffers_list()
724 bh = BH_ENTRY(tmp.prev); in fsync_buffers_list()
725 get_bh(bh); in fsync_buffers_list()
726 mapping = bh->b_assoc_map; in fsync_buffers_list()
727 __remove_assoc_queue(bh); in fsync_buffers_list()
731 if (buffer_dirty(bh)) { in fsync_buffers_list()
732 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
734 bh->b_assoc_map = mapping; in fsync_buffers_list()
737 wait_on_buffer(bh); in fsync_buffers_list()
738 if (!buffer_uptodate(bh)) in fsync_buffers_list()
740 brelse(bh); in fsync_buffers_list()
793 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers() local
794 if (buffer_dirty(bh)) { in remove_inode_buffers()
798 __remove_assoc_queue(bh); in remove_inode_buffers()
817 struct buffer_head *bh, *head; in alloc_page_buffers() local
831 bh = alloc_buffer_head(gfp); in alloc_page_buffers()
832 if (!bh) in alloc_page_buffers()
835 bh->b_this_page = head; in alloc_page_buffers()
836 bh->b_blocknr = -1; in alloc_page_buffers()
837 head = bh; in alloc_page_buffers()
839 bh->b_size = size; in alloc_page_buffers()
842 set_bh_page(bh, page, offset); in alloc_page_buffers()
854 bh = head; in alloc_page_buffers()
856 free_buffer_head(bh); in alloc_page_buffers()
867 struct buffer_head *bh, *tail; in link_dev_buffers() local
869 bh = head; in link_dev_buffers()
871 tail = bh; in link_dev_buffers()
872 bh = bh->b_this_page; in link_dev_buffers()
873 } while (bh); in link_dev_buffers()
898 struct buffer_head *bh = head; in init_page_buffers() local
903 if (!buffer_mapped(bh)) { in init_page_buffers()
904 bh->b_end_io = NULL; in init_page_buffers()
905 bh->b_private = NULL; in init_page_buffers()
906 bh->b_bdev = bdev; in init_page_buffers()
907 bh->b_blocknr = block; in init_page_buffers()
909 set_buffer_uptodate(bh); in init_page_buffers()
911 set_buffer_mapped(bh); in init_page_buffers()
914 bh = bh->b_this_page; in init_page_buffers()
915 } while (bh != head); in init_page_buffers()
934 struct buffer_head *bh; in grow_dev_page() local
954 bh = page_buffers(page); in grow_dev_page()
955 if (bh->b_size == size) { in grow_dev_page()
968 bh = alloc_page_buffers(page, size, true); in grow_dev_page()
976 link_dev_buffers(page, bh); in grow_dev_page()
1038 struct buffer_head *bh; in __getblk_slow() local
1041 bh = __find_get_block(bdev, block, size); in __getblk_slow()
1042 if (bh) in __getblk_slow()
1043 return bh; in __getblk_slow()
1086 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty() argument
1088 WARN_ON_ONCE(!buffer_uptodate(bh)); in mark_buffer_dirty()
1090 trace_block_dirty_buffer(bh); in mark_buffer_dirty()
1098 if (buffer_dirty(bh)) { in mark_buffer_dirty()
1100 if (buffer_dirty(bh)) in mark_buffer_dirty()
1104 if (!test_set_buffer_dirty(bh)) { in mark_buffer_dirty()
1105 struct page *page = bh->b_page; in mark_buffer_dirty()
1121 void mark_buffer_write_io_error(struct buffer_head *bh) in mark_buffer_write_io_error() argument
1123 set_buffer_write_io_error(bh); in mark_buffer_write_io_error()
1125 if (bh->b_page && bh->b_page->mapping) in mark_buffer_write_io_error()
1126 mapping_set_error(bh->b_page->mapping, -EIO); in mark_buffer_write_io_error()
1127 if (bh->b_assoc_map) in mark_buffer_write_io_error()
1128 mapping_set_error(bh->b_assoc_map, -EIO); in mark_buffer_write_io_error()
1153 void __bforget(struct buffer_head *bh) in __bforget() argument
1155 clear_buffer_dirty(bh); in __bforget()
1156 if (bh->b_assoc_map) { in __bforget()
1157 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1160 list_del_init(&bh->b_assoc_buffers); in __bforget()
1161 bh->b_assoc_map = NULL; in __bforget()
1164 __brelse(bh); in __bforget()
1168 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow() argument
1170 lock_buffer(bh); in __bread_slow()
1171 if (buffer_uptodate(bh)) { in __bread_slow()
1172 unlock_buffer(bh); in __bread_slow()
1173 return bh; in __bread_slow()
1175 get_bh(bh); in __bread_slow()
1176 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1177 submit_bh(REQ_OP_READ, 0, bh); in __bread_slow()
1178 wait_on_buffer(bh); in __bread_slow()
1179 if (buffer_uptodate(bh)) in __bread_slow()
1180 return bh; in __bread_slow()
1182 brelse(bh); in __bread_slow()
1228 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install() argument
1230 struct buffer_head *evictee = bh; in bh_lru_install()
1240 if (evictee == bh) { in bh_lru_install()
1246 get_bh(bh); in bh_lru_install()
1263 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru() local
1265 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && in lookup_bh_lru()
1266 bh->b_size == size) { in lookup_bh_lru()
1273 __this_cpu_write(bh_lrus.bhs[0], bh); in lookup_bh_lru()
1275 get_bh(bh); in lookup_bh_lru()
1276 ret = bh; in lookup_bh_lru()
1292 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block() local
1294 if (bh == NULL) { in __find_get_block()
1296 bh = __find_get_block_slow(bdev, block); in __find_get_block()
1297 if (bh) in __find_get_block()
1298 bh_lru_install(bh); in __find_get_block()
1300 touch_buffer(bh); in __find_get_block()
1302 return bh; in __find_get_block()
1318 struct buffer_head *bh = __find_get_block(bdev, block, size); in __getblk_gfp() local
1321 if (bh == NULL) in __getblk_gfp()
1322 bh = __getblk_slow(bdev, block, size, gfp); in __getblk_gfp()
1323 return bh; in __getblk_gfp()
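__getblk_gfp() returns the buffer without issuing a read, which suits callers that will overwrite the whole block anyway. Via the sb_getblk() wrapper (demo names hypothetical):

	/* Sketch: allocate-and-overwrite a full block; no prior read needed. */
	static int demo_write_whole_block(struct super_block *sb, sector_t block,
					  const void *data)
	{
		struct buffer_head *bh = sb_getblk(sb, block);

		if (!bh)
			return -ENOMEM;
		lock_buffer(bh);
		memcpy(bh->b_data, data, bh->b_size);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty(bh);
		brelse(bh);
		return 0;
	}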
1332 struct buffer_head *bh = __getblk(bdev, block, size); in __breadahead() local
1333 if (likely(bh)) { in __breadahead()
1334 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); in __breadahead()
1335 brelse(bh); in __breadahead()
1356 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __bread_gfp() local
1358 if (likely(bh) && !buffer_uptodate(bh)) in __bread_gfp()
1359 bh = __bread_slow(bh); in __bread_gfp()
1360 return bh; in __bread_gfp()
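__bread_gfp() backs sb_bread(): look the block up, and fall into __bread_slow() above only when it is not uptodate. A typical consumer (demo name hypothetical):

	/* Sketch: read one metadata block synchronously and release it. */
	static int demo_read_block(struct super_block *sb, sector_t block)
	{
		struct buffer_head *bh = sb_bread(sb, block);	/* NULL on failure */

		if (!bh)
			return -EIO;
		pr_info("block %llu starts with %02x\n",
			(unsigned long long)block, *(u8 *)bh->b_data);
		brelse(bh);
		return 0;
	}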
1400 void set_bh_page(struct buffer_head *bh, in set_bh_page() argument
1403 bh->b_page = page; in set_bh_page()
1409 bh->b_data = (char *)(0 + offset); in set_bh_page()
1411 bh->b_data = page_address(page) + offset; in set_bh_page()
1424 static void discard_buffer(struct buffer_head * bh) in discard_buffer() argument
1428 lock_buffer(bh); in discard_buffer()
1429 clear_buffer_dirty(bh); in discard_buffer()
1430 bh->b_bdev = NULL; in discard_buffer()
1431 b_state = bh->b_state; in discard_buffer()
1433 b_state_old = cmpxchg(&bh->b_state, b_state, in discard_buffer()
1439 unlock_buffer(bh); in discard_buffer()
1461 struct buffer_head *head, *bh, *next; in block_invalidatepage() local
1475 bh = head; in block_invalidatepage()
1477 unsigned int next_off = curr_off + bh->b_size; in block_invalidatepage()
1478 next = bh->b_this_page; in block_invalidatepage()
1490 discard_buffer(bh); in block_invalidatepage()
1492 bh = next; in block_invalidatepage()
1493 } while (bh != head); in block_invalidatepage()
1516 struct buffer_head *bh, *head, *tail; in create_empty_buffers() local
1519 bh = head; in create_empty_buffers()
1521 bh->b_state |= b_state; in create_empty_buffers()
1522 tail = bh; in create_empty_buffers()
1523 bh = bh->b_this_page; in create_empty_buffers()
1524 } while (bh); in create_empty_buffers()
1529 bh = head; in create_empty_buffers()
1532 set_buffer_dirty(bh); in create_empty_buffers()
1534 set_buffer_uptodate(bh); in create_empty_buffers()
1535 bh = bh->b_this_page; in create_empty_buffers()
1536 } while (bh != head); in create_empty_buffers()
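create_empty_buffers() builds that circular ring for a page that lacks one; callers conventionally guard it with page_has_buffers(). Sketch (hypothetical helper):

	/* Sketch: the usual guard before walking a page's buffers. */
	static struct buffer_head *demo_get_ring(struct inode *inode,
						 struct page *page)
	{
		if (!page_has_buffers(page))
			create_empty_buffers(page, 1 << inode->i_blkbits, 0);
		return page_buffers(page);
	}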
1571 struct buffer_head *bh; in clean_bdev_aliases() local
1593 bh = head; in clean_bdev_aliases()
1595 if (!buffer_mapped(bh) || (bh->b_blocknr < block)) in clean_bdev_aliases()
1597 if (bh->b_blocknr >= block + len) in clean_bdev_aliases()
1599 clear_buffer_dirty(bh); in clean_bdev_aliases()
1600 wait_on_buffer(bh); in clean_bdev_aliases()
1601 clear_buffer_req(bh); in clean_bdev_aliases()
1603 bh = bh->b_this_page; in clean_bdev_aliases()
1604 } while (bh != head); in clean_bdev_aliases()
1676 struct buffer_head *bh, *head; in __block_write_full_page() local
1694 bh = head; in __block_write_full_page()
1695 blocksize = bh->b_size; in __block_write_full_page()
1715 clear_buffer_dirty(bh); in __block_write_full_page()
1716 set_buffer_uptodate(bh); in __block_write_full_page()
1717 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && in __block_write_full_page()
1718 buffer_dirty(bh)) { in __block_write_full_page()
1719 WARN_ON(bh->b_size != blocksize); in __block_write_full_page()
1720 err = get_block(inode, block, bh, 1); in __block_write_full_page()
1723 clear_buffer_delay(bh); in __block_write_full_page()
1724 if (buffer_new(bh)) { in __block_write_full_page()
1726 clear_buffer_new(bh); in __block_write_full_page()
1727 clean_bdev_bh_alias(bh); in __block_write_full_page()
1730 bh = bh->b_this_page; in __block_write_full_page()
1732 } while (bh != head); in __block_write_full_page()
1735 if (!buffer_mapped(bh)) in __block_write_full_page()
1745 lock_buffer(bh); in __block_write_full_page()
1746 } else if (!trylock_buffer(bh)) { in __block_write_full_page()
1750 if (test_clear_buffer_dirty(bh)) { in __block_write_full_page()
1751 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1753 unlock_buffer(bh); in __block_write_full_page()
1755 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1765 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1766 if (buffer_async_write(bh)) { in __block_write_full_page()
1767 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, in __block_write_full_page()
1771 bh = next; in __block_write_full_page()
1772 } while (bh != head); in __block_write_full_page()
1799 bh = head; in __block_write_full_page()
1802 if (buffer_mapped(bh) && buffer_dirty(bh) && in __block_write_full_page()
1803 !buffer_delay(bh)) { in __block_write_full_page()
1804 lock_buffer(bh); in __block_write_full_page()
1805 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1811 clear_buffer_dirty(bh); in __block_write_full_page()
1813 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1819 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1820 if (buffer_async_write(bh)) { in __block_write_full_page()
1821 clear_buffer_dirty(bh); in __block_write_full_page()
1822 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, in __block_write_full_page()
1826 bh = next; in __block_write_full_page()
1827 } while (bh != head); in __block_write_full_page()
1841 struct buffer_head *head, *bh; in page_zero_new_buffers() local
1847 bh = head = page_buffers(page); in page_zero_new_buffers()
1850 block_end = block_start + bh->b_size; in page_zero_new_buffers()
1852 if (buffer_new(bh)) { in page_zero_new_buffers()
1861 set_buffer_uptodate(bh); in page_zero_new_buffers()
1864 clear_buffer_new(bh); in page_zero_new_buffers()
1865 mark_buffer_dirty(bh); in page_zero_new_buffers()
1870 bh = bh->b_this_page; in page_zero_new_buffers()
1871 } while (bh != head); in page_zero_new_buffers()
1876 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, in iomap_to_bh() argument
1881 bh->b_bdev = iomap->bdev; in iomap_to_bh()
1898 if (!buffer_uptodate(bh) || in iomap_to_bh()
1900 set_buffer_new(bh); in iomap_to_bh()
1903 if (!buffer_uptodate(bh) || in iomap_to_bh()
1905 set_buffer_new(bh); in iomap_to_bh()
1906 set_buffer_uptodate(bh); in iomap_to_bh()
1907 set_buffer_mapped(bh); in iomap_to_bh()
1908 set_buffer_delay(bh); in iomap_to_bh()
1916 set_buffer_new(bh); in iomap_to_bh()
1917 set_buffer_unwritten(bh); in iomap_to_bh()
1922 set_buffer_new(bh); in iomap_to_bh()
1923 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> in iomap_to_bh()
1925 set_buffer_mapped(bh); in iomap_to_bh()
1940 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin_int() local
1953 for(bh = head, block_start = 0; bh != head || !block_start; in __block_write_begin_int()
1954 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin_int()
1958 if (!buffer_uptodate(bh)) in __block_write_begin_int()
1959 set_buffer_uptodate(bh); in __block_write_begin_int()
1963 if (buffer_new(bh)) in __block_write_begin_int()
1964 clear_buffer_new(bh); in __block_write_begin_int()
1965 if (!buffer_mapped(bh)) { in __block_write_begin_int()
1966 WARN_ON(bh->b_size != blocksize); in __block_write_begin_int()
1968 err = get_block(inode, block, bh, 1); in __block_write_begin_int()
1972 iomap_to_bh(inode, block, bh, iomap); in __block_write_begin_int()
1975 if (buffer_new(bh)) { in __block_write_begin_int()
1976 clean_bdev_bh_alias(bh); in __block_write_begin_int()
1978 clear_buffer_new(bh); in __block_write_begin_int()
1979 set_buffer_uptodate(bh); in __block_write_begin_int()
1980 mark_buffer_dirty(bh); in __block_write_begin_int()
1991 if (!buffer_uptodate(bh)) in __block_write_begin_int()
1992 set_buffer_uptodate(bh); in __block_write_begin_int()
1995 if (!buffer_uptodate(bh) && !buffer_delay(bh) && in __block_write_begin_int()
1996 !buffer_unwritten(bh) && in __block_write_begin_int()
1998 ll_rw_block(REQ_OP_READ, 0, 1, &bh); in __block_write_begin_int()
1999 *wait_bh++=bh; in __block_write_begin_int()
2028 struct buffer_head *bh, *head; in __block_commit_write() local
2030 bh = head = page_buffers(page); in __block_commit_write()
2031 blocksize = bh->b_size; in __block_commit_write()
2037 if (!buffer_uptodate(bh)) in __block_commit_write()
2040 set_buffer_uptodate(bh); in __block_commit_write()
2041 mark_buffer_dirty(bh); in __block_commit_write()
2043 clear_buffer_new(bh); in __block_commit_write()
2046 bh = bh->b_this_page; in __block_commit_write()
2047 } while (bh != head); in __block_commit_write()
2176 struct buffer_head *bh, *head; in block_is_partially_uptodate() local
2189 bh = head; in block_is_partially_uptodate()
2194 if (!buffer_uptodate(bh)) { in block_is_partially_uptodate()
2202 bh = bh->b_this_page; in block_is_partially_uptodate()
2203 } while (bh != head); in block_is_partially_uptodate()
2220 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_page() local
2231 bh = head; in block_read_full_page()
2236 if (buffer_uptodate(bh)) in block_read_full_page()
2239 if (!buffer_mapped(bh)) { in block_read_full_page()
2244 WARN_ON(bh->b_size != blocksize); in block_read_full_page()
2245 err = get_block(inode, iblock, bh, 0); in block_read_full_page()
2249 if (!buffer_mapped(bh)) { in block_read_full_page()
2252 set_buffer_uptodate(bh); in block_read_full_page()
2259 if (buffer_uptodate(bh)) in block_read_full_page()
2262 arr[nr++] = bh; in block_read_full_page()
2263 } while (i++, iblock++, (bh = bh->b_this_page) != head); in block_read_full_page()
2281 bh = arr[i]; in block_read_full_page()
2282 lock_buffer(bh); in block_read_full_page()
2283 mark_buffer_async_read(bh); in block_read_full_page()
2292 bh = arr[i]; in block_read_full_page()
2293 if (buffer_uptodate(bh)) in block_read_full_page()
2294 end_buffer_async_read(bh, 1); in block_read_full_page()
2296 submit_bh(REQ_OP_READ, 0, bh); in block_read_full_page()
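block_read_full_page() does the per-buffer mapping and read submission on a filesystem's behalf; the filesystem supplies only a get_block_t. A wiring sketch with a deliberately trivial, hypothetical mapping:

	/* Hypothetical get_block: identity logical-to-physical mapping. */
	static int demo_get_block(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create)
	{
		map_bh(bh_result, inode->i_sb, iblock);	/* b_bdev, b_blocknr, mapped */
		return 0;
	}

	/* Sketch: a filesystem's ->readpage built on block_read_full_page(). */
	static int demo_readpage(struct file *file, struct page *page)
	{
		return block_read_full_page(page, demo_get_block);
	}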
2500 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) in end_buffer_read_nobh() argument
2502 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_nobh()
2512 struct buffer_head *bh; in attach_nobh_buffers() local
2517 bh = head; in attach_nobh_buffers()
2520 set_buffer_dirty(bh); in attach_nobh_buffers()
2521 if (!bh->b_this_page) in attach_nobh_buffers()
2522 bh->b_this_page = head; in attach_nobh_buffers()
2523 bh = bh->b_this_page; in attach_nobh_buffers()
2524 } while (bh != head); in attach_nobh_buffers()
2542 struct buffer_head *head, *bh; in nobh_write_begin() local
2595 for (block_start = 0, block_in_page = 0, bh = head; in nobh_write_begin()
2597 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { in nobh_write_begin()
2601 bh->b_state = 0; in nobh_write_begin()
2606 bh, create); in nobh_write_begin()
2609 if (!buffer_mapped(bh)) in nobh_write_begin()
2611 if (buffer_new(bh)) in nobh_write_begin()
2612 clean_bdev_bh_alias(bh); in nobh_write_begin()
2614 set_buffer_uptodate(bh); in nobh_write_begin()
2617 if (buffer_new(bh) || !buffer_mapped(bh)) { in nobh_write_begin()
2622 if (buffer_uptodate(bh)) in nobh_write_begin()
2625 lock_buffer(bh); in nobh_write_begin()
2626 bh->b_end_io = end_buffer_read_nobh; in nobh_write_begin()
2627 submit_bh(REQ_OP_READ, 0, bh); in nobh_write_begin()
2638 for (bh = head; bh; bh = bh->b_this_page) { in nobh_write_begin()
2639 wait_on_buffer(bh); in nobh_write_begin()
2640 if (!buffer_uptodate(bh)) in nobh_write_begin()
2681 struct buffer_head *bh; in nobh_write_end() local
2701 bh = head; in nobh_write_end()
2703 free_buffer_head(bh); in nobh_write_end()
2850 struct buffer_head *bh; in block_truncate_page() local
2872 bh = page_buffers(page); in block_truncate_page()
2875 bh = bh->b_this_page; in block_truncate_page()
2881 if (!buffer_mapped(bh)) { in block_truncate_page()
2882 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2883 err = get_block(inode, iblock, bh, 0); in block_truncate_page()
2887 if (!buffer_mapped(bh)) in block_truncate_page()
2893 set_buffer_uptodate(bh); in block_truncate_page()
2895 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { in block_truncate_page()
2897 ll_rw_block(REQ_OP_READ, 0, 1, &bh); in block_truncate_page()
2898 wait_on_buffer(bh); in block_truncate_page()
2900 if (!buffer_uptodate(bh)) in block_truncate_page()
2905 mark_buffer_dirty(bh); in block_truncate_page()
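block_truncate_page() zeroes the tail of the block straddling the new EOF, reading it first when needed. Wiring sketch, reusing the hypothetical demo_get_block from the readpage sketch above:

	/* Sketch: zero the partial block at the new EOF during truncate. */
	static int demo_truncate(struct inode *inode, loff_t newsize)
	{
		int err = block_truncate_page(inode->i_mapping, newsize,
					      demo_get_block);
		if (err)
			return err;
		truncate_setsize(inode, newsize);
		return 0;
	}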
2973 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync() local
2976 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
2978 bh->b_end_io(bh, !bio->bi_status); in end_bio_bh_io_sync()
3048 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, in submit_bh_wbc() argument
3053 BUG_ON(!buffer_locked(bh)); in submit_bh_wbc()
3054 BUG_ON(!buffer_mapped(bh)); in submit_bh_wbc()
3055 BUG_ON(!bh->b_end_io); in submit_bh_wbc()
3056 BUG_ON(buffer_delay(bh)); in submit_bh_wbc()
3057 BUG_ON(buffer_unwritten(bh)); in submit_bh_wbc()
3062 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) in submit_bh_wbc()
3063 clear_buffer_write_io_error(bh); in submit_bh_wbc()
3071 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh_wbc()
3072 bio_set_dev(bio, bh->b_bdev); in submit_bh_wbc()
3075 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in submit_bh_wbc()
3076 BUG_ON(bio->bi_iter.bi_size != bh->b_size); in submit_bh_wbc()
3079 bio->bi_private = bh; in submit_bh_wbc()
3084 if (buffer_meta(bh)) in submit_bh_wbc()
3086 if (buffer_prio(bh)) in submit_bh_wbc()
3092 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); in submit_bh_wbc()
3099 int submit_bh(int op, int op_flags, struct buffer_head *bh) in submit_bh() argument
3101 return submit_bh_wbc(op, op_flags, bh, 0, NULL); in submit_bh()
3136 struct buffer_head *bh = bhs[i]; in ll_rw_block() local
3138 if (!trylock_buffer(bh)) in ll_rw_block()
3141 if (test_clear_buffer_dirty(bh)) { in ll_rw_block()
3142 bh->b_end_io = end_buffer_write_sync; in ll_rw_block()
3143 get_bh(bh); in ll_rw_block()
3144 submit_bh(op, op_flags, bh); in ll_rw_block()
3148 if (!buffer_uptodate(bh)) { in ll_rw_block()
3149 bh->b_end_io = end_buffer_read_sync; in ll_rw_block()
3150 get_bh(bh); in ll_rw_block()
3151 submit_bh(op, op_flags, bh); in ll_rw_block()
3155 unlock_buffer(bh); in ll_rw_block()
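ll_rw_block() is best-effort: it trylocks each buffer and submits only those that still need the I/O, so callers that require the data wait and re-check uptodate afterwards. Sketch (hypothetical helper):

	/* Sketch: batched read of several blocks, then wait for each one. */
	static int demo_read_many(struct buffer_head *bhs[], int nr)
	{
		int i;

		ll_rw_block(REQ_OP_READ, 0, nr, bhs);	/* skips locked/clean cases */
		for (i = 0; i < nr; i++) {
			wait_on_buffer(bhs[i]);
			if (!buffer_uptodate(bhs[i]))
				return -EIO;
		}
		return 0;
	}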
3160 void write_dirty_buffer(struct buffer_head *bh, int op_flags) in write_dirty_buffer() argument
3162 lock_buffer(bh); in write_dirty_buffer()
3163 if (!test_clear_buffer_dirty(bh)) { in write_dirty_buffer()
3164 unlock_buffer(bh); in write_dirty_buffer()
3167 bh->b_end_io = end_buffer_write_sync; in write_dirty_buffer()
3168 get_bh(bh); in write_dirty_buffer()
3169 submit_bh(REQ_OP_WRITE, op_flags, bh); in write_dirty_buffer()
3178 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) in __sync_dirty_buffer() argument
3182 WARN_ON(atomic_read(&bh->b_count) < 1); in __sync_dirty_buffer()
3183 lock_buffer(bh); in __sync_dirty_buffer()
3184 if (test_clear_buffer_dirty(bh)) { in __sync_dirty_buffer()
3185 get_bh(bh); in __sync_dirty_buffer()
3186 bh->b_end_io = end_buffer_write_sync; in __sync_dirty_buffer()
3187 ret = submit_bh(REQ_OP_WRITE, op_flags, bh); in __sync_dirty_buffer()
3188 wait_on_buffer(bh); in __sync_dirty_buffer()
3189 if (!ret && !buffer_uptodate(bh)) in __sync_dirty_buffer()
3192 unlock_buffer(bh); in __sync_dirty_buffer()
3198 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer() argument
3200 return __sync_dirty_buffer(bh, REQ_SYNC); in sync_dirty_buffer()
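sync_dirty_buffer() is the synchronous flush: clear the dirty bit, write with REQ_SYNC, wait, and return -EIO if the buffer did not come back uptodate. Usage sketch (hypothetical helper):

	/* Sketch: durably commit a single modified metadata block. */
	static int demo_commit_block(struct buffer_head *bh)
	{
		mark_buffer_dirty(bh);
		return sync_dirty_buffer(bh);	/* 0 on success, -EIO on error */
	}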
3224 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy() argument
3226 return atomic_read(&bh->b_count) | in buffer_busy()
3227 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
3234 struct buffer_head *bh; in drop_buffers() local
3236 bh = head; in drop_buffers()
3238 if (buffer_busy(bh)) in drop_buffers()
3240 bh = bh->b_this_page; in drop_buffers()
3241 } while (bh != head); in drop_buffers()
3244 struct buffer_head *next = bh->b_this_page; in drop_buffers()
3246 if (bh->b_assoc_map) in drop_buffers()
3247 __remove_assoc_queue(bh); in drop_buffers()
3248 bh = next; in drop_buffers()
3249 } while (bh != head); in drop_buffers()
3294 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers() local
3297 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
3298 free_buffer_head(bh); in try_to_free_buffers()
3299 bh = next; in try_to_free_buffers()
3300 } while (bh != buffers_to_free); in try_to_free_buffers()
3380 void free_buffer_head(struct buffer_head *bh) in free_buffer_head() argument
3382 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3383 kmem_cache_free(bh_cachep, bh); in free_buffer_head()
3412 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock() argument
3414 if (!buffer_uptodate(bh)) { in bh_uptodate_or_lock()
3415 lock_buffer(bh); in bh_uptodate_or_lock()
3416 if (!buffer_uptodate(bh)) in bh_uptodate_or_lock()
3418 unlock_buffer(bh); in bh_uptodate_or_lock()
3430 int bh_submit_read(struct buffer_head *bh) in bh_submit_read() argument
3432 BUG_ON(!buffer_locked(bh)); in bh_submit_read()
3434 if (buffer_uptodate(bh)) { in bh_submit_read()
3435 unlock_buffer(bh); in bh_submit_read()
3439 get_bh(bh); in bh_submit_read()
3440 bh->b_end_io = end_buffer_read_sync; in bh_submit_read()
3441 submit_bh(REQ_OP_READ, 0, bh); in bh_submit_read()
3442 wait_on_buffer(bh); in bh_submit_read()
3443 if (buffer_uptodate(bh)) in bh_submit_read()
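bh_uptodate_or_lock() and bh_submit_read() pair up: the former returns nonzero when no read is needed, otherwise it returns with the buffer locked, ready to hand to the latter. The combined idiom in sketch form (hypothetical helper):

	/* Sketch: read a buffer only if it is not already uptodate. */
	static int demo_ensure_uptodate(struct buffer_head *bh)
	{
		if (bh_uptodate_or_lock(bh))
			return 0;		/* already uptodate */
		return bh_submit_read(bh);	/* takes a locked bh; 0 or -EIO */
	}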