Lines matching refs: bp — identifier cross-reference for fs/xfs/xfs_buf.c. Each entry gives the source line number, the source text, and the containing function; "argument" and "local" mark lines where bp is declared rather than used.

54 struct xfs_buf *bp) in xfs_buf_is_vmapped() argument
63 return bp->b_addr && bp->b_page_count > 1; in xfs_buf_is_vmapped()
68 struct xfs_buf *bp) in xfs_buf_vmap_len() argument
70 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset; in xfs_buf_vmap_len()
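The two helpers above decide whether a buffer is backed by a vmap'ed multi-page region and how long that mapping is: whole pages minus the offset of the data within the first page. A minimal userspace model of the arithmetic (buf_model, the field names, and the fixed PAGE_SIZE are illustrative stand-ins for the kernel definitions):

#include <stdio.h>

#define PAGE_SIZE 4096u

struct buf_model {
    void        *b_addr;        /* mapped address, NULL if unmapped */
    unsigned int b_page_count;  /* pages backing the buffer */
    unsigned int b_offset;      /* offset of data within the first page */
};

/* Mirrors xfs_buf_is_vmapped(): mapped and spanning more than one page. */
static int buf_is_vmapped(const struct buf_model *b)
{
    return b->b_addr && b->b_page_count > 1;
}

/* Mirrors xfs_buf_vmap_len(): whole pages minus the leading offset. */
static unsigned int buf_vmap_len(const struct buf_model *b)
{
    return b->b_page_count * PAGE_SIZE - b->b_offset;
}

int main(void)
{
    struct buf_model b = { (void *)0x1000, 3, 512 };

    printf("vmapped=%d len=%u\n", buf_is_vmapped(&b), buf_vmap_len(&b));
    return 0;
}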
88 struct xfs_buf *bp) in xfs_buf_ioacct_inc() argument
90 if (bp->b_flags & XBF_NO_IOACCT) in xfs_buf_ioacct_inc()
93 ASSERT(bp->b_flags & XBF_ASYNC); in xfs_buf_ioacct_inc()
94 spin_lock(&bp->b_lock); in xfs_buf_ioacct_inc()
95 if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) { in xfs_buf_ioacct_inc()
96 bp->b_state |= XFS_BSTATE_IN_FLIGHT; in xfs_buf_ioacct_inc()
97 percpu_counter_inc(&bp->b_target->bt_io_count); in xfs_buf_ioacct_inc()
99 spin_unlock(&bp->b_lock); in xfs_buf_ioacct_inc()
108 struct xfs_buf *bp) in __xfs_buf_ioacct_dec() argument
110 lockdep_assert_held(&bp->b_lock); in __xfs_buf_ioacct_dec()
112 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { in __xfs_buf_ioacct_dec()
113 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; in __xfs_buf_ioacct_dec()
114 percpu_counter_dec(&bp->b_target->bt_io_count); in __xfs_buf_ioacct_dec()
120 struct xfs_buf *bp) in xfs_buf_ioacct_dec() argument
122 spin_lock(&bp->b_lock); in xfs_buf_ioacct_dec()
123 __xfs_buf_ioacct_dec(bp); in xfs_buf_ioacct_dec()
124 spin_unlock(&bp->b_lock); in xfs_buf_ioacct_dec()
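xfs_buf_ioacct_inc()/xfs_buf_ioacct_dec() above keep a per-target count of buffers with I/O in flight, using the XFS_BSTATE_IN_FLIGHT bit under b_lock so each buffer is counted at most once no matter how often the helpers run. A sketch of that idempotent-accounting pattern with plain mutexes and a global counter (the kernel uses a spinlock and a percpu counter; all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

#define BSTATE_IN_FLIGHT 0x1

static long io_count;                  /* models bt_io_count */
static pthread_mutex_t io_count_lock = PTHREAD_MUTEX_INITIALIZER;

struct buf_model {
    pthread_mutex_t b_lock;            /* models bp->b_lock */
    unsigned int    b_state;
};

/* Count the buffer at most once, guarded by the state bit. */
static void ioacct_inc(struct buf_model *b)
{
    pthread_mutex_lock(&b->b_lock);
    if (!(b->b_state & BSTATE_IN_FLIGHT)) {
        b->b_state |= BSTATE_IN_FLIGHT;
        pthread_mutex_lock(&io_count_lock);
        io_count++;
        pthread_mutex_unlock(&io_count_lock);
    }
    pthread_mutex_unlock(&b->b_lock);
}

/* Undo the accounting only if the bit is still set. */
static void ioacct_dec(struct buf_model *b)
{
    pthread_mutex_lock(&b->b_lock);
    if (b->b_state & BSTATE_IN_FLIGHT) {
        b->b_state &= ~BSTATE_IN_FLIGHT;
        pthread_mutex_lock(&io_count_lock);
        io_count--;
        pthread_mutex_unlock(&io_count_lock);
    }
    pthread_mutex_unlock(&b->b_lock);
}

int main(void)
{
    struct buf_model b = { PTHREAD_MUTEX_INITIALIZER, 0 };

    ioacct_inc(&b);
    ioacct_inc(&b);   /* second call is a no-op: bit already set */
    ioacct_dec(&b);
    printf("io_count=%ld\n", io_count);   /* 0 */
    return 0;
}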
137 struct xfs_buf *bp) in xfs_buf_stale() argument
139 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_stale()
141 bp->b_flags |= XBF_STALE; in xfs_buf_stale()
148 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_stale()
156 spin_lock(&bp->b_lock); in xfs_buf_stale()
157 __xfs_buf_ioacct_dec(bp); in xfs_buf_stale()
159 atomic_set(&bp->b_lru_ref, 0); in xfs_buf_stale()
160 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && in xfs_buf_stale()
161 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) in xfs_buf_stale()
162 atomic_dec(&bp->b_hold); in xfs_buf_stale()
164 ASSERT(atomic_read(&bp->b_hold) >= 1); in xfs_buf_stale()
165 spin_unlock(&bp->b_lock); in xfs_buf_stale()
170 struct xfs_buf *bp, in xfs_buf_get_maps() argument
173 ASSERT(bp->b_maps == NULL); in xfs_buf_get_maps()
174 bp->b_map_count = map_count; in xfs_buf_get_maps()
177 bp->b_maps = &bp->__b_map; in xfs_buf_get_maps()
181 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), in xfs_buf_get_maps()
183 if (!bp->b_maps) in xfs_buf_get_maps()
193 struct xfs_buf *bp) in xfs_buf_free_maps() argument
195 if (bp->b_maps != &bp->__b_map) { in xfs_buf_free_maps()
196 kmem_free(bp->b_maps); in xfs_buf_free_maps()
197 bp->b_maps = NULL; in xfs_buf_free_maps()
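xfs_buf_get_maps()/xfs_buf_free_maps() show the small-array optimization used throughout this file: the common single-map case reuses the __b_map storage embedded in the buffer, and only multi-map buffers allocate; the free path only frees what it allocated. A userspace sketch of the same idea, under the assumption that names and types here are simplified stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct map { long bm_bn, bm_len; };

struct buf_model {
    struct map *b_maps;       /* points at __b_map or a heap array */
    struct map  __b_map;      /* embedded storage for the common case */
    int         b_map_count;
};

static int buf_get_maps(struct buf_model *b, int map_count)
{
    b->b_map_count = map_count;
    if (map_count == 1) {
        b->b_maps = &b->__b_map;    /* no allocation needed */
        return 0;
    }
    b->b_maps = calloc(map_count, sizeof(struct map));
    return b->b_maps ? 0 : -1;
}

static void buf_free_maps(struct buf_model *b)
{
    if (b->b_maps != &b->__b_map) { /* only free what we allocated */
        free(b->b_maps);
        b->b_maps = NULL;
    }
}

int main(void)
{
    struct buf_model b = { 0 };

    if (buf_get_maps(&b, 3) == 0)
        printf("heap array of %d maps\n", b.b_map_count);
    buf_free_maps(&b);
    return 0;
}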
208 struct xfs_buf *bp; in _xfs_buf_alloc() local
212 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); in _xfs_buf_alloc()
213 if (unlikely(!bp)) in _xfs_buf_alloc()
222 atomic_set(&bp->b_hold, 1); in _xfs_buf_alloc()
223 atomic_set(&bp->b_lru_ref, 1); in _xfs_buf_alloc()
224 init_completion(&bp->b_iowait); in _xfs_buf_alloc()
225 INIT_LIST_HEAD(&bp->b_lru); in _xfs_buf_alloc()
226 INIT_LIST_HEAD(&bp->b_list); in _xfs_buf_alloc()
227 INIT_LIST_HEAD(&bp->b_li_list); in _xfs_buf_alloc()
228 sema_init(&bp->b_sema, 0); /* held, no waiters */ in _xfs_buf_alloc()
229 spin_lock_init(&bp->b_lock); in _xfs_buf_alloc()
230 bp->b_target = target; in _xfs_buf_alloc()
231 bp->b_mount = target->bt_mount; in _xfs_buf_alloc()
232 bp->b_flags = flags; in _xfs_buf_alloc()
239 error = xfs_buf_get_maps(bp, nmaps); in _xfs_buf_alloc()
241 kmem_zone_free(xfs_buf_zone, bp); in _xfs_buf_alloc()
245 bp->b_bn = map[0].bm_bn; in _xfs_buf_alloc()
246 bp->b_length = 0; in _xfs_buf_alloc()
248 bp->b_maps[i].bm_bn = map[i].bm_bn; in _xfs_buf_alloc()
249 bp->b_maps[i].bm_len = map[i].bm_len; in _xfs_buf_alloc()
250 bp->b_length += map[i].bm_len; in _xfs_buf_alloc()
253 atomic_set(&bp->b_pin_count, 0); in _xfs_buf_alloc()
254 init_waitqueue_head(&bp->b_waiters); in _xfs_buf_alloc()
256 XFS_STATS_INC(bp->b_mount, xb_create); in _xfs_buf_alloc()
257 trace_xfs_buf_init(bp, _RET_IP_); in _xfs_buf_alloc()
259 return bp; in _xfs_buf_alloc()
268 xfs_buf_t *bp, in _xfs_buf_get_pages() argument
272 if (bp->b_pages == NULL) { in _xfs_buf_get_pages()
273 bp->b_page_count = page_count; in _xfs_buf_get_pages()
275 bp->b_pages = bp->b_page_array; in _xfs_buf_get_pages()
277 bp->b_pages = kmem_alloc(sizeof(struct page *) * in _xfs_buf_get_pages()
279 if (bp->b_pages == NULL) in _xfs_buf_get_pages()
282 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); in _xfs_buf_get_pages()
292 xfs_buf_t *bp) in _xfs_buf_free_pages() argument
294 if (bp->b_pages != bp->b_page_array) { in _xfs_buf_free_pages()
295 kmem_free(bp->b_pages); in _xfs_buf_free_pages()
296 bp->b_pages = NULL; in _xfs_buf_free_pages()
309 xfs_buf_t *bp) in xfs_buf_free() argument
311 trace_xfs_buf_free(bp, _RET_IP_); in xfs_buf_free()
313 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_free()
315 if (bp->b_flags & _XBF_PAGES) { in xfs_buf_free()
318 if (xfs_buf_is_vmapped(bp)) in xfs_buf_free()
319 vm_unmap_ram(bp->b_addr - bp->b_offset, in xfs_buf_free()
320 bp->b_page_count); in xfs_buf_free()
322 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_free()
323 struct page *page = bp->b_pages[i]; in xfs_buf_free()
327 } else if (bp->b_flags & _XBF_KMEM) in xfs_buf_free()
328 kmem_free(bp->b_addr); in xfs_buf_free()
329 _xfs_buf_free_pages(bp); in xfs_buf_free()
330 xfs_buf_free_maps(bp); in xfs_buf_free()
331 kmem_zone_free(xfs_buf_zone, bp); in xfs_buf_free()
339 xfs_buf_t *bp, in xfs_buf_allocate_memory() argument
363 size = BBTOB(bp->b_length); in xfs_buf_allocate_memory()
365 int align_mask = xfs_buftarg_dma_alignment(bp->b_target); in xfs_buf_allocate_memory()
366 bp->b_addr = kmem_alloc_io(size, align_mask, in xfs_buf_allocate_memory()
368 if (!bp->b_addr) { in xfs_buf_allocate_memory()
373 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != in xfs_buf_allocate_memory()
374 ((unsigned long)bp->b_addr & PAGE_MASK)) { in xfs_buf_allocate_memory()
376 kmem_free(bp->b_addr); in xfs_buf_allocate_memory()
377 bp->b_addr = NULL; in xfs_buf_allocate_memory()
380 bp->b_offset = offset_in_page(bp->b_addr); in xfs_buf_allocate_memory()
381 bp->b_pages = bp->b_page_array; in xfs_buf_allocate_memory()
382 bp->b_pages[0] = kmem_to_page(bp->b_addr); in xfs_buf_allocate_memory()
383 bp->b_page_count = 1; in xfs_buf_allocate_memory()
384 bp->b_flags |= _XBF_KMEM; in xfs_buf_allocate_memory()
389 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; in xfs_buf_allocate_memory()
390 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) in xfs_buf_allocate_memory()
393 error = _xfs_buf_get_pages(bp, page_count); in xfs_buf_allocate_memory()
397 offset = bp->b_offset; in xfs_buf_allocate_memory()
398 bp->b_flags |= _XBF_PAGES; in xfs_buf_allocate_memory()
400 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_allocate_memory()
407 bp->b_page_count = i; in xfs_buf_allocate_memory()
424 XFS_STATS_INC(bp->b_mount, xb_page_retries); in xfs_buf_allocate_memory()
429 XFS_STATS_INC(bp->b_mount, xb_page_found); in xfs_buf_allocate_memory()
433 bp->b_pages[i] = page; in xfs_buf_allocate_memory()
439 for (i = 0; i < bp->b_page_count; i++) in xfs_buf_allocate_memory()
440 __free_page(bp->b_pages[i]); in xfs_buf_allocate_memory()
441 bp->b_flags &= ~_XBF_PAGES; in xfs_buf_allocate_memory()
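The kmem path in xfs_buf_allocate_memory() only keeps a heap allocation if it does not straddle a page boundary; otherwise it frees it and falls back to page allocation. The test compares the page of the first and last byte, exactly as on source lines 373-374 above. A standalone model of that check:

#include <stdio.h>

#define PAGE_SIZE 4096ul
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* True if [addr, addr + size) fits inside a single page. */
static int fits_one_page(unsigned long addr, unsigned long size)
{
    return ((addr + size - 1) & PAGE_MASK) == (addr & PAGE_MASK);
}

int main(void)
{
    printf("%d\n", fits_one_page(4096 + 512, 1024));  /* 1: stays in page 1 */
    printf("%d\n", fits_one_page(4096 + 3584, 1024)); /* 0: crosses into page 2 */
    return 0;
}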
450 xfs_buf_t *bp, in _xfs_buf_map_pages() argument
453 ASSERT(bp->b_flags & _XBF_PAGES); in _xfs_buf_map_pages()
454 if (bp->b_page_count == 1) { in _xfs_buf_map_pages()
456 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; in _xfs_buf_map_pages()
458 bp->b_addr = NULL; in _xfs_buf_map_pages()
473 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, in _xfs_buf_map_pages()
475 if (bp->b_addr) in _xfs_buf_map_pages()
481 if (!bp->b_addr) in _xfs_buf_map_pages()
483 bp->b_addr += bp->b_offset; in _xfs_buf_map_pages()
498 const struct xfs_buf *bp = obj; in _xfs_buf_obj_cmp() local
506 if (bp->b_bn != map->bm_bn) in _xfs_buf_obj_cmp()
509 if (unlikely(bp->b_length != map->bm_len)) { in _xfs_buf_obj_cmp()
518 ASSERT(bp->b_flags & XBF_STALE); in _xfs_buf_obj_cmp()
577 xfs_buf_t *bp; in xfs_buf_find() local
608 bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap, in xfs_buf_find()
610 if (bp) { in xfs_buf_find()
611 atomic_inc(&bp->b_hold); in xfs_buf_find()
635 if (!xfs_buf_trylock(bp)) { in xfs_buf_find()
637 xfs_buf_rele(bp); in xfs_buf_find()
641 xfs_buf_lock(bp); in xfs_buf_find()
650 if (bp->b_flags & XBF_STALE) { in xfs_buf_find()
651 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); in xfs_buf_find()
652 ASSERT(bp->b_iodone == NULL); in xfs_buf_find()
653 bp->b_flags &= _XBF_KMEM | _XBF_PAGES; in xfs_buf_find()
654 bp->b_ops = NULL; in xfs_buf_find()
657 trace_xfs_buf_find(bp, flags, _RET_IP_); in xfs_buf_find()
659 *found_bp = bp; in xfs_buf_find()
670 struct xfs_buf *bp; in xfs_buf_incore() local
674 error = xfs_buf_find(target, &map, 1, flags, NULL, &bp); in xfs_buf_incore()
677 return bp; in xfs_buf_incore()
692 struct xfs_buf *bp; in xfs_buf_get_map() local
696 error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp); in xfs_buf_get_map()
728 error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp); in xfs_buf_get_map()
734 if (bp != new_bp) in xfs_buf_get_map()
738 if (!bp->b_addr) { in xfs_buf_get_map()
739 error = _xfs_buf_map_pages(bp, flags); in xfs_buf_get_map()
743 xfs_buf_relse(bp); in xfs_buf_get_map()
753 xfs_buf_ioerror(bp, 0); in xfs_buf_get_map()
756 trace_xfs_buf_get(bp, flags, _RET_IP_); in xfs_buf_get_map()
757 return bp; in xfs_buf_get_map()
762 xfs_buf_t *bp, in _xfs_buf_read() argument
766 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); in _xfs_buf_read()
768 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); in _xfs_buf_read()
769 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); in _xfs_buf_read()
771 return xfs_buf_submit(bp); in _xfs_buf_read()
793 struct xfs_buf *bp, in xfs_buf_reverify() argument
796 ASSERT(bp->b_flags & XBF_DONE); in xfs_buf_reverify()
797 ASSERT(bp->b_error == 0); in xfs_buf_reverify()
799 if (!ops || bp->b_ops) in xfs_buf_reverify()
802 bp->b_ops = ops; in xfs_buf_reverify()
803 bp->b_ops->verify_read(bp); in xfs_buf_reverify()
804 if (bp->b_error) in xfs_buf_reverify()
805 bp->b_flags &= ~XBF_DONE; in xfs_buf_reverify()
806 return bp->b_error; in xfs_buf_reverify()
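xfs_buf_reverify() lazily attaches verifier ops to a buffer that was read without them, and clears XBF_DONE on verification failure so the next access re-reads from disk. A sketch of that control flow with a function-pointer ops table (buf_model and the failing verifier are illustrative):

#include <stdio.h>

#define XBF_DONE 0x1

struct buf_model;

struct buf_ops {
    void (*verify_read)(struct buf_model *b);
};

struct buf_model {
    unsigned int          b_flags;
    int                   b_error;
    const struct buf_ops *b_ops;
};

static int buf_reverify(struct buf_model *b, const struct buf_ops *ops)
{
    /* Nothing to do without ops, or if a verifier already ran. */
    if (!ops || b->b_ops)
        return b->b_error;

    b->b_ops = ops;
    b->b_ops->verify_read(b);
    if (b->b_error)
        b->b_flags &= ~XBF_DONE;    /* force a re-read next time */
    return b->b_error;
}

static void fail_verify(struct buf_model *b) { b->b_error = -5; /* -EIO */ }

int main(void)
{
    const struct buf_ops bad_ops = { fail_verify };
    struct buf_model b = { XBF_DONE, 0, NULL };
    int err = buf_reverify(&b, &bad_ops);

    printf("error=%d done=%u\n", err, b.b_flags & XBF_DONE);
    return 0;
}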
817 struct xfs_buf *bp; in xfs_buf_read_map() local
821 bp = xfs_buf_get_map(target, map, nmaps, flags); in xfs_buf_read_map()
822 if (!bp) in xfs_buf_read_map()
825 trace_xfs_buf_read(bp, flags, _RET_IP_); in xfs_buf_read_map()
827 if (!(bp->b_flags & XBF_DONE)) { in xfs_buf_read_map()
829 bp->b_ops = ops; in xfs_buf_read_map()
830 _xfs_buf_read(bp, flags); in xfs_buf_read_map()
831 return bp; in xfs_buf_read_map()
834 xfs_buf_reverify(bp, ops); in xfs_buf_read_map()
841 xfs_buf_relse(bp); in xfs_buf_read_map()
846 bp->b_flags &= ~XBF_READ; in xfs_buf_read_map()
847 ASSERT(bp->b_ops != NULL || ops == NULL); in xfs_buf_read_map()
848 return bp; in xfs_buf_read_map()
882 struct xfs_buf *bp; in xfs_buf_read_uncached() local
886 bp = xfs_buf_get_uncached(target, numblks, flags); in xfs_buf_read_uncached()
887 if (!bp) in xfs_buf_read_uncached()
891 ASSERT(bp->b_map_count == 1); in xfs_buf_read_uncached()
892 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */ in xfs_buf_read_uncached()
893 bp->b_maps[0].bm_bn = daddr; in xfs_buf_read_uncached()
894 bp->b_flags |= XBF_READ; in xfs_buf_read_uncached()
895 bp->b_ops = ops; in xfs_buf_read_uncached()
897 xfs_buf_submit(bp); in xfs_buf_read_uncached()
898 if (bp->b_error) { in xfs_buf_read_uncached()
899 int error = bp->b_error; in xfs_buf_read_uncached()
900 xfs_buf_relse(bp); in xfs_buf_read_uncached()
904 *bpp = bp; in xfs_buf_read_uncached()
916 struct xfs_buf *bp; in xfs_buf_get_uncached() local
920 bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT); in xfs_buf_get_uncached()
921 if (unlikely(bp == NULL)) in xfs_buf_get_uncached()
925 error = _xfs_buf_get_pages(bp, page_count); in xfs_buf_get_uncached()
930 bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); in xfs_buf_get_uncached()
931 if (!bp->b_pages[i]) in xfs_buf_get_uncached()
934 bp->b_flags |= _XBF_PAGES; in xfs_buf_get_uncached()
936 error = _xfs_buf_map_pages(bp, 0); in xfs_buf_get_uncached()
943 trace_xfs_buf_get_uncached(bp, _RET_IP_); in xfs_buf_get_uncached()
944 return bp; in xfs_buf_get_uncached()
948 __free_page(bp->b_pages[i]); in xfs_buf_get_uncached()
949 _xfs_buf_free_pages(bp); in xfs_buf_get_uncached()
951 xfs_buf_free_maps(bp); in xfs_buf_get_uncached()
952 kmem_zone_free(xfs_buf_zone, bp); in xfs_buf_get_uncached()
964 xfs_buf_t *bp) in xfs_buf_hold() argument
966 trace_xfs_buf_hold(bp, _RET_IP_); in xfs_buf_hold()
967 atomic_inc(&bp->b_hold); in xfs_buf_hold()
976 xfs_buf_t *bp) in xfs_buf_rele() argument
978 struct xfs_perag *pag = bp->b_pag; in xfs_buf_rele()
982 trace_xfs_buf_rele(bp, _RET_IP_); in xfs_buf_rele()
985 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele()
986 if (atomic_dec_and_test(&bp->b_hold)) { in xfs_buf_rele()
987 xfs_buf_ioacct_dec(bp); in xfs_buf_rele()
988 xfs_buf_free(bp); in xfs_buf_rele()
993 ASSERT(atomic_read(&bp->b_hold) > 0); in xfs_buf_rele()
1005 spin_lock(&bp->b_lock); in xfs_buf_rele()
1006 release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock); in xfs_buf_rele()
1014 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) in xfs_buf_rele()
1015 __xfs_buf_ioacct_dec(bp); in xfs_buf_rele()
1020 __xfs_buf_ioacct_dec(bp); in xfs_buf_rele()
1021 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { in xfs_buf_rele()
1027 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) { in xfs_buf_rele()
1028 bp->b_state &= ~XFS_BSTATE_DISPOSE; in xfs_buf_rele()
1029 atomic_inc(&bp->b_hold); in xfs_buf_rele()
1039 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { in xfs_buf_rele()
1040 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru); in xfs_buf_rele()
1042 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele()
1045 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in xfs_buf_rele()
1046 rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head, in xfs_buf_rele()
1054 spin_unlock(&bp->b_lock); in xfs_buf_rele()
1057 xfs_buf_free(bp); in xfs_buf_rele()
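xfs_buf_rele() distinguishes uncached buffers, freed on the last reference, from cached ones, which are parked on the target's LRU with an extra hold instead of being freed. A much-simplified refcount model of that last-reference decision; the real code additionally serializes against the per-AG hash under pag_buf_lock and honours XBF_STALE, which this sketch omits:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buf_model {
    atomic_int b_hold;
    atomic_int b_lru_ref;
    bool       on_lru;
};

static void buf_free(struct buf_model *b) { (void)b; printf("freed\n"); }

static void buf_rele(struct buf_model *b)
{
    if (atomic_fetch_sub(&b->b_hold, 1) != 1)
        return;                        /* not the last reference */

    if (atomic_load(&b->b_lru_ref) > 0 && !b->on_lru) {
        /* Cache it: re-take a hold that the LRU now owns. */
        b->on_lru = true;
        atomic_fetch_add(&b->b_hold, 1);
        printf("parked on LRU\n");
        return;
    }
    buf_free(b);
}

int main(void)
{
    struct buf_model b = { 1, 1, false };

    buf_rele(&b);   /* last user hold: buffer moves to the LRU   */
    buf_rele(&b);   /* LRU hold dropped (e.g. by a shrinker): freed */
    return 0;
}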
1074 struct xfs_buf *bp) in xfs_buf_trylock() argument
1078 locked = down_trylock(&bp->b_sema) == 0; in xfs_buf_trylock()
1080 trace_xfs_buf_trylock(bp, _RET_IP_); in xfs_buf_trylock()
1082 trace_xfs_buf_trylock_fail(bp, _RET_IP_); in xfs_buf_trylock()
1097 struct xfs_buf *bp) in xfs_buf_lock() argument
1099 trace_xfs_buf_lock(bp, _RET_IP_); in xfs_buf_lock()
1101 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) in xfs_buf_lock()
1102 xfs_log_force(bp->b_mount, 0); in xfs_buf_lock()
1103 down(&bp->b_sema); in xfs_buf_lock()
1105 trace_xfs_buf_lock_done(bp, _RET_IP_); in xfs_buf_lock()
1110 struct xfs_buf *bp) in xfs_buf_unlock() argument
1112 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_unlock()
1114 up(&bp->b_sema); in xfs_buf_unlock()
1115 trace_xfs_buf_unlock(bp, _RET_IP_); in xfs_buf_unlock()
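xfs_buf_trylock()/xfs_buf_lock()/xfs_buf_unlock() implement the buffer lock with a semaphore, initialized to 0 in _xfs_buf_alloc() (see the "held, no waiters" comment on source line 228) so a freshly created buffer is already locked by its creator. The same pattern with POSIX semaphores, as a sketch:

#include <semaphore.h>
#include <stdio.h>

int main(void)
{
    sem_t b_sema;

    /* Value 0: created in the locked state, no waiters. */
    sem_init(&b_sema, 0, 0);

    /* trylock (down_trylock) fails while the creator holds the lock. */
    printf("trylock: %s\n", sem_trywait(&b_sema) == 0 ? "got it" : "busy");

    sem_post(&b_sema);          /* unlock: up(&bp->b_sema)   */
    sem_wait(&b_sema);          /* lock:   down(&bp->b_sema) */
    printf("locked again\n");

    sem_destroy(&b_sema);
    return 0;
}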
1120 xfs_buf_t *bp) in xfs_buf_wait_unpin() argument
1124 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
1127 add_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
1130 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
1134 remove_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
1144 struct xfs_buf *bp) in xfs_buf_ioend() argument
1146 bool read = bp->b_flags & XBF_READ; in xfs_buf_ioend()
1148 trace_xfs_buf_iodone(bp, _RET_IP_); in xfs_buf_ioend()
1150 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); in xfs_buf_ioend()
1156 if (!bp->b_error && bp->b_io_error) in xfs_buf_ioend()
1157 xfs_buf_ioerror(bp, bp->b_io_error); in xfs_buf_ioend()
1160 if (read && !bp->b_error && bp->b_ops) { in xfs_buf_ioend()
1161 ASSERT(!bp->b_iodone); in xfs_buf_ioend()
1162 bp->b_ops->verify_read(bp); in xfs_buf_ioend()
1165 if (!bp->b_error) in xfs_buf_ioend()
1166 bp->b_flags |= XBF_DONE; in xfs_buf_ioend()
1168 if (bp->b_iodone) in xfs_buf_ioend()
1169 (*(bp->b_iodone))(bp); in xfs_buf_ioend()
1170 else if (bp->b_flags & XBF_ASYNC) in xfs_buf_ioend()
1171 xfs_buf_relse(bp); in xfs_buf_ioend()
1173 complete(&bp->b_iowait); in xfs_buf_ioend()
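xfs_buf_ioend() funnels completion: it folds any bio-level b_io_error into b_error, runs the read verifier on a successful read, and then either calls a caller-supplied b_iodone hook, releases an async buffer, or completes b_iowait for a synchronous waiter. A linear sketch of that dispatch; the verifier call is reduced to a flag and the names are illustrative:

#include <stdio.h>

#define XBF_READ  0x1
#define XBF_ASYNC 0x2
#define XBF_DONE  0x4

struct buf_model {
    unsigned int b_flags;
    int          b_error, b_io_error;
    void       (*b_iodone)(struct buf_model *b);
    int          verified;
};

static void buf_ioend(struct buf_model *b)
{
    int read = b->b_flags & XBF_READ;

    b->b_flags &= ~XBF_READ;

    /* Promote the first recorded I/O error to the buffer error. */
    if (!b->b_error && b->b_io_error)
        b->b_error = b->b_io_error;

    if (read && !b->b_error)
        b->verified = 1;            /* stands in for b_ops->verify_read() */

    if (!b->b_error)
        b->b_flags |= XBF_DONE;

    if (b->b_iodone)
        b->b_iodone(b);             /* owner's completion hook */
    else if (b->b_flags & XBF_ASYNC)
        printf("async: release buffer\n");
    else
        printf("sync: wake waiter\n");
}

int main(void)
{
    struct buf_model b = { XBF_READ, 0, 0, NULL, 0 };

    buf_ioend(&b);
    printf("done=%u verified=%d\n", b.b_flags & XBF_DONE, b.verified);
    return 0;
}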
1180 struct xfs_buf *bp = in xfs_buf_ioend_work() local
1183 xfs_buf_ioend(bp); in xfs_buf_ioend_work()
1188 struct xfs_buf *bp) in xfs_buf_ioend_async() argument
1190 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); in xfs_buf_ioend_async()
1191 queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work); in xfs_buf_ioend_async()
1196 xfs_buf_t *bp, in __xfs_buf_ioerror() argument
1201 bp->b_error = error; in __xfs_buf_ioerror()
1202 trace_xfs_buf_ioerror(bp, error, failaddr); in __xfs_buf_ioerror()
1207 struct xfs_buf *bp, in xfs_buf_ioerror_alert() argument
1210 xfs_alert(bp->b_mount, in xfs_buf_ioerror_alert()
1212 func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length, in xfs_buf_ioerror_alert()
1213 -bp->b_error); in xfs_buf_ioerror_alert()
1218 struct xfs_buf *bp) in xfs_bwrite() argument
1222 ASSERT(xfs_buf_islocked(bp)); in xfs_bwrite()
1224 bp->b_flags |= XBF_WRITE; in xfs_bwrite()
1225 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | in xfs_bwrite()
1228 error = xfs_buf_submit(bp); in xfs_bwrite()
1230 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); in xfs_bwrite()
1238 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private; in xfs_buf_bio_end_io() local
1247 cmpxchg(&bp->b_io_error, 0, error); in xfs_buf_bio_end_io()
1250 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) in xfs_buf_bio_end_io()
1251 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); in xfs_buf_bio_end_io()
1253 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) in xfs_buf_bio_end_io()
1254 xfs_buf_ioend_async(bp); in xfs_buf_bio_end_io()
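xfs_buf_bio_end_io() records only the first error with cmpxchg() and uses an atomic decrement of b_io_remaining to detect when the last of several bios completes. A C11-atomics model of that last-completion pattern:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int io_remaining;   /* models bp->b_io_remaining */
static atomic_int io_error;       /* models bp->b_io_error */

/* Called once per completed bio; err is 0 on success. */
static void bio_end(int err)
{
    int expected = 0;

    /* Keep only the first error, like cmpxchg(&bp->b_io_error, 0, error). */
    if (err)
        atomic_compare_exchange_strong(&io_error, &expected, err);

    /* fetch_sub returning 1 means this was the last outstanding bio. */
    if (atomic_fetch_sub(&io_remaining, 1) == 1)
        printf("last bio done, error=%d\n", atomic_load(&io_error));
}

int main(void)
{
    atomic_store(&io_remaining, 3);
    bio_end(0);
    bio_end(-5);   /* -EIO: recorded as the first error */
    bio_end(-6);   /* later error is ignored */
    return 0;
}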
1260 struct xfs_buf *bp, in xfs_buf_ioapply_map() argument
1268 int total_nr_pages = bp->b_page_count; in xfs_buf_ioapply_map()
1271 sector_t sector = bp->b_maps[map].bm_bn; in xfs_buf_ioapply_map()
1287 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); in xfs_buf_ioapply_map()
1292 atomic_inc(&bp->b_io_remaining); in xfs_buf_ioapply_map()
1296 bio_set_dev(bio, bp->b_target->bt_bdev); in xfs_buf_ioapply_map()
1299 bio->bi_private = bp; in xfs_buf_ioapply_map()
1308 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, in xfs_buf_ioapply_map()
1320 if (xfs_buf_is_vmapped(bp)) { in xfs_buf_ioapply_map()
1321 flush_kernel_vmap_range(bp->b_addr, in xfs_buf_ioapply_map()
1322 xfs_buf_vmap_len(bp)); in xfs_buf_ioapply_map()
1332 atomic_dec(&bp->b_io_remaining); in xfs_buf_ioapply_map()
1333 xfs_buf_ioerror(bp, -EIO); in xfs_buf_ioapply_map()
1341 struct xfs_buf *bp) in _xfs_buf_ioapply() argument
1354 bp->b_error = 0; in _xfs_buf_ioapply()
1356 if (bp->b_flags & XBF_WRITE) { in _xfs_buf_ioapply()
1364 if (bp->b_ops) { in _xfs_buf_ioapply()
1365 bp->b_ops->verify_write(bp); in _xfs_buf_ioapply()
1366 if (bp->b_error) { in _xfs_buf_ioapply()
1367 xfs_force_shutdown(bp->b_mount, in _xfs_buf_ioapply()
1371 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) { in _xfs_buf_ioapply()
1372 struct xfs_mount *mp = bp->b_mount; in _xfs_buf_ioapply()
1381 __func__, bp->b_bn, bp->b_length); in _xfs_buf_ioapply()
1382 xfs_hex_dump(bp->b_addr, in _xfs_buf_ioapply()
1387 } else if (bp->b_flags & XBF_READ_AHEAD) { in _xfs_buf_ioapply()
1403 offset = bp->b_offset; in _xfs_buf_ioapply()
1404 size = BBTOB(bp->b_length); in _xfs_buf_ioapply()
1406 for (i = 0; i < bp->b_map_count; i++) { in _xfs_buf_ioapply()
1407 xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags); in _xfs_buf_ioapply()
1408 if (bp->b_error) in _xfs_buf_ioapply()
1421 struct xfs_buf *bp) in xfs_buf_iowait() argument
1423 ASSERT(!(bp->b_flags & XBF_ASYNC)); in xfs_buf_iowait()
1425 trace_xfs_buf_iowait(bp, _RET_IP_); in xfs_buf_iowait()
1426 wait_for_completion(&bp->b_iowait); in xfs_buf_iowait()
1427 trace_xfs_buf_iowait_done(bp, _RET_IP_); in xfs_buf_iowait()
1429 return bp->b_error; in xfs_buf_iowait()
1440 struct xfs_buf *bp, in __xfs_buf_submit() argument
1445 trace_xfs_buf_submit(bp, _RET_IP_); in __xfs_buf_submit()
1447 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in __xfs_buf_submit()
1450 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { in __xfs_buf_submit()
1451 xfs_buf_ioerror(bp, -EIO); in __xfs_buf_submit()
1452 bp->b_flags &= ~XBF_DONE; in __xfs_buf_submit()
1453 xfs_buf_stale(bp); in __xfs_buf_submit()
1454 xfs_buf_ioend(bp); in __xfs_buf_submit()
1463 xfs_buf_hold(bp); in __xfs_buf_submit()
1465 if (bp->b_flags & XBF_WRITE) in __xfs_buf_submit()
1466 xfs_buf_wait_unpin(bp); in __xfs_buf_submit()
1469 bp->b_io_error = 0; in __xfs_buf_submit()
1476 atomic_set(&bp->b_io_remaining, 1); in __xfs_buf_submit()
1477 if (bp->b_flags & XBF_ASYNC) in __xfs_buf_submit()
1478 xfs_buf_ioacct_inc(bp); in __xfs_buf_submit()
1479 _xfs_buf_ioapply(bp); in __xfs_buf_submit()
1486 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { in __xfs_buf_submit()
1487 if (bp->b_error || !(bp->b_flags & XBF_ASYNC)) in __xfs_buf_submit()
1488 xfs_buf_ioend(bp); in __xfs_buf_submit()
1490 xfs_buf_ioend_async(bp); in __xfs_buf_submit()
1494 error = xfs_buf_iowait(bp); in __xfs_buf_submit()
1501 xfs_buf_rele(bp); in __xfs_buf_submit()
1507 struct xfs_buf *bp, in xfs_buf_offset() argument
1512 if (bp->b_addr) in xfs_buf_offset()
1513 return bp->b_addr + offset; in xfs_buf_offset()
1515 offset += bp->b_offset; in xfs_buf_offset()
1516 page = bp->b_pages[offset >> PAGE_SHIFT]; in xfs_buf_offset()
1522 struct xfs_buf *bp, in xfs_buf_zero() argument
1533 page_index = (boff + bp->b_offset) >> PAGE_SHIFT; in xfs_buf_zero()
1534 page_offset = (boff + bp->b_offset) & ~PAGE_MASK; in xfs_buf_zero()
1535 page = bp->b_pages[page_index]; in xfs_buf_zero()
1537 BBTOB(bp->b_length) - boff); in xfs_buf_zero()
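xfs_buf_offset() and xfs_buf_zero() both translate a byte offset within the buffer into a (page index, page offset) pair, adding b_offset because the data may not start at the beginning of the first page. The arithmetic from source lines 1533-1534, stand-alone:

#include <stdio.h>

#define PAGE_SIZE  4096ul
#define PAGE_SHIFT 12
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long b_offset = 512;   /* data starts 512 bytes into page 0 */
    unsigned long boff = 5000;      /* offset within the buffer's data */

    unsigned long page_index  = (boff + b_offset) >> PAGE_SHIFT;
    unsigned long page_offset = (boff + b_offset) & ~PAGE_MASK;

    printf("page %lu, offset %lu\n", page_index, page_offset); /* page 1, 1416 */
    return 0;
}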
1564 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_wait_rele() local
1567 if (atomic_read(&bp->b_hold) > 1) { in xfs_buftarg_wait_rele()
1569 trace_xfs_buf_wait_buftarg(bp, _RET_IP_); in xfs_buftarg_wait_rele()
1572 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_wait_rele()
1579 atomic_set(&bp->b_lru_ref, 0); in xfs_buftarg_wait_rele()
1580 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_wait_rele()
1582 spin_unlock(&bp->b_lock); in xfs_buftarg_wait_rele()
1615 struct xfs_buf *bp; in xfs_wait_buftarg() local
1616 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_wait_buftarg()
1617 list_del_init(&bp->b_lru); in xfs_wait_buftarg()
1618 if (bp->b_flags & XBF_WRITE_FAIL) { in xfs_wait_buftarg()
1621 (long long)bp->b_bn); in xfs_wait_buftarg()
1625 xfs_buf_rele(bp); in xfs_wait_buftarg()
1639 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_isolate() local
1646 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_isolate()
1653 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) { in xfs_buftarg_isolate()
1654 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1658 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_isolate()
1660 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1678 struct xfs_buf *bp; in xfs_buftarg_shrink_scan() local
1679 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_buftarg_shrink_scan()
1680 list_del_init(&bp->b_lru); in xfs_buftarg_shrink_scan()
1681 xfs_buf_rele(bp); in xfs_buftarg_shrink_scan()
1798 struct xfs_buf *bp; in xfs_buf_delwri_cancel() local
1801 bp = list_first_entry(list, struct xfs_buf, b_list); in xfs_buf_delwri_cancel()
1803 xfs_buf_lock(bp); in xfs_buf_delwri_cancel()
1804 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_delwri_cancel()
1805 list_del_init(&bp->b_list); in xfs_buf_delwri_cancel()
1806 xfs_buf_relse(bp); in xfs_buf_delwri_cancel()
1823 struct xfs_buf *bp, in xfs_buf_delwri_queue() argument
1826 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_delwri_queue()
1827 ASSERT(!(bp->b_flags & XBF_READ)); in xfs_buf_delwri_queue()
1834 if (bp->b_flags & _XBF_DELWRI_Q) { in xfs_buf_delwri_queue()
1835 trace_xfs_buf_delwri_queued(bp, _RET_IP_); in xfs_buf_delwri_queue()
1839 trace_xfs_buf_delwri_queue(bp, _RET_IP_); in xfs_buf_delwri_queue()
1849 bp->b_flags |= _XBF_DELWRI_Q; in xfs_buf_delwri_queue()
1850 if (list_empty(&bp->b_list)) { in xfs_buf_delwri_queue()
1851 atomic_inc(&bp->b_hold); in xfs_buf_delwri_queue()
1852 list_add_tail(&bp->b_list, list); in xfs_buf_delwri_queue()
1870 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); in xfs_buf_cmp() local
1873 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; in xfs_buf_cmp()
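xfs_buf_cmp() orders a delwri list by the first map's block number so writes are issued in ascending disk order; in the kernel, the wide difference computed on source line 1873 is then clamped to -1/0/1 rather than truncated to int. An equivalent qsort() comparator over an illustrative struct:

#include <stdio.h>
#include <stdlib.h>

struct buf_model { long long bm_bn; };   /* first map's block number */

static int buf_cmp(const void *a, const void *b)
{
    long long diff = ((const struct buf_model *)a)->bm_bn -
                     ((const struct buf_model *)b)->bm_bn;

    /* Clamp instead of casting diff to int, which could truncate. */
    if (diff < 0)
        return -1;
    if (diff > 0)
        return 1;
    return 0;
}

int main(void)
{
    struct buf_model bufs[] = { { 900 }, { 12 }, { 512 } };
    int i;

    qsort(bufs, 3, sizeof(bufs[0]), buf_cmp);
    for (i = 0; i < 3; i++)
        printf("%lld\n", bufs[i].bm_bn);
    return 0;
}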
1893 struct xfs_buf *bp, *n; in xfs_buf_delwri_submit_buffers() local
1900 list_for_each_entry_safe(bp, n, buffer_list, b_list) { in xfs_buf_delwri_submit_buffers()
1902 if (xfs_buf_ispinned(bp)) { in xfs_buf_delwri_submit_buffers()
1906 if (!xfs_buf_trylock(bp)) in xfs_buf_delwri_submit_buffers()
1909 xfs_buf_lock(bp); in xfs_buf_delwri_submit_buffers()
1918 if (!(bp->b_flags & _XBF_DELWRI_Q)) { in xfs_buf_delwri_submit_buffers()
1919 list_del_init(&bp->b_list); in xfs_buf_delwri_submit_buffers()
1920 xfs_buf_relse(bp); in xfs_buf_delwri_submit_buffers()
1924 trace_xfs_buf_delwri_split(bp, _RET_IP_); in xfs_buf_delwri_submit_buffers()
1932 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL); in xfs_buf_delwri_submit_buffers()
1933 bp->b_flags |= XBF_WRITE; in xfs_buf_delwri_submit_buffers()
1935 bp->b_flags &= ~XBF_ASYNC; in xfs_buf_delwri_submit_buffers()
1936 list_move_tail(&bp->b_list, wait_list); in xfs_buf_delwri_submit_buffers()
1938 bp->b_flags |= XBF_ASYNC; in xfs_buf_delwri_submit_buffers()
1939 list_del_init(&bp->b_list); in xfs_buf_delwri_submit_buffers()
1941 __xfs_buf_submit(bp, false); in xfs_buf_delwri_submit_buffers()
1985 struct xfs_buf *bp; in xfs_buf_delwri_submit() local
1991 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); in xfs_buf_delwri_submit()
1993 list_del_init(&bp->b_list); in xfs_buf_delwri_submit()
1999 error2 = xfs_buf_iowait(bp); in xfs_buf_delwri_submit()
2000 xfs_buf_relse(bp); in xfs_buf_delwri_submit()
2025 struct xfs_buf *bp, in xfs_buf_delwri_pushbuf() argument
2031 ASSERT(bp->b_flags & _XBF_DELWRI_Q); in xfs_buf_delwri_pushbuf()
2033 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_); in xfs_buf_delwri_pushbuf()
2039 xfs_buf_lock(bp); in xfs_buf_delwri_pushbuf()
2040 list_move(&bp->b_list, &submit_list); in xfs_buf_delwri_pushbuf()
2041 xfs_buf_unlock(bp); in xfs_buf_delwri_pushbuf()
2056 error = xfs_buf_iowait(bp); in xfs_buf_delwri_pushbuf()
2057 bp->b_flags |= _XBF_DELWRI_Q; in xfs_buf_delwri_pushbuf()
2058 xfs_buf_unlock(bp); in xfs_buf_delwri_pushbuf()
2083 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) in xfs_buf_set_ref() argument
2090 if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF)) in xfs_buf_set_ref()
2093 atomic_set(&bp->b_lru_ref, lru_ref); in xfs_buf_set_ref()
2103 struct xfs_buf *bp, in xfs_verify_magic() argument
2106 struct xfs_mount *mp = bp->b_mount; in xfs_verify_magic()
2110 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])) in xfs_verify_magic()
2112 return dmagic == bp->b_ops->magic[idx]; in xfs_verify_magic()
2121 struct xfs_buf *bp, in xfs_verify_magic16() argument
2124 struct xfs_mount *mp = bp->b_mount; in xfs_verify_magic16()
2128 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])) in xfs_verify_magic16()
2130 return dmagic == bp->b_ops->magic16[idx]; in xfs_verify_magic16()
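xfs_verify_magic()/xfs_verify_magic16() compare an on-disk magic value against a per-ops table indexed by whether the filesystem is CRC-enabled, with the comparison done in disk (big-endian) byte order. A userspace sketch of the same check; the magic constants below are made-up examples, not real XFS values:

#include <arpa/inet.h>   /* htonl() for big-endian (disk-order) values */
#include <stdio.h>

struct buf_ops { unsigned int magic[2]; };   /* [0]=non-CRC, [1]=CRC format */

static int verify_magic(const struct buf_ops *ops, unsigned int dmagic,
                        int has_crc)
{
    int idx = has_crc ? 1 : 0;

    if (!ops->magic[idx])
        return 0;                  /* no magic registered: reject */
    return dmagic == ops->magic[idx];
}

int main(void)
{
    /* Hypothetical magics, stored in disk (big-endian) order. */
    struct buf_ops ops = { { htonl(0x58443242), htonl(0x58443342) } };
    unsigned int dmagic = htonl(0x58443342);   /* as read from disk */

    printf("match=%d\n", verify_magic(&ops, dmagic, 1));
    return 0;
}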