Lines Matching full:pa
244 * blocks: how many blocks are marked used/free in the on-disk bitmap, the buddy, and PAs.
248 * - new PA:                    buddy += N; PA = N
249 * - use inode PA:              on-disk += N; PA -= N
250 * - discard inode PA:          buddy -= on-disk - PA; PA = 0
251 * - use locality group PA:     on-disk += N; PA -= N
252 * - discard locality group PA: buddy -= PA; PA = 0
253 * note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
254 * is what the real operation consults, because we can't know the actual used
255 * bits from a PA, only from the on-disk bitmap
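The identities above are easy to sanity-check with a toy model. The following stand-alone user-space C sketch (my own names, not kernel code; three integers stand in for the real bitmaps, one PA region, block counts instead of bit ranges) replays the operations and asserts the "init buddy" identity, buddy = on-disk + PA, after every step:

/* Toy model of the PA accounting identities; not kernel code. */
#include <assert.h>
#include <stdio.h>

static int buddy_used, disk_used, pa_free;

static void check(const char *op)
{
	/* the "init buddy" identity: buddy = on-disk + PA */
	assert(buddy_used == disk_used + pa_free);
	printf("%-18s buddy=%d disk=%d pa=%d\n", op, buddy_used, disk_used, pa_free);
}

int main(void)
{
	int n = 8, k = 3;

	buddy_used += n; pa_free = n;	/* new PA: buddy += N; PA = N        */
	check("new PA");
	disk_used += k; pa_free -= k;	/* use PA: on-disk += N; PA -= N     */
	check("use inode PA");
	buddy_used -= pa_free;		/* discard: free the never-used bits;
					 * the kernel derives them by scanning
					 * the on-disk bitmap, hence the
					 * 'buddy -= on-disk - PA' notation */
	pa_free = 0;
	check("discard inode PA");
	return 0;
}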
265 * bit set and a PA claims the same block, it's OK. IOW, one can set a bit in
266 * the on-disk bitmap if the buddy has the same bit set and/or a PA covers the corresponding
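Knowledge (3) in miniature: '+' here is really a bitwise OR, and OR is idempotent and order-independent, which is why a block claimed both on disk and by a PA is harmless. A trivial stand-alone C check, illustrative only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 0x21, b = 0x21;	/* same starting bitmap */
	uint64_t bit = 1ull << 3;	/* one block, claimed twice */

	a |= bit; a |= bit;		/* set twice: idempotent */
	b |= bit;			/* set once */
	assert(a == b);			/* same bitmap either way */
	return 0;
}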
271 * - new PA
272 * blocks for the PA are allocated in the buddy, so the buddy must stay referenced
273 * until the PA is linked to the allocation group, to avoid concurrent buddy init
274 * - use inode PA
275 * we need to make sure that either the on-disk bitmap or the PA has up-to-date data
276 * given (3), we must ensure the PA -= N operation doesn't interfere with init
277 * - discard inode PA
279 * - use locality group PA
280 * again, PA -= N must be serialized against init
281 * - discard locality group PA
283 * - new PA vs.
284 * - use inode PA
286 * - discard inode PA
287 * the discard process must wait until the PA is no longer used by another process
288 * - use locality group PA
290 * - discard locality group PA
291 * the discard process must wait until the PA is no longer used by another process
292 * - use inode PA
293 * - use inode PA
295 * - discard inode PA
296 * the discard process must wait until the PA is no longer used by another process
297 * - use locality group PA
299 * - discard locality group PA
300 * the discard process must wait until the PA is no longer used by another process
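The rule that recurs four times in this table -- discard must wait until the PA is unused -- is enforced with a plain reference count (pa_count in the code further down, read under pa_lock): users bump the count before allocating from a PA, and the discard paths simply skip or back off from any PA whose count is non-zero. A minimal user-space sketch of that gate using C11 atomics; the struct and helper are invented for illustration, and the real code additionally keeps a PA alive while pa_free blocks remain:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_pa {
	atomic_int pa_count;	/* references held by allocating users */
	int pa_deleted;
};

static bool try_discard(struct toy_pa *pa)
{
	if (atomic_load(&pa->pa_count))
		return false;	/* busy: someone is allocating from it */
	pa->pa_deleted = 1;	/* safe: no users; reclaim can proceed */
	return true;
}

int main(void)
{
	struct toy_pa pa = { .pa_count = 1 };

	printf("discard while in use: %d\n", try_discard(&pa));  /* 0: skipped */
	atomic_fetch_sub(&pa.pa_count, 1);			 /* user done  */
	printf("discard after release: %d\n", try_discard(&pa)); /* 1 */
	return 0;
}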
303 * - while a PA is referenced, no discard of it is possible
304 * - a PA stays referenced until its blocks are marked in the on-disk bitmap
305 * - a PA changes only after the on-disk bitmap has been updated
310 * a special case: we've used a PA down to emptiness; no need to modify the buddy
325 * find proper PA (per-inode or group)
329 * release PA
341 * remove PA from object (inode or locality group)
353 * - per-pa lock (pa)
358 * - new pa
362 * - find and use pa:
363 * pa
365 * - release consumed pa:
366 * pa
372 * pa
376 * pa
381 * pa
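Reading the paths above together, the locks only ever nest in one direction: group, then object, then pa. A compact pthread sketch of that hierarchy (stand-in mutexes and hypothetical function names, modeled on the "generate in-core bitmap" and "discard all for given group" paths documented here):

#include <pthread.h>

static pthread_mutex_t group_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pa_lock     = PTHREAD_MUTEX_INITIALIZER;

/* "generate in-core bitmap": group -> pa */
static void generate_in_core_bitmap(void)
{
	pthread_mutex_lock(&group_lock);
	pthread_mutex_lock(&pa_lock);
	pthread_mutex_unlock(&pa_lock);
	pthread_mutex_unlock(&group_lock);
}

/* "discard all for given group": group -> pa, then group -> object */
static void discard_all_for_group(void)
{
	pthread_mutex_lock(&group_lock);
	pthread_mutex_lock(&pa_lock);	  /* mark PAs deleted */
	pthread_mutex_unlock(&pa_lock);
	pthread_mutex_lock(&object_lock); /* unlink from owning object */
	pthread_mutex_unlock(&object_lock);
	pthread_mutex_unlock(&group_lock);
}

int main(void)
{
	generate_in_core_bitmap();
	discard_all_for_group();
	return 0;
}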
750 struct ext4_prealloc_space *pa; in __mb_check_buddy() local
751 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in __mb_check_buddy()
752 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); in __mb_check_buddy()
754 for (i = 0; i < pa->pa_len; i++) in __mb_check_buddy()
3493 struct ext4_prealloc_space *pa; in ext4_mb_cleanup_pa() local
3498 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in ext4_mb_cleanup_pa()
3499 list_del(&pa->pa_group_list); in ext4_mb_cleanup_pa()
3501 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_cleanup_pa()
4002 struct ext4_prealloc_space *pa; in ext4_mb_normalize_request() local
4105 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_normalize_request()
4108 if (pa->pa_deleted) in ext4_mb_normalize_request()
4110 spin_lock(&pa->pa_lock); in ext4_mb_normalize_request()
4111 if (pa->pa_deleted) { in ext4_mb_normalize_request()
4112 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
4116 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), in ext4_mb_normalize_request()
4117 pa->pa_len); in ext4_mb_normalize_request()
4119 /* PA must not overlap original request */ in ext4_mb_normalize_request()
4121 ac->ac_o_ex.fe_logical < pa->pa_lstart)); in ext4_mb_normalize_request()
4124 if (pa->pa_lstart >= end || pa_end <= start) { in ext4_mb_normalize_request()
4125 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
4128 BUG_ON(pa->pa_lstart <= start && pa_end >= end); in ext4_mb_normalize_request()
4130 /* adjust start or end to be adjacent to this pa */ in ext4_mb_normalize_request()
4134 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { in ext4_mb_normalize_request()
4135 BUG_ON(pa->pa_lstart > end); in ext4_mb_normalize_request()
4136 end = pa->pa_lstart; in ext4_mb_normalize_request()
4138 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
4145 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_normalize_request()
4148 spin_lock(&pa->pa_lock); in ext4_mb_normalize_request()
4149 if (pa->pa_deleted == 0) { in ext4_mb_normalize_request()
4150 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), in ext4_mb_normalize_request()
4151 pa->pa_len); in ext4_mb_normalize_request()
4152 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); in ext4_mb_normalize_request()
4154 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
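The window arithmetic this loop performs, reduced to a stand-alone sketch: the normalized request [start, end) is trimmed so that it lies entirely on one side of each existing PA's logical range, on whichever side the original goal block falls. Plain C with my own function name; the kernel additionally works in clusters and holds pa_lock while reading the fields:

#include <assert.h>

typedef unsigned long long u64;

static void trim_window(u64 *start, u64 *end, u64 logical,
			u64 pa_lstart, u64 pa_end)
{
	if (pa_lstart >= *end || pa_end <= *start)
		return;				/* disjoint: nothing to do */
	assert(!(pa_lstart <= *start && pa_end >= *end)); /* a PA can't
							   * swallow the window */
	if (pa_end <= logical)
		*start = pa_end;		/* PA lies left of the goal  */
	else if (pa_lstart > logical)
		*end = pa_lstart;		/* PA lies right of the goal */
}

int main(void)
{
	u64 start = 0, end = 64, logical = 30;

	trim_window(&start, &end, logical, 0, 10);	/* PA on the left  */
	trim_window(&start, &end, logical, 40, 50);	/* PA on the right */
	assert(start == 10 && end == 40);	/* window now PA-free */
	return 0;
}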
4167 * In case of inode pa, later we use the allocated blocks in ext4_mb_normalize_request()
4235 * Called on failure; free up any blocks from the inode PA for this
4242 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_discard_allocated_blocks() local
4246 if (pa == NULL) { in ext4_discard_allocated_blocks()
4266 if (pa->pa_type == MB_INODE_PA) in ext4_discard_allocated_blocks()
4267 pa->pa_free += ac->ac_b_ex.fe_len; in ext4_discard_allocated_blocks()
4274 struct ext4_prealloc_space *pa) in ext4_mb_use_inode_pa() argument
4282 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); in ext4_mb_use_inode_pa()
4283 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), in ext4_mb_use_inode_pa()
4290 ac->ac_pa = pa; in ext4_mb_use_inode_pa()
4292 BUG_ON(start < pa->pa_pstart); in ext4_mb_use_inode_pa()
4293 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); in ext4_mb_use_inode_pa()
4294 BUG_ON(pa->pa_free < len); in ext4_mb_use_inode_pa()
4295 pa->pa_free -= len; in ext4_mb_use_inode_pa()
4297 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); in ext4_mb_use_inode_pa()
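The offset arithmetic of ext4_mb_use_inode_pa(), stand-alone: an inode PA maps logical block pa_lstart onto physical block pa_pstart, so a request at logical block L starts at pa_pstart + (L - pa_lstart) and is clipped both to the PA's end and to the wanted length. Sketch with made-up numbers; the EXT4_C2B cluster conversion is omitted:

#include <assert.h>

typedef unsigned long long u64;

int main(void)
{
	u64 pa_lstart = 100, pa_pstart = 5000, pa_len = 16;
	u64 logical = 104, want = 32;

	u64 start = pa_pstart + (logical - pa_lstart);	/* 5004 */
	u64 end = pa_pstart + pa_len;			/* PA's physical end */
	if (start + want < end)
		end = start + want;			/* clip to request */
	u64 len = end - start;

	assert(start == 5004 && len == 12);	/* clipped at the PA's end */
	return 0;
}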
4304 struct ext4_prealloc_space *pa) in ext4_mb_use_group_pa() argument
4308 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, in ext4_mb_use_group_pa()
4313 ac->ac_pa = pa; in ext4_mb_use_group_pa()
4317 * instead we correct pa later, after blocks are marked in ext4_mb_use_group_pa()
4319 * Other CPUs are prevented from allocating from this pa by lg_mutex in ext4_mb_use_group_pa()
4321 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", in ext4_mb_use_group_pa()
4322 pa->pa_lstart-len, len, pa); in ext4_mb_use_group_pa()
4333 struct ext4_prealloc_space *pa, in ext4_mb_check_group_pa() argument
4339 atomic_inc(&pa->pa_count); in ext4_mb_check_group_pa()
4340 return pa; in ext4_mb_check_group_pa()
4343 new_distance = abs(goal_block - pa->pa_pstart); in ext4_mb_check_group_pa()
4350 atomic_inc(&pa->pa_count); in ext4_mb_check_group_pa()
4351 return pa; in ext4_mb_check_group_pa()
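The selection rule here: among candidate group PAs, keep whichever physical start lies closest to the goal block (the winner also gets its pa_count bumped, per the lines above, so it can't be discarded underneath the caller). The distance logic alone, as a stand-alone sketch:

#include <assert.h>
#include <stdlib.h>

static long long best = -1;	/* pa_pstart of current best; -1 = none yet */

static void consider(long long goal, long long pa_pstart)
{
	if (best < 0 || llabs(goal - pa_pstart) < llabs(goal - best))
		best = pa_pstart;	/* closer candidate wins */
}

int main(void)
{
	consider(1000, 4000);
	consider(1000, 1200);
	consider(1000, 900);
	assert(best == 900);	/* nearest physical start to the goal */
	return 0;
}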
4364 struct ext4_prealloc_space *pa, *cpa = NULL; in ext4_mb_use_preallocated() local
4373 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_use_preallocated()
4377 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || in ext4_mb_use_preallocated()
4378 ac->ac_o_ex.fe_logical >= (pa->pa_lstart + in ext4_mb_use_preallocated()
4379 EXT4_C2B(sbi, pa->pa_len))) in ext4_mb_use_preallocated()
4384 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > in ext4_mb_use_preallocated()
4389 spin_lock(&pa->pa_lock); in ext4_mb_use_preallocated()
4390 if (pa->pa_deleted == 0 && pa->pa_free) { in ext4_mb_use_preallocated()
4391 atomic_inc(&pa->pa_count); in ext4_mb_use_preallocated()
4392 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_use_preallocated()
4393 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
4398 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
4422 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], in ext4_mb_use_preallocated()
4424 spin_lock(&pa->pa_lock); in ext4_mb_use_preallocated()
4425 if (pa->pa_deleted == 0 && in ext4_mb_use_preallocated()
4426 pa->pa_free >= ac->ac_o_ex.fe_len) { in ext4_mb_use_preallocated()
4429 pa, cpa); in ext4_mb_use_preallocated()
4431 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
4477 struct ext4_prealloc_space *pa; in ext4_mb_generate_from_pa() local
4493 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in ext4_mb_generate_from_pa()
4494 spin_lock(&pa->pa_lock); in ext4_mb_generate_from_pa()
4495 ext4_get_group_no_and_offset(sb, pa->pa_pstart, in ext4_mb_generate_from_pa()
4497 len = pa->pa_len; in ext4_mb_generate_from_pa()
4498 spin_unlock(&pa->pa_lock); in ext4_mb_generate_from_pa()
4509 struct ext4_prealloc_space *pa) in ext4_mb_mark_pa_deleted() argument
4513 if (pa->pa_deleted) { in ext4_mb_mark_pa_deleted()
4514 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", in ext4_mb_mark_pa_deleted()
4515 pa->pa_type, pa->pa_pstart, pa->pa_lstart, in ext4_mb_mark_pa_deleted()
4516 pa->pa_len); in ext4_mb_mark_pa_deleted()
4520 pa->pa_deleted = 1; in ext4_mb_mark_pa_deleted()
4522 if (pa->pa_type == MB_INODE_PA) { in ext4_mb_mark_pa_deleted()
4523 ei = EXT4_I(pa->pa_inode); in ext4_mb_mark_pa_deleted()
4530 struct ext4_prealloc_space *pa; in ext4_mb_pa_callback() local
4531 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); in ext4_mb_pa_callback()
4533 BUG_ON(atomic_read(&pa->pa_count)); in ext4_mb_pa_callback()
4534 BUG_ON(pa->pa_deleted == 0); in ext4_mb_pa_callback()
4535 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_pa_callback()
4543 struct super_block *sb, struct ext4_prealloc_space *pa) in ext4_mb_put_pa() argument
4549 spin_lock(&pa->pa_lock); in ext4_mb_put_pa()
4550 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { in ext4_mb_put_pa()
4551 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
4555 if (pa->pa_deleted == 1) { in ext4_mb_put_pa()
4556 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
4560 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_put_pa()
4561 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
4563 grp_blk = pa->pa_pstart; in ext4_mb_put_pa()
4566 * next group when pa is used up in ext4_mb_put_pa()
4568 if (pa->pa_type == MB_GROUP_PA) in ext4_mb_put_pa()
4577 * find block B in PA in ext4_mb_put_pa()
4580 * drop PA from group in ext4_mb_put_pa()
4584 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" in ext4_mb_put_pa()
4588 list_del(&pa->pa_group_list); in ext4_mb_put_pa()
4591 spin_lock(pa->pa_obj_lock); in ext4_mb_put_pa()
4592 list_del_rcu(&pa->pa_inode_list); in ext4_mb_put_pa()
4593 spin_unlock(pa->pa_obj_lock); in ext4_mb_put_pa()
4595 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_put_pa()
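ext4_mb_put_pa() condensed: the dropper of the last reference marks the PA deleted, unlinks it from the group and object lists under their respective locks, and defers the actual free to an RCU grace period so lockless list walkers never touch freed memory. The sketch below substitutes a direct callback for call_rcu() (real RCU needs a runtime) and is single-threaded, so the locks appear only as comments; the step ordering is the point. The real code also keeps the PA alive if pa_free blocks remain:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_pa {
	atomic_int pa_count;
	int pa_deleted;
};

static void pa_callback(struct toy_pa *pa)	/* "after grace period" */
{
	free(pa);
}

static void put_pa(struct toy_pa *pa)
{
	if (atomic_fetch_sub(&pa->pa_count, 1) != 1)
		return;			/* not the last reference */
	if (pa->pa_deleted)
		return;			/* raced with another dropper */
	pa->pa_deleted = 1;		/* 1. mark deleted (under pa_lock)   */
	/* 2. list_del from group list, under the group bitlock	      */
	/* 3. list_del_rcu from the object list, under pa_obj_lock    */
	pa_callback(pa);		/* 4. call_rcu() in the real code   */
}

int main(void)
{
	struct toy_pa *pa = calloc(1, sizeof(*pa));

	atomic_store(&pa->pa_count, 1);
	put_pa(pa);			/* last ref: marked, unlinked, freed */
	printf("done\n");
	return 0;
}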
4606 struct ext4_prealloc_space *pa; in ext4_mb_new_inode_pa() local
4616 pa = ac->ac_pa; in ext4_mb_new_inode_pa()
4656 pa->pa_lstart = ac->ac_b_ex.fe_logical; in ext4_mb_new_inode_pa()
4657 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_inode_pa()
4658 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_inode_pa()
4659 pa->pa_free = pa->pa_len; in ext4_mb_new_inode_pa()
4660 spin_lock_init(&pa->pa_lock); in ext4_mb_new_inode_pa()
4661 INIT_LIST_HEAD(&pa->pa_inode_list); in ext4_mb_new_inode_pa()
4662 INIT_LIST_HEAD(&pa->pa_group_list); in ext4_mb_new_inode_pa()
4663 pa->pa_deleted = 0; in ext4_mb_new_inode_pa()
4664 pa->pa_type = MB_INODE_PA; in ext4_mb_new_inode_pa()
4666 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, in ext4_mb_new_inode_pa()
4667 pa->pa_len, pa->pa_lstart); in ext4_mb_new_inode_pa()
4668 trace_ext4_mb_new_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
4670 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
4671 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); in ext4_mb_new_inode_pa()
4676 pa->pa_obj_lock = &ei->i_prealloc_lock; in ext4_mb_new_inode_pa()
4677 pa->pa_inode = ac->ac_inode; in ext4_mb_new_inode_pa()
4679 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); in ext4_mb_new_inode_pa()
4681 spin_lock(pa->pa_obj_lock); in ext4_mb_new_inode_pa()
4682 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); in ext4_mb_new_inode_pa()
4683 spin_unlock(pa->pa_obj_lock); in ext4_mb_new_inode_pa()
4695 struct ext4_prealloc_space *pa; in ext4_mb_new_group_pa() local
4704 pa = ac->ac_pa; in ext4_mb_new_group_pa()
4710 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_group_pa()
4711 pa->pa_lstart = pa->pa_pstart; in ext4_mb_new_group_pa()
4712 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_group_pa()
4713 pa->pa_free = pa->pa_len; in ext4_mb_new_group_pa()
4714 spin_lock_init(&pa->pa_lock); in ext4_mb_new_group_pa()
4715 INIT_LIST_HEAD(&pa->pa_inode_list); in ext4_mb_new_group_pa()
4716 INIT_LIST_HEAD(&pa->pa_group_list); in ext4_mb_new_group_pa()
4717 pa->pa_deleted = 0; in ext4_mb_new_group_pa()
4718 pa->pa_type = MB_GROUP_PA; in ext4_mb_new_group_pa()
4720 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, in ext4_mb_new_group_pa()
4721 pa->pa_len, pa->pa_lstart); in ext4_mb_new_group_pa()
4722 trace_ext4_mb_new_group_pa(ac, pa); in ext4_mb_new_group_pa()
4724 ext4_mb_use_group_pa(ac, pa); in ext4_mb_new_group_pa()
4725 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); in ext4_mb_new_group_pa()
4731 pa->pa_obj_lock = &lg->lg_prealloc_lock; in ext4_mb_new_group_pa()
4732 pa->pa_inode = NULL; in ext4_mb_new_group_pa()
4734 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); in ext4_mb_new_group_pa()
4737 * We will later add the new pa to the right bucket in ext4_mb_new_group_pa()
4753 * @pa must be unlinked from inode and group lists, so that
4760 struct ext4_prealloc_space *pa) in ext4_mb_release_inode_pa() argument
4771 BUG_ON(pa->pa_deleted == 0); in ext4_mb_release_inode_pa()
4772 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); in ext4_mb_release_inode_pa()
4773 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); in ext4_mb_release_inode_pa()
4774 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); in ext4_mb_release_inode_pa()
4775 end = bit + pa->pa_len; in ext4_mb_release_inode_pa()
4788 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + in ext4_mb_release_inode_pa()
4791 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); in ext4_mb_release_inode_pa()
4794 if (free != pa->pa_free) { in ext4_mb_release_inode_pa()
4796 "pa %p: logic %lu, phys. %lu, len %d", in ext4_mb_release_inode_pa()
4797 pa, (unsigned long) pa->pa_lstart, in ext4_mb_release_inode_pa()
4798 (unsigned long) pa->pa_pstart, in ext4_mb_release_inode_pa()
4799 pa->pa_len); in ext4_mb_release_inode_pa()
4801 free, pa->pa_free); in ext4_mb_release_inode_pa()
4803 * pa is already deleted so we use the value obtained in ext4_mb_release_inode_pa()
4814 struct ext4_prealloc_space *pa) in ext4_mb_release_group_pa() argument
4820 trace_ext4_mb_release_group_pa(sb, pa); in ext4_mb_release_group_pa()
4821 BUG_ON(pa->pa_deleted == 0); in ext4_mb_release_group_pa()
4822 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); in ext4_mb_release_group_pa()
4823 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); in ext4_mb_release_group_pa()
4824 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); in ext4_mb_release_group_pa()
4825 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); in ext4_mb_release_group_pa()
4826 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); in ext4_mb_release_group_pa()
4846 struct ext4_prealloc_space *pa, *tmp; in ext4_mb_discard_group_preallocations() local
4875 list_for_each_entry_safe(pa, tmp, in ext4_mb_discard_group_preallocations()
4877 spin_lock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4878 if (atomic_read(&pa->pa_count)) { in ext4_mb_discard_group_preallocations()
4879 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4883 if (pa->pa_deleted) { in ext4_mb_discard_group_preallocations()
4884 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4889 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_discard_group_preallocations()
4895 free += pa->pa_free; in ext4_mb_discard_group_preallocations()
4897 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4899 list_del(&pa->pa_group_list); in ext4_mb_discard_group_preallocations()
4900 list_add(&pa->u.pa_tmp_list, &list); in ext4_mb_discard_group_preallocations()
4904 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { in ext4_mb_discard_group_preallocations()
4907 spin_lock(pa->pa_obj_lock); in ext4_mb_discard_group_preallocations()
4908 list_del_rcu(&pa->pa_inode_list); in ext4_mb_discard_group_preallocations()
4909 spin_unlock(pa->pa_obj_lock); in ext4_mb_discard_group_preallocations()
4911 if (pa->pa_type == MB_GROUP_PA) in ext4_mb_discard_group_preallocations()
4912 ext4_mb_release_group_pa(&e4b, pa); in ext4_mb_discard_group_preallocations()
4914 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); in ext4_mb_discard_group_preallocations()
4916 list_del(&pa->u.pa_tmp_list); in ext4_mb_discard_group_preallocations()
4917 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_discard_group_preallocations()
4943 struct ext4_prealloc_space *pa, *tmp; in ext4_discard_preallocations() local
4968 /* first, collect all pa's in the inode */ in ext4_discard_preallocations()
4971 pa = list_entry(ei->i_prealloc_list.prev, in ext4_discard_preallocations()
4973 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); in ext4_discard_preallocations()
4974 spin_lock(&pa->pa_lock); in ext4_discard_preallocations()
4975 if (atomic_read(&pa->pa_count)) { in ext4_discard_preallocations()
4978 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4981 "uh-oh! used pa while discarding"); in ext4_discard_preallocations()
4987 if (pa->pa_deleted == 0) { in ext4_discard_preallocations()
4988 ext4_mb_mark_pa_deleted(sb, pa); in ext4_discard_preallocations()
4989 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4990 list_del_rcu(&pa->pa_inode_list); in ext4_discard_preallocations()
4991 list_add(&pa->u.pa_tmp_list, &list); in ext4_discard_preallocations()
4996 /* someone is deleting pa right now */ in ext4_discard_preallocations()
4997 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
5001 * doesn't mean pa is already unlinked from in ext4_discard_preallocations()
5005 * pa from inode's list may access already in ext4_discard_preallocations()
5017 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { in ext4_discard_preallocations()
5018 BUG_ON(pa->pa_type != MB_INODE_PA); in ext4_discard_preallocations()
5019 group = ext4_get_group_number(sb, pa->pa_pstart); in ext4_discard_preallocations()
5039 list_del(&pa->pa_group_list); in ext4_discard_preallocations()
5040 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); in ext4_discard_preallocations()
5046 list_del(&pa->u.pa_tmp_list); in ext4_discard_preallocations()
5047 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_discard_preallocations()
5053 struct ext4_prealloc_space *pa; in ext4_mb_pa_alloc() local
5056 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); in ext4_mb_pa_alloc()
5057 if (!pa) in ext4_mb_pa_alloc()
5059 atomic_set(&pa->pa_count, 1); in ext4_mb_pa_alloc()
5060 ac->ac_pa = pa; in ext4_mb_pa_alloc()
5066 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_pa_free() local
5068 BUG_ON(!pa); in ext4_mb_pa_free()
5070 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); in ext4_mb_pa_free()
5071 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_pa_free()
5086 struct ext4_prealloc_space *pa; in ext4_mb_show_pa() local
5091 pa = list_entry(cur, struct ext4_prealloc_space, in ext4_mb_show_pa()
5093 spin_lock(&pa->pa_lock); in ext4_mb_show_pa()
5094 ext4_get_group_no_and_offset(sb, pa->pa_pstart, in ext4_mb_show_pa()
5096 spin_unlock(&pa->pa_lock); in ext4_mb_show_pa()
5097 mb_debug(sb, "PA:%u:%d:%d\n", i, start, in ext4_mb_show_pa()
5098 pa->pa_len); in ext4_mb_show_pa()
5268 struct ext4_prealloc_space *pa, *tmp; in ext4_mb_discard_lg_preallocations() local
5275 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], in ext4_mb_discard_lg_preallocations()
5278 spin_lock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
5279 if (atomic_read(&pa->pa_count)) { in ext4_mb_discard_lg_preallocations()
5281 * This is the pa that we just used in ext4_mb_discard_lg_preallocations()
5285 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
5288 if (pa->pa_deleted) { in ext4_mb_discard_lg_preallocations()
5289 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
5293 BUG_ON(pa->pa_type != MB_GROUP_PA); in ext4_mb_discard_lg_preallocations()
5296 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_discard_lg_preallocations()
5297 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
5299 list_del_rcu(&pa->pa_inode_list); in ext4_mb_discard_lg_preallocations()
5300 list_add(&pa->u.pa_tmp_list, &discard_list); in ext4_mb_discard_lg_preallocations()
5315 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { in ext4_mb_discard_lg_preallocations()
5318 group = ext4_get_group_number(sb, pa->pa_pstart); in ext4_mb_discard_lg_preallocations()
5327 list_del(&pa->pa_group_list); in ext4_mb_discard_lg_preallocations()
5328 ext4_mb_release_group_pa(&e4b, pa); in ext4_mb_discard_lg_preallocations()
5332 list_del(&pa->u.pa_tmp_list); in ext4_mb_discard_lg_preallocations()
5333 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_discard_lg_preallocations()
5351 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; in ext4_mb_add_n_trim() local
5353 order = fls(pa->pa_free) - 1; in ext4_mb_add_n_trim()
5367 if (!added && pa->pa_free < tmp_pa->pa_free) { in ext4_mb_add_n_trim()
5369 list_add_tail_rcu(&pa->pa_inode_list, in ext4_mb_add_n_trim()
5381 list_add_tail_rcu(&pa->pa_inode_list, in ext4_mb_add_n_trim()
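The bucket arithmetic used here: a group PA with pa_free blocks left lands in lg_prealloc_list[order] with order = fls(pa_free) - 1, i.e. power-of-two size classes, while the insertion loop above keeps each bucket ordered by pa_free. A stand-alone check of the order computation (fls_ is a portable stand-in for the kernel's fls()):

#include <assert.h>

static int fls_(unsigned int x)		/* find last set bit, 1-based */
{
	int r = 0;
	while (x) { r++; x >>= 1; }
	return r;
}

int main(void)
{
	assert(fls_(1) - 1 == 0);	/* pa_free = 1 -> bucket 0 */
	assert(fls_(7) - 1 == 2);	/* pa_free = 7 -> bucket 2 */
	assert(fls_(8) - 1 == 3);	/* pa_free = 8 -> bucket 3 */
	return 0;
}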
5395 * if per-inode prealloc list is too long, trim some PA
5419 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_release_context() local
5420 if (pa) { in ext4_mb_release_context()
5421 if (pa->pa_type == MB_GROUP_PA) { in ext4_mb_release_context()
5423 spin_lock(&pa->pa_lock); in ext4_mb_release_context()
5424 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
5425 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
5426 pa->pa_free -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
5427 pa->pa_len -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
5428 spin_unlock(&pa->pa_lock); in ext4_mb_release_context()
5431 * We want to add the pa to the right bucket. in ext4_mb_release_context()
5436 if (likely(pa->pa_free)) { in ext4_mb_release_context()
5437 spin_lock(pa->pa_obj_lock); in ext4_mb_release_context()
5438 list_del_rcu(&pa->pa_inode_list); in ext4_mb_release_context()
5439 spin_unlock(pa->pa_obj_lock); in ext4_mb_release_context()
5444 if (pa->pa_type == MB_INODE_PA) { in ext4_mb_release_context()
5447 * to trim the least recently used PA. in ext4_mb_release_context()
5449 spin_lock(pa->pa_obj_lock); in ext4_mb_release_context()
5450 list_move(&pa->pa_inode_list, &ei->i_prealloc_list); in ext4_mb_release_context()
5451 spin_unlock(pa->pa_obj_lock); in ext4_mb_release_context()
5454 ext4_mb_put_pa(ac, ac->ac_sb, pa); in ext4_mb_release_context()
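What the MB_GROUP_PA branch above does to a PA it has just allocated from: a group PA is consumed from its front, so both start fields slide forward by the allocated length while pa_free and pa_len shrink by the same amount. Counter-only sketch with invented numbers (the kernel converts the length through EXT4_C2B and holds pa_lock):

#include <assert.h>

int main(void)
{
	unsigned long long pa_pstart = 8192, pa_lstart = 8192;
	int pa_len = 64, pa_free = 64;
	int allocated = 16;		/* ac_b_ex.fe_len */

	pa_pstart += allocated;		/* window slides forward... */
	pa_lstart += allocated;
	pa_free   -= allocated;		/* ...and shrinks */
	pa_len    -= allocated;

	assert(pa_pstart == 8208 && pa_lstart == 8208);
	assert(pa_free == 48 && pa_len == 48);
	return 0;
}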
5613 * pa allocated above is added to grp->bb_prealloc_list only in ext4_mb_new_blocks()
5617 * So we have to free this pa here itself. in ext4_mb_new_blocks()
5642 * If block allocation fails then the pa allocated above in ext4_mb_new_blocks()