Home
last modified time | relevance | path

Searched refs:rl (Results 1 – 25 of 79) sorted by relevance

Pages: 1 2 3 4

/Linux-v4.19/drivers/lightnvm/
Dpblk-rl.c21 static void pblk_rl_kick_u_timer(struct pblk_rl *rl) in pblk_rl_kick_u_timer() argument
23 mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000)); in pblk_rl_kick_u_timer()
26 int pblk_rl_is_limit(struct pblk_rl *rl) in pblk_rl_is_limit() argument
30 rb_space = atomic_read(&rl->rb_space); in pblk_rl_is_limit()
35 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries) in pblk_rl_user_may_insert() argument
37 int rb_user_cnt = atomic_read(&rl->rb_user_cnt); in pblk_rl_user_may_insert()
38 int rb_space = atomic_read(&rl->rb_space); in pblk_rl_user_may_insert()
43 if (rb_user_cnt >= rl->rb_user_max) in pblk_rl_user_may_insert()
49 void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries) in pblk_rl_inserted() argument
51 int rb_space = atomic_read(&rl->rb_space); in pblk_rl_inserted()
[all …]
Dpblk.h628 struct pblk_rl rl; member
906 void pblk_rl_init(struct pblk_rl *rl, int budget);
907 void pblk_rl_free(struct pblk_rl *rl);
908 void pblk_rl_update_rates(struct pblk_rl *rl);
909 int pblk_rl_high_thrs(struct pblk_rl *rl);
910 unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
911 unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
912 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
913 void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
914 void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
[all …]
/Linux-v4.19/fs/ntfs/
Drunlist.c74 static inline runlist_element *ntfs_rl_realloc(runlist_element *rl, in ntfs_rl_realloc() argument
79 old_size = PAGE_ALIGN(old_size * sizeof(*rl)); in ntfs_rl_realloc()
80 new_size = PAGE_ALIGN(new_size * sizeof(*rl)); in ntfs_rl_realloc()
82 return rl; in ntfs_rl_realloc()
88 if (likely(rl != NULL)) { in ntfs_rl_realloc()
91 memcpy(new_rl, rl, old_size); in ntfs_rl_realloc()
92 ntfs_free(rl); in ntfs_rl_realloc()
120 static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl, in ntfs_rl_realloc_nofail() argument
125 old_size = PAGE_ALIGN(old_size * sizeof(*rl)); in ntfs_rl_realloc_nofail()
126 new_size = PAGE_ALIGN(new_size * sizeof(*rl)); in ntfs_rl_realloc_nofail()
[all …]
Dlcnalloc.c51 const runlist_element *rl) in ntfs_cluster_free_from_rl_nolock() argument
57 if (!rl) in ntfs_cluster_free_from_rl_nolock()
59 for (; rl->length; rl++) { in ntfs_cluster_free_from_rl_nolock()
62 if (rl->lcn < 0) in ntfs_cluster_free_from_rl_nolock()
64 err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length); in ntfs_cluster_free_from_rl_nolock()
156 runlist_element *rl = NULL; in ntfs_cluster_alloc() local
334 if ((rlpos + 2) * sizeof(*rl) > rlsize) { in ntfs_cluster_alloc()
338 if (!rl) in ntfs_cluster_alloc()
350 memcpy(rl2, rl, rlsize); in ntfs_cluster_alloc()
351 ntfs_free(rl); in ntfs_cluster_alloc()
[all …]
Ddebug.c134 void ntfs_debug_dump_runlist(const runlist_element *rl) in ntfs_debug_dump_runlist() argument
143 if (!rl) { in ntfs_debug_dump_runlist()
149 LCN lcn = (rl + i)->lcn; in ntfs_debug_dump_runlist()
157 (long long)(rl + i)->vcn, lcn_str[index], in ntfs_debug_dump_runlist()
158 (long long)(rl + i)->length, in ntfs_debug_dump_runlist()
159 (rl + i)->length ? "" : in ntfs_debug_dump_runlist()
163 (long long)(rl + i)->vcn, in ntfs_debug_dump_runlist()
164 (long long)(rl + i)->lcn, in ntfs_debug_dump_runlist()
165 (long long)(rl + i)->length, in ntfs_debug_dump_runlist()
166 (rl + i)->length ? "" : in ntfs_debug_dump_runlist()
[all …]
Drunlist.h55 runlist_element *rl; member
59 static inline void ntfs_init_runlist(runlist *rl) in ntfs_init_runlist() argument
61 rl->rl = NULL; in ntfs_init_runlist()
62 init_rwsem(&rl->lock); in ntfs_init_runlist()
79 extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
83 extern runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl,
87 const runlist_element *rl, const VCN first_vcn,
91 const int dst_len, const runlist_element *rl,
Dattrib.c91 runlist_element *rl; in ntfs_map_runlist_nolock() local
187 rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl); in ntfs_map_runlist_nolock()
188 if (IS_ERR(rl)) in ntfs_map_runlist_nolock()
189 err = PTR_ERR(rl); in ntfs_map_runlist_nolock()
191 ni->runlist.rl = rl; in ntfs_map_runlist_nolock()
304 if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <= in ntfs_map_runlist()
354 if (!ni->runlist.rl) { in ntfs_attr_vcn_to_lcn_nolock()
364 lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn); in ntfs_attr_vcn_to_lcn_nolock()
378 if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) != in ntfs_attr_vcn_to_lcn_nolock()
468 runlist_element *rl; in ntfs_attr_find_vcn_nolock() local
[all …]
Dlogfile.c733 runlist_element *rl; in ntfs_empty_logfile() local
760 rl = log_ni->runlist.rl; in ntfs_empty_logfile()
761 if (unlikely(!rl || vcn < rl->vcn || !rl->length)) { in ntfs_empty_logfile()
769 rl = log_ni->runlist.rl; in ntfs_empty_logfile()
770 BUG_ON(!rl || vcn < rl->vcn || !rl->length); in ntfs_empty_logfile()
773 while (rl->length && vcn >= rl[1].vcn) in ntfs_empty_logfile()
774 rl++; in ntfs_empty_logfile()
784 lcn = rl->lcn; in ntfs_empty_logfile()
786 vcn = rl->vcn; in ntfs_empty_logfile()
790 if (unlikely(!rl->length || lcn < LCN_HOLE)) in ntfs_empty_logfile()
[all …]
Dmft.c477 runlist_element *rl; in ntfs_sync_mft_mirror() local
525 rl = NULL; in ntfs_sync_mft_mirror()
549 if (!rl) { in ntfs_sync_mft_mirror()
552 rl = NTFS_I(vol->mftmirr_ino)->runlist.rl; in ntfs_sync_mft_mirror()
557 BUG_ON(!rl); in ntfs_sync_mft_mirror()
560 while (rl->length && rl[1].vcn <= vcn) in ntfs_sync_mft_mirror()
561 rl++; in ntfs_sync_mft_mirror()
562 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); in ntfs_sync_mft_mirror()
587 if (unlikely(rl)) in ntfs_sync_mft_mirror()
683 runlist_element *rl; in write_mft_record_nolock() local
[all …]
Daops.c190 runlist_element *rl; in ntfs_read_block() local
203 BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni)); in ntfs_read_block()
242 rl = NULL; in ntfs_read_block()
263 if (!rl) { in ntfs_read_block()
266 rl = ni->runlist.rl; in ntfs_read_block()
268 if (likely(rl != NULL)) { in ntfs_read_block()
270 while (rl->length && rl[1].vcn <= vcn) in ntfs_read_block()
271 rl++; in ntfs_read_block()
272 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); in ntfs_read_block()
303 rl = NULL; in ntfs_read_block()
[all …]
Dlcnalloc.h114 const runlist_element *rl);
133 const runlist_element *rl) in ntfs_cluster_free_from_rl() argument
138 ret = ntfs_cluster_free_from_rl_nolock(vol, rl); in ntfs_cluster_free_from_rl()
Dfile.c595 runlist_element *rl, *rl2; in ntfs_prepare_pages_for_non_resident_write() local
640 rl = NULL; in ntfs_prepare_pages_for_non_resident_write()
843 if (!rl) { in ntfs_prepare_pages_for_non_resident_write()
846 rl = ni->runlist.rl; in ntfs_prepare_pages_for_non_resident_write()
848 if (likely(rl != NULL)) { in ntfs_prepare_pages_for_non_resident_write()
850 while (rl->length && rl[1].vcn <= bh_cpos) in ntfs_prepare_pages_for_non_resident_write()
851 rl++; in ntfs_prepare_pages_for_non_resident_write()
852 lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos); in ntfs_prepare_pages_for_non_resident_write()
860 vcn_len = rl[1].vcn - vcn; in ntfs_prepare_pages_for_non_resident_write()
877 rl = NULL; in ntfs_prepare_pages_for_non_resident_write()
[all …]
Dcompress.c484 runlist_element *rl; in ntfs_read_compressed_block() local
609 rl = NULL; in ntfs_read_compressed_block()
614 if (!rl) { in ntfs_read_compressed_block()
617 rl = ni->runlist.rl; in ntfs_read_compressed_block()
619 if (likely(rl != NULL)) { in ntfs_read_compressed_block()
621 while (rl->length && rl[1].vcn <= vcn) in ntfs_read_compressed_block()
622 rl++; in ntfs_read_compressed_block()
623 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); in ntfs_read_compressed_block()
660 if (rl) in ntfs_read_compressed_block()
Ddebug.h47 extern void ntfs_debug_dump_runlist(const runlist_element *rl);
57 #define ntfs_debug_dump_runlist(rl) do {} while (0) argument
/Linux-v4.19/drivers/s390/scsi/
Dzfcp_reqlist.h41 struct zfcp_reqlist *rl; in zfcp_reqlist_alloc() local
43 rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL); in zfcp_reqlist_alloc()
44 if (!rl) in zfcp_reqlist_alloc()
47 spin_lock_init(&rl->lock); in zfcp_reqlist_alloc()
50 INIT_LIST_HEAD(&rl->buckets[i]); in zfcp_reqlist_alloc()
52 return rl; in zfcp_reqlist_alloc()
61 static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl) in zfcp_reqlist_isempty() argument
66 if (!list_empty(&rl->buckets[i])) in zfcp_reqlist_isempty()
75 static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl) in zfcp_reqlist_free() argument
78 BUG_ON(!zfcp_reqlist_isempty(rl)); in zfcp_reqlist_free()
[all …]
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/
Drl.c110 struct mlx5_rate_limit *rl) in find_rl_entry() argument
117 if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl)) in find_rl_entry()
119 if (!empty_found && !table->rl_entry[i].rl.rate) { in find_rl_entry()
130 struct mlx5_rate_limit *rl) in mlx5_set_pp_rate_limit_cmd() argument
138 MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate); in mlx5_set_pp_rate_limit_cmd()
139 MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz); in mlx5_set_pp_rate_limit_cmd()
140 MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz); in mlx5_set_pp_rate_limit_cmd()
162 struct mlx5_rate_limit *rl) in mlx5_rl_add_rate() argument
170 if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) { in mlx5_rl_add_rate()
172 rl->rate, table->min_rate, table->max_rate); in mlx5_rl_add_rate()
[all …]
/Linux-v4.19/crypto/
Dvmac.c103 #define ADD128(rh, rl, ih, il) \ argument
106 (rl) += (_il); \
107 if ((rl) < (_il)) \
114 #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ argument
119 rl = MUL32(_i1, _i2); \
120 ADD128(rh, rl, (m >> 32), (m << 32)); \
123 #define MUL64(rh, rl, i1, i2) \ argument
129 rl = MUL32(_i1, _i2); \
130 ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
131 ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
[all …]
/Linux-v4.19/include/linux/
Dmath64.h221 } rl, rm, rn, rh, a0, b0; in mul_u64_u64_shr() local
227 rl.ll = mul_u32_u32(a0.l.low, b0.l.low); in mul_u64_u64_shr()
237 rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low; in mul_u64_u64_shr()
246 return rl.ll; in mul_u64_u64_shr()
248 return (rl.ll >> shift) | (rh.ll << (64 - shift)); in mul_u64_u64_shr()
267 } u, rl, rh; in mul_u64_u32_div() local
270 rl.ll = mul_u32_u32(u.l.low, mul); in mul_u64_u32_div()
271 rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high; in mul_u64_u32_div()
274 rl.l.high = do_div(rh.ll, divisor); in mul_u64_u32_div()
277 do_div(rl.ll, divisor); in mul_u64_u32_div()
[all …]
Dblk-cgroup.h126 struct request_list rl; member
553 return &blkg->rl; in blk_get_rl()
566 static inline void blk_put_rl(struct request_list *rl) in blk_put_rl() argument
568 if (rl->blkg->blkcg != &blkcg_root) in blk_put_rl()
569 blkg_put(rl->blkg); in blk_put_rl()
580 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) in blk_rq_set_rl() argument
582 rq->rl = rl; in blk_rq_set_rl()
593 return rq->rl; in blk_rq_rl()
596 struct request_list *__blk_queue_next_rl(struct request_list *rl,
603 #define blk_queue_for_each_rl(rl, q) \ argument
[all …]
Djump_label_ratelimit.h20 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
37 unsigned long rl) in jump_label_rate_limit() argument
/Linux-v4.19/block/
Dblk-core.c146 static void blk_clear_congested(struct request_list *rl, int sync) in blk_clear_congested() argument
149 clear_wb_congested(rl->blkg->wb_congested, sync); in blk_clear_congested()
155 if (rl == &rl->q->root_rl) in blk_clear_congested()
156 clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync); in blk_clear_congested()
160 static void blk_set_congested(struct request_list *rl, int sync) in blk_set_congested() argument
163 set_wb_congested(rl->blkg->wb_congested, sync); in blk_set_congested()
166 if (rl == &rl->q->root_rl) in blk_set_congested()
167 set_wb_congested(rl->q->backing_dev_info->wb.congested, sync); in blk_set_congested()
618 struct request_list *rl; in __blk_drain_queue() local
620 blk_queue_for_each_rl(rl, q) in __blk_drain_queue()
[all …]
/Linux-v4.19/fs/dlm/
Drcom.c376 struct rcom_lock *rl) in pack_rcom_lock() argument
378 memset(rl, 0, sizeof(*rl)); in pack_rcom_lock()
380 rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid); in pack_rcom_lock()
381 rl->rl_lkid = cpu_to_le32(lkb->lkb_id); in pack_rcom_lock()
382 rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags); in pack_rcom_lock()
383 rl->rl_flags = cpu_to_le32(lkb->lkb_flags); in pack_rcom_lock()
384 rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); in pack_rcom_lock()
385 rl->rl_rqmode = lkb->lkb_rqmode; in pack_rcom_lock()
386 rl->rl_grmode = lkb->lkb_grmode; in pack_rcom_lock()
387 rl->rl_status = lkb->lkb_status; in pack_rcom_lock()
[all …]
/Linux-v4.19/arch/arm/mm/
Dproc-v7-3level.S69 #define rl r3 macro
72 #define rl r2 macro
85 tst rl, #L_PTE_VALID
88 bicne rl, #L_PTE_VALID
94 orrne rl, #PTE_AP2
95 biceq rl, #PTE_AP2
/Linux-v4.19/drivers/slimbus/
Dslimbus.h134 u8 rl; member
146 #define DEFINE_SLIM_LDEST_TXN(name, mc, rl, la, msg) \ argument
147 struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
150 #define DEFINE_SLIM_BCAST_TXN(name, mc, rl, la, msg) \ argument
151 struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
154 #define DEFINE_SLIM_EDEST_TXN(name, mc, rl, la, msg) \ argument
155 struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_ENUMADDR, 0,\
/Linux-v4.19/arch/arm/vfp/
Dvfp.h76 u64 rh, rma, rmb, rl; in mul64to128() local
80 rl = (u64)nl * ml; in mul64to128()
93 rl += rma; in mul64to128()
94 rh += (rl < rma); in mul64to128()
96 *resl = rl; in mul64to128()
108 u64 rh, rl; in vfp_hi64multiply64() local
109 mul64to128(&rh, &rl, n, m); in vfp_hi64multiply64()
110 return rh | (rl != 0); in vfp_hi64multiply64()

Pages: 1 2 3 4