Lines matching refs: fcd (struct fuse_conn_dax, fs/fuse/dax.c)

106 alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);
109 __kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms) in __kick_dmap_free_worker() argument
114 free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100, in __kick_dmap_free_worker()
116 if (fcd->nr_free_ranges < free_threshold) in __kick_dmap_free_worker()
117 queue_delayed_work(system_long_wq, &fcd->free_work, in __kick_dmap_free_worker()
121 static void kick_dmap_free_worker(struct fuse_conn_dax *fcd, in kick_dmap_free_worker() argument
124 spin_lock(&fcd->lock); in kick_dmap_free_worker()
125 __kick_dmap_free_worker(fcd, delay_ms); in kick_dmap_free_worker()
126 spin_unlock(&fcd->lock); in kick_dmap_free_worker()
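
Taken together, the matches at 106-126 pin down the reclaim trigger: the worker is kicked whenever free ranges drop below FUSE_DAX_RECLAIM_THRESHOLD percent of the total. A sketch of the pair; the floor of 1 and the msecs_to_jiffies() conversion are assumptions filled in around the matched lines.

	static void
	__kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
	{
		unsigned long free_threshold;

		/* Caller holds fcd->lock. Reclaim starts once free ranges
		 * fall below FUSE_DAX_RECLAIM_THRESHOLD percent of all
		 * ranges, with a floor of one range. */
		free_threshold = max_t(unsigned long,
				       fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
				       1);
		if (fcd->nr_free_ranges < free_threshold)
			queue_delayed_work(system_long_wq, &fcd->free_work,
					   msecs_to_jiffies(delay_ms));
	}

	static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
					  unsigned long delay_ms)
	{
		spin_lock(&fcd->lock);
		__kick_dmap_free_worker(fcd, delay_ms);
		spin_unlock(&fcd->lock);
	}

Splitting out a __locked helper lets alloc_dax_mapping() reuse the check while it already holds fcd->lock (line 141).
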
129 static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd) in alloc_dax_mapping() argument
133 spin_lock(&fcd->lock); in alloc_dax_mapping()
134 dmap = list_first_entry_or_null(&fcd->free_ranges, in alloc_dax_mapping()
138 WARN_ON(fcd->nr_free_ranges <= 0); in alloc_dax_mapping()
139 fcd->nr_free_ranges--; in alloc_dax_mapping()
141 __kick_dmap_free_worker(fcd, 0); in alloc_dax_mapping()
142 spin_unlock(&fcd->lock); in alloc_dax_mapping()
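
alloc_dax_mapping() (129-142) pops the head of the free list under fcd->lock. The list_del_init() and the enclosing if are assumptions, since lines 135-137 carry no fcd reference and therefore do not appear above.

	static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
	{
		struct fuse_dax_mapping *dmap;

		spin_lock(&fcd->lock);
		dmap = list_first_entry_or_null(&fcd->free_ranges,
						struct fuse_dax_mapping, list);
		if (dmap) {
			list_del_init(&dmap->list);
			WARN_ON(fcd->nr_free_ranges <= 0);
			fcd->nr_free_ranges--;
		}

		/* Kick the worker even on success: a successful allocation
		 * may itself push the pool below the threshold. */
		__kick_dmap_free_worker(fcd, 0);
		spin_unlock(&fcd->lock);

		return dmap;
	}
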
148 static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd, in __dmap_remove_busy_list() argument
152 WARN_ON(fcd->nr_busy_ranges == 0); in __dmap_remove_busy_list()
153 fcd->nr_busy_ranges--; in __dmap_remove_busy_list()
156 static void dmap_remove_busy_list(struct fuse_conn_dax *fcd, in dmap_remove_busy_list() argument
159 spin_lock(&fcd->lock); in dmap_remove_busy_list()
160 __dmap_remove_busy_list(fcd, dmap); in dmap_remove_busy_list()
161 spin_unlock(&fcd->lock); in dmap_remove_busy_list()
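
The busy-list counterpart mirrors the free-pool helpers: a __locked primitive plus a locking wrapper. The list_del_init(&dmap->busy_list) at line 151 is assumed, as that line does not reference fcd.

	static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
					    struct fuse_dax_mapping *dmap)
	{
		/* Caller holds fcd->lock */
		list_del_init(&dmap->busy_list);
		WARN_ON(fcd->nr_busy_ranges == 0);
		fcd->nr_busy_ranges--;
	}
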
165 static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd, in __dmap_add_to_free_pool() argument
168 list_add_tail(&dmap->list, &fcd->free_ranges); in __dmap_add_to_free_pool()
169 fcd->nr_free_ranges++; in __dmap_add_to_free_pool()
170 wake_up(&fcd->range_waitq); in __dmap_add_to_free_pool()
173 static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd, in dmap_add_to_free_pool() argument
177 spin_lock(&fcd->lock); in dmap_add_to_free_pool()
178 __dmap_add_to_free_pool(fcd, dmap); in dmap_add_to_free_pool()
179 spin_unlock(&fcd->lock); in dmap_add_to_free_pool()
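
Returning a range is the wake-up side of the throttling scheme; a sketch reconstructed from 165-179:

	/* Caller holds fcd->lock */
	static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
					    struct fuse_dax_mapping *dmap)
	{
		list_add_tail(&dmap->list, &fcd->free_ranges);
		fcd->nr_free_ranges++;
		/* Wake tasks blocked in __fuse_dax_fault() (line 803) or
		 * alloc_dax_mapping_reclaim() (line 1063). */
		wake_up(&fcd->range_waitq);
	}
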
187 struct fuse_conn_dax *fcd = fm->fc->dax; in fuse_setup_one_mapping() local
194 WARN_ON(fcd->nr_free_ranges < 0); in fuse_setup_one_mapping()
225 spin_lock(&fcd->lock); in fuse_setup_one_mapping()
226 list_add_tail(&dmap->busy_list, &fcd->busy_ranges); in fuse_setup_one_mapping()
227 fcd->nr_busy_ranges++; in fuse_setup_one_mapping()
228 spin_unlock(&fcd->lock); in fuse_setup_one_mapping()
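
Only the fcd-touching pieces of fuse_setup_one_mapping() are matched; the FUSE_SETUPMAPPING request in between carries no fcd reference and is elided here. A sketch of the bookkeeping around it:

	WARN_ON(fcd->nr_free_ranges < 0);	/* sanity check on entry (194) */

	/* ... send FUSE_SETUPMAPPING to the server and insert dmap into
	 * the inode's interval tree ... */

	spin_lock(&fcd->lock);
	list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
	fcd->nr_busy_ranges++;
	spin_unlock(&fcd->lock);
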
291 static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd, in dmap_reinit_add_to_free_pool() argument
297 __dmap_remove_busy_list(fcd, dmap); in dmap_reinit_add_to_free_pool()
300 __dmap_add_to_free_pool(fcd, dmap); in dmap_reinit_add_to_free_pool()
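
dmap_reinit_add_to_free_pool() chains the two __locked helpers; lines 292-299 are not matched, so the reset of the range's per-inode state in between is an assumption.

	/* Caller holds fcd->lock */
	static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
						 struct fuse_dax_mapping *dmap)
	{
		__dmap_remove_busy_list(fcd, dmap);
		/* ... reset the range's interval-tree state before it is
		 * recycled ... */
		__dmap_add_to_free_pool(fcd, dmap);
	}
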
309 static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd, in inode_reclaim_dmap_range() argument
345 spin_lock(&fcd->lock); in inode_reclaim_dmap_range()
348 dmap_reinit_add_to_free_pool(fcd, dmap); in inode_reclaim_dmap_range()
350 spin_unlock(&fcd->lock); in inode_reclaim_dmap_range()
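
inode_reclaim_dmap_range() batches the per-inode teardown and takes fcd->lock once for the hand-back; the to_remove list and the loop below are assumptions consistent with the matched lock/reinit/unlock sequence at 345-350.

	/* Return every range collected on to_remove in one lock hold */
	spin_lock(&fcd->lock);
	list_for_each_entry_safe(dmap, n, &to_remove, list) {
		list_del_init(&dmap->list);
		dmap_reinit_add_to_free_pool(fcd, dmap);
	}
	spin_unlock(&fcd->lock);
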
437 struct fuse_conn_dax *fcd = fc->dax; in fuse_setup_new_dax_mapping() local
455 alloc_dmap = alloc_dax_mapping(fcd); in fuse_setup_new_dax_mapping()
459 alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode); in fuse_setup_new_dax_mapping()
481 dmap_add_to_free_pool(fcd, alloc_dmap); in fuse_setup_new_dax_mapping()
490 dmap_add_to_free_pool(fcd, alloc_dmap); in fuse_setup_new_dax_mapping()
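
The four matches in fuse_setup_new_dax_mapping() outline the allocation strategy. The branch condition is not in the listing; the IOMAP_FAULT test on the iomap flags argument below is an assumption that fits the -EAGAIN retry visible at 802-803.

	if (flags & IOMAP_FAULT || !(flags & IOMAP_WRITE)) {
		/* Fault path holds locks that inline reclaim would need,
		 * so only the free pool may be used here. */
		alloc_dmap = alloc_dax_mapping(fcd);
		if (!alloc_dmap)
			return -EAGAIN;	/* __fuse_dax_fault() waits, retries */
	} else {
		alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
		if (IS_ERR(alloc_dmap))
			return PTR_ERR(alloc_dmap);
	}

	/* ... on a lost setup race (line 481) or a failed
	 * fuse_setup_one_mapping() (line 490), the range goes back: */
	dmap_add_to_free_pool(fcd, alloc_dmap);
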
796 struct fuse_conn_dax *fcd = fc->dax; in __fuse_dax_fault() local
802 if (retry && !(fcd->nr_free_ranges > 0)) in __fuse_dax_fault()
803 wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0)); in __fuse_dax_fault()
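
These are the only fcd uses in __fuse_dax_fault(), and they implement the consumer half of the wait. A condensed sketch; the retry label and the -EAGAIN handling are assumptions matching the flow above:

	retry:
		/* The pool was empty on the previous pass: sleep until the
		 * reclaim worker's wake_up(&fcd->range_waitq), then run the
		 * fault again. */
		if (retry && !(fcd->nr_free_ranges > 0))
			wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));

		/* ... run the DAX fault; if the iomap path failed with
		 * -EAGAIN because no range was free, set retry = true and
		 * goto retry ... */
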
948 inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode, in inode_inline_reclaim_one_dmap() argument
1009 dmap_remove_busy_list(fcd, dmap); in inode_inline_reclaim_one_dmap()
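
Two matches suffice to pin down the inline-reclaim contract: the range leaves the busy list (line 1009) but never passes through the free pool, so no concurrent waiter can steal it from the task doing the reclaim. A sketch of the tail, with the lookup and invalidation elided:

	/* The range is ours: off the busy list, never through the pool */
	dmap_remove_busy_list(fcd, dmap);
	/* ... reset dmap and return it directly to the allocator ... */
	return dmap;
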
1024 alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode) in alloc_dax_mapping_reclaim() argument
1032 dmap = alloc_dax_mapping(fcd); in alloc_dax_mapping_reclaim()
1036 dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry); in alloc_dax_mapping_reclaim()
1062 if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) { in alloc_dax_mapping_reclaim()
1063 if (wait_event_killable_exclusive(fcd->range_waitq, in alloc_dax_mapping_reclaim()
1064 (fcd->nr_free_ranges > 0))) { in alloc_dax_mapping_reclaim()
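
Stitching 1024-1064 together gives the reclaim loop; the retry handling between the matched lines is an assumption.

	static struct fuse_dax_mapping *
	alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
	{
		struct fuse_dax_mapping *dmap;
		struct fuse_inode *fi = get_fuse_inode(inode);

		while (1) {
			bool retry = false;

			dmap = alloc_dax_mapping(fcd);
			if (dmap)
				return dmap;

			/* a mapping or an ERR_PTR: return either */
			dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
			if (dmap)
				return dmap;
			if (retry)
				continue;	/* transient failure, try again */

			/*
			 * Nothing left to reclaim inline on this inode: sleep
			 * until the worker frees a range. Killable, so a fatal
			 * signal breaks the wait; exclusive, so one freed
			 * range wakes only one waiter.
			 */
			if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
				if (wait_event_killable_exclusive(fcd->range_waitq,
						(fcd->nr_free_ranges > 0)))
					return ERR_PTR(-EINTR);
			}
		}
	}
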
1071 static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd, in lookup_and_reclaim_dmap_locked() argument
1097 spin_lock(&fcd->lock); in lookup_and_reclaim_dmap_locked()
1098 dmap_reinit_add_to_free_pool(fcd, dmap); in lookup_and_reclaim_dmap_locked()
1099 spin_unlock(&fcd->lock); in lookup_and_reclaim_dmap_locked()
1110 static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd, in lookup_and_reclaim_dmap() argument
1129 ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx); in lookup_and_reclaim_dmap()
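
lookup_and_reclaim_dmap() (1110) takes the inode-side locks that serialize against faults and I/O, then calls the _locked variant (1129), whose only fcd work is the hand-back at 1097-1099. A sketch of the locked half; node_to_dmap(), the refcount test, and dmap_writeback_invalidate() are assumptions not present in the matches.

	static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
						  struct inode *inode,
						  unsigned long start_idx)
	{
		int ret;
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_dax_mapping *dmap;
		struct interval_tree_node *node;

		/* Someone else may already have cleaned this range up */
		node = interval_tree_iter_first(&fi->dax->tree, start_idx,
						start_idx);
		if (!node)
			return 0;
		dmap = node_to_dmap(node);

		/* Still in use: leave it alone */
		if (refcount_read(&dmap->refcnt) > 1)
			return 0;

		ret = dmap_writeback_invalidate(inode, dmap);
		if (ret)
			return ret;

		/* Hand the range back to the pool (1097-1099) */
		spin_lock(&fcd->lock);
		dmap_reinit_add_to_free_pool(fcd, dmap);
		spin_unlock(&fcd->lock);
		return 0;
	}
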
1136 static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd, in try_to_free_dmap_chunks() argument
1150 spin_lock(&fcd->lock); in try_to_free_dmap_chunks()
1152 if (!fcd->nr_busy_ranges) { in try_to_free_dmap_chunks()
1153 spin_unlock(&fcd->lock); in try_to_free_dmap_chunks()
1157 list_for_each_entry_safe(pos, temp, &fcd->busy_ranges, in try_to_free_dmap_chunks()
1177 list_move_tail(&dmap->busy_list, &fcd->busy_ranges); in try_to_free_dmap_chunks()
1181 spin_unlock(&fcd->lock); in try_to_free_dmap_chunks()
1185 ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx); in try_to_free_dmap_chunks()
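
The background path scans the busy list under fcd->lock but reclaims with the lock dropped. The refcount skip, the igrab(), and the itn.start capture below are assumptions fitted around the matched lines 1150-1185.

	static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
					   unsigned long nr_to_free)
	{
		struct fuse_dax_mapping *dmap, *pos, *temp;
		unsigned long start_idx = 0, end_idx = 0;
		struct inode *inode = NULL;
		int ret, nr_freed = 0;

		while (nr_freed < nr_to_free) {
			dmap = NULL;
			spin_lock(&fcd->lock);
			if (!fcd->nr_busy_ranges) {
				spin_unlock(&fcd->lock);
				return 0;
			}
			list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
						 busy_list) {
				/* Skip ranges in use or whose inode is
				 * already going away. */
				if (refcount_read(&pos->refcnt) > 1)
					continue;
				inode = igrab(pos->inode);
				if (!inode)
					continue;
				dmap = pos;
				/* Rotate to the tail so a failed candidate
				 * is not retried first on the next pass. */
				list_move_tail(&dmap->busy_list,
					       &fcd->busy_ranges);
				start_idx = end_idx = dmap->itn.start;
				break;
			}
			spin_unlock(&fcd->lock);
			if (!dmap)
				return 0;

			/* Reclaim happens without the spinlock held */
			ret = lookup_and_reclaim_dmap(fcd, inode, start_idx,
						      end_idx);
			iput(inode);
			if (ret)
				return ret;
			nr_freed++;
		}
		return 0;
	}
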
1197 struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax, in fuse_dax_free_mem_worker() local
1199 ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK); in fuse_dax_free_mem_worker()
1206 kick_dmap_free_worker(fcd, 1); in fuse_dax_free_mem_worker()
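
The worker itself is short; reconstructed from 1197-1206, with the pr_debug() on failure an assumption:

	static void fuse_dax_free_mem_worker(struct work_struct *work)
	{
		int ret;
		struct fuse_conn_dax *fcd = container_of(work,
						struct fuse_conn_dax,
						free_work.work);

		ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
		if (ret)
			pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n",
				 ret);

		/* Requeue after 1ms if free ranges are still below the
		 * threshold; kick_dmap_free_worker() re-checks under
		 * fcd->lock. */
		kick_dmap_free_worker(fcd, 1);
	}
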
1230 static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd) in fuse_dax_mem_range_init() argument
1238 init_waitqueue_head(&fcd->range_waitq); in fuse_dax_mem_range_init()
1239 INIT_LIST_HEAD(&fcd->free_ranges); in fuse_dax_mem_range_init()
1240 INIT_LIST_HEAD(&fcd->busy_ranges); in fuse_dax_mem_range_init()
1241 INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker); in fuse_dax_mem_range_init()
1244 nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), in fuse_dax_mem_range_init()
1270 list_add_tail(&range->list, &fcd->free_ranges); in fuse_dax_mem_range_init()
1273 fcd->nr_free_ranges = nr_ranges; in fuse_dax_mem_range_init()
1274 fcd->nr_ranges = nr_ranges; in fuse_dax_mem_range_init()
1278 fuse_free_dax_mem_ranges(&fcd->free_ranges); in fuse_dax_mem_range_init()
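
fuse_dax_mem_range_init() carves the DAX window into FUSE_DAX_SZ ranges and seeds the free pool. The trailing dax_direct_access() arguments follow the newer DAX_ACCESS signature, and the per-range fields (window_offset, length, refcnt) are assumptions; both vary across kernel versions.

	static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
	{
		long nr_pages, nr_ranges;
		struct fuse_dax_mapping *range;
		size_t dax_size = -1;	/* probe the whole device */
		unsigned long i;
		int ret, id;

		init_waitqueue_head(&fcd->range_waitq);
		INIT_LIST_HEAD(&fcd->free_ranges);
		INIT_LIST_HEAD(&fcd->busy_ranges);
		INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);

		id = dax_read_lock();
		nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
					     DAX_ACCESS, NULL, NULL);
		dax_read_unlock(id);
		if (nr_pages < 0)
			return nr_pages;

		nr_ranges = nr_pages / FUSE_DAX_PAGES_PER_RANGE;
		for (i = 0; i < nr_ranges; i++) {
			range = kzalloc(sizeof(*range), GFP_KERNEL);
			ret = -ENOMEM;
			if (!range)
				goto out_err;
			/* offset of this range within the DAX window */
			range->window_offset = i * FUSE_DAX_SZ;
			range->length = FUSE_DAX_SZ;
			INIT_LIST_HEAD(&range->busy_list);
			refcount_set(&range->refcnt, 1);
			list_add_tail(&range->list, &fcd->free_ranges);
		}

		fcd->nr_free_ranges = nr_ranges;
		fcd->nr_ranges = nr_ranges;
		return 0;
	out_err:
		/* free whatever made it onto the list (line 1278) */
		fuse_free_dax_mem_ranges(&fcd->free_ranges);
		return ret;
	}
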
1285 struct fuse_conn_dax *fcd; in fuse_dax_conn_alloc() local
1293 fcd = kzalloc(sizeof(*fcd), GFP_KERNEL); in fuse_dax_conn_alloc()
1294 if (!fcd) in fuse_dax_conn_alloc()
1297 spin_lock_init(&fcd->lock); in fuse_dax_conn_alloc()
1298 fcd->dev = dax_dev; in fuse_dax_conn_alloc()
1299 err = fuse_dax_mem_range_init(fcd); in fuse_dax_conn_alloc()
1301 kfree(fcd); in fuse_dax_conn_alloc()
1305 fc->dax = fcd; in fuse_dax_conn_alloc()
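
The constructor reconstructed from 1285-1305; the !dax_dev early return and the -ENOMEM value are assumptions, and newer kernels take an extra DAX-mode argument:

	int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev)
	{
		struct fuse_conn_dax *fcd;
		int err;

		if (!dax_dev)
			return 0;

		fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
		if (!fcd)
			return -ENOMEM;

		spin_lock_init(&fcd->lock);
		fcd->dev = dax_dev;
		err = fuse_dax_mem_range_init(fcd);
		if (err) {
			kfree(fcd);
			return err;
		}

		fc->dax = fcd;
		return 0;
	}
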
1384 struct fuse_conn_dax *fcd = fc->dax; in fuse_dax_cancel_work() local
1386 if (fcd) in fuse_dax_cancel_work()
1387 cancel_delayed_work_sync(&fcd->free_work); in fuse_dax_cancel_work()
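
The teardown hook is fully determined by its three matches:

	void fuse_dax_cancel_work(struct fuse_conn *fc)
	{
		struct fuse_conn_dax *fcd = fc->dax;

		if (fcd)
			cancel_delayed_work_sync(&fcd->free_work);
	}

cancel_delayed_work_sync() both cancels a pending kick and waits out a running fuse_dax_free_mem_worker(), so no worker can touch fcd once the connection is torn down.
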