Lines matching references to fc (struct fuse_conn) in fs/fuse/dev.c. Each entry shows the source line number, the matching line, and the enclosing function; the trailing 'argument' or 'local' marks how fc enters that function.
118 void fuse_set_initialized(struct fuse_conn *fc) in fuse_set_initialized() argument
122 fc->initialized = 1; in fuse_set_initialized()
125 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) in fuse_block_alloc() argument
127 return !fc->initialized || (for_background && fc->blocked); in fuse_block_alloc()
130 static void fuse_drop_waiting(struct fuse_conn *fc) in fuse_drop_waiting() argument
132 if (fc->connected) { in fuse_drop_waiting()
133 atomic_dec(&fc->num_waiting); in fuse_drop_waiting()
134 } else if (atomic_dec_and_test(&fc->num_waiting)) { in fuse_drop_waiting()
136 wake_up_all(&fc->blocked_waitq); in fuse_drop_waiting()
140 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, in __fuse_get_req() argument
145 atomic_inc(&fc->num_waiting); in __fuse_get_req()
147 if (fuse_block_alloc(fc, for_background)) { in __fuse_get_req()
149 if (wait_event_killable_exclusive(fc->blocked_waitq, in __fuse_get_req()
150 !fuse_block_alloc(fc, for_background))) in __fuse_get_req()
157 if (!fc->connected) in __fuse_get_req()
161 if (fc->conn_error) in __fuse_get_req()
168 wake_up(&fc->blocked_waitq); in __fuse_get_req()
172 req->in.h.uid = from_kuid(fc->user_ns, current_fsuid()); in __fuse_get_req()
173 req->in.h.gid = from_kgid(fc->user_ns, current_fsgid()); in __fuse_get_req()
174 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); in __fuse_get_req()
182 fuse_put_request(fc, req); in __fuse_get_req()
188 fuse_drop_waiting(fc); in __fuse_get_req()
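
Read in sequence, the fragments above form the request-allocation throttle: every allocation bumps fc->num_waiting, may sleep until the connection has completed INIT (and is unblocked, for background requests), then stamps the request with the caller's credentials translated into the connection's namespaces. Below is a condensed reconstruction of __fuse_get_req(); the lines elided from the listing (error codes, FR_* flag bits, the smp_rmb() pairing) are filled in from context and should be read as approximate, not as a verbatim copy of this kernel version.

    static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                                           bool for_background)
    {
        struct fuse_req *req;
        int err;

        atomic_inc(&fc->num_waiting);

        /* Sleep until INIT completes; killable, so a fatal signal bails out */
        if (fuse_block_alloc(fc, for_background)) {
            err = -EINTR;
            if (wait_event_killable_exclusive(fc->blocked_waitq,
                    !fuse_block_alloc(fc, for_background)))
                goto out;
        }
        /* Pairs with the smp_wmb() in fuse_set_initialized() */
        smp_rmb();

        err = -ENOTCONN;
        if (!fc->connected)
            goto out;
        err = -ECONNREFUSED;
        if (fc->conn_error)
            goto out;

        req = fuse_request_alloc(npages);
        err = -ENOMEM;
        if (!req) {
            if (for_background)    /* let the next blocked allocator retry */
                wake_up(&fc->blocked_waitq);
            goto out;
        }

        /* Credentials as seen from the namespaces the fs was mounted in */
        req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
        req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
        req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

        __set_bit(FR_WAITING, &req->flags);
        if (for_background)
            __set_bit(FR_BACKGROUND, &req->flags);

        /* Unmappable ids cannot be sent to the daemon: line 182 above */
        if (unlikely(req->in.h.uid == ((uid_t)-1) ||
                     req->in.h.gid == ((gid_t)-1))) {
            fuse_put_request(fc, req);
            return ERR_PTR(-EOVERFLOW);
        }
        return req;

    out:
        fuse_drop_waiting(fc);    /* line 188 above */
        return ERR_PTR(err);
    }
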
192 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages) in fuse_get_req() argument
194 return __fuse_get_req(fc, npages, false); in fuse_get_req()
198 struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc, in fuse_get_req_for_background() argument
201 return __fuse_get_req(fc, npages, true); in fuse_get_req_for_background()
210 static struct fuse_req *get_reserved_req(struct fuse_conn *fc, in get_reserved_req() argument
217 wait_event(fc->reserved_req_waitq, ff->reserved_req); in get_reserved_req()
218 spin_lock(&fc->lock); in get_reserved_req()
224 spin_unlock(&fc->lock); in get_reserved_req()
233 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req) in put_reserved_req() argument
238 spin_lock(&fc->lock); in put_reserved_req()
242 wake_up_all(&fc->reserved_req_waitq); in put_reserved_req()
243 spin_unlock(&fc->lock); in put_reserved_req()
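
get_reserved_req()/put_reserved_req() implement the never-fail fallback used by fuse_get_req_nofail_nopages() below: each open file parks one pre-allocated request in ff->reserved_req, which a caller may steal under fc->lock and must later hand back. A sketch of the pairing; the request re-initialization on the put side is elided, and field names like stolen_file are from this kernel era:

    static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
                                             struct file *file)
    {
        struct fuse_req *req = NULL;
        struct fuse_file *ff = file->private_data;

        do {
            wait_event(fc->reserved_req_waitq, ff->reserved_req);
            spin_lock(&fc->lock);
            if (ff->reserved_req) {    /* may have lost the race */
                req = ff->reserved_req;
                ff->reserved_req = NULL;
                req->stolen_file = get_file(file);
            }
            spin_unlock(&fc->lock);
        } while (!req);

        return req;
    }

    static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
    {
        struct file *file = req->stolen_file;
        struct fuse_file *ff = file->private_data;

        spin_lock(&fc->lock);
        /* ... re-initialize req for its next use (elided) ... */
        ff->reserved_req = req;
        wake_up_all(&fc->reserved_req_waitq);
        spin_unlock(&fc->lock);
        fput(file);
    }
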
260 struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc, in fuse_get_req_nofail_nopages() argument
265 atomic_inc(&fc->num_waiting); in fuse_get_req_nofail_nopages()
266 wait_event(fc->blocked_waitq, fc->initialized); in fuse_get_req_nofail_nopages()
271 req = get_reserved_req(fc, file); in fuse_get_req_nofail_nopages()
273 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid()); in fuse_get_req_nofail_nopages()
274 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid()); in fuse_get_req_nofail_nopages()
275 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); in fuse_get_req_nofail_nopages()
282 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) in fuse_put_request() argument
290 spin_lock(&fc->lock); in fuse_put_request()
291 if (!fc->blocked) in fuse_put_request()
292 wake_up(&fc->blocked_waitq); in fuse_put_request()
293 spin_unlock(&fc->lock); in fuse_put_request()
298 fuse_drop_waiting(fc); in fuse_put_request()
302 put_reserved_req(fc, req); in fuse_put_request()
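
fuse_put_request() drops the request refcount; the final put undoes the accounting that __fuse_get_req() set up. The FR_WAITING bit routes the drop through fuse_drop_waiting() (lines 130-136 above), whose wake_up_all() on a disconnected connection is what fuse_wait_aborted() (line 2155) sleeps on. Condensed sketch; elided lines are approximate:

    void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
    {
        if (refcount_dec_and_test(&req->count)) {
            if (test_bit(FR_BACKGROUND, &req->flags)) {
                /* Background request allocated but never sent:
                 * let the next blocked allocator proceed. */
                spin_lock(&fc->lock);
                if (!fc->blocked)
                    wake_up(&fc->blocked_waitq);
                spin_unlock(&fc->lock);
            }

            if (test_bit(FR_WAITING, &req->flags)) {
                __clear_bit(FR_WAITING, &req->flags);
                fuse_drop_waiting(fc);  /* may wake fuse_wait_aborted() */
            }

            if (req->stolen_file)
                put_reserved_req(fc, req);
            else
                fuse_request_free(req);
        }
    }
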
334 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, in fuse_queue_forget() argument
337 struct fuse_iqueue *fiq = &fc->iq; in fuse_queue_forget()
354 static void flush_bg_queue(struct fuse_conn *fc) in flush_bg_queue() argument
356 while (fc->active_background < fc->max_background && in flush_bg_queue()
357 !list_empty(&fc->bg_queue)) { in flush_bg_queue()
359 struct fuse_iqueue *fiq = &fc->iq; in flush_bg_queue()
361 req = list_entry(fc->bg_queue.next, struct fuse_req, list); in flush_bg_queue()
363 fc->active_background++; in flush_bg_queue()
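
flush_bg_queue() promotes queued background requests onto the input queue until the active count reaches fc->max_background. It runs under fc->lock from both the submit path (line 598) and the completion path (line 407), which is what makes the cap self-refilling. Sketch; the fiq-side queueing detail is from this kernel era and approximate:

    static void flush_bg_queue(struct fuse_conn *fc)
    {
        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
            struct fuse_req *req;
            struct fuse_iqueue *fiq = &fc->iq;

            req = list_entry(fc->bg_queue.next, struct fuse_req, list);
            list_del(&req->list);
            fc->active_background++;

            /* Hand off to the input queue for the daemon to read */
            spin_lock(&fiq->waitq.lock);
            req->in.h.unique = fuse_get_unique(fiq);
            queue_request(fiq, req);
            spin_unlock(&fiq->waitq.lock);
        }
    }
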
379 static void request_end(struct fuse_conn *fc, struct fuse_req *req) in request_end() argument
381 struct fuse_iqueue *fiq = &fc->iq; in request_end()
392 spin_lock(&fc->lock); in request_end()
394 if (fc->num_background == fc->max_background) in request_end()
395 fc->blocked = 0; in request_end()
398 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq)) in request_end()
399 wake_up(&fc->blocked_waitq); in request_end()
401 if (fc->num_background == fc->congestion_threshold && fc->sb) { in request_end()
402 clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); in request_end()
403 clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); in request_end()
405 fc->num_background--; in request_end()
406 fc->active_background--; in request_end()
407 flush_bg_queue(fc); in request_end()
408 spin_unlock(&fc->lock); in request_end()
412 req->end(fc, req); in request_end()
414 fuse_put_request(fc, req); in request_end()
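
request_end() mirrors the background bookkeeping in reverse: completing a background request may clear fc->blocked, wake the next blocked allocator, clear the bdi congestion armed on the way in (compare lines 594-595 below), decrement both counters, and refill the pipeline via flush_bg_queue(), all under fc->lock. Condensed sketch of that middle section; the FR_FINISHED and interrupt-list handling at the top is elided:

    static void request_end(struct fuse_conn *fc, struct fuse_req *req)
    {
        /* ... set FR_FINISHED, unhash from fiq's interrupt list (elided) ... */

        if (test_bit(FR_BACKGROUND, &req->flags)) {
            spin_lock(&fc->lock);
            clear_bit(FR_BACKGROUND, &req->flags);
            if (fc->num_background == fc->max_background)
                fc->blocked = 0;
            /* Wake up next waiter, if any */
            if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
                wake_up(&fc->blocked_waitq);
            if (fc->num_background == fc->congestion_threshold && fc->sb) {
                clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
                clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
            }
            fc->num_background--;
            fc->active_background--;
            flush_bg_queue(fc);
            spin_unlock(&fc->lock);
        }
        wake_up(&req->waitq);
        if (req->end)
            req->end(fc, req);
        fuse_put_request(fc, req);
    }
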
432 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) in request_wait_answer() argument
434 struct fuse_iqueue *fiq = &fc->iq; in request_wait_answer()
437 if (!fc->no_interrupt) { in request_wait_answer()
477 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) in __fuse_request_send() argument
479 struct fuse_iqueue *fiq = &fc->iq; in __fuse_request_send()
494 request_wait_answer(fc, req); in __fuse_request_send()
500 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) in fuse_request_send() argument
505 atomic_inc(&fc->num_waiting); in fuse_request_send()
507 __fuse_request_send(fc, req); in fuse_request_send()
511 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) in fuse_adjust_compat() argument
513 if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS) in fuse_adjust_compat()
516 if (fc->minor < 9) { in fuse_adjust_compat()
532 if (fc->minor < 12) { in fuse_adjust_compat()
544 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) in fuse_simple_request() argument
549 req = fuse_get_req(fc, 0); in fuse_simple_request()
554 fuse_adjust_compat(fc, args); in fuse_simple_request()
565 fuse_request_send(fc, req); in fuse_simple_request()
571 fuse_put_request(fc, req); in fuse_simple_request()
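
fuse_simple_request() is the synchronous wrapper most opcodes go through: allocate, patch the args for old protocol minors, send, harvest the error. The compat adjustment has to come after fuse_get_req(), because allocation waits for INIT (lines 147-150 above) and only then is fc->minor valid. Sketch, with the args-to-request descriptor copying condensed:

    ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
    {
        struct fuse_req *req;
        ssize_t ret;

        req = fuse_get_req(fc, 0);
        if (IS_ERR(req))
            return PTR_ERR(req);

        /* fc->minor is only guaranteed valid after fuse_get_req() */
        fuse_adjust_compat(fc, args);

        /* ... copy args->in / args->out descriptors into req (elided) ... */

        fuse_request_send(fc, req);
        ret = req->out.h.error;
        if (!ret && args->out.argvar) {
            /* single variable-size out-arg: report its actual size */
            ret = req->out.args[0].size;
        }
        fuse_put_request(fc, req);

        return ret;
    }
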
581 void fuse_request_send_background_locked(struct fuse_conn *fc, in fuse_request_send_background_locked() argument
587 atomic_inc(&fc->num_waiting); in fuse_request_send_background_locked()
590 fc->num_background++; in fuse_request_send_background_locked()
591 if (fc->num_background == fc->max_background) in fuse_request_send_background_locked()
592 fc->blocked = 1; in fuse_request_send_background_locked()
593 if (fc->num_background == fc->congestion_threshold && fc->sb) { in fuse_request_send_background_locked()
594 set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); in fuse_request_send_background_locked()
595 set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); in fuse_request_send_background_locked()
597 list_add_tail(&req->list, &fc->bg_queue); in fuse_request_send_background_locked()
598 flush_bg_queue(fc); in fuse_request_send_background_locked()
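
This is where both thresholds seen above are armed: reaching max_background sets fc->blocked, stalling fuse_block_alloc() (line 127), and reaching congestion_threshold marks the bdi congested so writeback backs off; request_end() undoes both. Sketch, close to the listed lines:

    void fuse_request_send_background_locked(struct fuse_conn *fc,
                                             struct fuse_req *req)
    {
        BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
        if (!test_bit(FR_WAITING, &req->flags)) {
            __set_bit(FR_WAITING, &req->flags);
            atomic_inc(&fc->num_waiting);
        }
        __set_bit(FR_ISREPLY, &req->flags);
        fc->num_background++;
        if (fc->num_background == fc->max_background)
            fc->blocked = 1;    /* stall new allocations */
        if (fc->num_background == fc->congestion_threshold && fc->sb) {
            set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
            set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
        }
        list_add_tail(&req->list, &fc->bg_queue);
        flush_bg_queue(fc);
    }
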
601 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) in fuse_request_send_background() argument
604 spin_lock(&fc->lock); in fuse_request_send_background()
605 if (fc->connected) { in fuse_request_send_background()
606 fuse_request_send_background_locked(fc, req); in fuse_request_send_background()
607 spin_unlock(&fc->lock); in fuse_request_send_background()
609 spin_unlock(&fc->lock); in fuse_request_send_background()
611 req->end(fc, req); in fuse_request_send_background()
612 fuse_put_request(fc, req); in fuse_request_send_background()
617 static int fuse_request_send_notify_reply(struct fuse_conn *fc, in fuse_request_send_notify_reply() argument
621 struct fuse_iqueue *fiq = &fc->iq; in fuse_request_send_notify_reply()
638 struct fuse_conn *fc = get_fuse_conn(inode); in fuse_force_forget() local
644 req = fuse_get_req_nofail_nopages(fc, file); in fuse_force_forget()
651 __fuse_request_send(fc, req); in fuse_force_forget()
653 fuse_put_request(fc, req); in fuse_force_forget()
1211 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, in fuse_read_forget() argument
1216 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) in fuse_read_forget()
1235 struct fuse_conn *fc = fud->fc; in fuse_dev_do_read() local
1236 struct fuse_iqueue *fiq = &fc->iq; in fuse_dev_do_read()
1255 err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV; in fuse_dev_do_read()
1267 return fuse_read_forget(fc, fiq, cs, nbytes); in fuse_dev_do_read()
1287 request_end(fc, req); in fuse_dev_do_read()
1302 err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV; in fuse_dev_do_read()
1327 request_end(fc, req); in fuse_dev_do_read()
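
fuse_dev_do_read() is the daemon-facing read path: it hands work to userspace in strict priority order, interrupts first, then queued forgets, then pending requests, with fuse_read_forget() (line 1211) batching forgets when the protocol minor allows. A rough sketch of just that selection, assuming the helper names of this kernel era; the waiting and copy-out around it is elided:

    /* Inside fuse_dev_do_read(), once fiq is known to be non-empty: */
    if (!list_empty(&fiq->interrupts)) {
        req = list_entry(fiq->interrupts.next, struct fuse_req, intr_entry);
        return fuse_read_interrupt(fiq, cs, nbytes, req);
    }

    if (forget_pending(fiq)) {
        /* Interleave forgets with requests so neither starves */
        if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
            return fuse_read_forget(fc, fiq, cs, nbytes);
        if (fiq->forget_batch <= -8)
            fiq->forget_batch = 16;
    }

    req = list_entry(fiq->pending.next, struct fuse_req, list);
    /* ... copy req to userspace; on failure, request_end(fc, req) ... */
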
1414 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, in fuse_notify_poll() argument
1428 return fuse_notify_poll_wakeup(fc, &outarg); in fuse_notify_poll()
1435 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, in fuse_notify_inval_inode() argument
1449 down_read(&fc->killsb); in fuse_notify_inval_inode()
1451 if (fc->sb) { in fuse_notify_inval_inode()
1452 err = fuse_reverse_inval_inode(fc->sb, outarg.ino, in fuse_notify_inval_inode()
1455 up_read(&fc->killsb); in fuse_notify_inval_inode()
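
fuse_notify_inval_inode() is representative of the reverse-invalidation handlers that follow: decode a fixed-size payload, finish the copy, then take fc->killsb shared so the superblock cannot be torn down mid-invalidation, check fc->sb, and forward. Reconstruction; the decode details are filled in from context:

    static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
                                       struct fuse_copy_state *cs)
    {
        struct fuse_notify_inval_inode_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
            goto err;
        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
            goto err;
        fuse_copy_finish(cs);

        /* Shared: umount takes killsb exclusive before killing the sb */
        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
            err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
                                           outarg.off, outarg.len);
        }
        up_read(&fc->killsb);
        return err;

    err:
        fuse_copy_finish(cs);
        return err;
    }
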
1463 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, in fuse_notify_inval_entry() argument
1499 down_read(&fc->killsb); in fuse_notify_inval_entry()
1501 if (fc->sb) in fuse_notify_inval_entry()
1502 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name); in fuse_notify_inval_entry()
1503 up_read(&fc->killsb); in fuse_notify_inval_entry()
1513 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size, in fuse_notify_delete() argument
1549 down_read(&fc->killsb); in fuse_notify_delete()
1551 if (fc->sb) in fuse_notify_delete()
1552 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, in fuse_notify_delete()
1554 up_read(&fc->killsb); in fuse_notify_delete()
1564 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, in fuse_notify_store() argument
1592 down_read(&fc->killsb); in fuse_notify_store()
1595 if (!fc->sb) in fuse_notify_store()
1598 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid); in fuse_notify_store()
1644 up_read(&fc->killsb); in fuse_notify_store()
1650 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req) in fuse_retrieve_end() argument
1655 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, in fuse_retrieve() argument
1680 req = fuse_get_req(fc, num_pages); in fuse_retrieve()
1717 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique); in fuse_retrieve()
1719 fuse_retrieve_end(fc, req); in fuse_retrieve()
1724 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, in fuse_notify_retrieve() argument
1741 down_read(&fc->killsb); in fuse_notify_retrieve()
1743 if (fc->sb) { in fuse_notify_retrieve()
1746 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid); in fuse_notify_retrieve()
1748 err = fuse_retrieve(fc, inode, &outarg); in fuse_notify_retrieve()
1752 up_read(&fc->killsb); in fuse_notify_retrieve()
1761 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, in fuse_notify() argument
1769 return fuse_notify_poll(fc, size, cs); in fuse_notify()
1772 return fuse_notify_inval_inode(fc, size, cs); in fuse_notify()
1775 return fuse_notify_inval_entry(fc, size, cs); in fuse_notify()
1778 return fuse_notify_store(fc, size, cs); in fuse_notify()
1781 return fuse_notify_retrieve(fc, size, cs); in fuse_notify()
1784 return fuse_notify_delete(fc, size, cs); in fuse_notify()
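
fuse_notify() dispatches unsolicited daemon-to-kernel messages. Note the calling convention visible at line 1859: a device write whose header has unique == 0 is a notification, and the notify code travels in the header's error field. Sketch of the dispatcher:

    static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
                           unsigned int size, struct fuse_copy_state *cs)
    {
        /* Don't try to move pages (yet) */
        cs->move_pages = 0;

        switch (code) {
        case FUSE_NOTIFY_POLL:
            return fuse_notify_poll(fc, size, cs);
        case FUSE_NOTIFY_INVAL_INODE:
            return fuse_notify_inval_inode(fc, size, cs);
        case FUSE_NOTIFY_INVAL_ENTRY:
            return fuse_notify_inval_entry(fc, size, cs);
        case FUSE_NOTIFY_STORE:
            return fuse_notify_store(fc, size, cs);
        case FUSE_NOTIFY_RETRIEVE:
            return fuse_notify_retrieve(fc, size, cs);
        case FUSE_NOTIFY_DELETE:
            return fuse_notify_delete(fc, size, cs);
        default:
            fuse_copy_finish(cs);
            return -EINVAL;
        }
    }
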
1838 struct fuse_conn *fc = fud->fc; in fuse_dev_do_write() local
1859 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); in fuse_dev_do_write()
1885 fc->no_interrupt = 1; in fuse_dev_do_write()
1887 queue_interrupt(&fc->iq, req); in fuse_dev_do_write()
1915 request_end(fc, req); in fuse_dev_do_write()
2033 fiq = &fud->fc->iq; in fuse_dev_poll()
2051 static void end_requests(struct fuse_conn *fc, struct list_head *head) in end_requests() argument
2059 request_end(fc, req); in end_requests()
2063 static void end_polls(struct fuse_conn *fc) in end_polls() argument
2067 p = rb_first(&fc->polled_files); in end_polls()
2096 void fuse_abort_conn(struct fuse_conn *fc, bool is_abort) in fuse_abort_conn() argument
2098 struct fuse_iqueue *fiq = &fc->iq; in fuse_abort_conn()
2100 spin_lock(&fc->lock); in fuse_abort_conn()
2101 if (fc->connected) { in fuse_abort_conn()
2106 fc->connected = 0; in fuse_abort_conn()
2107 fc->blocked = 0; in fuse_abort_conn()
2108 fc->aborted = is_abort; in fuse_abort_conn()
2109 fuse_set_initialized(fc); in fuse_abort_conn()
2110 list_for_each_entry(fud, &fc->devices, entry) { in fuse_abort_conn()
2129 fc->max_background = UINT_MAX; in fuse_abort_conn()
2130 flush_bg_queue(fc); in fuse_abort_conn()
2142 end_polls(fc); in fuse_abort_conn()
2143 wake_up_all(&fc->blocked_waitq); in fuse_abort_conn()
2144 spin_unlock(&fc->lock); in fuse_abort_conn()
2146 end_requests(fc, &to_end); in fuse_abort_conn()
2148 spin_unlock(&fc->lock); in fuse_abort_conn()
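
fuse_abort_conn() ties the earlier pieces together at teardown: flip connected off, unblock every sleeper (fuse_set_initialized() plus the blocked_waitq wake), inflate max_background so flush_bg_queue() drains the entire backlog, and collect in-flight requests from each device so they can be ended outside the locks. Heavily condensed sketch; the per-device and input-queue list surgery is elided:

    void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
    {
        struct fuse_iqueue *fiq = &fc->iq;

        spin_lock(&fc->lock);
        if (fc->connected) {
            struct fuse_dev *fud;
            LIST_HEAD(to_end);

            fc->connected = 0;
            fc->blocked = 0;
            fc->aborted = is_abort;
            fuse_set_initialized(fc);   /* no one may block on INIT now */

            list_for_each_entry(fud, &fc->devices, entry) {
                /* ... move requests off fud->pq.io and fud->pq.processing
                 *     onto to_end (elided) ... */
            }

            fc->max_background = UINT_MAX;  /* let the whole backlog flow */
            flush_bg_queue(fc);

            /* ... mark fiq disconnected, splice fiq->pending onto to_end,
             *     end queued forgets, wake and kill readers (elided) ... */

            end_polls(fc);
            wake_up_all(&fc->blocked_waitq);
            spin_unlock(&fc->lock);

            end_requests(fc, &to_end);  /* request_end() outside the locks */
        } else {
            spin_unlock(&fc->lock);
        }
    }
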
2153 void fuse_wait_aborted(struct fuse_conn *fc) in fuse_wait_aborted() argument
2155 wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); in fuse_wait_aborted()
2163 struct fuse_conn *fc = fud->fc; in fuse_dev_release() local
2172 end_requests(fc, &to_end); in fuse_dev_release()
2175 if (atomic_dec_and_test(&fc->dev_count)) { in fuse_dev_release()
2176 WARN_ON(fc->iq.fasync != NULL); in fuse_dev_release()
2177 fuse_abort_conn(fc, false); in fuse_dev_release()
2193 return fasync_helper(fd, file, on, &fud->fc->iq.fasync); in fuse_dev_fasync()
2196 static int fuse_device_clone(struct fuse_conn *fc, struct file *new) in fuse_device_clone() argument
2203 fud = fuse_dev_alloc(fc); in fuse_device_clone()
2208 atomic_inc(&fc->dev_count); in fuse_device_clone()
2239 err = fuse_device_clone(fud->fc, file); in fuse_dev_ioctl()