Lines matching refs:fiq (each entry gives the source line number, the matching code, the enclosing function, and whether fiq is an argument or a local there)

320 static u64 fuse_get_unique(struct fuse_iqueue *fiq) in fuse_get_unique() argument
322 return ++fiq->reqctr; in fuse_get_unique()
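fuse_get_unique() is a bare pre-increment with no atomics; it stays correct only because every call site in this listing already holds fiq->waitq.lock. A minimal userspace sketch of that contract (the iqueue/get_unique names are illustrative, not the kernel's):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct iqueue {
	pthread_mutex_t lock;	/* models fiq->waitq.lock */
	uint64_t reqctr;	/* models fiq->reqctr */
};

/* Caller must hold iq->lock, just as every fuse_get_unique() call
 * site below sits between spin_lock()/spin_unlock(). */
static uint64_t get_unique(struct iqueue *iq)
{
	return ++iq->reqctr;
}

int main(void)
{
	struct iqueue iq = { PTHREAD_MUTEX_INITIALIZER, 0 };

	pthread_mutex_lock(&iq.lock);
	printf("unique = %llu\n", (unsigned long long)get_unique(&iq));
	pthread_mutex_unlock(&iq.lock);
	return 0;
}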
325 static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req) in queue_request() argument
329 list_add_tail(&req->list, &fiq->pending); in queue_request()
330 wake_up_locked(&fiq->waitq); in queue_request()
331 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); in queue_request()
337 struct fuse_iqueue *fiq = &fc->iq; in fuse_queue_forget() local
342 spin_lock(&fiq->waitq.lock); in fuse_queue_forget()
343 if (fiq->connected) { in fuse_queue_forget()
344 fiq->forget_list_tail->next = forget; in fuse_queue_forget()
345 fiq->forget_list_tail = forget; in fuse_queue_forget()
346 wake_up_locked(&fiq->waitq); in fuse_queue_forget()
347 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); in fuse_queue_forget()
351 spin_unlock(&fiq->waitq.lock); in fuse_queue_forget()
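The forget list is a hand-rolled singly-linked queue rather than a struct list_head: fiq->forget_list_tail points at the last link (or at the dummy forget_list_head when empty), so lines 344-345 are an O(1) tail append. A self-contained sketch of the same structure, with a placeholder payload field:

#include <stddef.h>
#include <stdio.h>

struct forget_link {
	struct forget_link *next;
	unsigned long nodeid;	/* placeholder payload */
};

struct forget_queue {
	struct forget_link head;	/* like fiq->forget_list_head (dummy) */
	struct forget_link *tail;	/* like fiq->forget_list_tail */
};

static void forget_queue_init(struct forget_queue *q)
{
	q->head.next = NULL;
	q->tail = &q->head;	/* empty: tail points at the dummy head */
}

/* O(1) tail append, mirroring lines 344-345. */
static void queue_forget(struct forget_queue *q, struct forget_link *f)
{
	f->next = NULL;
	q->tail->next = f;
	q->tail = f;
}

int main(void)
{
	struct forget_queue q;
	struct forget_link a = { NULL, 1 }, b = { NULL, 2 };

	forget_queue_init(&q);
	queue_forget(&q, &a);
	queue_forget(&q, &b);
	for (struct forget_link *f = q.head.next; f; f = f->next)
		printf("forget nodeid %lu\n", f->nodeid);
	return 0;
}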
359 struct fuse_iqueue *fiq = &fc->iq; in flush_bg_queue() local
364 spin_lock(&fiq->waitq.lock); in flush_bg_queue()
365 req->in.h.unique = fuse_get_unique(fiq); in flush_bg_queue()
366 queue_request(fiq, req); in flush_bg_queue()
367 spin_unlock(&fiq->waitq.lock); in flush_bg_queue()
381 struct fuse_iqueue *fiq = &fc->iq; in request_end() local
386 spin_lock(&fiq->waitq.lock); in request_end()
388 spin_unlock(&fiq->waitq.lock); in request_end()
417 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) in queue_interrupt() argument
419 spin_lock(&fiq->waitq.lock); in queue_interrupt()
421 spin_unlock(&fiq->waitq.lock); in queue_interrupt()
425 list_add_tail(&req->intr_entry, &fiq->interrupts); in queue_interrupt()
426 wake_up_locked(&fiq->waitq); in queue_interrupt()
428 spin_unlock(&fiq->waitq.lock); in queue_interrupt()
429 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); in queue_interrupt()
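Note the ordering difference visible above: queue_request() issues kill_fasync() with the lock still held (line 331), while queue_interrupt() drops the lock first (lines 428-429). A hedged userspace model of queue_interrupt(); the elided lines around 420-424 are assumed to skip requests that already finished or are already queued, which the early unlock at line 421 suggests:

#include <pthread.h>
#include <stdbool.h>

struct intr_queue {
	pthread_mutex_t lock;	/* models fiq->waitq.lock */
	pthread_cond_t wait;	/* models fiq->waitq */
};

struct request {
	bool finished;		/* assumed guard behind the early unlock at 421 */
	bool intr_queued;	/* models list_empty(&req->intr_entry) */
};

/* Queue an interrupt at most once, wake readers under the lock, and do
 * the async notification after unlocking (lines 428-429). */
static void queue_interrupt(struct intr_queue *q, struct request *req)
{
	pthread_mutex_lock(&q->lock);
	if (req->finished) {
		pthread_mutex_unlock(&q->lock);	/* early exit, line 421 */
		return;
	}
	if (!req->intr_queued) {
		req->intr_queued = true;		/* list_add_tail(), line 425 */
		pthread_cond_broadcast(&q->wait);	/* wake_up_locked(), line 426 */
	}
	pthread_mutex_unlock(&q->lock);
	/* kill_fasync() runs here in the kernel, outside the lock */
}

int main(void)
{
	struct intr_queue q = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
	struct request req = { false, false };

	queue_interrupt(&q, &req);
	return req.intr_queued ? 0 : 1;
}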
434 struct fuse_iqueue *fiq = &fc->iq; in request_wait_answer() local
448 queue_interrupt(fiq, req); in request_wait_answer()
458 spin_lock(&fiq->waitq.lock); in request_wait_answer()
462 spin_unlock(&fiq->waitq.lock); in request_wait_answer()
467 spin_unlock(&fiq->waitq.lock); in request_wait_answer()
479 struct fuse_iqueue *fiq = &fc->iq; in __fuse_request_send() local
482 spin_lock(&fiq->waitq.lock); in __fuse_request_send()
483 if (!fiq->connected) { in __fuse_request_send()
484 spin_unlock(&fiq->waitq.lock); in __fuse_request_send()
487 req->in.h.unique = fuse_get_unique(fiq); in __fuse_request_send()
488 queue_request(fiq, req); in __fuse_request_send()
492 spin_unlock(&fiq->waitq.lock); in __fuse_request_send()
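__fuse_request_send() performs the fiq->connected test, the unique assignment, and the enqueue under a single lock hold, so a concurrent fuse_abort_conn() cannot slip between the check and the queueing and strand the request on a dead queue. A sketch of that pattern; returning -ENOTCONN is this sketch's convention, since the elided lines presumably record the error on the request instead:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct iqueue {
	pthread_mutex_t lock;	/* models fiq->waitq.lock */
	bool connected;
	uint64_t reqctr;
};

struct request {
	uint64_t unique;
};

/* Mirrors lines 482-492: check, number, and queue under one lock hold. */
static int send_request(struct iqueue *iq, struct request *req)
{
	pthread_mutex_lock(&iq->lock);
	if (!iq->connected) {
		pthread_mutex_unlock(&iq->lock);	/* line 484 */
		return -ENOTCONN;
	}
	req->unique = ++iq->reqctr;	/* fuse_get_unique(), line 487 */
	/* queue_request(iq, req) would link req into pending here, line 488 */
	pthread_mutex_unlock(&iq->lock);
	/* the caller then blocks in request_wait_answer() */
	return 0;
}

int main(void)
{
	struct iqueue iq = { PTHREAD_MUTEX_INITIALIZER, true, 0 };
	struct request req;

	return send_request(&iq, &req) ? 1 : 0;
}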
621 struct fuse_iqueue *fiq = &fc->iq; in fuse_request_send_notify_reply() local
625 spin_lock(&fiq->waitq.lock); in fuse_request_send_notify_reply()
626 if (fiq->connected) { in fuse_request_send_notify_reply()
627 queue_request(fiq, req); in fuse_request_send_notify_reply()
630 spin_unlock(&fiq->waitq.lock); in fuse_request_send_notify_reply()
1057 static int forget_pending(struct fuse_iqueue *fiq) in forget_pending() argument
1059 return fiq->forget_list_head.next != NULL; in forget_pending()
1062 static int request_pending(struct fuse_iqueue *fiq) in request_pending() argument
1064 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) || in request_pending()
1065 forget_pending(fiq); in request_pending()
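forget_pending() is a bare pointer test because the forget list is the singly-linked queue sketched earlier, not a struct list_head; request_pending() then ORs together the three work sources fuse_dev_do_read() can service. As a sketch (the two counters stand in for list_empty() on the real lists):

#include <stdbool.h>
#include <stddef.h>

struct forget_link { struct forget_link *next; };

struct iqueue {
	struct forget_link forget_list_head;	/* dummy head; NULL next == empty */
	int npending;		/* stands in for list_empty(&fiq->pending) */
	int ninterrupts;	/* stands in for list_empty(&fiq->interrupts) */
};

/* Mirrors forget_pending() at line 1059. */
static bool forget_pending(const struct iqueue *iq)
{
	return iq->forget_list_head.next != NULL;
}

/* Mirrors request_pending() at lines 1064-1065. */
static bool request_pending(const struct iqueue *iq)
{
	return iq->npending > 0 || iq->ninterrupts > 0 || forget_pending(iq);
}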
1076 static int fuse_read_interrupt(struct fuse_iqueue *fiq, in fuse_read_interrupt() argument
1079 __releases(fiq->waitq.lock) in fuse_read_interrupt()
1087 req->intr_unique = fuse_get_unique(fiq); in fuse_read_interrupt()
1095 spin_unlock(&fiq->waitq.lock); in fuse_read_interrupt()
1107 static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq, in dequeue_forget() argument
1111 struct fuse_forget_link *head = fiq->forget_list_head.next; in dequeue_forget()
1118 fiq->forget_list_head.next = *newhead; in dequeue_forget()
1120 if (fiq->forget_list_head.next == NULL) in dequeue_forget()
1121 fiq->forget_list_tail = &fiq->forget_list_head; in dequeue_forget()
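dequeue_forget() detaches up to max links in one pass and repairs the tail pointer when the list drains. A userspace reconstruction consistent with the visible lines (1111, 1118, 1120-1121); the elided middle is assumed to walk a pointer-to-pointer forward and NULL-terminate the returned chain:

#include <stddef.h>
#include <stdio.h>

struct forget_link { struct forget_link *next; };

struct forget_queue {
	struct forget_link head;	/* dummy head, like fiq->forget_list_head */
	struct forget_link *tail;	/* like fiq->forget_list_tail */
};

static struct forget_link *dequeue_forget(struct forget_queue *q,
					  unsigned max, unsigned *countp)
{
	struct forget_link *head = q->head.next;	/* line 1111 */
	struct forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;	/* assumed elided walk */

	q->head.next = *newhead;	/* line 1118: remainder stays queued */
	*newhead = NULL;		/* detach the returned chain */
	if (q->head.next == NULL)
		q->tail = &q->head;	/* lines 1120-1121: drained */

	if (countp)
		*countp = count;
	return head;
}

int main(void)
{
	struct forget_queue q = { { NULL }, &q.head };
	struct forget_link a = { NULL }, b = { NULL }, c = { NULL };
	unsigned n;

	q.tail->next = &a; q.tail = &a;
	q.tail->next = &b; q.tail = &b;
	q.tail->next = &c; q.tail = &c;
	dequeue_forget(&q, 2, &n);
	printf("dequeued %u, one left: %d\n", n, q.head.next == &c);
	return 0;
}

The pointer-to-pointer walk lets a single assignment (q->head.next = *newhead) requeue the untaken remainder, with no special case for taking the whole list.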
1129 static int fuse_read_single_forget(struct fuse_iqueue *fiq, in fuse_read_single_forget() argument
1132 __releases(fiq->waitq.lock) in fuse_read_single_forget()
1135 struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL); in fuse_read_single_forget()
1142 .unique = fuse_get_unique(fiq), in fuse_read_single_forget()
1146 spin_unlock(&fiq->waitq.lock); in fuse_read_single_forget()
1162 static int fuse_read_batch_forget(struct fuse_iqueue *fiq, in fuse_read_batch_forget() argument
1164 __releases(fiq->waitq.lock) in fuse_read_batch_forget()
1173 .unique = fuse_get_unique(fiq), in fuse_read_batch_forget()
1178 spin_unlock(&fiq->waitq.lock); in fuse_read_batch_forget()
1183 head = dequeue_forget(fiq, max_forgets, &count); in fuse_read_batch_forget()
1184 spin_unlock(&fiq->waitq.lock); in fuse_read_batch_forget()
1211 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, in fuse_read_forget() argument
1214 __releases(fiq->waitq.lock) in fuse_read_forget()
1216 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) in fuse_read_forget()
1217 return fuse_read_single_forget(fiq, cs, nbytes); in fuse_read_forget()
1219 return fuse_read_batch_forget(fiq, cs, nbytes); in fuse_read_forget()
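fuse_read_forget() batches only when userspace speaks FUSE protocol 7.16 or later (where FUSE_BATCH_FORGET was introduced) and more than one forget is actually queued; otherwise the single-message path is taken. Reduced to a predicate:

#include <stdbool.h>

/* FUSE_BATCH_FORGET arrived in protocol 7.16; older daemons only
 * understand single FUSE_FORGET messages. */
#define FUSE_BATCH_FORGET_MINOR 16

/* Models the dispatch at line 1216: batch only if the peer can parse
 * it and there is more than one forget queued. */
static bool use_batch_forget(unsigned proto_minor, bool more_than_one)
{
	return proto_minor >= FUSE_BATCH_FORGET_MINOR && more_than_one;
}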
1236 struct fuse_iqueue *fiq = &fc->iq; in fuse_dev_do_read() local
1243 spin_lock(&fiq->waitq.lock); in fuse_dev_do_read()
1245 if ((file->f_flags & O_NONBLOCK) && fiq->connected && in fuse_dev_do_read()
1246 !request_pending(fiq)) in fuse_dev_do_read()
1249 err = wait_event_interruptible_exclusive_locked(fiq->waitq, in fuse_dev_do_read()
1250 !fiq->connected || request_pending(fiq)); in fuse_dev_do_read()
1254 if (!fiq->connected) { in fuse_dev_do_read()
1259 if (!list_empty(&fiq->interrupts)) { in fuse_dev_do_read()
1260 req = list_entry(fiq->interrupts.next, struct fuse_req, in fuse_dev_do_read()
1262 return fuse_read_interrupt(fiq, cs, nbytes, req); in fuse_dev_do_read()
1265 if (forget_pending(fiq)) { in fuse_dev_do_read()
1266 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0) in fuse_dev_do_read()
1267 return fuse_read_forget(fc, fiq, cs, nbytes); in fuse_dev_do_read()
1269 if (fiq->forget_batch <= -8) in fuse_dev_do_read()
1270 fiq->forget_batch = 16; in fuse_dev_do_read()
1273 req = list_entry(fiq->pending.next, struct fuse_req, list); in fuse_dev_do_read()
1276 spin_unlock(&fiq->waitq.lock); in fuse_dev_do_read()
1319 queue_interrupt(fiq, req); in fuse_dev_do_read()
1331 spin_unlock(&fiq->waitq.lock); in fuse_dev_do_read()
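The dispatch order in fuse_dev_do_read() is: interrupts always first, then forgets and ordinary requests interleaved by fiq->forget_batch, which line 1266 decrements and lines 1269-1270 reset, yielding roughly 16 forgets for every 8 requests while both queues stay busy. A runnable model of just that arbitration (queue contents reduced to counters; the initial batch value in main is an assumption):

#include <stdio.h>

struct sched {
	int forget_batch;	/* models fiq->forget_batch */
	int interrupts, forgets, requests;
};

static const char *pick_next(struct sched *s)
{
	if (s->interrupts > 0) {
		s->interrupts--;
		return "interrupt";		/* lines 1259-1262 */
	}
	if (s->forgets > 0) {
		if (s->requests == 0 || s->forget_batch-- > 0) {
			s->forgets--;
			return "forget";	/* lines 1265-1267 */
		}
		if (s->forget_batch <= -8)
			s->forget_batch = 16;	/* lines 1269-1270 */
	}
	if (s->requests > 0) {
		s->requests--;
		return "request";		/* line 1273 */
	}
	return NULL;	/* the real reader would sleep on fiq->waitq */
}

int main(void)
{
	struct sched s = { 16, 1, 40, 40 };
	const char *what;

	while ((what = pick_next(&s)))
		printf("%s\n", what);
	return 0;
}

Running it prints the interrupt first, then alternating bursts of 16 forgets and 8 requests until both counters drain.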
2027 struct fuse_iqueue *fiq; in fuse_dev_poll() local
2033 fiq = &fud->fc->iq; in fuse_dev_poll()
2034 poll_wait(file, &fiq->waitq, wait); in fuse_dev_poll()
2036 spin_lock(&fiq->waitq.lock); in fuse_dev_poll()
2037 if (!fiq->connected) in fuse_dev_poll()
2039 else if (request_pending(fiq)) in fuse_dev_poll()
2041 spin_unlock(&fiq->waitq.lock); in fuse_dev_poll()
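fuse_dev_poll() registers the caller on fiq->waitq and then computes the readiness mask under the lock. Assuming the elided lines set the conventional bits (always writable, readable once request_pending() holds, POLLERR after disconnect), the mask logic reduces to:

#include <poll.h>
#include <stdbool.h>

/* Models fuse_dev_poll() at lines 2036-2041. */
static short fuse_poll_mask(bool connected, bool pending)
{
	short mask = POLLOUT | POLLWRNORM;

	if (!connected)
		mask = POLLERR;
	else if (pending)
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

int main(void)
{
	return (fuse_poll_mask(true, true) & POLLIN) ? 0 : 1;
}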
2098 struct fuse_iqueue *fiq = &fc->iq; in fuse_abort_conn() local
2132 spin_lock(&fiq->waitq.lock); in fuse_abort_conn()
2133 fiq->connected = 0; in fuse_abort_conn()
2134 list_for_each_entry(req, &fiq->pending, list) in fuse_abort_conn()
2136 list_splice_tail_init(&fiq->pending, &to_end); in fuse_abort_conn()
2137 while (forget_pending(fiq)) in fuse_abort_conn()
2138 kfree(dequeue_forget(fiq, 1, NULL)); in fuse_abort_conn()
2139 wake_up_all_locked(&fiq->waitq); in fuse_abort_conn()
2140 spin_unlock(&fiq->waitq.lock); in fuse_abort_conn()
2141 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); in fuse_abort_conn()
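fuse_abort_conn() marks the input queue dead, splices fiq->pending onto a local list (to_end, completed with an error once the lock drops), frees every queued forget, and wakes all readers so they observe !fiq->connected; as in queue_interrupt(), the SIGIO at line 2141 fires outside the lock. A sketch of the fiq half of the teardown (pending-request handling omitted):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct forget_link { struct forget_link *next; };

struct iqueue {
	pthread_mutex_t lock;		/* models fiq->waitq.lock */
	pthread_cond_t waitq;
	bool connected;
	struct forget_link head;	/* dummy head of the forget list */
	struct forget_link *tail;
};

/* Mark the queue dead, drop every queued forget (the
 * dequeue_forget()+kfree() loop at lines 2137-2138), wake all sleepers. */
static void abort_iqueue(struct iqueue *iq)
{
	struct forget_link *f, *next;

	pthread_mutex_lock(&iq->lock);
	iq->connected = false;
	for (f = iq->head.next; f; f = next) {
		next = f->next;
		free(f);
	}
	iq->head.next = NULL;
	iq->tail = &iq->head;
	pthread_cond_broadcast(&iq->waitq);	/* wake_up_all_locked() */
	pthread_mutex_unlock(&iq->lock);
	/* the kernel's kill_fasync(..., SIGIO, POLL_IN) fires here, unlocked */
}

int main(void)
{
	struct iqueue iq = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
		true, { NULL }, &iq.head
	};
	struct forget_link *f = malloc(sizeof(*f));

	f->next = NULL;
	iq.tail->next = f;
	iq.tail = f;
	abort_iqueue(&iq);
	return iq.connected;	/* 0 means the abort took effect */
}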