Lines matching full:req in fs/nfs/pagelist.c

64 hdr->req = nfs_list_entry(mirror->pg_list.next); in nfs_pgheader_init()
66 hdr->cred = nfs_req_openctx(hdr->req)->cred; in nfs_pgheader_init()
67 hdr->io_start = req_offset(hdr->req); in nfs_pgheader_init()
153 * @req: any member of the page group
156 nfs_page_group_lock_head(struct nfs_page *req) in nfs_page_group_lock_head() argument
158 struct nfs_page *head = req->wb_head; in nfs_page_group_lock_head()
165 if (head != req) in nfs_page_group_lock_head()
171 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
173 * @req: request that couldn't lock and needs to wait on the req bit lock
179 nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req) in nfs_unroll_locks() argument
184 for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { in nfs_unroll_locks()
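Note: the walk above is the group-list idiom used throughout this file. Requests in a page group form a singly linked circular list threaded through wb_this_page, with wb_head naming the first member. A minimal user-space model of the unroll walk; the struct and field names below are stand-ins for struct nfs_page, not the kernel API:

	/* Stand-in for the struct nfs_page linkage fields. */
	struct req {
		struct req *wb_head;		/* first member of the group */
		struct req *wb_this_page;	/* next member (circular) */
		int locked;
	};

	/* Models nfs_unroll_locks(): starting just past the head, release
	 * every lock taken this run, stopping at the request that blocked
	 * (the kernel calls nfs_unlock_and_release_request() here). */
	static void unroll_locks(struct req *head, struct req *stop)
	{
		struct req *tmp;

		for (tmp = head->wb_this_page; tmp != stop; tmp = tmp->wb_this_page)
			tmp->locked = 0;
	}
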
249 * @req: request that is to be locked
251 * this lock must be held when modifying req->wb_head
256 nfs_page_set_headlock(struct nfs_page *req) in nfs_page_set_headlock() argument
258 if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags)) in nfs_page_set_headlock()
261 set_bit(PG_CONTENDED1, &req->wb_flags); in nfs_page_set_headlock()
263 return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK, in nfs_page_set_headlock()
269 * @req: request that is to be locked
272 nfs_page_clear_headlock(struct nfs_page *req) in nfs_page_clear_headlock() argument
275 clear_bit(PG_HEADLOCK, &req->wb_flags); in nfs_page_clear_headlock()
277 if (!test_bit(PG_CONTENDED1, &req->wb_flags)) in nfs_page_clear_headlock()
279 wake_up_bit(&req->wb_flags, PG_HEADLOCK); in nfs_page_clear_headlock()
284 * @req: request in group that is to be locked
292 nfs_page_group_lock(struct nfs_page *req) in nfs_page_group_lock() argument
296 ret = nfs_page_set_headlock(req); in nfs_page_group_lock()
297 if (ret || req->wb_head == req) in nfs_page_group_lock()
299 return nfs_page_set_headlock(req->wb_head); in nfs_page_group_lock()
304 * @req: request in group that is to be unlocked
307 nfs_page_group_unlock(struct nfs_page *req) in nfs_page_group_unlock() argument
309 if (req != req->wb_head) in nfs_page_group_unlock()
310 nfs_page_clear_headlock(req->wb_head); in nfs_page_group_unlock()
311 nfs_page_clear_headlock(req); in nfs_page_group_unlock()
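Note: lines 256-311 together implement a hand-rolled bit lock. test_and_set_bit(PG_HEADLOCK) is the uncontended fast path; PG_CONTENDED1 records that a waiter exists, so the unlock side only pays for wake_up_bit() when somebody is actually waiting. nfs_page_group_lock() layers an ordering rule on top: take the request's own headlock first, then the group head's, and nfs_page_group_unlock() releases them in reverse. A compilable user-space sketch of the contended-bit pattern, using C11 atomics in place of the kernel's wait_on_bit machinery (illustrative only):

	#include <stdatomic.h>
	#include <stdbool.h>

	enum { PG_HEADLOCK, PG_CONTENDED1 };

	/* Fast path of nfs_page_set_headlock(): try to grab the bit; on
	 * failure, record contention so the owner knows to wake us (the
	 * kernel then sleeps in wait_on_bit_lock()). */
	static bool set_headlock(atomic_ulong *flags)
	{
		if (!(atomic_fetch_or(flags, 1UL << PG_HEADLOCK) &
		      (1UL << PG_HEADLOCK)))
			return true;		/* uncontended: lock taken */
		atomic_fetch_or(flags, 1UL << PG_CONTENDED1);
		return false;			/* contended: wait and retry */
	}

	/* nfs_page_clear_headlock(): drop the bit, and only wake waiters
	 * if somebody recorded contention. */
	static void clear_headlock(atomic_ulong *flags)
	{
		atomic_fetch_and(flags, ~(1UL << PG_HEADLOCK));
		if (!(atomic_load(flags) & (1UL << PG_CONTENDED1)))
			return;		/* nobody waiting: skip the wake-up */
		/* wake_up_bit(flags, PG_HEADLOCK) would run here in the kernel */
	}
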
320 nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit) in nfs_page_group_sync_on_bit_locked() argument
322 struct nfs_page *head = req->wb_head; in nfs_page_group_sync_on_bit_locked()
326 WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags)); in nfs_page_group_sync_on_bit_locked()
328 tmp = req->wb_this_page; in nfs_page_group_sync_on_bit_locked()
329 while (tmp != req) { in nfs_page_group_sync_on_bit_locked()
336 tmp = req; in nfs_page_group_sync_on_bit_locked()
340 } while (tmp != req); in nfs_page_group_sync_on_bit_locked()
348 * @req - request in page group
351 bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit) in nfs_page_group_sync_on_bit() argument
355 nfs_page_group_lock(req); in nfs_page_group_sync_on_bit()
356 ret = nfs_page_group_sync_on_bit_locked(req, bit); in nfs_page_group_sync_on_bit()
357 nfs_page_group_unlock(req); in nfs_page_group_sync_on_bit()
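Note: nfs_page_group_sync_on_bit() answers "am I the last member of the group to reach this point?". Each caller sets @bit on its own request and walks the circle; only the caller that finds the bit already set on every other member returns true, and that caller also clears the bit group-wide (lines 336-340) so the group can synchronize again. A single-threaded model of the locked helper (the real one runs under the group lock):

	#include <stdbool.h>

	struct req {
		struct req *wb_this_page;	/* circular group list */
		unsigned long wb_flags;
	};

	/* Model of nfs_page_group_sync_on_bit_locked(). */
	static bool sync_on_bit_locked(struct req *req, unsigned int bit)
	{
		struct req *tmp;

		req->wb_flags |= 1UL << bit;
		for (tmp = req->wb_this_page; tmp != req; tmp = tmp->wb_this_page)
			if (!(tmp->wb_flags & (1UL << bit)))
				return false;	/* someone has not arrived yet */

		tmp = req;
		do {	/* last one in: reset the bit for the next round */
			tmp->wb_flags &= ~(1UL << bit);
			tmp = tmp->wb_this_page;
		} while (tmp != req);
		return true;
	}
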
363 * nfs_page_group_init - Initialize the page group linkage for @req
364 * @req - a new nfs request
365 * @prev - the previous request in page group, or NULL if @req is the first
369 nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev) in nfs_page_group_init() argument
372 WARN_ON_ONCE(prev == req); in nfs_page_group_init()
376 req->wb_head = req; in nfs_page_group_init()
377 req->wb_this_page = req; in nfs_page_group_init()
382 req->wb_head = prev->wb_head; in nfs_page_group_init()
383 req->wb_this_page = prev->wb_this_page; in nfs_page_group_init()
384 prev->wb_this_page = req; in nfs_page_group_init()
388 kref_get(&req->wb_head->wb_kref); in nfs_page_group_init()
394 inode = page_file_mapping(req->wb_page)->host; in nfs_page_group_init()
395 set_bit(PG_INODE_REF, &req->wb_flags); in nfs_page_group_init()
396 kref_get(&req->wb_kref); in nfs_page_group_init()
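Note: lines 369-396 are the only place the group list grows. A lone request becomes a circle of one; a later request is spliced in after @prev, inheriting prev's head, and every subrequest takes a reference on the head (line 388) so the head outlives its members. A standalone model of the splice, with assertions showing the resulting shape (linkage only, no refcounting):

	#include <assert.h>
	#include <stddef.h>

	struct req {
		struct req *wb_head;
		struct req *wb_this_page;
	};

	/* Models the linkage part of nfs_page_group_init(). */
	static void group_init(struct req *req, struct req *prev)
	{
		if (!prev) {
			req->wb_head = req;	/* a circle of one */
			req->wb_this_page = req;
		} else {
			req->wb_head = prev->wb_head;
			req->wb_this_page = prev->wb_this_page;
			prev->wb_this_page = req;	/* splice after prev */
		}
	}

	int main(void)
	{
		struct req a, b, c;

		group_init(&a, NULL);
		group_init(&b, &a);
		group_init(&c, &b);	/* list is now a -> b -> c -> a */

		assert(a.wb_this_page == &b);
		assert(b.wb_this_page == &c);
		assert(c.wb_this_page == &a);
		assert(b.wb_head == &a && c.wb_head == &a);
		return 0;
	}
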
404 * @req - request that no longer needs the page group
412 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); in nfs_page_group_destroy() local
413 struct nfs_page *head = req->wb_head; in nfs_page_group_destroy()
416 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN)) in nfs_page_group_destroy()
419 tmp = req; in nfs_page_group_destroy()
427 } while (tmp != req); in nfs_page_group_destroy()
430 if (head != req) in nfs_page_group_destroy()
439 struct nfs_page *req; in __nfs_create_request() local
445 req = nfs_page_alloc(); in __nfs_create_request()
446 if (req == NULL) in __nfs_create_request()
449 req->wb_lock_context = l_ctx; in __nfs_create_request()
456 req->wb_page = page; in __nfs_create_request()
458 req->wb_index = page_index(page); in __nfs_create_request()
461 req->wb_offset = offset; in __nfs_create_request()
462 req->wb_pgbase = pgbase; in __nfs_create_request()
463 req->wb_bytes = count; in __nfs_create_request()
464 kref_init(&req->wb_kref); in __nfs_create_request()
465 req->wb_nio = 0; in __nfs_create_request()
466 return req; in __nfs_create_request()
497 nfs_create_subreq(struct nfs_page *req, in nfs_create_subreq() argument
505 ret = __nfs_create_request(req->wb_lock_context, req->wb_page, in nfs_create_subreq()
509 for (last = req->wb_head; in nfs_create_subreq()
510 last->wb_this_page != req->wb_head; in nfs_create_subreq()
515 ret->wb_index = req->wb_index; in nfs_create_subreq()
517 ret->wb_nio = req->wb_nio; in nfs_create_subreq()
524 * @req: pointer to request
526 void nfs_unlock_request(struct nfs_page *req) in nfs_unlock_request() argument
528 if (!NFS_WBACK_BUSY(req)) { in nfs_unlock_request()
533 clear_bit(PG_BUSY, &req->wb_flags); in nfs_unlock_request()
535 if (!test_bit(PG_CONTENDED2, &req->wb_flags)) in nfs_unlock_request()
537 wake_up_bit(&req->wb_flags, PG_BUSY); in nfs_unlock_request()
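Note: the PG_BUSY request lock released here follows the same contended-bit pattern sketched above for PG_HEADLOCK, with PG_CONTENDED2 as the contention marker and nfs_wait_on_request() (line 615 below) as the sleeping side.
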
542 * @req: pointer to request
544 void nfs_unlock_and_release_request(struct nfs_page *req) in nfs_unlock_and_release_request() argument
546 nfs_unlock_request(req); in nfs_unlock_and_release_request()
547 nfs_release_request(req); in nfs_unlock_and_release_request()
552 * @req: request whose page and lock context are to be released
557 static void nfs_clear_request(struct nfs_page *req) in nfs_clear_request() argument
559 struct page *page = req->wb_page; in nfs_clear_request()
560 struct nfs_lock_context *l_ctx = req->wb_lock_context; in nfs_clear_request()
565 req->wb_page = NULL; in nfs_clear_request()
575 req->wb_lock_context = NULL; in nfs_clear_request()
581 * @req: request to release
585 void nfs_free_request(struct nfs_page *req) in nfs_free_request() argument
587 WARN_ON_ONCE(req->wb_this_page != req); in nfs_free_request()
590 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags)); in nfs_free_request()
591 WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags)); in nfs_free_request()
592 WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags)); in nfs_free_request()
593 WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags)); in nfs_free_request()
594 WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags)); in nfs_free_request()
597 nfs_clear_request(req); in nfs_free_request()
598 nfs_page_free(req); in nfs_free_request()
601 void nfs_release_request(struct nfs_page *req) in nfs_release_request() argument
603 kref_put(&req->wb_kref, nfs_page_group_destroy); in nfs_release_request()
609 * @req: request to wait upon.
615 nfs_wait_on_request(struct nfs_page *req) in nfs_wait_on_request() argument
617 if (!test_bit(PG_BUSY, &req->wb_flags)) in nfs_wait_on_request()
619 set_bit(PG_CONTENDED2, &req->wb_flags); in nfs_wait_on_request()
621 return wait_on_bit_io(&req->wb_flags, PG_BUSY, in nfs_wait_on_request()
630 * @req: this request
632 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
636 struct nfs_page *prev, struct nfs_page *req) in nfs_generic_pg_test() argument
651 if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) * in nfs_generic_pg_test()
655 return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes); in nfs_generic_pg_test()
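Note: the return convention for pg_test hooks is visible here: 0 means @req cannot be coalesced and the descriptor must be flushed first, while a positive value caps how many of the request's bytes may be accepted. The generic test clamps to the space left in the mirror's block-size budget. A sketch of just that clamp (simplified; it omits the page-array overflow check on line 651):

	#include <stddef.h>

	/* Simplified nfs_generic_pg_test(): accept at most the room left in
	 * the pg_bsize budget; 0 tells the caller to flush and retry. */
	static size_t generic_pg_test(size_t pg_bsize, size_t pg_count,
				      size_t req_bytes)
	{
		if (pg_count > pg_bsize)
			return 0;	/* over budget (should not happen) */
		return pg_bsize - pg_count < req_bytes ?
			pg_bsize - pg_count : req_bytes;
	}
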
709 struct nfs_page *req = hdr->req; in nfs_pgio_rpcsetup() local
715 hdr->args.offset = req_offset(req); in nfs_pgio_rpcsetup()
718 hdr->args.pgbase = req->wb_pgbase; in nfs_pgio_rpcsetup()
721 hdr->args.context = get_nfs_open_context(nfs_req_openctx(req)); in nfs_pgio_rpcsetup()
722 hdr->args.lock_context = req->wb_lock_context; in nfs_pgio_rpcsetup()
779 "(req %s/%llu, %u bytes @ offset %llu)\n", in nfs_initiate_pgio()
897 struct nfs_page *req; in nfs_generic_pgio() local
926 req = nfs_list_entry(head->next); in nfs_generic_pgio()
927 nfs_list_move_request(req, &hdr->pages); in nfs_generic_pgio()
929 if (!last_page || last_page != req->wb_page) { in nfs_generic_pgio()
933 *pages++ = last_page = req->wb_page; in nfs_generic_pgio()
1001 struct nfs_page *req) in nfs_pageio_setup_mirroring() argument
1006 mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req); in nfs_pageio_setup_mirroring()
1042 * @req: pointer to nfs_page
1045 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
1052 struct nfs_page *req, in nfs_coalesce_size() argument
1058 if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev))) in nfs_coalesce_size()
1060 flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx; in nfs_coalesce_size()
1064 !nfs_match_lock_context(req->wb_lock_context, in nfs_coalesce_size()
1067 if (req_offset(req) != req_offset(prev) + prev->wb_bytes) in nfs_coalesce_size()
1069 if (req->wb_page == prev->wb_page) { in nfs_coalesce_size()
1070 if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes) in nfs_coalesce_size()
1073 if (req->wb_pgbase != 0 || in nfs_coalesce_size()
1078 return pgio->pg_ops->pg_test(pgio, prev, req); in nfs_coalesce_size()
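Note: nfs_coalesce_size() encodes when two requests may share one RPC: matching open context, compatible lock context, byte contiguity in the file, and page alignment at the boundary; i.e. either @req continues inside the same page as 'prev', or 'prev' ends exactly at a page boundary and @req starts at pgbase 0. The pg_test hook gets the final say. A standalone model of the geometry checks only (helper name and fixed 4096-byte page are invented for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	#define MODEL_PAGE_SIZE 4096u

	struct req {			/* geometry fields of struct nfs_page */
		uint64_t offset;	/* file offset, as req_offset() */
		unsigned int pgbase;	/* start within the backing page */
		unsigned int bytes;	/* request length */
		const void *page;	/* backing page identity */
	};

	static bool geometry_allows_coalesce(const struct req *prev,
					     const struct req *req)
	{
		/* must continue exactly where prev left off in the file */
		if (req->offset != prev->offset + prev->bytes)
			return false;
		if (req->page == prev->page)	/* same page: stay contiguous */
			return req->pgbase == prev->pgbase + prev->bytes;
		/* new page: prev must fill its page, req must start at 0 */
		return req->pgbase == 0 &&
		       prev->pgbase + prev->bytes == MODEL_PAGE_SIZE;
	}
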
1084 * @req: request
1086 * If the request 'req' was successfully coalesced into the existing list
1087 * of pages 'desc', it returns the size of req.
1091 struct nfs_page *req) in nfs_pageio_do_add_request() argument
1101 desc->pg_ops->pg_init(desc, req); in nfs_pageio_do_add_request()
1104 mirror->pg_base = req->wb_pgbase; in nfs_pageio_do_add_request()
1107 if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) { in nfs_pageio_do_add_request()
1115 size = nfs_coalesce_size(prev, req, desc); in nfs_pageio_do_add_request()
1116 if (size < req->wb_bytes) in nfs_pageio_do_add_request()
1118 nfs_list_move_request(req, &mirror->pg_list); in nfs_pageio_do_add_request()
1119 mirror->pg_count += req->wb_bytes; in nfs_pageio_do_add_request()
1120 return req->wb_bytes; in nfs_pageio_do_add_request()
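Note: the accounting above is the partial-coalesce contract. When nfs_coalesce_size() reports fewer bytes than wb_bytes, the request is left where it is and the partial size is returned so the caller can split it; otherwise the request moves onto the mirror's pg_list and pg_count grows by the full wb_bytes.
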
1146 struct nfs_page *req) in nfs_pageio_cleanup_request() argument
1150 nfs_list_move_request(req, &head); in nfs_pageio_cleanup_request()
1157 * @req: request
1160 * same page group. If so, it will submit @req as the last one, to ensure
1161 * the pointer to @req is still valid in case of failure.
1163 * Returns true if the request 'req' was successfully coalesced into the
1167 struct nfs_page *req) in __nfs_pageio_add_request() argument
1173 nfs_page_group_lock(req); in __nfs_pageio_add_request()
1175 subreq = req; in __nfs_pageio_add_request()
1181 if (subreq == req) in __nfs_pageio_add_request()
1183 req->wb_pgbase += size; in __nfs_pageio_add_request()
1184 req->wb_bytes -= size; in __nfs_pageio_add_request()
1185 req->wb_offset += size; in __nfs_pageio_add_request()
1186 subreq_size = req->wb_bytes; in __nfs_pageio_add_request()
1187 subreq = req; in __nfs_pageio_add_request()
1190 if (WARN_ON_ONCE(subreq != req)) { in __nfs_pageio_add_request()
1191 nfs_page_group_unlock(req); in __nfs_pageio_add_request()
1193 subreq = req; in __nfs_pageio_add_request()
1194 subreq_size = req->wb_bytes; in __nfs_pageio_add_request()
1195 nfs_page_group_lock(req); in __nfs_pageio_add_request()
1199 nfs_page_group_unlock(req); in __nfs_pageio_add_request()
1205 nfs_page_group_lock(req); in __nfs_pageio_add_request()
1208 subreq = nfs_create_subreq(req, req->wb_pgbase, in __nfs_pageio_add_request()
1209 req->wb_offset, size); in __nfs_pageio_add_request()
1215 nfs_page_group_unlock(req); in __nfs_pageio_add_request()
1219 nfs_page_group_unlock(req); in __nfs_pageio_add_request()
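Note: __nfs_pageio_add_request() consumes a request in slices. When only part fits, the kernel carves a subrequest covering the accepted front via nfs_create_subreq(), submits it, then advances the parent past that slice (pgbase and offset grow, bytes shrinks) and loops on the remainder. A minimal model of the trim-and-retry loop; do_add_fn stands in for nfs_pageio_do_add_request(), and locking, error paths, and the recoalesce-on-zero case are all omitted:

	#include <stdint.h>

	struct req {
		uint64_t wb_offset;	/* file offset */
		unsigned int wb_pgbase;	/* offset within the page */
		unsigned int wb_bytes;	/* bytes still to submit */
	};

	/* Hypothetical stand-in: returns how many bytes were accepted. */
	typedef unsigned int (*do_add_fn)(struct req *req);

	static void add_request_sliced(struct req *req, do_add_fn do_add)
	{
		for (;;) {
			unsigned int size = do_add(req);

			if (size == req->wb_bytes)
				break;	/* whole remainder accepted */
			if (size == 0)
				break;	/* nothing fit: kernel flushes, retries */
			/* partial fit: trim the submitted slice off the front */
			req->wb_pgbase += size;
			req->wb_offset += size;
			req->wb_bytes -= size;
		}
	}
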
1236 struct nfs_page *req; in nfs_do_recoalesce() local
1238 req = list_first_entry(&head, struct nfs_page, wb_list); in nfs_do_recoalesce()
1239 if (__nfs_pageio_add_request(desc, req)) in nfs_do_recoalesce()
1253 struct nfs_page *req) in nfs_pageio_add_request_mirror() argument
1258 ret = __nfs_pageio_add_request(desc, req); in nfs_pageio_add_request_mirror()
1285 struct nfs_page *req) in nfs_pageio_add_request() argument
1291 pgbase = req->wb_pgbase; in nfs_pageio_add_request()
1292 offset = req->wb_offset; in nfs_pageio_add_request()
1293 bytes = req->wb_bytes; in nfs_pageio_add_request()
1295 nfs_pageio_setup_mirroring(desc, req); in nfs_pageio_add_request()
1301 nfs_page_group_lock(req); in nfs_pageio_add_request()
1303 dupreq = nfs_create_subreq(req, in nfs_pageio_add_request()
1306 nfs_page_group_unlock(req); in nfs_pageio_add_request()
1318 if (!nfs_pageio_add_request_mirror(desc, req)) in nfs_pageio_add_request()
1374 struct nfs_page *req = nfs_list_entry(pages.next); in nfs_pageio_resend() local
1376 if (!nfs_pageio_add_request(desc, req)) in nfs_pageio_resend()