// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

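/*
 * Hand back a zero-filled, uptodate page when there is nothing left to
 * read (the page lies entirely beyond the end of the file).
 */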
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

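/*
 * Initialise a page I/O descriptor for reads. If a pNFS layout driver
 * is active and we are not forced through the MDS, use its read ops.
 */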
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

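/*
 * Flush any remaining requests out of the descriptor and account the
 * bytes and pages read in the inode's NFS I/O statistics.
 */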
static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

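/*
 * Reset the descriptor to perform plain RPC-based reads through the MDS.
 */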
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

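/*
 * Release a read request. Once the whole page group is done, push
 * uptodate data into fscache and unlock the page.
 */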
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
	struct page *page = req->wb_page;

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
		SetPageError(page);
	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		struct address_space *mapping = page_file_mapping(page);

		if (PageUptodate(page))
			nfs_readpage_to_fscache(inode, page, 0);
		else if (!PageError(page) && !PagePrivate(page))
			generic_error_remove_page(mapping, page);
		unlock_page(page);
	}
	nfs_release_request(req);
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
};

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

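/*
 * Completion handler: zero out any ranges the server did not return
 * when we hit EOF, mark page groups uptodate where the data is good,
 * and release every request attached to the header.
 */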
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
out:
	hdr->release(hdr);
}

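/*
 * Set up the RPC message for a read; swapfile I/O gets the special
 * RPC swap flags.
 */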
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(hdr);
}

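/*
 * Error cleanup: release every queued request with the given error.
 */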
static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

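/*
 * Handle a short read: fail the I/O if no progress was made, ask non
 * RPC-based layout drivers to retry through the MDS, or restart the
 * RPC call for the remaining bytes.
 */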
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

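/*
 * Clamp the good byte count when the server reports EOF; otherwise a
 * reply shorter than requested is treated as a short read and retried.
 */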
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

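/*
 * Build an nfs_page request for this page, zero-filling any tail that
 * lies beyond the end of the file, and queue it on the descriptor.
 */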
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = data;
	struct inode *inode = page_file_mapping(page)->host;
	unsigned int rsize = NFS_SERVER(inode)->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);

	new = nfs_create_request(desc->ctx, page, 0, aligned_len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(&desc->pgio, new)) {
		nfs_list_remove_request(new);
		error = desc->pgio.pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	unlock_page(page);
out:
	return error;
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_readdesc desc;
	struct inode *inode = page_file_mapping(page)->host;
	int ret;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_SIZE, page_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	ret = nfs_wb_page(inode, page);
	if (ret)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		ret = -EBADF;
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			goto out_unlock;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&desc.ctx->error, 0);
	if (!IS_SYNC(inode)) {
		ret = nfs_readpage_from_fscache(desc.ctx, inode, page);
		if (ret == 0)
			goto out_wait;
	}

	nfs_pageio_init_read(&desc.pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = readpage_async_filler(&desc, page);
	if (ret)
		goto out;

	nfs_pageio_complete_read(&desc.pgio);
	ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
out_wait:
	if (!ret) {
		ret = wait_on_page_locked_killable(page);
		if (!PageUptodate(page) && !ret)
			ret = xchg(&desc.ctx->error, 0);
	}
out:
	put_nfs_open_context(desc.ctx);
	return ret;
out_unlock:
	unlock_page(page);
	return ret;
}

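/*
 * Read a list of pages over NFS: try fscache first, then fall back to
 * asynchronous RPC reads for whatever remains.
 */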
int nfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_readdesc desc;
	struct inode *inode = mapping->host;
	int ret;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
			inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			goto out;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&desc.pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete_read(&desc.pgio);

read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};