/*
 * linux/fs/nfs/blocklayout/blocklayout.c
 *
 * Module for the NFSv4.1 pNFS block layout driver.
 *
 * Copyright (c) 2006 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@citi.umich.edu>
 * Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out of or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

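/*
 * Extent-state cheat sheet (informal): PNFS_BLOCK_NONE_DATA is a hole
 * with no backing storage at all, while PNFS_BLOCK_INVALID_DATA has
 * storage allocated but nothing written yet; until such an extent is
 * tagged (be_tag set), reading it must return zeroes, so both cases
 * are treated as holes here.
 */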
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}
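
/*
 * Lifecycle sketch: alloc_parallel() starts the refcount at 1 (the
 * submitter's reference), bl_submit_bio() takes one reference per bio
 * submitted, and each bio completion drops one.  The submitting path
 * drops its initial reference once all bios are issued, so
 * pnfs_callback runs exactly once, after submission and all I/O have
 * completed.
 */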

static struct bio *
bl_submit_bio(struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			bio_op(bio) == READ ? "read" : "write",
			bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(bio);
	}
	return NULL;
}

static struct bio *bl_alloc_init_bio(unsigned int npg,
		struct block_device *bdev, sector_t disk_sector,
		bio_end_io_t end_io, struct parallel_io *par)
{
	struct bio *bio;

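	/* a bio holds only a bounded number of segments; clamp the hint */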
	npg = bio_max_segs(npg);
	bio = bio_alloc(GFP_NOIO, npg);
	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio_set_dev(bio, bdev);
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map)
{
	return offset >= map->start && offset < map->start + map->len;
}

static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be, bio_end_io_t end_io,
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;
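	/*
	 * e.g. an extent mapping file sector 8 (be_f_offset) to volume
	 * sector 1024 (be_v_offset) turns file sector 16 into volume
	 * sector 16 + 1024 - 8 = 1032, i.e. byte 1032 << 9 = 528384.
	 */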

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (!offset_in_map(disk_addr, map)) {
		if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
		bio_set_op_attrs(bio, rw, 0);
	}
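	/* if the page will not fit, submit what we have and start anew */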
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(bio);
		goto retry;
	}
	return bio;
}

static void bl_mark_devices_unavailable(struct nfs_pgio_header *header, bool rw)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	size_t bytes_left = header->args.count;
	sector_t isect, extent_length = 0;
	struct pnfs_block_extent be;

	isect = header->args.offset >> SECTOR_SHIFT;
	bytes_left += header->args.offset - (isect << SECTOR_SHIFT);
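	/*
	 * isect was rounded down to a sector boundary, so grow bytes_left
	 * by the remainder to keep covering the full original range.
	 */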

	while (bytes_left > 0) {
		if (!ext_tree_lookup(bl, isect, &be, rw))
			return;
		extent_length = be.be_length - (isect - be.be_f_offset);
		nfs4_mark_deviceid_unavailable(be.be_device);
		isect += extent_length;
		if (bytes_left > extent_length << SECTOR_SHIFT)
			bytes_left -= extent_length << SECTOR_SHIFT;
		else
			bytes_left = 0;
	}
}

static void bl_end_io_read(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;

	if (bio->bi_status) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
		bl_mark_devices_unavailable(header, false);
	}

	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset = header->args.pgbase, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_SIZE)
				pg_len = PAGE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_SIZE;
		}

		if (is_hole(&be)) {
			bio = bl_submit_bio(bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
		pg_offset = 0;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_end_io_write(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;
	struct nfs_pgio_header *header = par->data;

	if (bio->bi_status) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
		bl_mark_devices_unavailable(header, true);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Scheduled for execution from bl_end_par_io_write; marks the written
 * sectors in the extent tree and extends the commit list.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
		container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
		u64 lwb = hdr->args.offset + hdr->args.count;
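		/*
		 * e.g. (4K pages) a 3000-byte write at offset 1000 lies
		 * within the first page: [0, 4096) is marked written and
		 * the last written byte offset (lwb) is 4000.
		 */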

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT, lwb);
	}

	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * so the I/O is redone through regular NFS.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}

			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree_rcu(bl, bl_layout.plh_rcu);
}

static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode,
		gfp_t gfp_flags, bool is_scsi_layout)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	bl->bl_scsi_layout = is_scsi_layout;
	return &bl->bl_layout;
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, false);
}

static struct pnfs_layout_hdr *sl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, true);
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};
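
/*
 * e.g. a valid RW layout may pair an INVALID_DATA extent at [0, 8)
 * with a READ_DATA extent over the same range: the first advances
 * lv->start to 8, and the second (a copy-on-write source) passes the
 * checks below and sets lv->inval and lv->cowread to 8.
 */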

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}

static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
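	/* on-the-wire values are byte offsets; 0x1ff catches anything
	 * not aligned to a 512-byte sector */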
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}

static struct nfs4_deviceid_node *
bl_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	struct nfs4_deviceid_node *node;
	unsigned long start, end;

retry:
	node = nfs4_find_get_deviceid(server, id, cred, gfp_mask);
	if (!node)
		return ERR_PTR(-ENODEV);

	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags) == 0)
		return node;

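	/*
	 * The device is marked unavailable.  If that mark is older than
	 * the retry window, drop the stale cache entry and look the
	 * device up again; otherwise fail with -ENODEV.
	 */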
	end = jiffies;
	start = end - PNFS_DEVICE_RETRY_TIMEOUT;
	if (!time_in_range(node->timestamp_unavailable, start, end)) {
		nfs4_delete_deviceid(node->ld, node->nfs_client, id);
		goto retry;
	}
	return ERR_PTR(-ENODEV);
}

static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

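	/*
	 * One extent on the wire is a deviceid followed by 28 bytes:
	 * three 64-bit sector numbers (file offset, length, volume
	 * offset) plus a 32-bit extent state.
	 */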
	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	be->be_device = bl_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (IS_ERR(be->be_device)) {
		error = PTR_ERR(be->be_device);
		goto out_free_be;
	}

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	error = -EIO;
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}

static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
		gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_page(&xdr, scratch);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in temporary staging area
	 * until whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	switch (status) {
	case -ENODEV:
		/* Our extent block devices are unavailable */
		set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags);
		fallthrough;
	case 0:
		return lseg;
	default:
		kfree(lseg);
		return ERR_PTR(status);
	}
}

static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}

static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment, bool is_write)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (is_write &&
	    (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);

	if (pgio->pg_lseg &&
	    test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
		pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
		pnfs_set_lo_fail(pgio->pg_lseg);
		nfs_pageio_reset_read_mds(pgio);
	}
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize the common case of a write from 0 to the end of the file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (end != inode->i_mapping->nrpages) {
		rcu_read_lock();
		end = page_cache_next_miss(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_SHIFT);
	else
		return (end - idx) << PAGE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);

	if (pgio->pg_lseg &&
	    test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
		pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
		pnfs_set_lo_fail(pgio->pg_lseg);
		nfs_pageio_reset_write_mds(pgio);
	}
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE, true))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_LAYOUTRET_ON_ERROR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static struct pnfs_layoutdriver_type scsilayout_type = {
	.id				= LAYOUT_SCSI,
	.name				= "LAYOUT_SCSI",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_LAYOUTRET_ON_ERROR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= sl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = bl_init_pipefs();
	if (ret)
		goto out;

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out_cleanup_pipe;

	ret = pnfs_register_layoutdriver(&scsilayout_type);
	if (ret)
		goto out_unregister_block;
	return 0;

out_unregister_block:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out_cleanup_pipe:
	bl_cleanup_pipefs();
out:
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
		__func__);

	pnfs_unregister_layoutdriver(&scsilayout_type);
	pnfs_unregister_layoutdriver(&blocklayout_type);
	bl_cleanup_pipefs();
}

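/* layouttype4 values: 3 is block (RFC 5663), 5 is SCSI (RFC 8154) */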
MODULE_ALIAS("nfs-layouttype4-3");
MODULE_ALIAS("nfs-layouttype4-5");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);