/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
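		/* the read copier will spot this state and attempt to
		 * reissue the read via cachefiles_read_reissue() */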
	}

	/* remove from the waitqueue */
	list_del(&wait->entry);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(op);
	fscache_put_retrieval(op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

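	/* copy a limited batch of pages per invocation, then requeue
	 * ourselves so the thread pool isn't monopolised (see "maxed out"
	 * below) */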
	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the backing file page corresponding to the given netfs page
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
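	/* ownership of the monitor has passed to the page wait queue and the
	 * read copier; clear the pointer so the out: path doesn't free it */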
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
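	/* (e.g. 4KB pages (PAGE_SHIFT 12) on a filesystem with 1KB blocks
	 * (s_blocksize_bits 10) give shift = 2, so page index N starts at
	 * backing block N << 2) */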
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the backing file pages corresponding to the given set of netfs pages
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		get_page(netpage);
		monitor->netfs_page = netpage;

		get_page(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
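		/* this monitor now belongs to the wait queue and the copier;
		 * a fresh one will be allocated for the next netfs page */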
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		put_page(backpage);
		backpage = NULL;

		put_page(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		put_page(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		put_page(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		put_page(newpage);
	if (netpage)
		put_page(netpage);
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		put_page(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
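	/* (e.g. 4KB pages on a 1KB-block filesystem give shift = 2) */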
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 * - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec);

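		/* pagevec_add() returns the number of slots left in the
		 * pagevec; 0 means it is now full, so flush the batch (which
		 * also reinitialises the pagevec) and carry on */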
		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

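	/* byte position of the page in the backing file; the loff_t cast
	 * stops the shift overflowing on 32-bit for large files */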
	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

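	/* trim the write if EOF falls partway through the page (e.g. with
	 * eof 0x1a00 and pos 0x1000, only 0xa00 bytes get written) */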
	len = PAGE_SIZE;
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}

	data = kmap(page);
	ret = __kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
	__releases(&object->fscache.cookie->lock)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;

	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lu}", object, page->index);

	spin_unlock(&object->fscache.cookie->lock);
}