// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_io.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Swap reorganised 29.12.95,
 * Asynchronous swapping added 30.12.95. Stephen Tweedie
 * Removed race in async swapping. 14.4.1996. Bruno Haible
 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 * Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <asm/pgtable.h>

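/*
 * Allocate a bio for swap I/O on @page: look up the backing device and
 * starting slot via map_swap_page(), convert the page-sized swap offset
 * to a 512-byte sector, and add every subpage of a (possibly huge) page.
 */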
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	int i, nr = hpage_nr_pages(page);
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr);
	if (bio) {
		struct block_device *bdev;

		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		for (i = 0; i < nr; i++)
			bio_add_page(bio, page + i, PAGE_SIZE, 0);
		VM_BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE * nr);
	}
	return bio;
}

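/*
 * Completion handler for swap-out bios.  On error the page is redirtied
 * so its contents are not lost, and PG_reclaim is cleared; either way
 * writeback is ended and the bio reference dropped.
 */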
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim so that end_page_writeback() below
		 * does not call rotate_reclaimable_page() and move the
		 * failed page to the tail of the LRU for quick reclaim.
		 */
		set_page_dirty(page);
		pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

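/*
 * Tell the backing block driver that the swap slot for @page can be
 * freed.  Used by drivers such as zram, where keeping the compressed
 * copy alongside the uncompressed in-core page would waste memory.
 */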
static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page. So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (!(sis->flags & SWP_BLKDEV))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (e.g. zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory. So let's free the zram-owned memory
	 * and mark the VM-owned decompressed page *dirty*,
	 * so that it gets written out to the swap device again
	 * if we later wish to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	if (disk->fops->swap_slot_free_notify) {
		swp_entry_t entry;
		unsigned long offset;

		entry.val = page_private(page);
		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev,
						  offset);
	}
}

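/*
 * Completion handler for swap-in bios.  Marks the page up to date on
 * success, unlocks it, and wakes the task that submitted the read in
 * case it is polling synchronously in swap_readpage().
 */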
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	wake_up_process(waiter);
	put_task_struct(waiter);
}

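/*
 * Build the swap extent list for a swapfile by probing its blocks with
 * bmap().  Only PAGE_SIZE-aligned, physically contiguous page-sized runs
 * are accepted; a file with holes is rejected with -EINVAL.  Returns the
 * number of extents added and stores the device span in *span.
 */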
int generic_swapfile_activate(struct swap_info_struct *sis,
			      struct file *swap_file,
			      sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
	       page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
		     block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

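/*
 * Write one swap cache page out: hand the page to frontswap if it will
 * take it, otherwise fall through to the bio-based __swap_writepage().
 */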
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

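/*
 * The swap offset of a swap cache page, expressed in 512-byte sectors
 * relative to the start of the swap area.
 */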
static sector_t swap_page_sector(struct page *page)
{
	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
}

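/* Account a swap-out in vmstat; THP swap-out is counted separately. */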
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, hpage_nr_pages(page));
}

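/*
 * Submit the actual swap-out I/O.  SWP_FILE swap (e.g. swap over NFS)
 * goes through the filesystem's ->direct_IO; block-backed swap first
 * tries the synchronous bdev_write_page() path and falls back to a
 * regular bio on failure.
 */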
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		     bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (sis->flags & SWP_FILE) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page(), and rate-limit the
			 * messages, but do not flag PageError like the
			 * normal direct-to-bio case, as the failure may
			 * be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
	bio_associate_blkcg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
out:
	return ret;
}

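/*
 * Read a page in from swap: try frontswap first, then the filesystem's
 * ->readpage for SWP_FILE swapfiles, then bdev_read_page(), and finally
 * a regular bio.  When @synchronous, the calling task is kept alive and
 * polls the block queue until the bio completes.
 */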
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	blk_qc_t qc;
	struct gendisk *disk;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);
	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (sis->flags & SWP_FILE) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		return ret;
	}

	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
	if (!ret) {
		if (trylock_page(page)) {
			swap_slot_free_notify(page);
			unlock_page(page);
		}

		count_vm_event(PSWPIN);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	disk = bio->bi_disk;
	/*
	 * Keep this task valid during swap readpage because the oom killer
	 * may attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	bio->bi_private = current;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	count_vm_event(PSWPIN);
	bio_get(bio);
	qc = submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!blk_poll(disk->queue, qc))
			break;
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	return ret;
}

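/*
 * Dirty a swap cache page.  SWP_FILE swap goes through the filesystem's
 * own ->set_page_dirty so that filesystem-private state stays consistent;
 * block-backed swap just needs the dirty bit and no writeback accounting.
 */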
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FILE) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}