Lines matching refs: si (identifier search over mm/swapfile.c; each hit shows the source line number, the matched fragment, and the enclosing function)
127 static int __try_to_reclaim_swap(struct swap_info_struct *si, in __try_to_reclaim_swap() argument
130 swp_entry_t entry = swp_entry(si->type, offset); in __try_to_reclaim_swap()
171 static int discard_swap(struct swap_info_struct *si) in discard_swap() argument
179 se = first_se(si); in discard_swap()
183 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
194 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
228 static void discard_swap_cluster(struct swap_info_struct *si, in discard_swap_cluster() argument
231 struct swap_extent *se = offset_to_swap_extent(si, start_page); in discard_swap_cluster()
245 if (blkdev_issue_discard(si->bdev, start_block, in discard_swap_cluster()
338 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, in lock_cluster() argument
343 ci = si->cluster_info; in lock_cluster()
362 struct swap_info_struct *si, unsigned long offset) in lock_cluster_or_swap_info() argument
367 ci = lock_cluster(si, offset); in lock_cluster_or_swap_info()
370 spin_lock(&si->lock); in lock_cluster_or_swap_info()
375 static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si, in unlock_cluster_or_swap_info() argument
381 spin_unlock(&si->lock); in unlock_cluster_or_swap_info()
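
The three helpers above (lines 338-381) form the file's central locking pattern: take the fine-grained per-cluster spinlock when the device has cluster_info (the SSD layout), otherwise fall back to the coarse device-wide si->lock. A minimal reconstruction from the fragments; the index arithmetic and the exact unlock shape are assumptions:

    static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
                                                         unsigned long offset)
    {
            struct swap_cluster_info *ci;

            ci = si->cluster_info;
            if (ci) {
                    /* one swap_cluster_info per SWAPFILE_CLUSTER slots (assumed) */
                    ci += offset / SWAPFILE_CLUSTER;
                    spin_lock(&ci->lock);
            }
            return ci;
    }

    static inline struct swap_cluster_info *lock_cluster_or_swap_info(
            struct swap_info_struct *si, unsigned long offset)
    {
            struct swap_cluster_info *ci;

            /* Per-cluster lock if available... */
            ci = lock_cluster(si, offset);
            if (!ci)
                    /* ...otherwise the whole-device lock. */
                    spin_lock(&si->lock);
            return ci;
    }

    static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
                                                   struct swap_cluster_info *ci)
    {
            if (ci)
                    spin_unlock(&ci->lock);   /* pairs with lock_cluster() */
            else
                    spin_unlock(&si->lock);
    }
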
440 static void swap_cluster_schedule_discard(struct swap_info_struct *si, in swap_cluster_schedule_discard() argument
449 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_cluster_schedule_discard()
452 cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx); in swap_cluster_schedule_discard()
454 schedule_work(&si->discard_work); in swap_cluster_schedule_discard()
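
Lines 440-454 show the producer side of deferred discard: poison the cluster's swap_map range so scanners skip it, queue the cluster, and kick the worker. A sketch assembled from those fragments (SWAP_MAP_BAD as the fill byte is an assumption, consistent with the matching memset at line 486 that clears it afterwards):

    static void swap_cluster_schedule_discard(struct swap_info_struct *si,
                                              unsigned int idx)
    {
            /*
             * Mark every slot in the cluster as occupied so the slot
             * scanners leave it alone until the discard completes.
             */
            memset(si->swap_map + idx * SWAPFILE_CLUSTER,
                   SWAP_MAP_BAD, SWAPFILE_CLUSTER);

            cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
            schedule_work(&si->discard_work);
    }
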
457 static void __free_cluster(struct swap_info_struct *si, unsigned long idx) in __free_cluster() argument
459 struct swap_cluster_info *ci = si->cluster_info; in __free_cluster()
462 cluster_list_add_tail(&si->free_clusters, ci, idx); in __free_cluster()
469 static void swap_do_scheduled_discard(struct swap_info_struct *si) in swap_do_scheduled_discard() argument
474 info = si->cluster_info; in swap_do_scheduled_discard()
476 while (!cluster_list_empty(&si->discard_clusters)) { in swap_do_scheduled_discard()
477 idx = cluster_list_del_first(&si->discard_clusters, info); in swap_do_scheduled_discard()
478 spin_unlock(&si->lock); in swap_do_scheduled_discard()
480 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
483 spin_lock(&si->lock); in swap_do_scheduled_discard()
484 ci = lock_cluster(si, idx * SWAPFILE_CLUSTER); in swap_do_scheduled_discard()
485 __free_cluster(si, idx); in swap_do_scheduled_discard()
486 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
494 struct swap_info_struct *si; in swap_discard_work() local
496 si = container_of(work, struct swap_info_struct, discard_work); in swap_discard_work()
498 spin_lock(&si->lock); in swap_discard_work()
499 swap_do_scheduled_discard(si); in swap_discard_work()
500 spin_unlock(&si->lock); in swap_discard_work()
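
The hits at 494-500 cover essentially the entire worker: recover the embedding swap_info_struct with container_of() and drain the discard queue under si->lock.

    static void swap_discard_work(struct work_struct *work)
    {
            struct swap_info_struct *si;

            si = container_of(work, struct swap_info_struct, discard_work);

            spin_lock(&si->lock);
            swap_do_scheduled_discard(si);
            spin_unlock(&si->lock);
    }
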
503 static void alloc_cluster(struct swap_info_struct *si, unsigned long idx) in alloc_cluster() argument
505 struct swap_cluster_info *ci = si->cluster_info; in alloc_cluster()
507 VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx); in alloc_cluster()
508 cluster_list_del_first(&si->free_clusters, ci); in alloc_cluster()
512 static void free_cluster(struct swap_info_struct *si, unsigned long idx) in free_cluster() argument
514 struct swap_cluster_info *ci = si->cluster_info + idx; in free_cluster()
522 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == in free_cluster()
524 swap_cluster_schedule_discard(si, idx); in free_cluster()
528 __free_cluster(si, idx); in free_cluster()
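
free_cluster (512-528) decides between the synchronous and deferred free paths: only when the device is both writable and flagged for per-page discard does the cluster go through the async discard machinery. Reconstructed from the fragments above:

    static void free_cluster(struct swap_info_struct *si, unsigned long idx)
    {
            struct swap_cluster_info *ci = si->cluster_info + idx;

            /*
             * Both SWP_WRITEOK and SWP_PAGE_DISCARD must be set; a
             * device mid-swapoff loses SWP_WRITEOK and skips discard.
             */
            if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
                (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
                    swap_cluster_schedule_discard(si, idx);
                    return;
            }

            __free_cluster(si, idx);
    }
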
576 scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, in scan_swap_map_ssd_cluster_conflict() argument
583 conflict = !cluster_list_empty(&si->free_clusters) && in scan_swap_map_ssd_cluster_conflict()
584 offset != cluster_list_first(&si->free_clusters) && in scan_swap_map_ssd_cluster_conflict()
585 cluster_is_free(&si->cluster_info[offset]); in scan_swap_map_ssd_cluster_conflict()
590 percpu_cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_ssd_cluster_conflict()
599 static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, in scan_swap_map_try_ssd_cluster() argument
608 cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_try_ssd_cluster()
610 if (!cluster_list_empty(&si->free_clusters)) { in scan_swap_map_try_ssd_cluster()
611 cluster->index = si->free_clusters.head; in scan_swap_map_try_ssd_cluster()
614 } else if (!cluster_list_empty(&si->discard_clusters)) { in scan_swap_map_try_ssd_cluster()
619 swap_do_scheduled_discard(si); in scan_swap_map_try_ssd_cluster()
620 *scan_base = *offset = si->cluster_next; in scan_swap_map_try_ssd_cluster()
633 max = min_t(unsigned long, si->max, in scan_swap_map_try_ssd_cluster()
639 ci = lock_cluster(si, tmp); in scan_swap_map_try_ssd_cluster()
641 if (!si->swap_map[tmp]) { in scan_swap_map_try_ssd_cluster()
673 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, in swap_range_alloc() argument
678 if (offset == si->lowest_bit) in swap_range_alloc()
679 si->lowest_bit += nr_entries; in swap_range_alloc()
680 if (end == si->highest_bit) in swap_range_alloc()
681 si->highest_bit -= nr_entries; in swap_range_alloc()
682 si->inuse_pages += nr_entries; in swap_range_alloc()
683 if (si->inuse_pages == si->pages) { in swap_range_alloc()
684 si->lowest_bit = si->max; in swap_range_alloc()
685 si->highest_bit = 0; in swap_range_alloc()
686 del_from_avail_list(si); in swap_range_alloc()
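
Lines 678-686 give nearly the whole of swap_range_alloc's bookkeeping: shrink the [lowest_bit, highest_bit] scan window, bump inuse_pages, and drop a now-full device off the avail lists. Put back together:

    static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
                                 unsigned int nr_entries)
    {
            unsigned int end = offset + nr_entries - 1;

            if (offset == si->lowest_bit)
                    si->lowest_bit += nr_entries;
            if (end == si->highest_bit)
                    si->highest_bit -= nr_entries;
            si->inuse_pages += nr_entries;
            if (si->inuse_pages == si->pages) {
                    /* Device is full: collapse the window, stop offering it. */
                    si->lowest_bit = si->max;
                    si->highest_bit = 0;
                    del_from_avail_list(si);
            }
    }
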
702 static void swap_range_free(struct swap_info_struct *si, unsigned long offset, in swap_range_free() argument
708 if (offset < si->lowest_bit) in swap_range_free()
709 si->lowest_bit = offset; in swap_range_free()
710 if (end > si->highest_bit) { in swap_range_free()
711 bool was_full = !si->highest_bit; in swap_range_free()
713 si->highest_bit = end; in swap_range_free()
714 if (was_full && (si->flags & SWP_WRITEOK)) in swap_range_free()
715 add_to_avail_list(si); in swap_range_free()
718 si->inuse_pages -= nr_entries; in swap_range_free()
719 if (si->flags & SWP_BLKDEV) in swap_range_free()
721 si->bdev->bd_disk->fops->swap_slot_free_notify; in swap_range_free()
725 frontswap_invalidate_page(si->type, offset); in swap_range_free()
727 swap_slot_free_notify(si->bdev, offset); in swap_range_free()
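
swap_range_free (708-727) is the mirror image: re-grow the scan window, re-add a previously full device, then notify frontswap and, for block devices such as zram, the driver's per-slot free hook. A sketch under the assumption that the freed count is also returned to a global nr_swap_pages counter:

    static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
                                unsigned int nr_entries)
    {
            unsigned long end = offset + nr_entries - 1;
            void (*swap_slot_free_notify)(struct block_device *, unsigned long);

            if (offset < si->lowest_bit)
                    si->lowest_bit = offset;
            if (end > si->highest_bit) {
                    bool was_full = !si->highest_bit;

                    si->highest_bit = end;
                    if (was_full && (si->flags & SWP_WRITEOK))
                            add_to_avail_list(si);
            }
            atomic_long_add(nr_entries, &nr_swap_pages);      /* assumed counter */
            si->inuse_pages -= nr_entries;
            if (si->flags & SWP_BLKDEV)
                    swap_slot_free_notify =
                            si->bdev->bd_disk->fops->swap_slot_free_notify;
            else
                    swap_slot_free_notify = NULL;
            while (offset <= end) {
                    frontswap_invalidate_page(si->type, offset);
                    if (swap_slot_free_notify)
                            swap_slot_free_notify(si->bdev, offset);
                    offset++;
            }
    }
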
732 static int scan_swap_map_slots(struct swap_info_struct *si, in scan_swap_map_slots() argument
757 si->flags += SWP_SCANNING; in scan_swap_map_slots()
758 scan_base = offset = si->cluster_next; in scan_swap_map_slots()
761 if (si->cluster_info) { in scan_swap_map_slots()
762 if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) in scan_swap_map_slots()
768 if (unlikely(!si->cluster_nr--)) { in scan_swap_map_slots()
769 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { in scan_swap_map_slots()
770 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
774 spin_unlock(&si->lock); in scan_swap_map_slots()
782 scan_base = offset = si->lowest_bit; in scan_swap_map_slots()
786 for (; last_in_cluster <= si->highest_bit; offset++) { in scan_swap_map_slots()
787 if (si->swap_map[offset]) in scan_swap_map_slots()
790 spin_lock(&si->lock); in scan_swap_map_slots()
792 si->cluster_next = offset; in scan_swap_map_slots()
793 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
803 spin_lock(&si->lock); in scan_swap_map_slots()
804 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
808 if (si->cluster_info) { in scan_swap_map_slots()
809 while (scan_swap_map_ssd_cluster_conflict(si, offset)) { in scan_swap_map_slots()
813 if (!scan_swap_map_try_ssd_cluster(si, &offset, in scan_swap_map_slots()
818 if (!(si->flags & SWP_WRITEOK)) in scan_swap_map_slots()
820 if (!si->highest_bit) in scan_swap_map_slots()
822 if (offset > si->highest_bit) in scan_swap_map_slots()
823 scan_base = offset = si->lowest_bit; in scan_swap_map_slots()
825 ci = lock_cluster(si, offset); in scan_swap_map_slots()
827 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { in scan_swap_map_slots()
830 spin_unlock(&si->lock); in scan_swap_map_slots()
831 swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); in scan_swap_map_slots()
832 spin_lock(&si->lock); in scan_swap_map_slots()
839 if (si->swap_map[offset]) { in scan_swap_map_slots()
846 si->swap_map[offset] = usage; in scan_swap_map_slots()
847 inc_cluster_info_page(si, si->cluster_info, offset); in scan_swap_map_slots()
850 swap_range_alloc(si, offset, 1); in scan_swap_map_slots()
851 si->cluster_next = offset + 1; in scan_swap_map_slots()
852 slots[n_ret++] = swp_entry(si->type, offset); in scan_swap_map_slots()
855 if ((n_ret == nr) || (offset >= si->highest_bit)) in scan_swap_map_slots()
864 spin_unlock(&si->lock); in scan_swap_map_slots()
866 spin_lock(&si->lock); in scan_swap_map_slots()
871 if (si->cluster_info) { in scan_swap_map_slots()
872 if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) in scan_swap_map_slots()
881 if (si->cluster_nr && !si->swap_map[offset]) { in scan_swap_map_slots()
882 --si->cluster_nr; in scan_swap_map_slots()
887 si->flags -= SWP_SCANNING; in scan_swap_map_slots()
891 spin_unlock(&si->lock); in scan_swap_map_slots()
892 while (++offset <= si->highest_bit) { in scan_swap_map_slots()
893 if (!si->swap_map[offset]) { in scan_swap_map_slots()
894 spin_lock(&si->lock); in scan_swap_map_slots()
897 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { in scan_swap_map_slots()
898 spin_lock(&si->lock); in scan_swap_map_slots()
906 offset = si->lowest_bit; in scan_swap_map_slots()
908 if (!si->swap_map[offset]) { in scan_swap_map_slots()
909 spin_lock(&si->lock); in scan_swap_map_slots()
912 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { in scan_swap_map_slots()
913 spin_lock(&si->lock); in scan_swap_map_slots()
922 spin_lock(&si->lock); in scan_swap_map_slots()
925 si->flags -= SWP_SCANNING; in scan_swap_map_slots()
929 static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot) in swap_alloc_cluster() argument
945 if (cluster_list_empty(&si->free_clusters)) in swap_alloc_cluster()
948 idx = cluster_list_first(&si->free_clusters); in swap_alloc_cluster()
950 ci = lock_cluster(si, offset); in swap_alloc_cluster()
951 alloc_cluster(si, idx); in swap_alloc_cluster()
954 map = si->swap_map + offset; in swap_alloc_cluster()
958 swap_range_alloc(si, offset, SWAPFILE_CLUSTER); in swap_alloc_cluster()
959 *slot = swp_entry(si->type, offset); in swap_alloc_cluster()
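
swap_alloc_cluster (929-959) is the THP fast path: grab a whole free cluster, mark all SWAPFILE_CLUSTER slots as swap-cache-only, and hand back one entry for the head page. Reconstructed from the fragments (the idx-to-offset conversion is an assumption):

    static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
    {
            unsigned long idx, offset, i;
            struct swap_cluster_info *ci;
            unsigned char *map;

            if (cluster_list_empty(&si->free_clusters))
                    return 0;

            idx = cluster_list_first(&si->free_clusters);
            offset = idx * SWAPFILE_CLUSTER;
            ci = lock_cluster(si, offset);
            alloc_cluster(si, idx);

            /* Every slot starts out as swap-cache only. */
            map = si->swap_map + offset;
            for (i = 0; i < SWAPFILE_CLUSTER; i++)
                    map[i] = SWAP_HAS_CACHE;
            spin_unlock(&ci->lock);           /* pairs with lock_cluster() */
            swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
            *slot = swp_entry(si->type, offset);

            return 1;
    }
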
964 static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) in swap_free_cluster() argument
969 ci = lock_cluster(si, offset); in swap_free_cluster()
970 memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER); in swap_free_cluster()
972 free_cluster(si, idx); in swap_free_cluster()
974 swap_range_free(si, offset, SWAPFILE_CLUSTER); in swap_free_cluster()
977 static unsigned long scan_swap_map(struct swap_info_struct *si, in scan_swap_map() argument
983 n_ret = scan_swap_map_slots(si, usage, 1, &entry); in scan_swap_map()
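
The single match at 983 pins down scan_swap_map as a thin compatibility wrapper: one slot requested from the batched allocator, the offset extracted on success.

    static unsigned long scan_swap_map(struct swap_info_struct *si,
                                       unsigned char usage)
    {
            swp_entry_t entry;
            int n_ret;

            n_ret = scan_swap_map_slots(si, usage, 1, &entry);
            if (n_ret)
                    return swp_offset(entry);

            return 0;
    }
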
995 struct swap_info_struct *si, *next; in get_swap_pages() local
1019 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { in get_swap_pages()
1021 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); in get_swap_pages()
1023 spin_lock(&si->lock); in get_swap_pages()
1024 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { in get_swap_pages()
1026 if (plist_node_empty(&si->avail_lists[node])) { in get_swap_pages()
1027 spin_unlock(&si->lock); in get_swap_pages()
1030 WARN(!si->highest_bit, in get_swap_pages()
1032 si->type); in get_swap_pages()
1033 WARN(!(si->flags & SWP_WRITEOK), in get_swap_pages()
1035 si->type); in get_swap_pages()
1036 __del_from_avail_list(si); in get_swap_pages()
1037 spin_unlock(&si->lock); in get_swap_pages()
1041 if (!(si->flags & SWP_FS)) in get_swap_pages()
1042 n_ret = swap_alloc_cluster(si, swp_entries); in get_swap_pages()
1044 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, in get_swap_pages()
1046 spin_unlock(&si->lock); in get_swap_pages()
1050 si->type); in get_swap_pages()
1082 struct swap_info_struct *si = swap_type_to_swap_info(type); in get_swap_page_of_type() local
1085 if (!si) in get_swap_page_of_type()
1088 spin_lock(&si->lock); in get_swap_page_of_type()
1089 if (si->flags & SWP_WRITEOK) { in get_swap_page_of_type()
1092 offset = scan_swap_map(si, 1); in get_swap_page_of_type()
1094 spin_unlock(&si->lock); in get_swap_page_of_type()
1099 spin_unlock(&si->lock); in get_swap_page_of_type()
1251 struct swap_info_struct *si; in get_swap_device() local
1256 si = swp_swap_info(entry); in get_swap_device()
1257 if (!si) in get_swap_device()
1261 if (!(si->flags & SWP_VALID)) in get_swap_device()
1264 if (offset >= si->max) in get_swap_device()
1267 return si; in get_swap_device()
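
get_swap_device (1251-1267) is the guard that makes the lock-free readers below safe: it validates the entry, checks SWP_VALID and the offset range, and returns a stabilized si that the caller must release with put_swap_device(). A sketch; the stabilizing mechanism (shown here as RCU) and the label layout are assumptions, since the fragments only show the checks:

    struct swap_info_struct *get_swap_device(swp_entry_t entry)
    {
            struct swap_info_struct *si;
            unsigned long offset;

            if (!entry.val)
                    goto out;
            si = swp_swap_info(entry);
            if (!si)
                    goto out;

            rcu_read_lock();                /* assumed: pins si against swapoff */
            if (!(si->flags & SWP_VALID))
                    goto unlock_out;
            offset = swp_offset(entry);
            if (offset >= si->max)
                    goto unlock_out;

            return si;
    unlock_out:
            rcu_read_unlock();
    out:
            return NULL;
    }
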
1330 struct swap_info_struct *si; in put_swap_page() local
1336 si = _swap_info_get(entry); in put_swap_page()
1337 if (!si) in put_swap_page()
1340 ci = lock_cluster_or_swap_info(si, offset); in put_swap_page()
1343 map = si->swap_map + offset; in put_swap_page()
1352 unlock_cluster_or_swap_info(si, ci); in put_swap_page()
1353 spin_lock(&si->lock); in put_swap_page()
1355 swap_free_cluster(si, idx); in put_swap_page()
1356 spin_unlock(&si->lock); in put_swap_page()
1361 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) { in put_swap_page()
1362 unlock_cluster_or_swap_info(si, ci); in put_swap_page()
1366 lock_cluster_or_swap_info(si, offset); in put_swap_page()
1369 unlock_cluster_or_swap_info(si, ci); in put_swap_page()
1375 struct swap_info_struct *si; in split_swap_cluster() local
1379 si = _swap_info_get(entry); in split_swap_cluster()
1380 if (!si) in split_swap_cluster()
1382 ci = lock_cluster(si, offset); in split_swap_cluster()
1450 struct swap_info_struct *si; in __swap_count() local
1454 si = get_swap_device(entry); in __swap_count()
1455 if (si) { in __swap_count()
1456 count = swap_count(si->swap_map[offset]); in __swap_count()
1457 put_swap_device(si); in __swap_count()
1462 static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) in swap_swapcount() argument
1468 ci = lock_cluster_or_swap_info(si, offset); in swap_swapcount()
1469 count = swap_count(si->swap_map[offset]); in swap_swapcount()
1470 unlock_cluster_or_swap_info(si, ci); in swap_swapcount()
1482 struct swap_info_struct *si; in __swp_swapcount() local
1484 si = get_swap_device(entry); in __swp_swapcount()
1485 if (si) { in __swp_swapcount()
1486 count = swap_swapcount(si, entry); in __swp_swapcount()
1487 put_swap_device(si); in __swp_swapcount()
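
Lines 1462-1487 show the two layers of the count readers: swap_swapcount reads one swap_map byte under the cluster-or-device lock, and __swp_swapcount wraps it in the get_swap_device()/put_swap_device() pair so the map cannot vanish mid-read. Reassembled:

    static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
    {
            pgoff_t offset = swp_offset(entry);
            struct swap_cluster_info *ci;
            int count;

            ci = lock_cluster_or_swap_info(si, offset);
            count = swap_count(si->swap_map[offset]);
            unlock_cluster_or_swap_info(si, ci);
            return count;
    }

    int __swp_swapcount(swp_entry_t entry)
    {
            int count = 0;
            struct swap_info_struct *si;

            si = get_swap_device(entry);
            if (si) {
                    count = swap_swapcount(si, entry);
                    put_swap_device(si);
            }
            return count;
    }
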
1538 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, in swap_page_trans_huge_swapped() argument
1542 unsigned char *map = si->swap_map; in swap_page_trans_huge_swapped()
1548 ci = lock_cluster_or_swap_info(si, offset); in swap_page_trans_huge_swapped()
1561 unlock_cluster_or_swap_info(si, ci); in swap_page_trans_huge_swapped()
1568 struct swap_info_struct *si; in page_swapped() local
1575 si = _swap_info_get(entry); in page_swapped()
1576 if (si) in page_swapped()
1577 return swap_page_trans_huge_swapped(si, entry); in page_swapped()
1586 struct swap_info_struct *si; in page_trans_huge_map_swapcount() local
1610 si = _swap_info_get(entry); in page_trans_huge_map_swapcount()
1611 if (si) { in page_trans_huge_map_swapcount()
1612 map = si->swap_map; in page_trans_huge_map_swapcount()
1617 ci = lock_cluster(si, offset); in page_trans_huge_map_swapcount()
1809 struct swap_info_struct *si = swap_type_to_swap_info(type); in swapdev_block() local
1811 if (!si || !(si->flags & SWP_WRITEOK)) in swapdev_block()
1917 struct swap_info_struct *si; in unuse_pte_range() local
1922 si = swap_info[type]; in unuse_pte_range()
1935 if (frontswap && !frontswap_test(si, offset)) in unuse_pte_range()
1939 swap_map = &si->swap_map[offset]; in unuse_pte_range()
2092 static unsigned int find_next_to_unuse(struct swap_info_struct *si, in find_next_to_unuse() argument
2104 for (i = prev + 1; i < si->max; i++) { in find_next_to_unuse()
2105 count = READ_ONCE(si->swap_map[i]); in find_next_to_unuse()
2107 if (!frontswap || frontswap_test(si, i)) in find_next_to_unuse()
2113 if (i == si->max) in find_next_to_unuse()
2130 struct swap_info_struct *si = swap_info[type]; in try_to_unuse() local
2135 if (!si->inuse_pages) in try_to_unuse()
2151 while (si->inuse_pages && in try_to_unuse()
2180 while (si->inuse_pages && in try_to_unuse()
2182 (i = find_next_to_unuse(si, i, frontswap)) != 0) { in try_to_unuse()
2222 if (si->inuse_pages) { in try_to_unuse()
2567 struct swap_info_struct *si = p; in SYSCALL_DEFINE1() local
2570 plist_for_each_entry_continue(si, &swap_active_head, list) { in SYSCALL_DEFINE1()
2571 si->prio++; in SYSCALL_DEFINE1()
2572 si->list.prio--; in SYSCALL_DEFINE1()
2574 if (si->avail_lists[nid].prio != 1) in SYSCALL_DEFINE1()
2575 si->avail_lists[nid].prio--; in SYSCALL_DEFINE1()
2711 struct swap_info_struct *si; in swap_start() local
2720 for (type = 0; (si = swap_type_to_swap_info(type)); type++) { in swap_start()
2721 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_start()
2724 return si; in swap_start()
2732 struct swap_info_struct *si = v; in swap_next() local
2738 type = si->type + 1; in swap_next()
2740 for (; (si = swap_type_to_swap_info(type)); type++) { in swap_next()
2741 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_next()
2744 return si; in swap_next()
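
swap_start/swap_next (2711-2744) implement the /proc/swaps seq_file iterator: walk the type space with swap_type_to_swap_info() and skip entries that are unused or not fully set up. A sketch of swap_next, assuming the usual seq_file position bump:

    static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
    {
            struct swap_info_struct *si = v;
            int type;

            if (v == SEQ_START_TOKEN)
                    type = 0;
            else
                    type = si->type + 1;

            ++(*pos);                       /* assumed position handling */
            for (; (si = swap_type_to_swap_info(type)); type++) {
                    if (!(si->flags & SWP_USED) || !si->swap_map)
                            continue;
                    return si;
            }

            return NULL;
    }
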
2757 struct swap_info_struct *si = v; in swap_show() local
2761 if (si == SEQ_START_TOKEN) { in swap_show()
2766 file = si->swap_file; in swap_show()
2772 si->pages << (PAGE_SHIFT - 10), in swap_show()
2773 si->inuse_pages << (PAGE_SHIFT - 10), in swap_show()
2774 si->prio); in swap_show()
3092 static bool swap_discardable(struct swap_info_struct *si) in swap_discardable() argument
3094 struct request_queue *q = bdev_get_queue(si->bdev); in swap_discardable()
3357 struct swap_info_struct *si = swap_info[type]; in si_swapinfo() local
3359 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) in si_swapinfo()
3360 nr_to_be_unused += si->inuse_pages; in si_swapinfo()
3524 struct swap_info_struct *si; in add_swap_count_continuation() local
3539 si = get_swap_device(entry); in add_swap_count_continuation()
3540 if (!si) { in add_swap_count_continuation()
3547 spin_lock(&si->lock); in add_swap_count_continuation()
3551 ci = lock_cluster(si, offset); in add_swap_count_continuation()
3553 count = si->swap_map[offset] & ~SWAP_HAS_CACHE; in add_swap_count_continuation()
3574 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
3577 spin_lock(&si->cont_lock); in add_swap_count_continuation()
3586 si->flags |= SWP_CONTINUED; in add_swap_count_continuation()
3614 spin_unlock(&si->cont_lock); in add_swap_count_continuation()
3617 spin_unlock(&si->lock); in add_swap_count_continuation()
3618 put_swap_device(si); in add_swap_count_continuation()
3634 static bool swap_count_continued(struct swap_info_struct *si, in swap_count_continued() argument
3642 head = vmalloc_to_page(si->swap_map + offset); in swap_count_continued()
3648 spin_lock(&si->cont_lock); in swap_count_continued()
3714 spin_unlock(&si->cont_lock); in swap_count_continued()
3722 static void free_swap_count_continuations(struct swap_info_struct *si) in free_swap_count_continuations() argument
3726 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { in free_swap_count_continuations()
3728 head = vmalloc_to_page(si->swap_map + offset); in free_swap_count_continuations()
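
free_swap_count_continuations (3722-3728) strides through swap_map one page at a time because each vmalloc'd page of the map can carry its own chain of continuation pages. A sketch; the continuation pages hanging off the head page's lru list, and the page_private() marker, are assumptions not visible in the fragments:

    static void free_swap_count_continuations(struct swap_info_struct *si)
    {
            pgoff_t offset;

            for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
                    struct page *head = vmalloc_to_page(si->swap_map + offset);

                    if (page_private(head)) {       /* assumed continuation marker */
                            struct page *page, *next;

                            list_for_each_entry_safe(page, next, &head->lru, lru) {
                                    list_del(&page->lru);
                                    __free_page(page);
                            }
                    }
            }
    }
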
3744 struct swap_info_struct *si, *next; in mem_cgroup_throttle_swaprate() local
3759 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], in mem_cgroup_throttle_swaprate()
3761 if (si->bdev) { in mem_cgroup_throttle_swaprate()
3762 blkcg_schedule_throttle(bdev_get_queue(si->bdev), in mem_cgroup_throttle_swaprate()