Lines matching full:si

Each entry below gives the source line number, the matching line (often truncated), and the enclosing function; a trailing "argument" or "local" notes whether si (a struct swap_info_struct *) is a parameter or a local variable at that point. Judging by the function names, the file being searched is the kernel's mm/swapfile.c.

131 static int __try_to_reclaim_swap(struct swap_info_struct *si,  in __try_to_reclaim_swap()  argument
134 swp_entry_t entry = swp_entry(si->type, offset); in __try_to_reclaim_swap()
175 static int discard_swap(struct swap_info_struct *si) in discard_swap() argument
183 se = first_se(si); in discard_swap()
187 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
198 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
245 static void discard_swap_cluster(struct swap_info_struct *si, in discard_swap_cluster() argument
248 struct swap_extent *se = offset_to_swap_extent(si, start_page); in discard_swap_cluster()
262 if (blkdev_issue_discard(si->bdev, start_block, in discard_swap_cluster()
355 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, in lock_cluster() argument
360 ci = si->cluster_info; in lock_cluster()
379 struct swap_info_struct *si, unsigned long offset) in lock_cluster_or_swap_info() argument
384 ci = lock_cluster(si, offset); in lock_cluster_or_swap_info()
387 spin_lock(&si->lock); in lock_cluster_or_swap_info()
392 static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si, in unlock_cluster_or_swap_info() argument
398 spin_unlock(&si->lock); in unlock_cluster_or_swap_info()
457 static void swap_cluster_schedule_discard(struct swap_info_struct *si, in swap_cluster_schedule_discard() argument
462 * si->swap_map directly. To make sure the discarding cluster isn't in swap_cluster_schedule_discard()
466 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_cluster_schedule_discard()
469 cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx); in swap_cluster_schedule_discard()
471 schedule_work(&si->discard_work); in swap_cluster_schedule_discard()
474 static void __free_cluster(struct swap_info_struct *si, unsigned long idx) in __free_cluster() argument
476 struct swap_cluster_info *ci = si->cluster_info; in __free_cluster()
479 cluster_list_add_tail(&si->free_clusters, ci, idx); in __free_cluster()
484 * will be added to free cluster list. caller should hold si->lock.
486 static void swap_do_scheduled_discard(struct swap_info_struct *si) in swap_do_scheduled_discard() argument
491 info = si->cluster_info; in swap_do_scheduled_discard()
493 while (!cluster_list_empty(&si->discard_clusters)) { in swap_do_scheduled_discard()
494 idx = cluster_list_del_first(&si->discard_clusters, info); in swap_do_scheduled_discard()
495 spin_unlock(&si->lock); in swap_do_scheduled_discard()
497 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
500 spin_lock(&si->lock); in swap_do_scheduled_discard()
501 ci = lock_cluster(si, idx * SWAPFILE_CLUSTER); in swap_do_scheduled_discard()
502 __free_cluster(si, idx); in swap_do_scheduled_discard()
503 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
511 struct swap_info_struct *si; in swap_discard_work() local
513 si = container_of(work, struct swap_info_struct, discard_work); in swap_discard_work()
515 spin_lock(&si->lock); in swap_discard_work()
516 swap_do_scheduled_discard(si); in swap_discard_work()
517 spin_unlock(&si->lock); in swap_discard_work()
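
Taken together, the swap_cluster_schedule_discard(), swap_do_scheduled_discard() and swap_discard_work() fragments above describe the deferred-discard path: the cluster's swap_map range is first marked occupied so the scanner skips it (the memset value is truncated in the listing), the cluster is queued on si->discard_clusters, and si->discard_work is scheduled; the worker then drains the queue under si->lock, dropping the lock around each blocking discard. A rough reassembly of the worker loop (source lines 493-503), with the truncated arguments and the final unlock filled in as assumptions:

    /* sketch: swap_do_scheduled_discard() loop, per source lines 493-503 above */
    while (!cluster_list_empty(&si->discard_clusters)) {
        idx = cluster_list_del_first(&si->discard_clusters, info);

        spin_unlock(&si->lock);                 /* the discard may block, so drop si->lock */
        discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
                             SWAPFILE_CLUSTER); /* length assumed from the truncated call */
        spin_lock(&si->lock);

        ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
        __free_cluster(si, idx);                /* back onto si->free_clusters */
        memset(si->swap_map + idx * SWAPFILE_CLUSTER,
               0, SWAPFILE_CLUSTER);            /* assumed: clear the placeholder entries */
        unlock_cluster(ci);                     /* assumed: release the cluster lock */
    }
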
522 struct swap_info_struct *si; in swap_users_ref_free() local
524 si = container_of(ref, struct swap_info_struct, users); in swap_users_ref_free()
525 complete(&si->comp); in swap_users_ref_free()
528 static void alloc_cluster(struct swap_info_struct *si, unsigned long idx) in alloc_cluster() argument
530 struct swap_cluster_info *ci = si->cluster_info; in alloc_cluster()
532 VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx); in alloc_cluster()
533 cluster_list_del_first(&si->free_clusters, ci); in alloc_cluster()
537 static void free_cluster(struct swap_info_struct *si, unsigned long idx) in free_cluster() argument
539 struct swap_cluster_info *ci = si->cluster_info + idx; in free_cluster()
547 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == in free_cluster()
549 swap_cluster_schedule_discard(si, idx); in free_cluster()
553 __free_cluster(si, idx); in free_cluster()
601 scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, in scan_swap_map_ssd_cluster_conflict() argument
608 conflict = !cluster_list_empty(&si->free_clusters) && in scan_swap_map_ssd_cluster_conflict()
609 offset != cluster_list_first(&si->free_clusters) && in scan_swap_map_ssd_cluster_conflict()
610 cluster_is_free(&si->cluster_info[offset]); in scan_swap_map_ssd_cluster_conflict()
615 percpu_cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_ssd_cluster_conflict()
624 static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, in scan_swap_map_try_ssd_cluster() argument
632 cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_try_ssd_cluster()
634 if (!cluster_list_empty(&si->free_clusters)) { in scan_swap_map_try_ssd_cluster()
635 cluster->index = si->free_clusters.head; in scan_swap_map_try_ssd_cluster()
638 } else if (!cluster_list_empty(&si->discard_clusters)) { in scan_swap_map_try_ssd_cluster()
642 * reread cluster_next_cpu since we dropped si->lock in scan_swap_map_try_ssd_cluster()
644 swap_do_scheduled_discard(si); in scan_swap_map_try_ssd_cluster()
645 *scan_base = this_cpu_read(*si->cluster_next_cpu); in scan_swap_map_try_ssd_cluster()
657 max = min_t(unsigned long, si->max, in scan_swap_map_try_ssd_cluster()
660 ci = lock_cluster(si, tmp); in scan_swap_map_try_ssd_cluster()
662 if (!si->swap_map[tmp]) in scan_swap_map_try_ssd_cluster()
693 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, in swap_range_alloc() argument
698 if (offset == si->lowest_bit) in swap_range_alloc()
699 si->lowest_bit += nr_entries; in swap_range_alloc()
700 if (end == si->highest_bit) in swap_range_alloc()
701 WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries); in swap_range_alloc()
702 WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries); in swap_range_alloc()
703 if (si->inuse_pages == si->pages) { in swap_range_alloc()
704 si->lowest_bit = si->max; in swap_range_alloc()
705 si->highest_bit = 0; in swap_range_alloc()
706 del_from_avail_list(si); in swap_range_alloc()
722 static void swap_range_free(struct swap_info_struct *si, unsigned long offset, in swap_range_free() argument
729 if (offset < si->lowest_bit) in swap_range_free()
730 si->lowest_bit = offset; in swap_range_free()
731 if (end > si->highest_bit) { in swap_range_free()
732 bool was_full = !si->highest_bit; in swap_range_free()
734 WRITE_ONCE(si->highest_bit, end); in swap_range_free()
735 if (was_full && (si->flags & SWP_WRITEOK)) in swap_range_free()
736 add_to_avail_list(si); in swap_range_free()
739 WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries); in swap_range_free()
740 if (si->flags & SWP_BLKDEV) in swap_range_free()
742 si->bdev->bd_disk->fops->swap_slot_free_notify; in swap_range_free()
746 arch_swap_invalidate_page(si->type, offset); in swap_range_free()
747 frontswap_invalidate_page(si->type, offset); in swap_range_free()
749 swap_slot_free_notify(si->bdev, offset); in swap_range_free()
752 clear_shadow_from_swap_cache(si->type, begin, end); in swap_range_free()
755 static void set_cluster_next(struct swap_info_struct *si, unsigned long next) in set_cluster_next() argument
759 if (!(si->flags & SWP_SOLIDSTATE)) { in set_cluster_next()
760 si->cluster_next = next; in set_cluster_next()
764 prev = this_cpu_read(*si->cluster_next_cpu); in set_cluster_next()
773 if (si->highest_bit <= si->lowest_bit) in set_cluster_next()
775 next = si->lowest_bit + in set_cluster_next()
776 prandom_u32_max(si->highest_bit - si->lowest_bit + 1); in set_cluster_next()
778 next = max_t(unsigned int, next, si->lowest_bit); in set_cluster_next()
780 this_cpu_write(*si->cluster_next_cpu, next); in set_cluster_next()
783 static bool swap_offset_available_and_locked(struct swap_info_struct *si, in swap_offset_available_and_locked() argument
786 if (data_race(!si->swap_map[offset])) { in swap_offset_available_and_locked()
787 spin_lock(&si->lock); in swap_offset_available_and_locked()
791 if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { in swap_offset_available_and_locked()
792 spin_lock(&si->lock); in swap_offset_available_and_locked()
799 static int scan_swap_map_slots(struct swap_info_struct *si, in scan_swap_map_slots() argument
822 si->flags += SWP_SCANNING; in scan_swap_map_slots()
828 if (si->flags & SWP_SOLIDSTATE) in scan_swap_map_slots()
829 scan_base = this_cpu_read(*si->cluster_next_cpu); in scan_swap_map_slots()
831 scan_base = si->cluster_next; in scan_swap_map_slots()
835 if (si->cluster_info) { in scan_swap_map_slots()
836 if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) in scan_swap_map_slots()
838 } else if (unlikely(!si->cluster_nr--)) { in scan_swap_map_slots()
839 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { in scan_swap_map_slots()
840 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
844 spin_unlock(&si->lock); in scan_swap_map_slots()
849 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info in scan_swap_map_slots()
852 scan_base = offset = si->lowest_bit; in scan_swap_map_slots()
856 for (; last_in_cluster <= si->highest_bit; offset++) { in scan_swap_map_slots()
857 if (si->swap_map[offset]) in scan_swap_map_slots()
860 spin_lock(&si->lock); in scan_swap_map_slots()
862 si->cluster_next = offset; in scan_swap_map_slots()
863 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
873 spin_lock(&si->lock); in scan_swap_map_slots()
874 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
878 if (si->cluster_info) { in scan_swap_map_slots()
879 while (scan_swap_map_ssd_cluster_conflict(si, offset)) { in scan_swap_map_slots()
883 if (!scan_swap_map_try_ssd_cluster(si, &offset, in scan_swap_map_slots()
888 if (!(si->flags & SWP_WRITEOK)) in scan_swap_map_slots()
890 if (!si->highest_bit) in scan_swap_map_slots()
892 if (offset > si->highest_bit) in scan_swap_map_slots()
893 scan_base = offset = si->lowest_bit; in scan_swap_map_slots()
895 ci = lock_cluster(si, offset); in scan_swap_map_slots()
897 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { in scan_swap_map_slots()
900 spin_unlock(&si->lock); in scan_swap_map_slots()
901 swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); in scan_swap_map_slots()
902 spin_lock(&si->lock); in scan_swap_map_slots()
909 if (si->swap_map[offset]) { in scan_swap_map_slots()
916 WRITE_ONCE(si->swap_map[offset], usage); in scan_swap_map_slots()
917 inc_cluster_info_page(si, si->cluster_info, offset); in scan_swap_map_slots()
920 swap_range_alloc(si, offset, 1); in scan_swap_map_slots()
921 slots[n_ret++] = swp_entry(si->type, offset); in scan_swap_map_slots()
924 if ((n_ret == nr) || (offset >= si->highest_bit)) in scan_swap_map_slots()
933 spin_unlock(&si->lock); in scan_swap_map_slots()
935 spin_lock(&si->lock); in scan_swap_map_slots()
940 if (si->cluster_info) { in scan_swap_map_slots()
941 if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) in scan_swap_map_slots()
943 } else if (si->cluster_nr && !si->swap_map[++offset]) { in scan_swap_map_slots()
945 --si->cluster_nr; in scan_swap_map_slots()
960 scan_limit = si->highest_bit; in scan_swap_map_slots()
963 if (!si->swap_map[offset]) in scan_swap_map_slots()
969 set_cluster_next(si, offset + 1); in scan_swap_map_slots()
970 si->flags -= SWP_SCANNING; in scan_swap_map_slots()
974 spin_unlock(&si->lock); in scan_swap_map_slots()
975 while (++offset <= READ_ONCE(si->highest_bit)) { in scan_swap_map_slots()
981 if (swap_offset_available_and_locked(si, offset)) in scan_swap_map_slots()
984 offset = si->lowest_bit; in scan_swap_map_slots()
991 if (swap_offset_available_and_locked(si, offset)) in scan_swap_map_slots()
995 spin_lock(&si->lock); in scan_swap_map_slots()
998 si->flags -= SWP_SCANNING; in scan_swap_map_slots()
1002 static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot) in swap_alloc_cluster() argument
1017 if (cluster_list_empty(&si->free_clusters)) in swap_alloc_cluster()
1020 idx = cluster_list_first(&si->free_clusters); in swap_alloc_cluster()
1022 ci = lock_cluster(si, offset); in swap_alloc_cluster()
1023 alloc_cluster(si, idx); in swap_alloc_cluster()
1026 memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER); in swap_alloc_cluster()
1028 swap_range_alloc(si, offset, SWAPFILE_CLUSTER); in swap_alloc_cluster()
1029 *slot = swp_entry(si->type, offset); in swap_alloc_cluster()
1034 static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) in swap_free_cluster() argument
1039 ci = lock_cluster(si, offset); in swap_free_cluster()
1040 memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER); in swap_free_cluster()
1042 free_cluster(si, idx); in swap_free_cluster()
1044 swap_range_free(si, offset, SWAPFILE_CLUSTER); in swap_free_cluster()
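
swap_alloc_cluster() hands out an entire SWAPFILE_CLUSTER-sized run for a huge page in one step, and swap_free_cluster() is its inverse. A rough reassembly of the allocation path from the fragments at source lines 1017-1029; the offset computation, the unlock and the return value are assumptions filled in for readability:

    /* sketch: whole-cluster allocation, per source lines 1017-1029 above */
    if (cluster_list_empty(&si->free_clusters))
        return 0;                               /* no free cluster available */
    idx = cluster_list_first(&si->free_clusters);
    offset = idx * SWAPFILE_CLUSTER;            /* assumed: first slot of that cluster */
    ci = lock_cluster(si, offset);
    alloc_cluster(si, idx);                     /* pops idx off si->free_clusters */
    memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
    unlock_cluster(ci);                         /* assumed: release the cluster lock */
    swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
    *slot = swp_entry(si->type, offset);
    return 1;                                   /* assumed: one (huge) entry returned */
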
1050 struct swap_info_struct *si, *next; in get_swap_pages() local
1072 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { in get_swap_pages()
1073 /* requeue si to after same-priority siblings */ in get_swap_pages()
1074 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); in get_swap_pages()
1076 spin_lock(&si->lock); in get_swap_pages()
1077 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { in get_swap_pages()
1079 if (plist_node_empty(&si->avail_lists[node])) { in get_swap_pages()
1080 spin_unlock(&si->lock); in get_swap_pages()
1083 WARN(!si->highest_bit, in get_swap_pages()
1085 si->type); in get_swap_pages()
1086 WARN(!(si->flags & SWP_WRITEOK), in get_swap_pages()
1088 si->type); in get_swap_pages()
1089 __del_from_avail_list(si); in get_swap_pages()
1090 spin_unlock(&si->lock); in get_swap_pages()
1094 if (si->flags & SWP_BLKDEV) in get_swap_pages()
1095 n_ret = swap_alloc_cluster(si, swp_entries); in get_swap_pages()
1097 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, in get_swap_pages()
1099 spin_unlock(&si->lock); in get_swap_pages()
1102 pr_debug("scan_swap_map of si %d failed to find offset\n", in get_swap_pages()
1103 si->type); in get_swap_pages()
1108 * if we got here, it's likely that si was almost full before, in get_swap_pages()
1109 * and since scan_swap_map_slots() can drop the si->lock, in get_swap_pages()
1111 * same si and it filled up before we could get one; or, the si in get_swap_pages()
1113 * si->lock. Since we dropped the swap_avail_lock, the in get_swap_pages()
1253 struct swap_info_struct *si; in get_swap_device() local
1258 si = swp_swap_info(entry); in get_swap_device()
1259 if (!si) in get_swap_device()
1261 if (!percpu_ref_tryget_live(&si->users)) in get_swap_device()
1264 * Guarantee the si->users are checked before accessing other in get_swap_device()
1272 if (offset >= si->max) in get_swap_device()
1275 return si; in get_swap_device()
1282 percpu_ref_put(&si->users); in get_swap_device()
1340 struct swap_info_struct *si; in put_swap_folio() local
1346 si = _swap_info_get(entry); in put_swap_folio()
1347 if (!si) in put_swap_folio()
1350 ci = lock_cluster_or_swap_info(si, offset); in put_swap_folio()
1353 map = si->swap_map + offset; in put_swap_folio()
1362 unlock_cluster_or_swap_info(si, ci); in put_swap_folio()
1363 spin_lock(&si->lock); in put_swap_folio()
1365 swap_free_cluster(si, idx); in put_swap_folio()
1366 spin_unlock(&si->lock); in put_swap_folio()
1371 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) { in put_swap_folio()
1372 unlock_cluster_or_swap_info(si, ci); in put_swap_folio()
1376 lock_cluster_or_swap_info(si, offset); in put_swap_folio()
1379 unlock_cluster_or_swap_info(si, ci); in put_swap_folio()
1385 struct swap_info_struct *si; in split_swap_cluster() local
1389 si = _swap_info_get(entry); in split_swap_cluster()
1390 if (!si) in split_swap_cluster()
1392 ci = lock_cluster(si, offset); in split_swap_cluster()
1436 struct swap_info_struct *si; in __swap_count() local
1440 si = get_swap_device(entry); in __swap_count()
1441 if (si) { in __swap_count()
1442 count = swap_count(si->swap_map[offset]); in __swap_count()
1443 put_swap_device(si); in __swap_count()
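
get_swap_device() pins the swap device against a concurrent swapoff by taking a percpu reference (percpu_ref_tryget_live(&si->users)) and returns NULL if the entry's device is gone or the offset is past si->max; put_swap_device() drops that reference, and swap_users_ref_free() (source lines 522-525) completes si->comp once the last user has gone away. __swap_count() above is the canonical bracket; a minimal sketch of the pattern, with the swp_offset() lookup assumed:

    /* sketch: mirrors __swap_count(), source lines 1440-1443 above */
    struct swap_info_struct *si;
    int count = 0;

    si = get_swap_device(entry);                /* NULL if the device has gone away */
    if (si) {
        count = swap_count(si->swap_map[swp_offset(entry)]);
        put_swap_device(si);                    /* drops the si->users reference */
    }
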
1453 static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) in swap_swapcount() argument
1459 ci = lock_cluster_or_swap_info(si, offset); in swap_swapcount()
1460 count = swap_count(si->swap_map[offset]); in swap_swapcount()
1461 unlock_cluster_or_swap_info(si, ci); in swap_swapcount()
1473 struct swap_info_struct *si; in __swp_swapcount() local
1475 si = get_swap_device(entry); in __swp_swapcount()
1476 if (si) { in __swp_swapcount()
1477 count = swap_swapcount(si, entry); in __swp_swapcount()
1478 put_swap_device(si); in __swp_swapcount()
1529 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, in swap_page_trans_huge_swapped() argument
1533 unsigned char *map = si->swap_map; in swap_page_trans_huge_swapped()
1539 ci = lock_cluster_or_swap_info(si, offset); in swap_page_trans_huge_swapped()
1552 unlock_cluster_or_swap_info(si, ci); in swap_page_trans_huge_swapped()
1559 struct swap_info_struct *si = _swap_info_get(entry); in folio_swapped() local
1561 if (!si) in folio_swapped()
1565 return swap_swapcount(si, entry) != 0; in folio_swapped()
1567 return swap_page_trans_huge_swapped(si, entry); in folio_swapped()
1640 struct swap_info_struct *si = swap_type_to_swap_info(type); in get_swap_page_of_type() local
1643 if (!si) in get_swap_page_of_type()
1647 spin_lock(&si->lock); in get_swap_page_of_type()
1648 if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry)) in get_swap_page_of_type()
1650 spin_unlock(&si->lock); in get_swap_page_of_type()
1714 struct swap_info_struct *si = swap_type_to_swap_info(type); in swapdev_block() local
1717 if (!si || !(si->flags & SWP_WRITEOK)) in swapdev_block()
1719 se = offset_to_swap_extent(si, offset); in swapdev_block()
1837 struct swap_info_struct *si; in unuse_pte_range() local
1841 si = swap_info[type]; in unuse_pte_range()
1856 swap_map = &si->swap_map[offset]; in unuse_pte_range()
2007 static unsigned int find_next_to_unuse(struct swap_info_struct *si, in find_next_to_unuse() argument
2019 for (i = prev + 1; i < si->max; i++) { in find_next_to_unuse()
2020 count = READ_ONCE(si->swap_map[i]); in find_next_to_unuse()
2027 if (i == si->max) in find_next_to_unuse()
2039 struct swap_info_struct *si = swap_info[type]; in try_to_unuse() local
2044 if (!READ_ONCE(si->inuse_pages)) in try_to_unuse()
2057 while (READ_ONCE(si->inuse_pages) && in try_to_unuse()
2085 while (READ_ONCE(si->inuse_pages) && in try_to_unuse()
2087 (i = find_next_to_unuse(si, i)) != 0) { in try_to_unuse()
2120 if (READ_ONCE(si->inuse_pages)) { in try_to_unuse()
2433 struct swap_info_struct *si = p; in SYSCALL_DEFINE1() local
2436 plist_for_each_entry_continue(si, &swap_active_head, list) { in SYSCALL_DEFINE1()
2437 si->prio++; in SYSCALL_DEFINE1()
2438 si->list.prio--; in SYSCALL_DEFINE1()
2440 if (si->avail_lists[nid].prio != 1) in SYSCALL_DEFINE1()
2441 si->avail_lists[nid].prio--; in SYSCALL_DEFINE1()
2580 struct swap_info_struct *si; in swap_start() local
2589 for (type = 0; (si = swap_type_to_swap_info(type)); type++) { in swap_start()
2590 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_start()
2593 return si; in swap_start()
2601 struct swap_info_struct *si = v; in swap_next() local
2607 type = si->type + 1; in swap_next()
2610 for (; (si = swap_type_to_swap_info(type)); type++) { in swap_next()
2611 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_next()
2613 return si; in swap_next()
2626 struct swap_info_struct *si = v; in swap_show() local
2631 if (si == SEQ_START_TOKEN) { in swap_show()
2636 bytes = si->pages << (PAGE_SHIFT - 10); in swap_show()
2637 inuse = READ_ONCE(si->inuse_pages) << (PAGE_SHIFT - 10); in swap_show()
2639 file = si->swap_file; in swap_show()
2647 si->prio); in swap_show()
3253 struct swap_info_struct *si = swap_info[type]; in si_swapinfo() local
3255 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) in si_swapinfo()
3256 nr_to_be_unused += READ_ONCE(si->inuse_pages); in si_swapinfo()
3418 struct swap_info_struct *si; in add_swap_count_continuation() local
3433 si = get_swap_device(entry); in add_swap_count_continuation()
3434 if (!si) { in add_swap_count_continuation()
3441 spin_lock(&si->lock); in add_swap_count_continuation()
3445 ci = lock_cluster(si, offset); in add_swap_count_continuation()
3447 count = swap_count(si->swap_map[offset]); in add_swap_count_continuation()
3468 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
3471 spin_lock(&si->cont_lock); in add_swap_count_continuation()
3480 si->flags |= SWP_CONTINUED; in add_swap_count_continuation()
3508 spin_unlock(&si->cont_lock); in add_swap_count_continuation()
3511 spin_unlock(&si->lock); in add_swap_count_continuation()
3512 put_swap_device(si); in add_swap_count_continuation()
3528 static bool swap_count_continued(struct swap_info_struct *si, in swap_count_continued() argument
3536 head = vmalloc_to_page(si->swap_map + offset); in swap_count_continued()
3542 spin_lock(&si->cont_lock); in swap_count_continued()
3604 spin_unlock(&si->cont_lock); in swap_count_continued()
3612 static void free_swap_count_continuations(struct swap_info_struct *si) in free_swap_count_continuations() argument
3616 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { in free_swap_count_continuations()
3618 head = vmalloc_to_page(si->swap_map + offset); in free_swap_count_continuations()
3633 struct swap_info_struct *si, *next; in __cgroup_throttle_swaprate() local
3650 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid], in __cgroup_throttle_swaprate()
3652 if (si->bdev) { in __cgroup_throttle_swaprate()
3653 blkcg_schedule_throttle(si->bdev->bd_disk, true); in __cgroup_throttle_swaprate()