Lines Matching refs:slots

499 struct kvm_memslots *slots; in __kvm_handle_hva_range() local
510 slots = __kvm_memslots(kvm, i); in __kvm_handle_hva_range()
511 kvm_for_each_memslot(slot, slots) { in __kvm_handle_hva_range()
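These hits are the MMU-notifier walk: __kvm_handle_hva_range() loops over every address space, grabs that space's kvm_memslots, and visits each live slot with kvm_for_each_memslot(). A minimal userspace sketch of that iteration pattern (struct layout and names here are simplified stand-ins, not the kernel definitions):

#include <stdio.h>

struct memslot { unsigned long base_gfn, npages; };

struct memslots {
	int used_slots;
	struct memslot slot[8];
};

/* simplified kvm_for_each_memslot(): only the first used_slots
 * entries of the backing array are live */
#define for_each_memslot(m, s) \
	for ((m) = (s)->slot; (m) < (s)->slot + (s)->used_slots; (m)++)

int main(void)
{
	struct memslots as[2] = {	/* one kvm_memslots per address space */
		{ 2, { { 0x000, 16 }, { 0x100, 32 } } },
		{ 1, { { 0x200, 8 } } },
	};
	struct memslot *m;

	for (int i = 0; i < 2; i++)	/* outer loop over address spaces */
		for_each_memslot(m, &as[i])
			printf("as %d: gfn %#lx, %lu pages\n",
			       i, m->base_gfn, m->npages);
	return 0;
}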
849 struct kvm_memslots *slots; in kvm_alloc_memslots() local
851 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT); in kvm_alloc_memslots()
852 if (!slots) in kvm_alloc_memslots()
856 slots->id_to_index[i] = -1; in kvm_alloc_memslots()
858 return slots; in kvm_alloc_memslots()
880 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) in kvm_free_memslots() argument
884 if (!slots) in kvm_free_memslots()
887 kvm_for_each_memslot(memslot, slots) in kvm_free_memslots()
890 kvfree(slots); in kvm_free_memslots()
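kvm_alloc_memslots() zero-allocates the container and then seeds every id_to_index entry with -1, meaning "no slot with this id" (index 0 is valid, so zero cannot mean empty); kvm_free_memslots() walks the live slots for per-slot teardown before freeing the container. A sketch of the same pair, with calloc()/free() standing in for kvzalloc()/kvfree() and sizes chosen arbitrarily:

#include <stdlib.h>

#define NR_SLOTS 32

struct memslot { int id; void *dirty_bitmap; };

struct memslots {
	int id_to_index[NR_SLOTS];	/* -1 == id not present */
	int used_slots;
	struct memslot slot[NR_SLOTS];
};

static struct memslots *alloc_memslots(void)
{
	struct memslots *s = calloc(1, sizeof(*s));	/* zeroed, like kvzalloc */

	if (!s)
		return NULL;
	for (int i = 0; i < NR_SLOTS; i++)
		s->id_to_index[i] = -1;
	return s;
}

static void free_memslots(struct memslots *s)
{
	if (!s)
		return;
	for (int i = 0; i < s->used_slots; i++)	/* per-slot teardown */
		free(s->slot[i].dirty_bitmap);
	free(s);
}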
1057 struct kvm_memslots *slots = kvm_alloc_memslots(); in kvm_create_vm() local
1059 if (!slots) in kvm_create_vm()
1062 slots->generation = i; in kvm_create_vm()
1063 rcu_assign_pointer(kvm->memslots[i], slots); in kvm_create_vm()
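Note the starting value at line 1062: address space i begins life at generation i. Together with the bump in install_new_memslots() (in this era of the code the generation advances by KVM_ADDRESS_SPACE_NUM per update), each address space owns its own residue class, so a generation cached against one address space can never accidentally match the other. A toy demonstration of the arithmetic:

#include <stdio.h>

#define NR_AS 2		/* stand-in for KVM_ADDRESS_SPACE_NUM */

int main(void)
{
	unsigned long long gen[NR_AS];

	for (int i = 0; i < NR_AS; i++)
		gen[i] = i;		/* kvm_create_vm(): generation = i */

	/* three memslot updates per space: as 0 sees 2,4,6; as 1 sees 3,5,7 */
	for (int u = 0; u < 3; u++)
		for (int i = 0; i < NR_AS; i++)
			printf("as %d: generation %llu\n", i, gen[i] += NR_AS);
	return 0;
}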
1258 static inline void kvm_memslot_delete(struct kvm_memslots *slots, in kvm_memslot_delete() argument
1261 struct kvm_memory_slot *mslots = slots->memslots; in kvm_memslot_delete()
1264 if (WARN_ON(slots->id_to_index[memslot->id] == -1)) in kvm_memslot_delete()
1267 slots->used_slots--; in kvm_memslot_delete()
1269 if (atomic_read(&slots->last_used_slot) >= slots->used_slots) in kvm_memslot_delete()
1270 atomic_set(&slots->last_used_slot, 0); in kvm_memslot_delete()
1272 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) { in kvm_memslot_delete()
1274 slots->id_to_index[mslots[i].id] = i; in kvm_memslot_delete()
1277 slots->id_to_index[memslot->id] = -1; in kvm_memslot_delete()
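kvm_memslot_delete() finds the victim through id_to_index, shifts the tail of the gfn-sorted array left over it while refreshing id_to_index for every entry that moved, and finally marks the id free; the last_used_slot hint is reset first if it would point past the shrunken array. A compact sketch of the shift-and-fixup (single-threaded, so a plain int replaces the kernel's atomic hint):

#define NR_SLOTS 32

struct memslot { int id; unsigned long base_gfn, npages; };

struct memslots {
	struct memslot slot[NR_SLOTS];
	int id_to_index[NR_SLOTS];
	int used_slots;
	int last_used_slot;	/* atomic_t in the kernel */
};

static void memslot_delete(struct memslots *s, int id)
{
	int i = s->id_to_index[id];

	if (i < 0)
		return;				/* id not present */

	s->used_slots--;
	if (s->last_used_slot >= s->used_slots)
		s->last_used_slot = 0;		/* hint would dangle; reset */

	/* close the gap; keep id_to_index pointing at the new positions */
	for (; i < s->used_slots; i++) {
		s->slot[i] = s->slot[i + 1];
		s->id_to_index[s->slot[i].id] = i;
	}
	s->id_to_index[id] = -1;		/* id is free again */
}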
1284 static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) in kvm_memslot_insert_back() argument
1286 return slots->used_slots++; in kvm_memslot_insert_back()
1296 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, in kvm_memslot_move_backward() argument
1299 struct kvm_memory_slot *mslots = slots->memslots; in kvm_memslot_move_backward()
1302 if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) || in kvm_memslot_move_backward()
1303 WARN_ON_ONCE(!slots->used_slots)) in kvm_memslot_move_backward()
1311 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) { in kvm_memslot_move_backward()
1319 slots->id_to_index[mslots[i].id] = i; in kvm_memslot_move_backward()
1331 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, in kvm_memslot_move_forward() argument
1335 struct kvm_memory_slot *mslots = slots->memslots; in kvm_memslot_move_forward()
1346 slots->id_to_index[mslots[i].id] = i; in kvm_memslot_move_forward()
1392 static void update_memslots(struct kvm_memslots *slots, in update_memslots() argument
1399 kvm_memslot_delete(slots, memslot); in update_memslots()
1402 i = kvm_memslot_insert_back(slots); in update_memslots()
1404 i = kvm_memslot_move_backward(slots, memslot); in update_memslots()
1405 i = kvm_memslot_move_forward(slots, memslot, i); in update_memslots()
1411 slots->memslots[i] = *memslot; in update_memslots()
1412 slots->id_to_index[memslot->id] = i; in update_memslots()
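update_memslots() dispatches to the helpers above: DELETE runs the shift, CREATE appends at used_slots++, and MOVE/FLAGS_ONLY starts from the slot's current index; in the non-delete cases the new contents are stored (lines 1411-1412) and kvm_memslot_move_backward()/..._move_forward() re-sort the entry, since the array is kept ordered by descending base_gfn. A self-contained sketch of the store-then-resort half, with the two move passes reduced to swap loops (the kernel shifts neighbors and writes the changed slot once at the end, but the resulting order is the same):

struct mslot { int id; unsigned long base_gfn; };

struct mslots {
	struct mslot s[32];
	int idx[32];		/* id_to_index */
	int used;
};

/* CREATE/MOVE/FLAGS_ONLY path of update_memslots(): place the new
 * contents, then bubble the entry until descending-base_gfn order holds */
static void store_and_resort(struct mslots *m, const struct mslot *new,
			     int creating)
{
	int i = creating ? m->used++ : m->idx[new->id];

	m->s[i] = *new;
	m->idx[new->id] = i;

	/* "move backward": toward the tail while the next gfn is larger */
	for (; i + 1 < m->used && m->s[i].base_gfn < m->s[i + 1].base_gfn; i++) {
		struct mslot t = m->s[i];

		m->s[i] = m->s[i + 1];
		m->s[i + 1] = t;
		m->idx[m->s[i].id] = i;
		m->idx[m->s[i + 1].id] = i + 1;
	}
	/* "move forward": toward the head while the previous gfn is smaller */
	for (; i > 0 && m->s[i].base_gfn > m->s[i - 1].base_gfn; i--) {
		struct mslot t = m->s[i];

		m->s[i] = m->s[i - 1];
		m->s[i - 1] = t;
		m->idx[m->s[i].id] = i;
		m->idx[m->s[i - 1].id] = i - 1;
	}
}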
1431 int as_id, struct kvm_memslots *slots) in install_new_memslots() argument
1437 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; in install_new_memslots()
1453 rcu_assign_pointer(kvm->memslots[as_id], slots); in install_new_memslots()
1471 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; in install_new_memslots()
1484 slots->generation = gen; in install_new_memslots()
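install_new_memslots() publishes the new array with rcu_assign_pointer() and brackets the switch with a generation protocol: the update-in-progress flag (bit 63, KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS) rides along while readers may observe either array, and the final generation both clears the flag and steps forward, so a generation cached mid-update can never equal a settled one. A sketch of just the bit arithmetic, eliding the RCU publish and SRCU grace period that sit between the two phases:

#include <stdint.h>

#define GEN_IN_PROGRESS (1ULL << 63)	/* KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS */
#define NR_AS 2				/* KVM_ADDRESS_SPACE_NUM stand-in */

/* returns the generation to store in the newly published memslots */
static uint64_t memslot_generation_update(uint64_t old_gen)
{
	/* phase 1: the new slots go live carrying old_gen | flag, so any
	 * reader that caches a generation now caches an "in flux" value */
	uint64_t live = old_gen | GEN_IN_PROGRESS;

	/* phase 2 (after the grace period): drop the flag and step forward,
	 * keeping each address space on its own residue mod NR_AS */
	return (live & ~GEN_IN_PROGRESS) + NR_AS;
}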
1489 static size_t kvm_memslots_size(int slots) in kvm_memslots_size() argument
1492 (sizeof(struct kvm_memory_slot) * slots); in kvm_memslots_size()
1509 struct kvm_memslots *slots; in kvm_dup_memslots() local
1517 slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); in kvm_dup_memslots()
1518 if (likely(slots)) in kvm_dup_memslots()
1519 kvm_copy_memslots(slots, old); in kvm_dup_memslots()
1521 return slots; in kvm_dup_memslots()
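kvm_memslots_size() is the usual flexible-array-member computation, and kvm_dup_memslots() sizes the copy for the post-change slot count (in this version, one extra slot for CREATE, old->used_slots otherwise). A sketch of the pair, with calloc()/memcpy() standing in for kvzalloc() and kvm_copy_memslots():

#include <stdlib.h>
#include <string.h>

struct memslot { int id; unsigned long base_gfn, npages; };

struct memslots {
	unsigned long long generation;
	int used_slots;
	struct memslot memslots[];	/* flexible array member */
};

/* sizeof(header) + n trailing slots: the kvm_memslots_size() pattern */
static size_t memslots_size(int n)
{
	return sizeof(struct memslots) + n * sizeof(struct memslot);
}

/* duplicate for copy-update; grow by one slot when creating */
static struct memslots *dup_memslots(const struct memslots *old, int creating)
{
	size_t sz = memslots_size(old->used_slots + (creating ? 1 : 0));
	struct memslots *new = calloc(1, sz);

	if (new)
		memcpy(new, old, memslots_size(old->used_slots));
	return new;
}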
1531 struct kvm_memslots *slots; in kvm_set_memslot() local
1550 slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change); in kvm_set_memslot()
1551 if (!slots) { in kvm_set_memslot()
1561 slot = id_to_memslot(slots, old->id); in kvm_set_memslot()
1569 slots = install_new_memslots(kvm, as_id, slots); in kvm_set_memslot()
1589 kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id)); in kvm_set_memslot()
1596 update_memslots(slots, new, change); in kvm_set_memslot()
1597 slots = install_new_memslots(kvm, as_id, slots); in kvm_set_memslot()
1601 kvfree(slots); in kvm_set_memslot()
1606 slot = id_to_memslot(slots, old->id); in kvm_set_memslot()
1608 slots = install_new_memslots(kvm, as_id, slots); in kvm_set_memslot()
1612 kvfree(slots); in kvm_set_memslot()
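kvm_set_memslot() ties the pieces together: duplicate the live array, and for DELETE/MOVE first publish a copy in which the victim is flagged KVM_MEMSLOT_INVALID (so faults stop installing stale translations), reuse the array handed back by install_new_memslots() as the next working copy, then apply the real change with a second install; the error paths re-install and free. A sketch of the two-install dance using a C11 atomic pointer in place of the RCU-protected kvm->memslots[as_id] (the SRCU grace periods and locking are elided):

#include <stdatomic.h>
#include <stdlib.h>

struct memslots { int invalid; /* toy stand-in for the real contents */ };

static _Atomic(struct memslots *) live;	/* kvm->memslots[as_id] */

/* install_new_memslots(): publish @next, hand back the old array.
 * KVM additionally waits for an SRCU grace period before the old
 * array is reused or freed; that wait is elided here. */
static struct memslots *publish(struct memslots *next)
{
	return atomic_exchange_explicit(&live, next, memory_order_acq_rel);
}

/* shape of kvm_set_memslot() for DELETE/MOVE */
static int set_memslot(void)
{
	struct memslots *work = malloc(sizeof(*work));

	if (!work)
		return -1;
	*work = *atomic_load(&live);	/* kvm_dup_memslots() */

	work->invalid = 1;	/* flag the victim KVM_MEMSLOT_INVALID */
	work = publish(work);	/* 1st install: faults now see "invalid";
				 * the retired array becomes the working copy */

	/* ...flush shadow state for the dying slot here... */

	*work = *atomic_load(&live);	/* kvm_copy_memslots(): resync */
	/* ...update_memslots(work, new, change) applies the real change... */
	free(publish(work));	/* 2nd install; drop the retired copy */
	return 0;
}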
1802 struct kvm_memslots *slots; in kvm_get_dirty_log() local
1819 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log()
1820 *memslot = id_to_memslot(slots, id); in kvm_get_dirty_log()
1864 struct kvm_memslots *slots; in kvm_get_dirty_log_protect() local
1881 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log_protect()
1882 memslot = id_to_memslot(slots, id); in kvm_get_dirty_log_protect()
1975 struct kvm_memslots *slots; in kvm_clear_dirty_log_protect() local
1996 slots = __kvm_memslots(kvm, as_id); in kvm_clear_dirty_log_protect()
1997 memslot = id_to_memslot(slots, id); in kvm_clear_dirty_log_protect()
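All three dirty-log paths share the same prologue: split the user-supplied slot number into an address-space id and a slot id (in this era the high 16 bits carry as_id), validate both, then fetch the memslot and operate on its dirty bitmap. A sketch of that decode step (constants are stand-ins for KVM_ADDRESS_SPACE_NUM and KVM_USER_MEM_SLOTS):

#include <stdint.h>
#include <errno.h>

#define NR_AS 2
#define USER_MEM_SLOTS 32

/* prologue shared by kvm_get_dirty_log{,_protect}() and
 * kvm_clear_dirty_log_protect(): unpack and validate the slot field */
static int decode_dirty_log_slot(uint32_t slot_field, int *as_id, int *id)
{
	*as_id = slot_field >> 16;	/* address space in the high bits */
	*id = (uint16_t)slot_field;	/* slot id in the low 16 bits */
	if (*as_id >= NR_AS || *id >= USER_MEM_SLOTS)
		return -EINVAL;
	return 0;			/* then: slots = __kvm_memslots(kvm, as_id);
					 * memslot = id_to_memslot(slots, id); */
}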
2070 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); in kvm_vcpu_gfn_to_memslot() local
2074 slot = try_get_memslot(slots, vcpu->last_used_slot, gfn); in kvm_vcpu_gfn_to_memslot()
2083 slot = search_memslots(slots, gfn, &slot_index); in kvm_vcpu_gfn_to_memslot()
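kvm_vcpu_gfn_to_memslot() first retries the per-vCPU last_used_slot hint via try_get_memslot() and only then falls back to search_memslots(), a binary search over the array that the move passes above keep sorted by descending base_gfn. A sketch of that search; the descending order is why "gfn >= base_gfn" tightens the upper bound:

struct memslot { unsigned long base_gfn, npages; };

/* binary search over an array sorted by descending base_gfn, in the
 * style of search_memslots(); returns the index or -1 */
static int find_slot(const struct memslot *s, int used, unsigned long gfn)
{
	int lo = 0, hi = used;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (gfn >= s[mid].base_gfn)
			hi = mid;	/* candidate is at mid or earlier */
		else
			lo = mid + 1;
	}
	if (lo < used && gfn >= s[lo].base_gfn &&
	    gfn < s[lo].base_gfn + s[lo].npages)
		return lo;		/* gfn falls inside this slot */
	return -1;
}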
2568 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn, in __kvm_map_gfn() argument
2576 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn); in __kvm_map_gfn()
2577 u64 gen = slots->generation; in __kvm_map_gfn()
2931 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, in __kvm_gfn_to_hva_cache_init() argument
2942 ghc->generation = slots->generation; in __kvm_gfn_to_hva_cache_init()
2954 ghc->memslot = __gfn_to_memslot(slots, start_gfn); in __kvm_gfn_to_hva_cache_init()
2975 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_gfn_to_hva_cache_init() local
2976 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); in kvm_gfn_to_hva_cache_init()
2984 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_write_guest_offset_cached() local
2990 if (slots->generation != ghc->generation) { in kvm_write_guest_offset_cached()
2991 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) in kvm_write_guest_offset_cached()
3021 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_read_guest_offset_cached() local
3027 if (slots->generation != ghc->generation) { in kvm_read_guest_offset_cached()
3028 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) in kvm_read_guest_offset_cached()
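The cached-access paths all hinge on the generation stamped at init time: __kvm_gfn_to_hva_cache_init() records slots->generation in the cache, and kvm_write/read_guest_offset_cached() re-initialize whenever the live generation differs (__kvm_map_gfn() snapshots the same counter for its own validity check). Because install_new_memslots() publishes intermediate generations with the in-progress bit set, a cache built mid-update can never pass this comparison against a settled value. A sketch of the revalidate-then-use pattern; translate_gpa() is a hypothetical helper standing in for the memslot lookup and gfn-to-hva translation:

#include <stdint.h>
#include <stddef.h>

struct memslots { uint64_t generation; /* ... */ };

struct hva_cache {		/* analogue of struct gfn_to_hva_cache */
	uint64_t gpa;
	uint64_t generation;	/* memslots generation the cache was built on */
	void *hva;		/* cached translation */
};

/* hypothetical: memslot lookup + gfn_to_hva, NULL on failure */
extern void *translate_gpa(const struct memslots *s, uint64_t gpa);

static int cache_init(const struct memslots *s, struct hva_cache *c,
		      uint64_t gpa)
{
	c->gpa = gpa;
	c->generation = s->generation;	/* stamp the snapshot used */
	c->hva = translate_gpa(s, gpa);
	return c->hva ? 0 : -1;
}

/* kvm_{read,write}_guest_offset_cached() pattern: revalidate, then use */
static int cache_use(const struct memslots *s, struct hva_cache *c)
{
	if (s->generation != c->generation &&	/* memslots changed under us */
	    cache_init(s, c, c->gpa))
		return -1;
	/* ...memcpy through c->hva... */
	return 0;
}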