Lines Matching full:vm

262 static void virtio_mem_retry(struct virtio_mem *vm);
268 static int register_virtio_mem_device(struct virtio_mem *vm) in register_virtio_mem_device() argument
277 list_add_rcu(&vm->next, &virtio_mem_devices); in register_virtio_mem_device()
287 static void unregister_virtio_mem_device(struct virtio_mem *vm) in unregister_virtio_mem_device() argument
291 list_del_rcu(&vm->next); in unregister_virtio_mem_device()
318 static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm, in virtio_mem_phys_to_bb_id() argument
321 return addr / vm->bbm.bb_size; in virtio_mem_phys_to_bb_id()
327 static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm, in virtio_mem_bb_id_to_phys() argument
330 return bb_id * vm->bbm.bb_size; in virtio_mem_bb_id_to_phys()
336 static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm, in virtio_mem_phys_to_sb_id() argument
342 return (addr - mb_addr) / vm->sbm.sb_size; in virtio_mem_phys_to_sb_id()
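The three helpers above reduce to plain integer division: a big block ID is addr / bb_size, and a subblock ID is the offset of addr within its Linux memory block divided by sb_size. The following standalone sketch (not driver code) replays that arithmetic with hypothetical block sizes and a hypothetical sample address:

    /* Sketch only: the HYP_* sizes and the sample address are hypothetical. */
    #include <stdint.h>
    #include <stdio.h>

    #define HYP_BB_SIZE (2048ULL << 20)  /* assumed big block size: 2 GiB */
    #define HYP_MB_SIZE (128ULL << 20)   /* assumed memory block size: 128 MiB */
    #define HYP_SB_SIZE (4ULL << 20)     /* assumed subblock size: 4 MiB */

    int main(void)
    {
            uint64_t addr = (5ULL << 30) + (6ULL << 20);          /* 5 GiB + 6 MiB */
            uint64_t mb_addr = addr / HYP_MB_SIZE * HYP_MB_SIZE;  /* start of containing memory block */

            /* bb_id = addr / bb_size, as in virtio_mem_phys_to_bb_id() */
            printf("bb_id = %llu\n", (unsigned long long)(addr / HYP_BB_SIZE));
            /* sb_id = (addr - mb_addr) / sb_size, as in virtio_mem_phys_to_sb_id() */
            printf("sb_id = %llu\n", (unsigned long long)((addr - mb_addr) / HYP_SB_SIZE));
            return 0;
    }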
348 static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm, in virtio_mem_bbm_set_bb_state() argument
352 const unsigned long idx = bb_id - vm->bbm.first_bb_id; in virtio_mem_bbm_set_bb_state()
355 old_state = vm->bbm.bb_states[idx]; in virtio_mem_bbm_set_bb_state()
356 vm->bbm.bb_states[idx] = state; in virtio_mem_bbm_set_bb_state()
358 BUG_ON(vm->bbm.bb_count[old_state] == 0); in virtio_mem_bbm_set_bb_state()
359 vm->bbm.bb_count[old_state]--; in virtio_mem_bbm_set_bb_state()
360 vm->bbm.bb_count[state]++; in virtio_mem_bbm_set_bb_state()
366 static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm, in virtio_mem_bbm_get_bb_state() argument
369 return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id]; in virtio_mem_bbm_get_bb_state()
375 static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm) in virtio_mem_bbm_bb_states_prepare_next_bb() argument
377 unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id; in virtio_mem_bbm_bb_states_prepare_next_bb()
383 if (vm->bbm.bb_states && old_pages == new_pages) in virtio_mem_bbm_bb_states_prepare_next_bb()
390 mutex_lock(&vm->hotplug_mutex); in virtio_mem_bbm_bb_states_prepare_next_bb()
391 if (vm->bbm.bb_states) in virtio_mem_bbm_bb_states_prepare_next_bb()
392 memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE); in virtio_mem_bbm_bb_states_prepare_next_bb()
393 vfree(vm->bbm.bb_states); in virtio_mem_bbm_bb_states_prepare_next_bb()
394 vm->bbm.bb_states = new_array; in virtio_mem_bbm_bb_states_prepare_next_bb()
395 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_bbm_bb_states_prepare_next_bb()
401 for (_bb_id = _vm->bbm.first_bb_id; \
402 _bb_id < _vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
407 for (_bb_id = _vm->bbm.next_bb_id - 1; \
408 _bb_id >= _vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
415 static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm, in virtio_mem_sbm_set_mb_state() argument
418 const unsigned long idx = mb_id - vm->sbm.first_mb_id; in virtio_mem_sbm_set_mb_state()
421 old_state = vm->sbm.mb_states[idx]; in virtio_mem_sbm_set_mb_state()
422 vm->sbm.mb_states[idx] = state; in virtio_mem_sbm_set_mb_state()
424 BUG_ON(vm->sbm.mb_count[old_state] == 0); in virtio_mem_sbm_set_mb_state()
425 vm->sbm.mb_count[old_state]--; in virtio_mem_sbm_set_mb_state()
426 vm->sbm.mb_count[state]++; in virtio_mem_sbm_set_mb_state()
432 static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm, in virtio_mem_sbm_get_mb_state() argument
435 const unsigned long idx = mb_id - vm->sbm.first_mb_id; in virtio_mem_sbm_get_mb_state()
437 return vm->sbm.mb_states[idx]; in virtio_mem_sbm_get_mb_state()
443 static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm) in virtio_mem_sbm_mb_states_prepare_next_mb() argument
445 int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id); in virtio_mem_sbm_mb_states_prepare_next_mb()
446 int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1); in virtio_mem_sbm_mb_states_prepare_next_mb()
449 if (vm->sbm.mb_states && old_pages == new_pages) in virtio_mem_sbm_mb_states_prepare_next_mb()
456 mutex_lock(&vm->hotplug_mutex); in virtio_mem_sbm_mb_states_prepare_next_mb()
457 if (vm->sbm.mb_states) in virtio_mem_sbm_mb_states_prepare_next_mb()
458 memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE); in virtio_mem_sbm_mb_states_prepare_next_mb()
459 vfree(vm->sbm.mb_states); in virtio_mem_sbm_mb_states_prepare_next_mb()
460 vm->sbm.mb_states = new_array; in virtio_mem_sbm_mb_states_prepare_next_mb()
461 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sbm_mb_states_prepare_next_mb()
482 static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm, in virtio_mem_sbm_sb_state_bit_nr() argument
485 return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id; in virtio_mem_sbm_sb_state_bit_nr()
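virtio_mem_sbm_sb_state_bit_nr() flattens (memory block, subblock) coordinates into a single bit index, so each memory block owns a contiguous run of sbs_per_mb bits in vm->sbm.sb_states. A minimal standalone sketch of that indexing, using hypothetical values for first_mb_id and sbs_per_mb:

    /* Sketch only: the parameters below are hypothetical, not taken from a device. */
    #include <stdio.h>

    static int sb_state_bit_nr(unsigned long mb_id, unsigned long first_mb_id,
                               int sbs_per_mb, int sb_id)
    {
            /* same formula as virtio_mem_sbm_sb_state_bit_nr() */
            return (mb_id - first_mb_id) * sbs_per_mb + sb_id;
    }

    int main(void)
    {
            /* assume 32 subblocks per memory block and a first tracked block id of 512 */
            printf("%d\n", sb_state_bit_nr(514, 512, 32, 5)); /* (514 - 512) * 32 + 5 = 69 */
            return 0;
    }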
493 static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm, in virtio_mem_sbm_set_sb_plugged() argument
497 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id); in virtio_mem_sbm_set_sb_plugged()
499 __bitmap_set(vm->sbm.sb_states, bit, count); in virtio_mem_sbm_set_sb_plugged()
507 static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm, in virtio_mem_sbm_set_sb_unplugged() argument
511 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id); in virtio_mem_sbm_set_sb_unplugged()
513 __bitmap_clear(vm->sbm.sb_states, bit, count); in virtio_mem_sbm_set_sb_unplugged()
519 static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm, in virtio_mem_sbm_test_sb_plugged() argument
523 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id); in virtio_mem_sbm_test_sb_plugged()
526 return test_bit(bit, vm->sbm.sb_states); in virtio_mem_sbm_test_sb_plugged()
529 return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >= in virtio_mem_sbm_test_sb_plugged()
536 static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm, in virtio_mem_sbm_test_sb_unplugged() argument
540 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id); in virtio_mem_sbm_test_sb_unplugged()
543 return find_next_bit(vm->sbm.sb_states, bit + count, bit) >= in virtio_mem_sbm_test_sb_unplugged()
548 * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there is
551 static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm, in virtio_mem_sbm_first_unplugged_sb() argument
554 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0); in virtio_mem_sbm_first_unplugged_sb()
556 return find_next_zero_bit(vm->sbm.sb_states, in virtio_mem_sbm_first_unplugged_sb()
557 bit + vm->sbm.sbs_per_mb, bit) - bit; in virtio_mem_sbm_first_unplugged_sb()
563 static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm) in virtio_mem_sbm_sb_states_prepare_next_mb() argument
565 const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id; in virtio_mem_sbm_sb_states_prepare_next_mb()
566 const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb; in virtio_mem_sbm_sb_states_prepare_next_mb()
567 const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb; in virtio_mem_sbm_sb_states_prepare_next_mb()
572 if (vm->sbm.sb_states && old_pages == new_pages) in virtio_mem_sbm_sb_states_prepare_next_mb()
579 mutex_lock(&vm->hotplug_mutex); in virtio_mem_sbm_sb_states_prepare_next_mb()
581 memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE); in virtio_mem_sbm_sb_states_prepare_next_mb()
583 old_bitmap = vm->sbm.sb_states; in virtio_mem_sbm_sb_states_prepare_next_mb()
584 vm->sbm.sb_states = new_bitmap; in virtio_mem_sbm_sb_states_prepare_next_mb()
585 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sbm_sb_states_prepare_next_mb()
595 static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size) in virtio_mem_could_add_memory() argument
597 if (WARN_ON_ONCE(size > vm->offline_threshold)) in virtio_mem_could_add_memory()
600 return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold; in virtio_mem_could_add_memory()
606 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
611 static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr, in virtio_mem_add_memory() argument
620 if (!vm->resource_name) { in virtio_mem_add_memory()
621 vm->resource_name = kstrdup_const("System RAM (virtio_mem)", in virtio_mem_add_memory()
623 if (!vm->resource_name) in virtio_mem_add_memory()
627 dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr, in virtio_mem_add_memory()
630 atomic64_add(size, &vm->offline_size); in virtio_mem_add_memory()
631 rc = add_memory_driver_managed(vm->mgid, addr, size, vm->resource_name, in virtio_mem_add_memory()
634 atomic64_sub(size, &vm->offline_size); in virtio_mem_add_memory()
635 dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc); in virtio_mem_add_memory()
647 static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id) in virtio_mem_sbm_add_mb() argument
652 return virtio_mem_add_memory(vm, addr, size); in virtio_mem_sbm_add_mb()
658 static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id) in virtio_mem_bbm_add_bb() argument
660 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id); in virtio_mem_bbm_add_bb()
661 const uint64_t size = vm->bbm.bb_size; in virtio_mem_bbm_add_bb()
663 return virtio_mem_add_memory(vm, addr, size); in virtio_mem_bbm_add_bb()
670 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
675 static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr, in virtio_mem_remove_memory() argument
680 dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr, in virtio_mem_remove_memory()
684 atomic64_sub(size, &vm->offline_size); in virtio_mem_remove_memory()
689 virtio_mem_retry(vm); in virtio_mem_remove_memory()
691 dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc); in virtio_mem_remove_memory()
699 static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id) in virtio_mem_sbm_remove_mb() argument
704 return virtio_mem_remove_memory(vm, addr, size); in virtio_mem_sbm_remove_mb()
710 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
715 static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm, in virtio_mem_offline_and_remove_memory() argument
721 dev_dbg(&vm->vdev->dev, in virtio_mem_offline_and_remove_memory()
727 atomic64_sub(size, &vm->offline_size); in virtio_mem_offline_and_remove_memory()
732 virtio_mem_retry(vm); in virtio_mem_offline_and_remove_memory()
734 dev_dbg(&vm->vdev->dev, in virtio_mem_offline_and_remove_memory()
744 static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm, in virtio_mem_sbm_offline_and_remove_mb() argument
750 return virtio_mem_offline_and_remove_memory(vm, addr, size); in virtio_mem_sbm_offline_and_remove_mb()
757 static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm, in virtio_mem_bbm_offline_and_remove_bb() argument
760 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id); in virtio_mem_bbm_offline_and_remove_bb()
761 const uint64_t size = vm->bbm.bb_size; in virtio_mem_bbm_offline_and_remove_bb()
763 return virtio_mem_offline_and_remove_memory(vm, addr, size); in virtio_mem_bbm_offline_and_remove_bb()
769 static void virtio_mem_retry(struct virtio_mem *vm) in virtio_mem_retry() argument
773 spin_lock_irqsave(&vm->removal_lock, flags); in virtio_mem_retry()
774 if (!vm->removing) in virtio_mem_retry()
775 queue_work(system_freezable_wq, &vm->wq); in virtio_mem_retry()
776 spin_unlock_irqrestore(&vm->removal_lock, flags); in virtio_mem_retry()
779 static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id) in virtio_mem_translate_node_id() argument
784 if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM)) in virtio_mem_translate_node_id()
794 static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start, in virtio_mem_overlaps_range() argument
797 return start < vm->addr + vm->region_size && vm->addr < start + size; in virtio_mem_overlaps_range()
804 static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start, in virtio_mem_contains_range() argument
807 return start >= vm->addr && start + size <= vm->addr + vm->region_size; in virtio_mem_contains_range()
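virtio_mem_overlaps_range() is the standard half-open interval intersection test (each range starts before the other one ends), while virtio_mem_contains_range() requires both endpoints of [start, start + size) to fall inside the device region. A standalone sketch with hypothetical device address and region size:

    /* Sketch only: dev_addr and dev_region_size are hypothetical values. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static const uint64_t dev_addr = 0x100000000ULL;        /* assumed: 4 GiB */
    static const uint64_t dev_region_size = 0x40000000ULL;  /* assumed: 1 GiB */

    static bool overlaps(uint64_t start, uint64_t size)
    {
            /* mirrors virtio_mem_overlaps_range() */
            return start < dev_addr + dev_region_size && dev_addr < start + size;
    }

    static bool contains(uint64_t start, uint64_t size)
    {
            /* mirrors virtio_mem_contains_range() */
            return start >= dev_addr && start + size <= dev_addr + dev_region_size;
    }

    int main(void)
    {
            printf("overlaps: %d, contains: %d\n",
                   overlaps(0xFFF00000ULL, 0x200000),   /* straddles the region start */
                   contains(0x100000000ULL, 0x1000));   /* fully inside the region */
            return 0;
    }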
810 static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm, in virtio_mem_sbm_notify_going_online() argument
813 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) { in virtio_mem_sbm_notify_going_online()
820 dev_warn_ratelimited(&vm->vdev->dev, in virtio_mem_sbm_notify_going_online()
825 static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm, in virtio_mem_sbm_notify_offline() argument
828 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) { in virtio_mem_sbm_notify_offline()
831 virtio_mem_sbm_set_mb_state(vm, mb_id, in virtio_mem_sbm_notify_offline()
836 virtio_mem_sbm_set_mb_state(vm, mb_id, in virtio_mem_sbm_notify_offline()
845 static void virtio_mem_sbm_notify_online(struct virtio_mem *vm, in virtio_mem_sbm_notify_online() argument
853 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) { in virtio_mem_sbm_notify_online()
868 virtio_mem_sbm_set_mb_state(vm, mb_id, new_state); in virtio_mem_sbm_notify_online()
871 static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm, in virtio_mem_sbm_notify_going_offline() argument
874 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size); in virtio_mem_sbm_notify_going_offline()
878 for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) { in virtio_mem_sbm_notify_going_offline()
879 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1)) in virtio_mem_sbm_notify_going_offline()
882 sb_id * vm->sbm.sb_size); in virtio_mem_sbm_notify_going_offline()
887 static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm, in virtio_mem_sbm_notify_cancel_offline() argument
890 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size); in virtio_mem_sbm_notify_cancel_offline()
894 for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) { in virtio_mem_sbm_notify_cancel_offline()
895 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1)) in virtio_mem_sbm_notify_cancel_offline()
898 sb_id * vm->sbm.sb_size); in virtio_mem_sbm_notify_cancel_offline()
903 static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm, in virtio_mem_bbm_notify_going_offline() argument
912 if (virtio_mem_bbm_get_bb_state(vm, bb_id) != in virtio_mem_bbm_notify_going_offline()
918 static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm, in virtio_mem_bbm_notify_cancel_offline() argument
923 if (virtio_mem_bbm_get_bb_state(vm, bb_id) != in virtio_mem_bbm_notify_cancel_offline()
937 struct virtio_mem *vm = container_of(nb, struct virtio_mem, in virtio_mem_memory_notifier_cb() local
945 if (!virtio_mem_overlaps_range(vm, start, size)) in virtio_mem_memory_notifier_cb()
948 if (vm->in_sbm) { in virtio_mem_memory_notifier_cb()
959 id = virtio_mem_phys_to_bb_id(vm, start); in virtio_mem_memory_notifier_cb()
966 if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1))) in virtio_mem_memory_notifier_cb()
980 mutex_lock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
981 if (vm->removing) { in virtio_mem_memory_notifier_cb()
983 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
986 vm->hotplug_active = true; in virtio_mem_memory_notifier_cb()
987 if (vm->in_sbm) in virtio_mem_memory_notifier_cb()
988 virtio_mem_sbm_notify_going_offline(vm, id); in virtio_mem_memory_notifier_cb()
990 virtio_mem_bbm_notify_going_offline(vm, id, in virtio_mem_memory_notifier_cb()
995 mutex_lock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
996 if (vm->removing) { in virtio_mem_memory_notifier_cb()
998 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
1001 vm->hotplug_active = true; in virtio_mem_memory_notifier_cb()
1002 if (vm->in_sbm) in virtio_mem_memory_notifier_cb()
1003 rc = virtio_mem_sbm_notify_going_online(vm, id); in virtio_mem_memory_notifier_cb()
1006 if (vm->in_sbm) in virtio_mem_memory_notifier_cb()
1007 virtio_mem_sbm_notify_offline(vm, id); in virtio_mem_memory_notifier_cb()
1009 atomic64_add(size, &vm->offline_size); in virtio_mem_memory_notifier_cb()
1015 virtio_mem_retry(vm); in virtio_mem_memory_notifier_cb()
1017 vm->hotplug_active = false; in virtio_mem_memory_notifier_cb()
1018 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
1021 if (vm->in_sbm) in virtio_mem_memory_notifier_cb()
1022 virtio_mem_sbm_notify_online(vm, id, mhp->start_pfn); in virtio_mem_memory_notifier_cb()
1024 atomic64_sub(size, &vm->offline_size); in virtio_mem_memory_notifier_cb()
1031 if (!atomic_read(&vm->wq_active) && in virtio_mem_memory_notifier_cb()
1032 virtio_mem_could_add_memory(vm, vm->offline_threshold / 2)) in virtio_mem_memory_notifier_cb()
1033 virtio_mem_retry(vm); in virtio_mem_memory_notifier_cb()
1035 vm->hotplug_active = false; in virtio_mem_memory_notifier_cb()
1036 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
1039 if (!vm->hotplug_active) in virtio_mem_memory_notifier_cb()
1041 if (vm->in_sbm) in virtio_mem_memory_notifier_cb()
1042 virtio_mem_sbm_notify_cancel_offline(vm, id); in virtio_mem_memory_notifier_cb()
1044 virtio_mem_bbm_notify_cancel_offline(vm, id, in virtio_mem_memory_notifier_cb()
1047 vm->hotplug_active = false; in virtio_mem_memory_notifier_cb()
1048 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
1051 if (!vm->hotplug_active) in virtio_mem_memory_notifier_cb()
1053 vm->hotplug_active = false; in virtio_mem_memory_notifier_cb()
1054 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
1220 struct virtio_mem *vm; in virtio_mem_online_page_cb() local
1224 list_for_each_entry_rcu(vm, &virtio_mem_devices, next) { in virtio_mem_online_page_cb()
1225 if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order))) in virtio_mem_online_page_cb()
1228 if (vm->in_sbm) { in virtio_mem_online_page_cb()
1235 sb_id = virtio_mem_phys_to_sb_id(vm, addr); in virtio_mem_online_page_cb()
1236 do_online = virtio_mem_sbm_test_sb_plugged(vm, id, in virtio_mem_online_page_cb()
1243 id = virtio_mem_phys_to_bb_id(vm, addr); in virtio_mem_online_page_cb()
1244 do_online = virtio_mem_bbm_get_bb_state(vm, id) != in virtio_mem_online_page_cb()
1268 static uint64_t virtio_mem_send_request(struct virtio_mem *vm, in virtio_mem_send_request() argument
1276 vm->req = *req; in virtio_mem_send_request()
1279 sg_init_one(&sg_req, &vm->req, sizeof(vm->req)); in virtio_mem_send_request()
1283 sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp)); in virtio_mem_send_request()
1286 rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL); in virtio_mem_send_request()
1290 virtqueue_kick(vm->vq); in virtio_mem_send_request()
1293 wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len)); in virtio_mem_send_request()
1295 return virtio16_to_cpu(vm->vdev, vm->resp.type); in virtio_mem_send_request()
1298 static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr, in virtio_mem_send_plug_request() argument
1301 const uint64_t nb_vm_blocks = size / vm->device_block_size; in virtio_mem_send_plug_request()
1303 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG), in virtio_mem_send_plug_request()
1304 .u.plug.addr = cpu_to_virtio64(vm->vdev, addr), in virtio_mem_send_plug_request()
1305 .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks), in virtio_mem_send_plug_request()
1309 if (atomic_read(&vm->config_changed)) in virtio_mem_send_plug_request()
1312 dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr, in virtio_mem_send_plug_request()
1315 switch (virtio_mem_send_request(vm, &req)) { in virtio_mem_send_plug_request()
1317 vm->plugged_size += size; in virtio_mem_send_plug_request()
1332 dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc); in virtio_mem_send_plug_request()
1336 static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr, in virtio_mem_send_unplug_request() argument
1339 const uint64_t nb_vm_blocks = size / vm->device_block_size; in virtio_mem_send_unplug_request()
1341 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG), in virtio_mem_send_unplug_request()
1342 .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr), in virtio_mem_send_unplug_request()
1343 .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks), in virtio_mem_send_unplug_request()
1347 if (atomic_read(&vm->config_changed)) in virtio_mem_send_unplug_request()
1350 dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr, in virtio_mem_send_unplug_request()
1353 switch (virtio_mem_send_request(vm, &req)) { in virtio_mem_send_unplug_request()
1355 vm->plugged_size -= size; in virtio_mem_send_unplug_request()
1367 dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc); in virtio_mem_send_unplug_request()
1371 static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm) in virtio_mem_send_unplug_all_request() argument
1374 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL), in virtio_mem_send_unplug_all_request()
1378 dev_dbg(&vm->vdev->dev, "unplugging all memory"); in virtio_mem_send_unplug_all_request()
1380 switch (virtio_mem_send_request(vm, &req)) { in virtio_mem_send_unplug_all_request()
1382 vm->unplug_all_required = false; in virtio_mem_send_unplug_all_request()
1383 vm->plugged_size = 0; in virtio_mem_send_unplug_all_request()
1385 atomic_set(&vm->config_changed, 1); in virtio_mem_send_unplug_all_request()
1394 dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc); in virtio_mem_send_unplug_all_request()
1402 static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id, in virtio_mem_sbm_plug_sb() argument
1406 sb_id * vm->sbm.sb_size; in virtio_mem_sbm_plug_sb()
1407 const uint64_t size = count * vm->sbm.sb_size; in virtio_mem_sbm_plug_sb()
1410 rc = virtio_mem_send_plug_request(vm, addr, size); in virtio_mem_sbm_plug_sb()
1412 virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count); in virtio_mem_sbm_plug_sb()
1420 static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id, in virtio_mem_sbm_unplug_sb() argument
1424 sb_id * vm->sbm.sb_size; in virtio_mem_sbm_unplug_sb()
1425 const uint64_t size = count * vm->sbm.sb_size; in virtio_mem_sbm_unplug_sb()
1428 rc = virtio_mem_send_unplug_request(vm, addr, size); in virtio_mem_sbm_unplug_sb()
1430 virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count); in virtio_mem_sbm_unplug_sb()
1439 static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id) in virtio_mem_bbm_unplug_bb() argument
1441 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id); in virtio_mem_bbm_unplug_bb()
1442 const uint64_t size = vm->bbm.bb_size; in virtio_mem_bbm_unplug_bb()
1444 return virtio_mem_send_unplug_request(vm, addr, size); in virtio_mem_bbm_unplug_bb()
1452 static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id) in virtio_mem_bbm_plug_bb() argument
1454 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id); in virtio_mem_bbm_plug_bb()
1455 const uint64_t size = vm->bbm.bb_size; in virtio_mem_bbm_plug_bb()
1457 return virtio_mem_send_plug_request(vm, addr, size); in virtio_mem_bbm_plug_bb()
1469 static int virtio_mem_sbm_unplug_any_sb_raw(struct virtio_mem *vm, in virtio_mem_sbm_unplug_any_sb_raw() argument
1475 sb_id = vm->sbm.sbs_per_mb - 1; in virtio_mem_sbm_unplug_any_sb_raw()
1479 virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1)) in virtio_mem_sbm_unplug_any_sb_raw()
1486 virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) { in virtio_mem_sbm_unplug_any_sb_raw()
1491 rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count); in virtio_mem_sbm_unplug_any_sb_raw()
1508 static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id) in virtio_mem_sbm_unplug_mb() argument
1510 uint64_t nb_sb = vm->sbm.sbs_per_mb; in virtio_mem_sbm_unplug_mb()
1512 return virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, &nb_sb); in virtio_mem_sbm_unplug_mb()
1518 static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm, in virtio_mem_sbm_prepare_next_mb() argument
1523 if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id) in virtio_mem_sbm_prepare_next_mb()
1527 rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm); in virtio_mem_sbm_prepare_next_mb()
1532 rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm); in virtio_mem_sbm_prepare_next_mb()
1536 vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++; in virtio_mem_sbm_prepare_next_mb()
1537 *mb_id = vm->sbm.next_mb_id++; in virtio_mem_sbm_prepare_next_mb()
1547 static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm, in virtio_mem_sbm_plug_and_add_mb() argument
1550 const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb); in virtio_mem_sbm_plug_and_add_mb()
1560 rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count); in virtio_mem_sbm_plug_and_add_mb()
1568 if (count == vm->sbm.sbs_per_mb) in virtio_mem_sbm_plug_and_add_mb()
1569 virtio_mem_sbm_set_mb_state(vm, mb_id, in virtio_mem_sbm_plug_and_add_mb()
1572 virtio_mem_sbm_set_mb_state(vm, mb_id, in virtio_mem_sbm_plug_and_add_mb()
1576 rc = virtio_mem_sbm_add_mb(vm, mb_id); in virtio_mem_sbm_plug_and_add_mb()
1580 if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count)) in virtio_mem_sbm_plug_and_add_mb()
1582 virtio_mem_sbm_set_mb_state(vm, mb_id, new_state); in virtio_mem_sbm_plug_and_add_mb()
1598 static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm, in virtio_mem_sbm_plug_any_sb() argument
1601 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id); in virtio_mem_sbm_plug_any_sb()
1610 sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id); in virtio_mem_sbm_plug_any_sb()
1611 if (sb_id >= vm->sbm.sbs_per_mb) in virtio_mem_sbm_plug_any_sb()
1615 sb_id + count < vm->sbm.sbs_per_mb && in virtio_mem_sbm_plug_any_sb()
1616 !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1)) in virtio_mem_sbm_plug_any_sb()
1619 rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count); in virtio_mem_sbm_plug_any_sb()
1628 sb_id * vm->sbm.sb_size); in virtio_mem_sbm_plug_any_sb()
1629 nr_pages = PFN_DOWN(count * vm->sbm.sb_size); in virtio_mem_sbm_plug_any_sb()
1633 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) in virtio_mem_sbm_plug_any_sb()
1634 virtio_mem_sbm_set_mb_state(vm, mb_id, old_state - 1); in virtio_mem_sbm_plug_any_sb()
1639 static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff) in virtio_mem_sbm_plug_request() argument
1646 uint64_t nb_sb = diff / vm->sbm.sb_size; in virtio_mem_sbm_plug_request()
1654 mutex_lock(&vm->hotplug_mutex); in virtio_mem_sbm_plug_request()
1657 virtio_mem_sbm_for_each_mb(vm, mb_id, mb_states[i]) { in virtio_mem_sbm_plug_request()
1658 rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb); in virtio_mem_sbm_plug_request()
1669 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sbm_plug_request()
1672 virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) { in virtio_mem_sbm_plug_request()
1673 if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes())) in virtio_mem_sbm_plug_request()
1676 rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb); in virtio_mem_sbm_plug_request()
1684 if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes())) in virtio_mem_sbm_plug_request()
1687 rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id); in virtio_mem_sbm_plug_request()
1690 rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb); in virtio_mem_sbm_plug_request()
1698 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sbm_plug_request()
1707 static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm, in virtio_mem_bbm_plug_and_add_bb() argument
1712 if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) != in virtio_mem_bbm_plug_and_add_bb()
1716 rc = virtio_mem_bbm_plug_bb(vm, bb_id); in virtio_mem_bbm_plug_and_add_bb()
1719 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED); in virtio_mem_bbm_plug_and_add_bb()
1721 rc = virtio_mem_bbm_add_bb(vm, bb_id); in virtio_mem_bbm_plug_and_add_bb()
1723 if (!virtio_mem_bbm_unplug_bb(vm, bb_id)) in virtio_mem_bbm_plug_and_add_bb()
1724 virtio_mem_bbm_set_bb_state(vm, bb_id, in virtio_mem_bbm_plug_and_add_bb()
1728 virtio_mem_bbm_set_bb_state(vm, bb_id, in virtio_mem_bbm_plug_and_add_bb()
1738 static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm, in virtio_mem_bbm_prepare_next_bb() argument
1743 if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id) in virtio_mem_bbm_prepare_next_bb()
1747 rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm); in virtio_mem_bbm_prepare_next_bb()
1751 vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++; in virtio_mem_bbm_prepare_next_bb()
1752 *bb_id = vm->bbm.next_bb_id; in virtio_mem_bbm_prepare_next_bb()
1753 vm->bbm.next_bb_id++; in virtio_mem_bbm_prepare_next_bb()
1757 static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff) in virtio_mem_bbm_plug_request() argument
1759 uint64_t nb_bb = diff / vm->bbm.bb_size; in virtio_mem_bbm_plug_request()
1767 virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) { in virtio_mem_bbm_plug_request()
1768 if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size)) in virtio_mem_bbm_plug_request()
1771 rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id); in virtio_mem_bbm_plug_request()
1781 if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size)) in virtio_mem_bbm_plug_request()
1784 rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id); in virtio_mem_bbm_plug_request()
1787 rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id); in virtio_mem_bbm_plug_request()
1801 static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff) in virtio_mem_plug_request() argument
1803 if (vm->in_sbm) in virtio_mem_plug_request()
1804 return virtio_mem_sbm_plug_request(vm, diff); in virtio_mem_plug_request()
1805 return virtio_mem_bbm_plug_request(vm, diff); in virtio_mem_plug_request()
1817 static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm, in virtio_mem_sbm_unplug_any_sb_offline() argument
1823 rc = virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, nb_sb); in virtio_mem_sbm_unplug_any_sb_offline()
1826 if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) in virtio_mem_sbm_unplug_any_sb_offline()
1827 virtio_mem_sbm_set_mb_state(vm, mb_id, in virtio_mem_sbm_unplug_any_sb_offline()
1832 if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) { in virtio_mem_sbm_unplug_any_sb_offline()
1839 virtio_mem_sbm_set_mb_state(vm, mb_id, in virtio_mem_sbm_unplug_any_sb_offline()
1842 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sbm_unplug_any_sb_offline()
1843 rc = virtio_mem_sbm_remove_mb(vm, mb_id); in virtio_mem_sbm_unplug_any_sb_offline()
1845 mutex_lock(&vm->hotplug_mutex); in virtio_mem_sbm_unplug_any_sb_offline()
1855 static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm, in virtio_mem_sbm_unplug_sb_online() argument
1859 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count; in virtio_mem_sbm_unplug_sb_online()
1860 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id); in virtio_mem_sbm_unplug_sb_online()
1865 sb_id * vm->sbm.sb_size); in virtio_mem_sbm_unplug_sb_online()
1872 rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count); in virtio_mem_sbm_unplug_sb_online()
1881 virtio_mem_sbm_set_mb_state(vm, mb_id, in virtio_mem_sbm_unplug_sb_online()
1885 virtio_mem_sbm_set_mb_state(vm, mb_id, in virtio_mem_sbm_unplug_sb_online()
1903 static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm, in virtio_mem_sbm_unplug_any_sb_online() argument
1910 if (*nb_sb >= vm->sbm.sbs_per_mb && in virtio_mem_sbm_unplug_any_sb_online()
1911 virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) { in virtio_mem_sbm_unplug_any_sb_online()
1912 rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0, in virtio_mem_sbm_unplug_any_sb_online()
1913 vm->sbm.sbs_per_mb); in virtio_mem_sbm_unplug_any_sb_online()
1915 *nb_sb -= vm->sbm.sbs_per_mb; in virtio_mem_sbm_unplug_any_sb_online()
1922 for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) { in virtio_mem_sbm_unplug_any_sb_online()
1925 !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1)) in virtio_mem_sbm_unplug_any_sb_online()
1930 rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1); in virtio_mem_sbm_unplug_any_sb_online()
1944 if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) { in virtio_mem_sbm_unplug_any_sb_online()
1945 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sbm_unplug_any_sb_online()
1946 rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id); in virtio_mem_sbm_unplug_any_sb_online()
1947 mutex_lock(&vm->hotplug_mutex); in virtio_mem_sbm_unplug_any_sb_online()
1949 virtio_mem_sbm_set_mb_state(vm, mb_id, in virtio_mem_sbm_unplug_any_sb_online()
1968 static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm, in virtio_mem_sbm_unplug_any_sb() argument
1972 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id); in virtio_mem_sbm_unplug_any_sb()
1979 return virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, nb_sb); in virtio_mem_sbm_unplug_any_sb()
1982 return virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, nb_sb); in virtio_mem_sbm_unplug_any_sb()
1987 static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff) in virtio_mem_sbm_unplug_request() argument
1997 uint64_t nb_sb = diff / vm->sbm.sb_size; in virtio_mem_sbm_unplug_request()
2009 mutex_lock(&vm->hotplug_mutex); in virtio_mem_sbm_unplug_request()
2019 virtio_mem_sbm_for_each_mb_rev(vm, mb_id, mb_states[i]) { in virtio_mem_sbm_unplug_request()
2020 rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb); in virtio_mem_sbm_unplug_request()
2023 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sbm_unplug_request()
2025 mutex_lock(&vm->hotplug_mutex); in virtio_mem_sbm_unplug_request()
2028 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sbm_unplug_request()
2033 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sbm_unplug_request()
2036 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sbm_unplug_request()
2047 static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm, in virtio_mem_bbm_offline_remove_and_unplug_bb() argument
2050 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id)); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2051 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2057 if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) != in virtio_mem_bbm_offline_remove_and_unplug_bb()
2068 mutex_lock(&vm->hotplug_mutex); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2069 virtio_mem_bbm_set_bb_state(vm, bb_id, in virtio_mem_bbm_offline_remove_and_unplug_bb()
2083 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2086 rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2089 mutex_lock(&vm->hotplug_mutex); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2095 rc = virtio_mem_bbm_unplug_bb(vm, bb_id); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2097 virtio_mem_bbm_set_bb_state(vm, bb_id, in virtio_mem_bbm_offline_remove_and_unplug_bb()
2100 virtio_mem_bbm_set_bb_state(vm, bb_id, in virtio_mem_bbm_offline_remove_and_unplug_bb()
2111 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2112 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2119 static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm, in virtio_mem_bbm_bb_is_offline() argument
2122 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id)); in virtio_mem_bbm_bb_is_offline()
2123 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size); in virtio_mem_bbm_bb_is_offline()
2138 static bool virtio_mem_bbm_bb_is_movable(struct virtio_mem *vm, in virtio_mem_bbm_bb_is_movable() argument
2141 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id)); in virtio_mem_bbm_bb_is_movable()
2142 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size); in virtio_mem_bbm_bb_is_movable()
2158 static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff) in virtio_mem_bbm_unplug_request() argument
2160 uint64_t nb_bb = diff / vm->bbm.bb_size; in virtio_mem_bbm_unplug_request()
2172 virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) { in virtio_mem_bbm_unplug_request()
2179 if (i == 0 && !virtio_mem_bbm_bb_is_offline(vm, bb_id)) in virtio_mem_bbm_unplug_request()
2181 if (i == 1 && !virtio_mem_bbm_bb_is_movable(vm, bb_id)) in virtio_mem_bbm_unplug_request()
2183 rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id); in virtio_mem_bbm_unplug_request()
2201 static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff) in virtio_mem_unplug_request() argument
2203 if (vm->in_sbm) in virtio_mem_unplug_request()
2204 return virtio_mem_sbm_unplug_request(vm, diff); in virtio_mem_unplug_request()
2205 return virtio_mem_bbm_unplug_request(vm, diff); in virtio_mem_unplug_request()
2212 static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm) in virtio_mem_unplug_pending_mb() argument
2217 if (!vm->in_sbm) { in virtio_mem_unplug_pending_mb()
2218 virtio_mem_bbm_for_each_bb(vm, id, in virtio_mem_unplug_pending_mb()
2220 rc = virtio_mem_bbm_unplug_bb(vm, id); in virtio_mem_unplug_pending_mb()
2223 virtio_mem_bbm_set_bb_state(vm, id, in virtio_mem_unplug_pending_mb()
2229 virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) { in virtio_mem_unplug_pending_mb()
2230 rc = virtio_mem_sbm_unplug_mb(vm, id); in virtio_mem_unplug_pending_mb()
2233 virtio_mem_sbm_set_mb_state(vm, id, in virtio_mem_unplug_pending_mb()
2243 static void virtio_mem_refresh_config(struct virtio_mem *vm) in virtio_mem_refresh_config() argument
2249 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size, in virtio_mem_refresh_config()
2251 if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size)) in virtio_mem_refresh_config()
2252 vm->plugged_size = new_plugged_size; in virtio_mem_refresh_config()
2255 virtio_cread_le(vm->vdev, struct virtio_mem_config, in virtio_mem_refresh_config()
2257 end_addr = min(vm->addr + usable_region_size - 1, in virtio_mem_refresh_config()
2260 if (vm->in_sbm) { in virtio_mem_refresh_config()
2261 vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr); in virtio_mem_refresh_config()
2263 vm->sbm.last_usable_mb_id--; in virtio_mem_refresh_config()
2265 vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm, in virtio_mem_refresh_config()
2267 if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size)) in virtio_mem_refresh_config()
2268 vm->bbm.last_usable_bb_id--; in virtio_mem_refresh_config()
2278 virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size, in virtio_mem_refresh_config()
2279 &vm->requested_size); in virtio_mem_refresh_config()
2281 dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size); in virtio_mem_refresh_config()
2282 dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size); in virtio_mem_refresh_config()
2290 struct virtio_mem *vm = container_of(work, struct virtio_mem, wq); in virtio_mem_run_wq() local
2294 hrtimer_cancel(&vm->retry_timer); in virtio_mem_run_wq()
2296 if (vm->broken) in virtio_mem_run_wq()
2299 atomic_set(&vm->wq_active, 1); in virtio_mem_run_wq()
2304 if (unlikely(vm->unplug_all_required)) in virtio_mem_run_wq()
2305 rc = virtio_mem_send_unplug_all_request(vm); in virtio_mem_run_wq()
2307 if (atomic_read(&vm->config_changed)) { in virtio_mem_run_wq()
2308 atomic_set(&vm->config_changed, 0); in virtio_mem_run_wq()
2309 virtio_mem_refresh_config(vm); in virtio_mem_run_wq()
2314 rc = virtio_mem_unplug_pending_mb(vm); in virtio_mem_run_wq()
2316 if (!rc && vm->requested_size != vm->plugged_size) { in virtio_mem_run_wq()
2317 if (vm->requested_size > vm->plugged_size) { in virtio_mem_run_wq()
2318 diff = vm->requested_size - vm->plugged_size; in virtio_mem_run_wq()
2319 rc = virtio_mem_plug_request(vm, diff); in virtio_mem_run_wq()
2321 diff = vm->plugged_size - vm->requested_size; in virtio_mem_run_wq()
2322 rc = virtio_mem_unplug_request(vm, diff); in virtio_mem_run_wq()
2328 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS; in virtio_mem_run_wq()
2348 hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms), in virtio_mem_run_wq()
2356 dev_err(&vm->vdev->dev, in virtio_mem_run_wq()
2358 vm->broken = true; in virtio_mem_run_wq()
2361 atomic_set(&vm->wq_active, 0); in virtio_mem_run_wq()
2366 struct virtio_mem *vm = container_of(timer, struct virtio_mem, in virtio_mem_timer_expired() local
2369 virtio_mem_retry(vm); in virtio_mem_timer_expired()
2370 vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2, in virtio_mem_timer_expired()
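On each expiry the retry timer re-queues the workqueue via virtio_mem_retry() and doubles its interval, clamped to a maximum, so repeated failures back off exponentially. A standalone sketch of that progression with hypothetical minimum and maximum intervals:

    /* Sketch only: the min/max retry intervals below are hypothetical. */
    #include <stdio.h>

    int main(void)
    {
            const unsigned int min_ms = 50000, max_ms = 300000; /* assumed bounds */
            unsigned int ms = min_ms;
            int i;

            for (i = 0; i < 5; i++) {
                    printf("retry #%d after %u ms\n", i + 1, ms);
                    /* double, capped at max_ms, like the min_t() update above */
                    ms = ms * 2 < max_ms ? ms * 2 : max_ms;
            }
            return 0;
    }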
2377 struct virtio_mem *vm = vq->vdev->priv; in virtio_mem_handle_response() local
2379 wake_up(&vm->host_resp); in virtio_mem_handle_response()
2382 static int virtio_mem_init_vq(struct virtio_mem *vm) in virtio_mem_init_vq() argument
2386 vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response, in virtio_mem_init_vq()
2390 vm->vq = vq; in virtio_mem_init_vq()
2395 static int virtio_mem_init(struct virtio_mem *vm) in virtio_mem_init() argument
2401 if (!vm->vdev->config->get) { in virtio_mem_init()
2402 dev_err(&vm->vdev->dev, "config access disabled\n"); in virtio_mem_init()
2411 dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n"); in virtio_mem_init()
2416 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size, in virtio_mem_init()
2417 &vm->plugged_size); in virtio_mem_init()
2418 virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size, in virtio_mem_init()
2419 &vm->device_block_size); in virtio_mem_init()
2420 virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id, in virtio_mem_init()
2422 vm->nid = virtio_mem_translate_node_id(vm, node_id); in virtio_mem_init()
2423 virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr); in virtio_mem_init()
2424 virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size, in virtio_mem_init()
2425 &vm->region_size); in virtio_mem_init()
2428 if (vm->nid == NUMA_NO_NODE) in virtio_mem_init()
2429 vm->nid = memory_add_physaddr_to_nid(vm->addr); in virtio_mem_init()
2432 if (!IS_ALIGNED(vm->addr, memory_block_size_bytes())) in virtio_mem_init()
2433 dev_warn(&vm->vdev->dev, in virtio_mem_init()
2435 if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes())) in virtio_mem_init()
2436 dev_warn(&vm->vdev->dev, in virtio_mem_init()
2438 if (vm->addr < pluggable_range.start || in virtio_mem_init()
2439 vm->addr + vm->region_size - 1 > pluggable_range.end) in virtio_mem_init()
2440 dev_warn(&vm->vdev->dev, in virtio_mem_init()
2444 vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(), in virtio_mem_init()
2457 sb_size = max_t(uint64_t, vm->device_block_size, sb_size); in virtio_mem_init()
2461 vm->in_sbm = true; in virtio_mem_init()
2462 vm->sbm.sb_size = sb_size; in virtio_mem_init()
2463 vm->sbm.sbs_per_mb = memory_block_size_bytes() / in virtio_mem_init()
2464 vm->sbm.sb_size; in virtio_mem_init()
2467 addr = max_t(uint64_t, vm->addr, pluggable_range.start) + in virtio_mem_init()
2469 vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr); in virtio_mem_init()
2470 vm->sbm.next_mb_id = vm->sbm.first_mb_id; in virtio_mem_init()
2473 vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size, in virtio_mem_init()
2478 dev_warn(&vm->vdev->dev, in virtio_mem_init()
2480 } else if (bbm_block_size < vm->bbm.bb_size) { in virtio_mem_init()
2481 dev_warn(&vm->vdev->dev, in virtio_mem_init()
2484 vm->bbm.bb_size = bbm_block_size; in virtio_mem_init()
2489 addr = max_t(uint64_t, vm->addr, pluggable_range.start) + in virtio_mem_init()
2490 vm->bbm.bb_size - 1; in virtio_mem_init()
2491 vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr); in virtio_mem_init()
2492 vm->bbm.next_bb_id = vm->bbm.first_bb_id; in virtio_mem_init()
2495 vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size, in virtio_mem_init()
2496 vm->offline_threshold); in virtio_mem_init()
2499 dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr); in virtio_mem_init()
2500 dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size); in virtio_mem_init()
2501 dev_info(&vm->vdev->dev, "device block size: 0x%llx", in virtio_mem_init()
2502 (unsigned long long)vm->device_block_size); in virtio_mem_init()
2503 dev_info(&vm->vdev->dev, "memory block size: 0x%lx", in virtio_mem_init()
2505 if (vm->in_sbm) in virtio_mem_init()
2506 dev_info(&vm->vdev->dev, "subblock size: 0x%llx", in virtio_mem_init()
2507 (unsigned long long)vm->sbm.sb_size); in virtio_mem_init()
2509 dev_info(&vm->vdev->dev, "big block size: 0x%llx", in virtio_mem_init()
2510 (unsigned long long)vm->bbm.bb_size); in virtio_mem_init()
2511 if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA)) in virtio_mem_init()
2512 dev_info(&vm->vdev->dev, "nid: %d", vm->nid); in virtio_mem_init()
2517 static int virtio_mem_create_resource(struct virtio_mem *vm) in virtio_mem_create_resource() argument
2523 const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL); in virtio_mem_create_resource()
2528 vm->parent_resource = __request_mem_region(vm->addr, vm->region_size, in virtio_mem_create_resource()
2530 if (!vm->parent_resource) { in virtio_mem_create_resource()
2532 dev_warn(&vm->vdev->dev, "could not reserve device region\n"); in virtio_mem_create_resource()
2533 dev_info(&vm->vdev->dev, in virtio_mem_create_resource()
2539 vm->parent_resource->flags &= ~IORESOURCE_BUSY; in virtio_mem_create_resource()
2543 static void virtio_mem_delete_resource(struct virtio_mem *vm) in virtio_mem_delete_resource() argument
2547 if (!vm->parent_resource) in virtio_mem_delete_resource()
2550 name = vm->parent_resource->name; in virtio_mem_delete_resource()
2551 release_resource(vm->parent_resource); in virtio_mem_delete_resource()
2552 kfree(vm->parent_resource); in virtio_mem_delete_resource()
2554 vm->parent_resource = NULL; in virtio_mem_delete_resource()
2562 static bool virtio_mem_has_memory_added(struct virtio_mem *vm) in virtio_mem_has_memory_added() argument
2566 return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr, in virtio_mem_has_memory_added()
2567 vm->addr + vm->region_size, NULL, in virtio_mem_has_memory_added()
2573 struct virtio_mem *vm; in virtio_mem_probe() local
2580 vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL); in virtio_mem_probe()
2581 if (!vm) in virtio_mem_probe()
2584 init_waitqueue_head(&vm->host_resp); in virtio_mem_probe()
2585 vm->vdev = vdev; in virtio_mem_probe()
2586 INIT_WORK(&vm->wq, virtio_mem_run_wq); in virtio_mem_probe()
2587 mutex_init(&vm->hotplug_mutex); in virtio_mem_probe()
2588 INIT_LIST_HEAD(&vm->next); in virtio_mem_probe()
2589 spin_lock_init(&vm->removal_lock); in virtio_mem_probe()
2590 hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in virtio_mem_probe()
2591 vm->retry_timer.function = virtio_mem_timer_expired; in virtio_mem_probe()
2592 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS; in virtio_mem_probe()
2595 rc = virtio_mem_init_vq(vm); in virtio_mem_probe()
2600 rc = virtio_mem_init(vm); in virtio_mem_probe()
2605 rc = virtio_mem_create_resource(vm); in virtio_mem_probe()
2610 if (vm->in_sbm) in virtio_mem_probe()
2613 unit_pages = PHYS_PFN(vm->bbm.bb_size); in virtio_mem_probe()
2614 rc = memory_group_register_dynamic(vm->nid, unit_pages); in virtio_mem_probe()
2617 vm->mgid = rc; in virtio_mem_probe()
2624 if (vm->plugged_size) { in virtio_mem_probe()
2625 vm->unplug_all_required = true; in virtio_mem_probe()
2626 dev_info(&vm->vdev->dev, "unplugging all memory is required\n"); in virtio_mem_probe()
2630 vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb; in virtio_mem_probe()
2631 rc = register_memory_notifier(&vm->memory_notifier); in virtio_mem_probe()
2634 rc = register_virtio_mem_device(vm); in virtio_mem_probe()
2641 atomic_set(&vm->config_changed, 1); in virtio_mem_probe()
2642 queue_work(system_freezable_wq, &vm->wq); in virtio_mem_probe()
2646 unregister_memory_notifier(&vm->memory_notifier); in virtio_mem_probe()
2648 memory_group_unregister(vm->mgid); in virtio_mem_probe()
2650 virtio_mem_delete_resource(vm); in virtio_mem_probe()
2654 kfree(vm); in virtio_mem_probe()
2662 struct virtio_mem *vm = vdev->priv; in virtio_mem_remove() local
2670 mutex_lock(&vm->hotplug_mutex); in virtio_mem_remove()
2671 spin_lock_irq(&vm->removal_lock); in virtio_mem_remove()
2672 vm->removing = true; in virtio_mem_remove()
2673 spin_unlock_irq(&vm->removal_lock); in virtio_mem_remove()
2674 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_remove()
2677 cancel_work_sync(&vm->wq); in virtio_mem_remove()
2678 hrtimer_cancel(&vm->retry_timer); in virtio_mem_remove()
2680 if (vm->in_sbm) { in virtio_mem_remove()
2685 virtio_mem_sbm_for_each_mb(vm, mb_id, in virtio_mem_remove()
2687 rc = virtio_mem_sbm_remove_mb(vm, mb_id); in virtio_mem_remove()
2689 virtio_mem_sbm_set_mb_state(vm, mb_id, in virtio_mem_remove()
2700 unregister_virtio_mem_device(vm); in virtio_mem_remove()
2701 unregister_memory_notifier(&vm->memory_notifier); in virtio_mem_remove()
2708 if (virtio_mem_has_memory_added(vm)) { in virtio_mem_remove()
2711 virtio_mem_delete_resource(vm); in virtio_mem_remove()
2712 kfree_const(vm->resource_name); in virtio_mem_remove()
2713 memory_group_unregister(vm->mgid); in virtio_mem_remove()
2717 if (vm->in_sbm) { in virtio_mem_remove()
2718 vfree(vm->sbm.mb_states); in virtio_mem_remove()
2719 vfree(vm->sbm.sb_states); in virtio_mem_remove()
2721 vfree(vm->bbm.bb_states); in virtio_mem_remove()
2728 kfree(vm); in virtio_mem_remove()
2734 struct virtio_mem *vm = vdev->priv; in virtio_mem_config_changed() local
2736 atomic_set(&vm->config_changed, 1); in virtio_mem_config_changed()
2737 virtio_mem_retry(vm); in virtio_mem_config_changed()
2744 * When restarting the VM, all memory is usually unplugged. Don't in virtio_mem_freeze()