Lines matching full:vm (references to struct virtio_mem *vm in the Linux virtio-mem driver, drivers/virtio/virtio_mem.c)

168 static int register_virtio_mem_device(struct virtio_mem *vm) in register_virtio_mem_device() argument
177 list_add_rcu(&vm->next, &virtio_mem_devices); in register_virtio_mem_device()
187 static void unregister_virtio_mem_device(struct virtio_mem *vm) in unregister_virtio_mem_device() argument
191 list_del_rcu(&vm->next); in unregister_virtio_mem_device()
218 static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm, in virtio_mem_phys_to_sb_id() argument
224 return (addr - mb_addr) / vm->subblock_size; in virtio_mem_phys_to_sb_id()
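
The helper above maps a physical address to a subblock index inside its memory block by subtracting the block's start address and dividing by the subblock size. A minimal standalone sketch of that arithmetic; the sizes used here are illustrative assumptions, not values read from the device:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative sizes only; the real driver derives these at init time. */
    #define MB_SIZE (128ULL << 20)   /* memory block size: 128 MiB (assumed) */
    #define SB_SIZE (4ULL << 20)     /* subblock size: 4 MiB (assumed) */

    static unsigned long phys_to_mb_id(uint64_t addr)
    {
            return addr / MB_SIZE;
    }

    static unsigned long phys_to_sb_id(uint64_t addr)
    {
            const uint64_t mb_addr = phys_to_mb_id(addr) * MB_SIZE;

            return (addr - mb_addr) / SB_SIZE;
    }

    int main(void)
    {
            uint64_t addr = 0x140000000ULL + (6ULL << 20); /* 5 GiB + 6 MiB */

            /* prints mb_id=40 sb_id=1 with the assumed sizes above */
            printf("mb_id=%lu sb_id=%lu\n", phys_to_mb_id(addr), phys_to_sb_id(addr));
            return 0;
    }
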
230 static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id, in virtio_mem_mb_set_state() argument
233 const unsigned long idx = mb_id - vm->first_mb_id; in virtio_mem_mb_set_state()
236 old_state = vm->mb_state[idx]; in virtio_mem_mb_set_state()
237 vm->mb_state[idx] = state; in virtio_mem_mb_set_state()
239 BUG_ON(vm->nb_mb_state[old_state] == 0); in virtio_mem_mb_set_state()
240 vm->nb_mb_state[old_state]--; in virtio_mem_mb_set_state()
241 vm->nb_mb_state[state]++; in virtio_mem_mb_set_state()
247 static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm, in virtio_mem_mb_get_state() argument
250 const unsigned long idx = mb_id - vm->first_mb_id; in virtio_mem_mb_get_state()
252 return vm->mb_state[idx]; in virtio_mem_mb_get_state()
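
These two helpers treat the per-memory-block state as a plain byte array indexed by mb_id - first_mb_id, with a per-state counter array updated on every transition. A compact standalone sketch of that bookkeeping; state names and sizes are shortened stand-ins for illustration:

    #include <assert.h>
    #include <stdio.h>

    enum mb_state { MB_UNUSED, MB_PLUGGED, MB_OFFLINE, MB_ONLINE, MB_COUNT };

    #define FIRST_MB_ID 40UL
    #define NB_MB       8UL          /* illustrative capacity */

    static unsigned char mb_state[NB_MB];                       /* all MB_UNUSED */
    static unsigned long nb_mb_state[MB_COUNT] = { [MB_UNUSED] = NB_MB };

    static void mb_set_state(unsigned long mb_id, enum mb_state state)
    {
            const unsigned long idx = mb_id - FIRST_MB_ID;
            enum mb_state old_state = mb_state[idx];

            mb_state[idx] = state;
            assert(nb_mb_state[old_state] > 0);      /* mirrors the BUG_ON() */
            nb_mb_state[old_state]--;
            nb_mb_state[state]++;
    }

    int main(void)
    {
            mb_set_state(41, MB_PLUGGED);
            mb_set_state(41, MB_ONLINE);
            printf("online blocks: %lu\n", nb_mb_state[MB_ONLINE]);
            return 0;
    }
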
258 static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm) in virtio_mem_mb_state_prepare_next_mb() argument
260 unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1; in virtio_mem_mb_state_prepare_next_mb()
261 unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2; in virtio_mem_mb_state_prepare_next_mb()
266 if (vm->mb_state && old_pages == new_pages) in virtio_mem_mb_state_prepare_next_mb()
273 mutex_lock(&vm->hotplug_mutex); in virtio_mem_mb_state_prepare_next_mb()
274 if (vm->mb_state) in virtio_mem_mb_state_prepare_next_mb()
275 memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE); in virtio_mem_mb_state_prepare_next_mb()
276 vfree(vm->mb_state); in virtio_mem_mb_state_prepare_next_mb()
277 vm->mb_state = new_mb_state; in virtio_mem_mb_state_prepare_next_mb()
278 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_mb_state_prepare_next_mb()
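
The resize helper above only reallocates when the page-rounded size of the state array actually grows: one byte of state is needed per memory block, so it compares the old and new page counts and only then copies the old contents and swaps the pointer under the hotplug mutex. A standalone sketch of that grow-in-page-sized-steps pattern; the page size and helpers here are local stand-ins, not kernel API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PG_SIZE 4096UL
    #define PAGES_UP(bytes) (((bytes) + PG_SIZE - 1) / PG_SIZE)

    static unsigned char *mb_state;  /* one state byte per memory block */
    static unsigned long nb_mb;      /* number of blocks currently tracked */

    /* Ensure there is room for one more state byte; returns 0 on success. */
    static int mb_state_prepare_next(void)
    {
            unsigned long old_pages = PAGES_UP(nb_mb + 1);
            unsigned long new_pages = PAGES_UP(nb_mb + 2);
            unsigned char *new_state;

            if (mb_state && old_pages == new_pages)
                    return 0;        /* the current allocation still fits */

            new_state = calloc(new_pages, PG_SIZE);
            if (!new_state)
                    return -1;

            if (mb_state)
                    memcpy(new_state, mb_state, old_pages * PG_SIZE);
            free(mb_state);          /* the driver swaps under hotplug_mutex */
            mb_state = new_state;
            return 0;
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    if (!mb_state_prepare_next())
                            nb_mb++;
            printf("tracking %lu blocks\n", nb_mb);
            free(mb_state);
            return 0;
    }
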
300 static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm, in virtio_mem_mb_set_sb_plugged() argument
304 const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id; in virtio_mem_mb_set_sb_plugged()
306 __bitmap_set(vm->sb_bitmap, bit, count); in virtio_mem_mb_set_sb_plugged()
314 static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm, in virtio_mem_mb_set_sb_unplugged() argument
318 const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id; in virtio_mem_mb_set_sb_unplugged()
320 __bitmap_clear(vm->sb_bitmap, bit, count); in virtio_mem_mb_set_sb_unplugged()
326 static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm, in virtio_mem_mb_test_sb_plugged() argument
330 const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id; in virtio_mem_mb_test_sb_plugged()
333 return test_bit(bit, vm->sb_bitmap); in virtio_mem_mb_test_sb_plugged()
336 return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >= in virtio_mem_mb_test_sb_plugged()
343 static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm, in virtio_mem_mb_test_sb_unplugged() argument
347 const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id; in virtio_mem_mb_test_sb_unplugged()
350 return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count; in virtio_mem_mb_test_sb_unplugged()
354 * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
357 static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm, in virtio_mem_mb_first_unplugged_sb() argument
360 const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb; in virtio_mem_mb_first_unplugged_sb()
362 return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) - in virtio_mem_mb_first_unplugged_sb()
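
All of the subblock helpers above share one indexing rule: subblock sb_id of memory block mb_id lives at bit (mb_id - first_mb_id) * nb_sb_per_mb + sb_id of a single bitmap, and "first unplugged subblock" is a find-next-zero-bit bounded to that block's bit range. A self-contained sketch of the same indexing on an ordinary uint64_t bitmap; the sizes are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define FIRST_MB_ID  40UL
    #define NB_SB_PER_MB 32UL   /* illustrative: 128 MiB blocks, 4 MiB subblocks */

    static uint64_t sb_bitmap[4];    /* room for a few memory blocks */

    static unsigned long sb_bit(unsigned long mb_id, int sb_id)
    {
            return (mb_id - FIRST_MB_ID) * NB_SB_PER_MB + sb_id;
    }

    static void set_sb_plugged(unsigned long mb_id, int sb_id)
    {
            unsigned long bit = sb_bit(mb_id, sb_id);

            sb_bitmap[bit / 64] |= 1ULL << (bit % 64);
    }

    static int test_sb_plugged(unsigned long mb_id, int sb_id)
    {
            unsigned long bit = sb_bit(mb_id, sb_id);

            return !!(sb_bitmap[bit / 64] & (1ULL << (bit % 64)));
    }

    /* Returns NB_SB_PER_MB when every subblock of the block is plugged. */
    static int first_unplugged_sb(unsigned long mb_id)
    {
            for (int sb_id = 0; sb_id < (int)NB_SB_PER_MB; sb_id++)
                    if (!test_sb_plugged(mb_id, sb_id))
                            return sb_id;
            return NB_SB_PER_MB;
    }

    int main(void)
    {
            set_sb_plugged(40, 0);
            set_sb_plugged(40, 1);
            printf("first unplugged sb of mb 40: %d\n", first_unplugged_sb(40));
            return 0;
    }
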
369 static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm) in virtio_mem_sb_bitmap_prepare_next_mb() argument
371 const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id; in virtio_mem_sb_bitmap_prepare_next_mb()
372 const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb; in virtio_mem_sb_bitmap_prepare_next_mb()
373 const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb; in virtio_mem_sb_bitmap_prepare_next_mb()
378 if (vm->sb_bitmap && old_pages == new_pages) in virtio_mem_sb_bitmap_prepare_next_mb()
385 mutex_lock(&vm->hotplug_mutex); in virtio_mem_sb_bitmap_prepare_next_mb()
387 memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE); in virtio_mem_sb_bitmap_prepare_next_mb()
389 old_sb_bitmap = vm->sb_bitmap; in virtio_mem_sb_bitmap_prepare_next_mb()
390 vm->sb_bitmap = new_sb_bitmap; in virtio_mem_sb_bitmap_prepare_next_mb()
391 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_sb_bitmap_prepare_next_mb()
401 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
406 static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id) in virtio_mem_mb_add() argument
409 int nid = vm->nid; in virtio_mem_mb_add()
418 if (!vm->resource_name) { in virtio_mem_mb_add()
419 vm->resource_name = kstrdup_const("System RAM (virtio_mem)", in virtio_mem_mb_add()
421 if (!vm->resource_name) in virtio_mem_mb_add()
425 dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id); in virtio_mem_mb_add()
427 vm->resource_name, in virtio_mem_mb_add()
435 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
440 static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id) in virtio_mem_mb_remove() argument
443 int nid = vm->nid; in virtio_mem_mb_remove()
448 dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id); in virtio_mem_mb_remove()
455 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
460 static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm, in virtio_mem_mb_offline_and_remove() argument
464 int nid = vm->nid; in virtio_mem_mb_offline_and_remove()
469 dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n", in virtio_mem_mb_offline_and_remove()
477 static void virtio_mem_retry(struct virtio_mem *vm) in virtio_mem_retry() argument
481 spin_lock_irqsave(&vm->removal_lock, flags); in virtio_mem_retry()
482 if (!vm->removing) in virtio_mem_retry()
483 queue_work(system_freezable_wq, &vm->wq); in virtio_mem_retry()
484 spin_unlock_irqrestore(&vm->removal_lock, flags); in virtio_mem_retry()
487 static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id) in virtio_mem_translate_node_id() argument
492 if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM)) in virtio_mem_translate_node_id()
502 static bool virtio_mem_overlaps_range(struct virtio_mem *vm, in virtio_mem_overlaps_range() argument
505 unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id); in virtio_mem_overlaps_range()
506 unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) + in virtio_mem_overlaps_range()
516 static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id) in virtio_mem_owned_mb() argument
518 return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id; in virtio_mem_owned_mb()
521 static int virtio_mem_notify_going_online(struct virtio_mem *vm, in virtio_mem_notify_going_online() argument
524 switch (virtio_mem_mb_get_state(vm, mb_id)) { in virtio_mem_notify_going_online()
531 dev_warn_ratelimited(&vm->vdev->dev, in virtio_mem_notify_going_online()
536 static void virtio_mem_notify_offline(struct virtio_mem *vm, in virtio_mem_notify_offline() argument
539 switch (virtio_mem_mb_get_state(vm, mb_id)) { in virtio_mem_notify_offline()
541 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_notify_offline()
545 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_notify_offline()
560 virtio_mem_retry(vm); in virtio_mem_notify_offline()
563 static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id) in virtio_mem_notify_online() argument
567 switch (virtio_mem_mb_get_state(vm, mb_id)) { in virtio_mem_notify_online()
569 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_notify_online()
573 virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_ONLINE); in virtio_mem_notify_online()
579 nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] + in virtio_mem_notify_online()
580 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL]; in virtio_mem_notify_online()
584 virtio_mem_retry(vm); in virtio_mem_notify_online()
587 static void virtio_mem_notify_going_offline(struct virtio_mem *vm, in virtio_mem_notify_going_offline() argument
590 const unsigned long nr_pages = PFN_DOWN(vm->subblock_size); in virtio_mem_notify_going_offline()
595 for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) { in virtio_mem_notify_going_offline()
596 if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1)) in virtio_mem_notify_going_offline()
605 sb_id * vm->subblock_size); in virtio_mem_notify_going_offline()
615 static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm, in virtio_mem_notify_cancel_offline() argument
618 const unsigned long nr_pages = PFN_DOWN(vm->subblock_size); in virtio_mem_notify_cancel_offline()
622 for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) { in virtio_mem_notify_cancel_offline()
623 if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1)) in virtio_mem_notify_cancel_offline()
631 sb_id * vm->subblock_size); in virtio_mem_notify_cancel_offline()
646 struct virtio_mem *vm = container_of(nb, struct virtio_mem, in virtio_mem_memory_notifier_cb() local
654 if (!virtio_mem_overlaps_range(vm, start, size)) in virtio_mem_memory_notifier_cb()
676 mutex_lock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
677 if (vm->removing) { in virtio_mem_memory_notifier_cb()
679 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
682 vm->hotplug_active = true; in virtio_mem_memory_notifier_cb()
683 virtio_mem_notify_going_offline(vm, mb_id); in virtio_mem_memory_notifier_cb()
686 mutex_lock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
687 if (vm->removing) { in virtio_mem_memory_notifier_cb()
689 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
692 vm->hotplug_active = true; in virtio_mem_memory_notifier_cb()
693 rc = virtio_mem_notify_going_online(vm, mb_id); in virtio_mem_memory_notifier_cb()
696 virtio_mem_notify_offline(vm, mb_id); in virtio_mem_memory_notifier_cb()
697 vm->hotplug_active = false; in virtio_mem_memory_notifier_cb()
698 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
701 virtio_mem_notify_online(vm, mb_id); in virtio_mem_memory_notifier_cb()
702 vm->hotplug_active = false; in virtio_mem_memory_notifier_cb()
703 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
706 if (!vm->hotplug_active) in virtio_mem_memory_notifier_cb()
708 virtio_mem_notify_cancel_offline(vm, mb_id); in virtio_mem_memory_notifier_cb()
709 vm->hotplug_active = false; in virtio_mem_memory_notifier_cb()
710 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
713 if (!vm->hotplug_active) in virtio_mem_memory_notifier_cb()
715 vm->hotplug_active = false; in virtio_mem_memory_notifier_cb()
716 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_memory_notifier_cb()
801 struct virtio_mem *vm; in virtio_mem_online_page_cb() local
811 list_for_each_entry_rcu(vm, &virtio_mem_devices, next) { in virtio_mem_online_page_cb()
812 if (!virtio_mem_owned_mb(vm, mb_id)) in virtio_mem_online_page_cb()
815 sb_id = virtio_mem_phys_to_sb_id(vm, addr); in virtio_mem_online_page_cb()
820 if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1)) in virtio_mem_online_page_cb()
834 static uint64_t virtio_mem_send_request(struct virtio_mem *vm, in virtio_mem_send_request() argument
842 vm->req = *req; in virtio_mem_send_request()
845 sg_init_one(&sg_req, &vm->req, sizeof(vm->req)); in virtio_mem_send_request()
849 sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp)); in virtio_mem_send_request()
852 rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL); in virtio_mem_send_request()
856 virtqueue_kick(vm->vq); in virtio_mem_send_request()
859 wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len)); in virtio_mem_send_request()
861 return virtio16_to_cpu(vm->vdev, vm->resp.type); in virtio_mem_send_request()
864 static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr, in virtio_mem_send_plug_request() argument
867 const uint64_t nb_vm_blocks = size / vm->device_block_size; in virtio_mem_send_plug_request()
869 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG), in virtio_mem_send_plug_request()
870 .u.plug.addr = cpu_to_virtio64(vm->vdev, addr), in virtio_mem_send_plug_request()
871 .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks), in virtio_mem_send_plug_request()
874 if (atomic_read(&vm->config_changed)) in virtio_mem_send_plug_request()
877 switch (virtio_mem_send_request(vm, &req)) { in virtio_mem_send_plug_request()
879 vm->plugged_size += size; in virtio_mem_send_plug_request()
892 static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr, in virtio_mem_send_unplug_request() argument
895 const uint64_t nb_vm_blocks = size / vm->device_block_size; in virtio_mem_send_unplug_request()
897 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG), in virtio_mem_send_unplug_request()
898 .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr), in virtio_mem_send_unplug_request()
899 .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks), in virtio_mem_send_unplug_request()
902 if (atomic_read(&vm->config_changed)) in virtio_mem_send_unplug_request()
905 switch (virtio_mem_send_request(vm, &req)) { in virtio_mem_send_unplug_request()
907 vm->plugged_size -= size; in virtio_mem_send_unplug_request()
918 static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm) in virtio_mem_send_unplug_all_request() argument
921 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL), in virtio_mem_send_unplug_all_request()
924 switch (virtio_mem_send_request(vm, &req)) { in virtio_mem_send_unplug_all_request()
926 vm->unplug_all_required = false; in virtio_mem_send_unplug_all_request()
927 vm->plugged_size = 0; in virtio_mem_send_unplug_all_request()
929 atomic_set(&vm->config_changed, 1); in virtio_mem_send_unplug_all_request()
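
The three request wrappers above keep the guest's view of plugged_size consistent with what the device acknowledged: a plug ACK adds the size, an unplug ACK subtracts it, and an acknowledged unplug-all resets it to zero and clears unplug_all_required. A small standalone model of that accounting; the response codes and the transport are stand-ins, not the virtio-mem protocol constants:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum resp { RESP_ACK, RESP_NACK, RESP_BUSY };

    static uint64_t plugged_size;
    static bool unplug_all_required;

    /* Hypothetical transport: pretend the device acknowledges everything. */
    static enum resp send_request(const char *what)
    {
            (void)what;
            return RESP_ACK;
    }

    static int send_plug(uint64_t size)
    {
            if (send_request("plug") != RESP_ACK)
                    return -1;
            plugged_size += size;
            return 0;
    }

    static int send_unplug(uint64_t size)
    {
            if (send_request("unplug") != RESP_ACK)
                    return -1;
            plugged_size -= size;
            return 0;
    }

    static int send_unplug_all(void)
    {
            if (send_request("unplug all") != RESP_ACK)
                    return -1;
            unplug_all_required = false;
            plugged_size = 0;
            return 0;
    }

    int main(void)
    {
            send_plug(8ULL << 20);
            send_unplug(4ULL << 20);
            printf("plugged: %llu MiB\n", (unsigned long long)(plugged_size >> 20));
            send_unplug_all();
            return 0;
    }
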
942 static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id, in virtio_mem_mb_plug_sb() argument
946 sb_id * vm->subblock_size; in virtio_mem_mb_plug_sb()
947 const uint64_t size = count * vm->subblock_size; in virtio_mem_mb_plug_sb()
950 dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id, in virtio_mem_mb_plug_sb()
953 rc = virtio_mem_send_plug_request(vm, addr, size); in virtio_mem_mb_plug_sb()
955 virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count); in virtio_mem_mb_plug_sb()
963 static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id, in virtio_mem_mb_unplug_sb() argument
967 sb_id * vm->subblock_size; in virtio_mem_mb_unplug_sb()
968 const uint64_t size = count * vm->subblock_size; in virtio_mem_mb_unplug_sb()
971 dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n", in virtio_mem_mb_unplug_sb()
974 rc = virtio_mem_send_unplug_request(vm, addr, size); in virtio_mem_mb_unplug_sb()
976 virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count); in virtio_mem_mb_unplug_sb()
989 static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm, in virtio_mem_mb_unplug_any_sb() argument
995 sb_id = vm->nb_sb_per_mb - 1; in virtio_mem_mb_unplug_any_sb()
999 virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1)) in virtio_mem_mb_unplug_any_sb()
1006 virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) { in virtio_mem_mb_unplug_any_sb()
1011 rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count); in virtio_mem_mb_unplug_any_sb()
1028 static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id) in virtio_mem_mb_unplug() argument
1030 uint64_t nb_sb = vm->nb_sb_per_mb; in virtio_mem_mb_unplug()
1032 return virtio_mem_mb_unplug_any_sb(vm, mb_id, &nb_sb); in virtio_mem_mb_unplug()
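
virtio_mem_mb_unplug_any_sb() above walks a memory block's subblocks from the highest id downwards, skips ones that are already unplugged, extends the run downwards while the next lower subblock is still plugged, and sends one unplug request per run until *nb_sb subblocks have been handed back. A standalone sketch of that reverse-scan coalescing over a plain bitmap; helper names and sizes are local to the sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define NB_SB_PER_MB 8           /* small on purpose, for readability */

    static uint8_t plugged = 0xB6;   /* bits 1,2,4,5,7 plugged: 1011 0110 */

    static int sb_plugged(int sb_id)
    {
            return (plugged >> sb_id) & 1;
    }

    /* Pretend-unplug [sb_id, sb_id + count) and clear the bits on success. */
    static int unplug_sb(int sb_id, int count)
    {
            printf("unplug sb %d..%d\n", sb_id, sb_id + count - 1);
            plugged &= ~(((1u << count) - 1) << sb_id);
            return 0;
    }

    /* Unplug up to *nb_sb subblocks, preferring the highest plugged runs. */
    static int unplug_any_sb(uint64_t *nb_sb)
    {
            int sb_id = NB_SB_PER_MB - 1;

            while (*nb_sb && sb_id >= 0) {
                    int count;

                    /* skip subblocks that are already unplugged */
                    while (sb_id >= 0 && !sb_plugged(sb_id))
                            sb_id--;
                    if (sb_id < 0)
                            break;

                    /* grow the run downwards while it stays plugged */
                    count = 1;
                    while ((uint64_t)count < *nb_sb && sb_id > 0 &&
                           sb_plugged(sb_id - 1)) {
                            count++;
                            sb_id--;
                    }

                    if (unplug_sb(sb_id, count))
                            return -1;
                    *nb_sb -= count;
                    sb_id--;
            }
            return 0;
    }

    int main(void)
    {
            uint64_t nb_sb = 4;

            unplug_any_sb(&nb_sb);   /* unplugs sb 7, sb 4..5, sb 2 */
            printf("remaining request: %llu\n", (unsigned long long)nb_sb);
            return 0;
    }
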
1038 static int virtio_mem_prepare_next_mb(struct virtio_mem *vm, in virtio_mem_prepare_next_mb() argument
1043 if (vm->next_mb_id > vm->last_usable_mb_id) in virtio_mem_prepare_next_mb()
1047 rc = virtio_mem_mb_state_prepare_next_mb(vm); in virtio_mem_prepare_next_mb()
1052 rc = virtio_mem_sb_bitmap_prepare_next_mb(vm); in virtio_mem_prepare_next_mb()
1056 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++; in virtio_mem_prepare_next_mb()
1057 *mb_id = vm->next_mb_id++; in virtio_mem_prepare_next_mb()
1064 static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm) in virtio_mem_too_many_mb_offline() argument
1068 nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] + in virtio_mem_too_many_mb_offline()
1069 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL]; in virtio_mem_too_many_mb_offline()
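
The helper above sums the fully and partially offline block counters and compares the result against a limit, so the driver stops adding new memory blocks while too many already-added ones have not been onlined yet. A trivial sketch of that guard; the threshold value is a made-up placeholder, as the real constant is not visible in this listing:

    #include <stdbool.h>
    #include <stdio.h>

    #define MB_OFFLINE          0
    #define MB_OFFLINE_PARTIAL  1
    #define MB_STATE_COUNT      2

    /* Placeholder limit; the driver uses its own compile-time threshold. */
    #define OFFLINE_THRESHOLD   32UL

    static unsigned long nb_mb_state[MB_STATE_COUNT];

    static bool too_many_mb_offline(void)
    {
            unsigned long nb_offline = nb_mb_state[MB_OFFLINE] +
                                       nb_mb_state[MB_OFFLINE_PARTIAL];

            return nb_offline >= OFFLINE_THRESHOLD;
    }

    int main(void)
    {
            nb_mb_state[MB_OFFLINE] = 30;
            nb_mb_state[MB_OFFLINE_PARTIAL] = 3;
            printf("too many offline: %s\n", too_many_mb_offline() ? "yes" : "no");
            return 0;
    }
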
1079 static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm, in virtio_mem_mb_plug_and_add() argument
1083 const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb); in virtio_mem_mb_plug_and_add()
1093 rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count); in virtio_mem_mb_plug_and_add()
1101 if (count == vm->nb_sb_per_mb) in virtio_mem_mb_plug_and_add()
1102 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_mb_plug_and_add()
1105 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_mb_plug_and_add()
1109 rc = virtio_mem_mb_add(vm, mb_id); in virtio_mem_mb_plug_and_add()
1113 dev_err(&vm->vdev->dev, in virtio_mem_mb_plug_and_add()
1115 rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count); in virtio_mem_mb_plug_and_add()
1123 virtio_mem_mb_set_state(vm, mb_id, new_state); in virtio_mem_mb_plug_and_add()
1139 static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id, in virtio_mem_mb_plug_any_sb() argument
1150 sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id); in virtio_mem_mb_plug_any_sb()
1151 if (sb_id >= vm->nb_sb_per_mb) in virtio_mem_mb_plug_any_sb()
1155 sb_id + count < vm->nb_sb_per_mb && in virtio_mem_mb_plug_any_sb()
1156 !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count, in virtio_mem_mb_plug_any_sb()
1160 rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count); in virtio_mem_mb_plug_any_sb()
1169 sb_id * vm->subblock_size); in virtio_mem_mb_plug_any_sb()
1170 nr_pages = PFN_DOWN(count * vm->subblock_size); in virtio_mem_mb_plug_any_sb()
1174 if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) { in virtio_mem_mb_plug_any_sb()
1176 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_mb_plug_any_sb()
1179 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_mb_plug_any_sb()
1189 static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff) in virtio_mem_plug_request() argument
1191 uint64_t nb_sb = diff / vm->subblock_size; in virtio_mem_plug_request()
1199 mutex_lock(&vm->hotplug_mutex); in virtio_mem_plug_request()
1202 virtio_mem_for_each_mb_state(vm, mb_id, in virtio_mem_plug_request()
1204 rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true); in virtio_mem_plug_request()
1211 virtio_mem_for_each_mb_state(vm, mb_id, in virtio_mem_plug_request()
1213 rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false); in virtio_mem_plug_request()
1223 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_plug_request()
1226 virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) { in virtio_mem_plug_request()
1227 if (virtio_mem_too_many_mb_offline(vm)) in virtio_mem_plug_request()
1230 rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb); in virtio_mem_plug_request()
1238 if (virtio_mem_too_many_mb_offline(vm)) in virtio_mem_plug_request()
1241 rc = virtio_mem_prepare_next_mb(vm, &mb_id); in virtio_mem_plug_request()
1244 rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb); in virtio_mem_plug_request()
1252 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_plug_request()
1265 static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm, in virtio_mem_mb_unplug_any_sb_offline() argument
1271 rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb); in virtio_mem_mb_unplug_any_sb_offline()
1274 if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) in virtio_mem_mb_unplug_any_sb_offline()
1275 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_mb_unplug_any_sb_offline()
1280 if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) { in virtio_mem_mb_unplug_any_sb_offline()
1287 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_mb_unplug_any_sb_offline()
1290 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_mb_unplug_any_sb_offline()
1291 rc = virtio_mem_mb_remove(vm, mb_id); in virtio_mem_mb_unplug_any_sb_offline()
1293 mutex_lock(&vm->hotplug_mutex); in virtio_mem_mb_unplug_any_sb_offline()
1303 static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm, in virtio_mem_mb_unplug_sb_online() argument
1307 const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count; in virtio_mem_mb_unplug_sb_online()
1312 sb_id * vm->subblock_size); in virtio_mem_mb_unplug_sb_online()
1326 rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count); in virtio_mem_mb_unplug_sb_online()
1333 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_mb_unplug_sb_online()
1348 static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm, in virtio_mem_mb_unplug_any_sb_online() argument
1355 if (*nb_sb >= vm->nb_sb_per_mb && in virtio_mem_mb_unplug_any_sb_online()
1356 virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) { in virtio_mem_mb_unplug_any_sb_online()
1357 rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0, in virtio_mem_mb_unplug_any_sb_online()
1358 vm->nb_sb_per_mb); in virtio_mem_mb_unplug_any_sb_online()
1360 *nb_sb -= vm->nb_sb_per_mb; in virtio_mem_mb_unplug_any_sb_online()
1367 for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) { in virtio_mem_mb_unplug_any_sb_online()
1370 !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1)) in virtio_mem_mb_unplug_any_sb_online()
1375 rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1); in virtio_mem_mb_unplug_any_sb_online()
1389 if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) { in virtio_mem_mb_unplug_any_sb_online()
1390 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_mb_unplug_any_sb_online()
1391 rc = virtio_mem_mb_offline_and_remove(vm, mb_id); in virtio_mem_mb_unplug_any_sb_online()
1392 mutex_lock(&vm->hotplug_mutex); in virtio_mem_mb_unplug_any_sb_online()
1394 virtio_mem_mb_set_state(vm, mb_id, in virtio_mem_mb_unplug_any_sb_online()
1404 static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff) in virtio_mem_unplug_request() argument
1406 uint64_t nb_sb = diff / vm->subblock_size; in virtio_mem_unplug_request()
1418 mutex_lock(&vm->hotplug_mutex); in virtio_mem_unplug_request()
1421 virtio_mem_for_each_mb_state_rev(vm, mb_id, in virtio_mem_unplug_request()
1423 rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id, in virtio_mem_unplug_request()
1431 virtio_mem_for_each_mb_state_rev(vm, mb_id, in virtio_mem_unplug_request()
1433 rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id, in virtio_mem_unplug_request()
1441 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_unplug_request()
1446 virtio_mem_for_each_mb_state_rev(vm, mb_id, in virtio_mem_unplug_request()
1448 rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id, in virtio_mem_unplug_request()
1452 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_unplug_request()
1454 mutex_lock(&vm->hotplug_mutex); in virtio_mem_unplug_request()
1458 virtio_mem_for_each_mb_state_rev(vm, mb_id, in virtio_mem_unplug_request()
1460 rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id, in virtio_mem_unplug_request()
1464 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_unplug_request()
1466 mutex_lock(&vm->hotplug_mutex); in virtio_mem_unplug_request()
1469 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_unplug_request()
1472 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_unplug_request()
1480 static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm) in virtio_mem_unplug_pending_mb() argument
1485 virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) { in virtio_mem_unplug_pending_mb()
1486 rc = virtio_mem_mb_unplug(vm, mb_id); in virtio_mem_unplug_pending_mb()
1489 virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED); in virtio_mem_unplug_pending_mb()
1498 static void virtio_mem_refresh_config(struct virtio_mem *vm) in virtio_mem_refresh_config() argument
1504 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size, in virtio_mem_refresh_config()
1506 if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size)) in virtio_mem_refresh_config()
1507 vm->plugged_size = new_plugged_size; in virtio_mem_refresh_config()
1510 virtio_cread_le(vm->vdev, struct virtio_mem_config, in virtio_mem_refresh_config()
1512 end_addr = vm->addr + usable_region_size; in virtio_mem_refresh_config()
1514 vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1; in virtio_mem_refresh_config()
1517 virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size, in virtio_mem_refresh_config()
1518 &vm->requested_size); in virtio_mem_refresh_config()
1520 dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size); in virtio_mem_refresh_config()
1521 dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size); in virtio_mem_refresh_config()
1529 struct virtio_mem *vm = container_of(work, struct virtio_mem, wq); in virtio_mem_run_wq() local
1533 hrtimer_cancel(&vm->retry_timer); in virtio_mem_run_wq()
1535 if (vm->broken) in virtio_mem_run_wq()
1542 if (unlikely(vm->unplug_all_required)) in virtio_mem_run_wq()
1543 rc = virtio_mem_send_unplug_all_request(vm); in virtio_mem_run_wq()
1545 if (atomic_read(&vm->config_changed)) { in virtio_mem_run_wq()
1546 atomic_set(&vm->config_changed, 0); in virtio_mem_run_wq()
1547 virtio_mem_refresh_config(vm); in virtio_mem_run_wq()
1552 rc = virtio_mem_unplug_pending_mb(vm); in virtio_mem_run_wq()
1554 if (!rc && vm->requested_size != vm->plugged_size) { in virtio_mem_run_wq()
1555 if (vm->requested_size > vm->plugged_size) { in virtio_mem_run_wq()
1556 diff = vm->requested_size - vm->plugged_size; in virtio_mem_run_wq()
1557 rc = virtio_mem_plug_request(vm, diff); in virtio_mem_run_wq()
1559 diff = vm->plugged_size - vm->requested_size; in virtio_mem_run_wq()
1560 rc = virtio_mem_unplug_request(vm, diff); in virtio_mem_run_wq()
1566 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS; in virtio_mem_run_wq()
1586 hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms), in virtio_mem_run_wq()
1594 dev_err(&vm->vdev->dev, in virtio_mem_run_wq()
1596 vm->broken = true; in virtio_mem_run_wq()
1602 struct virtio_mem *vm = container_of(timer, struct virtio_mem, in virtio_mem_timer_expired() local
1605 virtio_mem_retry(vm); in virtio_mem_timer_expired()
1606 vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2, in virtio_mem_timer_expired()
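
When the timer fires, the expiry handler requeues the work item and doubles the retry interval, capping it at a maximum, so transient failures are retried with exponential backoff. A standalone sketch of that doubling-with-cap; the minimum and maximum values are assumed for illustration:

    #include <stdio.h>

    /* Assumed bounds for illustration; the driver defines its own MIN/MAX. */
    #define RETRY_TIMER_MIN_MS  50000U
    #define RETRY_TIMER_MAX_MS  300000U

    static unsigned int retry_timer_ms = RETRY_TIMER_MIN_MS;

    /* Return the current delay and prepare the (capped) doubled one. */
    static unsigned int next_retry_ms(void)
    {
            unsigned int cur = retry_timer_ms;

            retry_timer_ms = retry_timer_ms * 2 < RETRY_TIMER_MAX_MS ?
                             retry_timer_ms * 2 : RETRY_TIMER_MAX_MS;
            return cur;
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    printf("retry in %u ms\n", next_retry_ms());
            return 0;
    }
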
1613 struct virtio_mem *vm = vq->vdev->priv; in virtio_mem_handle_response() local
1615 wake_up(&vm->host_resp); in virtio_mem_handle_response()
1618 static int virtio_mem_init_vq(struct virtio_mem *vm) in virtio_mem_init_vq() argument
1622 vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response, in virtio_mem_init_vq()
1626 vm->vq = vq; in virtio_mem_init_vq()
1631 static int virtio_mem_init(struct virtio_mem *vm) in virtio_mem_init() argument
1636 if (!vm->vdev->config->get) { in virtio_mem_init()
1637 dev_err(&vm->vdev->dev, "config access disabled\n"); in virtio_mem_init()
1646 dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n"); in virtio_mem_init()
1651 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size, in virtio_mem_init()
1652 &vm->plugged_size); in virtio_mem_init()
1653 virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size, in virtio_mem_init()
1654 &vm->device_block_size); in virtio_mem_init()
1655 virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id, in virtio_mem_init()
1657 vm->nid = virtio_mem_translate_node_id(vm, node_id); in virtio_mem_init()
1658 virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr); in virtio_mem_init()
1659 virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size, in virtio_mem_init()
1660 &vm->region_size); in virtio_mem_init()
1666 if (vm->device_block_size > memory_block_size_bytes()) { in virtio_mem_init()
1667 dev_err(&vm->vdev->dev, in virtio_mem_init()
1673 if (!IS_ALIGNED(vm->addr, memory_block_size_bytes())) in virtio_mem_init()
1674 dev_warn(&vm->vdev->dev, in virtio_mem_init()
1676 if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes())) in virtio_mem_init()
1677 dev_warn(&vm->vdev->dev, in virtio_mem_init()
1679 if (vm->addr + vm->region_size > phys_limit) in virtio_mem_init()
1680 dev_warn(&vm->vdev->dev, in virtio_mem_init()
1689 vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1, in virtio_mem_init()
1691 vm->subblock_size = max_t(uint64_t, vm->device_block_size, in virtio_mem_init()
1692 vm->subblock_size); in virtio_mem_init()
1693 vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size; in virtio_mem_init()
1696 vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 + in virtio_mem_init()
1698 vm->next_mb_id = vm->first_mb_id; in virtio_mem_init()
1699 vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr + in virtio_mem_init()
1700 vm->region_size) - 1; in virtio_mem_init()
1702 dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr); in virtio_mem_init()
1703 dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size); in virtio_mem_init()
1704 dev_info(&vm->vdev->dev, "device block size: 0x%llx", in virtio_mem_init()
1705 (unsigned long long)vm->device_block_size); in virtio_mem_init()
1706 dev_info(&vm->vdev->dev, "memory block size: 0x%lx", in virtio_mem_init()
1708 dev_info(&vm->vdev->dev, "subblock size: 0x%llx", in virtio_mem_init()
1709 (unsigned long long)vm->subblock_size); in virtio_mem_init()
1710 if (vm->nid != NUMA_NO_NODE) in virtio_mem_init()
1711 dev_info(&vm->vdev->dev, "nid: %d", vm->nid); in virtio_mem_init()
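
The initialization fragments above derive the geometry the rest of the driver works with: the subblock size starts from the largest chunk the page allocator hands out (PAGE_SIZE shifted by at least MAX_ORDER - 1; the second operand of that max_t() is truncated in this listing, and the sketch assumes it is the pageblock order), is raised to at least the device block size, and the Linux memory block size is then expressed as a whole number of subblocks. A worked, standalone example with assumed x86-64-style values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Assumed values for illustration only. */
            const uint64_t page_size = 4096;
            const uint32_t max_order = 11;          /* buddy orders 0..10 */
            const uint32_t pageblock_order = 9;     /* 2 MiB pageblocks */
            const uint64_t device_block_size = 2ULL << 20;
            const uint64_t memory_block_size = 128ULL << 20;

            uint32_t order = max_order - 1 > pageblock_order ?
                             max_order - 1 : pageblock_order;
            uint64_t subblock_size = page_size << order;

            if (subblock_size < device_block_size)
                    subblock_size = device_block_size;

            /* prints 4 MiB subblocks, 32 subblocks per 128 MiB memory block */
            printf("subblock size: %llu MiB\n",
                   (unsigned long long)(subblock_size >> 20));
            printf("subblocks per memory block: %llu\n",
                   (unsigned long long)(memory_block_size / subblock_size));
            return 0;
    }
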
1716 static int virtio_mem_create_resource(struct virtio_mem *vm) in virtio_mem_create_resource() argument
1722 const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL); in virtio_mem_create_resource()
1727 vm->parent_resource = __request_mem_region(vm->addr, vm->region_size, in virtio_mem_create_resource()
1729 if (!vm->parent_resource) { in virtio_mem_create_resource()
1731 dev_warn(&vm->vdev->dev, "could not reserve device region\n"); in virtio_mem_create_resource()
1732 dev_info(&vm->vdev->dev, in virtio_mem_create_resource()
1738 vm->parent_resource->flags &= ~IORESOURCE_BUSY; in virtio_mem_create_resource()
1742 static void virtio_mem_delete_resource(struct virtio_mem *vm) in virtio_mem_delete_resource() argument
1746 if (!vm->parent_resource) in virtio_mem_delete_resource()
1749 name = vm->parent_resource->name; in virtio_mem_delete_resource()
1750 release_resource(vm->parent_resource); in virtio_mem_delete_resource()
1751 kfree(vm->parent_resource); in virtio_mem_delete_resource()
1753 vm->parent_resource = NULL; in virtio_mem_delete_resource()
1758 struct virtio_mem *vm; in virtio_mem_probe() local
1764 vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL); in virtio_mem_probe()
1765 if (!vm) in virtio_mem_probe()
1768 init_waitqueue_head(&vm->host_resp); in virtio_mem_probe()
1769 vm->vdev = vdev; in virtio_mem_probe()
1770 INIT_WORK(&vm->wq, virtio_mem_run_wq); in virtio_mem_probe()
1771 mutex_init(&vm->hotplug_mutex); in virtio_mem_probe()
1772 INIT_LIST_HEAD(&vm->next); in virtio_mem_probe()
1773 spin_lock_init(&vm->removal_lock); in virtio_mem_probe()
1774 hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in virtio_mem_probe()
1775 vm->retry_timer.function = virtio_mem_timer_expired; in virtio_mem_probe()
1776 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS; in virtio_mem_probe()
1779 rc = virtio_mem_init_vq(vm); in virtio_mem_probe()
1784 rc = virtio_mem_init(vm); in virtio_mem_probe()
1789 rc = virtio_mem_create_resource(vm); in virtio_mem_probe()
1798 if (vm->plugged_size) { in virtio_mem_probe()
1799 vm->unplug_all_required = 1; in virtio_mem_probe()
1800 dev_info(&vm->vdev->dev, "unplugging all memory is required\n"); in virtio_mem_probe()
1804 vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb; in virtio_mem_probe()
1805 rc = register_memory_notifier(&vm->memory_notifier); in virtio_mem_probe()
1808 rc = register_virtio_mem_device(vm); in virtio_mem_probe()
1815 atomic_set(&vm->config_changed, 1); in virtio_mem_probe()
1816 queue_work(system_freezable_wq, &vm->wq); in virtio_mem_probe()
1820 unregister_memory_notifier(&vm->memory_notifier); in virtio_mem_probe()
1822 virtio_mem_delete_resource(vm); in virtio_mem_probe()
1826 kfree(vm); in virtio_mem_probe()
1834 struct virtio_mem *vm = vdev->priv; in virtio_mem_remove() local
1842 mutex_lock(&vm->hotplug_mutex); in virtio_mem_remove()
1843 spin_lock_irq(&vm->removal_lock); in virtio_mem_remove()
1844 vm->removing = true; in virtio_mem_remove()
1845 spin_unlock_irq(&vm->removal_lock); in virtio_mem_remove()
1846 mutex_unlock(&vm->hotplug_mutex); in virtio_mem_remove()
1849 cancel_work_sync(&vm->wq); in virtio_mem_remove()
1850 hrtimer_cancel(&vm->retry_timer); in virtio_mem_remove()
1856 virtio_mem_for_each_mb_state(vm, mb_id, in virtio_mem_remove()
1858 rc = virtio_mem_mb_remove(vm, mb_id); in virtio_mem_remove()
1860 virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED); in virtio_mem_remove()
1869 unregister_virtio_mem_device(vm); in virtio_mem_remove()
1870 unregister_memory_notifier(&vm->memory_notifier); in virtio_mem_remove()
1877 if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] || in virtio_mem_remove()
1878 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] || in virtio_mem_remove()
1879 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] || in virtio_mem_remove()
1880 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL]) { in virtio_mem_remove()
1883 virtio_mem_delete_resource(vm); in virtio_mem_remove()
1884 kfree_const(vm->resource_name); in virtio_mem_remove()
1888 vfree(vm->mb_state); in virtio_mem_remove()
1889 vfree(vm->sb_bitmap); in virtio_mem_remove()
1895 kfree(vm); in virtio_mem_remove()
1901 struct virtio_mem *vm = vdev->priv; in virtio_mem_config_changed() local
1903 atomic_set(&vm->config_changed, 1); in virtio_mem_config_changed()
1904 virtio_mem_retry(vm); in virtio_mem_config_changed()
1911 * When restarting the VM, all memory is usually unplugged. Don't in virtio_mem_freeze()