1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * VMware Balloon driver.
4 *
5 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
6 *
7 * This is the VMware physical memory management driver for Linux. The driver
8 * acts like a "balloon" that can be inflated to reclaim physical pages by
9 * reserving them in the guest and invalidating them in the monitor,
10 * freeing up the underlying machine pages so they can be allocated to
11 * other guests. The balloon can also be deflated to allow the guest to
12 * use more physical memory. Higher level policies can control the sizes
13 * of balloons in VMs in order to manage physical memory resources.
14 */
15
16 //#define DEBUG
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/mm.h>
22 #include <linux/vmalloc.h>
23 #include <linux/sched.h>
24 #include <linux/module.h>
25 #include <linux/workqueue.h>
26 #include <linux/debugfs.h>
27 #include <linux/seq_file.h>
28 #include <linux/rwsem.h>
29 #include <linux/slab.h>
30 #include <linux/spinlock.h>
31 #include <linux/mount.h>
32 #include <linux/pseudo_fs.h>
33 #include <linux/balloon_compaction.h>
34 #include <linux/vmw_vmci_defs.h>
35 #include <linux/vmw_vmci_api.h>
36 #include <asm/hypervisor.h>
37
38 MODULE_AUTHOR("VMware, Inc.");
39 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
40 MODULE_ALIAS("dmi:*:svnVMware*:*");
41 MODULE_ALIAS("vmware_vmmemctl");
42 MODULE_LICENSE("GPL");
43
44 static bool __read_mostly vmwballoon_shrinker_enable;
45 module_param(vmwballoon_shrinker_enable, bool, 0444);
46 MODULE_PARM_DESC(vmwballoon_shrinker_enable,
47 "Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");
48
49 /* Delay in seconds after shrink before inflation. */
50 #define VMBALLOON_SHRINK_DELAY (5)
51
52 /* Maximum number of refused pages we accumulate during inflation cycle */
53 #define VMW_BALLOON_MAX_REFUSED 16
54
55 /* Magic number for the balloon mount-point */
56 #define BALLOON_VMW_MAGIC 0x0ba11007
57
58 /*
59 * Hypervisor communication port definitions.
60 */
61 #define VMW_BALLOON_HV_PORT 0x5670
62 #define VMW_BALLOON_HV_MAGIC 0x456c6d6f
63 #define VMW_BALLOON_GUEST_ID 1 /* Linux */
64
65 enum vmwballoon_capabilities {
66 /*
67 * Bit 0 is reserved and not associated to any capability.
68 */
69 VMW_BALLOON_BASIC_CMDS = (1 << 1),
70 VMW_BALLOON_BATCHED_CMDS = (1 << 2),
71 VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3),
72 VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4),
73 VMW_BALLOON_64_BIT_TARGET = (1 << 5)
74 };
75
76 #define VMW_BALLOON_CAPABILITIES_COMMON (VMW_BALLOON_BASIC_CMDS \
77 | VMW_BALLOON_BATCHED_CMDS \
78 | VMW_BALLOON_BATCHED_2M_CMDS \
79 | VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
80
81 #define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
82
83 /*
84 * 64-bit targets are only supported in 64-bit
85 */
86 #ifdef CONFIG_64BIT
87 #define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_CAPABILITIES_COMMON \
88 | VMW_BALLOON_64_BIT_TARGET)
89 #else
90 #define VMW_BALLOON_CAPABILITIES VMW_BALLOON_CAPABILITIES_COMMON
91 #endif
92
93 enum vmballoon_page_size_type {
94 VMW_BALLOON_4K_PAGE,
95 VMW_BALLOON_2M_PAGE,
96 VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
97 };
98
99 #define VMW_BALLOON_NUM_PAGE_SIZES (VMW_BALLOON_LAST_SIZE + 1)
100
101 static const char * const vmballoon_page_size_names[] = {
102 [VMW_BALLOON_4K_PAGE] = "4k",
103 [VMW_BALLOON_2M_PAGE] = "2M"
104 };
105
106 enum vmballoon_op {
107 VMW_BALLOON_INFLATE,
108 VMW_BALLOON_DEFLATE
109 };
110
111 enum vmballoon_op_stat_type {
112 VMW_BALLOON_OP_STAT,
113 VMW_BALLOON_OP_FAIL_STAT
114 };
115
116 #define VMW_BALLOON_OP_STAT_TYPES (VMW_BALLOON_OP_FAIL_STAT + 1)
117
118 /**
119 * enum vmballoon_cmd_type - backdoor commands.
120 *
121 * Availability of the commands is as follows:
122 *
123 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
124 * %VMW_BALLOON_CMD_GUEST_ID are always available.
125 *
126 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
127 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
128 *
129 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
130 * %VMW_BALLOON_CMD_BATCHED_LOCK and VMW_BALLOON_CMD_BATCHED_UNLOCK commands
131 * are available.
132 *
133 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
134 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
135 * are supported.
136 *
137 * If the host reports VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
138 * VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
139 *
140 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
141 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
142 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
143 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
144 * to be deflated from the balloon.
145 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
146 * runs in the VM.
147 * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
148 * ballooned pages (up to 512).
149 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
150 * pages that are about to be deflated from the
151 * balloon (up to 512).
152 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
153 * for 2MB pages.
154 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
155 * @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
156 * pages.
157 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
158 * that would be invoked when the balloon
159 * size changes.
160 * @VMW_BALLOON_CMD_LAST: Value of the last command.
161 */
162 enum vmballoon_cmd_type {
163 VMW_BALLOON_CMD_START,
164 VMW_BALLOON_CMD_GET_TARGET,
165 VMW_BALLOON_CMD_LOCK,
166 VMW_BALLOON_CMD_UNLOCK,
167 VMW_BALLOON_CMD_GUEST_ID,
168 /* No command 5 */
169 VMW_BALLOON_CMD_BATCHED_LOCK = 6,
170 VMW_BALLOON_CMD_BATCHED_UNLOCK,
171 VMW_BALLOON_CMD_BATCHED_2M_LOCK,
172 VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
173 VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
174 VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
175 };
176
177 #define VMW_BALLOON_CMD_NUM (VMW_BALLOON_CMD_LAST + 1)
178
179 enum vmballoon_error_codes {
180 VMW_BALLOON_SUCCESS,
181 VMW_BALLOON_ERROR_CMD_INVALID,
182 VMW_BALLOON_ERROR_PPN_INVALID,
183 VMW_BALLOON_ERROR_PPN_LOCKED,
184 VMW_BALLOON_ERROR_PPN_UNLOCKED,
185 VMW_BALLOON_ERROR_PPN_PINNED,
186 VMW_BALLOON_ERROR_PPN_NOTNEEDED,
187 VMW_BALLOON_ERROR_RESET,
188 VMW_BALLOON_ERROR_BUSY
189 };
190
191 #define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
192
193 #define VMW_BALLOON_CMD_WITH_TARGET_MASK \
194 ((1UL << VMW_BALLOON_CMD_GET_TARGET) | \
195 (1UL << VMW_BALLOON_CMD_LOCK) | \
196 (1UL << VMW_BALLOON_CMD_UNLOCK) | \
197 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK) | \
198 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK) | \
199 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK) | \
200 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
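/*
 * Commands in the mask above return the current balloon target in the result
 * register; __vmballoon_cmd() uses that value to update @b->target on success.
 */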
201
202 static const char * const vmballoon_cmd_names[] = {
203 [VMW_BALLOON_CMD_START] = "start",
204 [VMW_BALLOON_CMD_GET_TARGET] = "target",
205 [VMW_BALLOON_CMD_LOCK] = "lock",
206 [VMW_BALLOON_CMD_UNLOCK] = "unlock",
207 [VMW_BALLOON_CMD_GUEST_ID] = "guestType",
208 [VMW_BALLOON_CMD_BATCHED_LOCK] = "batchLock",
209 [VMW_BALLOON_CMD_BATCHED_UNLOCK] = "batchUnlock",
210 [VMW_BALLOON_CMD_BATCHED_2M_LOCK] = "2m-lock",
211 [VMW_BALLOON_CMD_BATCHED_2M_UNLOCK] = "2m-unlock",
212 [VMW_BALLOON_CMD_VMCI_DOORBELL_SET] = "doorbellSet"
213 };
214
215 enum vmballoon_stat_page {
216 VMW_BALLOON_PAGE_STAT_ALLOC,
217 VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
218 VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
219 VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
220 VMW_BALLOON_PAGE_STAT_FREE,
221 VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
222 };
223
224 #define VMW_BALLOON_PAGE_STAT_NUM (VMW_BALLOON_PAGE_STAT_LAST + 1)
225
226 enum vmballoon_stat_general {
227 VMW_BALLOON_STAT_TIMER,
228 VMW_BALLOON_STAT_DOORBELL,
229 VMW_BALLOON_STAT_RESET,
230 VMW_BALLOON_STAT_SHRINK,
231 VMW_BALLOON_STAT_SHRINK_FREE,
232 VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
233 };
234
235 #define VMW_BALLOON_STAT_NUM (VMW_BALLOON_STAT_LAST + 1)
236
237 static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
238 static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
239
240 struct vmballoon_ctl {
241 struct list_head pages;
242 struct list_head refused_pages;
243 struct list_head prealloc_pages;
244 unsigned int n_refused_pages;
245 unsigned int n_pages;
246 enum vmballoon_page_size_type page_size;
247 enum vmballoon_op op;
248 };
249
250 /**
251 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
252 *
253 * @status: the status of the operation, which is written by the hypervisor.
254 * @reserved: reserved for future use. Must be set to zero.
255 * @pfn: the physical frame number of the page to be locked or unlocked.
256 */
257 struct vmballoon_batch_entry {
258 u64 status : 5;
259 u64 reserved : PAGE_SHIFT - 5;
260 u64 pfn : 52;
261 } __packed;
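/*
 * Illustrative layout, assuming PAGE_SHIFT == 12 and the usual x86 bitfield
 * ordering: bits 0-4 hold the status written back by the hypervisor, bits
 * 5-11 are reserved (zero) and bits 12-63 hold the page frame number. An
 * entry for PFN 0x12345, for example, would be initialized as:
 *
 *	entry = (struct vmballoon_batch_entry) { .pfn = 0x12345 };
 */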
262
263 struct vmballoon {
264 /**
265 * @max_page_size: maximum supported page size for ballooning.
266 *
267 * Protected by @conf_sem
268 */
269 enum vmballoon_page_size_type max_page_size;
270
271 /**
272 * @size: balloon actual size in basic page size (frames).
273 *
274 * While we currently do not support a size bigger than 32 bits, use
275 * 64 bits in preparation for future support.
276 */
277 atomic64_t size;
278
279 /**
280 * @target: balloon target size in basic page size (frames).
281 *
282 * We do not protect the target under the assumption that setting the
283 * value is always done through a single write. If this assumption ever
284 * breaks, we would have to use X_ONCE for accesses, and suffer the less
285 * optimized code. Although we may read stale target value if multiple
286 * accesses happen at once, the performance impact should be minor.
287 */
288 unsigned long target;
289
290 /**
291 * @reset_required: reset flag
292 *
293 * Setting this flag may introduce races, but the code is expected to
294 * handle them gracefully. In the worst case, another operation will
295 * fail as reset did not take place. Clearing the flag is done while
296 * holding @conf_sem for write.
297 */
298 bool reset_required;
299
300 /**
301 * @capabilities: hypervisor balloon capabilities.
302 *
303 * Protected by @conf_sem.
304 */
305 unsigned long capabilities;
306
307 /**
308 * @batch_page: pointer to communication batch page.
309 *
310 * When batching is used, batch_page points to a page, which holds up to
311 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
312 */
313 struct vmballoon_batch_entry *batch_page;
314
315 /**
316 * @batch_max_pages: maximum pages that can be locked/unlocked.
317 *
318 * Indicates the number of pages that the hypervisor can lock or unlock
319 * at once, according to whether batching is enabled. If batching is
320 * disabled, only a single page can be locked/unlocked on each operation.
321 *
322 * Protected by @conf_sem.
323 */
324 unsigned int batch_max_pages;
325
326 /**
327 * @page: page to be locked/unlocked by the hypervisor
328 *
329 * @page is only used when batching is disabled and a single page is
330 * reclaimed on each iteration.
331 *
332 * Protected by @comm_lock.
333 */
334 struct page *page;
335
336 /**
337 * @shrink_timeout: timeout until the next inflation.
338 *
339 * After a shrink event, indicates the time in jiffies after which
340 * inflation is allowed again. Can be written concurrently with reads,
341 * so must use READ_ONCE/WRITE_ONCE when accessing.
342 */
343 unsigned long shrink_timeout;
344
345 /* statistics */
346 struct vmballoon_stats *stats;
347
348 #ifdef CONFIG_DEBUG_FS
349 /* debugfs file exporting statistics */
350 struct dentry *dbg_entry;
351 #endif
352
353 /**
354 * @b_dev_info: balloon device information descriptor.
355 */
356 struct balloon_dev_info b_dev_info;
357
358 struct delayed_work dwork;
359
360 /**
361 * @huge_pages: list of the inflated 2MB pages.
362 *
363 * Protected by @b_dev_info.pages_lock .
364 */
365 struct list_head huge_pages;
366
367 /**
368 * @vmci_doorbell: VMCI doorbell handle, used to get notified when the balloon target changes.
369 *
370 * Protected by @conf_sem.
371 */
372 struct vmci_handle vmci_doorbell;
373
374 /**
375 * @conf_sem: semaphore to protect the configuration and the statistics.
376 */
377 struct rw_semaphore conf_sem;
378
379 /**
380 * @comm_lock: lock to protect the communication with the host.
381 *
382 * Lock ordering: @conf_sem -> @comm_lock .
383 */
384 spinlock_t comm_lock;
385
386 /**
387 * @shrinker: shrinker interface that is used to avoid over-inflation.
388 */
389 struct shrinker shrinker;
390
391 /**
392 * @shrinker_registered: whether the shrinker was registered.
393 *
394 * The shrinker interface does not gracefully handle the removal of a
395 * shrinker that was never registered. This indication allows us to
396 * simplify the unregistration process.
397 */
398 bool shrinker_registered;
399 };
400
401 static struct vmballoon balloon;
402
403 struct vmballoon_stats {
404 /* timer / doorbell operations */
405 atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
406
407 /* allocation statistics for huge and small pages */
408 atomic64_t
409 page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
410
411 /* Monitor operations: total operations, and failures */
412 atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
413 };
414
415 static inline bool is_vmballoon_stats_on(void)
416 {
417 return IS_ENABLED(CONFIG_DEBUG_FS) &&
418 static_branch_unlikely(&balloon_stat_enabled);
419 }
420
421 static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
422 enum vmballoon_op_stat_type type)
423 {
424 if (is_vmballoon_stats_on())
425 atomic64_inc(&b->stats->ops[op][type]);
426 }
427
428 static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
429 enum vmballoon_stat_general stat)
430 {
431 if (is_vmballoon_stats_on())
432 atomic64_inc(&b->stats->general_stat[stat]);
433 }
434
435 static inline void vmballoon_stats_gen_add(struct vmballoon *b,
436 enum vmballoon_stat_general stat,
437 unsigned int val)
438 {
439 if (is_vmballoon_stats_on())
440 atomic64_add(val, &b->stats->general_stat[stat]);
441 }
442
443 static inline void vmballoon_stats_page_inc(struct vmballoon *b,
444 enum vmballoon_stat_page stat,
445 enum vmballoon_page_size_type size)
446 {
447 if (is_vmballoon_stats_on())
448 atomic64_inc(&b->stats->page_stat[stat][size]);
449 }
450
451 static inline void vmballoon_stats_page_add(struct vmballoon *b,
452 enum vmballoon_stat_page stat,
453 enum vmballoon_page_size_type size,
454 unsigned int val)
455 {
456 if (is_vmballoon_stats_on())
457 atomic64_add(val, &b->stats->page_stat[stat][size]);
458 }
459
460 static inline unsigned long
461 __vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
462 unsigned long arg2, unsigned long *result)
463 {
464 unsigned long status, dummy1, dummy2, dummy3, local_result;
465
466 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);
467
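/*
 * Backdoor call register convention, as encoded by the asm constraints below:
 * %eax carries the magic on entry and the status on return, %ecx the command
 * (and, for the START command, the capabilities on return), %edx the port,
 * %ebx the first argument and the result, and %esi the second argument.
 */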
468 asm volatile ("inl %%dx" :
469 "=a"(status),
470 "=c"(dummy1),
471 "=d"(dummy2),
472 "=b"(local_result),
473 "=S"(dummy3) :
474 "0"(VMW_BALLOON_HV_MAGIC),
475 "1"(cmd),
476 "2"(VMW_BALLOON_HV_PORT),
477 "3"(arg1),
478 "4"(arg2) :
479 "memory");
480
481 /* update the result if needed */
482 if (result)
483 *result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
484 local_result;
485
486 /* update target when applicable */
487 if (status == VMW_BALLOON_SUCCESS &&
488 ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
489 WRITE_ONCE(b->target, local_result);
490
491 if (status != VMW_BALLOON_SUCCESS &&
492 status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
493 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
494 pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
495 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
496 status);
497 }
498
499 /* mark reset required accordingly */
500 if (status == VMW_BALLOON_ERROR_RESET)
501 b->reset_required = true;
502
503 return status;
504 }
505
506 static __always_inline unsigned long
507 vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
508 unsigned long arg2)
509 {
510 unsigned long dummy;
511
512 return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
513 }
514
515 /*
516 * Send "start" command to the host, communicating supported version
517 * of the protocol.
518 */
519 static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
520 {
521 unsigned long status, capabilities;
522
523 status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
524 &capabilities);
525
526 switch (status) {
527 case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
528 b->capabilities = capabilities;
529 break;
530 case VMW_BALLOON_SUCCESS:
531 b->capabilities = VMW_BALLOON_BASIC_CMDS;
532 break;
533 default:
534 return -EIO;
535 }
536
537 /*
538 * 2MB pages are only supported with batching. If batching is for some
539 * reason disabled, do not use 2MB pages, since otherwise the legacy
540 * mechanism is used with 2MB pages, causing a failure.
541 */
542 b->max_page_size = VMW_BALLOON_4K_PAGE;
543 if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
544 (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
545 b->max_page_size = VMW_BALLOON_2M_PAGE;
546
547
548 return 0;
549 }
550
551 /**
552 * vmballoon_send_guest_id - communicate guest type to the host.
553 *
554 * @b: pointer to the balloon.
555 *
556 * Communicate guest type to the host so that it can adjust ballooning
557 * algorithm to the one most appropriate for the guest. This command
558 * is normally issued after sending "start" command and is part of
559 * standard reset sequence.
560 *
561 * Return: zero on success or appropriate error code.
562 */
563 static int vmballoon_send_guest_id(struct vmballoon *b)
564 {
565 unsigned long status;
566
567 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
568 VMW_BALLOON_GUEST_ID, 0);
569
570 return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
571 }
572
573 /**
574 * vmballoon_page_order() - return the order of the page
575 * @page_size: the size of the page.
576 *
577 * Return: the allocation order.
578 */
579 static inline
580 unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
581 {
582 return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
583 }
584
585 /**
586 * vmballoon_page_in_frames() - returns the number of frames in a page.
587 * @page_size: the size of the page.
588 *
589 * Return: the number of 4k frames.
590 */
591 static inline unsigned int
592 vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
593 {
594 return 1 << vmballoon_page_order(page_size);
595 }
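/* For example, with 4KB base pages a 2MB page spans 1 << 9 = 512 frames. */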
596
597 /**
598 * vmballoon_mark_page_offline() - mark a page as offline
599 * @page: pointer for the page.
600 * @page_size: the size of the page.
601 */
602 static void
603 vmballoon_mark_page_offline(struct page *page,
604 enum vmballoon_page_size_type page_size)
605 {
606 int i;
607
608 for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
609 __SetPageOffline(page + i);
610 }
611
612 /**
613 * vmballoon_mark_page_online() - mark a page as online
614 * @page: pointer for the page.
615 * @page_size: the size of the page.
616 */
617 static void
618 vmballoon_mark_page_online(struct page *page,
619 enum vmballoon_page_size_type page_size)
620 {
621 int i;
622
623 for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
624 __ClearPageOffline(page + i);
625 }
626
627 /**
628 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
629 *
630 * @b: pointer to the balloon.
631 *
632 * Return: zero on success, -EINVAL if the limit does not fit in 32 bits as
633 * required by the host-guest protocol, and -EIO if an error occurred while
634 * communicating with the host.
635 */
636 static int vmballoon_send_get_target(struct vmballoon *b)
637 {
638 unsigned long status;
639 unsigned long limit;
640
641 limit = totalram_pages();
642
643 /* Ensure limit fits in 32-bits if 64-bit targets are not supported */
644 if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
645 limit != (u32)limit)
646 return -EINVAL;
647
648 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
649
650 return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
651 }
652
653 /**
654 * vmballoon_alloc_page_list - allocates a list of pages.
655 *
656 * @b: pointer to the balloon.
657 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
658 * @req_n_pages: the number of requested pages.
659 *
660 * Tries to allocate @req_n_pages. Add them to the list of balloon pages in
661 * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
662 *
663 * Return: zero on success or error code otherwise.
664 */
665 static int vmballoon_alloc_page_list(struct vmballoon *b,
666 struct vmballoon_ctl *ctl,
667 unsigned int req_n_pages)
668 {
669 struct page *page;
670 unsigned int i;
671
672 for (i = 0; i < req_n_pages; i++) {
673 /*
674 * First check if we happen to have pages that were allocated
675 * before. This happens when a 2MB page was rejected by the
676 * hypervisor during inflation and then split into 4KB pages.
677 */
678 if (!list_empty(&ctl->prealloc_pages)) {
679 page = list_first_entry(&ctl->prealloc_pages,
680 struct page, lru);
681 list_del(&page->lru);
682 } else {
683 if (ctl->page_size == VMW_BALLOON_2M_PAGE)
684 page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
685 __GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
686 else
687 page = balloon_page_alloc();
688
689 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
690 ctl->page_size);
691 }
692
693 if (page) {
694 /* Success. Add the page to the list and continue. */
695 list_add(&page->lru, &ctl->pages);
696 continue;
697 }
698
699 /* Allocation failed. Update statistics and stop. */
700 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
701 ctl->page_size);
702 break;
703 }
704
705 ctl->n_pages = i;
706
707 return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
708 }
709
710 /**
711 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
712 *
713 * @b: pointer for %struct vmballoon.
714 * @page: pointer for the page whose result should be handled.
715 * @page_size: size of the page.
716 * @status: status of the operation as provided by the hypervisor.
717 */
718 static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
719 enum vmballoon_page_size_type page_size,
720 unsigned long status)
721 {
722 /* On success do nothing. The page is already on the balloon list. */
723 if (likely(status == VMW_BALLOON_SUCCESS))
724 return 0;
725
726 pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
727 page_to_pfn(page), status,
728 vmballoon_page_size_names[page_size]);
729
730 /* Error occurred */
731 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
732 page_size);
733
734 return -EIO;
735 }
736
737 /**
738 * vmballoon_status_page - returns the status of (un)lock operation
739 *
740 * @b: pointer to the balloon.
741 * @idx: index for the page for which the operation is performed.
742 * @p: pointer to where the page struct is returned.
743 *
744 * Following a lock or unlock operation, returns the status of the operation for
745 * an individual page. Provides the page that the operation was performed on in
746 * the @p argument.
747 *
748 * Returns: The status of a lock or unlock operation for an individual page.
749 */
750 static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
751 struct page **p)
752 {
753 if (static_branch_likely(&vmw_balloon_batching)) {
754 /* batching mode */
755 *p = pfn_to_page(b->batch_page[idx].pfn);
756 return b->batch_page[idx].status;
757 }
758
759 /* non-batching mode */
760 *p = b->page;
761
762 /*
763 * If a failure occurs, the indication will be provided in the status
764 * of the entire operation, which is considered before the individual
765 * page status. So for non-batching mode, the indication is always of
766 * success.
767 */
768 return VMW_BALLOON_SUCCESS;
769 }
770
771 /**
772 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
773 * @b: pointer to the balloon.
774 * @num_pages: number of inflated/deflated pages.
775 * @page_size: size of the page.
776 * @op: the type of operation (lock or unlock).
777 *
778 * Notify the host about page(s) that were ballooned (or removed from the
779 * balloon) so that host can use it without fear that guest will need it (or
780 * stop using them since the VM does). Host may reject some pages, we need to
781 * check the return value and maybe submit a different page. The pages that are
782 * inflated/deflated are pointed by @b->page.
783 *
784 * Return: result as provided by the hypervisor.
785 */
786 static unsigned long vmballoon_lock_op(struct vmballoon *b,
787 unsigned int num_pages,
788 enum vmballoon_page_size_type page_size,
789 enum vmballoon_op op)
790 {
791 unsigned long cmd, pfn;
792
793 lockdep_assert_held(&b->comm_lock);
794
795 if (static_branch_likely(&vmw_balloon_batching)) {
796 if (op == VMW_BALLOON_INFLATE)
797 cmd = page_size == VMW_BALLOON_2M_PAGE ?
798 VMW_BALLOON_CMD_BATCHED_2M_LOCK :
799 VMW_BALLOON_CMD_BATCHED_LOCK;
800 else
801 cmd = page_size == VMW_BALLOON_2M_PAGE ?
802 VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
803 VMW_BALLOON_CMD_BATCHED_UNLOCK;
804
805 pfn = PHYS_PFN(virt_to_phys(b->batch_page));
806 } else {
807 cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
808 VMW_BALLOON_CMD_UNLOCK;
809 pfn = page_to_pfn(b->page);
810
811 /* In non-batching mode, PFNs must fit in 32-bit */
812 if (unlikely(pfn != (u32)pfn))
813 return VMW_BALLOON_ERROR_PPN_INVALID;
814 }
815
816 return vmballoon_cmd(b, cmd, pfn, num_pages);
817 }
818
819 /**
820 * vmballoon_add_page - adds a page towards lock/unlock operation.
821 *
822 * @b: pointer to the balloon.
823 * @idx: index of the page to be ballooned in this batch.
824 * @p: pointer to the page that is about to be ballooned.
825 *
826 * Adds the page to be ballooned. Must be called while holding @comm_lock.
827 */
828 static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
829 struct page *p)
830 {
831 lockdep_assert_held(&b->comm_lock);
832
833 if (static_branch_likely(&vmw_balloon_batching))
834 b->batch_page[idx] = (struct vmballoon_batch_entry)
835 { .pfn = page_to_pfn(p) };
836 else
837 b->page = p;
838 }
839
840 /**
841 * vmballoon_lock - lock or unlock a batch of pages.
842 *
843 * @b: pointer to the balloon.
844 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
845 *
846 * Notifies the host about ballooned pages (after inflation or deflation,
847 * according to @ctl). If the host rejects a page, it is put on the
848 * @ctl refused_pages list. These refused pages are then released when moving
849 * to the next page size.
850 *
851 * Note that we neither free any page here nor put it back on the ballooned
852 * pages list. Instead we queue it for later processing. We do that for several
853 * reasons. First, we do not want to free the page under the lock. Second, it
854 * allows us to unify the handling of lock and unlock. In the inflate case, the
855 * caller will check if there are too many refused pages and release them.
856 * Although it is not identical to the past behavior, it should not affect
857 * performance.
858 */
859 static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
860 {
861 unsigned long batch_status;
862 struct page *page;
863 unsigned int i, num_pages;
864
865 num_pages = ctl->n_pages;
866 if (num_pages == 0)
867 return 0;
868
869 /* communication with the host is done under the communication lock */
870 spin_lock(&b->comm_lock);
871
872 i = 0;
873 list_for_each_entry(page, &ctl->pages, lru)
874 vmballoon_add_page(b, i++, page);
875
876 batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
877 ctl->op);
878
879 /*
880 * Iterate over the pages in the provided list. Since we are changing
881 * @ctl->n_pages we are saving the original value in @num_pages and
882 * use this value to bound the loop.
883 */
884 for (i = 0; i < num_pages; i++) {
885 unsigned long status;
886
887 status = vmballoon_status_page(b, i, &page);
888
889 /*
890 * Failure of the whole batch overrides the result of a single
891 * operation.
892 */
893 if (batch_status != VMW_BALLOON_SUCCESS)
894 status = batch_status;
895
896 /* Continue if no error happened */
897 if (!vmballoon_handle_one_result(b, page, ctl->page_size,
898 status))
899 continue;
900
901 /*
902 * Error happened. Move the pages to the refused list and update
903 * the pages number.
904 */
905 list_move(&page->lru, &ctl->refused_pages);
906 ctl->n_pages--;
907 ctl->n_refused_pages++;
908 }
909
910 spin_unlock(&b->comm_lock);
911
912 return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
913 }
914
915 /**
916 * vmballoon_release_page_list() - Releases a page list
917 *
918 * @page_list: list of pages to release.
919 * @n_pages: pointer to the number of pages.
920 * @page_size: whether the pages in the list are 2MB (or else 4KB).
921 *
922 * Releases the list of pages and zeros the number of pages.
923 */
924 static void vmballoon_release_page_list(struct list_head *page_list,
925 int *n_pages,
926 enum vmballoon_page_size_type page_size)
927 {
928 struct page *page, *tmp;
929
930 list_for_each_entry_safe(page, tmp, page_list, lru) {
931 list_del(&page->lru);
932 __free_pages(page, vmballoon_page_order(page_size));
933 }
934
935 if (n_pages)
936 *n_pages = 0;
937 }
938
939
940 /*
941 * Release pages that were allocated while attempting to inflate the
942 * balloon but were refused by the host for one reason or another.
943 */
944 static void vmballoon_release_refused_pages(struct vmballoon *b,
945 struct vmballoon_ctl *ctl)
946 {
947 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
948 ctl->page_size);
949
950 vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
951 ctl->page_size);
952 }
953
954 /**
955 * vmballoon_change - retrieve the required balloon change
956 *
957 * @b: pointer for the balloon.
958 *
959 * Return: the required change for the balloon size. A positive number
960 * indicates inflation, a negative number indicates a deflation.
961 */
962 static int64_t vmballoon_change(struct vmballoon *b)
963 {
964 int64_t size, target;
965
966 size = atomic64_read(&b->size);
967 target = READ_ONCE(b->target);
968
969 /*
970 * We must cast first because of int sizes; otherwise we might
971 * get huge positive values instead of negatives.
972 */
973
974 if (b->reset_required)
975 return 0;
976
977 /* consider a 2MB slack on deflate, unless the balloon is emptied */
978 if (target < size && target != 0 &&
979 size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
980 return 0;
981
982 /* If an out-of-memory recently occurred, inflation is disallowed. */
983 if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
984 return 0;
985
986 return target - size;
987 }
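/*
 * Example: with 4KB base pages the 2MB slack equals 512 frames. If the
 * balloon currently holds 10240 frames and the (non-zero) target is 10000,
 * the 240-frame difference is within the slack and no change is reported;
 * a target of zero bypasses the slack check and requests full deflation.
 */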
988
989 /**
990 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
991 *
992 * @b: pointer to balloon.
993 * @pages: list of pages to enqueue.
994 * @n_pages: pointer to number of pages in list. The value is zeroed.
995 * @page_size: whether the pages are 2MB or 4KB pages.
996 *
997 * Enqueues the provided list of pages on the ballooned page list, clears the
998 * list and zeroes the number of pages that was provided.
999 */
1000 static void vmballoon_enqueue_page_list(struct vmballoon *b,
1001 struct list_head *pages,
1002 unsigned int *n_pages,
1003 enum vmballoon_page_size_type page_size)
1004 {
1005 unsigned long flags;
1006 struct page *page;
1007
1008 if (page_size == VMW_BALLOON_4K_PAGE) {
1009 balloon_page_list_enqueue(&b->b_dev_info, pages);
1010 } else {
1011 /*
1012 * Keep the huge pages in a local list which is not available
1013 * for the balloon compaction mechanism.
1014 */
1015 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1016
1017 list_for_each_entry(page, pages, lru) {
1018 vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
1019 }
1020
1021 list_splice_init(pages, &b->huge_pages);
1022 __count_vm_events(BALLOON_INFLATE, *n_pages *
1023 vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1024 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1025 }
1026
1027 *n_pages = 0;
1028 }
1029
1030 /**
1031 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
1032 *
1033 * @b: pointer to balloon.
1034 * @pages: list of pages to enqueue.
1035 * @n_pages: pointer to number of pages in list. The value is zeroed.
1036 * @page_size: whether the pages are 2MB or 4KB pages.
1037 * @n_req_pages: the number of requested pages.
1038 *
1039 * Dequeues the number of requested pages from the balloon for deflation. The
1040 * number of dequeued pages may be lower, if not enough pages in the requested
1041 * size are available.
1042 */
1043 static void vmballoon_dequeue_page_list(struct vmballoon *b,
1044 struct list_head *pages,
1045 unsigned int *n_pages,
1046 enum vmballoon_page_size_type page_size,
1047 unsigned int n_req_pages)
1048 {
1049 struct page *page, *tmp;
1050 unsigned int i = 0;
1051 unsigned long flags;
1052
1053 /* In the case of 4k pages, use the compaction infrastructure */
1054 if (page_size == VMW_BALLOON_4K_PAGE) {
1055 *n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
1056 n_req_pages);
1057 return;
1058 }
1059
1060 /* 2MB pages */
1061 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1062 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
1063 vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
1064
1065 list_move(&page->lru, pages);
1066 if (++i == n_req_pages)
1067 break;
1068 }
1069
1070 __count_vm_events(BALLOON_DEFLATE,
1071 i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1072 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1073 *n_pages = i;
1074 }
1075
1076 /**
1077 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
1078 *
1079 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
1080 * due to one or a few 4KB pages. These 2MB pages may keep being allocated and
1081 * then being refused. To prevent this case, this function splits the refused
1082 * pages into 4KB pages and adds them to the @prealloc_pages list.
1083 *
1084 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
1085 */
1086 static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
1087 {
1088 struct page *page, *tmp;
1089 unsigned int i, order;
1090
1091 order = vmballoon_page_order(ctl->page_size);
1092
1093 list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
1094 list_del(&page->lru);
1095 split_page(page, order);
1096 for (i = 0; i < (1 << order); i++)
1097 list_add(&page[i].lru, &ctl->prealloc_pages);
1098 }
1099 ctl->n_refused_pages = 0;
1100 }
1101
1102 /**
1103 * vmballoon_inflate() - Inflate the balloon towards its target size.
1104 *
1105 * @b: pointer to the balloon.
1106 */
1107 static void vmballoon_inflate(struct vmballoon *b)
1108 {
1109 int64_t to_inflate_frames;
1110 struct vmballoon_ctl ctl = {
1111 .pages = LIST_HEAD_INIT(ctl.pages),
1112 .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1113 .prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
1114 .page_size = b->max_page_size,
1115 .op = VMW_BALLOON_INFLATE
1116 };
1117
1118 while ((to_inflate_frames = vmballoon_change(b)) > 0) {
1119 unsigned int to_inflate_pages, page_in_frames;
1120 int alloc_error, lock_error = 0;
1121
1122 VM_BUG_ON(!list_empty(&ctl.pages));
1123 VM_BUG_ON(ctl.n_pages != 0);
1124
1125 page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1126
1127 to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
1128 DIV_ROUND_UP_ULL(to_inflate_frames,
1129 page_in_frames));
1130
1131 /* Start by allocating */
1132 alloc_error = vmballoon_alloc_page_list(b, &ctl,
1133 to_inflate_pages);
1134
1135 /* Actually lock the pages by telling the hypervisor */
1136 lock_error = vmballoon_lock(b, &ctl);
1137
1138 /*
1139 * If an error indicates that something serious went wrong,
1140 * stop the inflation.
1141 */
1142 if (lock_error)
1143 break;
1144
1145 /* Update the balloon size */
1146 atomic64_add(ctl.n_pages * page_in_frames, &b->size);
1147
1148 vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
1149 ctl.page_size);
1150
1151 /*
1152 * If allocation failed or the number of refused pages exceeds
1153 * the maximum allowed, move to the next page size.
1154 */
1155 if (alloc_error ||
1156 ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
1157 if (ctl.page_size == VMW_BALLOON_4K_PAGE)
1158 break;
1159
1160 /*
1161 * Split the refused pages to 4k. This will also empty
1162 * the refused pages list.
1163 */
1164 vmballoon_split_refused_pages(&ctl);
1165 ctl.page_size--;
1166 }
1167
1168 cond_resched();
1169 }
1170
1171 /*
1172 * Release pages that were allocated while attempting to inflate the
1173 * balloon but were refused by the host for one reason or another,
1174 * and update the statistics.
1175 */
1176 if (ctl.n_refused_pages != 0)
1177 vmballoon_release_refused_pages(b, &ctl);
1178
1179 vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
1180 }
1181
1182 /**
1183 * vmballoon_deflate() - Decrease the size of the balloon.
1184 *
1185 * @b: pointer to the balloon
1186 * @n_frames: the number of frames to deflate. If zero, automatically
1187 * calculated according to the target size.
1188 * @coordinated: whether to coordinate with the host
1189 *
1190 * Decrease the size of the balloon allowing guest to use more memory.
1191 *
1192 * Return: The number of deflated frames (i.e., basic page size units)
1193 */
1194 static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
1195 bool coordinated)
1196 {
1197 unsigned long deflated_frames = 0;
1198 unsigned long tried_frames = 0;
1199 struct vmballoon_ctl ctl = {
1200 .pages = LIST_HEAD_INIT(ctl.pages),
1201 .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1202 .page_size = VMW_BALLOON_4K_PAGE,
1203 .op = VMW_BALLOON_DEFLATE
1204 };
1205
1206 /* free pages to reach target */
1207 while (true) {
1208 unsigned int to_deflate_pages, n_unlocked_frames;
1209 unsigned int page_in_frames;
1210 int64_t to_deflate_frames;
1211 bool deflated_all;
1212
1213 page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1214
1215 VM_BUG_ON(!list_empty(&ctl.pages));
1216 VM_BUG_ON(ctl.n_pages);
1217 VM_BUG_ON(!list_empty(&ctl.refused_pages));
1218 VM_BUG_ON(ctl.n_refused_pages);
1219
1220 /*
1221 * If we were requested a specific number of frames, we try to
1222 * deflate this number of frames. Otherwise, deflation is
1223 * performed according to the target and balloon size.
1224 */
1225 to_deflate_frames = n_frames ? n_frames - tried_frames :
1226 -vmballoon_change(b);
1227
1228 /* break if no work to do */
1229 if (to_deflate_frames <= 0)
1230 break;
1231
1232 /*
1233 * Calculate the number of frames based on current page size,
1234 * but limit the deflated frames to a single chunk
1235 */
1236 to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
1237 DIV_ROUND_UP_ULL(to_deflate_frames,
1238 page_in_frames));
1239
1240 /* First take the pages from the balloon pages. */
1241 vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
1242 ctl.page_size, to_deflate_pages);
1243
1244 /*
1245 * Before pages are moved to the refused list, count their
1246 * frames as frames that we tried to deflate.
1247 */
1248 tried_frames += ctl.n_pages * page_in_frames;
1249
1250 /*
1251 * Unlock the pages by communicating with the hypervisor if the
1252 * communication is coordinated (i.e., not pop). We ignore the
1253 * return code. Instead we check whether we managed to unlock
1254 * all the pages. If we failed, we will move to the next
1255 * page size and eventually try again later.
1256 */
1257 if (coordinated)
1258 vmballoon_lock(b, &ctl);
1259
1260 /*
1261 * Check if we deflated enough. We will move to the next page
1262 * size if we did not manage to do so. This calculation takes
1263 * place now, as once the pages are released, the number of
1264 * pages is zeroed.
1265 */
1266 deflated_all = (ctl.n_pages == to_deflate_pages);
1267
1268 /* Update local and global counters */
1269 n_unlocked_frames = ctl.n_pages * page_in_frames;
1270 atomic64_sub(n_unlocked_frames, &b->size);
1271 deflated_frames += n_unlocked_frames;
1272
1273 vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
1274 ctl.page_size, ctl.n_pages);
1275
1276 /* free the ballooned pages */
1277 vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
1278 ctl.page_size);
1279
1280 /* Return the refused pages to the ballooned list. */
1281 vmballoon_enqueue_page_list(b, &ctl.refused_pages,
1282 &ctl.n_refused_pages,
1283 ctl.page_size);
1284
1285 /* If we failed to unlock all the pages, move to next size. */
1286 if (!deflated_all) {
1287 if (ctl.page_size == b->max_page_size)
1288 break;
1289 ctl.page_size++;
1290 }
1291
1292 cond_resched();
1293 }
1294
1295 return deflated_frames;
1296 }
1297
1298 /**
1299 * vmballoon_deinit_batching - disables batching mode.
1300 *
1301 * @b: pointer to &struct vmballoon.
1302 *
1303 * Disables batching, by deallocating the page for communication with the
1304 * hypervisor and disabling the static key to indicate that batching is off.
1305 */
1306 static void vmballoon_deinit_batching(struct vmballoon *b)
1307 {
1308 free_page((unsigned long)b->batch_page);
1309 b->batch_page = NULL;
1310 static_branch_disable(&vmw_balloon_batching);
1311 b->batch_max_pages = 1;
1312 }
1313
1314 /**
1315 * vmballoon_init_batching - enable batching mode.
1316 *
1317 * @b: pointer to &struct vmballoon.
1318 *
1319 * Enables batching, by allocating a page for communication with the hypervisor
1320 * and enabling the static_key to use batching.
1321 *
1322 * Return: zero on success or an appropriate error-code.
1323 */
1324 static int vmballoon_init_batching(struct vmballoon *b)
1325 {
1326 struct page *page;
1327
1328 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1329 if (!page)
1330 return -ENOMEM;
1331
1332 b->batch_page = page_address(page);
1333 b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
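/*
 * With 4KB pages and 8-byte batch entries this evaluates to 512, matching
 * the "up to 512" pages per batched lock/unlock command described above.
 */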
1334
1335 static_branch_enable(&vmw_balloon_batching);
1336
1337 return 0;
1338 }
1339
1340 /*
1341 * Receive notification and resize balloon
1342 */
1343 static void vmballoon_doorbell(void *client_data)
1344 {
1345 struct vmballoon *b = client_data;
1346
1347 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
1348
1349 mod_delayed_work(system_freezable_wq, &b->dwork, 0);
1350 }
1351
1352 /*
1353 * Clean up vmci doorbell
1354 */
1355 static void vmballoon_vmci_cleanup(struct vmballoon *b)
1356 {
1357 vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1358 VMCI_INVALID_ID, VMCI_INVALID_ID);
1359
1360 if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
1361 vmci_doorbell_destroy(b->vmci_doorbell);
1362 b->vmci_doorbell = VMCI_INVALID_HANDLE;
1363 }
1364 }
1365
1366 /**
1367 * vmballoon_vmci_init - Initialize vmci doorbell.
1368 *
1369 * @b: pointer to the balloon.
1370 *
1371 * Return: zero on success or when wakeup command not supported. Error-code
1372 * otherwise.
1373 *
1374 * Initialize vmci doorbell, to get notified as soon as balloon changes.
1375 */
1376 static int vmballoon_vmci_init(struct vmballoon *b)
1377 {
1378 unsigned long error;
1379
1380 if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
1381 return 0;
1382
1383 error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
1384 VMCI_PRIVILEGE_FLAG_RESTRICTED,
1385 vmballoon_doorbell, b);
1386
1387 if (error != VMCI_SUCCESS)
1388 goto fail;
1389
1390 error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1391 b->vmci_doorbell.context,
1392 b->vmci_doorbell.resource, NULL);
1393
1394 if (error != VMW_BALLOON_SUCCESS)
1395 goto fail;
1396
1397 return 0;
1398 fail:
1399 vmballoon_vmci_cleanup(b);
1400 return -EIO;
1401 }
1402
1403 /**
1404 * vmballoon_pop - Quickly release all pages allocated for the balloon.
1405 *
1406 * @b: pointer to the balloon.
1407 *
1408 * This function is called when host decides to "reset" balloon for one reason
1409 * or another. Unlike normal "deflate" we do not (shall not) notify host of the
1410 * pages being released.
1411 */
1412 static void vmballoon_pop(struct vmballoon *b)
1413 {
1414 unsigned long size;
1415
1416 while ((size = atomic64_read(&b->size)))
1417 vmballoon_deflate(b, size, false);
1418 }
1419
1420 /*
1421 * Perform standard reset sequence by popping the balloon (in case it
1422 * is not empty) and then restarting protocol. This operation normally
1423 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
1424 */
1425 static void vmballoon_reset(struct vmballoon *b)
1426 {
1427 int error;
1428
1429 down_write(&b->conf_sem);
1430
1431 vmballoon_vmci_cleanup(b);
1432
1433 /* free all pages, skipping monitor unlock */
1434 vmballoon_pop(b);
1435
1436 if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
1437 goto unlock;
1438
1439 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
1440 if (vmballoon_init_batching(b)) {
1441 /*
1442 * We failed to initialize batching, inform the monitor
1443 * about it by sending a null capability.
1444 *
1445 * The guest will retry in one second.
1446 */
1447 vmballoon_send_start(b, 0);
1448 goto unlock;
1449 }
1450 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
1451 vmballoon_deinit_batching(b);
1452 }
1453
1454 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
1455 b->reset_required = false;
1456
1457 error = vmballoon_vmci_init(b);
1458 if (error)
1459 pr_err("failed to initialize vmci doorbell\n");
1460
1461 if (vmballoon_send_guest_id(b))
1462 pr_err("failed to send guest ID to the host\n");
1463
1464 unlock:
1465 up_write(&b->conf_sem);
1466 }
1467
1468 /**
1469 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
1470 *
1471 * @work: pointer to the &work_struct which is provided by the workqueue.
1472 *
1473 * Resets the protocol if needed, gets the new target size and adjusts the
1474 * balloon as needed. Repeats in 1 sec.
1475 */
1476 static void vmballoon_work(struct work_struct *work)
1477 {
1478 struct delayed_work *dwork = to_delayed_work(work);
1479 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
1480 int64_t change = 0;
1481
1482 if (b->reset_required)
1483 vmballoon_reset(b);
1484
1485 down_read(&b->conf_sem);
1486
1487 /*
1488 * Update the stats while holding the semaphore to ensure that
1489 * @stats_enabled is consistent with whether the stats are actually
1490 * enabled
1491 */
1492 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
1493
1494 if (!vmballoon_send_get_target(b))
1495 change = vmballoon_change(b);
1496
1497 if (change != 0) {
1498 pr_debug("%s - size: %llu, target %lu\n", __func__,
1499 atomic64_read(&b->size), READ_ONCE(b->target));
1500
1501 if (change > 0)
1502 vmballoon_inflate(b);
1503 else /* (change < 0) */
1504 vmballoon_deflate(b, 0, true);
1505 }
1506
1507 up_read(&b->conf_sem);
1508
1509 /*
1510 * We are using a freezable workqueue so that balloon operations are
1511 * stopped while the system transitions to/from sleep/hibernation.
1512 */
1513 queue_delayed_work(system_freezable_wq,
1514 dwork, round_jiffies_relative(HZ));
1515
1516 }
1517
1518 /**
1519 * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
1520 * @shrinker: pointer to the balloon shrinker.
1521 * @sc: page reclaim information.
1522 *
1523 * Returns: number of pages that were freed during deflation.
1524 */
1525 static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
1526 struct shrink_control *sc)
1527 {
1528 struct vmballoon *b = &balloon;
1529 unsigned long deflated_frames;
1530
1531 pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));
1532
1533 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);
1534
1535 /*
1536 * If the lock is also contended for read, we cannot easily reclaim and
1537 * we bail out.
1538 */
1539 if (!down_read_trylock(&b->conf_sem))
1540 return 0;
1541
1542 deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);
1543
1544 vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
1545 deflated_frames);
1546
1547 /*
1548 * Delay future inflation for some time to mitigate situations in
1549 * which the balloon continuously grows and shrinks. Use WRITE_ONCE() since
1550 * the access is asynchronous.
1551 */
1552 WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);
1553
1554 up_read(&b->conf_sem);
1555
1556 return deflated_frames;
1557 }
1558
1559 /**
1560 * vmballoon_shrinker_count() - return the number of ballooned pages.
1561 * @shrinker: pointer to the balloon shrinker.
1562 * @sc: page reclaim information.
1563 *
1564 * Returns: number of 4k pages that are allocated for the balloon and can
1565 * therefore be reclaimed under pressure.
1566 */
1567 static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
1568 struct shrink_control *sc)
1569 {
1570 struct vmballoon *b = &balloon;
1571
1572 return atomic64_read(&b->size);
1573 }
1574
1575 static void vmballoon_unregister_shrinker(struct vmballoon *b)
1576 {
1577 if (b->shrinker_registered)
1578 unregister_shrinker(&b->shrinker);
1579 b->shrinker_registered = false;
1580 }
1581
1582 static int vmballoon_register_shrinker(struct vmballoon *b)
1583 {
1584 int r;
1585
1586 /* Do nothing if the shrinker is not enabled */
1587 if (!vmwballoon_shrinker_enable)
1588 return 0;
1589
1590 b->shrinker.scan_objects = vmballoon_shrinker_scan;
1591 b->shrinker.count_objects = vmballoon_shrinker_count;
1592 b->shrinker.seeks = DEFAULT_SEEKS;
1593
1594 r = register_shrinker(&b->shrinker);
1595
1596 if (r == 0)
1597 b->shrinker_registered = true;
1598
1599 return r;
1600 }
1601
1602 /*
1603 * DEBUGFS Interface
1604 */
1605 #ifdef CONFIG_DEBUG_FS
1606
1607 static const char * const vmballoon_stat_page_names[] = {
1608 [VMW_BALLOON_PAGE_STAT_ALLOC] = "alloc",
1609 [VMW_BALLOON_PAGE_STAT_ALLOC_FAIL] = "allocFail",
1610 [VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC] = "errAlloc",
1611 [VMW_BALLOON_PAGE_STAT_REFUSED_FREE] = "errFree",
1612 [VMW_BALLOON_PAGE_STAT_FREE] = "free"
1613 };
1614
1615 static const char * const vmballoon_stat_names[] = {
1616 [VMW_BALLOON_STAT_TIMER] = "timer",
1617 [VMW_BALLOON_STAT_DOORBELL] = "doorbell",
1618 [VMW_BALLOON_STAT_RESET] = "reset",
1619 [VMW_BALLOON_STAT_SHRINK] = "shrink",
1620 [VMW_BALLOON_STAT_SHRINK_FREE] = "shrinkFree"
1621 };
1622
1623 static int vmballoon_enable_stats(struct vmballoon *b)
1624 {
1625 int r = 0;
1626
1627 down_write(&b->conf_sem);
1628
1629 /* did we somehow race with another reader which enabled stats? */
1630 if (b->stats)
1631 goto out;
1632
1633 b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
1634
1635 if (!b->stats) {
1636 /* allocation failed */
1637 r = -ENOMEM;
1638 goto out;
1639 }
1640 static_key_enable(&balloon_stat_enabled.key);
1641 out:
1642 up_write(&b->conf_sem);
1643 return r;
1644 }
1645
1646 /**
1647 * vmballoon_debug_show - shows statistics of balloon operations.
1648 * @f: pointer to the &struct seq_file.
1649 * @offset: ignored.
1650 *
1651 * Provides the statistics that can be accessed in vmmemctl in the debugfs.
1652 * To avoid the overhead - mainly that of memory - of collecting the statistics,
1653 * we only collect statistics after the first time the counters are read.
1654 *
1655 * Return: zero on success or an error code.
1656 */
1657 static int vmballoon_debug_show(struct seq_file *f, void *offset)
1658 {
1659 struct vmballoon *b = f->private;
1660 int i, j;
1661
1662 /* enables stats if they are disabled */
1663 if (!b->stats) {
1664 int r = vmballoon_enable_stats(b);
1665
1666 if (r)
1667 return r;
1668 }
1669
1670 /* format capabilities info */
1671 seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
1672 VMW_BALLOON_CAPABILITIES);
1673 seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
1674 seq_printf(f, "%-22s: %16s\n", "is resetting",
1675 b->reset_required ? "y" : "n");
1676
1677 /* format size info */
1678 seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
1679 seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
1680
1681 for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
1682 if (vmballoon_cmd_names[i] == NULL)
1683 continue;
1684
1685 seq_printf(f, "%-22s: %16llu (%llu failed)\n",
1686 vmballoon_cmd_names[i],
1687 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
1688 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
1689 }
1690
1691 for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
1692 seq_printf(f, "%-22s: %16llu\n",
1693 vmballoon_stat_names[i],
1694 atomic64_read(&b->stats->general_stat[i]));
1695
1696 for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
1697 for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
1698 seq_printf(f, "%-18s(%s): %16llu\n",
1699 vmballoon_stat_page_names[i],
1700 vmballoon_page_size_names[j],
1701 atomic64_read(&b->stats->page_stat[i][j]));
1702 }
1703
1704 return 0;
1705 }
1706
1707 DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
1708
1709 static void __init vmballoon_debugfs_init(struct vmballoon *b)
1710 {
1711 b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
1712 &vmballoon_debug_fops);
1713 }
1714
1715 static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
1716 {
1717 static_key_disable(&balloon_stat_enabled.key);
1718 debugfs_remove(b->dbg_entry);
1719 kfree(b->stats);
1720 b->stats = NULL;
1721 }
1722
1723 #else
1724
1725 static inline void vmballoon_debugfs_init(struct vmballoon *b)
1726 {
1727 }
1728
1729 static inline void vmballoon_debugfs_exit(struct vmballoon *b)
1730 {
1731 }
1732
1733 #endif /* CONFIG_DEBUG_FS */
1734
1735
1736 #ifdef CONFIG_BALLOON_COMPACTION
1737
1738 static int vmballoon_init_fs_context(struct fs_context *fc)
1739 {
1740 return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
1741 }
1742
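/*
 * "balloon-vmware" is an internal pseudo filesystem that is never mounted from
 * userspace. Its only purpose is to provide the superblock from which
 * vmballoon_compaction_init() allocates the anonymous inode whose
 * address_space (using &balloon_aops) backs ballooned pages, which is what
 * lets the balloon_compaction core treat them as movable.
 */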
1743 static struct file_system_type vmballoon_fs = {
1744 .name = "balloon-vmware",
1745 .init_fs_context = vmballoon_init_fs_context,
1746 .kill_sb = kill_anon_super,
1747 };
1748
1749 static struct vfsmount *vmballoon_mnt;
1750
1751 /**
1752 * vmballoon_migratepage() - migrates a balloon page.
1753 * @b_dev_info: balloon device information descriptor.
1754 * @newpage: the page to which @page should be migrated.
1755 * @page: a ballooned page that should be migrated.
1756 * @mode: migration mode, ignored.
1757 *
1758  * This function is largely open-coded, but that is dictated by the interface
1759  * that balloon_compaction provides.
1760 *
1761 * Return: zero on success, -EAGAIN when migration cannot be performed
1762 * momentarily, and -EBUSY if migration failed and should be retried
1763 * with that specific page.
1764 */
1765 static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
1766 struct page *newpage, struct page *page,
1767 enum migrate_mode mode)
1768 {
1769 unsigned long status, flags;
1770 struct vmballoon *b;
1771 int ret;
1772
1773 b = container_of(b_dev_info, struct vmballoon, b_dev_info);
1774
1775 /*
1776 	 * If the semaphore is taken, there is an ongoing configuration change
1777 * (i.e., balloon reset), so try again.
1778 */
1779 if (!down_read_trylock(&b->conf_sem))
1780 return -EAGAIN;
1781
1782 spin_lock(&b->comm_lock);
1783 /*
1784 * We must start by deflating and not inflating, as otherwise the
1785 * hypervisor may tell us that it has enough memory and the new page is
1786 * not needed. Since the old page is isolated, we cannot use the list
1787 * interface to unlock it, as the LRU field is used for isolation.
1788 * Instead, we use the native interface directly.
1789 */
1790 vmballoon_add_page(b, 0, page);
1791 status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
1792 VMW_BALLOON_DEFLATE);
1793
1794 if (status == VMW_BALLOON_SUCCESS)
1795 status = vmballoon_status_page(b, 0, &page);
1796
1797 /*
1798 * If a failure happened, let the migration mechanism know that it
1799 * should not retry.
1800 */
1801 if (status != VMW_BALLOON_SUCCESS) {
1802 spin_unlock(&b->comm_lock);
1803 ret = -EBUSY;
1804 goto out_unlock;
1805 }
1806
1807 /*
1808 * The page is isolated, so it is safe to delete it without holding
1809 	 * @pages_lock. We keep holding @comm_lock since we will need it in a
1810 * second.
1811 */
1812 balloon_page_delete(page);
1813
1814 put_page(page);
1815
1816 /* Inflate */
1817 vmballoon_add_page(b, 0, newpage);
1818 status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
1819 VMW_BALLOON_INFLATE);
1820
1821 if (status == VMW_BALLOON_SUCCESS)
1822 status = vmballoon_status_page(b, 0, &newpage);
1823
1824 spin_unlock(&b->comm_lock);
1825
1826 if (status != VMW_BALLOON_SUCCESS) {
1827 /*
1828 * A failure happened. While we can deflate the page we just
1829 * inflated, this deflation can also encounter an error. Instead
1830 * we will decrease the size of the balloon to reflect the
1831 * change and report failure.
1832 */
1833 atomic64_dec(&b->size);
1834 ret = -EBUSY;
1835 } else {
1836 /*
1837 * Success. Take a reference for the page, and we will add it to
1838 * the list after acquiring the lock.
1839 */
1840 get_page(newpage);
1841 ret = MIGRATEPAGE_SUCCESS;
1842 }
1843
1844 /* Update the balloon list under the @pages_lock */
1845 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1846
1847 /*
1848 * On inflation success, we already took a reference for the @newpage.
1849 	 * If we succeeded, just insert it into the list and update the statistics
1850 * under the lock.
1851 */
1852 if (ret == MIGRATEPAGE_SUCCESS) {
1853 balloon_page_insert(&b->b_dev_info, newpage);
1854 __count_vm_event(BALLOON_MIGRATE);
1855 }
1856
1857 /*
1858 	 * We deflated successfully, so regardless of whether inflation succeeded,
1859 	 * we need to reduce the number of isolated_pages.
1860 */
1861 b->b_dev_info.isolated_pages--;
1862 spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1863
1864 out_unlock:
1865 up_read(&b->conf_sem);
1866 return ret;
1867 }
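/*
 * Summary of the lock ordering used above: @conf_sem is taken first (read,
 * trylock), then @comm_lock for the hypervisor communication, which is dropped
 * before @pages_lock is acquired. @comm_lock and @pages_lock are therefore
 * never held at the same time; only @conf_sem is held across both.
 */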
1868
1869 /**
1870  * vmballoon_compaction_deinit() - removes compaction-related data.
1871 *
1872 * @b: pointer to the balloon.
1873 */
1874 static void vmballoon_compaction_deinit(struct vmballoon *b)
1875 {
1876 if (!IS_ERR(b->b_dev_info.inode))
1877 iput(b->b_dev_info.inode);
1878
1879 b->b_dev_info.inode = NULL;
1880 kern_unmount(vmballoon_mnt);
1881 vmballoon_mnt = NULL;
1882 }
1883
1884 /**
1885  * vmballoon_compaction_init() - initializes compaction for the balloon.
1886 *
1887 * @b: pointer to the balloon.
1888 *
1889  * If a failure occurs during initialization, this function does not perform
1890  * cleanup; in that case, the caller must call vmballoon_compaction_deinit()
1891  * itself.
1892 *
1893 * Return: zero on success or error code on failure.
1894 */
1895 static __init int vmballoon_compaction_init(struct vmballoon *b)
1896 {
1897 vmballoon_mnt = kern_mount(&vmballoon_fs);
1898 if (IS_ERR(vmballoon_mnt))
1899 return PTR_ERR(vmballoon_mnt);
1900
1901 b->b_dev_info.migratepage = vmballoon_migratepage;
1902 b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);
1903
1904 if (IS_ERR(b->b_dev_info.inode))
1905 return PTR_ERR(b->b_dev_info.inode);
1906
1907 b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
1908 return 0;
1909 }
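/*
 * Expected caller pattern, a sketch that mirrors vmballoon_init() below: since
 * no cleanup is performed here on failure, the caller is responsible for it,
 * e.g.:
 *
 *	error = vmballoon_compaction_init(b);
 *	if (error)
 *		vmballoon_compaction_deinit(b);
 */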
1910
1911 #else /* CONFIG_BALLOON_COMPACTION */
1912
1913 static void vmballoon_compaction_deinit(struct vmballoon *b)
1914 {
1915 }
1916
1917 static int vmballoon_compaction_init(struct vmballoon *b)
1918 {
1919 return 0;
1920 }
1921
1922 #endif /* CONFIG_BALLOON_COMPACTION */
1923
1924 static int __init vmballoon_init(void)
1925 {
1926 int error;
1927
1928 /*
1929 * Check if we are running on VMware's hypervisor and bail out
1930 * if we are not.
1931 */
1932 if (x86_hyper_type != X86_HYPER_VMWARE)
1933 return -ENODEV;
1934
1935 INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
1936
1937 error = vmballoon_register_shrinker(&balloon);
1938 if (error)
1939 goto fail;
1940
1941 /*
1942 	 * Compaction must be initialized after the call to
1943 	 * balloon_devinfo_init().
1944 */
1945 balloon_devinfo_init(&balloon.b_dev_info);
1946 error = vmballoon_compaction_init(&balloon);
1947 if (error)
1948 goto fail;
1949
1950 INIT_LIST_HEAD(&balloon.huge_pages);
1951 spin_lock_init(&balloon.comm_lock);
1952 init_rwsem(&balloon.conf_sem);
1953 balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
1954 balloon.batch_page = NULL;
1955 balloon.page = NULL;
1956 balloon.reset_required = true;
1957
1958 queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
1959
1960 vmballoon_debugfs_init(&balloon);
1961
1962 return 0;
1963 fail:
1964 vmballoon_unregister_shrinker(&balloon);
1965 vmballoon_compaction_deinit(&balloon);
1966 return error;
1967 }
1968
1969 /*
1970 * Using late_initcall() instead of module_init() allows the balloon to use the
1971  * VMCI doorbell even when the balloon is built into the kernel. Otherwise,
1972  * VMCI is probed only after the balloon is initialized. If the balloon is used
1973 * as a module, late_initcall() is equivalent to module_init().
1974 */
1975 late_initcall(vmballoon_init);
1976
1977 static void __exit vmballoon_exit(void)
1978 {
1979 vmballoon_unregister_shrinker(&balloon);
1980 vmballoon_vmci_cleanup(&balloon);
1981 cancel_delayed_work_sync(&balloon.dwork);
1982
1983 vmballoon_debugfs_exit(&balloon);
1984
1985 /*
1986 	 * Deallocate all reserved memory and reset the connection with the monitor.
1987 	 * Reset the connection before deallocating memory to avoid the potential for
1988 	 * additional spurious resets caused by the guest touching deallocated pages.
1989 */
1990 vmballoon_send_start(&balloon, 0);
1991 vmballoon_pop(&balloon);
1992
1993 	/* Only once the balloon has been popped can compaction be deinitialized */
1994 vmballoon_compaction_deinit(&balloon);
1995 }
1996 module_exit(vmballoon_exit);
1997